// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_MIPS)

#include "bootstrapper.h"
#include "code-stubs.h"
#include "codegen.h"
#include "regexp-macro-assembler.h"

namespace v8 {
namespace internal {


#define __ ACCESS_MASM(masm)

static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cc,
                                          bool never_nan_nan);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* rhs_not_nan,
                                    Label* slow,
                                    bool strict);
static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs);


// Check if the operand is a heap number.
static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand,
                                   Register scratch1, Register scratch2,
                                   Label* not_a_heap_number) {
  __ lw(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset));
  __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex);
  __ Branch(not_a_heap_number, ne, scratch1, Operand(scratch2));
}


void ToNumberStub::Generate(MacroAssembler* masm) {
  // The ToNumber stub takes one argument in a0.
  Label check_heap_number, call_builtin;
  __ JumpIfNotSmi(a0, &check_heap_number);
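  // A Smi is already a number, so return it unchanged. The mov below
  // executes in the branch delay slot of the Ret (USE_DELAY_SLOT).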
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);

  __ bind(&check_heap_number);
  EmitCheckForHeapNumber(masm, a0, a1, t0, &call_builtin);
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);

  __ bind(&call_builtin);
  __ push(a0);
  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
}


void FastNewClosureStub::Generate(MacroAssembler* masm) {
  // Create a new closure from the given function info in new
  // space. Set the context to the current context in cp.
  Label gc;

  // Pop the function info from the stack.
  __ pop(a3);

  // Attempt to allocate new JSFunction in new space.
  __ AllocateInNewSpace(JSFunction::kSize,
                        v0,
                        a1,
                        a2,
                        &gc,
                        TAG_OBJECT);

  int map_index = (language_mode_ == CLASSIC_MODE)
      ? Context::FUNCTION_MAP_INDEX
      : Context::STRICT_MODE_FUNCTION_MAP_INDEX;

  // Compute the function map in the current global context and set that
  // as the map of the allocated object.
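  // Context::SlotOffset(i) already folds the heap-object tag into the
  // offset (FixedArray::kHeaderSize + i * kPointerSize - kHeapObjectTag),
  // which is why a plain MemOperand rather than FieldMemOperand is used.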
  __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
  __ lw(a2, MemOperand(a2, Context::SlotOffset(map_index)));
  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));

  // Initialize the rest of the function. We don't have to update the
  // write barrier because the allocated object is in new space.
  __ LoadRoot(a1, Heap::kEmptyFixedArrayRootIndex);
  __ LoadRoot(a2, Heap::kTheHoleValueRootIndex);
  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
  __ sw(a1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
  __ sw(a1, FieldMemOperand(v0, JSObject::kElementsOffset));
  __ sw(a2, FieldMemOperand(v0, JSFunction::kPrototypeOrInitialMapOffset));
  __ sw(a3, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
  __ sw(cp, FieldMemOperand(v0, JSFunction::kContextOffset));
  __ sw(a1, FieldMemOperand(v0, JSFunction::kLiteralsOffset));
  __ sw(t0, FieldMemOperand(v0, JSFunction::kNextFunctionLinkOffset));

  // Initialize the code pointer in the function to be the one
  // found in the shared function info object.
  __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
  __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));

  // Return result. The argument function info has been popped already.
  __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
  __ Ret();

  // Create a new closure through the slower runtime call.
  __ bind(&gc);
  __ LoadRoot(t0, Heap::kFalseValueRootIndex);
  __ Push(cp, a3, t0);
  __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
}


void FastNewContextStub::Generate(MacroAssembler* masm) {
  // Try to allocate the context in new space.
  Label gc;
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;

  // Attempt to allocate the context in new space.
  __ AllocateInNewSpace(FixedArray::SizeFor(length),
                        v0,
                        a1,
                        a2,
                        &gc,
                        TAG_OBJECT);

  // Load the function from the stack.
  __ lw(a3, MemOperand(sp, 0));

  // Set up the object header.
  __ LoadRoot(a1, Heap::kFunctionContextMapRootIndex);
  __ li(a2, Operand(Smi::FromInt(length)));
  __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
  __ sw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));

  // Set up the fixed slots, copy the global object from the previous context.
  __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ li(a1, Operand(Smi::FromInt(0)));
  __ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
  __ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
  __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX)));
  __ sw(a2, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX)));

  // Initialize the rest of the slots to undefined.
  __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
    __ sw(a1, MemOperand(v0, Context::SlotOffset(i)));
  }

  // Remove the on-stack argument and return.
  __ mov(cp, v0);
  __ DropAndRet(1);

  // Need to collect. Call into runtime system.
  __ bind(&gc);
  __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
}


void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  //
  // [sp]: function.
  // [sp + kPointerSize]: serialized scope info

  // Try to allocate the context in new space.
  Label gc;
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
  __ AllocateInNewSpace(FixedArray::SizeFor(length),
                        v0, a1, a2, &gc, TAG_OBJECT);

  // Load the function from the stack.
  __ lw(a3, MemOperand(sp, 0));

  // Load the serialized scope info from the stack.
  __ lw(a1, MemOperand(sp, 1 * kPointerSize));

  // Set up the object header.
  __ LoadRoot(a2, Heap::kBlockContextMapRootIndex);
  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
  __ li(a2, Operand(Smi::FromInt(length)));
  __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));

  // If this block context is nested in the global context we get a smi
  // sentinel instead of a function. The block context should get the
  // canonical empty function of the global context as its closure which
  // we still have to look up.
  Label after_sentinel;
  __ JumpIfNotSmi(a3, &after_sentinel);
  if (FLAG_debug_code) {
    const char* message = "Expected 0 as a Smi sentinel";
    __ Assert(eq, message, a3, Operand(zero_reg));
  }
  __ lw(a3, GlobalObjectOperand());
  __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalContextOffset));
  __ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX));
  __ bind(&after_sentinel);

  // Set up the fixed slots, copy the global object from the previous context.
  __ lw(a2, ContextOperand(cp, Context::GLOBAL_INDEX));
  __ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX));
  __ sw(cp, ContextOperand(v0, Context::PREVIOUS_INDEX));
  __ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX));
  __ sw(a2, ContextOperand(v0, Context::GLOBAL_INDEX));

  // Initialize the rest of the slots to the hole value.
  __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
  for (int i = 0; i < slots_; i++) {
    __ sw(a1, ContextOperand(v0, i + Context::MIN_CONTEXT_SLOTS));
  }

  // Remove the on-stack argument and return.
  __ mov(cp, v0);
  __ DropAndRet(2);

  // Need to collect. Call into runtime system.
  __ bind(&gc);
  __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
}


static void GenerateFastCloneShallowArrayCommon(
    MacroAssembler* masm,
    int length,
    FastCloneShallowArrayStub::Mode mode,
    Label* fail) {
  // Registers on entry:
  // a3: boilerplate literal array.
  ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);

  // All sizes here are multiples of kPointerSize.
  int elements_size = 0;
  if (length > 0) {
    elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
        ? FixedDoubleArray::SizeFor(length)
        : FixedArray::SizeFor(length);
  }
  int size = JSArray::kSize + elements_size;

  // Allocate both the JS array and the elements array in one big
  // allocation. This avoids multiple limit checks.
  __ AllocateInNewSpace(size,
                        v0,
                        a1,
                        a2,
                        fail,
                        TAG_OBJECT);

  // Copy the JS array part.
  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
    if ((i != JSArray::kElementsOffset) || (length == 0)) {
      __ lw(a1, FieldMemOperand(a3, i));
      __ sw(a1, FieldMemOperand(v0, i));
    }
  }

  if (length > 0) {
    // Get hold of the elements array of the boilerplate and set up the
    // elements pointer in the resulting object.
    __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
    __ Addu(a2, v0, Operand(JSArray::kSize));
    __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));

    // Copy the elements array.
    ASSERT((elements_size % kPointerSize) == 0);
    __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
  }
}

void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  //
  // [sp]: constant elements.
  // [sp + kPointerSize]: literal index.
  // [sp + (2 * kPointerSize)]: literals array.

  // Load boilerplate object into a3 and check if we need to create a
  // boilerplate.
  Label slow_case;
  __ lw(a3, MemOperand(sp, 2 * kPointerSize));
  __ lw(a0, MemOperand(sp, 1 * kPointerSize));
  __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
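  // The literal index is a Smi, i.e. already shifted left by kSmiTagSize
  // (1), so shifting by kPointerSizeLog2 - kSmiTagSize scales it up to a
  // byte offset into the literals array.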
  __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(t0, a3, t0);
  __ lw(a3, MemOperand(t0));
  __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
  __ Branch(&slow_case, eq, a3, Operand(t1));

  FastCloneShallowArrayStub::Mode mode = mode_;
  if (mode == CLONE_ANY_ELEMENTS) {
    Label double_elements, check_fast_elements;
    __ lw(v0, FieldMemOperand(a3, JSArray::kElementsOffset));
    __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
    __ LoadRoot(t1, Heap::kFixedCOWArrayMapRootIndex);
    __ Branch(&check_fast_elements, ne, v0, Operand(t1));
    GenerateFastCloneShallowArrayCommon(masm, 0,
                                        COPY_ON_WRITE_ELEMENTS, &slow_case);
    // Return and remove the on-stack parameters.
    __ DropAndRet(3);

    __ bind(&check_fast_elements);
    __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
    __ Branch(&double_elements, ne, v0, Operand(t1));
    GenerateFastCloneShallowArrayCommon(masm, length_,
                                        CLONE_ELEMENTS, &slow_case);
    // Return and remove the on-stack parameters.
    __ DropAndRet(3);

    __ bind(&double_elements);
    mode = CLONE_DOUBLE_ELEMENTS;
    // Fall through to generate the code to handle double elements.
  }

  if (FLAG_debug_code) {
    const char* message;
    Heap::RootListIndex expected_map_index;
    if (mode == CLONE_ELEMENTS) {
      message = "Expected (writable) fixed array";
      expected_map_index = Heap::kFixedArrayMapRootIndex;
    } else if (mode == CLONE_DOUBLE_ELEMENTS) {
      message = "Expected (writable) fixed double array";
      expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
    } else {
      ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
      message = "Expected copy-on-write fixed array";
      expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
    }
    __ push(a3);
    __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
    __ lw(a3, FieldMemOperand(a3, HeapObject::kMapOffset));
    __ LoadRoot(at, expected_map_index);
    __ Assert(eq, message, a3, Operand(at));
    __ pop(a3);
  }

  GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);

  // Return and remove the on-stack parameters.
  __ DropAndRet(3);

  __ bind(&slow_case);
  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
}


void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  //
  // [sp]: object literal flags.
  // [sp + kPointerSize]: constant properties.
  // [sp + (2 * kPointerSize)]: literal index.
  // [sp + (3 * kPointerSize)]: literals array.

  // Load boilerplate object into a3 and check if we need to create a
  // boilerplate.
  Label slow_case;
  __ lw(a3, MemOperand(sp, 3 * kPointerSize));
  __ lw(a0, MemOperand(sp, 2 * kPointerSize));
  __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(a3, t0, a3);
  __ lw(a3, MemOperand(a3));
  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
  __ Branch(&slow_case, eq, a3, Operand(t0));

  // Check that the boilerplate contains only fast properties and we can
  // statically determine the instance size.
  int size = JSObject::kHeaderSize + length_ * kPointerSize;
  __ lw(a0, FieldMemOperand(a3, HeapObject::kMapOffset));
  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceSizeOffset));
  __ Branch(&slow_case, ne, a0, Operand(size >> kPointerSizeLog2));

  // Allocate the JS object and copy header together with all in-object
  // properties from the boilerplate.
  __ AllocateInNewSpace(size, v0, a1, a2, &slow_case, TAG_OBJECT);
  for (int i = 0; i < size; i += kPointerSize) {
    __ lw(a1, FieldMemOperand(a3, i));
    __ sw(a1, FieldMemOperand(v0, i));
  }

  // Return and remove the on-stack parameters.
  __ DropAndRet(4);

  __ bind(&slow_case);
  __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
}


// Takes a Smi and converts to an IEEE 64 bit floating point value in two
// registers.  The format is 1 sign bit, 11 exponent bits (biased 1023) and
// 52 fraction bits (20 in the first word, 32 in the second).  Zeros is a
// scratch register.  Destroys the source register.  No GC occurs during this
// stub so you don't have to set up the frame.
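// For example, the Smi encoding of 5 converts to the word pair
// 0x40140000:00000000, i.e. sign 0, biased exponent 1025 and top mantissa
// bits 0x40000, representing 1.25 * 2^2.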
class ConvertToDoubleStub : public CodeStub {
 public:
  ConvertToDoubleStub(Register result_reg_1,
                      Register result_reg_2,
                      Register source_reg,
                      Register scratch_reg)
      : result1_(result_reg_1),
        result2_(result_reg_2),
        source_(source_reg),
        zeros_(scratch_reg) { }

 private:
  Register result1_;
  Register result2_;
  Register source_;
  Register zeros_;

  // Minor key encoding in 16 bits.
  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
  class OpBits: public BitField<Token::Value, 2, 14> {};

  Major MajorKey() { return ConvertToDouble; }
  int MinorKey() {
    // Encode the parameters in a unique 16 bit value.
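    // For example, ConvertToDoubleStub(a3, a2, t6, t5) uses register codes
    // 7, 6, 14 and 13, which encode as
    // 7 + (6 << 4) + (14 << 8) + (13 << 12) = 0xde67.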
    return  result1_.code() +
           (result2_.code() << 4) +
           (source_.code() << 8) +
           (zeros_.code() << 12);
  }

  void Generate(MacroAssembler* masm);
};


void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
#ifndef BIG_ENDIAN_FLOATING_POINT
  Register exponent = result1_;
  Register mantissa = result2_;
#else
  Register exponent = result2_;
  Register mantissa = result1_;
#endif
  Label not_special;
  // Convert from Smi to integer.
  __ sra(source_, source_, kSmiTagSize);
  // Move sign bit from source to destination.  This works because the sign bit
  // in the exponent word of the double has the same position and polarity as
  // the 2's complement sign bit in a Smi.
  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
  __ And(exponent, source_, Operand(HeapNumber::kSignMask));
  // Subtract from 0 if source was negative.
  __ subu(at, zero_reg, source_);
  __ Movn(source_, at, exponent);

  // We have -1, 0 or 1, which we treat specially. Register source_ contains
  // absolute value: it is either equal to 1 (special case of -1 and 1),
  // greater than 1 (not a special case) or less than 1 (special case of 0).
  __ Branch(&not_special, gt, source_, Operand(1));

  // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
  const uint32_t exponent_word_for_1 =
      HeapNumber::kExponentBias << HeapNumber::kExponentShift;
  // Safe to use 'at' as dest reg here.
  __ Or(at, exponent, Operand(exponent_word_for_1));
  __ Movn(exponent, at, source_);  // Write exp when source not 0.
  // 1, 0 and -1 all have 0 for the second word.
  __ Ret(USE_DELAY_SLOT);
  __ mov(mantissa, zero_reg);

  __ bind(&not_special);
  // Count leading zeros.
  // Gets the wrong answer for 0, but we already checked for that case above.
  __ Clz(zeros_, source_);
  // Compute exponent and or it into the exponent register.
  // We use mantissa as a scratch register here.
  __ li(mantissa, Operand(31 + HeapNumber::kExponentBias));
  __ subu(mantissa, mantissa, zeros_);
  __ sll(mantissa, mantissa, HeapNumber::kExponentShift);
  __ Or(exponent, exponent, mantissa);

  // Shift up the source chopping the top bit off.
  __ Addu(zeros_, zeros_, Operand(1));
  // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
  __ sllv(source_, source_, zeros_);
  // Compute lower part of fraction (last 12 bits).
  __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord);
  // And the top (top 20 bits).
  __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord);

  __ Ret(USE_DELAY_SLOT);
  __ or_(exponent, exponent, source_);
}


void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
                                   FloatingPointHelper::Destination destination,
                                   Register scratch1,
                                   Register scratch2) {
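  // By convention the left operand arrives in a1 and the right operand in
  // a0; the left ends up in f12 (or, for kCoreRegisters, the a1:a0 pair)
  // and the right in f14 (or the a3:a2 pair).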
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    __ sra(scratch1, a0, kSmiTagSize);
    __ mtc1(scratch1, f14);
    __ cvt_d_w(f14, f14);
    __ sra(scratch1, a1, kSmiTagSize);
    __ mtc1(scratch1, f12);
    __ cvt_d_w(f12, f12);
    if (destination == kCoreRegisters) {
      __ Move(a2, a3, f14);
      __ Move(a0, a1, f12);
    }
  } else {
    ASSERT(destination == kCoreRegisters);
    // Write Smi from a0 to a3 and a2 in double format.
    __ mov(scratch1, a0);
    ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2);
    __ push(ra);
    __ Call(stub1.GetCode());
    // Write Smi from a1 to a1 and a0 in double format.
    __ mov(scratch1, a1);
    ConvertToDoubleStub stub2(a1, a0, scratch1, scratch2);
    __ Call(stub2.GetCode());
    __ pop(ra);
  }
}


void FloatingPointHelper::LoadOperands(
    MacroAssembler* masm,
    FloatingPointHelper::Destination destination,
    Register heap_number_map,
    Register scratch1,
    Register scratch2,
    Label* slow) {

  // Load right operand (a0) to f14 or a2/a3.
  LoadNumber(masm, destination,
             a0, f14, a2, a3, heap_number_map, scratch1, scratch2, slow);

  // Load left operand (a1) to f12 or a0/a1.
  LoadNumber(masm, destination,
             a1, f12, a0, a1, heap_number_map, scratch1, scratch2, slow);
}


void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
                                     Destination destination,
                                     Register object,
                                     FPURegister dst,
                                     Register dst1,
                                     Register dst2,
                                     Register heap_number_map,
                                     Register scratch1,
                                     Register scratch2,
                                     Label* not_number) {
  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }

  Label is_smi, done;

  // Smi-check.
  __ UntagAndJumpIfSmi(scratch1, object, &is_smi);
  // Heap number check.
  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);

  // Handle loading a double from a heap number.
  if (CpuFeatures::IsSupported(FPU) &&
      destination == kFPURegisters) {
    CpuFeatures::Scope scope(FPU);
    // Load the double from tagged HeapNumber to double register.

    // ARM uses a workaround here because of the unaligned HeapNumber
    // kValueOffset. On MIPS this workaround is built into ldc1 so there's no
    // point in generating even more instructions.
    __ ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
  } else {
    ASSERT(destination == kCoreRegisters);
    // Load the double from heap number to dst1 and dst2 in double format.
    __ lw(dst1, FieldMemOperand(object, HeapNumber::kValueOffset));
    __ lw(dst2, FieldMemOperand(object,
        HeapNumber::kValueOffset + kPointerSize));
  }
  __ Branch(&done);

  // Handle loading a double from a smi.
  __ bind(&is_smi);
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    // Convert smi to double using FPU instructions.
    __ mtc1(scratch1, dst);
    __ cvt_d_w(dst, dst);
    if (destination == kCoreRegisters) {
      // Load the converted smi to dst1 and dst2 in double format.
      __ Move(dst1, dst2, dst);
    }
  } else {
    ASSERT(destination == kCoreRegisters);
    // Write smi to dst1 and dst2 double format.
    __ mov(scratch1, object);
    ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
    __ push(ra);
    __ Call(stub.GetCode());
    __ pop(ra);
  }

  __ bind(&done);
}


void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
                                               Register object,
                                               Register dst,
                                               Register heap_number_map,
                                               Register scratch1,
                                               Register scratch2,
                                               Register scratch3,
                                               FPURegister double_scratch,
                                               Label* not_number) {
  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }
  Label done;
  Label not_in_int32_range;

  __ UntagAndJumpIfSmi(dst, object, &done);
  __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
  __ Branch(not_number, ne, scratch1, Operand(heap_number_map));
  __ ConvertToInt32(object,
                    dst,
                    scratch1,
                    scratch2,
                    double_scratch,
                    &not_in_int32_range);
  __ jmp(&done);

  __ bind(&not_in_int32_range);
  __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
  __ lw(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));

  __ EmitOutOfInt32RangeTruncate(dst,
                                 scratch1,
                                 scratch2,
                                 scratch3);

  __ bind(&done);
}


void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
                                             Register int_scratch,
                                             Destination destination,
                                             FPURegister double_dst,
                                             Register dst1,
                                             Register dst2,
                                             Register scratch2,
                                             FPURegister single_scratch) {
  ASSERT(!int_scratch.is(scratch2));
  ASSERT(!int_scratch.is(dst1));
  ASSERT(!int_scratch.is(dst2));

  Label done;

  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    __ mtc1(int_scratch, single_scratch);
    __ cvt_d_w(double_dst, single_scratch);
    if (destination == kCoreRegisters) {
      __ Move(dst1, dst2, double_dst);
    }
  } else {
    Label fewer_than_20_useful_bits;
    // Expected output:
    // |         dst2            |         dst1            |
    // | s |   exp   |              mantissa               |
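    // For example, int_scratch = 5 produces dst2 = 0x40140000 and
    // dst1 = 0, the same bit pattern the FPU path gets from cvt_d_w.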

    // Check for zero.
    __ mov(dst2, int_scratch);
    __ mov(dst1, int_scratch);
    __ Branch(&done, eq, int_scratch, Operand(zero_reg));

    // Preload the sign of the value.
    __ And(dst2, int_scratch, Operand(HeapNumber::kSignMask));
    // Get the absolute value of the object (as an unsigned integer).
    Label skip_sub;
    __ Branch(&skip_sub, ge, dst2, Operand(zero_reg));
    __ Subu(int_scratch, zero_reg, int_scratch);
    __ bind(&skip_sub);

    // Get mantissa[51:20].

    // Get the position of the first set bit.
    __ Clz(dst1, int_scratch);
    __ li(scratch2, 31);
    __ Subu(dst1, scratch2, dst1);

    // Set the exponent.
    __ Addu(scratch2, dst1, Operand(HeapNumber::kExponentBias));
    __ Ins(dst2, scratch2,
        HeapNumber::kExponentShift, HeapNumber::kExponentBits);
    // Clear the leading set bit (it becomes the implicit mantissa bit).
    __ li(scratch2, Operand(1));
    __ sllv(scratch2, scratch2, dst1);
    __ li(at, -1);
    __ Xor(scratch2, scratch2, at);
    __ And(int_scratch, int_scratch, scratch2);

    // Get the number of bits to set in the lower part of the mantissa.
    __ Subu(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
    __ Branch(&fewer_than_20_useful_bits, lt, scratch2, Operand(zero_reg));
    // Set the higher 20 bits of the mantissa.
    __ srlv(at, int_scratch, scratch2);
    __ or_(dst2, dst2, at);
    __ li(at, 32);
    __ subu(scratch2, at, scratch2);
    __ sllv(dst1, int_scratch, scratch2);
    __ Branch(&done);

    __ bind(&fewer_than_20_useful_bits);
    __ li(at, HeapNumber::kMantissaBitsInTopWord);
    __ subu(scratch2, at, dst1);
    __ sllv(scratch2, int_scratch, scratch2);
    __ Or(dst2, dst2, scratch2);
    // Set dst1 to 0.
    __ mov(dst1, zero_reg);
  }
  __ bind(&done);
}


void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
                                                  Register object,
                                                  Destination destination,
                                                  DoubleRegister double_dst,
                                                  Register dst1,
                                                  Register dst2,
                                                  Register heap_number_map,
                                                  Register scratch1,
                                                  Register scratch2,
                                                  FPURegister single_scratch,
                                                  Label* not_int32) {
  ASSERT(!scratch1.is(object) && !scratch2.is(object));
  ASSERT(!scratch1.is(scratch2));
  ASSERT(!heap_number_map.is(object) &&
         !heap_number_map.is(scratch1) &&
         !heap_number_map.is(scratch2));

  Label done, obj_is_not_smi;

  __ JumpIfNotSmi(object, &obj_is_not_smi);
  __ SmiUntag(scratch1, object);
  ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2,
                     scratch2, single_scratch);
  __ Branch(&done);

  __ bind(&obj_is_not_smi);
  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }
  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);

  // Load the number.
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    // Load the double value.
    __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));

    Register except_flag = scratch2;
    __ EmitFPUTruncate(kRoundToZero,
                       single_scratch,
                       double_dst,
                       scratch1,
                       except_flag,
                       kCheckForInexactConversion);

    // Jump to not_int32 if the operation did not succeed.
    __ Branch(not_int32, ne, except_flag, Operand(zero_reg));

    if (destination == kCoreRegisters) {
      __ Move(dst1, dst2, double_dst);
    }

  } else {
    ASSERT(!scratch1.is(object) && !scratch2.is(object));
    // Load the double value in the destination registers.
    __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
    __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));

    // Check for 0 and -0.
    __ And(scratch1, dst1, Operand(~HeapNumber::kSignMask));
    __ Or(scratch1, scratch1, Operand(dst2));
    __ Branch(&done, eq, scratch1, Operand(zero_reg));

    // Check that the value can be exactly represented by a 32-bit integer.
    // Jump to not_int32 if that's not the case.
    DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);

    // dst1 and dst2 were trashed. Reload the double value.
    __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
    __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
  }

  __ bind(&done);
}


void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
                                            Register object,
                                            Register dst,
                                            Register heap_number_map,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3,
                                            DoubleRegister double_scratch,
                                            Label* not_int32) {
  ASSERT(!dst.is(object));
  ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
  ASSERT(!scratch1.is(scratch2) &&
         !scratch1.is(scratch3) &&
         !scratch2.is(scratch3));

  Label done;

  __ UntagAndJumpIfSmi(dst, object, &done);

  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }
  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);

  // Object is a heap number.
  // Convert the floating point value to a 32-bit integer.
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    // Load the double value.
    __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));

    FPURegister single_scratch = double_scratch.low();
    Register except_flag = scratch2;
    __ EmitFPUTruncate(kRoundToZero,
                       single_scratch,
                       double_scratch,
                       scratch1,
                       except_flag,
                       kCheckForInexactConversion);

    // Jump to not_int32 if the operation did not succeed.
    __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
    // Get the result in the destination register.
    __ mfc1(dst, single_scratch);

  } else {
    // Load the double value in the destination registers.
    __ lw(scratch2, FieldMemOperand(object, HeapNumber::kExponentOffset));
    __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMantissaOffset));

    // Check for 0 and -0.
    __ And(dst, scratch1, Operand(~HeapNumber::kSignMask));
    __ Or(dst, scratch2, Operand(dst));
    __ Branch(&done, eq, dst, Operand(zero_reg));

    DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);

    // Register state after DoubleIs32BitInteger:
    // dst: mantissa[51:20].
    // scratch2: 1.
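    // scratch3: 32 - (unbiased exponent), the shift amount computed by
    // DoubleIs32BitInteger and used below to realign the mantissa.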

    // Shift back the higher bits of the mantissa.
    __ srlv(dst, dst, scratch3);
    // Set the implicit first bit.
    __ li(at, 32);
    __ subu(scratch3, at, scratch3);
    __ sllv(scratch2, scratch2, scratch3);
    __ Or(dst, dst, scratch2);
    // Set the sign.
    __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
    __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
    Label skip_sub;
    __ Branch(&skip_sub, ge, scratch1, Operand(zero_reg));
    __ Subu(dst, zero_reg, dst);
    __ bind(&skip_sub);
  }

  __ bind(&done);
}


void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
                                               Register src1,
                                               Register src2,
                                               Register dst,
                                               Register scratch,
                                               Label* not_int32) {
  // Get exponent alone in scratch.
  __ Ext(scratch,
         src1,
         HeapNumber::kExponentShift,
         HeapNumber::kExponentBits);

  // Subtract the bias from the exponent.
  __ Subu(scratch, scratch, Operand(HeapNumber::kExponentBias));

  // src1: higher (exponent) part of the double value.
  // src2: lower (mantissa) part of the double value.
  // scratch: unbiased exponent.

  // Fast cases. Check for obvious non 32-bit integer values.
  // Negative exponent cannot yield 32-bit integers.
  __ Branch(not_int32, lt, scratch, Operand(zero_reg));
  // Exponent greater than 31 cannot yield 32-bit integers.
  // Also, a positive value with an exponent equal to 31 is outside of the
  // signed 32-bit integer range.
  // Another way to put it is that if (exponent - signbit) > 30 then the
  // number cannot be represented as an int32.
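  // For example, -2^31 (sign 1, unbiased exponent 31) passes the test
  // below (31 - 1 = 30), while 2^31 (sign 0) fails it (31 - 0 > 30).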
  Register tmp = dst;
  __ srl(at, src1, 31);
  __ subu(tmp, scratch, at);
  __ Branch(not_int32, gt, tmp, Operand(30));
  // An int32 uses at most the top 30 mantissa bits, so if bits [21:0] of
  // the mantissa are not null the value cannot be a 32-bit integer.
  __ And(tmp, src2, 0x3fffff);
  __ Branch(not_int32, ne, tmp, Operand(zero_reg));

  // Otherwise the exponent needs to be big enough to shift all the
  // non-zero bits out of the fraction. So we need the (30 - exponent) last
  // bits of the 31 higher bits of the mantissa to be null.
  // Because bits [21:0] are null, we can check instead that the
  // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.

  // Get the 32 higher bits of the mantissa in dst.
  __ Ext(dst,
         src2,
         HeapNumber::kMantissaBitsInTopWord,
         32 - HeapNumber::kMantissaBitsInTopWord);
  __ sll(at, src1, HeapNumber::kNonMantissaBitsInTopWord);
  __ or_(dst, dst, at);

  // Create the mask and test the lower bits (of the higher bits).
  __ li(at, 32);
  __ subu(scratch, at, scratch);
  __ li(src2, 1);
  __ sllv(src1, src2, scratch);
  __ Subu(src1, src1, Operand(1));
  __ And(src1, dst, src1);
  __ Branch(not_int32, ne, src1, Operand(zero_reg));
}


void FloatingPointHelper::CallCCodeForDoubleOperation(
    MacroAssembler* masm,
    Token::Value op,
    Register heap_number_result,
    Register scratch) {
  // Using core registers:
  // a0: Left value (least significant part of mantissa).
  // a1: Left value (sign, exponent, top of mantissa).
  // a2: Right value (least significant part of mantissa).
  // a3: Right value (sign, exponent, top of mantissa).

  // Assert that heap_number_result is saved.
  // We currently always use s0 to pass it.
  ASSERT(heap_number_result.is(s0));

  // Push the current return address before the C call.
  __ push(ra);
  __ PrepareCallCFunction(4, scratch);  // Two doubles are 4 arguments.
  if (!IsMipsSoftFloatABI) {
    CpuFeatures::Scope scope(FPU);
    // The parameters for the runtime call were prepared in the a0-a3 core
    // registers, but the function we are calling is compiled with the
    // hard-float flag and expects the hard-float ABI (parameters in the
    // f12/f14 registers). Copy them from a0-a3 to the f12/f14 register
    // pairs.
    __ Move(f12, a0, a1);
    __ Move(f14, a2, a3);
  }
  {
    AllowExternalCallThatCantCauseGC scope(masm);
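    // CallCFunction is told there are 0 general-purpose arguments and 2
    // double arguments so it can set up whichever float ABI is in effect.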
    __ CallCFunction(
        ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
  }
  // Store answer in the overwritable heap number.
  if (!IsMipsSoftFloatABI) {
    CpuFeatures::Scope scope(FPU);
    // Double returned in register f0.
    __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
  } else {
    // Double returned in registers v0 and v1.
    __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset));
    __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset));
  }
  // Place heap_number_result in v0 and return to the pushed return address.
  __ pop(ra);
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, heap_number_result);
}


bool WriteInt32ToHeapNumberStub::IsPregenerated() {
  // These variants are compiled ahead of time.  See next method.
  if (the_int_.is(a1) &&
      the_heap_number_.is(v0) &&
      scratch_.is(a2) &&
      sign_.is(a3)) {
    return true;
  }
  if (the_int_.is(a2) &&
      the_heap_number_.is(v0) &&
      scratch_.is(a3) &&
      sign_.is(a0)) {
    return true;
  }
  // Other register combinations are generated as and when they are needed,
  // so it is unsafe to call them from stubs (we can't generate a stub while
  // we are generating a stub).
  return false;
}


void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
  WriteInt32ToHeapNumberStub stub1(a1, v0, a2, a3);
  WriteInt32ToHeapNumberStub stub2(a2, v0, a3, a0);
  stub1.GetCode()->set_is_pregenerated(true);
  stub2.GetCode()->set_is_pregenerated(true);
}


// See comment for class, this does NOT work for int32's that are in Smi range.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
  Label max_negative_int;
  // the_int_ has the answer which is a signed int32 but not a Smi.
  // We test for the special value that has a different exponent.
  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
  // Test sign, and save for later conditionals.
  __ And(sign_, the_int_, Operand(0x80000000u));
  __ Branch(&max_negative_int, eq, the_int_, Operand(0x80000000u));

  // Set up the correct exponent in scratch_. All non-Smi int32s have the
  // same exponent: a non-Smi integer is 1.xxx * 2^30, so the exponent is 30
  // (biased).
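  // For example, 2^30 (0x40000000, just outside Smi range) is written as
  // the word pair 0x41d00000:00000000, i.e. biased exponent 1053.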
  uint32_t non_smi_exponent =
      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
  __ li(scratch_, Operand(non_smi_exponent));
  // Set the sign bit in scratch_ if the value was negative.
  __ or_(scratch_, scratch_, sign_);
  // Subtract from 0 if the value was negative.
  __ subu(at, zero_reg, the_int_);
  __ Movn(the_int_, at, sign_);
  // We should be masking the implicit first digit of the mantissa away here,
  // but it just ends up combining harmlessly with the last digit of the
  // exponent that happens to be 1.  The sign bit is 0 so we shift 10 to get
  // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
  ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
  __ srl(at, the_int_, shift_distance);
  __ or_(scratch_, scratch_, at);
  __ sw(scratch_, FieldMemOperand(the_heap_number_,
                                  HeapNumber::kExponentOffset));
  __ sll(scratch_, the_int_, 32 - shift_distance);
  __ sw(scratch_, FieldMemOperand(the_heap_number_,
                                  HeapNumber::kMantissaOffset));
  __ Ret();

  __ bind(&max_negative_int);
  // The max negative int32 is stored as a positive number in the mantissa of
  // a double because it uses a sign bit instead of using two's complement.
  // The actual mantissa bits stored are all 0 because the implicit most
  // significant 1 bit is not stored.
  non_smi_exponent += 1 << HeapNumber::kExponentShift;
  __ li(scratch_, Operand(HeapNumber::kSignMask | non_smi_exponent));
  __ sw(scratch_,
        FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
  __ mov(scratch_, zero_reg);
  __ sw(scratch_,
        FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
  __ Ret();
}


// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cc,
                                          bool never_nan_nan) {
  Label not_identical;
  Label heap_number, return_equal;
  Register exp_mask_reg = t5;

  __ Branch(&not_identical, ne, a0, Operand(a1));

  // The two objects are identical. If we know that one of them isn't NaN then
  // we now know they test equal.
  if (cc != eq || !never_nan_nan) {
    __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));

    // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
    // so we do the second best thing - test it ourselves.
    // They are equal and they are not both Smis, so being identical neither
    // of them is a Smi. If it's not a heap number, then return equal.
    if (cc == less || cc == greater) {
      __ GetObjectType(a0, t4, t4);
      __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
    } else {
      __ GetObjectType(a0, t4, t4);
      __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
      // Comparing JS objects with <=, >= is complicated.
      if (cc != eq) {
        __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
        // Normally here we fall through to return_equal, but undefined is
        // special: (undefined == undefined) == true, but
        // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
        if (cc == less_equal || cc == greater_equal) {
          __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
          __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
          __ Branch(&return_equal, ne, a0, Operand(t2));
          if (cc == le) {
            // undefined <= undefined should fail.
            __ li(v0, Operand(GREATER));
          } else {
            // undefined >= undefined should fail.
            __ li(v0, Operand(LESS));
          }
          __ Ret();
        }
      }
    }
  }

  __ bind(&return_equal);

  if (cc == less) {
    __ li(v0, Operand(GREATER));  // Things aren't less than themselves.
  } else if (cc == greater) {
    __ li(v0, Operand(LESS));     // Things aren't greater than themselves.
  } else {
    __ mov(v0, zero_reg);         // Things are <=, >=, ==, === themselves.
  }
  __ Ret();

  if (cc != eq || !never_nan_nan) {
    // For less and greater we don't have to check for NaN since the result of
    // x < x is false regardless.  For the others here is some code to check
    // for NaN.
    if (cc != lt && cc != gt) {
      __ bind(&heap_number);
      // It is a heap number, so return non-equal if it's NaN and equal if it's
      // not NaN.

      // The representation of NaN values has all exponent bits (52..62) set,
      // and not all mantissa bits (0..51) clear.
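      // For example, 0x7ff00000:00000000 is +Infinity (all mantissa bits
      // clear) while 0x7ff80000:00000000 is a quiet NaN.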
      // Read top bits of double representation (second word of value).
      __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
      // Test that exponent bits are all set.
      __ And(t3, t2, Operand(exp_mask_reg));
      // If all bits not set (ne cond), then not a NaN, objects are equal.
      __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));

      // Shift out flag and all exponent bits, retaining only mantissa.
      __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
      // Or with all low-bits of mantissa.
      __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
      __ Or(v0, t3, Operand(t2));
      // For equal we already have the right value in v0:  Return zero (equal)
      // if all bits in mantissa are zero (it's an Infinity) and non-zero if
      // not (it's a NaN).  For <= and >= we need to load v0 with the failing
      // value if it's a NaN.
      if (cc != eq) {
        // All-zero means Infinity means equal.
        __ Ret(eq, v0, Operand(zero_reg));
        if (cc == le) {
          __ li(v0, Operand(GREATER));  // NaN <= NaN should fail.
        } else {
          __ li(v0, Operand(LESS));     // NaN >= NaN should fail.
        }
      }
      __ Ret();
    }
    // No fall through here.
  }

  __ bind(&not_identical);
}


static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* both_loaded_as_doubles,
                                    Label* slow,
                                    bool strict) {
  ASSERT((lhs.is(a0) && rhs.is(a1)) ||
         (lhs.is(a1) && rhs.is(a0)));

  Label lhs_is_smi;
  __ JumpIfSmi(lhs, &lhs_is_smi);
  // Rhs is a Smi.
  // Check whether the non-smi is a heap number.
  __ GetObjectType(lhs, t4, t4);
  if (strict) {
    // If lhs was not a number and rhs was a Smi then strict equality cannot
    // succeed. Return non-equal (lhs is already not zero).
    __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
    __ mov(v0, lhs);
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
  }

  // Rhs is a smi, lhs is a number.
  // Convert smi rhs to double.
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    __ sra(at, rhs, kSmiTagSize);
    __ mtc1(at, f14);
    __ cvt_d_w(f14, f14);
    __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
  } else {
    // Load lhs to a double in a2, a3.
    __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
    __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));

    // Write Smi from rhs to a1 and a0 in double format. t5 is scratch.
    __ mov(t6, rhs);
    ConvertToDoubleStub stub1(a1, a0, t6, t5);
    __ push(ra);
    __ Call(stub1.GetCode());

    __ pop(ra);
  }

  // We now have both loaded as doubles.
  __ jmp(both_loaded_as_doubles);

  __ bind(&lhs_is_smi);
  // Lhs is a Smi.  Check whether the non-smi is a heap number.
  __ GetObjectType(rhs, t4, t4);
  if (strict) {
    // If rhs was not a number and lhs was a Smi then strict equality cannot
    // succeed. Return non-equal.
    __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
    __ li(v0, Operand(1));
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
  }

  // Lhs is a smi, rhs is a number.
  // Convert smi lhs to double.
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);
    __ sra(at, lhs, kSmiTagSize);
    __ mtc1(at, f12);
    __ cvt_d_w(f12, f12);
    __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
  } else {
    // Convert lhs to a double format. t5 is scratch.
    __ mov(t6, lhs);
    ConvertToDoubleStub stub2(a3, a2, t6, t5);
    __ push(ra);
    __ Call(stub2.GetCode());
    __ pop(ra);
    // Load rhs to a double in a1, a0.
    if (rhs.is(a0)) {
      __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
      __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
    } else {
      __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
      __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
    }
  }
  // Fall through to both_loaded_as_doubles.
}


1308 void EmitNanCheck(MacroAssembler* masm, Condition cc) {
1309   bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
1310   if (CpuFeatures::IsSupported(FPU)) {
1311     CpuFeatures::Scope scope(FPU);
1312     // Lhs and rhs are already loaded to f12 and f14 register pairs.
1313     __ Move(t0, t1, f14);
1314     __ Move(t2, t3, f12);
1315   } else {
1316     // Lhs and rhs are already loaded to GP registers.
1317     __ mov(t0, a0);  // a0 has LS 32 bits of rhs.
1318     __ mov(t1, a1);  // a1 has MS 32 bits of rhs.
1319     __ mov(t2, a2);  // a2 has LS 32 bits of lhs.
1320     __ mov(t3, a3);  // a3 has MS 32 bits of lhs.
1321   }
1322   Register rhs_exponent = exp_first ? t0 : t1;
1323   Register lhs_exponent = exp_first ? t2 : t3;
1324   Register rhs_mantissa = exp_first ? t1 : t0;
1325   Register lhs_mantissa = exp_first ? t3 : t2;
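  // For reference, an IEEE 754 double is NaN iff its exponent bits are all
  // ones and its mantissa is non-zero (a sketch of the usual layout):
  //   sign(1) | exponent(11) | mantissa(52)
  //   0x7FF0000000000000 -> +Infinity (mantissa == 0)
  //   0x7FF8000000000000 -> NaN       (mantissa != 0)
  // The checks below compare the exponent against kExponentMask and then
  // look for non-zero mantissa bits, first in the top word, then in the
  // low word.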
1326   Label one_is_nan, neither_is_nan;
1327   Label lhs_not_nan_exp_mask_is_loaded;
1328 
1329   Register exp_mask_reg = t4;
1330   __ li(exp_mask_reg, HeapNumber::kExponentMask);
1331   __ and_(t5, lhs_exponent, exp_mask_reg);
1332   __ Branch(&lhs_not_nan_exp_mask_is_loaded, ne, t5, Operand(exp_mask_reg));
1333 
1334   __ sll(t5, lhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
1335   __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
1336 
1337   __ Branch(&one_is_nan, ne, lhs_mantissa, Operand(zero_reg));
1338 
1339   __ li(exp_mask_reg, HeapNumber::kExponentMask);
1340   __ bind(&lhs_not_nan_exp_mask_is_loaded);
1341   __ and_(t5, rhs_exponent, exp_mask_reg);
1342 
1343   __ Branch(&neither_is_nan, ne, t5, Operand(exp_mask_reg));
1344 
1345   __ sll(t5, rhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
1346   __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
1347 
1348   __ Branch(&neither_is_nan, eq, rhs_mantissa, Operand(zero_reg));
1349 
1350   __ bind(&one_is_nan);
1351   // NaN comparisons always fail.
1352   // Load whatever we need in v0 to make the comparison fail.
1353 
1354   if (cc == lt || cc == le) {
1355     __ li(v0, Operand(GREATER));
1356   } else {
1357     __ li(v0, Operand(LESS));
1358   }
1359   __ Ret();
1360 
1361   __ bind(&neither_is_nan);
1362 }
1363 
1364 
1365 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
1366   // f12 and f14 have the two doubles.  Neither is a NaN.
1367   // Call a native function to do a comparison between two non-NaNs.
1368   // Call C routine that may not cause GC or other trouble.
1369   // We issue the call and return manually because we need the argument
1370   // slots to be freed.
1371 
1372   Label return_result_not_equal, return_result_equal;
1373   if (cc == eq) {
1374     // Doubles are not equal unless they have the same bit pattern.
1375     // Exception: 0 and -0.
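    // A sketch of why 0 and -0 need special handling: they compare equal as
    // doubles but differ in a single bit:
    //   +0.0 = 0x00000000_00000000
    //   -0.0 = 0x80000000_00000000
    // Equal bit patterns always mean equal values, and differing patterns
    // mean unequal values except for this sign-bit-only case, which the code
    // below detects by shifting the sign bits out and OR-ing the remaining
    // words together.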
1376     bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
1377     if (CpuFeatures::IsSupported(FPU)) {
1378       CpuFeatures::Scope scope(FPU);
1379       // Lhs and rhs are already loaded to f12 and f14 register pairs.
1380       __ Move(t0, t1, f14);
1381       __ Move(t2, t3, f12);
1382     } else {
1383       // Lhs and rhs are already loaded to GP registers.
1384       __ mov(t0, a0);  // a0 has LS 32 bits of rhs.
1385       __ mov(t1, a1);  // a1 has MS 32 bits of rhs.
1386       __ mov(t2, a2);  // a2 has LS 32 bits of lhs.
1387       __ mov(t3, a3);  // a3 has MS 32 bits of lhs.
1388     }
1389     Register rhs_exponent = exp_first ? t0 : t1;
1390     Register lhs_exponent = exp_first ? t2 : t3;
1391     Register rhs_mantissa = exp_first ? t1 : t0;
1392     Register lhs_mantissa = exp_first ? t3 : t2;
1393 
1394     __ xor_(v0, rhs_mantissa, lhs_mantissa);
1395     __ Branch(&return_result_not_equal, ne, v0, Operand(zero_reg));
1396 
1397     __ subu(v0, rhs_exponent, lhs_exponent);
1398     __ Branch(&return_result_equal, eq, v0, Operand(zero_reg));
1399     // 0, -0 case.
1400     __ sll(rhs_exponent, rhs_exponent, kSmiTagSize);
1401     __ sll(lhs_exponent, lhs_exponent, kSmiTagSize);
1402     __ or_(t4, rhs_exponent, lhs_exponent);
1403     __ or_(t4, t4, rhs_mantissa);
1404 
1405     __ Branch(&return_result_not_equal, ne, t4, Operand(zero_reg));
1406 
1407     __ bind(&return_result_equal);
1408 
1409     __ li(v0, Operand(EQUAL));
1410     __ Ret();
1411   }
1412 
1413   __ bind(&return_result_not_equal);
1414 
1415   if (!CpuFeatures::IsSupported(FPU)) {
1416     __ push(ra);
1417     __ PrepareCallCFunction(0, 2, t4);
1418     if (!IsMipsSoftFloatABI) {
1419       // We are not using MIPS FPU instructions, and the parameters for the
1420       // runtime function call are prepared in the a0-a3 registers, but the
1421       // function we are calling is compiled with the hard-float flag and
1422       // expects the hard-float ABI (parameters in the f12/f14 registers).
1423       // We need to copy the parameters from a0-a3 to the f12/f14 register pairs.
1424       __ Move(f12, a0, a1);
1425       __ Move(f14, a2, a3);
1426     }
1427 
1428     AllowExternalCallThatCantCauseGC scope(masm);
1429     __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
1430        0, 2);
1431     __ pop(ra);  // Because this function returns int, result is in v0.
1432     __ Ret();
1433   } else {
1434     CpuFeatures::Scope scope(FPU);
1435     Label equal, less_than;
1436     __ BranchF(&equal, NULL, eq, f12, f14);
1437     __ BranchF(&less_than, NULL, lt, f12, f14);
1438 
1439     // Not equal, not less, not NaN, must be greater.
1440 
1441     __ li(v0, Operand(GREATER));
1442     __ Ret();
1443 
1444     __ bind(&equal);
1445     __ li(v0, Operand(EQUAL));
1446     __ Ret();
1447 
1448     __ bind(&less_than);
1449     __ li(v0, Operand(LESS));
1450     __ Ret();
1451   }
1452 }
1453 
1454 
1455 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
1456                                            Register lhs,
1457                                            Register rhs) {
1458     // If either operand is a JS object or an oddball value, then they are
1459     // not equal since their pointers are different.
1460     // There is no test for undetectability in strict equality.
1461     STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
1462     Label first_non_object;
1463     // Get the type of the first operand into a2 and compare it with
1464     // FIRST_SPEC_OBJECT_TYPE.
1465     __ GetObjectType(lhs, a2, a2);
1466     __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
1467 
1468     // Return non-zero.
1469     Label return_not_equal;
1470     __ bind(&return_not_equal);
1471     __ Ret(USE_DELAY_SLOT);
1472     __ li(v0, Operand(1));
1473 
1474     __ bind(&first_non_object);
1475     // Check for oddballs: true, false, null, undefined.
1476     __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));
1477 
1478     __ GetObjectType(rhs, a3, a3);
1479     __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
1480 
1481     // Check for oddballs: true, false, null, undefined.
1482     __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
1483 
1484     // Now that we have the types we might as well check for symbol-symbol.
1485     // Ensure that no non-strings have the symbol bit set.
1486     STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
1487     STATIC_ASSERT(kSymbolTag != 0);
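    // A sketch of the AND trick below: instance types are small integers in
    // which kIsSymbolMask marks symbols. AND-ing the two type words keeps the
    // symbol bit set only if *both* operands have it set, so a single test of
    // (a2 & a3) & kIsSymbolMask detects the symbol-symbol case.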
1488     __ And(t2, a2, Operand(a3));
1489     __ And(t0, t2, Operand(kIsSymbolMask));
1490     __ Branch(&return_not_equal, ne, t0, Operand(zero_reg));
1491 }
1492 
1493 
1494 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
1495                                        Register lhs,
1496                                        Register rhs,
1497                                        Label* both_loaded_as_doubles,
1498                                        Label* not_heap_numbers,
1499                                        Label* slow) {
1500   __ GetObjectType(lhs, a3, a2);
1501   __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
1502   __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
1503   // If first was a heap number & second wasn't, go to slow case.
1504   __ Branch(slow, ne, a3, Operand(a2));
1505 
1506   // Both are heap numbers. Load them up then jump to the code we have
1507   // for that.
1508   if (CpuFeatures::IsSupported(FPU)) {
1509     CpuFeatures::Scope scope(FPU);
1510     __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
1511     __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1512   } else {
1513     __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
1514     __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
1515     if (rhs.is(a0)) {
1516       __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
1517       __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1518     } else {
1519       __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
1520       __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
1521     }
1522   }
1523   __ jmp(both_loaded_as_doubles);
1524 }
1525 
1526 
1527 // Fast negative check for symbol-to-symbol equality.
1528 static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
1529                                          Register lhs,
1530                                          Register rhs,
1531                                          Label* possible_strings,
1532                                          Label* not_both_strings) {
1533   ASSERT((lhs.is(a0) && rhs.is(a1)) ||
1534          (lhs.is(a1) && rhs.is(a0)));
1535 
1536   // a2 is object type of lhs.
1537   // Ensure that no non-strings have the symbol bit set.
1538   Label object_test;
1539   STATIC_ASSERT(kSymbolTag != 0);
1540   __ And(at, a2, Operand(kIsNotStringMask));
1541   __ Branch(&object_test, ne, at, Operand(zero_reg));
1542   __ And(at, a2, Operand(kIsSymbolMask));
1543   __ Branch(possible_strings, eq, at, Operand(zero_reg));
1544   __ GetObjectType(rhs, a3, a3);
1545   __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
1546   __ And(at, a3, Operand(kIsSymbolMask));
1547   __ Branch(possible_strings, eq, at, Operand(zero_reg));
1548 
1549   // Both are symbols. We already checked they weren't the same pointer
1550   // so they are not equal.
1551   __ Ret(USE_DELAY_SLOT);
1552   __ li(v0, Operand(1));   // Non-zero indicates not equal.
1553 
1554   __ bind(&object_test);
1555   __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
1556   __ GetObjectType(rhs, a2, a3);
1557   __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
1558 
1559   // If both objects are undetectable, they are equal.  Otherwise, they
1560   // are not equal, since they are different objects and an object is not
1561   // equal to undefined.
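  // A sketch of the bit logic below, with U = 1 << Map::kIsUndetectable:
  //   both undetectable:  (bits_lhs & bits_rhs) & U == U,  xori U -> 0 (equal)
  //   otherwise:          (bits_lhs & bits_rhs) & U == 0,  xori U -> U (not equal)
  // A zero in v0 signals equality to the caller.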
1562   __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
1563   __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
1564   __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
1565   __ and_(a0, a2, a3);
1566   __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
1567   __ Ret(USE_DELAY_SLOT);
1568   __ xori(v0, a0, 1 << Map::kIsUndetectable);
1569 }
1570 
1571 
1572 void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
1573                                                          Register object,
1574                                                          Register result,
1575                                                          Register scratch1,
1576                                                          Register scratch2,
1577                                                          Register scratch3,
1578                                                          bool object_is_smi,
1579                                                          Label* not_found) {
1580   // Use of registers. Register result is used as a temporary.
1581   Register number_string_cache = result;
1582   Register mask = scratch3;
1583 
1584   // Load the number string cache.
1585   __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
1586 
1587   // Make the hash mask from the length of the number string cache. It
1588   // contains two elements (number and string) for each cache entry.
1589   __ lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
1590   // Divide length by two (length is a smi).
1591   __ sra(mask, mask, kSmiTagSize + 1);
1592   __ Addu(mask, mask, -1);  // Make mask.
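  // A worked example of the mask arithmetic above (assuming kSmiTagSize == 1):
  // a cache with 64 entries has a FixedArray length of 128, stored as the smi
  // 256. Shifting right by kSmiTagSize + 1 == 2 both untags and halves the
  // length (256 >> 2 == 64), and subtracting 1 yields the mask 63 (0x3F).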
1593 
1594   // Calculate the entry in the number string cache. The hash value in the
1595   // number string cache for smis is just the smi value, and the hash for
1596   // doubles is the xor of the upper and lower words. See
1597   // Heap::GetNumberStringCache.
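  // A sketch of the hash for a double: the two 32-bit halves of the IEEE
  // value are XOR-ed and masked. For example, 1.0 = 0x3FF00000_00000000
  // hashes to (0x3FF00000 ^ 0x00000000) & mask. Smis simply use their
  // untagged value masked the same way.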
1598   Isolate* isolate = masm->isolate();
1599   Label is_smi;
1600   Label load_result_from_cache;
1601   if (!object_is_smi) {
1602     __ JumpIfSmi(object, &is_smi);
1603     if (CpuFeatures::IsSupported(FPU)) {
1604       CpuFeatures::Scope scope(FPU);
1605       __ CheckMap(object,
1606                   scratch1,
1607                   Heap::kHeapNumberMapRootIndex,
1608                   not_found,
1609                   DONT_DO_SMI_CHECK);
1610 
1611       STATIC_ASSERT(8 == kDoubleSize);
1612       __ Addu(scratch1,
1613               object,
1614               Operand(HeapNumber::kValueOffset - kHeapObjectTag));
1615       __ lw(scratch2, MemOperand(scratch1, kPointerSize));
1616       __ lw(scratch1, MemOperand(scratch1, 0));
1617       __ Xor(scratch1, scratch1, Operand(scratch2));
1618       __ And(scratch1, scratch1, Operand(mask));
1619 
1620       // Calculate address of entry in string cache: each entry consists
1621       // of two pointer sized fields.
1622       __ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
1623       __ Addu(scratch1, number_string_cache, scratch1);
1624 
1625       Register probe = mask;
1626       __ lw(probe,
1627              FieldMemOperand(scratch1, FixedArray::kHeaderSize));
1628       __ JumpIfSmi(probe, not_found);
1629       __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
1630       __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
1631       __ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
1632       __ Branch(not_found);
1633     } else {
1634       // Note that there is no cache check for the non-FPU case, even though
1635       // it seems there could be. This may be a tiny missed optimization for
1636       // non-FPU cores.
1637       __ Branch(not_found);
1638     }
1639   }
1640 
1641   __ bind(&is_smi);
1642   Register scratch = scratch1;
1643   __ sra(scratch, object, 1);   // Shift away the tag.
1644   __ And(scratch, mask, Operand(scratch));
1645 
1646   // Calculate address of entry in string cache: each entry consists
1647   // of two pointer sized fields.
1648   __ sll(scratch, scratch, kPointerSizeLog2 + 1);
1649   __ Addu(scratch, number_string_cache, scratch);
1650 
1651   // Check if the entry is the smi we are looking for.
1652   Register probe = mask;
1653   __ lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
1654   __ Branch(not_found, ne, object, Operand(probe));
1655 
1656   // Get the result from the cache.
1657   __ bind(&load_result_from_cache);
1658   __ lw(result,
1659          FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
1660 
1661   __ IncrementCounter(isolate->counters()->number_to_string_native(),
1662                       1,
1663                       scratch1,
1664                       scratch2);
1665 }
1666 
1667 
1668 void NumberToStringStub::Generate(MacroAssembler* masm) {
1669   Label runtime;
1670 
1671   __ lw(a1, MemOperand(sp, 0));
1672 
1673   // Generate code to lookup number in the number string cache.
1674   GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, false, &runtime);
1675   __ DropAndRet(1);
1676 
1677   __ bind(&runtime);
1678   // Handle number to string in the runtime system if not found in the cache.
1679   __ TailCallRuntime(Runtime::kNumberToString, 1, 1);
1680 }
1681 
1682 
1683 // On entry lhs_ (lhs) and rhs_ (rhs) are the things to be compared.
1684 // On exit, v0 is 0, positive, or negative (smi) to indicate the result
1685 // of the comparison.
1686 void CompareStub::Generate(MacroAssembler* masm) {
1687   Label slow;  // Call builtin.
1688   Label not_smis, both_loaded_as_doubles;
1689 
1690 
1691   if (include_smi_compare_) {
1692     Label not_two_smis, smi_done;
1693     __ Or(a2, a1, a0);
1694     __ JumpIfNotSmi(a2, &not_two_smis);
1695     __ sra(a1, a1, 1);
1696     __ sra(a0, a0, 1);
1697     __ Ret(USE_DELAY_SLOT);
1698     __ subu(v0, a1, a0);
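    // A sketch of this fast path: the difference of the two untagged smis is
    // returned directly, and its sign encodes the comparison. E.g. for
    // Smi(7) and Smi(3), sra untags them to 7 and 3 and subu produces 4 > 0,
    // i.e. "greater"; callers only inspect the sign/zero-ness of the result.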
1699     __ bind(&not_two_smis);
1700   } else if (FLAG_debug_code) {
1701     __ Or(a2, a1, a0);
1702     __ And(a2, a2, kSmiTagMask);
1703     __ Assert(ne, "CompareStub: unexpected smi operands.",
1704         a2, Operand(zero_reg));
1705   }
1706 
1707 
1708   // NOTICE! This code is only reached after a smi-fast-case check, so
1709   // it is certain that at least one operand isn't a smi.
1710 
1711   // Handle the case where the objects are identical.  Either returns the answer
1712   // or goes to slow.  Only falls through if the objects were not identical.
1713   EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
1714 
1715   // If either is a Smi (we know that not both are), then they can only
1716   // be strictly equal if the other is a HeapNumber.
1717   STATIC_ASSERT(kSmiTag == 0);
1718   ASSERT_EQ(0, Smi::FromInt(0));
1719   __ And(t2, lhs_, Operand(rhs_));
1720   __ JumpIfNotSmi(t2, &not_smis, t0);
1721   // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
1722   // 1) Return the answer.
1723   // 2) Go to slow.
1724   // 3) Fall through to both_loaded_as_doubles.
1725   // 4) Jump to rhs_not_nan.
1726   // In cases 3 and 4 we have found out we were dealing with a number-number
1727   // comparison and the numbers have been loaded into f12 and f14 as doubles,
1728   // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
1729   EmitSmiNonsmiComparison(masm, lhs_, rhs_,
1730                           &both_loaded_as_doubles, &slow, strict_);
1731 
1732   __ bind(&both_loaded_as_doubles);
1733   // f12, f14 are the double representations of the left hand side
1734   // and the right hand side if we have FPU. Otherwise a2, a3 represent
1735   // left hand side and a0, a1 represent right hand side.
1736 
1737   Isolate* isolate = masm->isolate();
1738   if (CpuFeatures::IsSupported(FPU)) {
1739     CpuFeatures::Scope scope(FPU);
1740     Label nan;
1741     __ li(t0, Operand(LESS));
1742     __ li(t1, Operand(GREATER));
1743     __ li(t2, Operand(EQUAL));
1744 
1745     // Check if either rhs or lhs is NaN.
1746     __ BranchF(NULL, &nan, eq, f12, f14);
1747 
1748     // Check if LESS condition is satisfied. If true, move conditionally
1749     // result to v0.
1750     __ c(OLT, D, f12, f14);
1751     __ Movt(v0, t0);
1752     // Use the previous check to conditionally store the opposite result
1753     // (GREATER) to v0. If rhs is equal to lhs, this will be corrected by
1754     // the next check.
1755     __ Movf(v0, t1);
1756     // Check if EQUAL condition is satisfied. If true, move conditionally
1757     // result to v0.
1758     __ c(EQ, D, f12, f14);
1759     __ Movt(v0, t2);
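    // A sketch of the conditional-move ladder above: c(OLT) sets the FPU
    // condition flag, Movt writes t0 (LESS) to v0 if the flag is set, and
    // Movf writes t1 (GREATER) if it is clear. If the operands are in fact
    // equal, the second c(EQ)/Movt pair overwrites v0 with t2 (EQUAL), so
    // exactly one of the three constants survives.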
1760 
1761     __ Ret();
1762 
1763     __ bind(&nan);
1764     // NaN comparisons always fail.
1765     // Load whatever we need in v0 to make the comparison fail.
1766     if (cc_ == lt || cc_ == le) {
1767       __ li(v0, Operand(GREATER));
1768     } else {
1769       __ li(v0, Operand(LESS));
1770     }
1771     __ Ret();
1772   } else {
1773     // Checks for NaN in the doubles we have loaded.  Can return the answer or
1774     // fall through if neither is a NaN.  Also binds rhs_not_nan.
1775     EmitNanCheck(masm, cc_);
1776 
1777     // Compares two doubles that are not NaNs. Returns the answer.
1778     // Never falls through.
1779     EmitTwoNonNanDoubleComparison(masm, cc_);
1780   }
1781 
1782   __ bind(&not_smis);
1783   // At this point we know we are dealing with two different objects,
1784   // and neither of them is a Smi. The objects are in lhs_ and rhs_.
1785   if (strict_) {
1786     // This returns non-equal for some object types, or falls through if it
1787     // was not lucky.
1788     EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
1789   }
1790 
1791   Label check_for_symbols;
1792   Label flat_string_check;
1793   // Check for heap-number-heap-number comparison. Can jump to slow case,
1794   // or load both doubles and jump to the code that handles
1795   // that case. If the inputs are not doubles then jumps to check_for_symbols.
1796   // In this case a2 will contain the type of lhs_.
1797   EmitCheckForTwoHeapNumbers(masm,
1798                              lhs_,
1799                              rhs_,
1800                              &both_loaded_as_doubles,
1801                              &check_for_symbols,
1802                              &flat_string_check);
1803 
1804   __ bind(&check_for_symbols);
1805   if (cc_ == eq && !strict_) {
1806     // Returns an answer for two symbols or two detectable objects.
1807     // Otherwise jumps to string case or not both strings case.
1808     // Assumes that a2 is the type of lhs_ on entry.
1809     EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
1810   }
1811 
1812   // Check for both being sequential ASCII strings, and inline if that is the
1813   // case.
1814   __ bind(&flat_string_check);
1815 
1816   __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, a2, a3, &slow);
1817 
1818   __ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3);
1819   if (cc_ == eq) {
1820     StringCompareStub::GenerateFlatAsciiStringEquals(masm,
1821                                                      lhs_,
1822                                                      rhs_,
1823                                                      a2,
1824                                                      a3,
1825                                                      t0);
1826   } else {
1827     StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
1828                                                        lhs_,
1829                                                        rhs_,
1830                                                        a2,
1831                                                        a3,
1832                                                        t0,
1833                                                        t1);
1834   }
1835   // Never falls through to here.
1836 
1837   __ bind(&slow);
1838   // Prepare for call to builtin. Push object pointers, a0 (lhs) first,
1839   // a1 (rhs) second.
1840   __ Push(lhs_, rhs_);
1841   // Figure out which native to call and setup the arguments.
1842   Builtins::JavaScript native;
1843   if (cc_ == eq) {
1844     native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
1845   } else {
1846     native = Builtins::COMPARE;
1847     int ncr;  // NaN compare result.
1848     if (cc_ == lt || cc_ == le) {
1849       ncr = GREATER;
1850     } else {
1851       ASSERT(cc_ == gt || cc_ == ge);  // Remaining cases.
1852       ncr = LESS;
1853     }
1854     __ li(a0, Operand(Smi::FromInt(ncr)));
1855     __ push(a0);
1856   }
1857 
1858   // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
1859   // tagged as a small integer.
1860   __ InvokeBuiltin(native, JUMP_FUNCTION);
1861 }
1862 
1863 
1864 // The stub expects its argument in the tos_ register and returns its result in
1865 // it, too: zero for false, and a non-zero value for true.
1866 void ToBooleanStub::Generate(MacroAssembler* masm) {
1867   // This stub uses FPU instructions.
1868   CpuFeatures::Scope scope(FPU);
1869 
1870   Label patch;
1871   const Register map = t5.is(tos_) ? t3 : t5;
1872 
1873   // undefined -> false.
1874   CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
1875 
1876   // Boolean -> its value.
1877   CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
1878   CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
1879 
1880   // 'null' -> false.
1881   CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
1882 
1883   if (types_.Contains(SMI)) {
1884     // Smis: 0 -> false, all other -> true
1885     __ And(at, tos_, kSmiTagMask);
1886     // tos_ contains the correct return value already
1887     __ Ret(eq, at, Operand(zero_reg));
1888   } else if (types_.NeedsMap()) {
1889     // If we need a map later and have a Smi -> patch.
1890     __ JumpIfSmi(tos_, &patch);
1891   }
1892 
1893   if (types_.NeedsMap()) {
1894     __ lw(map, FieldMemOperand(tos_, HeapObject::kMapOffset));
1895 
1896     if (types_.CanBeUndetectable()) {
1897       __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
1898       __ And(at, at, Operand(1 << Map::kIsUndetectable));
1899       // Undetectable -> false.
1900       __ Movn(tos_, zero_reg, at);
1901       __ Ret(ne, at, Operand(zero_reg));
1902     }
1903   }
1904 
1905   if (types_.Contains(SPEC_OBJECT)) {
1906     // Spec object -> true.
1907     __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
1908     // tos_ contains the correct non-zero return value already.
1909     __ Ret(ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
1910   }
1911 
1912   if (types_.Contains(STRING)) {
1913     // String value -> false iff empty.
1914     __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
1915     Label skip;
1916     __ Branch(&skip, ge, at, Operand(FIRST_NONSTRING_TYPE));
1917     __ Ret(USE_DELAY_SLOT);  // the string length is OK as the return value
1918     __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset));
1919     __ bind(&skip);
1920   }
1921 
1922   if (types_.Contains(HEAP_NUMBER)) {
1923     // Heap number -> false iff +0, -0, or NaN.
1924     Label not_heap_number;
1925     __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
1926     __ Branch(&not_heap_number, ne, map, Operand(at));
1927     Label zero_or_nan, number;
1928     __ ldc1(f2, FieldMemOperand(tos_, HeapNumber::kValueOffset));
1929     __ BranchF(&number, &zero_or_nan, ne, f2, kDoubleRegZero);
1930     // "tos_" is a register, and contains a non zero value by default.
1931     // Hence we only need to overwrite "tos_" with zero to return false for
1932     // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
1933     __ bind(&zero_or_nan);
1934     __ mov(tos_, zero_reg);
1935     __ bind(&number);
1936     __ Ret();
1937     __ bind(&not_heap_number);
1938   }
1939 
1940   __ bind(&patch);
1941   GenerateTypeTransition(masm);
1942 }
1943 
1944 
1945 void ToBooleanStub::CheckOddball(MacroAssembler* masm,
1946                                  Type type,
1947                                  Heap::RootListIndex value,
1948                                  bool result) {
1949   if (types_.Contains(type)) {
1950     // If we see an expected oddball, return its ToBoolean value tos_.
1951     __ LoadRoot(at, value);
1952     __ Subu(at, at, tos_);  // This is a check for equality for the movz below.
1953     // The value of a root is never NULL, so we can avoid loading a non-null
1954     // value into tos_ when we want to return 'true'.
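    // A sketch: at = root - tos_, which is zero exactly when tos_ is the
    // expected oddball. For a false result, Movz overwrites tos_ with zero
    // when at == 0; for a true result, tos_ (a non-null pointer, hence
    // non-zero) is already a valid 'true' value. Ret then returns only in
    // the at == 0 case; otherwise the next oddball check runs.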
1955     if (!result) {
1956       __ Movz(tos_, zero_reg, at);
1957     }
1958     __ Ret(eq, at, Operand(zero_reg));
1959   }
1960 }
1961 
1962 
1963 void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
1964   __ Move(a3, tos_);
1965   __ li(a2, Operand(Smi::FromInt(tos_.code())));
1966   __ li(a1, Operand(Smi::FromInt(types_.ToByte())));
1967   __ Push(a3, a2, a1);
1968   // Patch the caller to an appropriate specialized stub and return the
1969   // operation result to the caller of the stub.
1970   __ TailCallExternalReference(
1971       ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
1972       3,
1973       1);
1974 }
1975 
1976 
1977 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
1978   // We don't allow a GC during a store buffer overflow so there is no need to
1979   // store the registers in any particular way, but we do have to store and
1980   // restore them.
1981   __ MultiPush(kJSCallerSaved | ra.bit());
1982   if (save_doubles_ == kSaveFPRegs) {
1983     CpuFeatures::Scope scope(FPU);
1984     __ MultiPushFPU(kCallerSavedFPU);
1985   }
1986   const int argument_count = 1;
1987   const int fp_argument_count = 0;
1988   const Register scratch = a1;
1989 
1990   AllowExternalCallThatCantCauseGC scope(masm);
1991   __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
1992   __ li(a0, Operand(ExternalReference::isolate_address()));
1993   __ CallCFunction(
1994       ExternalReference::store_buffer_overflow_function(masm->isolate()),
1995       argument_count);
1996   if (save_doubles_ == kSaveFPRegs) {
1997     CpuFeatures::Scope scope(FPU);
1998     __ MultiPopFPU(kCallerSavedFPU);
1999   }
2000 
2001   __ MultiPop(kJSCallerSaved | ra.bit());
2002   __ Ret();
2003 }
2004 
2005 
2006 void UnaryOpStub::PrintName(StringStream* stream) {
2007   const char* op_name = Token::Name(op_);
2008   const char* overwrite_name = NULL;  // Make g++ happy.
2009   switch (mode_) {
2010     case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
2011     case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
2012   }
2013   stream->Add("UnaryOpStub_%s_%s_%s",
2014               op_name,
2015               overwrite_name,
2016               UnaryOpIC::GetName(operand_type_));
2017 }
2018 
2019 
2020 // TODO(svenpanne): Use virtual functions instead of switch.
2021 void UnaryOpStub::Generate(MacroAssembler* masm) {
2022   switch (operand_type_) {
2023     case UnaryOpIC::UNINITIALIZED:
2024       GenerateTypeTransition(masm);
2025       break;
2026     case UnaryOpIC::SMI:
2027       GenerateSmiStub(masm);
2028       break;
2029     case UnaryOpIC::HEAP_NUMBER:
2030       GenerateHeapNumberStub(masm);
2031       break;
2032     case UnaryOpIC::GENERIC:
2033       GenerateGenericStub(masm);
2034       break;
2035   }
2036 }
2037 
2038 
2039 void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
2040   // Argument is in a0 and v0 at this point, so we can overwrite a0.
2041   __ li(a2, Operand(Smi::FromInt(op_)));
2042   __ li(a1, Operand(Smi::FromInt(mode_)));
2043   __ li(a0, Operand(Smi::FromInt(operand_type_)));
2044   __ Push(v0, a2, a1, a0);
2045 
2046   __ TailCallExternalReference(
2047       ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
2048 }
2049 
2050 
2051 // TODO(svenpanne): Use virtual functions instead of switch.
2052 void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
2053   switch (op_) {
2054     case Token::SUB:
2055       GenerateSmiStubSub(masm);
2056       break;
2057     case Token::BIT_NOT:
2058       GenerateSmiStubBitNot(masm);
2059       break;
2060     default:
2061       UNREACHABLE();
2062   }
2063 }
2064 
2065 
2066 void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
2067   Label non_smi, slow;
2068   GenerateSmiCodeSub(masm, &non_smi, &slow);
2069   __ bind(&non_smi);
2070   __ bind(&slow);
2071   GenerateTypeTransition(masm);
2072 }
2073 
2074 
2075 void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
2076   Label non_smi;
2077   GenerateSmiCodeBitNot(masm, &non_smi);
2078   __ bind(&non_smi);
2079   GenerateTypeTransition(masm);
2080 }
2081 
2082 
2083 void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
2084                                      Label* non_smi,
2085                                      Label* slow) {
2086   __ JumpIfNotSmi(a0, non_smi);
2087 
2088   // The result of negating zero or the smallest negative smi is not a smi.
2089   __ And(t0, a0, ~0x80000000);
2090   __ Branch(slow, eq, t0, Operand(zero_reg));
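  // A sketch of the mask check above: ~0x80000000 clears only the sign bit,
  // so t0 is zero exactly for the tagged values 0x00000000 (Smi 0, whose
  // negation -0 is not a smi) and 0x80000000 (the smallest smi, -2^30,
  // whose negation 2^30 overflows the smi range). Both go to the slow path.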
2091 
2092   // Return '0 - value'.
2093   __ Ret(USE_DELAY_SLOT);
2094   __ subu(v0, zero_reg, a0);
2095 }
2096 
2097 
2098 void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
2099                                         Label* non_smi) {
2100   __ JumpIfNotSmi(a0, non_smi);
2101 
2102   // Flip bits and revert inverted smi-tag.
2103   __ Neg(v0, a0);
2104   __ And(v0, v0, ~kSmiTagMask);
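  // A sketch, assuming kSmiTag == 0 and kSmiTagSize == 1: for a tagged smi
  // a0 = x << 1, flipping every bit gives (~x << 1) | 1, and clearing the
  // inverted tag bit with ~kSmiTagMask yields ~x << 1, the tagged result.
  // E.g. Smi(5) = 0b1010 -> flip -> ...11110101 -> clear bit 0 ->
  // ...11110100 = tagged -6, and indeed ~5 == -6.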
2105   __ Ret();
2106 }
2107 
2108 
2109 // TODO(svenpanne): Use virtual functions instead of switch.
2110 void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
2111   switch (op_) {
2112     case Token::SUB:
2113       GenerateHeapNumberStubSub(masm);
2114       break;
2115     case Token::BIT_NOT:
2116       GenerateHeapNumberStubBitNot(masm);
2117       break;
2118     default:
2119       UNREACHABLE();
2120   }
2121 }
2122 
2123 
2124 void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
2125   Label non_smi, slow, call_builtin;
2126   GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
2127   __ bind(&non_smi);
2128   GenerateHeapNumberCodeSub(masm, &slow);
2129   __ bind(&slow);
2130   GenerateTypeTransition(masm);
2131   __ bind(&call_builtin);
2132   GenerateGenericCodeFallback(masm);
2133 }
2134 
2135 
2136 void UnaryOpStub::GenerateHeapNumberStubBitNot(MacroAssembler* masm) {
2137   Label non_smi, slow;
2138   GenerateSmiCodeBitNot(masm, &non_smi);
2139   __ bind(&non_smi);
2140   GenerateHeapNumberCodeBitNot(masm, &slow);
2141   __ bind(&slow);
2142   GenerateTypeTransition(masm);
2143 }
2144 
2145 
2146 void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
2147                                             Label* slow) {
2148   EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
2149   // a0 is a heap number.  Get a new heap number in a1.
2150   if (mode_ == UNARY_OVERWRITE) {
2151     __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
2152     __ Xor(a2, a2, Operand(HeapNumber::kSignMask));  // Flip sign.
2153     __ sw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
2154   } else {
2155     Label slow_allocate_heapnumber, heapnumber_allocated;
2156     __ AllocateHeapNumber(a1, a2, a3, t2, &slow_allocate_heapnumber);
2157     __ jmp(&heapnumber_allocated);
2158 
2159     __ bind(&slow_allocate_heapnumber);
2160     {
2161       FrameScope scope(masm, StackFrame::INTERNAL);
2162       __ push(a0);
2163       __ CallRuntime(Runtime::kNumberAlloc, 0);
2164       __ mov(a1, v0);
2165       __ pop(a0);
2166     }
2167 
2168     __ bind(&heapnumber_allocated);
2169     __ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
2170     __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
2171     __ sw(a3, FieldMemOperand(a1, HeapNumber::kMantissaOffset));
2172     __ Xor(a2, a2, Operand(HeapNumber::kSignMask));  // Flip sign.
2173     __ sw(a2, FieldMemOperand(a1, HeapNumber::kExponentOffset));
2174     __ mov(v0, a1);
2175   }
2176   __ Ret();
2177 }
2178 
2179 
2180 void UnaryOpStub::GenerateHeapNumberCodeBitNot(
2181     MacroAssembler* masm,
2182     Label* slow) {
2183   Label impossible;
2184 
2185   EmitCheckForHeapNumber(masm, a0, a1, t2, slow);
2186   // Convert the heap number in a0 to an untagged integer in a1.
2187   __ ConvertToInt32(a0, a1, a2, a3, f0, slow);
2188 
2189   // Do the bitwise operation and check if the result fits in a smi.
2190   Label try_float;
2191   __ Neg(a1, a1);
2192   __ Addu(a2, a1, Operand(0x40000000));
2193   __ Branch(&try_float, lt, a2, Operand(zero_reg));
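  // A sketch of the range check above: values that fit in a smi lie in
  // [-2^30, 2^30 - 1]. Adding 0x40000000 (2^30) maps that interval to
  // [0, 2^31 - 1], so the sum is negative exactly when the value does NOT
  // fit. E.g. 2^30 + 0x40000000 == 0x80000000 < 0 -> try_float.
  // The same idiom guards several smi-tagging sites below.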
2194 
2195   // Tag the result as a smi and we're done.
2196   __ SmiTag(v0, a1);
2197   __ Ret();
2198 
2199   // Try to store the result in a heap number.
2200   __ bind(&try_float);
2201   if (mode_ == UNARY_NO_OVERWRITE) {
2202     Label slow_allocate_heapnumber, heapnumber_allocated;
2203     // Allocate a new heap number without zapping v0, which we need if it fails.
2204     __ AllocateHeapNumber(a2, a3, t0, t2, &slow_allocate_heapnumber);
2205     __ jmp(&heapnumber_allocated);
2206 
2207     __ bind(&slow_allocate_heapnumber);
2208     {
2209       FrameScope scope(masm, StackFrame::INTERNAL);
2210       __ push(v0);  // Push the heap number, not the untagged int32.
2211       __ CallRuntime(Runtime::kNumberAlloc, 0);
2212       __ mov(a2, v0);  // Move the new heap number into a2.
2213       // Get the heap number into v0, now that the new heap number is in a2.
2214       __ pop(v0);
2215     }
2216 
2217     // Convert the heap number in v0 to an untagged integer in a1.
2218     // This can't hit the slow case because it's the same number we already
2219     // converted successfully once.
2220     __ ConvertToInt32(v0, a1, a3, t0, f0, &impossible);
2221     // Negate the result.
2222     __ Xor(a1, a1, -1);
2223 
2224     __ bind(&heapnumber_allocated);
2225     __ mov(v0, a2);  // Move newly allocated heap number to v0.
2226   }
2227 
2228   if (CpuFeatures::IsSupported(FPU)) {
2229     // Convert the int32 in a1 to the heap number in v0. a2 is corrupted.
2230     CpuFeatures::Scope scope(FPU);
2231     __ mtc1(a1, f0);
2232     __ cvt_d_w(f0, f0);
2233     __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
2234     __ Ret();
2235   } else {
2236     // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
2237     // have to set up a frame.
2238     WriteInt32ToHeapNumberStub stub(a1, v0, a2, a3);
2239     __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2240   }
2241 
2242   __ bind(&impossible);
2243   if (FLAG_debug_code) {
2244     __ stop("Incorrect assumption in bit-not stub");
2245   }
2246 }
2247 
2248 
2249 // TODO(svenpanne): Use virtual functions instead of switch.
2250 void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
2251   switch (op_) {
2252     case Token::SUB:
2253       GenerateGenericStubSub(masm);
2254       break;
2255     case Token::BIT_NOT:
2256       GenerateGenericStubBitNot(masm);
2257       break;
2258     default:
2259       UNREACHABLE();
2260   }
2261 }
2262 
2263 
2264 void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
2265   Label non_smi, slow;
2266   GenerateSmiCodeSub(masm, &non_smi, &slow);
2267   __ bind(&non_smi);
2268   GenerateHeapNumberCodeSub(masm, &slow);
2269   __ bind(&slow);
2270   GenerateGenericCodeFallback(masm);
2271 }
2272 
2273 
2274 void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
2275   Label non_smi, slow;
2276   GenerateSmiCodeBitNot(masm, &non_smi);
2277   __ bind(&non_smi);
2278   GenerateHeapNumberCodeBitNot(masm, &slow);
2279   __ bind(&slow);
2280   GenerateGenericCodeFallback(masm);
2281 }
2282 
2283 
2284 void UnaryOpStub::GenerateGenericCodeFallback(
2285     MacroAssembler* masm) {
2286   // Handle the slow case by jumping to the JavaScript builtin.
2287   __ push(a0);
2288   switch (op_) {
2289     case Token::SUB:
2290       __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
2291       break;
2292     case Token::BIT_NOT:
2293       __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
2294       break;
2295     default:
2296       UNREACHABLE();
2297   }
2298 }
2299 
2300 
2301 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
2302   Label get_result;
2303 
2304   __ Push(a1, a0);
2305 
2306   __ li(a2, Operand(Smi::FromInt(MinorKey())));
2307   __ li(a1, Operand(Smi::FromInt(op_)));
2308   __ li(a0, Operand(Smi::FromInt(operands_type_)));
2309   __ Push(a2, a1, a0);
2310 
2311   __ TailCallExternalReference(
2312       ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
2313                         masm->isolate()),
2314       5,
2315       1);
2316 }
2317 
2318 
2319 void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
2320     MacroAssembler* masm) {
2321   UNIMPLEMENTED();
2322 }
2323 
2324 
2325 void BinaryOpStub::Generate(MacroAssembler* masm) {
2326   // Explicitly allow generation of nested stubs. It is safe here because
2327   // generation code does not use any raw pointers.
2328   AllowStubCallsScope allow_stub_calls(masm, true);
2329   switch (operands_type_) {
2330     case BinaryOpIC::UNINITIALIZED:
2331       GenerateTypeTransition(masm);
2332       break;
2333     case BinaryOpIC::SMI:
2334       GenerateSmiStub(masm);
2335       break;
2336     case BinaryOpIC::INT32:
2337       GenerateInt32Stub(masm);
2338       break;
2339     case BinaryOpIC::HEAP_NUMBER:
2340       GenerateHeapNumberStub(masm);
2341       break;
2342     case BinaryOpIC::ODDBALL:
2343       GenerateOddballStub(masm);
2344       break;
2345     case BinaryOpIC::BOTH_STRING:
2346       GenerateBothStringStub(masm);
2347       break;
2348     case BinaryOpIC::STRING:
2349       GenerateStringStub(masm);
2350       break;
2351     case BinaryOpIC::GENERIC:
2352       GenerateGeneric(masm);
2353       break;
2354     default:
2355       UNREACHABLE();
2356   }
2357 }
2358 
2359 
2360 void BinaryOpStub::PrintName(StringStream* stream) {
2361   const char* op_name = Token::Name(op_);
2362   const char* overwrite_name;
2363   switch (mode_) {
2364     case NO_OVERWRITE: overwrite_name = "Alloc"; break;
2365     case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
2366     case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
2367     default: overwrite_name = "UnknownOverwrite"; break;
2368   }
2369   stream->Add("BinaryOpStub_%s_%s_%s",
2370               op_name,
2371               overwrite_name,
2372               BinaryOpIC::GetName(operands_type_));
2373 }
2374 
2375 
2376 
2377 void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
2378   Register left = a1;
2379   Register right = a0;
2380 
2381   Register scratch1 = t0;
2382   Register scratch2 = t1;
2383 
2384   ASSERT(right.is(a0));
2385   STATIC_ASSERT(kSmiTag == 0);
2386 
2387   Label not_smi_result;
2388   switch (op_) {
2389     case Token::ADD:
2390       __ AdduAndCheckForOverflow(v0, left, right, scratch1);
2391       __ RetOnNoOverflow(scratch1);
2392       // No need to revert anything - right and left are intact.
2393       break;
2394     case Token::SUB:
2395       __ SubuAndCheckForOverflow(v0, left, right, scratch1);
2396       __ RetOnNoOverflow(scratch1);
2397       // No need to revert anything - right and left are intact.
2398       break;
2399     case Token::MUL: {
2400       // Remove tag from one of the operands. This way the multiplication result
2401       // will be a smi if it fits the smi range.
2402       __ SmiUntag(scratch1, right);
2403       // Do multiplication.
2404       // lo = lower 32 bits of scratch1 * left.
2405       // hi = higher 32 bits of scratch1 * left.
2406       __ Mult(left, scratch1);
2407       // Check for overflowing the smi range - no overflow if higher 33 bits of
2408       // the result are identical.
2409       __ mflo(scratch1);
2410       __ mfhi(scratch2);
2411       __ sra(scratch1, scratch1, 31);
2412       __ Branch(&not_smi_result, ne, scratch1, Operand(scratch2));
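      // A sketch of the overflow check above: mult produces a 64-bit product
      // in hi:lo. The product fits in 32 signed bits exactly when hi equals
      // the sign-extension of lo, i.e. hi == (lo >> 31) with an arithmetic
      // shift. E.g. 3 * -4 = -12: lo = 0xFFFFFFF4, lo >> 31 = 0xFFFFFFFF,
      // which equals hi, so there is no overflow.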
2413       // Go slow on zero result to handle -0.
2414       __ mflo(v0);
2415       __ Ret(ne, v0, Operand(zero_reg));
2416       // We need -0 if we were multiplying a negative number with 0 to get 0.
2417       // We know one of them was zero.
2418       __ Addu(scratch2, right, left);
2419       Label skip;
2420       // ARM uses the 'pl' condition, which is 'ge'.
2421       // Negating it results in 'lt'.
2422       __ Branch(&skip, lt, scratch2, Operand(zero_reg));
2423       ASSERT(Smi::FromInt(0) == 0);
2424       __ Ret(USE_DELAY_SLOT);
2425       __ mov(v0, zero_reg);  // Return smi 0 if the non-zero one was positive.
2426       __ bind(&skip);
2427       // We fall through here if we multiplied a negative number with 0, because
2428       // that would mean we should produce -0.
2429       }
2430       break;
2431     case Token::DIV: {
2432       Label done;
2433       __ SmiUntag(scratch2, right);
2434       __ SmiUntag(scratch1, left);
2435       __ Div(scratch1, scratch2);
2436       // A minor optimization: div may be calculated asynchronously, so we check
2437       // for division by zero before getting the result.
2438       __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
2439       // If the result is 0, we need to make sure the divisor (right) is
2440       // positive, otherwise it is a -0 case.
2441       // Quotient is in 'lo', remainder is in 'hi'.
2442       // Check for no remainder first.
2443       __ mfhi(scratch1);
2444       __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
2445       __ mflo(scratch1);
2446       __ Branch(&done, ne, scratch1, Operand(zero_reg));
2447       __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
2448       __ bind(&done);
2449       // Check that the signed result fits in a Smi.
2450       __ Addu(scratch2, scratch1, Operand(0x40000000));
2451       __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
2452       __ SmiTag(v0, scratch1);
2453       __ Ret();
2454       }
2455       break;
2456     case Token::MOD: {
2457       Label done;
2458       __ SmiUntag(scratch2, right);
2459       __ SmiUntag(scratch1, left);
2460       __ Div(scratch1, scratch2);
2461       // A minor optimization: div may be calculated asynchronously, so we check
2462       // for division by 0 before calling mfhi.
2463       // Check for zero on the right hand side.
2464       __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
2465       // If the result is 0, we need to make sure the dividend (left) is
2466       // positive (or 0), otherwise it is a -0 case.
2467       // Remainder is in 'hi'.
2468       __ mfhi(scratch2);
2469       __ Branch(&done, ne, scratch2, Operand(zero_reg));
2470       __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
2471       __ bind(&done);
2472       // Check that the signed result fits in a Smi.
2473       __ Addu(scratch1, scratch2, Operand(0x40000000));
2474       __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
2475       __ SmiTag(v0, scratch2);
2476       __ Ret();
2477       }
2478       break;
2479     case Token::BIT_OR:
2480       __ Ret(USE_DELAY_SLOT);
2481       __ or_(v0, left, right);
2482       break;
2483     case Token::BIT_AND:
2484       __ Ret(USE_DELAY_SLOT);
2485       __ and_(v0, left, right);
2486       break;
2487     case Token::BIT_XOR:
2488       __ Ret(USE_DELAY_SLOT);
2489       __ xor_(v0, left, right);
2490       break;
2491     case Token::SAR:
2492       // Remove tags from right operand.
2493       __ GetLeastBitsFromSmi(scratch1, right, 5);
2494       __ srav(scratch1, left, scratch1);
2495       // Smi tag result.
2496       __ And(v0, scratch1, ~kSmiTagMask);
2497       __ Ret();
2498       break;
2499     case Token::SHR:
2500       // Remove tags from operands. We can't do this on a 31 bit number
2501       // because then the 0s get shifted into bit 30 instead of bit 31.
2502       __ SmiUntag(scratch1, left);
2503       __ GetLeastBitsFromSmi(scratch2, right, 5);
2504       __ srlv(v0, scratch1, scratch2);
2505       // Unsigned shift is not allowed to produce a negative number, so
2506       // check the sign bit and the sign bit after Smi tagging.
2507       __ And(scratch1, v0, Operand(0xc0000000));
2508       __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
2509       // Smi tag result.
2510       __ SmiTag(v0);
2511       __ Ret();
2512       break;
2513     case Token::SHL:
2514       // Remove tags from operands.
2515       __ SmiUntag(scratch1, left);
2516       __ GetLeastBitsFromSmi(scratch2, right, 5);
2517       __ sllv(scratch1, scratch1, scratch2);
2518       // Check that the signed result fits in a Smi.
2519       __ Addu(scratch2, scratch1, Operand(0x40000000));
2520       __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
2521       __ SmiTag(v0, scratch1);
2522       __ Ret();
2523       break;
2524     default:
2525       UNREACHABLE();
2526   }
2527   __ bind(&not_smi_result);
2528 }
2529 
2530 
2531 void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
2532                                        bool smi_operands,
2533                                        Label* not_numbers,
2534                                        Label* gc_required) {
2535   Register left = a1;
2536   Register right = a0;
2537   Register scratch1 = t3;
2538   Register scratch2 = t5;
2539   Register scratch3 = t0;
2540 
2541   ASSERT(smi_operands || (not_numbers != NULL));
2542   if (smi_operands && FLAG_debug_code) {
2543     __ AbortIfNotSmi(left);
2544     __ AbortIfNotSmi(right);
2545   }
2546 
2547   Register heap_number_map = t2;
2548   __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2549 
2550   switch (op_) {
2551     case Token::ADD:
2552     case Token::SUB:
2553     case Token::MUL:
2554     case Token::DIV:
2555     case Token::MOD: {
2556       // Load left and right operands into f12 and f14 or a0/a1 and a2/a3
2557       // depending on whether FPU is available or not.
2558       FloatingPointHelper::Destination destination =
2559           CpuFeatures::IsSupported(FPU) &&
2560           op_ != Token::MOD ?
2561               FloatingPointHelper::kFPURegisters :
2562               FloatingPointHelper::kCoreRegisters;
2563 
2564       // Allocate new heap number for result.
2565       Register result = s0;
2566       GenerateHeapResultAllocation(
2567           masm, result, heap_number_map, scratch1, scratch2, gc_required);
2568 
2569       // Load the operands.
2570       if (smi_operands) {
2571         FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
2572       } else {
2573         FloatingPointHelper::LoadOperands(masm,
2574                                           destination,
2575                                           heap_number_map,
2576                                           scratch1,
2577                                           scratch2,
2578                                           not_numbers);
2579       }
2580 
2581       // Calculate the result.
2582       if (destination == FloatingPointHelper::kFPURegisters) {
2583         // Using FPU registers:
2584         // f12: Left value.
2585         // f14: Right value.
2586         CpuFeatures::Scope scope(FPU);
2587         switch (op_) {
2588         case Token::ADD:
2589           __ add_d(f10, f12, f14);
2590           break;
2591         case Token::SUB:
2592           __ sub_d(f10, f12, f14);
2593           break;
2594         case Token::MUL:
2595           __ mul_d(f10, f12, f14);
2596           break;
2597         case Token::DIV:
2598           __ div_d(f10, f12, f14);
2599           break;
2600         default:
2601           UNREACHABLE();
2602         }
2603 
2604         // ARM uses a workaround here because of the unaligned HeapNumber
2605         // kValueOffset. On MIPS this workaround is built into sdc1 so
2606         // there's no point in generating even more instructions.
2607         __ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset));
2608         __ Ret(USE_DELAY_SLOT);
2609         __ mov(v0, result);
2610       } else {
2611         // Call the C function to handle the double operation.
2612         FloatingPointHelper::CallCCodeForDoubleOperation(masm,
2613                                                          op_,
2614                                                          result,
2615                                                          scratch1);
2616         if (FLAG_debug_code) {
2617           __ stop("Unreachable code.");
2618         }
2619       }
2620       break;
2621     }
2622     case Token::BIT_OR:
2623     case Token::BIT_XOR:
2624     case Token::BIT_AND:
2625     case Token::SAR:
2626     case Token::SHR:
2627     case Token::SHL: {
2628       if (smi_operands) {
2629         __ SmiUntag(a3, left);
2630         __ SmiUntag(a2, right);
2631       } else {
2632         // Convert operands to 32-bit integers. Right in a2 and left in a3.
2633         FloatingPointHelper::ConvertNumberToInt32(masm,
2634                                                   left,
2635                                                   a3,
2636                                                   heap_number_map,
2637                                                   scratch1,
2638                                                   scratch2,
2639                                                   scratch3,
2640                                                   f0,
2641                                                   not_numbers);
2642         FloatingPointHelper::ConvertNumberToInt32(masm,
2643                                                   right,
2644                                                   a2,
2645                                                   heap_number_map,
2646                                                   scratch1,
2647                                                   scratch2,
2648                                                   scratch3,
2649                                                   f0,
2650                                                   not_numbers);
2651       }
2652       Label result_not_a_smi;
2653       switch (op_) {
2654         case Token::BIT_OR:
2655           __ Or(a2, a3, Operand(a2));
2656           break;
2657         case Token::BIT_XOR:
2658           __ Xor(a2, a3, Operand(a2));
2659           break;
2660         case Token::BIT_AND:
2661           __ And(a2, a3, Operand(a2));
2662           break;
2663         case Token::SAR:
2664           // Use only the 5 least significant bits of the shift count.
2665           __ GetLeastBitsFromInt32(a2, a2, 5);
2666           __ srav(a2, a3, a2);
2667           break;
2668         case Token::SHR:
2669           // Use only the 5 least significant bits of the shift count.
2670           __ GetLeastBitsFromInt32(a2, a2, 5);
2671           __ srlv(a2, a3, a2);
2672           // SHR is special because it is required to produce a positive answer.
2673           // The code below for writing into heap numbers isn't capable of
2674           // writing the register as an unsigned int, so we go to the slow case
2675           // if we hit this situation.
2676           if (CpuFeatures::IsSupported(FPU)) {
2677             __ Branch(&result_not_a_smi, lt, a2, Operand(zero_reg));
2678           } else {
2679             __ Branch(not_numbers, lt, a2, Operand(zero_reg));
2680           }
2681           break;
2682         case Token::SHL:
2683           // Use only the 5 least significant bits of the shift count.
2684           __ GetLeastBitsFromInt32(a2, a2, 5);
2685           __ sllv(a2, a3, a2);
2686           break;
2687         default:
2688           UNREACHABLE();
2689       }
2690       // Check that the *signed* result fits in a smi.
2691       __ Addu(a3, a2, Operand(0x40000000));
2692       __ Branch(&result_not_a_smi, lt, a3, Operand(zero_reg));
2693       __ SmiTag(v0, a2);
2694       __ Ret();
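      // Illustrative sketch (ours, not emitted code): the Addu/Branch pair
      // above is the standard smi range check; smis hold 31-bit signed
      // values. In plain C++ (FitsInSmi is a name made up for illustration):
      //
      //   bool FitsInSmi(int32_t x) {  // true iff -2^30 <= x < 2^30
      //     return static_cast<int32_t>(
      //                static_cast<uint32_t>(x) + 0x40000000u) >= 0;
      //   }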
2695 
2696       // Allocate new heap number for result.
2697       __ bind(&result_not_a_smi);
2698       Register result = t1;
2699       if (smi_operands) {
2700         __ AllocateHeapNumber(
2701             result, scratch1, scratch2, heap_number_map, gc_required);
2702       } else {
2703         GenerateHeapResultAllocation(
2704             masm, result, heap_number_map, scratch1, scratch2, gc_required);
2705       }
2706 
2707       // a2: Answer as signed int32.
2708       // t1: Heap number to write answer into.
2709 
2710       // Nothing can go wrong now, so move the heap number to v0, which is the
2711       // result.
2712       __ mov(v0, t1);
2713 
2714       if (CpuFeatures::IsSupported(FPU)) {
2715         // Convert the int32 in a2 to the heap number in v0. As
2716         // mentioned above, SHR needs to always produce a positive result.
2717         CpuFeatures::Scope scope(FPU);
2718         __ mtc1(a2, f0);
2719         if (op_ == Token::SHR) {
2720           __ Cvt_d_uw(f0, f0, f22);
2721         } else {
2722           __ cvt_d_w(f0, f0);
2723         }
2724         // ARM uses a workaround here because of the unaligned HeapNumber
2725         // kValueOffset. On MIPS this workaround is built into sdc1 so
2726         // there's no point in generating even more instructions.
2727         __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
2728         __ Ret();
2729       } else {
2730         // Tail call that writes the int32 in a2 to the heap number in v0, using
2731         // a3 and a0 as scratch. v0 is preserved and returned.
2732         WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
2733         __ TailCallStub(&stub);
2734       }
2735       break;
2736     }
2737     default:
2738       UNREACHABLE();
2739   }
2740 }
2741 
2742 
2743 // Generate the smi code. If the operation on smis is successful this return
2744 // is generated. If the result is not a smi and heap number allocation is not
2745 // requested the code falls through. If number allocation is requested but a
2746 // heap number cannot be allocated the code jumps to the label gc_required.
2747 void BinaryOpStub::GenerateSmiCode(
2748     MacroAssembler* masm,
2749     Label* use_runtime,
2750     Label* gc_required,
2751     SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
2752   Label not_smis;
2753 
2754   Register left = a1;
2755   Register right = a0;
2756   Register scratch1 = t3;
2757 
2758   // Perform combined smi check on both operands.
2759   __ Or(scratch1, left, Operand(right));
2760   STATIC_ASSERT(kSmiTag == 0);
2761   __ JumpIfNotSmi(scratch1, &not_smis);
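  // Illustrative sketch (ours, not emitted code): since kSmiTag == 0, a smi
  // has its low tag bit clear, so OR-ing both operands and testing the tag
  // bit checks them in one go (BothSmi is a name made up for illustration):
  //
  //   bool BothSmi(intptr_t a, intptr_t b) {
  //     return ((a | b) & kSmiTagMask) == 0;
  //   }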
2762 
2763   // If the smi-smi operation results in a smi, the return is generated.
2764   GenerateSmiSmiOperation(masm);
2765 
2766   // If heap number results are possible generate the result in an allocated
2767   // heap number.
2768   if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
2769     GenerateFPOperation(masm, true, use_runtime, gc_required);
2770   }
2771   __ bind(&not_smis);
2772 }
2773 
2774 
2775 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
2776   Label not_smis, call_runtime;
2777 
2778   if (result_type_ == BinaryOpIC::UNINITIALIZED ||
2779       result_type_ == BinaryOpIC::SMI) {
2780     // Only allow smi results.
2781     GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
2782   } else {
2783     // Allow heap number result and don't make a transition if a heap number
2784     // cannot be allocated.
2785     GenerateSmiCode(masm,
2786                     &call_runtime,
2787                     &call_runtime,
2788                     ALLOW_HEAPNUMBER_RESULTS);
2789   }
2790 
2791   // Code falls through if the result is not returned as either a smi or heap
2792   // number.
2793   GenerateTypeTransition(masm);
2794 
2795   __ bind(&call_runtime);
2796   GenerateCallRuntime(masm);
2797 }
2798 
2799 
2800 void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
2801   ASSERT(operands_type_ == BinaryOpIC::STRING);
2802   // Try to add arguments as strings; otherwise, transition to the generic
2803   // BinaryOpIC type.
2804   GenerateAddStrings(masm);
2805   GenerateTypeTransition(masm);
2806 }
2807 
2808 
2809 void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
2810   Label call_runtime;
2811   ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
2812   ASSERT(op_ == Token::ADD);
2813   // If both arguments are strings, call the string add stub.
2814   // Otherwise, do a transition.
2815 
2816   // Registers containing left and right operands respectively.
2817   Register left = a1;
2818   Register right = a0;
2819 
2820   // Test if left operand is a string.
2821   __ JumpIfSmi(left, &call_runtime);
2822   __ GetObjectType(left, a2, a2);
2823   __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
2824 
2825   // Test if right operand is a string.
2826   __ JumpIfSmi(right, &call_runtime);
2827   __ GetObjectType(right, a2, a2);
2828   __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
2829 
2830   StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
2831   GenerateRegisterArgsPush(masm);
2832   __ TailCallStub(&string_add_stub);
2833 
2834   __ bind(&call_runtime);
2835   GenerateTypeTransition(masm);
2836 }
2837 
2838 
2839 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
2840   ASSERT(operands_type_ == BinaryOpIC::INT32);
2841 
2842   Register left = a1;
2843   Register right = a0;
2844   Register scratch1 = t3;
2845   Register scratch2 = t5;
2846   FPURegister double_scratch = f0;
2847   FPURegister single_scratch = f6;
2848 
2849   Register heap_number_result = no_reg;
2850   Register heap_number_map = t2;
2851   __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2852 
2853   Label call_runtime;
2854   // Labels for type transition, used for wrong input or output types.
2855   // Both labels are currently bound to the same position. We use two
2856   // different labels to differentiate the causes leading to type transition.
2857   Label transition;
2858 
2859   // Smi-smi fast case.
2860   Label skip;
2861   __ Or(scratch1, left, right);
2862   __ JumpIfNotSmi(scratch1, &skip);
2863   GenerateSmiSmiOperation(masm);
2864   // Fall through if the result is not a smi.
2865   __ bind(&skip);
2866 
2867   switch (op_) {
2868     case Token::ADD:
2869     case Token::SUB:
2870     case Token::MUL:
2871     case Token::DIV:
2872     case Token::MOD: {
2873       // Load both operands and check that they are 32-bit integers.
2874       // Jump to type transition if they are not. The registers a0 and a1 (right
2875       // and left) are preserved for the runtime call.
2876       FloatingPointHelper::Destination destination =
2877           (CpuFeatures::IsSupported(FPU) && op_ != Token::MOD)
2878               ? FloatingPointHelper::kFPURegisters
2879               : FloatingPointHelper::kCoreRegisters;
2880 
2881       FloatingPointHelper::LoadNumberAsInt32Double(masm,
2882                                                    right,
2883                                                    destination,
2884                                                    f14,
2885                                                    a2,
2886                                                    a3,
2887                                                    heap_number_map,
2888                                                    scratch1,
2889                                                    scratch2,
2890                                                    f2,
2891                                                    &transition);
2892       FloatingPointHelper::LoadNumberAsInt32Double(masm,
2893                                                    left,
2894                                                    destination,
2895                                                    f12,
2896                                                    t0,
2897                                                    t1,
2898                                                    heap_number_map,
2899                                                    scratch1,
2900                                                    scratch2,
2901                                                    f2,
2902                                                    &transition);
2903 
2904       if (destination == FloatingPointHelper::kFPURegisters) {
2905         CpuFeatures::Scope scope(FPU);
2906         Label return_heap_number;
2907         switch (op_) {
2908           case Token::ADD:
2909             __ add_d(f10, f12, f14);
2910             break;
2911           case Token::SUB:
2912             __ sub_d(f10, f12, f14);
2913             break;
2914           case Token::MUL:
2915             __ mul_d(f10, f12, f14);
2916             break;
2917           case Token::DIV:
2918             __ div_d(f10, f12, f14);
2919             break;
2920           default:
2921             UNREACHABLE();
2922         }
2923 
2924         if (op_ != Token::DIV) {
2925           // These operations produce an integer result.
2926           // Try to return a smi if we can.
2927           // Otherwise return a heap number if allowed, or jump to type
2928           // transition.
2929 
2930           Register except_flag = scratch2;
2931           __ EmitFPUTruncate(kRoundToZero,
2932                              single_scratch,
2933                              f10,
2934                              scratch1,
2935                              except_flag);
2936 
2937           if (result_type_ <= BinaryOpIC::INT32) {
2938             // If except_flag != 0, result does not fit in a 32-bit integer.
2939             __ Branch(&transition, ne, except_flag, Operand(zero_reg));
2940           }
2941 
2942           // Check if the result fits in a smi.
2943           __ mfc1(scratch1, single_scratch);
2944           __ Addu(scratch2, scratch1, Operand(0x40000000));
2945           // If not, try to return a heap number.
2946           __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg));
2947           // Check for minus zero. Return heap number for minus zero.
2948           Label not_zero;
2949           __ Branch(&not_zero, ne, scratch1, Operand(zero_reg));
2950           __ mfc1(scratch2, f11);
2951           __ And(scratch2, scratch2, HeapNumber::kSignMask);
2952           __ Branch(&return_heap_number, ne, scratch2, Operand(zero_reg));
2953           __ bind(&not_zero);
2954 
2955           // Tag the result and return.
2956           __ SmiTag(v0, scratch1);
2957           __ Ret();
2958         } else {
2959           // DIV just falls through to allocating a heap number.
2960         }
2961 
2962         __ bind(&return_heap_number);
2963         // Return a heap number, or fall through to type transition or runtime
2964         // call if we can't.
2965         if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
2966                                                  : BinaryOpIC::INT32)) {
2967           // We are using FPU registers so s0 is available.
2968           heap_number_result = s0;
2969           GenerateHeapResultAllocation(masm,
2970                                        heap_number_result,
2971                                        heap_number_map,
2972                                        scratch1,
2973                                        scratch2,
2974                                        &call_runtime);
2975           __ mov(v0, heap_number_result);
2976           __ sdc1(f10, FieldMemOperand(v0, HeapNumber::kValueOffset));
2977           __ Ret();
2978         }
2979 
2980         // A DIV operation expecting an integer result falls through
2981         // to type transition.
2982 
2983       } else {
2984         // We preserved a0 and a1 to be able to call runtime.
2985         // Save the left value on the stack.
2986         __ Push(t1, t0);
2987 
2988         Label pop_and_call_runtime;
2989 
2990         // Allocate a heap number to store the result.
2991         heap_number_result = s0;
2992         GenerateHeapResultAllocation(masm,
2993                                      heap_number_result,
2994                                      heap_number_map,
2995                                      scratch1,
2996                                      scratch2,
2997                                      &pop_and_call_runtime);
2998 
2999         // Load the left value from the value saved on the stack.
3000         __ Pop(a1, a0);
3001 
3002         // Call the C function to handle the double operation.
3003         FloatingPointHelper::CallCCodeForDoubleOperation(
3004             masm, op_, heap_number_result, scratch1);
3005         if (FLAG_debug_code) {
3006           __ stop("Unreachable code.");
3007         }
3008 
3009         __ bind(&pop_and_call_runtime);
3010         __ Drop(2);
3011         __ Branch(&call_runtime);
3012       }
3013 
3014       break;
3015     }
3016 
3017     case Token::BIT_OR:
3018     case Token::BIT_XOR:
3019     case Token::BIT_AND:
3020     case Token::SAR:
3021     case Token::SHR:
3022     case Token::SHL: {
3023       Label return_heap_number;
3024       Register scratch3 = t1;
3025       // Convert operands to 32-bit integers. Right in a2 and left in a3. The
3026       // registers a0 and a1 (right and left) are preserved for the runtime
3027       // call.
3028       FloatingPointHelper::LoadNumberAsInt32(masm,
3029                                              left,
3030                                              a3,
3031                                              heap_number_map,
3032                                              scratch1,
3033                                              scratch2,
3034                                              scratch3,
3035                                              f0,
3036                                              &transition);
3037       FloatingPointHelper::LoadNumberAsInt32(masm,
3038                                              right,
3039                                              a2,
3040                                              heap_number_map,
3041                                              scratch1,
3042                                              scratch2,
3043                                              scratch3,
3044                                              f0,
3045                                              &transition);
3046 
3047       // The ECMA-262 standard specifies that, for shift operations, only the
3048       // 5 least significant bits of the shift value should be used.
3049       switch (op_) {
3050         case Token::BIT_OR:
3051           __ Or(a2, a3, Operand(a2));
3052           break;
3053         case Token::BIT_XOR:
3054           __ Xor(a2, a3, Operand(a2));
3055           break;
3056         case Token::BIT_AND:
3057           __ And(a2, a3, Operand(a2));
3058           break;
3059         case Token::SAR:
3060           __ And(a2, a2, Operand(0x1f));
3061           __ srav(a2, a3, a2);
3062           break;
3063         case Token::SHR:
3064           __ And(a2, a2, Operand(0x1f));
3065           __ srlv(a2, a3, a2);
3066           // SHR is special because it is required to produce a positive answer.
3067           // We only get a negative result if the shift value (a2) is 0.
3068           // This result cannot be represented as a signed 32-bit integer, so
3069           // try to return a heap number if we can.
3070           // The non-FPU code does not support this special case, so jump to
3071           // the runtime in that case.
3072           if (CpuFeatures::IsSupported(FPU)) {
3073             __ Branch((result_type_ <= BinaryOpIC::INT32)
3074                         ? &transition
3075                         : &return_heap_number,
3076                        lt,
3077                        a2,
3078                        Operand(zero_reg));
3079           } else {
3080             __ Branch((result_type_ <= BinaryOpIC::INT32)
3081                         ? &transition
3082                         : &call_runtime,
3083                        lt,
3084                        a2,
3085                        Operand(zero_reg));
3086           }
3087           break;
3088         case Token::SHL:
3089           __ And(a2, a2, Operand(0x1f));
3090           __ sllv(a2, a3, a2);
3091           break;
3092         default:
3093           UNREACHABLE();
3094       }
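      // Illustrative sketch (ours, not emitted code): the And(a2, a2, 0x1f)
      // above implements the ECMA-262 rule that only the low 5 bits of the
      // shift count are used, e.g. for SAR (EcmaSar is an illustrative name):
      //
      //   int32_t EcmaSar(int32_t lhs, int32_t count) {
      //     return lhs >> (count & 0x1f);  // arithmetic shift, like srav
      //   }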
3095 
3096       // Check if the result fits in a smi.
3097       __ Addu(scratch1, a2, Operand(0x40000000));
3098       // If not, try to return a heap number. (We know the result is an int32.)
3099       __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg));
3100       // Tag the result and return.
3101       __ SmiTag(v0, a2);
3102       __ Ret();
3103 
3104       __ bind(&return_heap_number);
3105       heap_number_result = t1;
3106       GenerateHeapResultAllocation(masm,
3107                                    heap_number_result,
3108                                    heap_number_map,
3109                                    scratch1,
3110                                    scratch2,
3111                                    &call_runtime);
3112 
3113       if (CpuFeatures::IsSupported(FPU)) {
3114         CpuFeatures::Scope scope(FPU);
3115 
3116         if (op_ != Token::SHR) {
3117           // Convert the result to a floating point value.
3118           __ mtc1(a2, double_scratch);
3119           __ cvt_d_w(double_scratch, double_scratch);
3120         } else {
3121           // The result must be interpreted as an unsigned 32-bit integer.
3122           __ mtc1(a2, double_scratch);
3123           __ Cvt_d_uw(double_scratch, double_scratch, single_scratch);
3124         }
3125 
3126         // Store the result.
3127         __ mov(v0, heap_number_result);
3128         __ sdc1(double_scratch, FieldMemOperand(v0, HeapNumber::kValueOffset));
3129         __ Ret();
3130       } else {
3131         // Tail call that writes the int32 in a2 to the heap number in v0, using
3132         // a3 and a0 as scratch. v0 is preserved and returned.
3133         __ mov(a0, t1);
3134         WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
3135         __ TailCallStub(&stub);
3136       }
3137 
3138       break;
3139     }
3140 
3141     default:
3142       UNREACHABLE();
3143   }
3144 
3145   // We never expect DIV to yield an integer result, so we always generate
3146   // type transition code for DIV operations expecting an integer result: the
3147   // code will fall through to this type transition.
3148   if (transition.is_linked() ||
3149       ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
3150     __ bind(&transition);
3151     GenerateTypeTransition(masm);
3152   }
3153 
3154   __ bind(&call_runtime);
3155   GenerateCallRuntime(masm);
3156 }
3157 
3158 
3159 void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
3160   Label call_runtime;
3161 
3162   if (op_ == Token::ADD) {
3163     // Handle string addition here, because it is the only operation
3164     // that does not do a ToNumber conversion on the operands.
3165     GenerateAddStrings(masm);
3166   }
3167 
3168   // Convert oddball arguments to numbers.
3169   Label check, done;
3170   __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
3171   __ Branch(&check, ne, a1, Operand(t0));
3172   if (Token::IsBitOp(op_)) {
3173     __ li(a1, Operand(Smi::FromInt(0)));
3174   } else {
3175     __ LoadRoot(a1, Heap::kNanValueRootIndex);
3176   }
3177   __ jmp(&done);
3178   __ bind(&check);
3179   __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
3180   __ Branch(&done, ne, a0, Operand(t0));
3181   if (Token::IsBitOp(op_)) {
3182     __ li(a0, Operand(Smi::FromInt(0)));
3183   } else {
3184     __ LoadRoot(a0, Heap::kNanValueRootIndex);
3185   }
3186   __ bind(&done);
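  // Illustrative sketch (ours, not emitted code): ToNumber(undefined) is NaN,
  // but bitwise operators truncate NaN to 0, so for bit ops we can substitute
  // Smi 0 directly. In plain C++ (needs <limits>):
  //
  //   double value = Token::IsBitOp(op)
  //       ? 0.0 : std::numeric_limits<double>::quiet_NaN();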
3187 
3188   GenerateHeapNumberStub(masm);
3189 }
3190 
3191 
3192 void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
3193   Label call_runtime;
3194   GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
3195 
3196   __ bind(&call_runtime);
3197   GenerateCallRuntime(masm);
3198 }
3199 
3200 
3201 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
3202   Label call_runtime, call_string_add_or_runtime;
3203 
3204   GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
3205 
3206   GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
3207 
3208   __ bind(&call_string_add_or_runtime);
3209   if (op_ == Token::ADD) {
3210     GenerateAddStrings(masm);
3211   }
3212 
3213   __ bind(&call_runtime);
3214   GenerateCallRuntime(masm);
3215 }
3216 
3217 
3218 void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
3219   ASSERT(op_ == Token::ADD);
3220   Label left_not_string, call_runtime;
3221 
3222   Register left = a1;
3223   Register right = a0;
3224 
3225   // Check if left argument is a string.
3226   __ JumpIfSmi(left, &left_not_string);
3227   __ GetObjectType(left, a2, a2);
3228   __ Branch(&left_not_string, ge, a2, Operand(FIRST_NONSTRING_TYPE));
3229 
3230   StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
3231   GenerateRegisterArgsPush(masm);
3232   __ TailCallStub(&string_add_left_stub);
3233 
3234   // Left operand is not a string, test right.
3235   __ bind(&left_not_string);
3236   __ JumpIfSmi(right, &call_runtime);
3237   __ GetObjectType(right, a2, a2);
3238   __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
3239 
3240   StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
3241   GenerateRegisterArgsPush(masm);
3242   __ TailCallStub(&string_add_right_stub);
3243 
3244   // At least one argument is not a string.
3245   __ bind(&call_runtime);
3246 }
3247 
3248 
3249 void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
3250   GenerateRegisterArgsPush(masm);
3251   switch (op_) {
3252     case Token::ADD:
3253       __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
3254       break;
3255     case Token::SUB:
3256       __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
3257       break;
3258     case Token::MUL:
3259       __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
3260       break;
3261     case Token::DIV:
3262       __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
3263       break;
3264     case Token::MOD:
3265       __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
3266       break;
3267     case Token::BIT_OR:
3268       __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
3269       break;
3270     case Token::BIT_AND:
3271       __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
3272       break;
3273     case Token::BIT_XOR:
3274       __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
3275       break;
3276     case Token::SAR:
3277       __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
3278       break;
3279     case Token::SHR:
3280       __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
3281       break;
3282     case Token::SHL:
3283       __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
3284       break;
3285     default:
3286       UNREACHABLE();
3287   }
3288 }
3289 
3290 
3291 void BinaryOpStub::GenerateHeapResultAllocation(
3292     MacroAssembler* masm,
3293     Register result,
3294     Register heap_number_map,
3295     Register scratch1,
3296     Register scratch2,
3297     Label* gc_required) {
3298 
3299   // Code below will scratch result if allocation fails. To keep both
3300   // arguments intact for the runtime call, result cannot be one of them.
3301   ASSERT(!result.is(a0) && !result.is(a1));
3302 
3303   if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
3304     Label skip_allocation, allocated;
3305     Register overwritable_operand = mode_ == OVERWRITE_LEFT ? a1 : a0;
3306     // If the overwritable operand is already an object, we skip the
3307     // allocation of a heap number.
3308     __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
3309     // Allocate a heap number for the result.
3310     __ AllocateHeapNumber(
3311         result, scratch1, scratch2, heap_number_map, gc_required);
3312     __ Branch(&allocated);
3313     __ bind(&skip_allocation);
3314     // Use object holding the overwritable operand for result.
3315     __ mov(result, overwritable_operand);
3316     __ bind(&allocated);
3317   } else {
3318     ASSERT(mode_ == NO_OVERWRITE);
3319     __ AllocateHeapNumber(
3320         result, scratch1, scratch2, heap_number_map, gc_required);
3321   }
3322 }
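// Pseudocode sketch (ours, not emitted code) of the allocation policy above:
// reuse the overwritable operand when it is already a heap object, otherwise
// allocate a fresh heap number. a1 holds left and a0 holds right;
// AllocateHeapNumber() stands in for the macro-assembler call:
//
//   if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) {
//     Object* operand = (mode == OVERWRITE_LEFT) ? left : right;
//     result = operand->IsSmi() ? AllocateHeapNumber() : operand;
//   } else {  // NO_OVERWRITE
//     result = AllocateHeapNumber();
//   }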
3323 
3324 
3325 void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
3326   __ Push(a1, a0);
3327 }
3328 
3329 
3330 
3331 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
3332   // Untagged case: double input in f4, double result goes
3333   //   into f4.
3334   // Tagged case: tagged input on top of stack and in a0,
3335   //   tagged result (heap number) goes into v0.
3336 
3337   Label input_not_smi;
3338   Label loaded;
3339   Label calculate;
3340   Label invalid_cache;
3341   const Register scratch0 = t5;
3342   const Register scratch1 = t3;
3343   const Register cache_entry = a0;
3344   const bool tagged = (argument_type_ == TAGGED);
3345 
3346   if (CpuFeatures::IsSupported(FPU)) {
3347     CpuFeatures::Scope scope(FPU);
3348 
3349     if (tagged) {
3350       // Argument is a number and is on the stack and in a0.
3351       // Load argument and check if it is a smi.
3352       __ JumpIfNotSmi(a0, &input_not_smi);
3353 
3354       // Input is a smi. Convert to double and load the low and high words
3355       // of the double into a2, a3.
3356       __ sra(t0, a0, kSmiTagSize);
3357       __ mtc1(t0, f4);
3358       __ cvt_d_w(f4, f4);
3359       __ Move(a2, a3, f4);
3360       __ Branch(&loaded);
3361 
3362       __ bind(&input_not_smi);
3363       // Check if input is a HeapNumber.
3364       __ CheckMap(a0,
3365                   a1,
3366                   Heap::kHeapNumberMapRootIndex,
3367                   &calculate,
3368                   DONT_DO_SMI_CHECK);
3369       // Input is a HeapNumber. Load the
3370       // low and high words into a2, a3.
3371       __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset));
3372       __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4));
3373     } else {
3374       // Input is untagged double in f4. Output goes to f4.
3375       __ Move(a2, a3, f4);
3376     }
3377     __ bind(&loaded);
3378     // a2 = low 32 bits of double value.
3379     // a3 = high 32 bits of double value.
3380     // Compute hash (the shifts are arithmetic):
3381     //   h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
3382     __ Xor(a1, a2, a3);
3383     __ sra(t0, a1, 16);
3384     __ Xor(a1, a1, t0);
3385     __ sra(t0, a1, 8);
3386     __ Xor(a1, a1, t0);
3387     ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
3388     __ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
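    // Illustrative sketch (ours, not emitted code): the xor/shift sequence
    // above is the cache hash. The shifts are arithmetic, matching sra
    // (implementation-defined for int32_t in C++, but universal in practice):
    //
    //   uint32_t CacheHash(int32_t low, int32_t high) {
    //     int32_t h = low ^ high;
    //     h ^= h >> 16;
    //     h ^= h >> 8;
    //     return h & (TranscendentalCache::SubCache::kCacheSize - 1);
    //   }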
3389 
3390     // a2 = low 32 bits of double value.
3391     // a3 = high 32 bits of double value.
3392     // a1 = TranscendentalCache::hash(double value).
3393     __ li(cache_entry, Operand(
3394         ExternalReference::transcendental_cache_array_address(
3395             masm->isolate())));
3396     // a0 points to cache array.
3397     __ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof(
3398         Isolate::Current()->transcendental_cache()->caches_[0])));
3399     // a0 points to the cache for the type type_.
3400     // If NULL, the cache hasn't been initialized yet, so go through runtime.
3401     __ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg));
3402 
3403 #ifdef DEBUG
3404     // Check that the layout of cache elements matches expectations.
3405     { TranscendentalCache::SubCache::Element test_elem[2];
3406       char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
3407       char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
3408       char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
3409       char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
3410       char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
3411       CHECK_EQ(12, elem2_start - elem_start);  // Two uint32_t's and a pointer.
3412       CHECK_EQ(0, elem_in0 - elem_start);
3413       CHECK_EQ(kIntSize, elem_in1 - elem_start);
3414       CHECK_EQ(2 * kIntSize, elem_out - elem_start);
3415     }
3416 #endif
3417 
3418     // Find the address of the a1-th entry in the cache, i.e., &a0[a1 * 12].
3419     __ sll(t0, a1, 1);
3420     __ Addu(a1, a1, t0);
3421     __ sll(t0, a1, 2);
3422     __ Addu(cache_entry, cache_entry, t0);
3423 
3424     // Check if cache matches: Double value is stored in uint32_t[2] array.
3425     __ lw(t0, MemOperand(cache_entry, 0));
3426     __ lw(t1, MemOperand(cache_entry, 4));
3427     __ lw(t2, MemOperand(cache_entry, 8));
3428     __ Branch(&calculate, ne, a2, Operand(t0));
3429     __ Branch(&calculate, ne, a3, Operand(t1));
3430     // Cache hit. Load the result, clean up and return.
3431     Counters* counters = masm->isolate()->counters();
3432     __ IncrementCounter(
3433         counters->transcendental_cache_hit(), 1, scratch0, scratch1);
3434     if (tagged) {
3435       // Pop input value from stack and load result into v0.
3436       __ Drop(1);
3437       __ mov(v0, t2);
3438     } else {
3439       // Load result into f4.
3440       __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
3441     }
3442     __ Ret();
3443   }  // if (CpuFeatures::IsSupported(FPU))
3444 
3445   __ bind(&calculate);
3446   Counters* counters = masm->isolate()->counters();
3447   __ IncrementCounter(
3448       counters->transcendental_cache_miss(), 1, scratch0, scratch1);
3449   if (tagged) {
3450     __ bind(&invalid_cache);
3451     __ TailCallExternalReference(ExternalReference(RuntimeFunction(),
3452                                                    masm->isolate()),
3453                                  1,
3454                                  1);
3455   } else {
3456     if (!CpuFeatures::IsSupported(FPU)) UNREACHABLE();
3457     CpuFeatures::Scope scope(FPU);
3458 
3459     Label no_update;
3460     Label skip_cache;
3461 
3462     // Call C function to calculate the result and update the cache.
3463     // Register a0 holds precalculated cache entry address; preserve
3464     // it on the stack and pop it into register cache_entry after the
3465     // call.
3466     __ Push(cache_entry, a2, a3);
3467     GenerateCallCFunction(masm, scratch0);
3468     __ GetCFunctionDoubleResult(f4);
3469 
3470     // Try to update the cache. If we cannot allocate a
3471     // heap number, we return the result without updating.
3472     __ Pop(cache_entry, a2, a3);
3473     __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
3474     __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
3475     __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
3476 
3477     __ sw(a2, MemOperand(cache_entry, 0 * kPointerSize));
3478     __ sw(a3, MemOperand(cache_entry, 1 * kPointerSize));
3479     __ sw(t2, MemOperand(cache_entry, 2 * kPointerSize));
3480 
3481     __ Ret(USE_DELAY_SLOT);
3482     __ mov(v0, cache_entry);
3483 
3484     __ bind(&invalid_cache);
3485     // The cache is invalid. Call runtime which will recreate the
3486     // cache.
3487     __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
3488     __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
3489     __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset));
3490     {
3491       FrameScope scope(masm, StackFrame::INTERNAL);
3492       __ push(a0);
3493       __ CallRuntime(RuntimeFunction(), 1);
3494     }
3495     __ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset));
3496     __ Ret();
3497 
3498     __ bind(&skip_cache);
3499     // Call the C function to calculate the result and return it directly
3500     // without updating the cache.
3501     GenerateCallCFunction(masm, scratch0);
3502     __ GetCFunctionDoubleResult(f4);
3503     __ bind(&no_update);
3504 
3505     // We return the value in f4 without adding it to the cache, but
3506     // we cause a scavenging GC so that future allocations will succeed.
3507     {
3508       FrameScope scope(masm, StackFrame::INTERNAL);
3509 
3510       // Allocate an aligned object larger than a HeapNumber.
3511       ASSERT(4 * kPointerSize >= HeapNumber::kSize);
3512       __ li(scratch0, Operand(4 * kPointerSize));
3513       __ push(scratch0);
3514       __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
3515     }
3516     __ Ret();
3517   }
3518 }
3519 
3520 
3521 void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
3522                                                     Register scratch) {
3523   __ push(ra);
3524   __ PrepareCallCFunction(2, scratch);
3525   if (IsMipsSoftFloatABI) {
3526     __ Move(a0, a1, f4);
3527   } else {
3528     __ mov_d(f12, f4);
3529   }
3530   AllowExternalCallThatCantCauseGC scope(masm);
3531   Isolate* isolate = masm->isolate();
3532   switch (type_) {
3533     case TranscendentalCache::SIN:
3534       __ CallCFunction(
3535           ExternalReference::math_sin_double_function(isolate),
3536           0, 1);
3537       break;
3538     case TranscendentalCache::COS:
3539       __ CallCFunction(
3540           ExternalReference::math_cos_double_function(isolate),
3541           0, 1);
3542       break;
3543     case TranscendentalCache::TAN:
3544       __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
3545           0, 1);
3546       break;
3547     case TranscendentalCache::LOG:
3548       __ CallCFunction(
3549           ExternalReference::math_log_double_function(isolate),
3550           0, 1);
3551       break;
3552     default:
3553       UNIMPLEMENTED();
3554       break;
3555   }
3556   __ pop(ra);
3557 }
3558 
3559 
3560 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
3561   switch (type_) {
3562     // Add more cases when necessary.
3563     case TranscendentalCache::SIN: return Runtime::kMath_sin;
3564     case TranscendentalCache::COS: return Runtime::kMath_cos;
3565     case TranscendentalCache::TAN: return Runtime::kMath_tan;
3566     case TranscendentalCache::LOG: return Runtime::kMath_log;
3567     default:
3568       UNIMPLEMENTED();
3569       return Runtime::kAbort;
3570   }
3571 }
3572 
3573 
3574 void StackCheckStub::Generate(MacroAssembler* masm) {
3575   __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
3576 }
3577 
3578 
3579 void InterruptStub::Generate(MacroAssembler* masm) {
3580   __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
3581 }
3582 
3583 
3584 void MathPowStub::Generate(MacroAssembler* masm) {
3585   CpuFeatures::Scope fpu_scope(FPU);
3586   const Register base = a1;
3587   const Register exponent = a2;
3588   const Register heapnumbermap = t1;
3589   const Register heapnumber = v0;
3590   const DoubleRegister double_base = f2;
3591   const DoubleRegister double_exponent = f4;
3592   const DoubleRegister double_result = f0;
3593   const DoubleRegister double_scratch = f6;
3594   const FPURegister single_scratch = f8;
3595   const Register scratch = t5;
3596   const Register scratch2 = t3;
3597 
3598   Label call_runtime, done, int_exponent;
3599   if (exponent_type_ == ON_STACK) {
3600     Label base_is_smi, unpack_exponent;
3601     // The exponent and base are supplied as arguments on the stack.
3602     // This can only happen if the stub is called from non-optimized code.
3603     // Load input parameters from stack to double registers.
3604     __ lw(base, MemOperand(sp, 1 * kPointerSize));
3605     __ lw(exponent, MemOperand(sp, 0 * kPointerSize));
3606 
3607     __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
3608 
3609     __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
3610     __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
3611     __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
3612 
3613     __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
3614     __ jmp(&unpack_exponent);
3615 
3616     __ bind(&base_is_smi);
3617     __ mtc1(scratch, single_scratch);
3618     __ cvt_d_w(double_base, single_scratch);
3619     __ bind(&unpack_exponent);
3620 
3621     __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
3622 
3623     __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
3624     __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
3625     __ ldc1(double_exponent,
3626             FieldMemOperand(exponent, HeapNumber::kValueOffset));
3627   } else if (exponent_type_ == TAGGED) {
3628     // Base is already in double_base.
3629     __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
3630 
3631     __ ldc1(double_exponent,
3632             FieldMemOperand(exponent, HeapNumber::kValueOffset));
3633   }
3634 
3635   if (exponent_type_ != INTEGER) {
3636     Label int_exponent_convert;
3637     // Detect integer exponents stored as double.
3638     __ EmitFPUTruncate(kRoundToMinusInf,
3639                        single_scratch,
3640                        double_exponent,
3641                        scratch,
3642                        scratch2,
3643                        kCheckForInexactConversion);
3644     // scratch2 == 0 means there was no conversion error.
3645     __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
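    // Illustrative sketch (ours, not emitted code): the truncation with the
    // inexact-conversion check above is equivalent in spirit to this
    // <cmath>-based test (IsIntegerValuedDouble is an illustrative name):
    //
    //   bool IsIntegerValuedDouble(double d) {
    //     return std::floor(d) == d;  // no fractional part was discarded
    //   }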
3646 
3647     if (exponent_type_ == ON_STACK) {
3648       // Detect square root case.  Crankshaft detects constant +/-0.5 at
3649       // compile time and uses DoMathPowHalf instead.  We then skip this check
3650       // for non-constant cases of +/-0.5 as these hardly occur.
3651       Label not_plus_half;
3652 
3653       // Test for 0.5.
3654       __ Move(double_scratch, 0.5);
3655       __ BranchF(USE_DELAY_SLOT,
3656                  &not_plus_half,
3657                  NULL,
3658                  ne,
3659                  double_exponent,
3660                  double_scratch);
3661       // double_scratch can be overwritten in the delay slot.
3662       // Calculates square root of base.  Check for the special case of
3663       // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
3664       __ Move(double_scratch, -V8_INFINITY);
3665       __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
3666       __ neg_d(double_result, double_scratch);
3667 
3668       // Add +0 to convert -0 to +0.
3669       __ add_d(double_scratch, double_base, kDoubleRegZero);
3670       __ sqrt_d(double_result, double_scratch);
3671       __ jmp(&done);
3672 
3673       __ bind(&not_plus_half);
3674       __ Move(double_scratch, -0.5);
3675       __ BranchF(USE_DELAY_SLOT,
3676                  &call_runtime,
3677                  NULL,
3678                  ne,
3679                  double_exponent,
3680                  double_scratch);
3681       // double_scratch can be overwritten in the delay slot.
3682       // Calculates square root of base.  Check for the special case of
3683       // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
3684       __ Move(double_scratch, -V8_INFINITY);
3685       __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
3686       __ Move(double_result, kDoubleRegZero);
3687 
3688       // Add +0 to convert -0 to +0.
3689       __ add_d(double_scratch, double_base, kDoubleRegZero);
3690       __ Move(double_result, 1);
3691       __ sqrt_d(double_scratch, double_scratch);
3692       __ div_d(double_result, double_result, double_scratch);
3693       __ jmp(&done);
3694     }
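    // Illustrative sketch (ours, not emitted code) of the +/-0.5 fast paths
    // above, per ECMA-262 15.8.2.13; assumes IEEE doubles and <cmath>, and
    // the helper names are made up for illustration:
    //
    //   double PowHalf(double base) {        // Math.pow(base, 0.5)
    //     if (std::isinf(base)) return std::fabs(base);  // pow(-inf, .5) == +inf
    //     return std::sqrt(base + 0.0);      // +0.0 maps -0.0 to +0.0
    //   }
    //   double PowMinusHalf(double base) {   // Math.pow(base, -0.5)
    //     return 1.0 / PowHalf(base);        // pow(-inf, -.5) == +0
    //   }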
3695 
3696     __ push(ra);
3697     {
3698       AllowExternalCallThatCantCauseGC scope(masm);
3699       __ PrepareCallCFunction(0, 2, scratch);
3700       __ SetCallCDoubleArguments(double_base, double_exponent);
3701       __ CallCFunction(
3702           ExternalReference::power_double_double_function(masm->isolate()),
3703           0, 2);
3704     }
3705     __ pop(ra);
3706     __ GetCFunctionDoubleResult(double_result);
3707     __ jmp(&done);
3708 
3709     __ bind(&int_exponent_convert);
3710     __ mfc1(scratch, single_scratch);
3711   }
3712 
3713   // Calculate power with integer exponent.
3714   __ bind(&int_exponent);
3715 
3716   // Get two copies of exponent in the registers scratch and exponent.
3717   if (exponent_type_ == INTEGER) {
3718     __ mov(scratch, exponent);
3719   } else {
3720     // Exponent has previously been stored into scratch as an untagged integer.
3721     __ mov(exponent, scratch);
3722   }
3723 
3724   __ mov_d(double_scratch, double_base);  // Back up base.
3725   __ Move(double_result, 1.0);
3726 
3727   // Get absolute value of exponent.
3728   Label positive_exponent;
3729   __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
3730   __ Subu(scratch, zero_reg, scratch);
3731   __ bind(&positive_exponent);
3732 
3733   Label while_true, no_carry, loop_end;
3734   __ bind(&while_true);
3735 
3736   __ And(scratch2, scratch, 1);
3737 
3738   __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
3739   __ mul_d(double_result, double_result, double_scratch);
3740   __ bind(&no_carry);
3741 
3742   __ sra(scratch, scratch, 1);
3743 
3744   __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
3745   __ mul_d(double_scratch, double_scratch, double_scratch);
3746 
3747   __ Branch(&while_true);
3748 
3749   __ bind(&loop_end);
3750 
3751   __ Branch(&done, ge, exponent, Operand(zero_reg));
3752   __ Move(double_scratch, 1.0);
3753   __ div_d(double_result, double_scratch, double_result);
3754   // Test whether result is zero.  Bail out to check for subnormal result.
3755   // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
3756   __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);
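  // Illustrative sketch (ours, not emitted code): the loop above is binary
  // exponentiation on |exponent|, with a reciprocal for negative exponents;
  // the zero check covers results that underflow and must be recomputed in C.
  // PowInt is a name made up for illustration:
  //
  //   double PowInt(double base, int32_t exp) {
  //     double result = 1.0;
  //     uint32_t e = exp < 0 ? 0u - static_cast<uint32_t>(exp)
  //                          : static_cast<uint32_t>(exp);
  //     for (; e != 0; e >>= 1) {
  //       if (e & 1) result *= base;
  //       base *= base;
  //     }
  //     return exp < 0 ? 1.0 / result : result;
  //   }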
3757 
3758   // double_exponent may not contain the exponent value if the input was a
3759   // smi.  We set it to the exponent value before bailing out.
3760   __ mtc1(exponent, single_scratch);
3761   __ cvt_d_w(double_exponent, single_scratch);
3762 
3763   // Returning or bailing out.
3764   Counters* counters = masm->isolate()->counters();
3765   if (exponent_type_ == ON_STACK) {
3766     // The arguments are still on the stack.
3767     __ bind(&call_runtime);
3768     __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
3769 
3770     // The stub is called from non-optimized code, which expects the result
3771     // as a heap number in v0.
3772     __ bind(&done);
3773     __ AllocateHeapNumber(
3774         heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
3775     __ sdc1(double_result,
3776             FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
3777     ASSERT(heapnumber.is(v0));
3778     __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
3779     __ DropAndRet(2);
3780   } else {
3781     __ push(ra);
3782     {
3783       AllowExternalCallThatCantCauseGC scope(masm);
3784       __ PrepareCallCFunction(0, 2, scratch);
3785       __ SetCallCDoubleArguments(double_base, double_exponent);
3786       __ CallCFunction(
3787           ExternalReference::power_double_double_function(masm->isolate()),
3788           0, 2);
3789     }
3790     __ pop(ra);
3791     __ GetCFunctionDoubleResult(double_result);
3792 
3793     __ bind(&done);
3794     __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
3795     __ Ret();
3796   }
3797 }
3798 
3799 
3800 bool CEntryStub::NeedsImmovableCode() {
3801   return true;
3802 }
3803 
3804 
3805 bool CEntryStub::IsPregenerated() {
3806   return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
3807           result_size_ == 1;
3808 }
3809 
3810 
3811 void CodeStub::GenerateStubsAheadOfTime() {
3812   CEntryStub::GenerateAheadOfTime();
3813   WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime();
3814   StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
3815   RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
3816 }
3817 
3818 
3819 void CodeStub::GenerateFPStubs() {
3820   CEntryStub save_doubles(1, kSaveFPRegs);
3821   Handle<Code> code = save_doubles.GetCode();
3822   code->set_is_pregenerated(true);
3823   StoreBufferOverflowStub stub(kSaveFPRegs);
3824   stub.GetCode()->set_is_pregenerated(true);
3825   code->GetIsolate()->set_fp_stubs_generated(true);
3826 }
3827 
3828 
3829 void CEntryStub::GenerateAheadOfTime() {
3830   CEntryStub stub(1, kDontSaveFPRegs);
3831   Handle<Code> code = stub.GetCode();
3832   code->set_is_pregenerated(true);
3833 }
3834 
3835 
3836 void CEntryStub::GenerateCore(MacroAssembler* masm,
3837                               Label* throw_normal_exception,
3838                               Label* throw_termination_exception,
3839                               Label* throw_out_of_memory_exception,
3840                               bool do_gc,
3841                               bool always_allocate) {
3842   // v0: result parameter for PerformGC, if any
3843   // s0: number of arguments including receiver (C callee-saved)
3844   // s1: pointer to the first argument          (C callee-saved)
3845   // s2: pointer to builtin function            (C callee-saved)
3846 
3847   Isolate* isolate = masm->isolate();
3848 
3849   if (do_gc) {
3850     // Move result passed in v0 into a0 to call PerformGC.
3851     __ mov(a0, v0);
3852     __ PrepareCallCFunction(1, 0, a1);
3853     __ CallCFunction(ExternalReference::perform_gc_function(isolate), 1, 0);
3854   }
3855 
3856   ExternalReference scope_depth =
3857       ExternalReference::heap_always_allocate_scope_depth(isolate);
3858   if (always_allocate) {
3859     __ li(a0, Operand(scope_depth));
3860     __ lw(a1, MemOperand(a0));
3861     __ Addu(a1, a1, Operand(1));
3862     __ sw(a1, MemOperand(a0));
3863   }
3864 
3865   // Prepare arguments for C routine.
3866   // a0 = argc
3867   __ mov(a0, s0);
3868   // a1 = argv (set in the delay slot after find_ra below).
3869 
3870   // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
3871   // also need to reserve the 4 argument slots on the stack.
3872 
3873   __ AssertStackIsAligned();
3874 
3875   __ li(a2, Operand(ExternalReference::isolate_address()));
3876 
3877   // To let the GC traverse the return address of the exit frames, we need to
3878   // know where the return address is. The CEntryStub is unmovable, so
3879   // we can store the address on the stack to be able to find it again and
3880   // we never have to restore it, because it will not change.
3881   { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
3882     // This branch-and-link sequence is needed to find the current PC on mips,
3883     // saved to the ra register.
3884     // Use masm-> here instead of the double-underscore macro since extra
3885     // coverage code can interfere with the proper calculation of ra.
3886     Label find_ra;
3887     masm->bal(&find_ra);  // bal exposes branch delay slot.
3888     masm->mov(a1, s1);
3889     masm->bind(&find_ra);
3890 
3891     // Adjust the value in ra to point to the correct return location, 2nd
3892     // instruction past the real call into C code (the jalr(t9)), and push it.
3893     // This is the return address of the exit frame.
3894     const int kNumInstructionsToJump = 5;
3895     masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
3896     masm->sw(ra, MemOperand(sp));  // This spot was reserved in EnterExitFrame.
3897     // Stack space reservation moved to the branch delay slot below.
3898     // Stack is still aligned.
3899 
3900     // Call the C routine.
3901     masm->mov(t9, s2);  // Function pointer to t9 to conform to ABI for PIC.
3902     masm->jalr(t9);
3903     // Set up sp in the delay slot.
3904     masm->addiu(sp, sp, -kCArgsSlotsSize);
3905     // Make sure the stored 'ra' points to this position.
3906     ASSERT_EQ(kNumInstructionsToJump,
3907               masm->InstructionsGeneratedSince(&find_ra));
3908   }
3909 
3910   if (always_allocate) {
3911     // It's okay to clobber a2 and a3 here. v0 & v1 contain result.
3912     __ li(a2, Operand(scope_depth));
3913     __ lw(a3, MemOperand(a2));
3914     __ Subu(a3, a3, Operand(1));
3915     __ sw(a3, MemOperand(a2));
3916   }
3917 
3918   // Check for failure result.
3919   Label failure_returned;
3920   STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
3921   __ addiu(a2, v0, 1);
3922   __ andi(t0, a2, kFailureTagMask);
3923   __ Branch(USE_DELAY_SLOT, &failure_returned, eq, t0, Operand(zero_reg));
3924   // Restore stack (remove arg slots) in branch delay slot.
3925   __ addiu(sp, sp, kCArgsSlotsSize);
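  // Illustrative sketch (ours, not emitted code): the STATIC_ASSERT above
  // implies a failure object's low tag bits are all ones, so the addiu/andi
  // pair tests (IsFailure is a name made up for illustration):
  //
  //   bool IsFailure(int32_t value) {
  //     return ((value + 1) & kFailureTagMask) == 0;
  //   }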
3926 
3927 
3928   // Exit C frame and return.
3929   // v0:v1: result
3930   // sp: stack pointer
3931   // fp: frame pointer
3932   __ LeaveExitFrame(save_doubles_, s0, true);
3933 
3934   // Check if we should retry or throw exception.
3935   Label retry;
3936   __ bind(&failure_returned);
3937   STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
3938   __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize);
3939   __ Branch(&retry, eq, t0, Operand(zero_reg));
3940 
3941   // Special handling of out of memory exceptions.
3942   Failure* out_of_memory = Failure::OutOfMemoryException();
3943   __ Branch(USE_DELAY_SLOT,
3944             throw_out_of_memory_exception,
3945             eq,
3946             v0,
3947             Operand(reinterpret_cast<int32_t>(out_of_memory)));
3948   // If we throw the OOM exception, the value of a3 doesn't matter.
3949   // Any instruction that is not a jump can be in the delay slot.
3950 
3951   // Retrieve the pending exception and clear the variable.
3952   __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
3953   __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
3954                                       isolate)));
3955   __ lw(v0, MemOperand(t0));
3956   __ sw(a3, MemOperand(t0));
3957 
3958   // Special handling of termination exceptions, which are uncatchable
3959   // by JavaScript code.
3960   __ LoadRoot(t0, Heap::kTerminationExceptionRootIndex);
3961   __ Branch(throw_termination_exception, eq, v0, Operand(t0));
3962 
3963   // Handle normal exception.
3964   __ jmp(throw_normal_exception);
3965 
3966   __ bind(&retry);
3967   // Last failure (v0) will be moved to (a0) as the parameter when retrying.
3968 }
3969 
3970 
3971 void CEntryStub::Generate(MacroAssembler* masm) {
3972   // Called from JavaScript; parameters are on stack as if calling JS function
3973   // s0: number of arguments including receiver
3974   // s1: size of arguments excluding receiver
3975   // s2: pointer to builtin function
3976   // fp: frame pointer    (restored after C call)
3977   // sp: stack pointer    (restored as callee's sp after C call)
3978   // cp: current context  (C callee-saved)
3979 
3980   // NOTE: Invocations of builtins may return failure objects
3981   // instead of a proper result. The builtin entry handles
3982   // this by performing a garbage collection and retrying the
3983   // builtin once.
3984 
3985   // NOTE: s0-s2 hold the arguments of this function instead of a0-a2.
3986   // The reason for this is that these arguments would need to be saved anyway
3987   // so it's faster to set them up directly.
3988   // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction.
3989 
3990   // Compute the argv pointer in a callee-saved register.
3991   __ Addu(s1, sp, s1);
3992 
3993   // Enter the exit frame that transitions from JavaScript to C++.
3994   FrameScope scope(masm, StackFrame::MANUAL);
3995   __ EnterExitFrame(save_doubles_);
3996 
3997   // s0: number of arguments (C callee-saved)
3998   // s1: pointer to first argument (C callee-saved)
3999   // s2: pointer to builtin function (C callee-saved)
4000 
4001   Label throw_normal_exception;
4002   Label throw_termination_exception;
4003   Label throw_out_of_memory_exception;
4004 
4005   // Call into the runtime system.
4006   GenerateCore(masm,
4007                &throw_normal_exception,
4008                &throw_termination_exception,
4009                &throw_out_of_memory_exception,
4010                false,
4011                false);
4012 
4013   // Do space-specific GC and retry runtime call.
4014   GenerateCore(masm,
4015                &throw_normal_exception,
4016                &throw_termination_exception,
4017                &throw_out_of_memory_exception,
4018                true,
4019                false);
4020 
4021   // Do full GC and retry runtime call one final time.
4022   Failure* failure = Failure::InternalError();
4023   __ li(v0, Operand(reinterpret_cast<int32_t>(failure)));
4024   GenerateCore(masm,
4025                &throw_normal_exception,
4026                &throw_termination_exception,
4027                &throw_out_of_memory_exception,
4028                true,
4029                true);
4030 
4031   __ bind(&throw_out_of_memory_exception);
4032   // Set external caught exception to false.
4033   Isolate* isolate = masm->isolate();
4034   ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
4035                                     isolate);
4036   __ li(a0, Operand(false, RelocInfo::NONE));
4037   __ li(a2, Operand(external_caught));
4038   __ sw(a0, MemOperand(a2));
4039 
4040   // Set pending exception and v0 to out of memory exception.
4041   Failure* out_of_memory = Failure::OutOfMemoryException();
4042   __ li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
4043   __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
4044                                       isolate)));
4045   __ sw(v0, MemOperand(a2));
4046   // Fall through to the next label.
4047 
4048   __ bind(&throw_termination_exception);
4049   __ ThrowUncatchable(v0);
4050 
4051   __ bind(&throw_normal_exception);
4052   __ Throw(v0);
4053 }
4054 
4055 
4056 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
4057   Label invoke, handler_entry, exit;
4058   Isolate* isolate = masm->isolate();
4059 
4060   // Registers:
4061   // a0: entry address
4062   // a1: function
4063   // a2: receiver
4064   // a3: argc
4065   //
4066   // Stack:
4067   // 4 args slots
4068   // args
4069 
4070   // Save callee saved registers on the stack.
4071   __ MultiPush(kCalleeSaved | ra.bit());
4072 
4073   if (CpuFeatures::IsSupported(FPU)) {
4074     CpuFeatures::Scope scope(FPU);
4075     // Save callee-saved FPU registers.
4076     __ MultiPushFPU(kCalleeSavedFPU);
4077     // Set up the reserved register for 0.0.
4078     __ Move(kDoubleRegZero, 0.0);
4079   }
4080 
4081 
4082   // Load argv in s0 register.
4083   int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
4084   if (CpuFeatures::IsSupported(FPU)) {
4085     offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
4086   }
4087 
4088   __ InitializeRootRegister();
4089   __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
4090 
4091   // We build an EntryFrame.
4092   __ li(t3, Operand(-1));  // Push a bad frame pointer to fail if it is used.
4093   int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
4094   __ li(t2, Operand(Smi::FromInt(marker)));
4095   __ li(t1, Operand(Smi::FromInt(marker)));
4096   __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
4097                                       isolate)));
4098   __ lw(t0, MemOperand(t0));
4099   __ Push(t3, t2, t1, t0);
4100   // Set up frame pointer for the frame to be pushed.
4101   __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
4102 
4103   // Registers:
4104   // a0: entry_address
4105   // a1: function
4106   // a2: receiver_pointer
4107   // a3: argc
4108   // s0: argv
4109   //
4110   // Stack:
4111   // caller fp          |
4112   // function slot      | entry frame
4113   // context slot       |
4114   // bad fp (0xff...f)  |
4115   // callee saved registers + ra
4116   // 4 args slots
4117   // args
4118 
4119   // If this is the outermost JS call, set js_entry_sp value.
4120   Label non_outermost_js;
4121   ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
4122   __ li(t1, Operand(ExternalReference(js_entry_sp)));
4123   __ lw(t2, MemOperand(t1));
4124   __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
4125   __ sw(fp, MemOperand(t1));
4126   __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
4127   Label cont;
4128   __ b(&cont);
4129   __ nop();   // Branch delay slot nop.
4130   __ bind(&non_outermost_js);
4131   __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
4132   __ bind(&cont);
4133   __ push(t0);
4134 
4135   // Jump to a faked try block that does the invoke, with a faked catch
4136   // block that sets the pending exception.
4137   __ jmp(&invoke);
4138   __ bind(&handler_entry);
4139   handler_offset_ = handler_entry.pos();
4140   // Caught exception: Store result (exception) in the pending exception
4141   // field in the JSEnv and return a failure sentinel.  Coming in here the
4142   // fp will be invalid because the PushTryHandler below sets it to 0 to
4143   // signal the existence of the JSEntry frame.
4144   __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
4145                                       isolate)));
4146   __ sw(v0, MemOperand(t0));  // We come back from 'invoke'. result is in v0.
4147   __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
4148   __ b(&exit);  // b exposes branch delay slot.
4149   __ nop();   // Branch delay slot nop.
4150 
4151   // Invoke: Link this frame into the handler chain.  There's only one
4152   // handler block in this code object, so its index is 0.
4153   __ bind(&invoke);
4154   __ PushTryHandler(StackHandler::JS_ENTRY, 0);
4155   // If an exception not caught by another handler occurs, this handler
4156   // returns control to the code after the jump to &invoke above, which
4157   // restores all kCalleeSaved registers (including cp and fp) to their
4158   // saved values before returning a failure to C.
4159 
4160   // Clear any pending exceptions.
4161   __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
4162   __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
4163                                       isolate)));
4164   __ sw(t1, MemOperand(t0));
4165 
4166   // Invoke the function by calling through JS entry trampoline builtin.
4167   // Notice that we cannot store a reference to the trampoline code directly in
4168   // this stub, because runtime stubs are not traversed when doing GC.
4169 
4170   // Registers:
4171   // a0: entry_address
4172   // a1: function
4173   // a2: receiver_pointer
4174   // a3: argc
4175   // s0: argv
4176   //
4177   // Stack:
4178   // handler frame
4179   // entry frame
4180   // callee saved registers + ra
4181   // 4 args slots
4182   // args
4183 
4184   if (is_construct) {
4185     ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
4186                                       isolate);
4187     __ li(t0, Operand(construct_entry));
4188   } else {
4189     ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
4190     __ li(t0, Operand(entry));
4191   }
4192   __ lw(t9, MemOperand(t0));  // Deref address.
4193 
4194   // Call JSEntryTrampoline.
4195   __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
4196   __ Call(t9);
4197 
4198   // Unlink this frame from the handler chain.
4199   __ PopTryHandler();
4200 
4201   __ bind(&exit);  // v0 holds result
4202   // Check if the current stack frame is marked as the outermost JS frame.
4203   Label non_outermost_js_2;
4204   __ pop(t1);
4205   __ Branch(&non_outermost_js_2,
4206             ne,
4207             t1,
4208             Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
4209   __ li(t1, Operand(ExternalReference(js_entry_sp)));
4210   __ sw(zero_reg, MemOperand(t1));
4211   __ bind(&non_outermost_js_2);
4212 
4213   // Restore the top frame descriptors from the stack.
4214   __ pop(t1);
4215   __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
4216                                       isolate)));
4217   __ sw(t1, MemOperand(t0));
4218 
4219   // Reset the stack to the callee saved registers.
4220   __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
4221 
4222   if (CpuFeatures::IsSupported(FPU)) {
4223     CpuFeatures::Scope scope(FPU);
4224     // Restore callee-saved fpu registers.
4225     __ MultiPopFPU(kCalleeSavedFPU);
4226   }
4227 
4228   // Restore callee saved registers from the stack.
4229   __ MultiPop(kCalleeSaved | ra.bit());
4230   // Return.
4231   __ Jump(ra);
4232 }
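
// Taken together, the code above is the C++-to-JS trampoline. A rough,
// hypothetical C-level restatement of what the generated code does
// (illustration only; the real entry is raw machine code, not a C function):
//
//   Object* JSEntry(byte* entry, Object* function, Object* receiver,
//                   int argc, Object*** argv) {
//     SaveCalleeSavedRegisters();                // incl. FPU regs if present
//     PushEntryFrame(is_outermost ? OUTERMOST_JSENTRY_FRAME
//                                 : INNER_JSENTRY_FRAME);
//     PushTryHandler();                          // the faked try block
//     Object* result = CallJSEntryTrampoline(function, receiver, argc, argv);
//     PopTryHandler();
//     PopEntryFrame();
//     RestoreCalleeSavedRegisters();
//     return result;                             // or Failure::Exception()
//   }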
4233 
4234 
4235 // Uses registers a0 to t0.
4236 // Expected input (depending on whether args are in registers or on the stack):
4237 // * object: a0 or at sp + 1 * kPointerSize.
4238 // * function: a1 or at sp.
4239 //
4240 // An inlined call site may have been generated before calling this stub.
4241 // In this case the offset to the inline site to patch is passed on the stack,
4242 // in the safepoint slot for register t0.
4243 void InstanceofStub::Generate(MacroAssembler* masm) {
4244   // Call site inlining and patching implies arguments in registers.
4245   ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
4246   // ReturnTrueFalse is only implemented for inlined call sites.
4247   ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
4248 
4249   // Fixed register usage throughout the stub:
4250   const Register object = a0;  // Object (lhs).
4251   Register map = a3;  // Map of the object.
4252   const Register function = a1;  // Function (rhs).
4253   const Register prototype = t0;  // Prototype of the function.
4254   const Register inline_site = t5;
4255   const Register scratch = a2;
4256 
4257   const int32_t kDeltaToLoadBoolResult = 5 * kPointerSize;
4258 
4259   Label slow, loop, is_instance, is_not_instance, not_js_object;
4260 
4261   if (!HasArgsInRegisters()) {
4262     __ lw(object, MemOperand(sp, 1 * kPointerSize));
4263     __ lw(function, MemOperand(sp, 0));
4264   }
4265 
4266   // Check that the left hand side is a JS object and load its map.
4267   __ JumpIfSmi(object, &not_js_object);
4268   __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
4269 
4270   // If there is a call site cache don't look in the global cache, but do the
4271   // real lookup and update the call site cache.
4272   if (!HasCallSiteInlineCheck()) {
4273     Label miss;
4274     __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
4275     __ Branch(&miss, ne, function, Operand(at));
4276     __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
4277     __ Branch(&miss, ne, map, Operand(at));
4278     __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
4279     __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4280 
4281     __ bind(&miss);
4282   }
4283 
4284   // Get the prototype of the function.
4285   __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
4286 
4287   // Check that the function prototype is a JS object.
4288   __ JumpIfSmi(prototype, &slow);
4289   __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
4290 
4291   // Update the global instanceof or call site inlined cache with the current
4292   // map and function. The cached answer will be set when it is known below.
4293   if (!HasCallSiteInlineCheck()) {
4294     __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
4295     __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
4296   } else {
4297     ASSERT(HasArgsInRegisters());
4298     // Patch the (relocated) inlined map check.
4299 
4300     // The offset was stored in t0 safepoint slot.
4301     // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
4302     __ LoadFromSafepointRegisterSlot(scratch, t0);
4303     __ Subu(inline_site, ra, scratch);
4304     // Get the map location in scratch and patch it.
4305     __ GetRelocatedValue(inline_site, scratch, v1);  // v1 used as scratch.
4306     __ sw(map, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
4307   }
4308 
4309   // Register mapping: a3 is object map and t0 is function prototype.
4310   // Get prototype of object into a2.
4311   __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
4312 
4313   // We don't need map any more. Use it as a scratch register.
4314   Register scratch2 = map;
4315   map = no_reg;
4316 
4317   // Loop through the prototype chain looking for the function prototype.
4318   __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
4319   __ bind(&loop);
4320   __ Branch(&is_instance, eq, scratch, Operand(prototype));
4321   __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
4322   __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
4323   __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
4324   __ Branch(&loop);
4325 
4326   __ bind(&is_instance);
4327   ASSERT(Smi::FromInt(0) == 0);
4328   if (!HasCallSiteInlineCheck()) {
4329     __ mov(v0, zero_reg);
4330     __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
4331   } else {
4332     // Patch the call site to return true.
4333     __ LoadRoot(v0, Heap::kTrueValueRootIndex);
4334     __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
4335     // Get the boolean result location in scratch and patch it.
4336     __ PatchRelocatedValue(inline_site, scratch, v0);
4337 
4338     if (!ReturnTrueFalseObject()) {
4339       ASSERT_EQ(Smi::FromInt(0), 0);
4340       __ mov(v0, zero_reg);
4341     }
4342   }
4343   __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4344 
4345   __ bind(&is_not_instance);
4346   if (!HasCallSiteInlineCheck()) {
4347     __ li(v0, Operand(Smi::FromInt(1)));
4348     __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
4349   } else {
4350     // Patch the call site to return false.
4351     __ LoadRoot(v0, Heap::kFalseValueRootIndex);
4352     __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
4353     // Get the boolean result location in scratch and patch it.
4354     __ PatchRelocatedValue(inline_site, scratch, v0);
4355 
4356     if (!ReturnTrueFalseObject()) {
4357       __ li(v0, Operand(Smi::FromInt(1)));
4358     }
4359   }
4360 
4361   __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4362 
4363   Label object_not_null, object_not_null_or_smi;
4364   __ bind(&not_js_object);
4365   // Before null, smi and string value checks, check that the rhs is a function
4366   // as for a non-function rhs an exception needs to be thrown.
4367   __ JumpIfSmi(function, &slow);
4368   __ GetObjectType(function, scratch2, scratch);
4369   __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
4370 
4371   // Null is not instance of anything.
4372   __ Branch(&object_not_null,
4373             ne,
4374             scratch,
4375             Operand(masm->isolate()->factory()->null_value()));
4376   __ li(v0, Operand(Smi::FromInt(1)));
4377   __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4378 
4379   __ bind(&object_not_null);
4380   // Smi values are not instances of anything.
4381   __ JumpIfNotSmi(object, &object_not_null_or_smi);
4382   __ li(v0, Operand(Smi::FromInt(1)));
4383   __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4384 
4385   __ bind(&object_not_null_or_smi);
4386   // String values are not instances of anything.
4387   __ IsObjectJSStringType(object, scratch, &slow);
4388   __ li(v0, Operand(Smi::FromInt(1)));
4389   __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4390 
4391   // Slow-case.  Tail call builtin.
4392   __ bind(&slow);
4393   if (!ReturnTrueFalseObject()) {
4394     if (HasArgsInRegisters()) {
4395       __ Push(a0, a1);
4396     }
4397     __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
4398   } else {
4399     {
4400       FrameScope scope(masm, StackFrame::INTERNAL);
4401       __ Push(a0, a1);
4402       __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
4403     }
4404     __ mov(a0, v0);
4405     __ LoadRoot(v0, Heap::kTrueValueRootIndex);
4406     __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
4407     __ LoadRoot(v0, Heap::kFalseValueRootIndex);
4408     __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
4409   }
4410 }
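
// The fast path of InstanceofStub is a plain prototype-chain walk. Hedged
// sketch of the equivalent logic (illustrative pseudo-C++, not actual V8
// declarations):
//
//   bool IsInstanceOf(JSObject* object, JSFunction* function) {
//     Object* prototype = function->prototype();  // checked to be a JS object
//     Object* current = object->map()->prototype();
//     while (current != null_value) {
//       if (current == prototype) return true;    // stub returns Smi 0
//       current = HeapObject::cast(current)->map()->prototype();
//     }
//     return false;                               // stub returns Smi 1
//   }
//
// Note the inverted encoding: Smi 0 means "is an instance" and Smi 1 means
// "is not", which is the convention the INSTANCE_OF builtin expects.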
4411 
4412 
4413 Register InstanceofStub::left() { return a0; }
4414 
4415 
4416 Register InstanceofStub::right() { return a1; }
4417 
4418 
4419 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
4420   // The displacement is the offset of the last parameter (if any)
4421   // relative to the frame pointer.
4422   const int kDisplacement =
4423       StandardFrameConstants::kCallerSPOffset - kPointerSize;
4424 
4425   // Check that the key is a smi.
4426   Label slow;
4427   __ JumpIfNotSmi(a1, &slow);
4428 
4429   // Check if the calling frame is an arguments adaptor frame.
4430   Label adaptor;
4431   __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4432   __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
4433   __ Branch(&adaptor,
4434             eq,
4435             a3,
4436             Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4437 
4438   // Check index (a1) against formal parameters count limit passed in
4439   // through register a0. Use unsigned comparison to get negative
4440   // check for free.
4441   __ Branch(&slow, hs, a1, Operand(a0));
4442 
4443   // Read the argument from the stack and return it.
4444   __ subu(a3, a0, a1);
4445   __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
4446   __ Addu(a3, fp, Operand(t3));
4447   __ lw(v0, MemOperand(a3, kDisplacement));
4448   __ Ret();
4449 
4450   // Arguments adaptor case: Check index (a1) against actual arguments
4451   // limit found in the arguments adaptor frame. Use unsigned
4452   // comparison to get negative check for free.
4453   __ bind(&adaptor);
4454   __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
4455   __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
4456 
4457   // Read the argument from the adaptor frame and return it.
4458   __ subu(a3, a0, a1);
4459   __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
4460   __ Addu(a3, a2, Operand(t3));
4461   __ lw(v0, MemOperand(a3, kDisplacement));
4462   __ Ret();
4463 
4464   // Slow-case: Handle non-smi or out-of-bounds access to arguments
4465   // by calling the runtime system.
4466   __ bind(&slow);
4467   __ push(a1);
4468   __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
4469 }
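
// Reading arguments[key] above reduces to one indexed load relative to
// whichever frame really holds the arguments. Restated as an illustrative
// sketch (hypothetical helper; key and argc are untagged here, while the
// assembly works on smis):
//
//   Object* ReadElement(Address frame, int argc, int key) {
//     if (key < 0 || key >= argc) return SlowPath();       // runtime call
//     // Arguments sit just above the frame's caller-SP slot, with the first
//     // argument at the highest address:
//     return Memory::Object_at(frame + kCallerSPOffset +
//                              (argc - 1 - key) * kPointerSize);
//   }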
4470 
4471 
4472 void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
4473   // sp[0] : number of parameters
4474   // sp[4] : receiver displacement
4475   // sp[8] : function
4476   // Check if the calling frame is an arguments adaptor frame.
4477   Label runtime;
4478   __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4479   __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
4480   __ Branch(&runtime,
4481             ne,
4482             a2,
4483             Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4484 
4485   // Patch the arguments.length and the parameters pointer in the current frame.
4486   __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
4487   __ sw(a2, MemOperand(sp, 0 * kPointerSize));
4488   __ sll(t3, a2, 1);
4489   __ Addu(a3, a3, Operand(t3));
4490   __ addiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
4491   __ sw(a3, MemOperand(sp, 1 * kPointerSize));
4492 
4493   __ bind(&runtime);
4494   __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
4495 }
4496 
4497 
4498 void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
4499   // Stack layout:
4500   //  sp[0] : number of parameters (tagged)
4501   //  sp[4] : address of receiver argument
4502   //  sp[8] : function
4503   // Registers used over whole function:
4504   //  t2 : allocated object (tagged)
4505   //  t5 : mapped parameter count (tagged)
4506 
4507   __ lw(a1, MemOperand(sp, 0 * kPointerSize));
4508   // a1 = parameter count (tagged)
4509 
4510   // Check if the calling frame is an arguments adaptor frame.
4511   Label runtime;
4512   Label adaptor_frame, try_allocate;
4513   __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4514   __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
4515   __ Branch(&adaptor_frame,
4516             eq,
4517             a2,
4518             Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4519 
4520   // No adaptor, parameter count = argument count.
4521   __ mov(a2, a1);
4522   __ b(&try_allocate);
4523   __ nop();   // Branch delay slot nop.
4524 
4525   // We have an adaptor frame. Patch the parameters pointer.
4526   __ bind(&adaptor_frame);
4527   __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
4528   __ sll(t6, a2, 1);
4529   __ Addu(a3, a3, Operand(t6));
4530   __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
4531   __ sw(a3, MemOperand(sp, 1 * kPointerSize));
4532 
4533   // a1 = parameter count (tagged)
4534   // a2 = argument count (tagged)
4535   // Compute the mapped parameter count = min(a1, a2) in a1.
4536   Label skip_min;
4537   __ Branch(&skip_min, lt, a1, Operand(a2));
4538   __ mov(a1, a2);
4539   __ bind(&skip_min);
4540 
4541   __ bind(&try_allocate);
4542 
4543   // Compute the sizes of backing store, parameter map, and arguments object.
4544   // 1. Parameter map, has 2 extra words containing context and backing store.
4545   const int kParameterMapHeaderSize =
4546       FixedArray::kHeaderSize + 2 * kPointerSize;
4547   // If there are no mapped parameters, we do not need the parameter_map.
4548   Label param_map_size;
4549   ASSERT_EQ(0, Smi::FromInt(0));
4550   __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a1, Operand(zero_reg));
4551   __ mov(t5, zero_reg);  // In delay slot: param map size = 0 when a1 == 0.
4552   __ sll(t5, a1, 1);
4553   __ addiu(t5, t5, kParameterMapHeaderSize);
4554   __ bind(&param_map_size);
4555 
4556   // 2. Backing store.
4557   __ sll(t6, a2, 1);
4558   __ Addu(t5, t5, Operand(t6));
4559   __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
4560 
4561   // 3. Arguments object.
4562   __ Addu(t5, t5, Operand(Heap::kArgumentsObjectSize));
4563 
4564   // Do the allocation of all three objects in one go.
4565   __ AllocateInNewSpace(t5, v0, a3, t0, &runtime, TAG_OBJECT);
4566 
4567   // v0 = address of new object(s) (tagged)
4568   // a2 = argument count (tagged)
4569   // Get the arguments boilerplate from the current (global) context into t0.
4570   const int kNormalOffset =
4571       Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
4572   const int kAliasedOffset =
4573       Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
4574 
4575   __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4576   __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
4577   Label skip2_ne, skip2_eq;
4578   __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
4579   __ lw(t0, MemOperand(t0, kNormalOffset));
4580   __ bind(&skip2_ne);
4581 
4582   __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
4583   __ lw(t0, MemOperand(t0, kAliasedOffset));
4584   __ bind(&skip2_eq);
4585 
4586   // v0 = address of new object (tagged)
4587   // a1 = mapped parameter count (tagged)
4588   // a2 = argument count (tagged)
4589   // t0 = address of boilerplate object (tagged)
4590   // Copy the JS object part.
4591   for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
4592     __ lw(a3, FieldMemOperand(t0, i));
4593     __ sw(a3, FieldMemOperand(v0, i));
4594   }
4595 
4596   // Set up the callee in-object property.
4597   STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
4598   __ lw(a3, MemOperand(sp, 2 * kPointerSize));
4599   const int kCalleeOffset = JSObject::kHeaderSize +
4600       Heap::kArgumentsCalleeIndex * kPointerSize;
4601   __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
4602 
4603   // Use the length (smi tagged) and set that as an in-object property too.
4604   STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
4605   const int kLengthOffset = JSObject::kHeaderSize +
4606       Heap::kArgumentsLengthIndex * kPointerSize;
4607   __ sw(a2, FieldMemOperand(v0, kLengthOffset));
4608 
4609   // Set up the elements pointer in the allocated arguments object.
4610   // If we allocated a parameter map, t0 will point there, otherwise
4611   // it will point to the backing store.
4612   __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSize));
4613   __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
4614 
4615   // v0 = address of new object (tagged)
4616   // a1 = mapped parameter count (tagged)
4617   // a2 = argument count (tagged)
4618   // t0 = address of parameter map or backing store (tagged)
4619   // Initialize parameter map. If there are no mapped arguments, we're done.
4620   Label skip_parameter_map;
4621   Label skip3;
4622   __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
4623   // Move backing store address to a3, because it is
4624   // expected there when filling in the unmapped arguments.
4625   __ mov(a3, t0);
4626   __ bind(&skip3);
4627 
4628   __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
4629 
4630   __ LoadRoot(t2, Heap::kNonStrictArgumentsElementsMapRootIndex);
4631   __ sw(t2, FieldMemOperand(t0, FixedArray::kMapOffset));
4632   __ Addu(t2, a1, Operand(Smi::FromInt(2)));
4633   __ sw(t2, FieldMemOperand(t0, FixedArray::kLengthOffset));
4634   __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
4635   __ sll(t6, a1, 1);
4636   __ Addu(t2, t0, Operand(t6));
4637   __ Addu(t2, t2, Operand(kParameterMapHeaderSize));
4638   __ sw(t2, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
4639 
4640   // Copy the parameter slots and the holes in the arguments.
4641   // We need to fill in mapped_parameter_count slots. They index the context,
4642   // where parameters are stored in reverse order, at
4643   //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
4644   // The mapped parameters thus need to get indices
4645   //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
4646   //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
4647   // We loop from right to left.
4648   Label parameters_loop, parameters_test;
4649   __ mov(t2, a1);
4650   __ lw(t5, MemOperand(sp, 0 * kPointerSize));
4651   __ Addu(t5, t5, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
4652   __ Subu(t5, t5, Operand(a1));
4653   __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
4654   __ sll(t6, t2, 1);
4655   __ Addu(a3, t0, Operand(t6));
4656   __ Addu(a3, a3, Operand(kParameterMapHeaderSize));
4657 
4658   // t2 = loop variable (tagged)
4659   // a1 = mapping index (tagged)
4660   // a3 = address of backing store (tagged)
4661   // t0 = address of parameter map (tagged)
4662   // t1 = temporary scratch (e.g., for address calculation)
4663   // t3 = the hole value
4664   __ jmp(&parameters_test);
4665 
4666   __ bind(&parameters_loop);
4667   __ Subu(t2, t2, Operand(Smi::FromInt(1)));
4668   __ sll(t1, t2, 1);
4669   __ Addu(t1, t1, Operand(kParameterMapHeaderSize - kHeapObjectTag));
4670   __ Addu(t6, t0, t1);
4671   __ sw(t5, MemOperand(t6));
4672   __ Subu(t1, t1, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
4673   __ Addu(t6, a3, t1);
4674   __ sw(t3, MemOperand(t6));
4675   __ Addu(t5, t5, Operand(Smi::FromInt(1)));
4676   __ bind(&parameters_test);
4677   __ Branch(&parameters_loop, ne, t2, Operand(Smi::FromInt(0)));
4678 
4679   __ bind(&skip_parameter_map);
4680   // a2 = argument count (tagged)
4681   // a3 = address of backing store (tagged)
4682   // t1 = scratch
4683   // Copy arguments header and remaining slots (if there are any).
4684   __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
4685   __ sw(t1, FieldMemOperand(a3, FixedArray::kMapOffset));
4686   __ sw(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
4687 
4688   Label arguments_loop, arguments_test;
4689   __ mov(t5, a1);
4690   __ lw(t0, MemOperand(sp, 1 * kPointerSize));
4691   __ sll(t6, t5, 1);
4692   __ Subu(t0, t0, Operand(t6));
4693   __ jmp(&arguments_test);
4694 
4695   __ bind(&arguments_loop);
4696   __ Subu(t0, t0, Operand(kPointerSize));
4697   __ lw(t2, MemOperand(t0, 0));
4698   __ sll(t6, t5, 1);
4699   __ Addu(t1, a3, Operand(t6));
4700   __ sw(t2, FieldMemOperand(t1, FixedArray::kHeaderSize));
4701   __ Addu(t5, t5, Operand(Smi::FromInt(1)));
4702 
4703   __ bind(&arguments_test);
4704   __ Branch(&arguments_loop, lt, t5, Operand(a2));
4705 
4706   // Return and remove the on-stack parameters.
4707   __ DropAndRet(3);
4708 
4709   // Do the runtime call to allocate the arguments object.
4710   // a2 = argument count (tagged)
4711   __ bind(&runtime);
4712   __ sw(a2, MemOperand(sp, 0 * kPointerSize));  // Patch argument count.
4713   __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
4714 }
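
// The single AllocateInNewSpace above reserves one contiguous block holding
// the arguments object, the (optional) parameter map, and the backing store.
// Size computation restated as illustrative C++ (counts untagged here,
// whereas the assembly works directly on smis):
//
//   int AllocationSizeInBytes(int mapped_count, int arg_count) {
//     int parameter_map = (mapped_count == 0)
//         ? 0
//         : kParameterMapHeaderSize + mapped_count * kPointerSize;
//     int backing_store = FixedArray::kHeaderSize + arg_count * kPointerSize;
//     return Heap::kArgumentsObjectSize + parameter_map + backing_store;
//   }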
4715 
4716 
4717 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
4718   // sp[0] : number of parameters
4719   // sp[4] : receiver displacement
4720   // sp[8] : function
4721   // Check if the calling frame is an arguments adaptor frame.
4722   Label adaptor_frame, try_allocate, runtime;
4723   __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4724   __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
4725   __ Branch(&adaptor_frame,
4726             eq,
4727             a3,
4728             Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4729 
4730   // Get the length from the frame.
4731   __ lw(a1, MemOperand(sp, 0));
4732   __ Branch(&try_allocate);
4733 
4734   // Patch the arguments.length and the parameters pointer.
4735   __ bind(&adaptor_frame);
4736   __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
4737   __ sw(a1, MemOperand(sp, 0));
4738   __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
4739   __ Addu(a3, a2, Operand(at));
4740 
4741   __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
4742   __ sw(a3, MemOperand(sp, 1 * kPointerSize));
4743 
4744   // Try the new space allocation. Start out with computing the size
4745   // of the arguments object and the elements array in words.
4746   Label add_arguments_object;
4747   __ bind(&try_allocate);
4748   __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
4749   __ srl(a1, a1, kSmiTagSize);
4750 
4751   __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
4752   __ bind(&add_arguments_object);
4753   __ Addu(a1, a1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
4754 
4755   // Do the allocation of both objects in one go.
4756   __ AllocateInNewSpace(a1,
4757                         v0,
4758                         a2,
4759                         a3,
4760                         &runtime,
4761                         static_cast<AllocationFlags>(TAG_OBJECT |
4762                                                      SIZE_IN_WORDS));
4763 
4764   // Get the arguments boilerplate from the current (global) context.
4765   __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
4766   __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset));
4767   __ lw(t0, MemOperand(t0, Context::SlotOffset(
4768       Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
4769 
4770   // Copy the JS object part.
4771   __ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize);
4772 
4773   // Get the length (smi tagged) and set that as an in-object property too.
4774   STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
4775   __ lw(a1, MemOperand(sp, 0 * kPointerSize));
4776   __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
4777       Heap::kArgumentsLengthIndex * kPointerSize));
4778 
4779   Label done;
4780   __ Branch(&done, eq, a1, Operand(zero_reg));
4781 
4782   // Get the parameters pointer from the stack.
4783   __ lw(a2, MemOperand(sp, 1 * kPointerSize));
4784 
4785   // Set up the elements pointer in the allocated arguments object and
4786   // initialize the header in the elements fixed array.
4787   __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSizeStrict));
4788   __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
4789   __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
4790   __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
4791   __ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset));
4792   // Untag the length for the loop.
4793   __ srl(a1, a1, kSmiTagSize);
4794 
4795   // Copy the fixed array slots.
4796   Label loop;
4797   // Set up t0 to point to the first array slot.
4798   __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4799   __ bind(&loop);
4800   // Pre-decrement a2 with kPointerSize on each iteration.
4801   // Pre-decrement in order to skip receiver.
4802   __ Addu(a2, a2, Operand(-kPointerSize));
4803   __ lw(a3, MemOperand(a2));
4804   // Post-increment t0 with kPointerSize on each iteration.
4805   __ sw(a3, MemOperand(t0));
4806   __ Addu(t0, t0, Operand(kPointerSize));
4807   __ Subu(a1, a1, Operand(1));
4808   __ Branch(&loop, ne, a1, Operand(zero_reg));
4809 
4810   // Return and remove the on-stack parameters.
4811   __ bind(&done);
4812   __ DropAndRet(3);
4813 
4814   // Do the runtime call to allocate the arguments object.
4815   __ bind(&runtime);
4816   __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
4817 }
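
// Strict-mode arguments need no parameter map: the result is just a JSObject
// plus a FixedArray copy of the actual arguments. The size computed into a1
// above, restated as an illustrative sketch (arg_count untagged):
//
//   int StrictArgumentsSizeInWords(int arg_count) {
//     int elements = (arg_count == 0)
//         ? 0
//         : FixedArray::kHeaderSize / kPointerSize + arg_count;
//     return Heap::kArgumentsObjectSizeStrict / kPointerSize + elements;
//   }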
4818 
4819 
4820 void RegExpExecStub::Generate(MacroAssembler* masm) {
4821   // Just jump directly to runtime if native RegExp is not selected at compile
4822   // time, or if the regexp entry in generated code has been turned off by a
4823   // runtime switch or at compilation.
4824 #ifdef V8_INTERPRETED_REGEXP
4825   __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
4826 #else  // V8_INTERPRETED_REGEXP
4827 
4828   // Stack frame on entry.
4829   //  sp[0]: last_match_info (expected JSArray)
4830   //  sp[4]: previous index
4831   //  sp[8]: subject string
4832   //  sp[12]: JSRegExp object
4833 
4834   const int kLastMatchInfoOffset = 0 * kPointerSize;
4835   const int kPreviousIndexOffset = 1 * kPointerSize;
4836   const int kSubjectOffset = 2 * kPointerSize;
4837   const int kJSRegExpOffset = 3 * kPointerSize;
4838 
4839   Isolate* isolate = masm->isolate();
4840 
4841   Label runtime, invoke_regexp;
4842 
4843   // Allocation of registers for this function. These are in callee save
4844   // registers and will be preserved by the call to the native RegExp code, as
4845   // this code is called using the normal C calling convention. When calling
4846   // directly from generated code the native RegExp code will not do a GC and
4847   // therefore the content of these registers are safe to use after the call.
4848   // MIPS - using s0..s2, since we are not using CEntry Stub.
4849   Register subject = s0;
4850   Register regexp_data = s1;
4851   Register last_match_info_elements = s2;
4852 
4853   // Ensure that a RegExp stack is allocated.
4854   ExternalReference address_of_regexp_stack_memory_address =
4855       ExternalReference::address_of_regexp_stack_memory_address(
4856           isolate);
4857   ExternalReference address_of_regexp_stack_memory_size =
4858       ExternalReference::address_of_regexp_stack_memory_size(isolate);
4859   __ li(a0, Operand(address_of_regexp_stack_memory_size));
4860   __ lw(a0, MemOperand(a0, 0));
4861   __ Branch(&runtime, eq, a0, Operand(zero_reg));
4862 
4863   // Check that the first argument is a JSRegExp object.
4864   __ lw(a0, MemOperand(sp, kJSRegExpOffset));
4865   STATIC_ASSERT(kSmiTag == 0);
4866   __ JumpIfSmi(a0, &runtime);
4867   __ GetObjectType(a0, a1, a1);
4868   __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
4869 
4870   // Check that the RegExp has been compiled (data contains a fixed array).
4871   __ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
4872   if (FLAG_debug_code) {
4873     __ And(t0, regexp_data, Operand(kSmiTagMask));
4874     __ Check(nz,
4875              "Unexpected type for RegExp data, FixedArray expected",
4876              t0,
4877              Operand(zero_reg));
4878     __ GetObjectType(regexp_data, a0, a0);
4879     __ Check(eq,
4880              "Unexpected type for RegExp data, FixedArray expected",
4881              a0,
4882              Operand(FIXED_ARRAY_TYPE));
4883   }
4884 
4885   // regexp_data: RegExp data (FixedArray)
4886   // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
4887   __ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
4888   __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
4889 
4890   // regexp_data: RegExp data (FixedArray)
4891   // Check that the number of captures fits in the static offsets vector buffer.
4892   __ lw(a2,
4893          FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
4894   // Calculate number of capture registers (number_of_captures + 1) * 2. This
4895   // uses the assumption that smis are 2 * their untagged value.
4896   STATIC_ASSERT(kSmiTag == 0);
4897   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
4898   __ Addu(a2, a2, Operand(2));  // a2 was a smi.
4899   // Check that the static offsets vector buffer is large enough.
4900   __ Branch(&runtime, hi, a2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
4901 
4902   // a2: Number of capture registers
4903   // regexp_data: RegExp data (FixedArray)
4904   // Check that the second argument is a string.
4905   __ lw(subject, MemOperand(sp, kSubjectOffset));
4906   __ JumpIfSmi(subject, &runtime);
4907   __ GetObjectType(subject, a0, a0);
4908   __ And(a0, a0, Operand(kIsNotStringMask));
4909   STATIC_ASSERT(kStringTag == 0);
4910   __ Branch(&runtime, ne, a0, Operand(zero_reg));
4911 
4912   // Get the length of the string into a3.
4913   __ lw(a3, FieldMemOperand(subject, String::kLengthOffset));
4914 
4915   // a2: Number of capture registers
4916   // a3: Length of subject string as a smi
4917   // subject: Subject string
4918   // regexp_data: RegExp data (FixedArray)
4919   // Check that the third argument is a positive smi less than the subject
4920   // string length. A negative value will be greater (unsigned comparison).
4921   __ lw(a0, MemOperand(sp, kPreviousIndexOffset));
4922   __ JumpIfNotSmi(a0, &runtime);
4923   __ Branch(&runtime, ls, a3, Operand(a0));
4924 
4925   // a2: Number of capture registers
4926   // subject: Subject string
4927   // regexp_data: RegExp data (FixedArray)
4928   // Check that the fourth object is a JSArray object.
4929   __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
4930   __ JumpIfSmi(a0, &runtime);
4931   __ GetObjectType(a0, a1, a1);
4932   __ Branch(&runtime, ne, a1, Operand(JS_ARRAY_TYPE));
4933   // Check that the JSArray is in fast case.
4934   __ lw(last_match_info_elements,
4935          FieldMemOperand(a0, JSArray::kElementsOffset));
4936   __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
4937   __ Branch(&runtime, ne, a0, Operand(
4938       isolate->factory()->fixed_array_map()));
4939   // Check that the last match info has space for the capture registers and the
4940   // additional information.
4941   __ lw(a0,
4942          FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
4943   __ Addu(a2, a2, Operand(RegExpImpl::kLastMatchOverhead));
4944   __ sra(at, a0, kSmiTagSize);  // Untag length for comparison.
4945   __ Branch(&runtime, gt, a2, Operand(at));
4946 
4947   // Reset offset for possibly sliced string.
4948   __ mov(t0, zero_reg);
4949   // subject: Subject string
4950   // regexp_data: RegExp data (FixedArray)
4951   // Check the representation and encoding of the subject string.
4952   Label seq_string;
4953   __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
4954   __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
4955   // First check for flat string.  None of the following string type tests will
4956   // succeed if subject is not a string or a short external string.
4957   __ And(a1,
4958          a0,
4959          Operand(kIsNotStringMask |
4960                  kStringRepresentationMask |
4961                  kShortExternalStringMask));
4962   STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
4963   __ Branch(&seq_string, eq, a1, Operand(zero_reg));
4964 
4965   // subject: Subject string
4966   // a0: instance type of Subject string
4967   // regexp_data: RegExp data (FixedArray)
4968   // a1: whether subject is a string and if yes, its string representation
4969   // Check for flat cons string or sliced string.
4970   // A flat cons string is a cons string where the second part is the empty
4971   // string. In that case the subject string is just the first part of the cons
4972   // string. Also in this case the first part of the cons string is known to be
4973   // a sequential string or an external string.
4974   // In the case of a sliced string its offset has to be taken into account.
4975   Label cons_string, external_string, check_encoding;
4976   STATIC_ASSERT(kConsStringTag < kExternalStringTag);
4977   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
4978   STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
4979   STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
4980   __ Branch(&cons_string, lt, a1, Operand(kExternalStringTag));
4981   __ Branch(&external_string, eq, a1, Operand(kExternalStringTag));
4982 
4983   // Catch non-string subject or short external string.
4984   STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
4985   __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
4986   __ Branch(&runtime, ne, at, Operand(zero_reg));
4987 
4988   // String is sliced.
4989   __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
4990   __ sra(t0, t0, kSmiTagSize);
4991   __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
4992   // t0: offset of sliced string, untagged.
4993   __ jmp(&check_encoding);
4994   // String is a cons string, check whether it is flat.
4995   __ bind(&cons_string);
4996   __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
4997   __ LoadRoot(a1, Heap::kEmptyStringRootIndex);
4998   __ Branch(&runtime, ne, a0, Operand(a1));
4999   __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
5000   // Is first part of cons or parent of slice a flat string?
5001   __ bind(&check_encoding);
5002   __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
5003   __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
5004   STATIC_ASSERT(kSeqStringTag == 0);
5005   __ And(at, a0, Operand(kStringRepresentationMask));
5006   __ Branch(&external_string, ne, at, Operand(zero_reg));
5007 
5008   __ bind(&seq_string);
5009   // subject: Subject string
5010   // regexp_data: RegExp data (FixedArray)
5011   // a0: Instance type of subject string
5012   STATIC_ASSERT(kStringEncodingMask == 4);
5013   STATIC_ASSERT(kAsciiStringTag == 4);
5014   STATIC_ASSERT(kTwoByteStringTag == 0);
5015   // Find the code object based on the assumptions above.
5016   __ And(a0, a0, Operand(kStringEncodingMask));  // Non-zero for ASCII.
5017   __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
5018   __ sra(a3, a0, 2);  // a3 is 1 for ASCII, 0 for UC16 (used below).
5019   __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
5020   __ Movz(t9, t1, a0);  // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
5021 
5022   // Check that the irregexp code has been generated for the actual string
5023   // encoding. If it has, the field contains a code object otherwise it contains
5024   // a smi (code flushing support).
5025   __ JumpIfSmi(t9, &runtime);
5026 
5027   // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
5028   // t9: code
5029   // subject: Subject string
5030   // regexp_data: RegExp data (FixedArray)
5031   // Load used arguments before starting to push arguments for call to native
5032   // RegExp code to avoid handling changing stack height.
5033   __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
5034   __ sra(a1, a1, kSmiTagSize);  // Untag the Smi.
5035 
5036   // a1: previous index
5037   // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
5038   // t9: code
5039   // subject: Subject string
5040   // regexp_data: RegExp data (FixedArray)
5041   // All checks done. Now push arguments for native regexp code.
5042   __ IncrementCounter(isolate->counters()->regexp_entry_native(),
5043                       1, a0, a2);
5044 
5045   // Isolates: note we add an additional parameter here (isolate pointer).
5046   const int kRegExpExecuteArguments = 8;
5047   const int kParameterRegisters = 4;
5048   __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
5049 
5050   // Stack pointer now points to cell where return address is to be written.
5051   // Arguments are before that on the stack or in registers, meaning we
5052   // treat the return address as argument 5. Thus every argument after that
5053   // needs to be shifted back by 1. Since DirectCEntryStub will handle
5054   // allocating space for the c argument slots, we don't need to calculate
5055   // that into the argument positions on the stack. This is how the stack will
5056   // look (sp meaning the value of sp at this moment):
5057   // [sp + 4 * kPointerSize] - Argument 8
5058   // [sp + 3 * kPointerSize] - Argument 7
5059   // [sp + 2 * kPointerSize] - Argument 6
5060   // [sp + 1 * kPointerSize] - Argument 5
5061   // [sp + 0]                - saved ra
5062 
5063   // Argument 8: Pass current isolate address.
5064   // CFunctionArgumentOperand handles MIPS stack argument slots.
5065   __ li(a0, Operand(ExternalReference::isolate_address()));
5066   __ sw(a0, MemOperand(sp, 4 * kPointerSize));
5067 
5068   // Argument 7: Indicate that this is a direct call from JavaScript.
5069   __ li(a0, Operand(1));
5070   __ sw(a0, MemOperand(sp, 3 * kPointerSize));
5071 
5072   // Argument 6: Start (high end) of backtracking stack memory area.
5073   __ li(a0, Operand(address_of_regexp_stack_memory_address));
5074   __ lw(a0, MemOperand(a0, 0));
5075   __ li(a2, Operand(address_of_regexp_stack_memory_size));
5076   __ lw(a2, MemOperand(a2, 0));
5077   __ addu(a0, a0, a2);
5078   __ sw(a0, MemOperand(sp, 2 * kPointerSize));
5079 
5080   // Argument 5: static offsets vector buffer.
5081   __ li(a0, Operand(
5082         ExternalReference::address_of_static_offsets_vector(isolate)));
5083   __ sw(a0, MemOperand(sp, 1 * kPointerSize));
5084 
5085   // For arguments 4 and 3 get string length, calculate start of string data
5086   // and calculate the shift of the index (0 for ASCII and 1 for two byte).
5087   __ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
5088   __ Xor(a3, a3, Operand(1));  // 1 for 2-byte str, 0 for 1-byte.
5089   // Load the length from the original subject string from the previous stack
5090   // frame. Therefore we have to use fp, which points exactly to two pointer
5091   // sizes below the previous sp. (Because creating a new stack frame pushes
5092   // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
5093   __ lw(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
5094   // If slice offset is not 0, load the length from the original sliced string.
5095   // Argument 4, a3: End of string data
5096   // Argument 3, a2: Start of string data
5097   // Prepare start and end index of the input.
5098   __ sllv(t1, t0, a3);
5099   __ addu(t0, t2, t1);
5100   __ sllv(t1, a1, a3);
5101   __ addu(a2, t0, t1);
5102 
5103   __ lw(t2, FieldMemOperand(subject, String::kLengthOffset));
5104   __ sra(t2, t2, kSmiTagSize);
5105   __ sllv(t1, t2, a3);
5106   __ addu(a3, t0, t1);
5107   // Argument 2 (a1): Previous index.
5108   // Already there
5109   // Already there.
5110   // Argument 1 (a0): Subject string.
5111   __ mov(a0, subject);
5112 
5113   // Locate the code entry and call it.
5114   __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
5115   DirectCEntryStub stub;
5116   stub.GenerateCall(masm, t9);
5117 
5118   __ LeaveExitFrame(false, no_reg);
5119 
5120   // v0: result
5121   // subject: subject string (callee saved)
5122   // regexp_data: RegExp data (callee saved)
5123   // last_match_info_elements: Last match info elements (callee saved)
5124 
5125   // Check the result.
5126 
5127   Label success;
5128   __ Branch(&success, eq, v0, Operand(NativeRegExpMacroAssembler::SUCCESS));
5129   Label failure;
5130   __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
5131   // If not exception it can only be retry. Handle that in the runtime system.
5132   __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
5133   // Result must now be exception. If there is no pending exception already, a
5134   // stack overflow (on the backtrack stack) was detected in RegExp code, but
5135   // the exception has not been created yet. Handle that in the runtime system.
5136   // TODO(592): Rerunning the RegExp to get the stack overflow exception.
5137   __ li(a1, Operand(isolate->factory()->the_hole_value()));
5138   __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
5139                                       isolate)));
5140   __ lw(v0, MemOperand(a2, 0));
5141   __ Branch(&runtime, eq, v0, Operand(a1));
5142 
5143   __ sw(a1, MemOperand(a2, 0));  // Clear pending exception.
5144 
5145   // Check if the exception is a termination. If so, throw as uncatchable.
5146   __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
5147   Label termination_exception;
5148   __ Branch(&termination_exception, eq, v0, Operand(a0));
5149 
5150   __ Throw(v0);
5151 
5152   __ bind(&termination_exception);
5153   __ ThrowUncatchable(v0);
5154 
5155   __ bind(&failure);
5156   // For failure and exception return null.
5157   __ li(v0, Operand(isolate->factory()->null_value()));
5158   __ DropAndRet(4);
5159 
5160   // Process the result from the native regexp code.
5161   __ bind(&success);
5162   __ lw(a1,
5163          FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
5164   // Calculate number of capture registers (number_of_captures + 1) * 2.
5165   STATIC_ASSERT(kSmiTag == 0);
5166   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
5167   __ Addu(a1, a1, Operand(2));  // a1 was a smi.
5168 
5169   // a1: number of capture registers
5170   // subject: subject string
5171   // Store the capture count.
5172   __ sll(a2, a1, kSmiTagSize + kSmiShiftSize);  // To smi.
5173   __ sw(a2, FieldMemOperand(last_match_info_elements,
5174                              RegExpImpl::kLastCaptureCountOffset));
5175   // Store last subject and last input.
5176   __ sw(subject,
5177          FieldMemOperand(last_match_info_elements,
5178                          RegExpImpl::kLastSubjectOffset));
5179   __ mov(a2, subject);
5180   __ RecordWriteField(last_match_info_elements,
5181                       RegExpImpl::kLastSubjectOffset,
5182                       a2,
5183                       t3,
5184                       kRAHasNotBeenSaved,
5185                       kDontSaveFPRegs);
5186   __ sw(subject,
5187          FieldMemOperand(last_match_info_elements,
5188                          RegExpImpl::kLastInputOffset));
5189   __ RecordWriteField(last_match_info_elements,
5190                       RegExpImpl::kLastInputOffset,
5191                       subject,
5192                       t3,
5193                       kRAHasNotBeenSaved,
5194                       kDontSaveFPRegs);
5195 
5196   // Get the static offsets vector filled by the native regexp code.
5197   ExternalReference address_of_static_offsets_vector =
5198       ExternalReference::address_of_static_offsets_vector(isolate);
5199   __ li(a2, Operand(address_of_static_offsets_vector));
5200 
5201   // a1: number of capture registers
5202   // a2: offsets vector
5203   Label next_capture, done;
5204   // Capture register counter starts from number of capture registers and
5205   // counts down until wrapping after zero.
5206   __ Addu(a0,
5207          last_match_info_elements,
5208          Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
5209   __ bind(&next_capture);
5210   __ Subu(a1, a1, Operand(1));
5211   __ Branch(&done, lt, a1, Operand(zero_reg));
5212   // Read the value from the static offsets vector buffer.
5213   __ lw(a3, MemOperand(a2, 0));
5214   __ addiu(a2, a2, kPointerSize);
5215   // Store the smi value in the last match info.
5216   __ sll(a3, a3, kSmiTagSize);  // Convert to Smi.
5217   __ sw(a3, MemOperand(a0, 0));
5218   __ Branch(&next_capture, USE_DELAY_SLOT);
5219   __ addiu(a0, a0, kPointerSize);  // In branch delay slot.
5220 
5221   __ bind(&done);
5222 
5223   // Return last match info.
5224   __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
5225   __ DropAndRet(4);
5226 
5227   // External string.  Short external strings have already been ruled out.
5228   // a0: scratch
5229   __ bind(&external_string);
5230   __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
5231   __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
5232   if (FLAG_debug_code) {
5233     // Assert that we do not have a cons or slice (indirect strings) here.
5234     // Sequential strings have already been ruled out.
5235     __ And(at, a0, Operand(kIsIndirectStringMask));
5236     __ Assert(eq,
5237               "external string expected, but not found",
5238               at,
5239               Operand(zero_reg));
5240   }
5241   __ lw(subject,
5242         FieldMemOperand(subject, ExternalString::kResourceDataOffset));
5243   // Move the pointer so that offset-wise, it looks like a sequential string.
5244   STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
5245   __ Subu(subject,
5246           subject,
5247           SeqTwoByteString::kHeaderSize - kHeapObjectTag);
5248   __ jmp(&seq_string);
5249 
5250   // Do the runtime call to execute the regexp.
5251   __ bind(&runtime);
5252   __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
5253 #endif  // V8_INTERPRETED_REGEXP
5254 }
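
// On this port the native regexp entry is invoked like an 8-argument C
// function. A hedged restatement of the call assembled above (parameter names
// are descriptive only, not the actual declaration):
//
//   int NativeRegExp(String* subject,            // a0
//                    int previous_index,         // a1
//                    byte* input_start,          // a2
//                    byte* input_end,            // a3
//                    int* static_offsets_vector, // [sp + 1 * kPointerSize]
//                    byte* regexp_stack_top,     // [sp + 2 * kPointerSize]
//                    int direct_call,            // [sp + 3 * kPointerSize]
//                    Isolate* isolate);          // [sp + 4 * kPointerSize]
//
// The return value is one of NativeRegExpMacroAssembler::{SUCCESS, FAILURE,
// EXCEPTION, RETRY}, which the checks after the call dispatch on.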
5255 
5256 
5257 void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
5258   const int kMaxInlineLength = 100;
5259   Label slowcase;
5260   Label done;
5261   __ lw(a1, MemOperand(sp, kPointerSize * 2));
5262   STATIC_ASSERT(kSmiTag == 0);
5263   STATIC_ASSERT(kSmiTagSize == 1);
5264   __ JumpIfNotSmi(a1, &slowcase);
5265   __ Branch(&slowcase, hi, a1, Operand(Smi::FromInt(kMaxInlineLength)));
5266   // Smi-tagging is equivalent to multiplying by 2.
5267   // Allocate RegExpResult followed by FixedArray with size in a2.
5268   // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
5269   // Elements:  [Map][Length][..elements..]
5270   // Size of JSArray with two in-object properties and the header of a
5271   // FixedArray.
5272   int objects_size =
5273       (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
5274   __ srl(t1, a1, kSmiTagSize + kSmiShiftSize);
5275   __ Addu(a2, t1, Operand(objects_size));
5276   __ AllocateInNewSpace(
5277       a2,  // In: Size, in words.
5278       v0,  // Out: Start of allocation (tagged).
5279       a3,  // Scratch register.
5280       t0,  // Scratch register.
5281       &slowcase,
5282       static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
5283   // v0: Start of allocated area, object-tagged.
5284   // a1: Number of elements in array, as smi.
5285   // t1: Number of elements, untagged.
5286 
5287   // Set JSArray map to global.regexp_result_map().
5288   // Set empty properties FixedArray.
5289   // Set elements to point to FixedArray allocated right after the JSArray.
5290   // Interleave operations for better latency.
5291   __ lw(a2, ContextOperand(cp, Context::GLOBAL_INDEX));
5292   __ Addu(a3, v0, Operand(JSRegExpResult::kSize));
5293   __ li(t0, Operand(masm->isolate()->factory()->empty_fixed_array()));
5294   __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalContextOffset));
5295   __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
5296   __ lw(a2, ContextOperand(a2, Context::REGEXP_RESULT_MAP_INDEX));
5297   __ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset));
5298   __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
5299 
5300   // Set input, index and length fields from arguments.
5301   __ lw(a1, MemOperand(sp, kPointerSize * 0));
5302   __ lw(a2, MemOperand(sp, kPointerSize * 1));
5303   __ lw(t2, MemOperand(sp, kPointerSize * 2));
5304   __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kInputOffset));
5305   __ sw(a2, FieldMemOperand(v0, JSRegExpResult::kIndexOffset));
5306   __ sw(t2, FieldMemOperand(v0, JSArray::kLengthOffset));
5307 
5308   // Fill out the elements FixedArray.
5309   // v0: JSArray, tagged.
5310   // a3: FixedArray, tagged.
5311   // t1: Number of elements in array, untagged.
5312 
5313   // Set map.
5314   __ li(a2, Operand(masm->isolate()->factory()->fixed_array_map()));
5315   __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
5316   // Set FixedArray length.
5317   __ sll(t2, t1, kSmiTagSize);
5318   __ sw(t2, FieldMemOperand(a3, FixedArray::kLengthOffset));
5319   // Fill contents of fixed-array with the-hole.
5320   __ li(a2, Operand(masm->isolate()->factory()->the_hole_value()));
5321   __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
5322   // Fill fixed array elements with hole.
5323   // v0: JSArray, tagged.
5324   // a2: the hole.
5325   // a3: Start of elements in FixedArray.
5326   // t1: Number of elements to fill.
5327   Label loop;
5328   __ sll(t1, t1, kPointerSizeLog2);  // Convert num elements to num bytes.
5329   __ addu(t1, t1, a3);  // Point past last element to store.
5330   __ bind(&loop);
5331   __ Branch(&done, ge, a3, Operand(t1));  // Break when a3 past end of elem.
5332   __ sw(a2, MemOperand(a3));
5333   __ Branch(&loop, USE_DELAY_SLOT);
5334   __ addiu(a3, a3, kPointerSize);  // In branch delay slot.
5335 
5336   __ bind(&done);
5337   __ DropAndRet(3);
5338 
5339   __ bind(&slowcase);
5340   __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
5341 }
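
// Restated, the stub above performs (illustrative C++, hypothetical helper;
// length untagged here, a smi in the generated code):
//
//   Object* ConstructResult(int length, Object* index, Object* input) {
//     if (length < 0 || length > kMaxInlineLength) return SlowCase();
//     int size_in_words = length +
//         (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
//     // Allocate JSArray + elements contiguously, wire up maps and fields,
//     // then fill the elements with the_hole.
//     ...
//   }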
5342 
5343 
5344 static void GenerateRecordCallTarget(MacroAssembler* masm) {
5345   // Cache the called function in a global property cell.  Cache states
5346   // are uninitialized, monomorphic (indicated by a JSFunction), and
5347   // megamorphic.
5348   // a1 : the function to call
5349   // a2 : cache cell for call target
5350   Label done;
5351 
5352   ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
5353             masm->isolate()->heap()->undefined_value());
5354   ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
5355             masm->isolate()->heap()->the_hole_value());
5356 
5357   // Load the cache state into a3.
5358   __ lw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
5359 
5360   // A monomorphic cache hit or an already megamorphic state: invoke the
5361   // function without changing the state.
5362   __ Branch(&done, eq, a3, Operand(a1));
5363   __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5364   __ Branch(&done, eq, a3, Operand(at));
5365 
5366   // A monomorphic miss (i.e., here the cache is not uninitialized) goes
5367   // megamorphic.
5368   __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
5369 
5370   __ Branch(USE_DELAY_SLOT, &done, eq, a3, Operand(at));
5371   // An uninitialized cache is patched with the function.
5372   // Store a1 in the delay slot. This may or may not get overwritten depending
5373   // on the result of the comparison.
5374   __ sw(a1, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
5375   // No need for a write barrier here - cells are rescanned.
5376 
5377   // MegamorphicSentinel is an immortal immovable object (undefined) so no
5378   // write-barrier is needed.
5379   __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5380   __ sw(at, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
5381 
5382   __ bind(&done);
5383 }
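
// The cache cell thus implements a small monotone state machine:
//
//   the_hole (uninitialized) --first call--------> JSFunction (monomorphic)
//   JSFunction (monomorphic) --different target--> undefined (megamorphic)
//   undefined (megamorphic)  --any call----------> undefined (stays put)
//
// The same logic as an illustrative C++ sketch (not the actual V8 types):
//
//   void RecordCallTarget(Object** cell, JSFunction* callee) {
//     Object* state = *cell;
//     if (state == callee || state == undefined_value) return;
//     *cell = (state == the_hole_value)
//         ? static_cast<Object*>(callee)  // first sighting: go monomorphic
//         : undefined_value;              // second target seen: megamorphic
//   }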
5384 
5385 
5386 void CallFunctionStub::Generate(MacroAssembler* masm) {
5387   // a1 : the function to call
5388   // a2 : cache cell for call target
5389   Label slow, non_function;
5390 
5391   // The receiver might implicitly be the global object. This is
5392   // indicated by passing the hole as the receiver to the call
5393   // function stub.
5394   if (ReceiverMightBeImplicit()) {
5395     Label call;
5396     // Get the receiver from the stack.
5397     // function, receiver [, arguments]
5398     __ lw(t0, MemOperand(sp, argc_ * kPointerSize));
5399     // Call as function is indicated with the hole.
5400     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
5401     __ Branch(&call, ne, t0, Operand(at));
5402     // Patch the receiver on the stack with the global receiver object.
5403     __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
5404     __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
5405     __ sw(a2, MemOperand(sp, argc_ * kPointerSize));
5406     __ bind(&call);
5407   }
5408 
5409   // Check that the function is really a JavaScript function.
5410   // a1: pushed function (to be verified)
5411   __ JumpIfSmi(a1, &non_function);
5412   // Get the map of the function object.
5413   __ GetObjectType(a1, a2, a2);
5414   __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
5415 
5416   // Fast-case: Invoke the function now.
5417   // a1: pushed function
5418   ParameterCount actual(argc_);
5419 
5420   if (ReceiverMightBeImplicit()) {
5421     Label call_as_function;
5422     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
5423     __ Branch(&call_as_function, eq, t0, Operand(at));
5424     __ InvokeFunction(a1,
5425                       actual,
5426                       JUMP_FUNCTION,
5427                       NullCallWrapper(),
5428                       CALL_AS_METHOD);
5429     __ bind(&call_as_function);
5430   }
5431   __ InvokeFunction(a1,
5432                     actual,
5433                     JUMP_FUNCTION,
5434                     NullCallWrapper(),
5435                     CALL_AS_FUNCTION);
5436 
5437   // Slow-case: Non-function called.
5438   __ bind(&slow);
5439   // Check for function proxy.
5440   __ Branch(&non_function, ne, a2, Operand(JS_FUNCTION_PROXY_TYPE));
5441   __ push(a1);  // Put proxy as additional argument.
5442   __ li(a0, Operand(argc_ + 1, RelocInfo::NONE));
5443   __ li(a2, Operand(0, RelocInfo::NONE));
5444   __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
5445   __ SetCallKind(t1, CALL_AS_METHOD);
5446   {
5447     Handle<Code> adaptor =
5448       masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
5449     __ Jump(adaptor, RelocInfo::CODE_TARGET);
5450   }
5451 
5452   // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
5453   // of the original receiver from the call site).
5454   __ bind(&non_function);
5455   __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
5456   __ li(a0, Operand(argc_));  // Set up the number of arguments.
5457   __ mov(a2, zero_reg);
5458   __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
5459   __ SetCallKind(t1, CALL_AS_METHOD);
5460   __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
5461           RelocInfo::CODE_TARGET);
5462 }
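
// Dispatch performed above, in outline (an illustrative sketch, not the
// authoritative logic):
//
//   if (receiver == the_hole) receiver = global_receiver;  // implicit receiver
//   if (callee is a JSFunction)            invoke it (CALL_AS_METHOD if implicit);
//   else if (callee is a JSFunctionProxy)  tail-call CALL_FUNCTION_PROXY;
//   else                                   tail-call CALL_NON_FUNCTION.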
5463 
5464 
5465 void CallConstructStub::Generate(MacroAssembler* masm) {
5466   // a0 : number of arguments
5467   // a1 : the function to call
5468   // a2 : cache cell for call target
5469   Label slow, non_function_call;
5470 
5471   // Check that the function is not a smi.
5472   __ JumpIfSmi(a1, &non_function_call);
5473   // Check that the function is a JSFunction.
5474   __ GetObjectType(a1, a3, a3);
5475   __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
5476 
5477   if (RecordCallTarget()) {
5478     GenerateRecordCallTarget(masm);
5479   }
5480 
5481   // Jump to the function-specific construct stub.
5482   __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
5483   __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kConstructStubOffset));
5484   __ Addu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
5485   __ Jump(at);
5486 
5487   // a0: number of arguments
5488   // a1: called object
5489   // a3: object type
5490   Label do_call;
5491   __ bind(&slow);
5492   __ Branch(&non_function_call, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
5493   __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
5494   __ jmp(&do_call);
5495 
5496   __ bind(&non_function_call);
5497   __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
5498   __ bind(&do_call);
5499   // Set expected number of arguments to zero (not changing a0).
5500   __ li(a2, Operand(0, RelocInfo::NONE));
5501   __ SetCallKind(t1, CALL_AS_METHOD);
5502   __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
5503           RelocInfo::CODE_TARGET);
5504 }
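
// The construct-call analogue of the dispatch above (illustrative outline):
//
//   if (callee is a JSFunction)            jump to its construct stub;
//   else if (callee is a JSFunctionProxy)  CALL_FUNCTION_PROXY_AS_CONSTRUCTOR;
//   else                                   CALL_NON_FUNCTION_AS_CONSTRUCTOR.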
5505 
5506 
5507 // Unfortunately you have to run without snapshots to see most of these
5508 // names in the profile since most compare stubs end up in the snapshot.
5509 void CompareStub::PrintName(StringStream* stream) {
5510   ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
5511          (lhs_.is(a1) && rhs_.is(a0)));
5512   const char* cc_name;
5513   switch (cc_) {
5514     case lt: cc_name = "LT"; break;
5515     case gt: cc_name = "GT"; break;
5516     case le: cc_name = "LE"; break;
5517     case ge: cc_name = "GE"; break;
5518     case eq: cc_name = "EQ"; break;
5519     case ne: cc_name = "NE"; break;
5520     default: cc_name = "UnknownCondition"; break;
5521   }
5522   bool is_equality = cc_ == eq || cc_ == ne;
5523   stream->Add("CompareStub_%s", cc_name);
5524   stream->Add(lhs_.is(a0) ? "_a0" : "_a1");
5525   stream->Add(rhs_.is(a0) ? "_a0" : "_a1");
5526   if (strict_ && is_equality) stream->Add("_STRICT");
5527   if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
5528   if (!include_number_compare_) stream->Add("_NO_NUMBER");
5529   if (!include_smi_compare_) stream->Add("_NO_SMI");
5530 }
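
// For example, a strict equality stub taking lhs in a0 and rhs in a1 with the
// number path omitted would print as (hypothetical instance):
//   CompareStub_EQ_a0_a1_STRICT_NO_NUMBER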
5531 
5532 
5533 int CompareStub::MinorKey() {
5534   // Encode the two parameters in a unique 16 bit value.
5535   ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
5536   ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
5537          (lhs_.is(a1) && rhs_.is(a0)));
5538   return ConditionField::encode(static_cast<unsigned>(cc_))
5539          | RegisterField::encode(lhs_.is(a0))
5540          | StrictField::encode(strict_)
5541          | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
5542          | IncludeSmiCompareField::encode(include_smi_compare_);
5543 }
5544 
5545 
5546 // StringCharCodeAtGenerator.
5547 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
5548   Label flat_string;
5549   Label ascii_string;
5550   Label got_char_code;
5551   Label sliced_string;
5552 
5553   ASSERT(!t0.is(index_));
5554   ASSERT(!t0.is(result_));
5555   ASSERT(!t0.is(object_));
5556 
5557   // If the receiver is a smi trigger the non-string case.
5558   __ JumpIfSmi(object_, receiver_not_string_);
5559 
5560   // Fetch the instance type of the receiver into result register.
5561   __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
5562   __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
5563   // If the receiver is not a string trigger the non-string case.
5564   __ And(t0, result_, Operand(kIsNotStringMask));
5565   __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg));
5566 
5567   // If the index is non-smi trigger the non-smi case.
5568   __ JumpIfNotSmi(index_, &index_not_smi_);
5569 
5570   __ bind(&got_smi_index_);
5571 
5572   // Check for index out of range.
5573   __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
5574   __ Branch(index_out_of_range_, ls, t0, Operand(index_));
5575 
5576   __ sra(index_, index_, kSmiTagSize);
5577 
5578   StringCharLoadGenerator::Generate(masm,
5579                                     object_,
5580                                     index_,
5581                                     result_,
5582                                     &call_runtime_);
5583 
5584   __ sll(result_, result_, kSmiTagSize);
5585   __ bind(&exit_);
5586 }
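
// Fast path above, in outline (illustrative sketch):
//
//   if (object is a smi or not a string)   goto receiver_not_string;
//   if (index is not a smi)                goto index_not_smi;      // slow case
//   if ((uint32)index >= string.length)    goto index_out_of_range;
//   result = Smi::FromInt(char code at index);  // may bail to call_runtime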
5587 
5588 
5589 void StringCharCodeAtGenerator::GenerateSlow(
5590     MacroAssembler* masm,
5591     const RuntimeCallHelper& call_helper) {
5592   __ Abort("Unexpected fallthrough to CharCodeAt slow case");
5593 
5594   // Index is not a smi.
5595   __ bind(&index_not_smi_);
5596   // If index is a heap number, try converting it to an integer.
5597   __ CheckMap(index_,
5598               result_,
5599               Heap::kHeapNumberMapRootIndex,
5600               index_not_number_,
5601               DONT_DO_SMI_CHECK);
5602   call_helper.BeforeCall(masm);
5603   // Consumed by runtime conversion function:
5604   __ Push(object_, index_);
5605   if (index_flags_ == STRING_INDEX_IS_NUMBER) {
5606     __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
5607   } else {
5608     ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
5609     // NumberToSmi discards numbers that are not exact integers.
5610     __ CallRuntime(Runtime::kNumberToSmi, 1);
5611   }
5612 
5613   // Save the conversion result before the pop instructions below
5614   // have a chance to overwrite it.
5615 
5616   __ Move(index_, v0);
5617   __ pop(object_);
5618   // Reload the instance type.
5619   __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
5620   __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
5621   call_helper.AfterCall(masm);
5622   // If index is still not a smi, it must be out of range.
5623   __ JumpIfNotSmi(index_, index_out_of_range_);
5624   // Otherwise, return to the fast path.
5625   __ Branch(&got_smi_index_);
5626 
5627   // Call runtime. We get here when the receiver is a string and the
5628   // index is a number, but the code of getting the actual character
5629   // is too complex (e.g., when the string needs to be flattened).
5630   __ bind(&call_runtime_);
5631   call_helper.BeforeCall(masm);
5632   __ sll(index_, index_, kSmiTagSize);
5633   __ Push(object_, index_);
5634   __ CallRuntime(Runtime::kStringCharCodeAt, 2);
5635 
5636   __ Move(result_, v0);
5637 
5638   call_helper.AfterCall(masm);
5639   __ jmp(&exit_);
5640 
5641   __ Abort("Unexpected fallthrough from CharCodeAt slow case");
5642 }
5643 
5644 
5645 // -------------------------------------------------------------------------
5646 // StringCharFromCodeGenerator
5647 
5648 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
5649   // Fast case of Heap::LookupSingleCharacterStringFromCode.
5650 
5651   ASSERT(!t0.is(result_));
5652   ASSERT(!t0.is(code_));
5653 
5654   STATIC_ASSERT(kSmiTag == 0);
5655   STATIC_ASSERT(kSmiShiftSize == 0);
5656   ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
5657   __ And(t0,
5658          code_,
5659          Operand(kSmiTagMask |
5660                  ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
5661   __ Branch(&slow_case_, ne, t0, Operand(zero_reg));
5662 
5663   __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
5664   // At this point code register contains smi tagged ASCII char code.
5665   STATIC_ASSERT(kSmiTag == 0);
5666   __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
5667   __ Addu(result_, result_, t0);
5668   __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
5669   __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
5670   __ Branch(&slow_case_, eq, result_, Operand(t0));
5671   __ bind(&exit_);
5672 }
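
// Scalar form of the fast case (illustrative sketch):
//
//   if (code is a smi && code <= String::kMaxAsciiCharCode) {
//     result = single_character_string_cache[code];
//     if (result != undefined) return result;            // cache hit
//   }
//   // otherwise fall through to the slow case (runtime CharFromCode)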
5673 
5674 
5675 void StringCharFromCodeGenerator::GenerateSlow(
5676     MacroAssembler* masm,
5677     const RuntimeCallHelper& call_helper) {
5678   __ Abort("Unexpected fallthrough to CharFromCode slow case");
5679 
5680   __ bind(&slow_case_);
5681   call_helper.BeforeCall(masm);
5682   __ push(code_);
5683   __ CallRuntime(Runtime::kCharFromCode, 1);
5684   __ Move(result_, v0);
5685 
5686   call_helper.AfterCall(masm);
5687   __ Branch(&exit_);
5688 
5689   __ Abort("Unexpected fallthrough from CharFromCode slow case");
5690 }
5691 
5692 
5693 // -------------------------------------------------------------------------
5694 // StringCharAtGenerator
5695 
5696 void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
5697   char_code_at_generator_.GenerateFast(masm);
5698   char_from_code_generator_.GenerateFast(masm);
5699 }
5700 
5701 
5702 void StringCharAtGenerator::GenerateSlow(
5703     MacroAssembler* masm,
5704     const RuntimeCallHelper& call_helper) {
5705   char_code_at_generator_.GenerateSlow(masm, call_helper);
5706   char_from_code_generator_.GenerateSlow(masm, call_helper);
5707 }
5708 
5709 
5710 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
5711                                           Register dest,
5712                                           Register src,
5713                                           Register count,
5714                                           Register scratch,
5715                                           bool ascii) {
5716   Label loop;
5717   Label done;
5718   // This loop just copies one character at a time, as it is only used for
5719   // very short strings.
5720   if (!ascii) {
5721     __ addu(count, count, count);
5722   }
5723   __ Branch(&done, eq, count, Operand(zero_reg));
5724   __ addu(count, dest, count);  // Count now points to the last dest byte.
5725 
5726   __ bind(&loop);
5727   __ lbu(scratch, MemOperand(src));
5728   __ addiu(src, src, 1);
5729   __ sb(scratch, MemOperand(dest));
5730   __ addiu(dest, dest, 1);
5731   __ Branch(&loop, lt, dest, Operand(count));
5732 
5733   __ bind(&done);
5734 }
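
// Equivalent scalar logic (illustrative; count is in characters and is
// doubled first for two-byte strings, so the loop below works in bytes):
//
//   if (count != 0) {
//     uint8_t* limit = dest + count;
//     do { *dest++ = *src++; } while (dest < limit);
//   }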
5735 
5736 
5737 enum CopyCharactersFlags {
5738   COPY_ASCII = 1,
5739   DEST_ALWAYS_ALIGNED = 2
5740 };
5741 
5742 
5743 void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
5744                                               Register dest,
5745                                               Register src,
5746                                               Register count,
5747                                               Register scratch1,
5748                                               Register scratch2,
5749                                               Register scratch3,
5750                                               Register scratch4,
5751                                               Register scratch5,
5752                                               int flags) {
5753   bool ascii = (flags & COPY_ASCII) != 0;
5754   bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
5755 
5756   if (dest_always_aligned && FLAG_debug_code) {
5757     // Check that destination is actually word aligned if the flag says
5758     // that it is.
5759     __ And(scratch4, dest, Operand(kPointerAlignmentMask));
5760     __ Check(eq,
5761              "Destination of copy not aligned.",
5762              scratch4,
5763              Operand(zero_reg));
5764   }
5765 
5766   const int kReadAlignment = 4;
5767   const int kReadAlignmentMask = kReadAlignment - 1;
5768   // Ensure that reading an entire aligned word containing the last character
5769   // of a string will not read outside the allocated area (because we pad up
5770   // to kObjectAlignment).
5771   STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
5772   // Assumes word reads and writes are little endian.
5773   // Nothing to do for zero characters.
5774   Label done;
5775 
5776   if (!ascii) {
5777     __ addu(count, count, count);
5778   }
5779   __ Branch(&done, eq, count, Operand(zero_reg));
5780 
5781   Label byte_loop;
5782   // Must copy at least eight bytes, otherwise just do it one byte at a time.
5783   __ Subu(scratch1, count, Operand(8));
5784   __ Addu(count, dest, Operand(count));
5785   Register limit = count;  // Read until src equals this.
5786   __ Branch(&byte_loop, lt, scratch1, Operand(zero_reg));
5787 
5788   if (!dest_always_aligned) {
5789     // Align dest by byte copying. Copies between zero and three bytes.
5790     __ And(scratch4, dest, Operand(kReadAlignmentMask));
5791     Label dest_aligned;
5792     __ Branch(&dest_aligned, eq, scratch4, Operand(zero_reg));
5793     Label aligned_loop;
5794     __ bind(&aligned_loop);
5795     __ lbu(scratch1, MemOperand(src));
5796     __ addiu(src, src, 1);
5797     __ sb(scratch1, MemOperand(dest));
5798     __ addiu(dest, dest, 1);
5799     __ addiu(scratch4, scratch4, 1);
5800     __ Branch(&aligned_loop, le, scratch4, Operand(kReadAlignmentMask));
5801     __ bind(&dest_aligned);
5802   }
5803 
5804   Label simple_loop;
5805 
5806   __ And(scratch4, src, Operand(kReadAlignmentMask));
5807   __ Branch(&simple_loop, eq, scratch4, Operand(zero_reg));
5808 
5809   // Loop for src/dst that are not aligned the same way.
5810   // This loop uses lwl and lwr instructions. These instructions
5811   // depend on the endianness, and the implementation assumes little-endian.
5812   {
5813     Label loop;
5814     __ bind(&loop);
5815     __ lwr(scratch1, MemOperand(src));
5816     __ Addu(src, src, Operand(kReadAlignment));
5817     __ lwl(scratch1, MemOperand(src, -1));
5818     __ sw(scratch1, MemOperand(dest));
5819     __ Addu(dest, dest, Operand(kReadAlignment));
5820     __ Subu(scratch2, limit, dest);
5821     __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
5822   }
5823 
5824   __ Branch(&byte_loop);
5825 
5826   // Simple loop.
5827   // Copy words from src to dest, until less than four bytes left.
5828   // Both src and dest are word aligned.
5829   __ bind(&simple_loop);
5830   {
5831     Label loop;
5832     __ bind(&loop);
5833     __ lw(scratch1, MemOperand(src));
5834     __ Addu(src, src, Operand(kReadAlignment));
5835     __ sw(scratch1, MemOperand(dest));
5836     __ Addu(dest, dest, Operand(kReadAlignment));
5837     __ Subu(scratch2, limit, dest);
5838     __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
5839   }
5840 
5841   // Copy bytes from src to dest until dest hits limit.
5842   __ bind(&byte_loop);
5843   // Test if dest has already reached the limit.
5844   __ Branch(&done, ge, dest, Operand(limit));
5845   __ lbu(scratch1, MemOperand(src));
5846   __ addiu(src, src, 1);
5847   __ sb(scratch1, MemOperand(dest));
5848   __ addiu(dest, dest, 1);
5849   __ Branch(&byte_loop);
5850 
5851   __ bind(&done);
5852 }
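
// The copy strategy above, in outline (illustrative sketch):
//
//   if (bytes < 8) goto byte_loop;              // too short for word copies
//   byte-copy 0..3 bytes until dest is word aligned (unless already aligned);
//   if (src is now word aligned)  copy word-by-word with lw/sw;
//   else                          copy with the unaligned-load pair lwr/lwl;
//   byte_loop: copy the remaining 0..3 bytes one at a time.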
5853 
5854 
5855 void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
5856                                                         Register c1,
5857                                                         Register c2,
5858                                                         Register scratch1,
5859                                                         Register scratch2,
5860                                                         Register scratch3,
5861                                                         Register scratch4,
5862                                                         Register scratch5,
5863                                                         Label* not_found) {
5864   // Register scratch3 is the general scratch register in this function.
5865   Register scratch = scratch3;
5866 
5867   // Make sure that both characters are not digits, as such strings have a
5868   // different hash algorithm. Don't try to look for these in the symbol table.
5869   Label not_array_index;
5870   __ Subu(scratch, c1, Operand(static_cast<int>('0')));
5871   __ Branch(&not_array_index,
5872             Ugreater,
5873             scratch,
5874             Operand(static_cast<int>('9' - '0')));
5875   __ Subu(scratch, c2, Operand(static_cast<int>('0')));
5876 
5877   // If the check failed, combine both characters into a single halfword.
5878   // This is required by the contract of the method: code at the
5879   // not_found branch expects this combination in the c1 register.
5880   Label tmp;
5881   __ sll(scratch1, c2, kBitsPerByte);
5882   __ Branch(&tmp, Ugreater, scratch, Operand(static_cast<int>('9' - '0')));
5883   __ Or(c1, c1, scratch1);
5884   __ bind(&tmp);
5885   __ Branch(
5886       not_found, Uless_equal, scratch, Operand(static_cast<int>('9' - '0')));
5887 
5888   __ bind(&not_array_index);
5889   // Calculate the two character string hash.
5890   Register hash = scratch1;
5891   StringHelper::GenerateHashInit(masm, hash, c1);
5892   StringHelper::GenerateHashAddCharacter(masm, hash, c2);
5893   StringHelper::GenerateHashGetHash(masm, hash);
5894 
5895   // Collect the two characters in a register.
5896   Register chars = c1;
5897   __ sll(scratch, c2, kBitsPerByte);
5898   __ Or(chars, chars, scratch);
5899 
5900   // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5901   // hash:  hash of two character string.
5902 
5903   // Load symbol table.
5904   // Load address of first element of the symbol table.
5905   Register symbol_table = c2;
5906   __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
5907 
5908   Register undefined = scratch4;
5909   __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
5910 
5911   // Calculate capacity mask from the symbol table capacity.
5912   Register mask = scratch2;
5913   __ lw(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
5914   __ sra(mask, mask, 1);
5915   __ Addu(mask, mask, -1);
5916 
5917   // Calculate untagged address of the first element of the symbol table.
5918   Register first_symbol_table_element = symbol_table;
5919   __ Addu(first_symbol_table_element, symbol_table,
5920          Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
5921 
5922   // Registers.
5923   // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5924   // hash:  hash of two character string
5925   // mask:  capacity mask
5926   // first_symbol_table_element: address of the first element of
5927   //                             the symbol table
5928   // undefined: the undefined object
5929   // scratch: -
5930 
5931   // Perform a number of probes in the symbol table.
5932   const int kProbes = 4;
5933   Label found_in_symbol_table;
5934   Label next_probe[kProbes];
5935   Register candidate = scratch5;  // Scratch register contains candidate.
5936   for (int i = 0; i < kProbes; i++) {
5937     // Calculate entry in symbol table.
5938     if (i > 0) {
5939       __ Addu(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
5940     } else {
5941       __ mov(candidate, hash);
5942     }
5943 
5944     __ And(candidate, candidate, Operand(mask));
5945 
5946     // Load the entry from the symbol table.
5947     STATIC_ASSERT(SymbolTable::kEntrySize == 1);
5948     __ sll(scratch, candidate, kPointerSizeLog2);
5949     __ Addu(scratch, scratch, first_symbol_table_element);
5950     __ lw(candidate, MemOperand(scratch));
5951 
5952     // If entry is undefined no string with this hash can be found.
5953     Label is_string;
5954     __ GetObjectType(candidate, scratch, scratch);
5955     __ Branch(&is_string, ne, scratch, Operand(ODDBALL_TYPE));
5956 
5957     __ Branch(not_found, eq, undefined, Operand(candidate));
5958     // Must be the hole (deleted entry).
5959     if (FLAG_debug_code) {
5960       __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
5961       __ Assert(eq, "oddball in symbol table is not undefined or the hole",
5962           scratch, Operand(candidate));
5963     }
5964     __ jmp(&next_probe[i]);
5965 
5966     __ bind(&is_string);
5967 
5968     // Check that the candidate is a non-external ASCII string.  The instance
5969     // type is still in the scratch register from the CompareObjectType
5970     // operation.
5971     __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
5972 
5973     // If length is not 2 the string is not a candidate.
5974     __ lw(scratch, FieldMemOperand(candidate, String::kLengthOffset));
5975     __ Branch(&next_probe[i], ne, scratch, Operand(Smi::FromInt(2)));
5976 
5977     // Check if the two characters match.
5978     // Assumes that word load is little endian.
5979     __ lhu(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
5980     __ Branch(&found_in_symbol_table, eq, chars, Operand(scratch));
5981     __ bind(&next_probe[i]);
5982   }
5983 
5984   // No matching 2 character string found by probing.
5985   __ jmp(not_found);
5986 
5987   // Scratch register contains result when we fall through to here.
5988   Register result = candidate;
5989   __ bind(&found_in_symbol_table);
5990   __ mov(v0, result);
5991 }
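
// A minimal C++ sketch of the probe loop above (KeyAt is the real HashTable
// accessor; IsTwoCharAsciiMatch is a hypothetical helper standing in for the
// type, length, and character checks):
//
//   for (int i = 0; i < kProbes; i++) {
//     uint32_t entry = (hash + SymbolTable::GetProbeOffset(i)) & mask;
//     Object* candidate = table->KeyAt(entry);
//     if (candidate == undefined) return not_found;     // empty slot: miss
//     if (IsTwoCharAsciiMatch(candidate, chars)) return candidate;
//     // the hole (deleted entry) or a mismatch: try the next probe
//   }
//   return not_found;  // caller allocates a fresh two-character string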
5992 
5993 
5994 void StringHelper::GenerateHashInit(MacroAssembler* masm,
5995                                     Register hash,
5996                                     Register character) {
5997   // hash = seed + character + ((seed + character) << 10);
5998   __ LoadRoot(hash, Heap::kHashSeedRootIndex);
5999   // Untag smi seed and add the character.
6000   __ SmiUntag(hash);
6001   __ addu(hash, hash, character);
6002   __ sll(at, hash, 10);
6003   __ addu(hash, hash, at);
6004   // hash ^= hash >> 6;
6005   __ srl(at, hash, 6);
6006   __ xor_(hash, hash, at);
6007 }
6008 
6009 
6010 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
6011                                             Register hash,
6012                                             Register character) {
6013   // hash += character;
6014   __ addu(hash, hash, character);
6015   // hash += hash << 10;
6016   __ sll(at, hash, 10);
6017   __ addu(hash, hash, at);
6018   // hash ^= hash >> 6;
6019   __ srl(at, hash, 6);
6020   __ xor_(hash, hash, at);
6021 }
6022 
6023 
6024 void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
6025                                        Register hash) {
6026   // hash += hash << 3;
6027   __ sll(at, hash, 3);
6028   __ addu(hash, hash, at);
6029   // hash ^= hash >> 11;
6030   __ srl(at, hash, 11);
6031   __ xor_(hash, hash, at);
6032   // hash += hash << 15;
6033   __ sll(at, hash, 15);
6034   __ addu(hash, hash, at);
6035 
6036   __ li(at, Operand(String::kHashBitMask));
6037   __ and_(hash, hash, at);
6038 
6039   // if (hash == 0) hash = 27;
6040   __ ori(at, zero_reg, StringHasher::kZeroHash);
6041   __ Movz(hash, at, hash);
6042 }
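
// Taken together, GenerateHashInit/AddCharacter/GetHash compute the Jenkins
// one-at-a-time hash used by StringHasher; a scalar sketch:
//
//   uint32_t hash = seed;                      // untagged kHashSeedRootIndex
//   for (each character c) {
//     hash += c;  hash += hash << 10;  hash ^= hash >> 6;
//   }
//   hash += hash << 3;  hash ^= hash >> 11;  hash += hash << 15;
//   hash &= String::kHashBitMask;
//   if (hash == 0) hash = StringHasher::kZeroHash;  // 27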
6043 
6044 
6045 void SubStringStub::Generate(MacroAssembler* masm) {
6046   Label runtime;
6047   // Stack frame on entry.
6048   //  ra: return address
6049   //  sp[0]: to
6050   //  sp[4]: from
6051   //  sp[8]: string
6052 
6053   // This stub is called from the native-call %_SubString(...), so
6054   // nothing can be assumed about the arguments. It is tested that:
6055   //  "string" is a sequential string,
6056   //  both "from" and "to" are smis, and
6057   //  0 <= from <= to <= string.length.
6058   // If any of these assumptions fail, we call the runtime system.
6059 
6060   const int kToOffset = 0 * kPointerSize;
6061   const int kFromOffset = 1 * kPointerSize;
6062   const int kStringOffset = 2 * kPointerSize;
6063 
6064   __ lw(a2, MemOperand(sp, kToOffset));
6065   __ lw(a3, MemOperand(sp, kFromOffset));
6066   STATIC_ASSERT(kFromOffset == kToOffset + 4);
6067   STATIC_ASSERT(kSmiTag == 0);
6068   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
6069 
6070   // Utilize delay slots. SmiUntag doesn't emit a jump, everything else is
6071   // safe in this case.
6072   __ UntagAndJumpIfNotSmi(a2, a2, &runtime);
6073   __ UntagAndJumpIfNotSmi(a3, a3, &runtime);
6074   // Both a2 and a3 are untagged integers.
6075 
6076   __ Branch(&runtime, lt, a3, Operand(zero_reg));  // From < 0.
6077 
6078   __ Branch(&runtime, gt, a3, Operand(a2));  // Fail if from > to.
6079   __ Subu(a2, a2, a3);
6080 
6081   // Make sure first argument is a string.
6082   __ lw(v0, MemOperand(sp, kStringOffset));
6083   __ JumpIfSmi(v0, &runtime);
6084   __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
6085   __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
6086   __ And(t0, a1, Operand(kIsNotStringMask));
6087 
6088   __ Branch(&runtime, ne, t0, Operand(zero_reg));
6089 
6090   // Short-cut for the case of trivial substring.
6091   Label return_v0;
6092   // v0: original string
6093   // a2: result string length
6094   __ lw(t0, FieldMemOperand(v0, String::kLengthOffset));
6095   __ sra(t0, t0, 1);
6096   __ Branch(&return_v0, eq, a2, Operand(t0));
6097 
6098 
6099   Label result_longer_than_two;
6100   // Check for special case of two character ASCII string, in which case
6101   // we do a lookup in the symbol table first.
6102   __ li(t0, 2);
6103   __ Branch(&result_longer_than_two, gt, a2, Operand(t0));
6104   __ Branch(&runtime, lt, a2, Operand(t0));
6105 
6106   __ JumpIfInstanceTypeIsNotSequentialAscii(a1, a1, &runtime);
6107 
6108   // Get the two characters forming the sub string.
6109   __ Addu(v0, v0, Operand(a3));
6110   __ lbu(a3, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
6111   __ lbu(t0, FieldMemOperand(v0, SeqAsciiString::kHeaderSize + 1));
6112 
6113   // Try to look up the two-character string in the symbol table.
6114   Label make_two_character_string;
6115   StringHelper::GenerateTwoCharacterSymbolTableProbe(
6116       masm, a3, t0, a1, t1, t2, t3, t4, &make_two_character_string);
6117   __ jmp(&return_v0);
6118 
6119   // a2: result string length.
6120   // a3: two characters combined into halfword in little endian byte order.
6121   __ bind(&make_two_character_string);
6122   __ AllocateAsciiString(v0, a2, t0, t1, t4, &runtime);
6123   __ sh(a3, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
6124   __ jmp(&return_v0);
6125 
6126   __ bind(&result_longer_than_two);
6127 
6128   // Deal with different string types: update the index if necessary
6129   // and put the underlying string into t1.
6130   // v0: original string
6131   // a1: instance type
6132   // a2: length
6133   // a3: from index (untagged)
6134   Label underlying_unpacked, sliced_string, seq_or_external_string;
6135   // If the string is not indirect, it can only be sequential or external.
6136   STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
6137   STATIC_ASSERT(kIsIndirectStringMask != 0);
6138   __ And(t0, a1, Operand(kIsIndirectStringMask));
6139   __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, t0, Operand(zero_reg));
6140   // t0 is used as a scratch register and can be overwritten in either case.
6141   __ And(t0, a1, Operand(kSlicedNotConsMask));
6142   __ Branch(&sliced_string, ne, t0, Operand(zero_reg));
6143   // Cons string.  Check whether it is flat, then fetch first part.
6144   __ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset));
6145   __ LoadRoot(t0, Heap::kEmptyStringRootIndex);
6146   __ Branch(&runtime, ne, t1, Operand(t0));
6147   __ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset));
6148   // Update instance type.
6149   __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
6150   __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
6151   __ jmp(&underlying_unpacked);
6152 
6153   __ bind(&sliced_string);
6154   // Sliced string.  Fetch parent and correct start index by offset.
6155   __ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
6156   __ lw(t0, FieldMemOperand(v0, SlicedString::kOffsetOffset));
6157   __ sra(t0, t0, 1);  // Add offset to index.
6158   __ Addu(a3, a3, t0);
6159   // Update instance type.
6160   __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
6161   __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
6162   __ jmp(&underlying_unpacked);
6163 
6164   __ bind(&seq_or_external_string);
6165   // Sequential or external string.  Just move string to the expected register.
6166   __ mov(t1, v0);
6167 
6168   __ bind(&underlying_unpacked);
6169 
6170   if (FLAG_string_slices) {
6171     Label copy_routine;
6172     // t1: underlying subject string
6173     // a1: instance type of underlying subject string
6174     // a2: length
6175     // a3: adjusted start index (untagged)
6176     // Short slice.  Copy instead of slicing.
6177     __ Branch(&copy_routine, lt, a2, Operand(SlicedString::kMinLength));
6178     // Allocate new sliced string.  At this point we do not reload the instance
6179     // type including the string encoding because we simply rely on the info
6180     // provided by the original string.  It does not matter if the original
6181     // string's encoding is wrong because we always have to recheck the encoding
6182     // of the newly created string's parent anyway due to externalized strings.
6183     Label two_byte_slice, set_slice_header;
6184     STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
6185     STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
6186     __ And(t0, a1, Operand(kStringEncodingMask));
6187     __ Branch(&two_byte_slice, eq, t0, Operand(zero_reg));
6188     __ AllocateAsciiSlicedString(v0, a2, t2, t3, &runtime);
6189     __ jmp(&set_slice_header);
6190     __ bind(&two_byte_slice);
6191     __ AllocateTwoByteSlicedString(v0, a2, t2, t3, &runtime);
6192     __ bind(&set_slice_header);
6193     __ sll(a3, a3, 1);
6194     __ sw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
6195     __ sw(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
6196     __ jmp(&return_v0);
6197 
6198     __ bind(&copy_routine);
6199   }
6200 
6201   // t1: underlying subject string
6202   // a1: instance type of underlying subject string
6203   // a2: length
6204   // a3: adjusted start index (untagged)
6205   Label two_byte_sequential, sequential_string, allocate_result;
6206   STATIC_ASSERT(kExternalStringTag != 0);
6207   STATIC_ASSERT(kSeqStringTag == 0);
6208   __ And(t0, a1, Operand(kExternalStringTag));
6209   __ Branch(&sequential_string, eq, t0, Operand(zero_reg));
6210 
6211   // Handle external string.
6212   // Rule out short external strings.
6213   STATIC_CHECK(kShortExternalStringTag != 0);
6214   __ And(t0, a1, Operand(kShortExternalStringTag));
6215   __ Branch(&runtime, ne, t0, Operand(zero_reg));
6216   __ lw(t1, FieldMemOperand(t1, ExternalString::kResourceDataOffset));
6217   // t1 already points to the first character of underlying string.
6218   __ jmp(&allocate_result);
6219 
6220   __ bind(&sequential_string);
6221   // Locate first character of underlying subject string.
6222   STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
6223   __ Addu(t1, t1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6224 
6225   __ bind(&allocate_result);
6226   // Allocate the result; the encoding check below picks ASCII vs two-byte.
6227   STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
6228   __ And(t0, a1, Operand(kStringEncodingMask));
6229   __ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg));
6230 
6231   // Allocate and copy the resulting ASCII string.
6232   __ AllocateAsciiString(v0, a2, t0, t2, t3, &runtime);
6233 
6234   // Locate first character of substring to copy.
6235   __ Addu(t1, t1, a3);
6236 
6237   // Locate first character of result.
6238   __ Addu(a1, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6239 
6240   // v0: result string
6241   // a1: first character of result string
6242   // a2: result string length
6243   // t1: first character of substring to copy
6244   STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
6245   StringHelper::GenerateCopyCharactersLong(
6246       masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
6247   __ jmp(&return_v0);
6248 
6249   // Allocate and copy the resulting two-byte string.
6250   __ bind(&two_byte_sequential);
6251   __ AllocateTwoByteString(v0, a2, t0, t2, t3, &runtime);
6252 
6253   // Locate first character of substring to copy.
6254   STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
6255   __ sll(t0, a3, 1);
6256   __ Addu(t1, t1, t0);
6257   // Locate first character of result.
6258   __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6259 
6260   // v0: result string.
6261   // a1: first character of result.
6262   // a2: result length.
6263   // t1: first character of substring to copy.
6264   STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
6265   StringHelper::GenerateCopyCharactersLong(
6266       masm, a1, t1, a2, a3, t0, t2, t3, t4, DEST_ALWAYS_ALIGNED);
6267 
6268   __ bind(&return_v0);
6269   Counters* counters = masm->isolate()->counters();
6270   __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
6271   __ DropAndRet(3);
6272 
6273   // Just jump to runtime to create the sub string.
6274   __ bind(&runtime);
6275   __ TailCallRuntime(Runtime::kSubString, 3, 1);
6276 }
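
// Decision tree taken above, in outline (illustrative sketch):
//
//   if (args are not smis, from < 0, from > to, or "string" is not a string)
//                                         -> runtime;
//   if (len == string.length)             return the string itself;
//   if (len == 2)                          probe the symbol table first;
//   unwrap cons/sliced strings to the underlying string, adjusting `from`;
//   if (FLAG_string_slices && len >= SlicedString::kMinLength)
//                                         allocate a SlicedString header;
//   else                                  allocate a flat string and copy.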
6277 
6278 
6279 void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
6280                                                       Register left,
6281                                                       Register right,
6282                                                       Register scratch1,
6283                                                       Register scratch2,
6284                                                       Register scratch3) {
6285   Register length = scratch1;
6286 
6287   // Compare lengths.
6288   Label strings_not_equal, check_zero_length;
6289   __ lw(length, FieldMemOperand(left, String::kLengthOffset));
6290   __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
6291   __ Branch(&check_zero_length, eq, length, Operand(scratch2));
6292   __ bind(&strings_not_equal);
6293   __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
6294   __ Ret();
6295 
6296   // Check if the length is zero.
6297   Label compare_chars;
6298   __ bind(&check_zero_length);
6299   STATIC_ASSERT(kSmiTag == 0);
6300   __ Branch(&compare_chars, ne, length, Operand(zero_reg));
6301   __ li(v0, Operand(Smi::FromInt(EQUAL)));
6302   __ Ret();
6303 
6304   // Compare characters.
6305   __ bind(&compare_chars);
6306 
6307   GenerateAsciiCharsCompareLoop(masm,
6308                                 left, right, length, scratch2, scratch3, v0,
6309                                 &strings_not_equal);
6310 
6311   // Characters are equal.
6312   __ li(v0, Operand(Smi::FromInt(EQUAL)));
6313   __ Ret();
6314 }
6315 
6316 
6317 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
6318                                                         Register left,
6319                                                         Register right,
6320                                                         Register scratch1,
6321                                                         Register scratch2,
6322                                                         Register scratch3,
6323                                                         Register scratch4) {
6324   Label result_not_equal, compare_lengths;
6325   // Find minimum length and length difference.
6326   __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
6327   __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
6328   __ Subu(scratch3, scratch1, Operand(scratch2));
6329   Register length_delta = scratch3;
6330   __ slt(scratch4, scratch2, scratch1);
6331   __ Movn(scratch1, scratch2, scratch4);
6332   Register min_length = scratch1;
6333   STATIC_ASSERT(kSmiTag == 0);
6334   __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
6335 
6336   // Compare loop.
6337   GenerateAsciiCharsCompareLoop(masm,
6338                                 left, right, min_length, scratch2, scratch4, v0,
6339                                 &result_not_equal);
6340 
6341   // Compare lengths - strings up to min-length are equal.
6342   __ bind(&compare_lengths);
6343   ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
6344   // Use length_delta as result if it's zero.
6345   __ mov(scratch2, length_delta);
6346   __ mov(scratch4, zero_reg);
6347   __ mov(v0, zero_reg);
6348 
6349   __ bind(&result_not_equal);
6350   // Conditionally update the result based on either length_delta or
6351   // the last comparison performed in the loop above.
6352   Label ret;
6353   __ Branch(&ret, eq, scratch2, Operand(scratch4));
6354   __ li(v0, Operand(Smi::FromInt(GREATER)));
6355   __ Branch(&ret, gt, scratch2, Operand(scratch4));
6356   __ li(v0, Operand(Smi::FromInt(LESS)));
6357   __ bind(&ret);
6358   __ Ret();
6359 }
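
// Semantically this is the usual flat-string comparison (illustrative):
//
//   int min_length = Min(left.length, right.length);
//   int r = memcmp(left.chars, right.chars, min_length);
//   if (r == 0) r = left.length - right.length;
//   v0 = Smi::FromInt(r < 0 ? LESS : r > 0 ? GREATER : EQUAL);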
6360 
6361 
6362 void StringCompareStub::GenerateAsciiCharsCompareLoop(
6363     MacroAssembler* masm,
6364     Register left,
6365     Register right,
6366     Register length,
6367     Register scratch1,
6368     Register scratch2,
6369     Register scratch3,
6370     Label* chars_not_equal) {
6371   // Change index to run from -length to -1 by adding length to string
6372   // start. This means that loop ends when index reaches zero, which
6373   // doesn't need an additional compare.
6374   __ SmiUntag(length);
6375   __ Addu(scratch1, length,
6376           Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6377   __ Addu(left, left, Operand(scratch1));
6378   __ Addu(right, right, Operand(scratch1));
6379   __ Subu(length, zero_reg, length);
6380   Register index = length;  // index = -length;
6381 
6382 
6383   // Compare loop.
6384   Label loop;
6385   __ bind(&loop);
6386   __ Addu(scratch3, left, index);
6387   __ lbu(scratch1, MemOperand(scratch3));
6388   __ Addu(scratch3, right, index);
6389   __ lbu(scratch2, MemOperand(scratch3));
6390   __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
6391   __ Addu(index, index, 1);
6392   __ Branch(&loop, ne, index, Operand(zero_reg));
6393 }
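
// The negative-index trick above in C terms (illustrative sketch):
//
//   const uint8_t* lp = left_chars + length;   // one past the last character
//   const uint8_t* rp = right_chars + length;
//   for (int i = -length; i != 0; i++) {
//     if (lp[i] != rp[i]) goto chars_not_equal;
//   }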
6394 
6395 
6396 void StringCompareStub::Generate(MacroAssembler* masm) {
6397   Label runtime;
6398 
6399   Counters* counters = masm->isolate()->counters();
6400 
6401   // Stack frame on entry.
6402   //  sp[0]: right string
6403   //  sp[4]: left string
6404   __ lw(a1, MemOperand(sp, 1 * kPointerSize));  // Left.
6405   __ lw(a0, MemOperand(sp, 0 * kPointerSize));  // Right.
6406 
6407   Label not_same;
6408   __ Branch(&not_same, ne, a0, Operand(a1));
6409   STATIC_ASSERT(EQUAL == 0);
6410   STATIC_ASSERT(kSmiTag == 0);
6411   __ li(v0, Operand(Smi::FromInt(EQUAL)));
6412   __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
6413   __ DropAndRet(2);
6414 
6415   __ bind(&not_same);
6416 
6417   // Check that both objects are sequential ASCII strings.
6418   __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime);
6419 
6420   // Compare flat ASCII strings natively. Remove arguments from stack first.
6421   __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
6422   __ Addu(sp, sp, Operand(2 * kPointerSize));
6423   GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1);
6424 
6425   __ bind(&runtime);
6426   __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
6427 }
6428 
6429 
6430 void StringAddStub::Generate(MacroAssembler* masm) {
6431   Label call_runtime, call_builtin;
6432   Builtins::JavaScript builtin_id = Builtins::ADD;
6433 
6434   Counters* counters = masm->isolate()->counters();
6435 
6436   // Stack on entry:
6437   // sp[0]: second argument (right).
6438   // sp[4]: first argument (left).
6439 
6440   // Load the two arguments.
6441   __ lw(a0, MemOperand(sp, 1 * kPointerSize));  // First argument.
6442   __ lw(a1, MemOperand(sp, 0 * kPointerSize));  // Second argument.
6443 
6444   // Make sure that both arguments are strings if not known in advance.
6445   if (flags_ == NO_STRING_ADD_FLAGS) {
6446     __ JumpIfEitherSmi(a0, a1, &call_runtime);
6447     // Load instance types.
6448     __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
6449     __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
6450     __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
6451     __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
6452     STATIC_ASSERT(kStringTag == 0);
6453     // If either is not a string, go to runtime.
6454     __ Or(t4, t0, Operand(t1));
6455     __ And(t4, t4, Operand(kIsNotStringMask));
6456     __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
6457   } else {
6458     // Here at least one of the arguments is definitely a string.
6459     // We convert the one that is not known to be a string.
6460     if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
6461       ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
6462       GenerateConvertArgument(
6463           masm, 1 * kPointerSize, a0, a2, a3, t0, t1, &call_builtin);
6464       builtin_id = Builtins::STRING_ADD_RIGHT;
6465     } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
6466       ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
6467       GenerateConvertArgument(
6468           masm, 0 * kPointerSize, a1, a2, a3, t0, t1, &call_builtin);
6469       builtin_id = Builtins::STRING_ADD_LEFT;
6470     }
6471   }
6472 
6473   // Both arguments are strings.
6474   // a0: first string
6475   // a1: second string
6476   // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6477   // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6478   {
6479     Label strings_not_empty;
6480     // Check if either of the strings are empty. In that case return the other.
6481     // These tests use a zero-length check on the string-length, which is a Smi.
6482     // Assert that Smi::FromInt(0) is really 0.
6483     STATIC_ASSERT(kSmiTag == 0);
6484     ASSERT(Smi::FromInt(0) == 0);
6485     __ lw(a2, FieldMemOperand(a0, String::kLengthOffset));
6486     __ lw(a3, FieldMemOperand(a1, String::kLengthOffset));
6487     __ mov(v0, a0);       // Assume we'll return first string (from a0).
6488     __ Movz(v0, a1, a2);  // If first is empty, return second (from a1).
6489     __ slt(t4, zero_reg, a2);   // if (a2 > 0) t4 = 1.
6490     __ slt(t5, zero_reg, a3);   // if (a3 > 0) t5 = 1.
6491     __ and_(t4, t4, t5);        // Branch if both strings were non-empty.
6492     __ Branch(&strings_not_empty, ne, t4, Operand(zero_reg));
6493 
6494     __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6495     __ DropAndRet(2);
6496 
6497     __ bind(&strings_not_empty);
6498   }
6499 
6500   // Untag both string-lengths.
6501   __ sra(a2, a2, kSmiTagSize);
6502   __ sra(a3, a3, kSmiTagSize);
6503 
6504   // Both strings are non-empty.
6505   // a0: first string
6506   // a1: second string
6507   // a2: length of first string
6508   // a3: length of second string
6509   // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6510   // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6511   // Look at the length of the result of adding the two strings.
6512   Label string_add_flat_result, longer_than_two;
6513   // Adding two lengths can't overflow.
6514   STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
6515   __ Addu(t2, a2, Operand(a3));
6516   // Use the symbol table when adding two one-character strings, as it
6517   // helps later optimizations to return a symbol here.
6518   __ Branch(&longer_than_two, ne, t2, Operand(2));
6519 
6520   // Check that both strings are non-external ASCII strings.
6521   if (flags_ != NO_STRING_ADD_FLAGS) {
6522     __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
6523     __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
6524     __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
6525     __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
6526   }
6527   __ JumpIfBothInstanceTypesAreNotSequentialAscii(t0, t1, t2, t3,
6528                                                  &call_runtime);
6529 
6530   // Get the two characters forming the sub string.
6531   __ lbu(a2, FieldMemOperand(a0, SeqAsciiString::kHeaderSize));
6532   __ lbu(a3, FieldMemOperand(a1, SeqAsciiString::kHeaderSize));
6533 
6534   // Try to look up the two-character string in the symbol table. If it is
6535   // not found, just allocate a new one.
6536   Label make_two_character_string;
6537   StringHelper::GenerateTwoCharacterSymbolTableProbe(
6538       masm, a2, a3, t2, t3, t0, t1, t5, &make_two_character_string);
6539   __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6540   __ DropAndRet(2);
6541 
6542   __ bind(&make_two_character_string);
6543   // The resulting string has length 2, and the first characters of the two
6544   // strings are combined into a single halfword in the a2 register.
6545   // So we can fill the resulting string without two loops, using a single
6546   // halfword store instruction (which assumes that the processor is
6547   // in little-endian mode).
6548   __ li(t2, Operand(2));
6549   __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
6550   __ sh(a2, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
6551   __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6552   __ DropAndRet(2);
6553 
6554   __ bind(&longer_than_two);
6555   // Check if resulting string will be flat.
6556   __ Branch(&string_add_flat_result, lt, t2, Operand(ConsString::kMinLength));
6557   // Handle exceptionally long strings in the runtime system.
6558   STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
6559   ASSERT(IsPowerOf2(String::kMaxLength + 1));
6560   // kMaxLength + 1 is representable as a shifted literal; kMaxLength is not.
6561   __ Branch(&call_runtime, hs, t2, Operand(String::kMaxLength + 1));
6562 
6563   // If result is not supposed to be flat, allocate a cons string object.
6564   // If both strings are ASCII the result is an ASCII cons string.
6565   if (flags_ != NO_STRING_ADD_FLAGS) {
6566     __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
6567     __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
6568     __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
6569     __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
6570   }
6571   Label non_ascii, allocated, ascii_data;
6572   STATIC_ASSERT(kTwoByteStringTag == 0);
6573   // Branch to non_ascii if either string-encoding field is zero (non-ASCII).
6574   __ And(t4, t0, Operand(t1));
6575   __ And(t4, t4, Operand(kStringEncodingMask));
6576   __ Branch(&non_ascii, eq, t4, Operand(zero_reg));
6577 
6578   // Allocate an ASCII cons string.
6579   __ bind(&ascii_data);
6580   __ AllocateAsciiConsString(v0, t2, t0, t1, &call_runtime);
6581   __ bind(&allocated);
6582   // Fill the fields of the cons string.
6583   __ sw(a0, FieldMemOperand(v0, ConsString::kFirstOffset));
6584   __ sw(a1, FieldMemOperand(v0, ConsString::kSecondOffset));
6585   __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6586   __ DropAndRet(2);
6587 
6588   __ bind(&non_ascii);
6589   // At least one of the strings is two-byte. Check whether it happens
6590   // to contain only ASCII characters.
6591   // t0: first instance type.
6592   // t1: second instance type.
6593   // Branch to ascii_data if _both_ instances have the kAsciiDataHintMask set.
6594   __ And(at, t0, Operand(kAsciiDataHintMask));
6595   __ and_(at, at, t1);
6596   __ Branch(&ascii_data, ne, at, Operand(zero_reg));
6597 
6598   __ xor_(t0, t0, t1);
6599   STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
6600   __ And(t0, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
6601   __ Branch(&ascii_data, eq, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
6602 
6603   // Allocate a two byte cons string.
6604   __ AllocateTwoByteConsString(v0, t2, t0, t1, &call_runtime);
6605   __ Branch(&allocated);
6606 
6607   // We cannot encounter sliced strings or cons strings here since:
6608   STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
6609   // Handle creating a flat result from either external or sequential strings.
6610   // Locate the first characters' locations.
6611   // a0: first string
6612   // a1: second string
6613   // a2: length of first string
6614   // a3: length of second string
6615   // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6616   // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
6617   // t2: sum of lengths.
6618   Label first_prepared, second_prepared;
6619   __ bind(&string_add_flat_result);
6620   if (flags_ != NO_STRING_ADD_FLAGS) {
6621     __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
6622     __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
6623     __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
6624     __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
6625   }
6626   // Check whether both strings have same encoding
6627   __ Xor(t3, t0, Operand(t1));
6628   __ And(t3, t3, Operand(kStringEncodingMask));
6629   __ Branch(&call_runtime, ne, t3, Operand(zero_reg));
6630 
6631   STATIC_ASSERT(kSeqStringTag == 0);
6632   __ And(t4, t0, Operand(kStringRepresentationMask));
6633 
6634   STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
6635   Label skip_first_add;
6636   __ Branch(&skip_first_add, ne, t4, Operand(zero_reg));
6637   __ Branch(USE_DELAY_SLOT, &first_prepared);
6638   __ addiu(t3, a0, SeqAsciiString::kHeaderSize - kHeapObjectTag);
  __ bind(&skip_first_add);
  // External string: rule out short external string and load string resource.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ And(t4, t0, Operand(kShortExternalStringMask));
  __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
  __ lw(t3, FieldMemOperand(a0, ExternalString::kResourceDataOffset));
  __ bind(&first_prepared);

  STATIC_ASSERT(kSeqStringTag == 0);
  __ And(t4, t1, Operand(kStringRepresentationMask));
  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
  Label skip_second_add;
  __ Branch(&skip_second_add, ne, t4, Operand(zero_reg));
  __ Branch(USE_DELAY_SLOT, &second_prepared);
  __ addiu(a1, a1, SeqAsciiString::kHeaderSize - kHeapObjectTag);
  __ bind(&skip_second_add);
  // External string: rule out short external string and load string resource.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ And(t4, t1, Operand(kShortExternalStringMask));
  __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
  __ lw(a1, FieldMemOperand(a1, ExternalString::kResourceDataOffset));
  __ bind(&second_prepared);

  Label non_ascii_string_add_flat_result;
  // t3: first character of first string
  // a1: first character of second string
  // a2: length of first string
  // a3: length of second string
  // t2: sum of lengths.
  // Both strings have the same encoding.
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ And(t4, t1, Operand(kStringEncodingMask));
  __ Branch(&non_ascii_string_add_flat_result, eq, t4, Operand(zero_reg));

  __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
  __ Addu(t2, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
  // v0: result string.
  // t3: first character of first string.
  // a1: first character of second string.
  // a2: length of first string.
  // a3: length of second string.
  // t2: first character of result.

  StringHelper::GenerateCopyCharacters(masm, t2, t3, a2, t0, true);
  // t2: next character of result.
  StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, true);
  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
  __ DropAndRet(2);

  __ bind(&non_ascii_string_add_flat_result);
  __ AllocateTwoByteString(v0, t2, t0, t1, t5, &call_runtime);
  __ Addu(t2, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  // v0: result string.
  // t3: first character of first string.
  // a1: first character of second string.
  // a2: length of first string.
  // a3: length of second string.
  // t2: first character of result.
  StringHelper::GenerateCopyCharacters(masm, t2, t3, a2, t0, false);
  // t2: next character of result.
  StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, false);

  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
  __ DropAndRet(2);

  // Just jump to runtime to add the two strings.
  __ bind(&call_runtime);
  __ TailCallRuntime(Runtime::kStringAdd, 2, 1);

  if (call_builtin.is_linked()) {
    __ bind(&call_builtin);
    __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
  }
}


void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
                                            int stack_offset,
                                            Register arg,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3,
                                            Register scratch4,
                                            Label* slow) {
  // First check if the argument is already a string.
  Label not_string, done;
  __ JumpIfSmi(arg, &not_string);
  __ GetObjectType(arg, scratch1, scratch1);
  __ Branch(&done, lt, scratch1, Operand(FIRST_NONSTRING_TYPE));

  // Check the number to string cache.
  Label not_cached;
  __ bind(&not_string);
  // Puts the cached result into scratch1.
  NumberToStringStub::GenerateLookupNumberStringCache(masm,
                                                      arg,
                                                      scratch1,
                                                      scratch2,
                                                      scratch3,
                                                      scratch4,
                                                      false,
                                                      &not_cached);
  __ mov(arg, scratch1);
  __ sw(arg, MemOperand(sp, stack_offset));
  __ jmp(&done);

  // Check if the argument is a safe string wrapper.
  __ bind(&not_cached);
  __ JumpIfSmi(arg, slow);
  __ GetObjectType(arg, scratch1, scratch2);  // map -> scratch1.
  __ Branch(slow, ne, scratch2, Operand(JS_VALUE_TYPE));
  __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
  __ li(scratch4, 1 << Map::kStringWrapperSafeForDefaultValueOf);
  __ And(scratch2, scratch2, scratch4);
  __ Branch(slow, ne, scratch2, Operand(scratch4));
  __ lw(arg, FieldMemOperand(arg, JSValue::kValueOffset));
  __ sw(arg, MemOperand(sp, stack_offset));

  __ bind(&done);
}
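
// The conversion above, sketched as pseudocode (illustrative only; the names
// follow the registers used by the stub):
//   if (arg is a String) return arg;
//   if (arg is a number with a cached string) arg = cached string;
//   else if (arg is a JSValue wrapping a string, with valueOf untouched)
//     arg = wrapped string;
//   else goto slow;
//   stack[stack_offset] = arg;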


void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::SMIS);
  Label miss;
  __ Or(a2, a1, a0);
  __ JumpIfNotSmi(a2, &miss);

  if (GetCondition() == eq) {
    // For equality we do not care about the sign of the result.
    __ Subu(v0, a0, a1);
  } else {
    // Untag before subtracting to avoid handling overflow.
    __ SmiUntag(a1);
    __ SmiUntag(a0);
    __ Subu(v0, a1, a0);
  }
  __ Ret();

  __ bind(&miss);
  GenerateMiss(masm);
}
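
// Illustrative C sketch of the ordered smi fast path above (assumption:
// 31-bit smis with tag bit 0 and kSmiTagSize == 1):
//   int32_t lhs = a1 >> kSmiTagSize;  // SmiUntag
//   int32_t rhs = a0 >> kSmiTagSize;
//   v0 = lhs - rhs;  // < 0: less, 0: equal, > 0: greater
// Untagging first keeps the subtraction within int32 range, so no overflow
// handling is needed.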


void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::HEAP_NUMBERS);

  Label generic_stub;
  Label unordered, maybe_undefined1, maybe_undefined2;
  Label miss;
  __ And(a2, a1, Operand(a0));
  __ JumpIfSmi(a2, &generic_stub);

  __ GetObjectType(a0, a2, a2);
  __ Branch(&maybe_undefined1, ne, a2, Operand(HEAP_NUMBER_TYPE));
  __ GetObjectType(a1, a2, a2);
  __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));

  // Inlining the double comparison and falling back to the general compare
  // stub if NaN is involved or FPU is unsupported.
  if (CpuFeatures::IsSupported(FPU)) {
    CpuFeatures::Scope scope(FPU);

    // Load left and right operand.
    __ Subu(a2, a1, Operand(kHeapObjectTag));
    __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
    __ Subu(a2, a0, Operand(kHeapObjectTag));
    __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));

    // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
    Label fpu_eq, fpu_lt;
    // Test if equal, and also handle the unordered/NaN case.
    __ BranchF(&fpu_eq, &unordered, eq, f0, f2);

    // Test if less (unordered case is already handled).
    __ BranchF(&fpu_lt, NULL, lt, f0, f2);

    // Otherwise it's greater, so just fall through, and return.
    __ li(v0, Operand(GREATER));
    __ Ret();

    __ bind(&fpu_eq);
    __ li(v0, Operand(EQUAL));
    __ Ret();

    __ bind(&fpu_lt);
    __ li(v0, Operand(LESS));
    __ Ret();
  }

  __ bind(&unordered);

  CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
  __ bind(&generic_stub);
  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);

  __ bind(&maybe_undefined1);
  if (Token::IsOrderedRelationalCompareOp(op_)) {
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    __ Branch(&miss, ne, a0, Operand(at));
    __ GetObjectType(a1, a2, a2);
    __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
    __ jmp(&unordered);
  }

  __ bind(&maybe_undefined2);
  if (Token::IsOrderedRelationalCompareOp(op_)) {
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    __ Branch(&unordered, eq, a1, Operand(at));
  }

  __ bind(&miss);
  GenerateMiss(masm);
}
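
// Note (illustrative): LESS, EQUAL and GREATER are the constants -1, 0 and 1
// (see the "-1, 0, or 1" comment above), so the three FPU exits match the
// contract of the smi fast path; NaN operands take the unordered exit into
// the generic CompareStub instead.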


void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::SYMBOLS);
  Label miss;

  // Registers containing left and right operands respectively.
  Register left = a1;
  Register right = a0;
  Register tmp1 = a2;
  Register tmp2 = a3;

  // Check that both operands are heap objects.
  __ JumpIfEitherSmi(left, right, &miss);

  // Check that both operands are symbols.
  __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kSymbolTag != 0);
  __ And(tmp1, tmp1, Operand(tmp2));
  __ And(tmp1, tmp1, kIsSymbolMask);
  __ Branch(&miss, eq, tmp1, Operand(zero_reg));
  // Make sure a0 is non-zero. At this point input operands are
  // guaranteed to be non-zero.
  ASSERT(right.is(a0));
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ mov(v0, right);
  // Symbols are compared by identity.
  __ Ret(ne, left, Operand(right));
  __ li(v0, Operand(Smi::FromInt(EQUAL)));
  __ Ret();

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::STRINGS);
  Label miss;

  bool equality = Token::IsEqualityOp(op_);

  // Registers containing left and right operands respectively.
  Register left = a1;
  Register right = a0;
  Register tmp1 = a2;
  Register tmp2 = a3;
  Register tmp3 = t0;
  Register tmp4 = t1;
  Register tmp5 = t2;

  // Check that both operands are heap objects.
  __ JumpIfEitherSmi(left, right, &miss);

  // Check that both operands are strings. This leaves the instance
  // types loaded in tmp1 and tmp2.
  __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kNotStringTag != 0);
  __ Or(tmp3, tmp1, tmp2);
  __ And(tmp5, tmp3, Operand(kIsNotStringMask));
  __ Branch(&miss, ne, tmp5, Operand(zero_reg));

  // Fast check for identical strings.
  Label left_ne_right;
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ Branch(&left_ne_right, ne, left, Operand(right));
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, zero_reg);  // In the delay slot.
  __ bind(&left_ne_right);

  // Handle not identical strings.

  // Check that both strings are symbols. If they are, we're done
  // because we already know they are not identical.
  if (equality) {
    ASSERT(GetCondition() == eq);
    STATIC_ASSERT(kSymbolTag != 0);
    __ And(tmp3, tmp1, Operand(tmp2));
    __ And(tmp5, tmp3, Operand(kIsSymbolMask));
    Label is_symbol;
    __ Branch(&is_symbol, eq, tmp5, Operand(zero_reg));
    // Make sure a0 is non-zero. At this point input operands are
    // guaranteed to be non-zero.
    ASSERT(right.is(a0));
    __ Ret(USE_DELAY_SLOT);
    __ mov(v0, a0);  // In the delay slot.
    __ bind(&is_symbol);
  }

  // Check that both strings are sequential ASCII.
  Label runtime;
  __ JumpIfBothInstanceTypesAreNotSequentialAscii(
      tmp1, tmp2, tmp3, tmp4, &runtime);

  // Compare flat ASCII strings. Returns when done.
  if (equality) {
    StringCompareStub::GenerateFlatAsciiStringEquals(
        masm, left, right, tmp1, tmp2, tmp3);
  } else {
    StringCompareStub::GenerateCompareFlatAsciiStrings(
        masm, left, right, tmp1, tmp2, tmp3, tmp4);
  }

  // Handle more complex cases in runtime.
  __ bind(&runtime);
  __ Push(left, right);
  if (equality) {
    __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
  } else {
    __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
  }

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::OBJECTS);
  Label miss;
  __ And(a2, a1, Operand(a0));
  __ JumpIfSmi(a2, &miss);

  __ GetObjectType(a0, a2, a2);
  __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
  __ GetObjectType(a1, a2, a2);
  __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));

  ASSERT(GetCondition() == eq);
  __ Ret(USE_DELAY_SLOT);
  __ subu(v0, a0, a1);

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
  Label miss;
  __ And(a2, a1, a0);
  __ JumpIfSmi(a2, &miss);
  __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
  __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
  __ Branch(&miss, ne, a2, Operand(known_map_));
  __ Branch(&miss, ne, a3, Operand(known_map_));

  __ Ret(USE_DELAY_SLOT);
  __ subu(v0, a0, a1);

  __ bind(&miss);
  GenerateMiss(masm);
}

void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
  {
    // Call the runtime system in a fresh internal frame.
    ExternalReference miss =
        ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ Push(a1, a0);
    __ push(ra);
    __ Push(a1, a0);
    __ li(t0, Operand(Smi::FromInt(op_)));
    __ addiu(sp, sp, -kPointerSize);
    __ CallExternalReference(miss, 3, USE_DELAY_SLOT);
    __ sw(t0, MemOperand(sp));  // In the delay slot.
    // Compute the entry point of the rewritten stub.
    __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
    // Restore registers.
    __ Pop(a1, a0, ra);
  }
  __ Jump(a2);
}


void DirectCEntryStub::Generate(MacroAssembler* masm) {
  // No need to pop or drop anything, LeaveExitFrame will restore the old
  // stack, thus dropping the allocated space for the return value.
  // The saved ra is after the reserved stack space for the 4 args.
  __ lw(t9, MemOperand(sp, kCArgsSlotsSize));

  if (FLAG_debug_code && FLAG_enable_slow_asserts) {
    // In case of an error the return address may point to a memory area
    // filled with kZapValue by the GC.
    // Dereference the address and check for this.
    __ lw(t0, MemOperand(t9));
    __ Assert(ne, "Received invalid return address.", t0,
        Operand(reinterpret_cast<uint32_t>(kZapValue)));
  }
  __ Jump(t9);
}


void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
                                    ExternalReference function) {
  __ li(t9, Operand(function));
  this->GenerateCall(masm, t9);
}


void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
                                    Register target) {
  __ Move(t9, target);
  __ AssertStackIsAligned();
  // Allocate space for arg slots.
  __ Subu(sp, sp, kCArgsSlotsSize);

  // Block the trampoline pool through the whole function to make sure the
  // number of generated instructions is constant.
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);

  // We need to get the current 'pc' value, which is not available on MIPS.
  Label find_ra;
  masm->bal(&find_ra);  // ra = pc + 8.
  masm->nop();  // Branch delay slot nop.
  masm->bind(&find_ra);

  const int kNumInstructionsToJump = 6;
  masm->addiu(ra, ra, kNumInstructionsToJump * kPointerSize);
  // Push return address (accessible to GC through exit frame pc).
  // This spot for ra was reserved in EnterExitFrame.
  masm->sw(ra, MemOperand(sp, kCArgsSlotsSize));
  masm->li(ra,
           Operand(reinterpret_cast<intptr_t>(GetCode().location()),
                   RelocInfo::CODE_TARGET),
           CONSTANT_SIZE);
  // Call the function.
  masm->Jump(t9);
  // Make sure the stored 'ra' points to this position.
  ASSERT_EQ(kNumInstructionsToJump, masm->InstructionsGeneratedSince(&find_ra));
}
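
// Illustrative layout of the return-address computation above (assumptions:
// 4-byte instructions; li with CONSTANT_SIZE emits two instructions; Jump(t9)
// emits jr plus a delay-slot nop -- the ASSERT_EQ above checks the count):
//   bal   find_ra         ; ra = pc + 8, i.e. the address of find_ra
//   nop                   ; branch delay slot
// find_ra:
//   addiu ra, ra, 6 * 4   ; ra = find_ra + 24, just past the jr's delay slot
//   sw    ra, [sp + ...]  ; publish ra so the GC sees the exit frame pc
//   lui/ori ra, <stub>    ; fixed-size li of this stub's code object
//   jr    t9              ; call the function
//   nop                   ; delay slot; the stored ra points right after this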


void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
                                                        Label* miss,
                                                        Label* done,
                                                        Register receiver,
                                                        Register properties,
                                                        Handle<String> name,
                                                        Register scratch0) {
  // If the names of the slots probed for this hash value (probes 1 to
  // kProbes - 1) are not equal to the name, and the kProbes-th slot is
  // unused (its name is the undefined value), the hash table is guaranteed
  // not to contain the property. This holds even if some slots represent
  // deleted properties (their names are the hole value).
  for (int i = 0; i < kInlinedProbes; i++) {
    // scratch0 points to properties hash.
    // Compute the masked index: (hash + i + i * i) & mask.
    Register index = scratch0;
    // Capacity is smi 2^n.
    __ lw(index, FieldMemOperand(properties, kCapacityOffset));
    __ Subu(index, index, Operand(1));
    __ And(index, index, Operand(
        Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i))));

    // Scale the index by multiplying by the entry size.
    ASSERT(StringDictionary::kEntrySize == 3);
    __ sll(at, index, 1);
    __ Addu(index, index, at);
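    // (index *= 3, computed as index + (index << 1) to avoid a multiply.)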

    Register entity_name = scratch0;
    // Having undefined at this place means the name is not contained.
    ASSERT_EQ(kSmiTagSize, 1);
    Register tmp = properties;
    __ sll(scratch0, index, 1);
    __ Addu(tmp, properties, scratch0);
    __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));

    ASSERT(!tmp.is(entity_name));
    __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
    __ Branch(done, eq, entity_name, Operand(tmp));

    if (i != kInlinedProbes - 1) {
      // Load the hole ready for use below:
      __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);

      // Stop if we found the property.
      __ Branch(miss, eq, entity_name, Operand(Handle<String>(name)));

      Label the_hole;
      __ Branch(&the_hole, eq, entity_name, Operand(tmp));

      // Check if the entry name is not a symbol.
      __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
      __ lbu(entity_name,
             FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
      __ And(scratch0, entity_name, Operand(kIsSymbolMask));
      __ Branch(miss, eq, scratch0, Operand(zero_reg));

      __ bind(&the_hole);

      // Restore the properties.
      __ lw(properties,
            FieldMemOperand(receiver, JSObject::kPropertiesOffset));
    }
  }

  const int spill_mask =
      (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
       a2.bit() | a1.bit() | a0.bit() | v0.bit());

  __ MultiPush(spill_mask);
  __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ li(a1, Operand(Handle<String>(name)));
  StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
  __ CallStub(&stub);
  __ mov(at, v0);
  __ MultiPop(spill_mask);

  __ Branch(done, eq, at, Operand(zero_reg));
  __ Branch(miss, ne, at, Operand(zero_reg));
}
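
// An illustrative C sketch of the quadratic probing shared by these lookup
// routines (assumptions: capacity is a power of two and
// StringDictionary::kEntrySize == 3):
//   int mask = capacity - 1;
//   for (int i = 0; i < kProbes; i++) {
//     int index = (hash + i + i * i) & mask;  // see GetProbeOffset(i)
//     Object* key = elements[index * 3];      // each entry is 3 words
//     if (key == undefined_value) break;      // name cannot be present
//     if (key == name) { /* found */ }
//   }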


// Probe the string dictionary in the |elements| register. Jump to the
// |done| label if a property with the given name is found. Jump to
// the |miss| label otherwise.
// If lookup was successful |scratch2| will be equal to elements + 4 * index.
void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
                                                        Label* miss,
                                                        Label* done,
                                                        Register elements,
                                                        Register name,
                                                        Register scratch1,
                                                        Register scratch2) {
  ASSERT(!elements.is(scratch1));
  ASSERT(!elements.is(scratch2));
  ASSERT(!name.is(scratch1));
  ASSERT(!name.is(scratch2));

  // Assert that name contains a string.
  if (FLAG_debug_code) __ AbortIfNotString(name);

  // Compute the capacity mask.
  __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
  __ sra(scratch1, scratch1, kSmiTagSize);  // Convert smi to int.
  __ Subu(scratch1, scratch1, Operand(1));

  // Generate an unrolled loop that performs a few probes before
  // giving up. Measurements done on Gmail indicate that 2 probes
  // cover ~93% of loads from dictionaries.
  for (int i = 0; i < kInlinedProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    __ lw(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
    if (i > 0) {
      // Add the probe offset (i + i * i) left shifted to avoid right shifting
      // the hash in a separate instruction. The value hash + i + i * i is
      // right shifted in the srl below.
      ASSERT(StringDictionary::GetProbeOffset(i) <
             1 << (32 - String::kHashFieldOffset));
      __ Addu(scratch2, scratch2, Operand(
           StringDictionary::GetProbeOffset(i) << String::kHashShift));
    }
    __ srl(scratch2, scratch2, String::kHashShift);
    __ And(scratch2, scratch1, scratch2);

    // Scale the index by multiplying by the element size.
    ASSERT(StringDictionary::kEntrySize == 3);
    // scratch2 = scratch2 * 3.
    __ sll(at, scratch2, 1);
    __ Addu(scratch2, scratch2, at);

    // Check if the key is identical to the name.
    __ sll(at, scratch2, 2);
    __ Addu(scratch2, elements, at);
    __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
    __ Branch(done, eq, name, Operand(at));
  }

  const int spill_mask =
      (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
       a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
      ~(scratch1.bit() | scratch2.bit());

  __ MultiPush(spill_mask);
  if (name.is(a0)) {
    ASSERT(!elements.is(a1));
    __ Move(a1, name);
    __ Move(a0, elements);
  } else {
    __ Move(a0, elements);
    __ Move(a1, name);
  }
  StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
  __ CallStub(&stub);
  __ mov(scratch2, a2);
  __ mov(at, v0);
  __ MultiPop(spill_mask);

  __ Branch(done, ne, at, Operand(zero_reg));
  __ Branch(miss, eq, at, Operand(zero_reg));
}


void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
  // we cannot call anything that could cause a GC from this stub.
  // Registers:
  //  a0: StringDictionary to probe.
  //  a1: key.
  //  a2: will hold the index of the entry if the lookup is successful;
  //      it may alias the result register.
  // Returns:
  //  v0: zero if the lookup failed, non-zero otherwise.

  Register result = v0;
  Register dictionary = a0;
  Register key = a1;
  Register index = a2;
  Register mask = a3;
  Register hash = t0;
  Register undefined = t1;
  Register entry_key = t2;

  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;

  __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset));
  __ sra(mask, mask, kSmiTagSize);
  __ Subu(mask, mask, Operand(1));

  __ lw(hash, FieldMemOperand(key, String::kHashFieldOffset));

  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);

  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    // Capacity is smi 2^n.
    if (i > 0) {
      // Add the probe offset (i + i * i) left shifted to avoid right shifting
      // the hash in a separate instruction. The value hash + i + i * i is
      // right shifted in the srl below.
      ASSERT(StringDictionary::GetProbeOffset(i) <
             1 << (32 - String::kHashFieldOffset));
      __ Addu(index, hash, Operand(
           StringDictionary::GetProbeOffset(i) << String::kHashShift));
    } else {
      __ mov(index, hash);
    }
    __ srl(index, index, String::kHashShift);
    __ And(index, mask, index);

    // Scale the index by multiplying by the entry size.
    ASSERT(StringDictionary::kEntrySize == 3);
    // index *= 3.
    __ mov(at, index);
    __ sll(index, index, 1);
    __ Addu(index, index, at);

    ASSERT_EQ(kSmiTagSize, 1);
    __ sll(index, index, 2);
    __ Addu(index, index, dictionary);
    __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));

    // Having undefined at this place means the name is not contained.
    __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));

    // Stop if we found the property.
    __ Branch(&in_dictionary, eq, entry_key, Operand(key));

    if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
      // Check if the entry name is not a symbol.
      __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
      __ lbu(entry_key,
             FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
      __ And(result, entry_key, Operand(kIsSymbolMask));
      __ Branch(&maybe_in_dictionary, eq, result, Operand(zero_reg));
    }
  }

  __ bind(&maybe_in_dictionary);
  // If we are doing negative lookup then probing failure should be
  // treated as a lookup success. For positive lookup probing failure
  // should be treated as lookup failure.
  if (mode_ == POSITIVE_LOOKUP) {
    __ Ret(USE_DELAY_SLOT);
    __ mov(result, zero_reg);
  }

  __ bind(&in_dictionary);
  __ Ret(USE_DELAY_SLOT);
  __ li(result, 1);

  __ bind(&not_in_dictionary);
  __ Ret(USE_DELAY_SLOT);
  __ mov(result, zero_reg);
}
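
// Contract note (illustrative): the stub returns in v0 either zero ("not
// found"; for NEGATIVE_LOOKUP this proves absence) or non-zero ("found", or,
// in NEGATIVE_LOOKUP mode, the probe budget was exhausted, which is treated
// conservatively as found). The callers above branch on v0 accordingly.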


struct AheadOfTimeWriteBarrierStubList {
  Register object, value, address;
  RememberedSetAction action;
};

#define REG(Name) { kRegister_ ## Name ## _Code }

static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
  // Used in RegExpExecStub.
  { REG(s2), REG(s0), REG(t3), EMIT_REMEMBERED_SET },
  { REG(s2), REG(a2), REG(t3), EMIT_REMEMBERED_SET },
  // Used in CompileArrayPushCall.
  // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
  // Also used in KeyedStoreIC::GenerateGeneric.
  { REG(a3), REG(t0), REG(t1), EMIT_REMEMBERED_SET },
  // Used in CompileStoreGlobal.
  { REG(t0), REG(a1), REG(a2), OMIT_REMEMBERED_SET },
  // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
  { REG(a1), REG(a2), REG(a3), EMIT_REMEMBERED_SET },
  { REG(a3), REG(a2), REG(a1), EMIT_REMEMBERED_SET },
  // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
  { REG(a2), REG(a1), REG(a3), EMIT_REMEMBERED_SET },
  { REG(a3), REG(a1), REG(a2), EMIT_REMEMBERED_SET },
  // KeyedStoreStubCompiler::GenerateStoreFastElement.
  { REG(a3), REG(a2), REG(t0), EMIT_REMEMBERED_SET },
  { REG(a2), REG(a3), REG(t0), EMIT_REMEMBERED_SET },
  // ElementsTransitionGenerator::GenerateSmiOnlyToObject
  // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
  // and ElementsTransitionGenerator::GenerateDoubleToObject
  { REG(a2), REG(a3), REG(t5), EMIT_REMEMBERED_SET },
  { REG(a2), REG(a3), REG(t5), OMIT_REMEMBERED_SET },
  // ElementsTransitionGenerator::GenerateDoubleToObject
  { REG(t2), REG(a2), REG(a0), EMIT_REMEMBERED_SET },
  { REG(a2), REG(t2), REG(t5), EMIT_REMEMBERED_SET },
  // StoreArrayLiteralElementStub::Generate
  { REG(t1), REG(a0), REG(t2), EMIT_REMEMBERED_SET },
  // Null termination.
  { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET }
};

#undef REG

bool RecordWriteStub::IsPregenerated() {
  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
       !entry->object.is(no_reg);
       entry++) {
    if (object_.is(entry->object) &&
        value_.is(entry->value) &&
        address_.is(entry->address) &&
        remembered_set_action_ == entry->action &&
        save_fp_regs_mode_ == kDontSaveFPRegs) {
      return true;
    }
  }
  return false;
}


bool StoreBufferOverflowStub::IsPregenerated() {
  return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
}


void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
  StoreBufferOverflowStub stub1(kDontSaveFPRegs);
  stub1.GetCode()->set_is_pregenerated(true);
}


void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
       !entry->object.is(no_reg);
       entry++) {
    RecordWriteStub stub(entry->object,
                         entry->value,
                         entry->address,
                         entry->action,
                         kDontSaveFPRegs);
    stub.GetCode()->set_is_pregenerated(true);
  }
}

// Takes the input in 3 registers: address_, value_, and object_.  A pointer
// to the value has just been written into the object; this stub then makes
// sure the GC is kept informed.  The word in the object where the value has
// been written is in the address register.
void RecordWriteStub::Generate(MacroAssembler* masm) {
  Label skip_to_incremental_noncompacting;
  Label skip_to_incremental_compacting;

  // The first two branch+nop instructions are generated with labels so as to
  // get the offset fixed up correctly by the bind(Label*) call.  We patch it
  // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
  // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
  // incremental heap marking.
  // See RecordWriteStub::Patch for details.
  __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
  __ nop();
  __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
  __ nop();

  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
    __ RememberedSetHelper(object_,
                           address_,
                           value_,
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);
  }
  __ Ret();

  __ bind(&skip_to_incremental_noncompacting);
  GenerateIncremental(masm, INCREMENTAL);

  __ bind(&skip_to_incremental_compacting);
  GenerateIncremental(masm, INCREMENTAL_COMPACTION);

  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
  // Will be checked in IncrementalMarking::ActivateGeneratedStub.

  PatchBranchIntoNop(masm, 0);
  PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
}
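
// Illustrative view of the patchable header emitted above (not generated
// code; offsets are bytes from the stub start):
//   offset  0: beq/bne zero_reg, zero_reg, skip_to_incremental_noncompacting
//   offset  4: nop                                  (delay slot)
//   offset  8: beq/bne zero_reg, zero_reg, skip_to_incremental_compacting
//   offset 12: nop                                  (delay slot)
// "bne zero_reg, zero_reg" never branches (an effective nop), while
// "beq zero_reg, zero_reg" always branches, so patching one opcode switches
// the stub between store-buffer-only and incremental-marking behaviour.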


void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
  regs_.Save(masm);

  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
    Label dont_need_remembered_set;

    __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
    __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
                           regs_.scratch0(),
                           &dont_need_remembered_set);

    __ CheckPageFlag(regs_.object(),
                     regs_.scratch0(),
                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
                     ne,
                     &dont_need_remembered_set);

    // First notify the incremental marker if necessary, then update the
    // remembered set.
    CheckNeedsToInformIncrementalMarker(
        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
    InformIncrementalMarker(masm, mode);
    regs_.Restore(masm);
    __ RememberedSetHelper(object_,
                           address_,
                           value_,
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);

    __ bind(&dont_need_remembered_set);
  }

  CheckNeedsToInformIncrementalMarker(
      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
  InformIncrementalMarker(masm, mode);
  regs_.Restore(masm);
  __ Ret();
}


void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
  int argument_count = 3;
  __ PrepareCallCFunction(argument_count, regs_.scratch0());
  Register address =
      a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
  ASSERT(!address.is(regs_.object()));
  ASSERT(!address.is(a0));
  __ Move(address, regs_.address());
  __ Move(a0, regs_.object());
  if (mode == INCREMENTAL_COMPACTION) {
    __ Move(a1, address);
  } else {
    ASSERT(mode == INCREMENTAL);
    __ lw(a1, MemOperand(address, 0));
  }
  __ li(a2, Operand(ExternalReference::isolate_address()));

  AllowExternalCallThatCantCauseGC scope(masm);
  if (mode == INCREMENTAL_COMPACTION) {
    __ CallCFunction(
        ExternalReference::incremental_evacuation_record_write_function(
            masm->isolate()),
        argument_count);
  } else {
    ASSERT(mode == INCREMENTAL);
    __ CallCFunction(
        ExternalReference::incremental_marking_record_write_function(
            masm->isolate()),
        argument_count);
  }
  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
}


void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
    MacroAssembler* masm,
    OnNoNeedToInformIncrementalMarker on_no_need,
    Mode mode) {
  Label on_black;
  Label need_incremental;
  Label need_incremental_pop_scratch;

  // Let's look at the color of the object:  If it is not black we don't have
  // to inform the incremental marker.
  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);

  regs_.Restore(masm);
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object_,
                           address_,
                           value_,
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ Ret();
  }

  __ bind(&on_black);

  // Get the value from the slot.
  __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));

  if (mode == INCREMENTAL_COMPACTION) {
    Label ensure_not_white;

    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
                     regs_.scratch1(),  // Scratch.
                     MemoryChunk::kEvacuationCandidateMask,
                     eq,
                     &ensure_not_white);

    __ CheckPageFlag(regs_.object(),
                     regs_.scratch1(),  // Scratch.
                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
                     eq,
                     &need_incremental);

    __ bind(&ensure_not_white);
  }

  // We need extra registers for this, so we push the object and the address
  // register temporarily.
  __ Push(regs_.object(), regs_.address());
  __ EnsureNotWhite(regs_.scratch0(),  // The value.
                    regs_.scratch1(),  // Scratch.
                    regs_.object(),  // Scratch.
                    regs_.address(),  // Scratch.
                    &need_incremental_pop_scratch);
  __ Pop(regs_.object(), regs_.address());

  regs_.Restore(masm);
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object_,
                           address_,
                           value_,
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ Ret();
  }

  __ bind(&need_incremental_pop_scratch);
  __ Pop(regs_.object(), regs_.address());

  __ bind(&need_incremental);

  // Fall through when we need to inform the incremental marker.
}


void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0    : element value to store
  //  -- a1    : array literal
  //  -- a2    : map of array literal
  //  -- a3    : element index as smi
  //  -- t0    : array literal index in function as smi
  // -----------------------------------

  Label element_done;
  Label double_elements;
  Label smi_element;
  Label slow_elements;
  Label fast_elements;

  __ CheckFastElements(a2, t1, &double_elements);
  // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
  __ JumpIfSmi(a0, &smi_element);
  __ CheckFastSmiOnlyElements(a2, t1, &fast_elements);

  // Storing into the array literal requires an elements transition. Call
  // into the runtime.
  __ bind(&slow_elements);
  __ Push(a1, a3, a0);
  __ lw(t1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ lw(t1, FieldMemOperand(t1, JSFunction::kLiteralsOffset));
  __ Push(t1, t0);
  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);

  // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
  __ bind(&fast_elements);
  __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
  __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(t2, t1, t2);
  __ Addu(t2, t2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ sw(a0, MemOperand(t2, 0));
  // Update the write barrier for the array store.
  __ RecordWrite(t1, t2, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);

  // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
  // FAST_ELEMENTS, and value is Smi.
  __ bind(&smi_element);
  __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
  __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(t2, t1, t2);
  __ sw(a0, FieldMemOperand(t2, FixedArray::kHeaderSize));
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);

  // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
  __ bind(&double_elements);
  __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
  __ StoreNumberToDoubleElements(a0, a3, a1, t1, t2, t3, t5, a2,
                                 &slow_elements);
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);
}
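
// Illustrative address computation for the fast-element stores above
// (assumptions: 32-bit pointers, kPointerSizeLog2 == 2, kSmiTagSize == 1,
// so a smi index already carries one left shift):
//   byte_offset = smi_index << (kPointerSizeLog2 - kSmiTagSize);  // index * 4
//   slot = elements + byte_offset + FixedArray::kHeaderSize - kHeapObjectTag;
//   *slot = value;  // the FAST_ELEMENTS path then calls RecordWrite(...)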


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS