// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_IA32)

#include "bootstrapper.h"
#include "code-stubs.h"
#include "isolate.h"
#include "jsregexp.h"
#include "regexp-macro-assembler.h"
#include "stub-cache.h"
#include "codegen.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)
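
// ACCESS_MASM(masm) expands to masm->, so a line such as __ ret(0) is
// shorthand for masm->ret(0): every __-prefixed call below emits one or
// more ia32 instructions into the stub's code buffer.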

void ToNumberStub::Generate(MacroAssembler* masm) {
  // The ToNumber stub takes one argument in eax.
  Label check_heap_number, call_builtin;
  __ JumpIfNotSmi(eax, &check_heap_number, Label::kNear);
  __ ret(0);

  __ bind(&check_heap_number);
  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
  Factory* factory = masm->isolate()->factory();
  __ cmp(ebx, Immediate(factory->heap_number_map()));
  __ j(not_equal, &call_builtin, Label::kNear);
  __ ret(0);

  __ bind(&call_builtin);
  __ pop(ecx);  // Pop return address.
  __ push(eax);
  __ push(ecx);  // Push return address.
  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
}
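
// On ia32, a smi n is stored as the machine word n << 1: the low (tag) bit
// is always zero, which is why JumpIfNotSmi above only has to test bit 0.
// For example, the smi 5 is the word 0x0000000A, and smi 0 is the word 0.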


void FastNewClosureStub::Generate(MacroAssembler* masm) {
  // Create a new closure from the given function info in new
  // space. Set the context to the current context in esi.
  Label gc;
  __ AllocateInNewSpace(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT);

  // Get the function info from the stack.
  __ mov(edx, Operand(esp, 1 * kPointerSize));

  int map_index = (language_mode_ == CLASSIC_MODE)
      ? Context::FUNCTION_MAP_INDEX
      : Context::STRICT_MODE_FUNCTION_MAP_INDEX;

  // Compute the function map in the current global context and set that
  // as the map of the allocated object.
  __ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset));
  __ mov(ecx, Operand(ecx, Context::SlotOffset(map_index)));
  __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx);

  // Initialize the rest of the function. We don't have to update the
  // write barrier because the allocated object is in new space.
  Factory* factory = masm->isolate()->factory();
  __ mov(ebx, Immediate(factory->empty_fixed_array()));
  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ebx);
  __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
  __ mov(FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset),
         Immediate(factory->the_hole_value()));
  __ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx);
  __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
  __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);
  __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset),
         Immediate(factory->undefined_value()));

  // Initialize the code pointer in the function to be the one
  // found in the shared function info object.
  __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
  __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
  __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);

  // Return and remove the on-stack parameter.
  __ ret(1 * kPointerSize);

  // Create a new closure through the slower runtime call.
  __ bind(&gc);
  __ pop(ecx);  // Temporarily remove return address.
  __ pop(edx);
  __ push(esi);
  __ push(edx);
  __ push(Immediate(factory->false_value()));
  __ push(ecx);  // Restore return address.
  __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
}


void FastNewContextStub::Generate(MacroAssembler* masm) {
  // Try to allocate the context in new space.
  Label gc;
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
  __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
                        eax, ebx, ecx, &gc, TAG_OBJECT);

  // Get the function from the stack.
  __ mov(ecx, Operand(esp, 1 * kPointerSize));

  // Set up the object header.
  Factory* factory = masm->isolate()->factory();
  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
         factory->function_context_map());
  __ mov(FieldOperand(eax, Context::kLengthOffset),
         Immediate(Smi::FromInt(length)));

  // Set up the fixed slots.
  __ Set(ebx, Immediate(0));  // Set to NULL.
  __ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx);
  __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), esi);
  __ mov(Operand(eax, Context::SlotOffset(Context::EXTENSION_INDEX)), ebx);

  // Copy the global object from the previous context.
  __ mov(ebx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_INDEX)), ebx);

  // Initialize the rest of the slots to undefined.
  __ mov(ebx, factory->undefined_value());
  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
    __ mov(Operand(eax, Context::SlotOffset(i)), ebx);
  }

  // Return and remove the on-stack parameter.
  __ mov(esi, eax);
  __ ret(1 * kPointerSize);

  // Need to collect. Call into runtime system.
  __ bind(&gc);
  __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
}
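
// Note on the slot stores above: Context::SlotOffset(i) already folds in the
// -kHeapObjectTag adjustment (roughly FixedArray::kHeaderSize +
// i * kPointerSize - kHeapObjectTag), so adding it to the tagged pointer in
// eax addresses the slot directly; that is why plain Operand is used there
// instead of FieldOperand.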


void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  //
  // [esp + (1 * kPointerSize)]: function
  // [esp + (2 * kPointerSize)]: serialized scope info

  // Try to allocate the context in new space.
  Label gc;
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
  __ AllocateInNewSpace(FixedArray::SizeFor(length),
                        eax, ebx, ecx, &gc, TAG_OBJECT);

  // Get the function or sentinel from the stack.
  __ mov(ecx, Operand(esp, 1 * kPointerSize));

  // Get the serialized scope info from the stack.
  __ mov(ebx, Operand(esp, 2 * kPointerSize));

  // Set up the object header.
  Factory* factory = masm->isolate()->factory();
  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
         factory->block_context_map());
  __ mov(FieldOperand(eax, Context::kLengthOffset),
         Immediate(Smi::FromInt(length)));

  // If this block context is nested in the global context we get a smi
  // sentinel instead of a function. The block context should get the
  // canonical empty function of the global context as its closure which
  // we still have to look up.
  Label after_sentinel;
  __ JumpIfNotSmi(ecx, &after_sentinel, Label::kNear);
  if (FLAG_debug_code) {
    const char* message = "Expected 0 as a Smi sentinel";
    __ cmp(ecx, 0);
    __ Assert(equal, message);
  }
  __ mov(ecx, GlobalObjectOperand());
  __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset));
  __ mov(ecx, ContextOperand(ecx, Context::CLOSURE_INDEX));
  __ bind(&after_sentinel);

  // Set up the fixed slots.
  __ mov(ContextOperand(eax, Context::CLOSURE_INDEX), ecx);
  __ mov(ContextOperand(eax, Context::PREVIOUS_INDEX), esi);
  __ mov(ContextOperand(eax, Context::EXTENSION_INDEX), ebx);

  // Copy the global object from the previous context.
  __ mov(ebx, ContextOperand(esi, Context::GLOBAL_INDEX));
  __ mov(ContextOperand(eax, Context::GLOBAL_INDEX), ebx);

  // Initialize the rest of the slots to the hole value.
  if (slots_ == 1) {
    __ mov(ContextOperand(eax, Context::MIN_CONTEXT_SLOTS),
           factory->the_hole_value());
  } else {
    __ mov(ebx, factory->the_hole_value());
    for (int i = 0; i < slots_; i++) {
      __ mov(ContextOperand(eax, i + Context::MIN_CONTEXT_SLOTS), ebx);
    }
  }

  // Return and remove the on-stack parameters.
  __ mov(esi, eax);
  __ ret(2 * kPointerSize);

  // Need to collect. Call into runtime system.
  __ bind(&gc);
  __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
}


static void GenerateFastCloneShallowArrayCommon(
    MacroAssembler* masm,
    int length,
    FastCloneShallowArrayStub::Mode mode,
    Label* fail) {
  // Registers on entry:
  //
  // ecx: boilerplate literal array.
  ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);

  // All sizes here are multiples of kPointerSize.
  int elements_size = 0;
  if (length > 0) {
    elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
        ? FixedDoubleArray::SizeFor(length)
        : FixedArray::SizeFor(length);
  }
  int size = JSArray::kSize + elements_size;

  // Allocate both the JS array and the elements array in one big
  // allocation. This avoids multiple limit checks.
  __ AllocateInNewSpace(size, eax, ebx, edx, fail, TAG_OBJECT);

  // Copy the JS array part.
  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
    if ((i != JSArray::kElementsOffset) || (length == 0)) {
      __ mov(ebx, FieldOperand(ecx, i));
      __ mov(FieldOperand(eax, i), ebx);
    }
  }

  if (length > 0) {
    // Get hold of the elements array of the boilerplate and set up the
    // elements pointer in the resulting object.
    __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
    __ lea(edx, Operand(eax, JSArray::kSize));
    __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx);

    // Copy the elements array.
    if (mode == FastCloneShallowArrayStub::CLONE_ELEMENTS) {
      for (int i = 0; i < elements_size; i += kPointerSize) {
        __ mov(ebx, FieldOperand(ecx, i));
        __ mov(FieldOperand(edx, i), ebx);
      }
    } else {
      ASSERT(mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS);
      int i;
      for (i = 0; i < FixedDoubleArray::kHeaderSize; i += kPointerSize) {
        __ mov(ebx, FieldOperand(ecx, i));
        __ mov(FieldOperand(edx, i), ebx);
      }
      while (i < elements_size) {
        __ fld_d(FieldOperand(ecx, i));
        __ fstp_d(FieldOperand(edx, i));
        i += kDoubleSize;
      }
      ASSERT(i == elements_size);
    }
  }
}


void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  //
  // [esp + kPointerSize]: constant elements.
  // [esp + (2 * kPointerSize)]: literal index.
  // [esp + (3 * kPointerSize)]: literals array.

  // Load boilerplate object into ecx and check if we need to create a
  // boilerplate.
  __ mov(ecx, Operand(esp, 3 * kPointerSize));
  __ mov(eax, Operand(esp, 2 * kPointerSize));
  STATIC_ASSERT(kPointerSize == 4);
  STATIC_ASSERT(kSmiTagSize == 1);
  STATIC_ASSERT(kSmiTag == 0);
  __ mov(ecx, FieldOperand(ecx, eax, times_half_pointer_size,
                           FixedArray::kHeaderSize));
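  // The STATIC_ASSERTs above pin down why this addressing mode works: the
  // literal index in eax is a smi, i.e. already index << 1, so scaling it by
  // times_half_pointer_size (times_2) yields index * kPointerSize bytes --
  // the FixedArray element offset -- without untagging first.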
  Factory* factory = masm->isolate()->factory();
  __ cmp(ecx, factory->undefined_value());
  Label slow_case;
  __ j(equal, &slow_case);

  FastCloneShallowArrayStub::Mode mode = mode_;
  // ecx is boilerplate object.
  if (mode == CLONE_ANY_ELEMENTS) {
    Label double_elements, check_fast_elements;
    __ mov(ebx, FieldOperand(ecx, JSArray::kElementsOffset));
    __ CheckMap(ebx, factory->fixed_cow_array_map(),
                &check_fast_elements, DONT_DO_SMI_CHECK);
    GenerateFastCloneShallowArrayCommon(masm, 0,
                                        COPY_ON_WRITE_ELEMENTS, &slow_case);
    __ ret(3 * kPointerSize);

    __ bind(&check_fast_elements);
    __ CheckMap(ebx, factory->fixed_array_map(),
                &double_elements, DONT_DO_SMI_CHECK);
    GenerateFastCloneShallowArrayCommon(masm, length_,
                                        CLONE_ELEMENTS, &slow_case);
    __ ret(3 * kPointerSize);

    __ bind(&double_elements);
    mode = CLONE_DOUBLE_ELEMENTS;
    // Fall through to generate the code to handle double elements.
  }

  if (FLAG_debug_code) {
    const char* message;
    Handle<Map> expected_map;
    if (mode == CLONE_ELEMENTS) {
      message = "Expected (writable) fixed array";
      expected_map = factory->fixed_array_map();
    } else if (mode == CLONE_DOUBLE_ELEMENTS) {
      message = "Expected (writable) fixed double array";
      expected_map = factory->fixed_double_array_map();
    } else {
      ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
      message = "Expected copy-on-write fixed array";
      expected_map = factory->fixed_cow_array_map();
    }
    __ push(ecx);
    __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
    __ cmp(FieldOperand(ecx, HeapObject::kMapOffset), expected_map);
    __ Assert(equal, message);
    __ pop(ecx);
  }

  GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
  // Return and remove the on-stack parameters.
  __ ret(3 * kPointerSize);

  __ bind(&slow_case);
  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
}


void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  //
  // [esp + kPointerSize]: object literal flags.
  // [esp + (2 * kPointerSize)]: constant properties.
  // [esp + (3 * kPointerSize)]: literal index.
  // [esp + (4 * kPointerSize)]: literals array.

  // Load boilerplate object into ecx and check if we need to create a
  // boilerplate.
  Label slow_case;
  __ mov(ecx, Operand(esp, 4 * kPointerSize));
  __ mov(eax, Operand(esp, 3 * kPointerSize));
  STATIC_ASSERT(kPointerSize == 4);
  STATIC_ASSERT(kSmiTagSize == 1);
  STATIC_ASSERT(kSmiTag == 0);
  __ mov(ecx, FieldOperand(ecx, eax, times_half_pointer_size,
                           FixedArray::kHeaderSize));
  Factory* factory = masm->isolate()->factory();
  __ cmp(ecx, factory->undefined_value());
  __ j(equal, &slow_case);

  // Check that the boilerplate contains only fast properties and we can
  // statically determine the instance size.
  int size = JSObject::kHeaderSize + length_ * kPointerSize;
  __ mov(eax, FieldOperand(ecx, HeapObject::kMapOffset));
  __ movzx_b(eax, FieldOperand(eax, Map::kInstanceSizeOffset));
  __ cmp(eax, Immediate(size >> kPointerSizeLog2));
  __ j(not_equal, &slow_case);

  // Allocate the JS object and copy header together with all in-object
  // properties from the boilerplate.
  __ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT);
  for (int i = 0; i < size; i += kPointerSize) {
    __ mov(ebx, FieldOperand(ecx, i));
    __ mov(FieldOperand(eax, i), ebx);
  }

  // Return and remove the on-stack parameters.
  __ ret(4 * kPointerSize);

  __ bind(&slow_case);
  __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
}


// The stub expects its argument on the stack and returns its result in tos_:
// zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
  // we cannot call anything that could cause a GC from this stub.
  Label patch;
  Factory* factory = masm->isolate()->factory();
  const Register argument = eax;
  const Register map = edx;

  if (!types_.IsEmpty()) {
    __ mov(argument, Operand(esp, 1 * kPointerSize));
  }

  // undefined -> false
  CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);

  // Boolean -> its value
  CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
  CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);

  // 'null' -> false.
  CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);

  if (types_.Contains(SMI)) {
    // Smis: 0 -> false, all other -> true
    Label not_smi;
    __ JumpIfNotSmi(argument, &not_smi, Label::kNear);
    // argument contains the correct return value already.
    if (!tos_.is(argument)) {
      __ mov(tos_, argument);
    }
    __ ret(1 * kPointerSize);
    __ bind(&not_smi);
  } else if (types_.NeedsMap()) {
    // If we need a map later and have a Smi -> patch.
    __ JumpIfSmi(argument, &patch, Label::kNear);
  }

  if (types_.NeedsMap()) {
    __ mov(map, FieldOperand(argument, HeapObject::kMapOffset));

    if (types_.CanBeUndetectable()) {
      __ test_b(FieldOperand(map, Map::kBitFieldOffset),
                1 << Map::kIsUndetectable);
      // Undetectable -> false.
      Label not_undetectable;
      __ j(zero, &not_undetectable, Label::kNear);
      __ Set(tos_, Immediate(0));
      __ ret(1 * kPointerSize);
      __ bind(&not_undetectable);
    }
  }

  if (types_.Contains(SPEC_OBJECT)) {
    // spec object -> true.
    Label not_js_object;
    __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
    __ j(below, &not_js_object, Label::kNear);
    // argument contains the correct return value already.
    if (!tos_.is(argument)) {
      __ Set(tos_, Immediate(1));
    }
    __ ret(1 * kPointerSize);
    __ bind(&not_js_object);
  }

  if (types_.Contains(STRING)) {
    // String value -> false iff empty.
    Label not_string;
    __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
    __ j(above_equal, &not_string, Label::kNear);
    __ mov(tos_, FieldOperand(argument, String::kLengthOffset));
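    // The smi length doubles as the boolean result: smi zero is the machine
    // word 0 (false), and any non-empty length is a non-zero word (true).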
    __ ret(1 * kPointerSize);  // the string length is OK as the return value
    __ bind(&not_string);
  }

  if (types_.Contains(HEAP_NUMBER)) {
    // heap number -> false iff +0, -0, or NaN.
    Label not_heap_number, false_result;
    __ cmp(map, factory->heap_number_map());
    __ j(not_equal, &not_heap_number, Label::kNear);
    __ fldz();
    __ fld_d(FieldOperand(argument, HeapNumber::kValueOffset));
    __ FCmp();
    __ j(zero, &false_result, Label::kNear);
    // argument contains the correct return value already.
    if (!tos_.is(argument)) {
      __ Set(tos_, Immediate(1));
    }
    __ ret(1 * kPointerSize);
    __ bind(&false_result);
    __ Set(tos_, Immediate(0));
    __ ret(1 * kPointerSize);
    __ bind(&not_heap_number);
  }

  __ bind(&patch);
  GenerateTypeTransition(masm);
}


void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.
  __ pushad();
  if (save_doubles_ == kSaveFPRegs) {
    CpuFeatures::Scope scope(SSE2);
    __ sub(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      __ movdbl(Operand(esp, i * kDoubleSize), reg);
    }
  }
  const int argument_count = 1;

  AllowExternalCallThatCantCauseGC scope(masm);
  __ PrepareCallCFunction(argument_count, ecx);
  __ mov(Operand(esp, 0 * kPointerSize),
         Immediate(ExternalReference::isolate_address()));
  __ CallCFunction(
      ExternalReference::store_buffer_overflow_function(masm->isolate()),
      argument_count);
  if (save_doubles_ == kSaveFPRegs) {
    CpuFeatures::Scope scope(SSE2);
    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      __ movdbl(reg, Operand(esp, i * kDoubleSize));
    }
    __ add(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
  }
  __ popad();
  __ ret(0);
}


void ToBooleanStub::CheckOddball(MacroAssembler* masm,
                                 Type type,
                                 Heap::RootListIndex value,
                                 bool result) {
  const Register argument = eax;
  if (types_.Contains(type)) {
    // If we see an expected oddball, return its ToBoolean value in tos_.
    Label different_value;
    __ CompareRoot(argument, value);
    __ j(not_equal, &different_value, Label::kNear);
    if (!result) {
      // If we have to return zero, there is no way around clearing tos_.
      __ Set(tos_, Immediate(0));
    } else if (!tos_.is(argument)) {
      // If we have to return non-zero, we can re-use the argument if it is the
      // same register as the result, because we never see Smi-zero here.
      __ Set(tos_, Immediate(1));
    }
    __ ret(1 * kPointerSize);
    __ bind(&different_value);
  }
}


void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
  __ pop(ecx);  // Get return address, operand is now on top of stack.
  __ push(Immediate(Smi::FromInt(tos_.code())));
  __ push(Immediate(Smi::FromInt(types_.ToByte())));
  __ push(ecx);  // Push return address.
  // Patch the caller to an appropriate specialized stub and return the
  // operation result to the caller of the stub.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
      3,
      1);
}


class FloatingPointHelper : public AllStatic {
 public:
  enum ArgLocation {
    ARGS_ON_STACK,
    ARGS_IN_REGISTERS
  };

  // Code pattern for loading a floating point value. Input value must
  // be either a smi or a heap number object (fp value). Requirements:
  // operand in register number. Returns operand as floating point number
  // on FPU stack.
  static void LoadFloatOperand(MacroAssembler* masm, Register number);

  // Code pattern for loading floating point values. Input values must
  // be either smi or heap number objects (fp values). Requirements:
  // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax.
  // Returns operands as floating point numbers on FPU stack.
  static void LoadFloatOperands(MacroAssembler* masm,
                                Register scratch,
                                ArgLocation arg_location = ARGS_ON_STACK);

  // Similar to LoadFloatOperand but assumes that both operands are smis.
  // Expects operands in edx, eax.
  static void LoadFloatSmis(MacroAssembler* masm, Register scratch);

  // Test if operands are smi or number objects (fp). Requirements:
  // operand_1 in eax, operand_2 in edx; falls through on float
  // operands, jumps to the non_float label otherwise.
  static void CheckFloatOperands(MacroAssembler* masm,
                                 Label* non_float,
                                 Register scratch);

  // Checks that the two floating point numbers on top of the FPU stack
  // have int32 values.
  static void CheckFloatOperandsAreInt32(MacroAssembler* masm,
                                         Label* non_int32);

  // Takes the operands in edx and eax and loads them as integers in eax
  // and ecx.
  static void LoadUnknownsAsIntegers(MacroAssembler* masm,
                                     bool use_sse3,
                                     Label* operand_conversion_failure);

  // Must only be called after LoadUnknownsAsIntegers.  Assumes that the
  // operands are pushed on the stack, and that their conversions to int32
  // are in eax and ecx.  Checks that the original numbers were in the int32
  // range.
  static void CheckLoadedIntegersWereInt32(MacroAssembler* masm,
                                           bool use_sse3,
                                           Label* not_int32);

  // Assumes that operands are smis or heap numbers and loads them
  // into xmm0 and xmm1. Operands are in edx and eax.
  // Leaves operands unchanged.
  static void LoadSSE2Operands(MacroAssembler* masm);

  // Test if operands are numbers (smi or HeapNumber objects), and load
  // them into xmm0 and xmm1 if they are.  Jump to label not_numbers if
  // either operand is not a number.  Operands are in edx and eax.
  // Leaves operands unchanged.
  static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers);

  // Similar to LoadSSE2Operands but assumes that both operands are smis.
  // Expects operands in edx, eax.
  static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);

  // Checks that the two floating point numbers loaded into xmm0 and xmm1
  // have int32 values.
  static void CheckSSE2OperandsAreInt32(MacroAssembler* masm,
                                        Label* non_int32,
                                        Register scratch);
};


// Get the integer part of a heap number.  Surprisingly, all this bit twiddling
// is faster than using the built-in instructions on floating point registers.
// Trashes edi and ebx.  Dest is ecx.  Source cannot be ecx or one of the
// trashed registers.
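// For reference (standard IEEE 754 layout): a HeapNumber payload is a 64-bit
// double whose word at kExponentOffset holds the sign bit, the 11-bit biased
// exponent (bits 20-30, bias 1023) and the top 20 mantissa bits, while the
// word at kMantissaOffset holds the remaining 32 mantissa bits.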
static void IntegerConvert(MacroAssembler* masm,
                           Register source,
                           bool use_sse3,
                           Label* conversion_failure) {
  ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
  Label done, right_exponent, normal_exponent;
  Register scratch = ebx;
  Register scratch2 = edi;
  // Get exponent word.
  __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
  // Get exponent alone in scratch2.
  __ mov(scratch2, scratch);
  __ and_(scratch2, HeapNumber::kExponentMask);
  if (use_sse3) {
    CpuFeatures::Scope scope(SSE3);
    // Check whether the exponent is too big for a 64 bit signed integer.
    static const uint32_t kTooBigExponent =
        (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
    __ cmp(scratch2, Immediate(kTooBigExponent));
    __ j(greater_equal, conversion_failure);
    // Load x87 register with heap number.
    __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
    // Reserve space for 64 bit answer.
    __ sub(esp, Immediate(sizeof(uint64_t)));  // Nolint.
    // Do conversion, which cannot fail because we checked the exponent.
    __ fisttp_d(Operand(esp, 0));
    __ mov(ecx, Operand(esp, 0));  // Load low word of answer into ecx.
    __ add(esp, Immediate(sizeof(uint64_t)));  // Nolint.
  } else {
    // Load ecx with zero.  We use this either for the final shift or
    // for the answer.
    __ xor_(ecx, ecx);
    // Check whether the exponent matches a 32 bit signed int that cannot be
    // represented by a Smi.  A non-smi 32 bit integer is 1.xxx * 2^30 so the
    // exponent is 30 (biased).  This is the exponent that we are fastest at and
    // also the highest exponent we can handle here.
    const uint32_t non_smi_exponent =
        (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
    __ cmp(scratch2, Immediate(non_smi_exponent));
    // If we have a match of the int32-but-not-Smi exponent then skip some
    // logic.
    __ j(equal, &right_exponent, Label::kNear);
    // If the exponent is higher than that then go to slow case.  This catches
    // numbers that don't fit in a signed int32, infinities and NaNs.
    __ j(less, &normal_exponent, Label::kNear);

    {
      // Handle a big exponent.  The only reason we have this code is that the
      // >>> operator has a tendency to generate numbers with an exponent of 31.
      const uint32_t big_non_smi_exponent =
          (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
      __ cmp(scratch2, Immediate(big_non_smi_exponent));
      __ j(not_equal, conversion_failure);
      // We have the big exponent, typically from >>>.  This means the number is
      // in the range 2^31 to 2^32 - 1.  Get the top bits of the mantissa.
      __ mov(scratch2, scratch);
      __ and_(scratch2, HeapNumber::kMantissaMask);
      // Put back the implicit 1.
      __ or_(scratch2, 1 << HeapNumber::kExponentShift);
      // Shift up the mantissa bits to take up the space the exponent used to
      // take. OR'ing in the implicit bit already took care of one of those
      // bits, and since we want the full unsigned range we subtract 1 from
      // the shift distance.
      const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
      __ shl(scratch2, big_shift_distance);
      // Get the second half of the double.
      __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset));
      // Shift down 21 bits to get the most significant 11 bits of the low
      // mantissa word.
      __ shr(ecx, 32 - big_shift_distance);
      __ or_(ecx, scratch2);
      // We have the answer in ecx, but we may need to negate it.
      __ test(scratch, scratch);
      __ j(positive, &done, Label::kNear);
      __ neg(ecx);
      __ jmp(&done, Label::kNear);
    }

    __ bind(&normal_exponent);
    // Exponent word in scratch, exponent part of exponent word in scratch2.
    // Zero in ecx.
    // We know the exponent is smaller than 30 (biased).  If it is less than
    // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
    // it rounds to zero.
    const uint32_t zero_exponent =
        (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
    __ sub(scratch2, Immediate(zero_exponent));
    // ecx already has a Smi zero.
    __ j(less, &done, Label::kNear);

    // We have a shifted exponent between 0 and 30 in scratch2.
    __ shr(scratch2, HeapNumber::kExponentShift);
    __ mov(ecx, Immediate(30));
    __ sub(ecx, scratch2);

    __ bind(&right_exponent);
    // Here ecx is the shift, scratch is the exponent word.
    // Get the top bits of the mantissa.
    __ and_(scratch, HeapNumber::kMantissaMask);
    // Put back the implicit 1.
    __ or_(scratch, 1 << HeapNumber::kExponentShift);
    // Shift up the mantissa bits to take up the space the exponent used to
    // take. We have kExponentShift + 1 significant bits in the low end of the
    // word.  Shift them to the top bits.
    const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
    __ shl(scratch, shift_distance);
    // Get the second half of the double. For some exponents we don't
    // actually need this because the bits get shifted out again, but
    // it's probably slower to test than just to do it.
    __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
    // Shift down 22 bits to get the most significant 10 bits of the low
    // mantissa word.
    __ shr(scratch2, 32 - shift_distance);
    __ or_(scratch2, scratch);
    // Move down according to the exponent.
    __ shr_cl(scratch2);
    // Now the unsigned answer is in scratch2.  We need to move it to ecx and
    // we may need to fix the sign.
    Label negative;
    __ xor_(ecx, ecx);
    __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
    __ j(greater, &negative, Label::kNear);
    __ mov(ecx, scratch2);
    __ jmp(&done, Label::kNear);
    __ bind(&negative);
    __ sub(ecx, scratch2);
    __ bind(&done);
  }
}


void UnaryOpStub::PrintName(StringStream* stream) {
  const char* op_name = Token::Name(op_);
  const char* overwrite_name = NULL;  // Make g++ happy.
  switch (mode_) {
    case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
    case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
  }
  stream->Add("UnaryOpStub_%s_%s_%s",
              op_name,
              overwrite_name,
              UnaryOpIC::GetName(operand_type_));
}


// TODO(svenpanne): Use virtual functions instead of switch.
void UnaryOpStub::Generate(MacroAssembler* masm) {
  switch (operand_type_) {
    case UnaryOpIC::UNINITIALIZED:
      GenerateTypeTransition(masm);
      break;
    case UnaryOpIC::SMI:
      GenerateSmiStub(masm);
      break;
    case UnaryOpIC::HEAP_NUMBER:
      GenerateHeapNumberStub(masm);
      break;
    case UnaryOpIC::GENERIC:
      GenerateGenericStub(masm);
      break;
  }
}


void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
  __ pop(ecx);  // Save return address.

  __ push(eax);  // the operand
  __ push(Immediate(Smi::FromInt(op_)));
  __ push(Immediate(Smi::FromInt(mode_)));
  __ push(Immediate(Smi::FromInt(operand_type_)));

  __ push(ecx);  // Push return address.

  // Patch the caller to an appropriate specialized stub and return the
  // operation result to the caller of the stub.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
}


// TODO(svenpanne): Use virtual functions instead of switch.
void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
  switch (op_) {
    case Token::SUB:
      GenerateSmiStubSub(masm);
      break;
    case Token::BIT_NOT:
      GenerateSmiStubBitNot(masm);
      break;
    default:
      UNREACHABLE();
  }
}


void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
  Label non_smi, undo, slow;
  GenerateSmiCodeSub(masm, &non_smi, &undo, &slow,
                     Label::kNear, Label::kNear, Label::kNear);
  __ bind(&undo);
  GenerateSmiCodeUndo(masm);
  __ bind(&non_smi);
  __ bind(&slow);
  GenerateTypeTransition(masm);
}


void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
  Label non_smi;
  GenerateSmiCodeBitNot(masm, &non_smi);
  __ bind(&non_smi);
  GenerateTypeTransition(masm);
}


void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
                                     Label* non_smi,
                                     Label* undo,
                                     Label* slow,
                                     Label::Distance non_smi_near,
                                     Label::Distance undo_near,
                                     Label::Distance slow_near) {
  // Check whether the value is a smi.
  __ JumpIfNotSmi(eax, non_smi, non_smi_near);

  // We can't handle -0 with smis, so use a type transition for that case.
  __ test(eax, eax);
  __ j(zero, slow, slow_near);

  // Try optimistic subtraction '0 - value', saving the operand in edx for
  // the undo path.
  __ mov(edx, eax);
  __ Set(eax, Immediate(0));
  __ sub(eax, edx);
  __ j(overflow, undo, undo_near);
  __ ret(0);
}
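
// Note: the optimistic negation overflows for exactly one smi input: the
// most negative smi, -2^30, whose tagged form is 0x80000000. Computing
// 0 - 0x80000000 sets the overflow flag, so only that case takes the undo
// path above (smi zero has already branched to slow).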


void UnaryOpStub::GenerateSmiCodeBitNot(
    MacroAssembler* masm,
    Label* non_smi,
    Label::Distance non_smi_near) {
  // Check whether the value is a smi.
  __ JumpIfNotSmi(eax, non_smi, non_smi_near);

  // Flip bits and revert inverted smi-tag.
  __ not_(eax);
  __ and_(eax, ~kSmiTagMask);
  __ ret(0);
}
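
// This works because for a tagged smi n << 1, not_ yields (~n << 1) | 1, and
// clearing the tag bit leaves ~n << 1, which is exactly the tagged form of
// ~n. E.g. smi 5 (word 0x0000000A) becomes word 0xFFFFFFF4, i.e. smi -6.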


void UnaryOpStub::GenerateSmiCodeUndo(MacroAssembler* masm) {
  __ mov(eax, edx);
}


// TODO(svenpanne): Use virtual functions instead of switch.
void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
  switch (op_) {
    case Token::SUB:
      GenerateHeapNumberStubSub(masm);
      break;
    case Token::BIT_NOT:
      GenerateHeapNumberStubBitNot(masm);
      break;
    default:
      UNREACHABLE();
  }
}


void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
  Label non_smi, undo, slow, call_builtin;
  GenerateSmiCodeSub(masm, &non_smi, &undo, &call_builtin, Label::kNear);
  __ bind(&non_smi);
  GenerateHeapNumberCodeSub(masm, &slow);
  __ bind(&undo);
  GenerateSmiCodeUndo(masm);
  __ bind(&slow);
  GenerateTypeTransition(masm);
  __ bind(&call_builtin);
  GenerateGenericCodeFallback(masm);
}


void UnaryOpStub::GenerateHeapNumberStubBitNot(
    MacroAssembler* masm) {
  Label non_smi, slow;
  GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
  __ bind(&non_smi);
  GenerateHeapNumberCodeBitNot(masm, &slow);
  __ bind(&slow);
  GenerateTypeTransition(masm);
}


void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
                                            Label* slow) {
  __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
  __ cmp(edx, masm->isolate()->factory()->heap_number_map());
  __ j(not_equal, slow);

  if (mode_ == UNARY_OVERWRITE) {
    __ xor_(FieldOperand(eax, HeapNumber::kExponentOffset),
            Immediate(HeapNumber::kSignMask));  // Flip sign.
  } else {
    __ mov(edx, eax);
    // edx: operand

    Label slow_allocate_heapnumber, heapnumber_allocated;
    __ AllocateHeapNumber(eax, ebx, ecx, &slow_allocate_heapnumber);
    __ jmp(&heapnumber_allocated, Label::kNear);

    __ bind(&slow_allocate_heapnumber);
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      __ push(edx);
      __ CallRuntime(Runtime::kNumberAlloc, 0);
      __ pop(edx);
    }

    __ bind(&heapnumber_allocated);
    // eax: allocated 'empty' number
    __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
    __ xor_(ecx, HeapNumber::kSignMask);  // Flip sign.
    __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
    __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
    __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
  }
  __ ret(0);
}


void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
                                               Label* slow) {
  __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
  __ cmp(edx, masm->isolate()->factory()->heap_number_map());
  __ j(not_equal, slow);

  // Convert the heap number in eax to an untagged integer in ecx.
  IntegerConvert(masm, eax, CpuFeatures::IsSupported(SSE3), slow);

  // Do the bitwise operation and check if the result fits in a smi.
  Label try_float;
  __ not_(ecx);
  __ cmp(ecx, 0xc0000000);
  __ j(sign, &try_float, Label::kNear);
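  // The cmp maps the smi-representable range [-2^30, 2^30) onto results with
  // the sign bit clear: e.g. for ecx == 0x40000000 (2^30, one too big) the
  // subtraction yields 0x80000000, so the sign flag routes it to try_float.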

  // Tag the result as a smi and we're done.
  STATIC_ASSERT(kSmiTagSize == 1);
  __ lea(eax, Operand(ecx, times_2, kSmiTag));
  __ ret(0);

  // Try to store the result in a heap number.
  __ bind(&try_float);
  if (mode_ == UNARY_NO_OVERWRITE) {
    Label slow_allocate_heapnumber, heapnumber_allocated;
    __ mov(ebx, eax);
    __ AllocateHeapNumber(eax, edx, edi, &slow_allocate_heapnumber);
    __ jmp(&heapnumber_allocated);

    __ bind(&slow_allocate_heapnumber);
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      // Push the original HeapNumber on the stack. The integer value can't
      // be stored since it's untagged and not in the smi range (so we can't
      // smi-tag it). We'll recalculate the value after the GC instead.
      __ push(ebx);
      __ CallRuntime(Runtime::kNumberAlloc, 0);
      // New HeapNumber is in eax.
      __ pop(edx);
    }
    // IntegerConvert uses ebx and edi as scratch registers.
    // This conversion won't go slow-case.
    IntegerConvert(masm, edx, CpuFeatures::IsSupported(SSE3), slow);
    __ not_(ecx);

    __ bind(&heapnumber_allocated);
  }
  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatures::Scope use_sse2(SSE2);
    __ cvtsi2sd(xmm0, ecx);
    __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
  } else {
    __ push(ecx);
    __ fild_s(Operand(esp, 0));
    __ pop(ecx);
    __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
  }
  __ ret(0);
}


// TODO(svenpanne): Use virtual functions instead of switch.
void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
  switch (op_) {
    case Token::SUB:
      GenerateGenericStubSub(masm);
      break;
    case Token::BIT_NOT:
      GenerateGenericStubBitNot(masm);
      break;
    default:
      UNREACHABLE();
  }
}


void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
  Label non_smi, undo, slow;
  GenerateSmiCodeSub(masm, &non_smi, &undo, &slow, Label::kNear);
  __ bind(&non_smi);
  GenerateHeapNumberCodeSub(masm, &slow);
  __ bind(&undo);
  GenerateSmiCodeUndo(masm);
  __ bind(&slow);
  GenerateGenericCodeFallback(masm);
}


void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
  Label non_smi, slow;
  GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
  __ bind(&non_smi);
  GenerateHeapNumberCodeBitNot(masm, &slow);
  __ bind(&slow);
  GenerateGenericCodeFallback(masm);
}


void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
  // Handle the slow case by jumping to the corresponding JavaScript builtin.
  __ pop(ecx);  // pop return address.
  __ push(eax);
  __ push(ecx);  // push return address
  switch (op_) {
    case Token::SUB:
      __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
      break;
    case Token::BIT_NOT:
      __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
      break;
    default:
      UNREACHABLE();
  }
}


void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
  __ pop(ecx);  // Save return address.
  __ push(edx);
  __ push(eax);
  // Left and right arguments are now on top.
  // Push this stub's key. Although the operation and the type info are
  // encoded into the key, the encoding is opaque, so push them too.
  __ push(Immediate(Smi::FromInt(MinorKey())));
  __ push(Immediate(Smi::FromInt(op_)));
  __ push(Immediate(Smi::FromInt(operands_type_)));

  __ push(ecx);  // Push return address.

  // Patch the caller to an appropriate specialized stub and return the
  // operation result to the caller of the stub.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
                        masm->isolate()),
      5,
      1);
}


// Prepare for a type transition runtime call when the args are already on
// the stack, under the return address.
void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) {
  __ pop(ecx);  // Save return address.
  // Left and right arguments are already on top of the stack.
  // Push this stub's key. Although the operation and the type info are
  // encoded into the key, the encoding is opaque, so push them too.
  __ push(Immediate(Smi::FromInt(MinorKey())));
  __ push(Immediate(Smi::FromInt(op_)));
  __ push(Immediate(Smi::FromInt(operands_type_)));

  __ push(ecx);  // Push return address.

  // Patch the caller to an appropriate specialized stub and return the
  // operation result to the caller of the stub.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
                        masm->isolate()),
      5,
      1);
}


void BinaryOpStub::Generate(MacroAssembler* masm) {
  // Explicitly allow generation of nested stubs. It is safe here because
  // generation code does not use any raw pointers.
  AllowStubCallsScope allow_stub_calls(masm, true);

  switch (operands_type_) {
    case BinaryOpIC::UNINITIALIZED:
      GenerateTypeTransition(masm);
      break;
    case BinaryOpIC::SMI:
      GenerateSmiStub(masm);
      break;
    case BinaryOpIC::INT32:
      GenerateInt32Stub(masm);
      break;
    case BinaryOpIC::HEAP_NUMBER:
      GenerateHeapNumberStub(masm);
      break;
    case BinaryOpIC::ODDBALL:
      GenerateOddballStub(masm);
      break;
    case BinaryOpIC::BOTH_STRING:
      GenerateBothStringStub(masm);
      break;
    case BinaryOpIC::STRING:
      GenerateStringStub(masm);
      break;
    case BinaryOpIC::GENERIC:
      GenerateGeneric(masm);
      break;
    default:
      UNREACHABLE();
  }
}


void BinaryOpStub::PrintName(StringStream* stream) {
  const char* op_name = Token::Name(op_);
  const char* overwrite_name;
  switch (mode_) {
    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
    default: overwrite_name = "UnknownOverwrite"; break;
  }
  stream->Add("BinaryOpStub_%s_%s_%s",
              op_name,
              overwrite_name,
              BinaryOpIC::GetName(operands_type_));
}


void BinaryOpStub::GenerateSmiCode(
    MacroAssembler* masm,
    Label* slow,
    SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
  // 1. Move arguments into edx, eax except for DIV and MOD, which need the
  // dividend in eax and edx free for the division.  Use eax, ebx for those.
  Comment load_comment(masm, "-- Load arguments");
  Register left = edx;
  Register right = eax;
  if (op_ == Token::DIV || op_ == Token::MOD) {
    left = eax;
    right = ebx;
    __ mov(ebx, eax);
    __ mov(eax, edx);
  }

  // 2. Prepare the smi check of both operands by oring them together.
  Comment smi_check_comment(masm, "-- Smi check arguments");
  Label not_smis;
  Register combined = ecx;
  ASSERT(!left.is(combined) && !right.is(combined));
  switch (op_) {
    case Token::BIT_OR:
      // Perform the operation into eax and smi check the result.  Preserve
      // eax in case the result is not a smi.
      ASSERT(!left.is(ecx) && !right.is(ecx));
      __ mov(ecx, right);
      __ or_(right, left);  // Bitwise or is commutative.
      combined = right;
      break;

    case Token::BIT_XOR:
    case Token::BIT_AND:
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
    case Token::MOD:
      __ mov(combined, right);
      __ or_(combined, left);
      break;

    case Token::SHL:
    case Token::SAR:
    case Token::SHR:
      // Move the right operand into ecx for the shift operation, use eax
      // for the smi check register.
      ASSERT(!left.is(ecx) && !right.is(ecx));
      __ mov(ecx, right);
      __ or_(right, left);
      combined = right;
      break;

    default:
      break;
  }

  // 3. Perform the smi check of the operands.
  STATIC_ASSERT(kSmiTag == 0);  // Adjust zero check if not the case.
  __ JumpIfNotSmi(combined, &not_smis);

  // 4. Operands are both smis, perform the operation leaving the result in
  // eax and check the result if necessary.
  Comment perform_smi(masm, "-- Perform smi operation");
  Label use_fp_on_smis;
  switch (op_) {
    case Token::BIT_OR:
      // Nothing to do.
      break;

    case Token::BIT_XOR:
      ASSERT(right.is(eax));
      __ xor_(right, left);  // Bitwise xor is commutative.
      break;

    case Token::BIT_AND:
      ASSERT(right.is(eax));
      __ and_(right, left);  // Bitwise and is commutative.
      break;

    case Token::SHL:
      // Remove tags from operands (but keep sign).
      __ SmiUntag(left);
      __ SmiUntag(ecx);
      // Perform the operation.
      __ shl_cl(left);
      // Check that the *signed* result fits in a smi.
      __ cmp(left, 0xc0000000);
      __ j(sign, &use_fp_on_smis);
      // Tag the result and store it in register eax.
      __ SmiTag(left);
      __ mov(eax, left);
      break;

    case Token::SAR:
      // Remove tags from operands (but keep sign).
      __ SmiUntag(left);
      __ SmiUntag(ecx);
      // Perform the operation.
      __ sar_cl(left);
      // Tag the result and store it in register eax.
      __ SmiTag(left);
      __ mov(eax, left);
      break;

    case Token::SHR:
      // Remove tags from operands (but keep sign).
      __ SmiUntag(left);
      __ SmiUntag(ecx);
      // Perform the operation.
      __ shr_cl(left);
      // Check that the *unsigned* result fits in a smi.
      // Neither of the two high-order bits can be set:
      // - 0x80000000: high bit would be lost when smi tagging.
      // - 0x40000000: this number would convert to negative when smi
      //   tagging. These two cases can only happen with shifts by 0 or 1
      //   when handed a valid smi.
      __ test(left, Immediate(0xc0000000));
      __ j(not_zero, &use_fp_on_smis);
      // Tag the result and store it in register eax.
      __ SmiTag(left);
      __ mov(eax, left);
      break;

    case Token::ADD:
      ASSERT(right.is(eax));
      __ add(right, left);  // Addition is commutative.
      __ j(overflow, &use_fp_on_smis);
      break;

    case Token::SUB:
      __ sub(left, right);
      __ j(overflow, &use_fp_on_smis);
      __ mov(eax, left);
      break;

    case Token::MUL:
      // If the smi tag is 0 we can just leave the tag on one operand.
      STATIC_ASSERT(kSmiTag == 0);  // Adjust code below if not the case.
      // We can't revert the multiplication if the result is not a smi
      // so save the right operand.
      __ mov(ebx, right);
      // Remove tag from one of the operands (but keep sign).
      __ SmiUntag(right);
      // Do multiplication.
      __ imul(right, left);  // Multiplication is commutative.
      __ j(overflow, &use_fp_on_smis);
      // Check for negative zero result.  Use combined = left | right.
      __ NegativeZeroTest(right, combined, &use_fp_on_smis);
      break;

    case Token::DIV:
      // We can't revert the division if the result is not a smi so
      // save the left operand.
      __ mov(edi, left);
      // Check for 0 divisor.
      __ test(right, right);
      __ j(zero, &use_fp_on_smis);
      // Sign extend left into edx:eax.
      ASSERT(left.is(eax));
      __ cdq();
      // Divide edx:eax by right.
      __ idiv(right);
      // Check for the corner case of dividing the most negative smi by
      // -1. We cannot use the overflow flag, since it is not set by idiv
      // instruction.
      STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
      __ cmp(eax, 0x40000000);
      __ j(equal, &use_fp_on_smis);
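      // (Both operands are still tagged here, so e.g. smi -2^30 / smi -1 is
      // 0x80000000 / 0xFFFFFFFE; the quotient 0x40000000 is the already
      // untagged value 2^30 -- one past the largest smi, hence the check.)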
1388       // Check for negative zero result.  Use combined = left | right.
1389       __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
1390       // Check that the remainder is zero.
1391       __ test(edx, edx);
1392       __ j(not_zero, &use_fp_on_smis);
1393       // Tag the result and store it in register eax.
1394       __ SmiTag(eax);
1395       break;
1396 
1397     case Token::MOD:
1398       // Check for 0 divisor.
1399       __ test(right, right);
1400       __ j(zero, &not_smis);
1401 
1402       // Sign extend left into edx:eax.
1403       ASSERT(left.is(eax));
1404       __ cdq();
1405       // Divide edx:eax by right.
1406       __ idiv(right);
1407       // Check for negative zero result.  Use combined = left | right.
1408       __ NegativeZeroTest(edx, combined, slow);
1409       // Move remainder to register eax.
1410       __ mov(eax, edx);
1411       break;
1412 
1413     default:
1414       UNREACHABLE();
1415   }
1416 
1417   // 5. Emit return of result in eax.  Operations that pushed their
1418   // arguments on the stack drop them on return.
1418   switch (op_) {
1419     case Token::ADD:
1420     case Token::SUB:
1421     case Token::MUL:
1422     case Token::DIV:
1423       __ ret(0);
1424       break;
1425     case Token::MOD:
1426     case Token::BIT_OR:
1427     case Token::BIT_AND:
1428     case Token::BIT_XOR:
1429     case Token::SAR:
1430     case Token::SHL:
1431     case Token::SHR:
1432       __ ret(2 * kPointerSize);
1433       break;
1434     default:
1435       UNREACHABLE();
1436   }
1437 
1438   // 6. For some operations emit inline code to perform floating point
1439   // operations on known smis (e.g., if the result of the operation
1440   // overflowed the smi range).
1441   if (allow_heapnumber_results == NO_HEAPNUMBER_RESULTS) {
1442     __ bind(&use_fp_on_smis);
1443     switch (op_) {
1444       // Undo the effects of some operations, and some register moves.
1445       case Token::SHL:
1446         // The arguments are saved on the stack, and only used from there.
1447         break;
1448       case Token::ADD:
1449         // Revert right = right + left.
1450         __ sub(right, left);
1451         break;
1452       case Token::SUB:
1453         // Revert left = left - right.
1454         __ add(left, right);
1455         break;
1456       case Token::MUL:
1457         // Right was clobbered but a copy is in ebx.
1458         __ mov(right, ebx);
1459         break;
1460       case Token::DIV:
1461         // Left was clobbered but a copy is in edi.  Right is in ebx for
1462         // division.  They should be in eax, ebx for jump to not_smi.
1463         __ mov(eax, edi);
1464         break;
1465       default:
1466         // No other operators jump to use_fp_on_smis.
1467         break;
1468     }
1469     __ jmp(&not_smis);
1470   } else {
1471     ASSERT(allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS);
1472     switch (op_) {
1473       case Token::SHL:
1474       case Token::SHR: {
1475         Comment perform_float(masm, "-- Perform float operation on smis");
1476         __ bind(&use_fp_on_smis);
1477         // The result we want is in left == edx, so we can put the
1478         // allocated heap number in eax.
1479         __ AllocateHeapNumber(eax, ecx, ebx, slow);
1480         // Store the result in the HeapNumber and return.
1481         // It's OK to overwrite the arguments on the stack because we
1482         // are about to return.
1483         if (op_ == Token::SHR) {
1484           __ mov(Operand(esp, 1 * kPointerSize), left);
1485           __ mov(Operand(esp, 2 * kPointerSize), Immediate(0));
1486           __ fild_d(Operand(esp, 1 * kPointerSize));
1487           __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1488         } else {
1489           ASSERT_EQ(Token::SHL, op_);
1490           if (CpuFeatures::IsSupported(SSE2)) {
1491             CpuFeatures::Scope use_sse2(SSE2);
1492             __ cvtsi2sd(xmm0, left);
1493             __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1494           } else {
1495             __ mov(Operand(esp, 1 * kPointerSize), left);
1496             __ fild_s(Operand(esp, 1 * kPointerSize));
1497             __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1498           }
1499         }
1500         __ ret(2 * kPointerSize);
1501         break;
1502       }
1503 
1504       case Token::ADD:
1505       case Token::SUB:
1506       case Token::MUL:
1507       case Token::DIV: {
1508         Comment perform_float(masm, "-- Perform float operation on smis");
1509         __ bind(&use_fp_on_smis);
1510         // Restore arguments to edx, eax.
1511         switch (op_) {
1512           case Token::ADD:
1513             // Revert right = right + left.
1514             __ sub(right, left);
1515             break;
1516           case Token::SUB:
1517             // Revert left = left - right.
1518             __ add(left, right);
1519             break;
1520           case Token::MUL:
1521             // Right was clobbered but a copy is in ebx.
1522             __ mov(right, ebx);
1523             break;
1524           case Token::DIV:
1525             // Left was clobbered but a copy is in edi.  Right is in ebx for
1526             // division.
1527             __ mov(edx, edi);
1528             __ mov(eax, right);
1529             break;
1530           default: UNREACHABLE();
1531             break;
1532         }
1533         __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
1534         if (CpuFeatures::IsSupported(SSE2)) {
1535           CpuFeatures::Scope use_sse2(SSE2);
1536           FloatingPointHelper::LoadSSE2Smis(masm, ebx);
1537           switch (op_) {
1538             case Token::ADD: __ addsd(xmm0, xmm1); break;
1539             case Token::SUB: __ subsd(xmm0, xmm1); break;
1540             case Token::MUL: __ mulsd(xmm0, xmm1); break;
1541             case Token::DIV: __ divsd(xmm0, xmm1); break;
1542             default: UNREACHABLE();
1543           }
1544           __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
1545         } else {  // SSE2 not available, use FPU.
1546           FloatingPointHelper::LoadFloatSmis(masm, ebx);
1547           switch (op_) {
1548             case Token::ADD: __ faddp(1); break;
1549             case Token::SUB: __ fsubp(1); break;
1550             case Token::MUL: __ fmulp(1); break;
1551             case Token::DIV: __ fdivp(1); break;
1552             default: UNREACHABLE();
1553           }
1554           __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
1555         }
1556         __ mov(eax, ecx);
1557         __ ret(0);
1558         break;
1559       }
1560 
1561       default:
1562         break;
1563     }
1564   }
1565 
1566   // 7. Non-smi operands, fall out to the non-smi code with the operands in
1567   // edx and eax.
1568   Comment done_comment(masm, "-- Enter non-smi code");
1569   __ bind(&not_smis);
1570   switch (op_) {
1571     case Token::BIT_OR:
1572     case Token::SHL:
1573     case Token::SAR:
1574     case Token::SHR:
1575       // Right operand is saved in ecx and eax was destroyed by the smi
1576       // check.
1577       __ mov(eax, ecx);
1578       break;
1579 
1580     case Token::DIV:
1581     case Token::MOD:
1582       // Operands are in eax, ebx at this point.
1583       __ mov(edx, eax);
1584       __ mov(eax, ebx);
1585       break;
1586 
1587     default:
1588       break;
1589   }
1590 }
1591 
1592 
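// Editorial sketch (hypothetical helpers, not part of the stubs): the smi
// representation manipulated above. On ia32 a smi is a 31-bit signed
// integer shifted left by one with tag bit 0 (kSmiTag == 0,
// kSmiTagSize == 1), so tagging and untagging are a single shift each.
static inline int32_t SmiTagSketch(int32_t value) {
  // Valid only for values in [-2^30, 2^30 - 1]. Left-shifting a negative
  // value is formally implementation-defined in older C++ dialects but
  // behaves as expected on ia32.
  return value << 1;
}
static inline int32_t SmiUntagSketch(int32_t smi) {
  return smi >> 1;  // Arithmetic shift restores the sign.
}
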
1593 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
1594   Label call_runtime;
1595 
1596   switch (op_) {
1597     case Token::ADD:
1598     case Token::SUB:
1599     case Token::MUL:
1600     case Token::DIV:
1601       break;
1602     case Token::MOD:
1603     case Token::BIT_OR:
1604     case Token::BIT_AND:
1605     case Token::BIT_XOR:
1606     case Token::SAR:
1607     case Token::SHL:
1608     case Token::SHR:
1609       GenerateRegisterArgsPush(masm);
1610       break;
1611     default:
1612       UNREACHABLE();
1613   }
1614 
1615   if (result_type_ == BinaryOpIC::UNINITIALIZED ||
1616       result_type_ == BinaryOpIC::SMI) {
1617     GenerateSmiCode(masm, &call_runtime, NO_HEAPNUMBER_RESULTS);
1618   } else {
1619     GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
1620   }
1621   __ bind(&call_runtime);
1622   switch (op_) {
1623     case Token::ADD:
1624     case Token::SUB:
1625     case Token::MUL:
1626     case Token::DIV:
1627       GenerateTypeTransition(masm);
1628       break;
1629     case Token::MOD:
1630     case Token::BIT_OR:
1631     case Token::BIT_AND:
1632     case Token::BIT_XOR:
1633     case Token::SAR:
1634     case Token::SHL:
1635     case Token::SHR:
1636       GenerateTypeTransitionWithSavedArgs(masm);
1637       break;
1638     default:
1639       UNREACHABLE();
1640   }
1641 }
1642 
1643 
1644 void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
1645   ASSERT(operands_type_ == BinaryOpIC::STRING);
1646   ASSERT(op_ == Token::ADD);
1647   // Try to add arguments as strings, otherwise, transition to the generic
1648   // BinaryOpIC type.
1649   GenerateAddStrings(masm);
1650   GenerateTypeTransition(masm);
1651 }
1652 
1653 
1654 void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
1655   Label call_runtime;
1656   ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
1657   ASSERT(op_ == Token::ADD);
1658   // If both arguments are strings, call the string add stub.
1659   // Otherwise, do a transition.
1660 
1661   // Registers containing left and right operands respectively.
1662   Register left = edx;
1663   Register right = eax;
1664 
1665   // Test if left operand is a string.
1666   __ JumpIfSmi(left, &call_runtime, Label::kNear);
1667   __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
1668   __ j(above_equal, &call_runtime, Label::kNear);
1669 
1670   // Test if right operand is a string.
1671   __ JumpIfSmi(right, &call_runtime, Label::kNear);
1672   __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
1673   __ j(above_equal, &call_runtime, Label::kNear);
1674 
1675   StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
1676   GenerateRegisterArgsPush(masm);
1677   __ TailCallStub(&string_add_stub);
1678 
1679   __ bind(&call_runtime);
1680   GenerateTypeTransition(masm);
1681 }
1682 
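// Editorial sketch (hypothetical helper): the string test used above.
// CmpObjectType loads the instance type of the object's map into ecx;
// strings occupy the lowest instance-type values, so anything below
// FIRST_NONSTRING_TYPE is a string and j(above_equal) rejects the rest.
static inline bool IsStringInstanceTypeSketch(int instance_type) {
  return instance_type < FIRST_NONSTRING_TYPE;
}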
1683 
1684 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
1685   Label call_runtime;
1686   ASSERT(operands_type_ == BinaryOpIC::INT32);
1687 
1688   // Floating point case.
1689   switch (op_) {
1690     case Token::ADD:
1691     case Token::SUB:
1692     case Token::MUL:
1693     case Token::DIV: {
1694       Label not_floats;
1695       Label not_int32;
1696       if (CpuFeatures::IsSupported(SSE2)) {
1697         CpuFeatures::Scope use_sse2(SSE2);
1698         FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
1699         FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
1700         switch (op_) {
1701           case Token::ADD: __ addsd(xmm0, xmm1); break;
1702           case Token::SUB: __ subsd(xmm0, xmm1); break;
1703           case Token::MUL: __ mulsd(xmm0, xmm1); break;
1704           case Token::DIV: __ divsd(xmm0, xmm1); break;
1705           default: UNREACHABLE();
1706         }
1707         // If the result type is currently Int32, check that the result
1708         // value actually fits in an int32.
1708         if (result_type_ <= BinaryOpIC::INT32) {
1709           __ cvttsd2si(ecx, Operand(xmm0));
1710           __ cvtsi2sd(xmm2, ecx);
1711           __ ucomisd(xmm0, xmm2);
1712           __ j(not_zero, &not_int32);
1713           __ j(carry, &not_int32);
1714         }
1715         GenerateHeapResultAllocation(masm, &call_runtime);
1716         __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1717         __ ret(0);
1718       } else {  // SSE2 not available, use FPU.
1719         FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
1720         FloatingPointHelper::LoadFloatOperands(
1721             masm,
1722             ecx,
1723             FloatingPointHelper::ARGS_IN_REGISTERS);
1724         FloatingPointHelper::CheckFloatOperandsAreInt32(masm, &not_int32);
1725         switch (op_) {
1726           case Token::ADD: __ faddp(1); break;
1727           case Token::SUB: __ fsubp(1); break;
1728           case Token::MUL: __ fmulp(1); break;
1729           case Token::DIV: __ fdivp(1); break;
1730           default: UNREACHABLE();
1731         }
1732         Label after_alloc_failure;
1733         GenerateHeapResultAllocation(masm, &after_alloc_failure);
1734         __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1735         __ ret(0);
1736         __ bind(&after_alloc_failure);
1737         __ ffree();
1738         __ jmp(&call_runtime);
1739       }
1740 
1741       __ bind(&not_floats);
1742       __ bind(&not_int32);
1743       GenerateTypeTransition(masm);
1744       break;
1745     }
1746 
1747     case Token::MOD: {
1748       // For MOD we go directly to runtime in the non-smi case.
1749       break;
1750     }
1751     case Token::BIT_OR:
1752     case Token::BIT_AND:
1753     case Token::BIT_XOR:
1754     case Token::SAR:
1755     case Token::SHL:
1756     case Token::SHR: {
1757       GenerateRegisterArgsPush(masm);
1758       Label not_floats;
1759       Label not_int32;
1760       Label non_smi_result;
1761       /*  {
1762         CpuFeatures::Scope use_sse2(SSE2);
1763         FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
1764         FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
1765         }*/
1766       FloatingPointHelper::LoadUnknownsAsIntegers(masm,
1767                                                   use_sse3_,
1768                                                   &not_floats);
1769       FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3_,
1770                                                         &not_int32);
1771       switch (op_) {
1772         case Token::BIT_OR:  __ or_(eax, ecx); break;
1773         case Token::BIT_AND: __ and_(eax, ecx); break;
1774         case Token::BIT_XOR: __ xor_(eax, ecx); break;
1775         case Token::SAR: __ sar_cl(eax); break;
1776         case Token::SHL: __ shl_cl(eax); break;
1777         case Token::SHR: __ shr_cl(eax); break;
1778         default: UNREACHABLE();
1779       }
1780       if (op_ == Token::SHR) {
1781         // Check if result is non-negative and fits in a smi.
1782         __ test(eax, Immediate(0xc0000000));
1783         __ j(not_zero, &call_runtime);
1784       } else {
1785         // Check if result fits in a smi.
1786         __ cmp(eax, 0xc0000000);
1787         __ j(negative, &non_smi_result, Label::kNear);
1788       }
1789       // Tag smi result and return.
1790       __ SmiTag(eax);
1791       __ ret(2 * kPointerSize);  // Drop two pushed arguments from the stack.
1792 
1793       // All ops except SHR return a signed int32 that we load in
1794       // a HeapNumber.
1795       if (op_ != Token::SHR) {
1796         __ bind(&non_smi_result);
1797         // Allocate a heap number if needed.
1798         __ mov(ebx, eax);  // ebx: result
1799         Label skip_allocation;
1800         switch (mode_) {
1801           case OVERWRITE_LEFT:
1802           case OVERWRITE_RIGHT:
1803             // If the operand was an object, we skip the
1804             // allocation of a heap number.
1805             __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
1806                                 1 * kPointerSize : 2 * kPointerSize));
1807             __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
1808             // Fall through!
1809           case NO_OVERWRITE:
1810             __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
1811             __ bind(&skip_allocation);
1812             break;
1813           default: UNREACHABLE();
1814         }
1815         // Store the result in the HeapNumber and return.
1816         if (CpuFeatures::IsSupported(SSE2)) {
1817           CpuFeatures::Scope use_sse2(SSE2);
1818           __ cvtsi2sd(xmm0, ebx);
1819           __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1820         } else {
1821           __ mov(Operand(esp, 1 * kPointerSize), ebx);
1822           __ fild_s(Operand(esp, 1 * kPointerSize));
1823           __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1824         }
1825         __ ret(2 * kPointerSize);  // Drop two pushed arguments from the stack.
1826       }
1827 
1828       __ bind(&not_floats);
1829       __ bind(&not_int32);
1830       GenerateTypeTransitionWithSavedArgs(masm);
1831       break;
1832     }
1833     default: UNREACHABLE(); break;
1834   }
1835 
1836   // If an allocation fails, or SHR or MOD hit a hard case,
1837   // use the runtime system to get the correct result.
1838   __ bind(&call_runtime);
1839 
1840   switch (op_) {
1841     case Token::ADD:
1842       GenerateRegisterArgsPush(masm);
1843       __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
1844       break;
1845     case Token::SUB:
1846       GenerateRegisterArgsPush(masm);
1847       __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
1848       break;
1849     case Token::MUL:
1850       GenerateRegisterArgsPush(masm);
1851       __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
1852       break;
1853     case Token::DIV:
1854       GenerateRegisterArgsPush(masm);
1855       __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
1856       break;
1857     case Token::MOD:
1858       GenerateRegisterArgsPush(masm);
1859       __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
1860       break;
1861     case Token::BIT_OR:
1862       __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
1863       break;
1864     case Token::BIT_AND:
1865       __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
1866       break;
1867     case Token::BIT_XOR:
1868       __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
1869       break;
1870     case Token::SAR:
1871       __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
1872       break;
1873     case Token::SHL:
1874       __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
1875       break;
1876     case Token::SHR:
1877       __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
1878       break;
1879     default:
1880       UNREACHABLE();
1881   }
1882 }
1883 
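// Editorial sketch (hypothetical helper): the single-compare smi range
// test used by the bit-op cases above. cmp(eax, 0xc0000000) computes
// eax + 0x40000000 (mod 2^32); its sign flag is set exactly when eax is
// outside the smi range [-2^30, 2^30 - 1].
static inline bool Int32FitsInSmiSketch(int32_t x) {
  return static_cast<uint32_t>(x) + 0x40000000u < 0x80000000u;
}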
1884 
1885 void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
1886   if (op_ == Token::ADD) {
1887     // Handle string addition here, because it is the only operation
1888     // that does not do a ToNumber conversion on the operands.
1889     GenerateAddStrings(masm);
1890   }
1891 
1892   Factory* factory = masm->isolate()->factory();
1893 
1894   // Convert oddball arguments to numbers.
1895   Label check, done;
1896   __ cmp(edx, factory->undefined_value());
1897   __ j(not_equal, &check, Label::kNear);
1898   if (Token::IsBitOp(op_)) {
1899     __ xor_(edx, edx);
1900   } else {
1901     __ mov(edx, Immediate(factory->nan_value()));
1902   }
1903   __ jmp(&done, Label::kNear);
1904   __ bind(&check);
1905   __ cmp(eax, factory->undefined_value());
1906   __ j(not_equal, &done, Label::kNear);
1907   if (Token::IsBitOp(op_)) {
1908     __ xor_(eax, eax);
1909   } else {
1910     __ mov(eax, Immediate(factory->nan_value()));
1911   }
1912   __ bind(&done);
1913 
1914   GenerateHeapNumberStub(masm);
1915 }
1916 
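// Editorial sketch (hypothetical helper; assumes <limits> is reachable
// via v8.h): the coercion GenerateOddballStub applies to an undefined
// operand. ToNumber(undefined) is NaN, and bit ops then take
// ToInt32(NaN) == 0, so the stub shortcuts undefined to a zero smi for
// Token::IsBitOp and to the canonical NaN heap number otherwise.
static inline double UndefinedOperandValueSketch(bool is_bit_op) {
  return is_bit_op ? 0.0 : std::numeric_limits<double>::quiet_NaN();
}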
1917 
1918 void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
1919   Label call_runtime;
1920 
1921   // Floating point case.
1922   switch (op_) {
1923     case Token::ADD:
1924     case Token::SUB:
1925     case Token::MUL:
1926     case Token::DIV: {
1927       Label not_floats;
1928       if (CpuFeatures::IsSupported(SSE2)) {
1929         CpuFeatures::Scope use_sse2(SSE2);
1930         FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
1931 
1932         switch (op_) {
1933           case Token::ADD: __ addsd(xmm0, xmm1); break;
1934           case Token::SUB: __ subsd(xmm0, xmm1); break;
1935           case Token::MUL: __ mulsd(xmm0, xmm1); break;
1936           case Token::DIV: __ divsd(xmm0, xmm1); break;
1937           default: UNREACHABLE();
1938         }
1939         GenerateHeapResultAllocation(masm, &call_runtime);
1940         __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1941         __ ret(0);
1942       } else {  // SSE2 not available, use FPU.
1943         FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
1944         FloatingPointHelper::LoadFloatOperands(
1945             masm,
1946             ecx,
1947             FloatingPointHelper::ARGS_IN_REGISTERS);
1948         switch (op_) {
1949           case Token::ADD: __ faddp(1); break;
1950           case Token::SUB: __ fsubp(1); break;
1951           case Token::MUL: __ fmulp(1); break;
1952           case Token::DIV: __ fdivp(1); break;
1953           default: UNREACHABLE();
1954         }
1955         Label after_alloc_failure;
1956         GenerateHeapResultAllocation(masm, &after_alloc_failure);
1957         __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1958         __ ret(0);
1959         __ bind(&after_alloc_failure);
1960         __ ffree();
1961         __ jmp(&call_runtime);
1962       }
1963 
1964       __ bind(&not_floats);
1965       GenerateTypeTransition(masm);
1966       break;
1967     }
1968 
1969     case Token::MOD: {
1970       // For MOD we go directly to runtime in the non-smi case.
1971       break;
1972     }
1973     case Token::BIT_OR:
1974     case Token::BIT_AND:
1975     case Token::BIT_XOR:
1976     case Token::SAR:
1977     case Token::SHL:
1978     case Token::SHR: {
1979       GenerateRegisterArgsPush(masm);
1980       Label not_floats;
1981       Label non_smi_result;
1982       FloatingPointHelper::LoadUnknownsAsIntegers(masm,
1983                                                   use_sse3_,
1984                                                   &not_floats);
1985       switch (op_) {
1986         case Token::BIT_OR:  __ or_(eax, ecx); break;
1987         case Token::BIT_AND: __ and_(eax, ecx); break;
1988         case Token::BIT_XOR: __ xor_(eax, ecx); break;
1989         case Token::SAR: __ sar_cl(eax); break;
1990         case Token::SHL: __ shl_cl(eax); break;
1991         case Token::SHR: __ shr_cl(eax); break;
1992         default: UNREACHABLE();
1993       }
1994       if (op_ == Token::SHR) {
1995         // Check if result is non-negative and fits in a smi.
1996         __ test(eax, Immediate(0xc0000000));
1997         __ j(not_zero, &call_runtime);
1998       } else {
1999         // Check if result fits in a smi.
2000         __ cmp(eax, 0xc0000000);
2001         __ j(negative, &non_smi_result, Label::kNear);
2002       }
2003       // Tag smi result and return.
2004       __ SmiTag(eax);
2005       __ ret(2 * kPointerSize);  // Drop two pushed arguments from the stack.
2006 
2007       // All ops except SHR return a signed int32 that we load in
2008       // a HeapNumber.
2009       if (op_ != Token::SHR) {
2010         __ bind(&non_smi_result);
2011         // Allocate a heap number if needed.
2012         __ mov(ebx, eax);  // ebx: result
2013         Label skip_allocation;
2014         switch (mode_) {
2015           case OVERWRITE_LEFT:
2016           case OVERWRITE_RIGHT:
2017             // If the operand was an object, we skip the
2018             // allocation of a heap number.
2019             __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
2020                                 1 * kPointerSize : 2 * kPointerSize));
2021             __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
2022             // Fall through!
2023           case NO_OVERWRITE:
2024             __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
2025             __ bind(&skip_allocation);
2026             break;
2027           default: UNREACHABLE();
2028         }
2029         // Store the result in the HeapNumber and return.
2030         if (CpuFeatures::IsSupported(SSE2)) {
2031           CpuFeatures::Scope use_sse2(SSE2);
2032           __ cvtsi2sd(xmm0, ebx);
2033           __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
2034         } else {
2035           __ mov(Operand(esp, 1 * kPointerSize), ebx);
2036           __ fild_s(Operand(esp, 1 * kPointerSize));
2037           __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
2038         }
2039         __ ret(2 * kPointerSize);  // Drop two pushed arguments from the stack.
2040       }
2041 
2042       __ bind(&not_floats);
2043       GenerateTypeTransitionWithSavedArgs(masm);
2044       break;
2045     }
2046     default: UNREACHABLE(); break;
2047   }
2048 
2049   // If an allocation fails, or SHR or MOD hit a hard case,
2050   // use the runtime system to get the correct result.
2051   __ bind(&call_runtime);
2052 
2053   switch (op_) {
2054     case Token::ADD:
2055       GenerateRegisterArgsPush(masm);
2056       __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
2057       break;
2058     case Token::SUB:
2059       GenerateRegisterArgsPush(masm);
2060       __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
2061       break;
2062     case Token::MUL:
2063       GenerateRegisterArgsPush(masm);
2064       __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
2065       break;
2066     case Token::DIV:
2067       GenerateRegisterArgsPush(masm);
2068       __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
2069       break;
2070     case Token::MOD:
2071       GenerateRegisterArgsPush(masm);
2072       __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
2073       break;
2074     case Token::BIT_OR:
2075       __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
2076       break;
2077     case Token::BIT_AND:
2078       __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
2079       break;
2080     case Token::BIT_XOR:
2081       __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
2082       break;
2083     case Token::SAR:
2084       __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
2085       break;
2086     case Token::SHL:
2087       __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
2088       break;
2089     case Token::SHR:
2090       __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
2091       break;
2092     default:
2093       UNREACHABLE();
2094   }
2095 }
2096 
2097 
2098 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
2099   Label call_runtime;
2100 
2101   Counters* counters = masm->isolate()->counters();
2102   __ IncrementCounter(counters->generic_binary_stub_calls(), 1);
2103 
2104   switch (op_) {
2105     case Token::ADD:
2106     case Token::SUB:
2107     case Token::MUL:
2108     case Token::DIV:
2109       break;
2110     case Token::MOD:
2111     case Token::BIT_OR:
2112     case Token::BIT_AND:
2113     case Token::BIT_XOR:
2114     case Token::SAR:
2115     case Token::SHL:
2116     case Token::SHR:
2117       GenerateRegisterArgsPush(masm);
2118       break;
2119     default:
2120       UNREACHABLE();
2121   }
2122 
2123   GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
2124 
2125   // Floating point case.
2126   switch (op_) {
2127     case Token::ADD:
2128     case Token::SUB:
2129     case Token::MUL:
2130     case Token::DIV: {
2131       Label not_floats;
2132       if (CpuFeatures::IsSupported(SSE2)) {
2133         CpuFeatures::Scope use_sse2(SSE2);
2134         FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
2135 
2136         switch (op_) {
2137           case Token::ADD: __ addsd(xmm0, xmm1); break;
2138           case Token::SUB: __ subsd(xmm0, xmm1); break;
2139           case Token::MUL: __ mulsd(xmm0, xmm1); break;
2140           case Token::DIV: __ divsd(xmm0, xmm1); break;
2141           default: UNREACHABLE();
2142         }
2143         GenerateHeapResultAllocation(masm, &call_runtime);
2144         __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
2145         __ ret(0);
2146       } else {  // SSE2 not available, use FPU.
2147         FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
2148         FloatingPointHelper::LoadFloatOperands(
2149             masm,
2150             ecx,
2151             FloatingPointHelper::ARGS_IN_REGISTERS);
2152         switch (op_) {
2153           case Token::ADD: __ faddp(1); break;
2154           case Token::SUB: __ fsubp(1); break;
2155           case Token::MUL: __ fmulp(1); break;
2156           case Token::DIV: __ fdivp(1); break;
2157           default: UNREACHABLE();
2158         }
2159         Label after_alloc_failure;
2160         GenerateHeapResultAllocation(masm, &after_alloc_failure);
2161         __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
2162         __ ret(0);
2163         __ bind(&after_alloc_failure);
2164         __ ffree();
2165         __ jmp(&call_runtime);
2166       }
2167       __ bind(&not_floats);
2168       break;
2169     }
2170     case Token::MOD: {
2171       // For MOD we go directly to runtime in the non-smi case.
2172       break;
2173     }
2174     case Token::BIT_OR:
2175     case Token::BIT_AND:
2176     case Token::BIT_XOR:
2177     case Token::SAR:
2178     case Token::SHL:
2179     case Token::SHR: {
2180       Label non_smi_result;
2181       FloatingPointHelper::LoadUnknownsAsIntegers(masm,
2182                                                   use_sse3_,
2183                                                   &call_runtime);
2184       switch (op_) {
2185         case Token::BIT_OR:  __ or_(eax, ecx); break;
2186         case Token::BIT_AND: __ and_(eax, ecx); break;
2187         case Token::BIT_XOR: __ xor_(eax, ecx); break;
2188         case Token::SAR: __ sar_cl(eax); break;
2189         case Token::SHL: __ shl_cl(eax); break;
2190         case Token::SHR: __ shr_cl(eax); break;
2191         default: UNREACHABLE();
2192       }
2193       if (op_ == Token::SHR) {
2194         // Check if result is non-negative and fits in a smi.
2195         __ test(eax, Immediate(0xc0000000));
2196         __ j(not_zero, &call_runtime);
2197       } else {
2198         // Check if result fits in a smi.
2199         __ cmp(eax, 0xc0000000);
2200         __ j(negative, &non_smi_result, Label::kNear);
2201       }
2202       // Tag smi result and return.
2203       __ SmiTag(eax);
2204       __ ret(2 * kPointerSize);  // Drop the arguments from the stack.
2205 
2206       // All ops except SHR return a signed int32 that we load in
2207       // a HeapNumber.
2208       if (op_ != Token::SHR) {
2209         __ bind(&non_smi_result);
2210         // Allocate a heap number if needed.
2211         __ mov(ebx, eax);  // ebx: result
2212         Label skip_allocation;
2213         switch (mode_) {
2214           case OVERWRITE_LEFT:
2215           case OVERWRITE_RIGHT:
2216             // If the operand was an object, we skip the
2217             // allocation of a heap number.
2218             __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
2219                                 1 * kPointerSize : 2 * kPointerSize));
2220             __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
2221             // Fall through!
2222           case NO_OVERWRITE:
2223             __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
2224             __ bind(&skip_allocation);
2225             break;
2226           default: UNREACHABLE();
2227         }
2228         // Store the result in the HeapNumber and return.
2229         if (CpuFeatures::IsSupported(SSE2)) {
2230           CpuFeatures::Scope use_sse2(SSE2);
2231           __ cvtsi2sd(xmm0, ebx);
2232           __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
2233         } else {
2234           __ mov(Operand(esp, 1 * kPointerSize), ebx);
2235           __ fild_s(Operand(esp, 1 * kPointerSize));
2236           __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
2237         }
2238         __ ret(2 * kPointerSize);
2239       }
2240       break;
2241     }
2242     default: UNREACHABLE(); break;
2243   }
2244 
2245   // If all else fails, use the runtime system to get the correct
2246   // result.
2247   __ bind(&call_runtime);
2248   switch (op_) {
2249     case Token::ADD: {
2250       GenerateAddStrings(masm);
2251       GenerateRegisterArgsPush(masm);
2252       __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
2253       break;
2254     }
2255     case Token::SUB:
2256       GenerateRegisterArgsPush(masm);
2257       __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
2258       break;
2259     case Token::MUL:
2260       GenerateRegisterArgsPush(masm);
2261       __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
2262       break;
2263     case Token::DIV:
2264       GenerateRegisterArgsPush(masm);
2265       __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
2266       break;
2267     case Token::MOD:
2268       __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
2269       break;
2270     case Token::BIT_OR:
2271       __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
2272       break;
2273     case Token::BIT_AND:
2274       __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
2275       break;
2276     case Token::BIT_XOR:
2277       __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
2278       break;
2279     case Token::SAR:
2280       __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
2281       break;
2282     case Token::SHL:
2283       __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
2284       break;
2285     case Token::SHR:
2286       __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
2287       break;
2288     default:
2289       UNREACHABLE();
2290   }
2291 }
2292 
2293 
2294 void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
2295   ASSERT(op_ == Token::ADD);
2296   Label left_not_string, call_runtime;
2297 
2298   // Registers containing left and right operands respectively.
2299   Register left = edx;
2300   Register right = eax;
2301 
2302   // Test if left operand is a string.
2303   __ JumpIfSmi(left, &left_not_string, Label::kNear);
2304   __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
2305   __ j(above_equal, &left_not_string, Label::kNear);
2306 
2307   StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
2308   GenerateRegisterArgsPush(masm);
2309   __ TailCallStub(&string_add_left_stub);
2310 
2311   // Left operand is not a string, test right.
2312   __ bind(&left_not_string);
2313   __ JumpIfSmi(right, &call_runtime, Label::kNear);
2314   __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
2315   __ j(above_equal, &call_runtime, Label::kNear);
2316 
2317   StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
2318   GenerateRegisterArgsPush(masm);
2319   __ TailCallStub(&string_add_right_stub);
2320 
2321   // Neither argument is a string.
2322   __ bind(&call_runtime);
2323 }
2324 
2325 
2326 void BinaryOpStub::GenerateHeapResultAllocation(
2327     MacroAssembler* masm,
2328     Label* alloc_failure) {
2329   Label skip_allocation;
2330   OverwriteMode mode = mode_;
2331   switch (mode) {
2332     case OVERWRITE_LEFT: {
2333       // If the argument in edx is already an object, we skip the
2334       // allocation of a heap number.
2335       __ JumpIfNotSmi(edx, &skip_allocation, Label::kNear);
2336       // Allocate a heap number for the result. Keep eax and edx intact
2337       // for the possible runtime call.
2338       __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
2339       // Now edx can be overwritten losing one of the arguments as we are
2340       // now done and will not need it any more.
2341       __ mov(edx, ebx);
2342       __ bind(&skip_allocation);
2343       // Use object in edx as a result holder
2344       __ mov(eax, edx);
2345       break;
2346     }
2347     case OVERWRITE_RIGHT:
2348       // If the argument in eax is already an object, we skip the
2349       // allocation of a heap number.
2350       __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
2351       // Fall through!
2352     case NO_OVERWRITE:
2353       // Allocate a heap number for the result. Keep eax and edx intact
2354       // for the possible runtime call.
2355       __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
2356       // Now eax can be overwritten losing one of the arguments as we are
2357       // now done and will not need it any more.
2358       __ mov(eax, ebx);
2359       __ bind(&skip_allocation);
2360       break;
2361     default: UNREACHABLE();
2362   }
2363 }
2364 
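// Editorial sketch (hypothetical helper): the overwrite-mode dispatch
// above. OVERWRITE_LEFT may reuse the HeapNumber already in edx and
// OVERWRITE_RIGHT the one in eax, skipping allocation when that operand
// is not a smi; NO_OVERWRITE always allocates a fresh HeapNumber.
static inline Register OverwritableOperandSketch(OverwriteMode mode) {
  return mode == OVERWRITE_LEFT ? edx : eax;
}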
2365 
2366 void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
2367   __ pop(ecx);
2368   __ push(edx);
2369   __ push(eax);
2370   __ push(ecx);
2371 }
2372 
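// Editorial sketch: the stack effect of GenerateRegisterArgsPush above.
// The return address is popped into ecx, both operands are pushed, and
// the return address goes back on top:
//
//   before:  esp[0] = return address
//   after:   esp[0] = return address
//            esp[4] = eax (right operand)
//            esp[8] = edx (left operand)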
2373 
2374 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
2375   // TAGGED case:
2376   //   Input:
2377   //     esp[4]: tagged number input argument (should be number).
2378   //     esp[0]: return address.
2379   //   Output:
2380   //     eax: tagged double result.
2381   // UNTAGGED case:
2382   //   Input:
2383   //     esp[0]: return address.
2384   //     xmm1: untagged double input argument.
2385   //   Output:
2386   //     xmm1: untagged double result.
2387 
2388   Label runtime_call;
2389   Label runtime_call_clear_stack;
2390   Label skip_cache;
2391   const bool tagged = (argument_type_ == TAGGED);
2392   if (tagged) {
2393     // Test that eax is a number.
2394     Label input_not_smi;
2395     Label loaded;
2396     __ mov(eax, Operand(esp, kPointerSize));
2397     __ JumpIfNotSmi(eax, &input_not_smi, Label::kNear);
2398     // Input is a smi. Untag and load it onto the FPU stack.
2399     // Then load the low and high words of the double into ebx, edx.
2400     STATIC_ASSERT(kSmiTagSize == 1);
2401     __ sar(eax, 1);
2402     __ sub(esp, Immediate(2 * kPointerSize));
2403     __ mov(Operand(esp, 0), eax);
2404     __ fild_s(Operand(esp, 0));
2405     __ fst_d(Operand(esp, 0));
2406     __ pop(edx);
2407     __ pop(ebx);
2408     __ jmp(&loaded, Label::kNear);
2409     __ bind(&input_not_smi);
2410     // Check if input is a HeapNumber.
2411     __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
2412     Factory* factory = masm->isolate()->factory();
2413     __ cmp(ebx, Immediate(factory->heap_number_map()));
2414     __ j(not_equal, &runtime_call);
2415     // Input is a HeapNumber. Push it on the FPU stack and load its
2416     // low and high words into ebx, edx.
2417     __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
2418     __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
2419     __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset));
2420 
2421     __ bind(&loaded);
2422   } else {  // UNTAGGED.
2423     if (CpuFeatures::IsSupported(SSE4_1)) {
2424       CpuFeatures::Scope sse4_scope(SSE4_1);
2425       __ pextrd(edx, xmm1, 0x1);  // copy xmm1[63..32] to edx.
2426     } else {
2427       __ pshufd(xmm0, xmm1, 0x1);
2428       __ movd(edx, xmm0);
2429     }
2430     __ movd(ebx, xmm1);
2431   }
2432 
2433   // ST[0] or xmm1  == double value
2434   // ebx = low 32 bits of double value
2435   // edx = high 32 bits of double value
2436   // Compute hash (the shifts are arithmetic):
2437   //   h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
2438   __ mov(ecx, ebx);
2439   __ xor_(ecx, edx);
2440   __ mov(eax, ecx);
2441   __ sar(eax, 16);
2442   __ xor_(ecx, eax);
2443   __ mov(eax, ecx);
2444   __ sar(eax, 8);
2445   __ xor_(ecx, eax);
2446   ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
2447   __ and_(ecx,
2448           Immediate(TranscendentalCache::SubCache::kCacheSize - 1));
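  // Editorial sketch: equivalent C++ for the hash just computed, assuming
  // arithmetic right shifts on int32_t (as on ia32):
  //   int32_t h = low ^ high;
  //   h ^= h >> 16;
  //   h ^= h >> 8;
  //   h &= TranscendentalCache::SubCache::kCacheSize - 1;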
2449 
2450   // ST[0] or xmm1 == double value.
2451   // ebx = low 32 bits of double value.
2452   // edx = high 32 bits of double value.
2453   // ecx = TranscendentalCache::hash(double value).
2454   ExternalReference cache_array =
2455       ExternalReference::transcendental_cache_array_address(masm->isolate());
2456   __ mov(eax, Immediate(cache_array));
2457   int cache_array_index =
2458       type_ * sizeof(masm->isolate()->transcendental_cache()->caches_[0]);
2459   __ mov(eax, Operand(eax, cache_array_index));
2460   // Eax points to the cache for the type type_.
2461   // If NULL, the cache hasn't been initialized yet, so go through runtime.
2462   __ test(eax, eax);
2463   __ j(zero, &runtime_call_clear_stack);
2464 #ifdef DEBUG
2465   // Check that the layout of cache elements matches expectations.
2466   { TranscendentalCache::SubCache::Element test_elem[2];
2467     char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
2468     char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
2469     char* elem_in0  = reinterpret_cast<char*>(&(test_elem[0].in[0]));
2470     char* elem_in1  = reinterpret_cast<char*>(&(test_elem[0].in[1]));
2471     char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
2472     CHECK_EQ(12, elem2_start - elem_start);  // Two uint32_t's and a pointer.
2473     CHECK_EQ(0, elem_in0 - elem_start);
2474     CHECK_EQ(kIntSize, elem_in1 - elem_start);
2475     CHECK_EQ(2 * kIntSize, elem_out - elem_start);
2476   }
2477 #endif
2478   // Find the address of the ecx'th entry in the cache, i.e., &eax[ecx*12].
2479   __ lea(ecx, Operand(ecx, ecx, times_2, 0));
2480   __ lea(ecx, Operand(eax, ecx, times_4, 0));
2481   // Check if cache matches: Double value is stored in uint32_t[2] array.
2482   Label cache_miss;
2483   __ cmp(ebx, Operand(ecx, 0));
2484   __ j(not_equal, &cache_miss, Label::kNear);
2485   __ cmp(edx, Operand(ecx, kIntSize));
2486   __ j(not_equal, &cache_miss, Label::kNear);
2487   // Cache hit!
2488   Counters* counters = masm->isolate()->counters();
2489   __ IncrementCounter(counters->transcendental_cache_hit(), 1);
2490   __ mov(eax, Operand(ecx, 2 * kIntSize));
2491   if (tagged) {
2492     __ fstp(0);
2493     __ ret(kPointerSize);
2494   } else {  // UNTAGGED.
2495     __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
2496     __ Ret();
2497   }
2498 
2499   __ bind(&cache_miss);
2500   __ IncrementCounter(counters->transcendental_cache_miss(), 1);
2501   // Update cache with new value.
2502   // We are short on registers, so use no_reg as scratch.
2503   // This gives slightly larger code.
2504   if (tagged) {
2505     __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
2506   } else {  // UNTAGGED.
2507     __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
2508     __ sub(esp, Immediate(kDoubleSize));
2509     __ movdbl(Operand(esp, 0), xmm1);
2510     __ fld_d(Operand(esp, 0));
2511     __ add(esp, Immediate(kDoubleSize));
2512   }
2513   GenerateOperation(masm, type_);
2514   __ mov(Operand(ecx, 0), ebx);
2515   __ mov(Operand(ecx, kIntSize), edx);
2516   __ mov(Operand(ecx, 2 * kIntSize), eax);
2517   __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
2518   if (tagged) {
2519     __ ret(kPointerSize);
2520   } else {  // UNTAGGED.
2521     __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
2522     __ Ret();
2523 
2524     // Skip cache and return answer directly, only in untagged case.
2525     __ bind(&skip_cache);
2526     __ sub(esp, Immediate(kDoubleSize));
2527     __ movdbl(Operand(esp, 0), xmm1);
2528     __ fld_d(Operand(esp, 0));
2529     GenerateOperation(masm, type_);
2530     __ fstp_d(Operand(esp, 0));
2531     __ movdbl(xmm1, Operand(esp, 0));
2532     __ add(esp, Immediate(kDoubleSize));
2533     // We return the value in xmm1 without adding it to the cache, but
2534     // we cause a scavenging GC so that future allocations will succeed.
2535     {
2536       FrameScope scope(masm, StackFrame::INTERNAL);
2537       // Allocate an unused object bigger than a HeapNumber.
2538       __ push(Immediate(Smi::FromInt(2 * kDoubleSize)));
2539       __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
2540     }
2541     __ Ret();
2542   }
2543 
2544   // Call runtime, doing whatever allocation and cleanup is necessary.
2545   if (tagged) {
2546     __ bind(&runtime_call_clear_stack);
2547     __ fstp(0);
2548     __ bind(&runtime_call);
2549     ExternalReference runtime =
2550         ExternalReference(RuntimeFunction(), masm->isolate());
2551     __ TailCallExternalReference(runtime, 1, 1);
2552   } else {  // UNTAGGED.
2553     __ bind(&runtime_call_clear_stack);
2554     __ bind(&runtime_call);
2555     __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
2556     __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
2557     {
2558       FrameScope scope(masm, StackFrame::INTERNAL);
2559       __ push(eax);
2560       __ CallRuntime(RuntimeFunction(), 1);
2561     }
2562     __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
2563     __ Ret();
2564   }
2565 }
2566 
2567 
2568 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
2569   switch (type_) {
2570     case TranscendentalCache::SIN: return Runtime::kMath_sin;
2571     case TranscendentalCache::COS: return Runtime::kMath_cos;
2572     case TranscendentalCache::TAN: return Runtime::kMath_tan;
2573     case TranscendentalCache::LOG: return Runtime::kMath_log;
2574     default:
2575       UNIMPLEMENTED();
2576       return Runtime::kAbort;
2577   }
2578 }
2579 
2580 
2581 void TranscendentalCacheStub::GenerateOperation(
2582     MacroAssembler* masm, TranscendentalCache::Type type) {
2583   // Only free register is edi.
2584   // Input value is on FP stack, and also in ebx/edx.
2585   // Input value is possibly in xmm1.
2586   // Address of result (a newly allocated HeapNumber) may be in eax.
2587   if (type == TranscendentalCache::SIN ||
2588       type == TranscendentalCache::COS ||
2589       type == TranscendentalCache::TAN) {
2590     // Both fsin and fcos require arguments in the range +/-2^63 and
2591     // return NaN for infinities and NaN. They can share all code except
2592     // the actual fsin/fcos operation.
2593     Label in_range, done;
2594     // If the argument is outside the range -2^63..2^63, fsin/fcos don't
2595     // work. We must reduce it to the appropriate range.
2596     __ mov(edi, edx);
2597     __ and_(edi, Immediate(0x7ff00000));  // Exponent only.
2598     int supported_exponent_limit =
2599         (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
2600     __ cmp(edi, Immediate(supported_exponent_limit));
2601     __ j(below, &in_range, Label::kNear);
2602     // Check for infinity and NaN. Both return NaN for sin.
2603     __ cmp(edi, Immediate(0x7ff00000));
2604     Label non_nan_result;
2605     __ j(not_equal, &non_nan_result, Label::kNear);
2606     // Input is +/-Infinity or NaN. Result is NaN.
2607     __ fstp(0);
2608     // NaN is represented by 0x7ff8000000000000.
2609     __ push(Immediate(0x7ff80000));
2610     __ push(Immediate(0));
2611     __ fld_d(Operand(esp, 0));
2612     __ add(esp, Immediate(2 * kPointerSize));
2613     __ jmp(&done, Label::kNear);
2614 
2615     __ bind(&non_nan_result);
2616 
2617     // Use fprem1 to restrict the argument to the range +/-2*PI.
2618     __ mov(edi, eax);  // Save eax before using fnstsw_ax.
2619     __ fldpi();
2620     __ fadd(0);
2621     __ fld(1);
2622     // FPU Stack: input, 2*pi, input.
2623     {
2624       Label no_exceptions;
2625       __ fwait();
2626       __ fnstsw_ax();
2627       // Clear if Illegal Operand or Zero Division exceptions are set.
2628       __ test(eax, Immediate(5));
2629       __ j(zero, &no_exceptions, Label::kNear);
2630       __ fnclex();
2631       __ bind(&no_exceptions);
2632     }
2633 
2634     // Compute st(0) % st(1)
2635     {
2636       Label partial_remainder_loop;
2637       __ bind(&partial_remainder_loop);
2638       __ fprem1();
2639       __ fwait();
2640       __ fnstsw_ax();
2641       __ test(eax, Immediate(0x400 /* C2 */));
2642       // If C2 is set, the computation has only a partial result. Loop
2643       // to continue the computation.
2644       __ j(not_zero, &partial_remainder_loop);
2645     }
2646     // FPU Stack: input, 2*pi, input % 2*pi
2647     __ fstp(2);
2648     __ fstp(0);
2649     __ mov(eax, edi);  // Restore eax (allocated HeapNumber pointer).
2650 
2651     // FPU Stack: input % 2*pi
2652     __ bind(&in_range);
2653     switch (type) {
2654       case TranscendentalCache::SIN:
2655         __ fsin();
2656         break;
2657       case TranscendentalCache::COS:
2658         __ fcos();
2659         break;
2660       case TranscendentalCache::TAN:
2661         // FPTAN calculates the tangent of st(0) and pushes 1.0 onto the
2662         // FP register stack.
2663         __ fptan();
2664         __ fstp(0);  // Pop FP register stack.
2665         break;
2666       default:
2667         UNREACHABLE();
2668     }
2669     __ bind(&done);
2670   } else {
2671     ASSERT(type == TranscendentalCache::LOG);
2672     __ fldln2();
2673     __ fxch();
2674     __ fyl2x();
2675   }
2676 }
2677 
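// Editorial sketch (hypothetical helper, not stub code; assumes <cmath>
// is reachable via v8.h): the range reduction done by the fprem1 loop
// above. fprem1 computes the IEEE-754 remainder, as std::remainder does;
// the reduced argument differs from the input by a multiple of 2*pi, so
// the sine (or cosine/tangent) is unchanged.
static inline double ReducedSinSketch(double x) {
  double reduced = std::remainder(x, 2.0 * 3.141592653589793);
  return std::sin(reduced);
}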
2678 
2679 // Input: edx, eax are the left and right objects of a bit op.
2680 // Output: eax, ecx are left and right integers for a bit op.
2681 void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm,
2682                                                  bool use_sse3,
2683                                                  Label* conversion_failure) {
2684   // Check float operands.
2685   Label arg1_is_object, check_undefined_arg1;
2686   Label arg2_is_object, check_undefined_arg2;
2687   Label load_arg2, done;
2688 
2689   // Test if arg1 is a Smi.
2690   __ JumpIfNotSmi(edx, &arg1_is_object, Label::kNear);
2691 
2692   __ SmiUntag(edx);
2693   __ jmp(&load_arg2);
2694 
2695   // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
2696   __ bind(&check_undefined_arg1);
2697   Factory* factory = masm->isolate()->factory();
2698   __ cmp(edx, factory->undefined_value());
2699   __ j(not_equal, conversion_failure);
2700   __ mov(edx, Immediate(0));
2701   __ jmp(&load_arg2);
2702 
2703   __ bind(&arg1_is_object);
2704   __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
2705   __ cmp(ebx, factory->heap_number_map());
2706   __ j(not_equal, &check_undefined_arg1);
2707 
2708   // Get the untagged integer version of the edx heap number in ecx.
2709   IntegerConvert(masm, edx, use_sse3, conversion_failure);
2710   __ mov(edx, ecx);
2711 
2712   // Here edx has the untagged integer, eax has a Smi or a heap number.
2713   __ bind(&load_arg2);
2714 
2715   // Test if arg2 is a Smi.
2716   __ JumpIfNotSmi(eax, &arg2_is_object, Label::kNear);
2717 
2718   __ SmiUntag(eax);
2719   __ mov(ecx, eax);
2720   __ jmp(&done);
2721 
2722   // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
2723   __ bind(&check_undefined_arg2);
2724   __ cmp(eax, factory->undefined_value());
2725   __ j(not_equal, conversion_failure);
2726   __ mov(ecx, Immediate(0));
2727   __ jmp(&done);
2728 
2729   __ bind(&arg2_is_object);
2730   __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
2731   __ cmp(ebx, factory->heap_number_map());
2732   __ j(not_equal, &check_undefined_arg2);
2733 
2734   // Get the untagged integer version of the eax heap number in ecx.
2735   IntegerConvert(masm, eax, use_sse3, conversion_failure);
2736   __ bind(&done);
2737   __ mov(eax, edx);
2738 }
2739 
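// Editorial sketch (hypothetical helper; assumes <cmath> is reachable via
// v8.h): roughly the ECMA-262 ToInt32 contract behind
// LoadUnknownsAsIntegers and IntegerConvert: non-finite values become 0,
// everything else is truncated and wrapped modulo 2^32.
static inline int32_t ToInt32Sketch(double d) {
  if (!std::isfinite(d)) return 0;  // NaN and +/-Infinity map to 0.
  double m = std::fmod(std::trunc(d), 4294967296.0);  // In (-2^32, 2^32).
  if (m < 0) m += 4294967296.0;  // Wrap into [0, 2^32).
  // The uint32 -> int32 conversion wraps (two's complement) on ia32.
  return static_cast<int32_t>(static_cast<uint32_t>(m));
}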
2740 
2741 void FloatingPointHelper::CheckLoadedIntegersWereInt32(MacroAssembler* masm,
2742                                                        bool use_sse3,
2743                                                        Label* not_int32) {
2744   return;  // Not implemented on ia32; loaded integers are accepted as-is.
2745 }
2746 
2747 
2748 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
2749                                            Register number) {
2750   Label load_smi, done;
2751 
2752   __ JumpIfSmi(number, &load_smi, Label::kNear);
2753   __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
2754   __ jmp(&done, Label::kNear);
2755 
2756   __ bind(&load_smi);
2757   __ SmiUntag(number);
2758   __ push(number);
2759   __ fild_s(Operand(esp, 0));
2760   __ pop(number);
2761 
2762   __ bind(&done);
2763 }
2764 
2765 
2766 void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) {
2767   Label load_smi_edx, load_eax, load_smi_eax, done;
2768   // Load operand in edx into xmm0.
2769   __ JumpIfSmi(edx, &load_smi_edx, Label::kNear);
2770   __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
2771 
2772   __ bind(&load_eax);
2773   // Load operand in eax into xmm1.
2774   __ JumpIfSmi(eax, &load_smi_eax, Label::kNear);
2775   __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
2776   __ jmp(&done, Label::kNear);
2777 
2778   __ bind(&load_smi_edx);
2779   __ SmiUntag(edx);  // Untag smi before converting to float.
2780   __ cvtsi2sd(xmm0, edx);
2781   __ SmiTag(edx);  // Retag smi for heap number overwriting test.
2782   __ jmp(&load_eax);
2783 
2784   __ bind(&load_smi_eax);
2785   __ SmiUntag(eax);  // Untag smi before converting to float.
2786   __ cvtsi2sd(xmm1, eax);
2787   __ SmiTag(eax);  // Retag smi for heap number overwriting test.
2788 
2789   __ bind(&done);
2790 }
2791 
2792 
2793 void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
2794                                            Label* not_numbers) {
2795   Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
2796   // Load operand in edx into xmm0, or branch to not_numbers.
2797   __ JumpIfSmi(edx, &load_smi_edx, Label::kNear);
2798   Factory* factory = masm->isolate()->factory();
2799   __ cmp(FieldOperand(edx, HeapObject::kMapOffset), factory->heap_number_map());
2800   __ j(not_equal, not_numbers);  // Argument in edx is not a number.
2801   __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
2802   __ bind(&load_eax);
2803   // Load operand in eax into xmm1, or branch to not_numbers.
2804   __ JumpIfSmi(eax, &load_smi_eax, Label::kNear);
2805   __ cmp(FieldOperand(eax, HeapObject::kMapOffset), factory->heap_number_map());
2806   __ j(equal, &load_float_eax, Label::kNear);
2807   __ jmp(not_numbers);  // Argument in eax is not a number.
2808   __ bind(&load_smi_edx);
2809   __ SmiUntag(edx);  // Untag smi before converting to float.
2810   __ cvtsi2sd(xmm0, edx);
2811   __ SmiTag(edx);  // Retag smi for heap number overwriting test.
2812   __ jmp(&load_eax);
2813   __ bind(&load_smi_eax);
2814   __ SmiUntag(eax);  // Untag smi before converting to float.
2815   __ cvtsi2sd(xmm1, eax);
2816   __ SmiTag(eax);  // Retag smi for heap number overwriting test.
2817   __ jmp(&done, Label::kNear);
2818   __ bind(&load_float_eax);
2819   __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
2820   __ bind(&done);
2821 }
2822 
2823 
2824 void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
2825                                        Register scratch) {
2826   const Register left = edx;
2827   const Register right = eax;
2828   __ mov(scratch, left);
2829   ASSERT(!scratch.is(right));  // We're about to clobber scratch.
2830   __ SmiUntag(scratch);
2831   __ cvtsi2sd(xmm0, scratch);
2832 
2833   __ mov(scratch, right);
2834   __ SmiUntag(scratch);
2835   __ cvtsi2sd(xmm1, scratch);
2836 }
2837 
2838 
2839 void FloatingPointHelper::CheckSSE2OperandsAreInt32(MacroAssembler* masm,
2840                                                     Label* non_int32,
2841                                                     Register scratch) {
2842   __ cvttsd2si(scratch, Operand(xmm0));
2843   __ cvtsi2sd(xmm2, scratch);
2844   __ ucomisd(xmm0, xmm2);
2845   __ j(not_zero, non_int32);
2846   __ j(carry, non_int32);
2847   __ cvttsd2si(scratch, Operand(xmm1));
2848   __ cvtsi2sd(xmm2, scratch);
2849   __ ucomisd(xmm1, xmm2);
2850   __ j(not_zero, non_int32);
2851   __ j(carry, non_int32);
2852 }
2853 
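// Editorial sketch (hypothetical helper): the round-trip int32 test
// performed above with cvttsd2si/cvtsi2sd/ucomisd. A double is an int32
// exactly when truncation to int32 and conversion back reproduces it;
// the j(carry) check rejects NaN, which compares unordered.
static inline bool IsInt32Sketch(double d) {
  if (d != d) return false;  // NaN: ucomisd would set the carry flag.
  if (d < -2147483648.0 || d > 2147483647.0) return false;  // Would overflow.
  int32_t i = static_cast<int32_t>(d);  // Truncates, like cvttsd2si.
  return static_cast<double>(i) == d;   // Round trip, like ucomisd.
}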
2854 
2855 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
2856                                             Register scratch,
2857                                             ArgLocation arg_location) {
2858   Label load_smi_1, load_smi_2, done_load_1, done;
2859   if (arg_location == ARGS_IN_REGISTERS) {
2860     __ mov(scratch, edx);
2861   } else {
2862     __ mov(scratch, Operand(esp, 2 * kPointerSize));
2863   }
2864   __ JumpIfSmi(scratch, &load_smi_1, Label::kNear);
2865   __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
2866   __ bind(&done_load_1);
2867 
2868   if (arg_location == ARGS_IN_REGISTERS) {
2869     __ mov(scratch, eax);
2870   } else {
2871     __ mov(scratch, Operand(esp, 1 * kPointerSize));
2872   }
2873   __ JumpIfSmi(scratch, &load_smi_2, Label::kNear);
2874   __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
2875   __ jmp(&done, Label::kNear);
2876 
2877   __ bind(&load_smi_1);
2878   __ SmiUntag(scratch);
2879   __ push(scratch);
2880   __ fild_s(Operand(esp, 0));
2881   __ pop(scratch);
2882   __ jmp(&done_load_1);
2883 
2884   __ bind(&load_smi_2);
2885   __ SmiUntag(scratch);
2886   __ push(scratch);
2887   __ fild_s(Operand(esp, 0));
2888   __ pop(scratch);
2889 
2890   __ bind(&done);
2891 }
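
// Note on the smi paths above: there is no instruction that moves a general
// purpose register onto the x87 stack directly, so the untagged value takes
// a detour through memory - push it, load it with fild_s from (esp), then
// pop to restore the stack pointer:
//
//   push scratch        ; spill the untagged int32
//   fild dword [esp]    ; load it onto the FPU stack, converting to double
//   pop scratch         ; rebalance esp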


void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm,
                                        Register scratch) {
  const Register left = edx;
  const Register right = eax;
  __ mov(scratch, left);
  ASSERT(!scratch.is(right));  // We're about to clobber scratch.
  __ SmiUntag(scratch);
  __ push(scratch);
  __ fild_s(Operand(esp, 0));

  __ mov(scratch, right);
  __ SmiUntag(scratch);
  __ mov(Operand(esp, 0), scratch);
  __ fild_s(Operand(esp, 0));
  __ pop(scratch);
}


void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
                                             Label* non_float,
                                             Register scratch) {
  Label test_other, done;
  // Test if both operands are floats or smis -> scratch = k_is_float;
  // otherwise scratch = k_not_float.
  __ JumpIfSmi(edx, &test_other, Label::kNear);
  __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
  Factory* factory = masm->isolate()->factory();
  __ cmp(scratch, factory->heap_number_map());
  __ j(not_equal, non_float);  // Argument in edx is not a number -> NaN.

  __ bind(&test_other);
  __ JumpIfSmi(eax, &done, Label::kNear);
  __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
  __ cmp(scratch, factory->heap_number_map());
  __ j(not_equal, non_float);  // Argument in eax is not a number -> NaN.

  // Fall-through: Both operands are numbers.
  __ bind(&done);
}


void FloatingPointHelper::CheckFloatOperandsAreInt32(MacroAssembler* masm,
                                                     Label* non_int32) {
  return;
}


void MathPowStub::Generate(MacroAssembler* masm) {
  CpuFeatures::Scope use_sse2(SSE2);
  Factory* factory = masm->isolate()->factory();
  const Register exponent = eax;
  const Register base = edx;
  const Register scratch = ecx;
  const XMMRegister double_result = xmm3;
  const XMMRegister double_base = xmm2;
  const XMMRegister double_exponent = xmm1;
  const XMMRegister double_scratch = xmm4;

  Label call_runtime, done, exponent_not_smi, int_exponent;

  // Save 1 in double_result - we need this several times later on.
  __ mov(scratch, Immediate(1));
  __ cvtsi2sd(double_result, scratch);

  if (exponent_type_ == ON_STACK) {
    Label base_is_smi, unpack_exponent;
    // The exponent and base are supplied as arguments on the stack.
    // This can only happen if the stub is called from non-optimized code.
    // Load input parameters from stack.
    __ mov(base, Operand(esp, 2 * kPointerSize));
    __ mov(exponent, Operand(esp, 1 * kPointerSize));

    __ JumpIfSmi(base, &base_is_smi, Label::kNear);
    __ cmp(FieldOperand(base, HeapObject::kMapOffset),
           factory->heap_number_map());
    __ j(not_equal, &call_runtime);

    __ movdbl(double_base, FieldOperand(base, HeapNumber::kValueOffset));
    __ jmp(&unpack_exponent, Label::kNear);

    __ bind(&base_is_smi);
    __ SmiUntag(base);
    __ cvtsi2sd(double_base, base);

    __ bind(&unpack_exponent);
    __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
    __ SmiUntag(exponent);
    __ jmp(&int_exponent);

    __ bind(&exponent_not_smi);
    __ cmp(FieldOperand(exponent, HeapObject::kMapOffset),
           factory->heap_number_map());
    __ j(not_equal, &call_runtime);
    __ movdbl(double_exponent,
              FieldOperand(exponent, HeapNumber::kValueOffset));
  } else if (exponent_type_ == TAGGED) {
    __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
    __ SmiUntag(exponent);
    __ jmp(&int_exponent);

    __ bind(&exponent_not_smi);
    __ movdbl(double_exponent,
              FieldOperand(exponent, HeapNumber::kValueOffset));
  }

  if (exponent_type_ != INTEGER) {
    Label fast_power;
    // Detect integer exponents stored as doubles.
    __ cvttsd2si(exponent, Operand(double_exponent));
    // Skip to runtime if possibly NaN (indicated by the indefinite integer).
    __ cmp(exponent, Immediate(0x80000000u));
    __ j(equal, &call_runtime);
    __ cvtsi2sd(double_scratch, exponent);
    // Already ruled out NaNs for exponent.
    __ ucomisd(double_exponent, double_scratch);
    __ j(equal, &int_exponent);

    if (exponent_type_ == ON_STACK) {
      // Detect square root case.  Crankshaft detects constant +/-0.5 at
      // compile time and uses DoMathPowHalf instead.  We then skip this check
      // for non-constant cases of +/-0.5 as these hardly occur.
      Label continue_sqrt, continue_rsqrt, not_plus_half;
      // Test for 0.5.
      // Load double_scratch with 0.5.
      __ mov(scratch, Immediate(0x3F000000u));
      __ movd(double_scratch, scratch);
      __ cvtss2sd(double_scratch, double_scratch);
      // Already ruled out NaNs for exponent.
      __ ucomisd(double_scratch, double_exponent);
      __ j(not_equal, &not_plus_half, Label::kNear);

      // Calculates square root of base.  Check for the special case of
      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
      // According to IEEE-754, single-precision -Infinity has the highest
      // 9 bits set and the lowest 23 bits cleared.
      __ mov(scratch, 0xFF800000u);
      __ movd(double_scratch, scratch);
      __ cvtss2sd(double_scratch, double_scratch);
      __ ucomisd(double_base, double_scratch);
      // Comparing -Infinity with NaN results in "unordered", which sets the
      // zero flag as if both were equal.  However, it also sets the carry flag.
      __ j(not_equal, &continue_sqrt, Label::kNear);
      __ j(carry, &continue_sqrt, Label::kNear);

      // Set result to Infinity in the special case.
      __ xorps(double_result, double_result);
      __ subsd(double_result, double_scratch);
      __ jmp(&done);

      __ bind(&continue_sqrt);
      // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
      __ xorps(double_scratch, double_scratch);
      __ addsd(double_scratch, double_base);  // Convert -0 to +0.
      __ sqrtsd(double_result, double_scratch);
      __ jmp(&done);

      // Test for -0.5.
      __ bind(&not_plus_half);
      // Load double_exponent with -0.5 by subtracting 1.
      __ subsd(double_scratch, double_result);
      // Already ruled out NaNs for exponent.
      __ ucomisd(double_scratch, double_exponent);
      __ j(not_equal, &fast_power, Label::kNear);

      // Calculates reciprocal of square root of base.  Check for the special
      // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
      // According to IEEE-754, single-precision -Infinity has the highest
      // 9 bits set and the lowest 23 bits cleared.
      __ mov(scratch, 0xFF800000u);
      __ movd(double_scratch, scratch);
      __ cvtss2sd(double_scratch, double_scratch);
      __ ucomisd(double_base, double_scratch);
      // Comparing -Infinity with NaN results in "unordered", which sets the
      // zero flag as if both were equal.  However, it also sets the carry flag.
      __ j(not_equal, &continue_rsqrt, Label::kNear);
      __ j(carry, &continue_rsqrt, Label::kNear);

      // Set result to 0 in the special case.
      __ xorps(double_result, double_result);
      __ jmp(&done);

      __ bind(&continue_rsqrt);
      // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
      __ xorps(double_exponent, double_exponent);
      __ addsd(double_exponent, double_base);  // Convert -0 to +0.
      __ sqrtsd(double_exponent, double_exponent);
      __ divsd(double_result, double_exponent);
      __ jmp(&done);
    }
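
    // For reference, the special cases handled above correspond to these
    // JavaScript results (ES5 15.8.2.13):
    //   Math.pow(-Infinity,  0.5) === Infinity
    //   Math.pow(-Infinity, -0.5) === 0
    //   Math.pow(-0, 0.5)         === +0  (hence the -0 to +0 conversions)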

    // Using FPU instructions to calculate power.
    Label fast_power_failed;
    __ bind(&fast_power);
    __ fnclex();  // Clear flags to catch exceptions later.
    // Transfer (B)ase and (E)xponent onto the FPU register stack.
    __ sub(esp, Immediate(kDoubleSize));
    __ movdbl(Operand(esp, 0), double_exponent);
    __ fld_d(Operand(esp, 0));  // E
    __ movdbl(Operand(esp, 0), double_base);
    __ fld_d(Operand(esp, 0));  // B, E

    // Exponent is in st(1) and base is in st(0).
    // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
    // FYL2X calculates st(1) * log2(st(0))
    __ fyl2x();    // X
    __ fld(0);     // X, X
    __ frndint();  // rnd(X), X
    __ fsub(1);    // rnd(X), X-rnd(X)
    __ fxch(1);    // X - rnd(X), rnd(X)
    // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
    __ f2xm1();    // 2^(X-rnd(X)) - 1, rnd(X)
    __ fld1();     // 1, 2^(X-rnd(X)) - 1, rnd(X)
    __ faddp(1);   // 2^(X-rnd(X)), rnd(X)
    // FSCALE calculates st(0) * 2^st(1)
    __ fscale();   // 2^X, rnd(X)
    __ fstp(1);
    // Bail out to runtime in case of exceptions in the status word.
    __ fnstsw_ax();
    __ test_b(eax, 0x5F);  // We check for all but precision exception.
    __ j(not_zero, &fast_power_failed, Label::kNear);
    __ fstp_d(Operand(esp, 0));
    __ movdbl(double_result, Operand(esp, 0));
    __ add(esp, Immediate(kDoubleSize));
    __ jmp(&done);

    __ bind(&fast_power_failed);
    __ fninit();
    __ add(esp, Immediate(kDoubleSize));
    __ jmp(&call_runtime);
  }
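
  // A hedged C++ sketch of what the x87 sequence above computes, using the
  // identity B^E = 2^(E * log2(B)) and splitting the exponent so that f2xm1
  // only ever sees a fractional argument in (-1, 1) (names are ours):
  //
  //   double FastPow(double b, double e) {
  //     double x = e * std::log2(b);             // fyl2x
  //     double r = std::nearbyint(x);            // frndint
  //     return std::exp2(x - r) * std::exp2(r);  // f2xm1 (+1), then fscale
  //   }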

  // Calculate power with integer exponent.
  __ bind(&int_exponent);
  const XMMRegister double_scratch2 = double_exponent;
  __ mov(scratch, exponent);  // Back up exponent.
  __ movsd(double_scratch, double_base);  // Back up base.
  __ movsd(double_scratch2, double_result);  // Load double_scratch2 with 1.

  // Get absolute value of exponent.
  Label no_neg, while_true, no_multiply;
  __ test(scratch, scratch);
  __ j(positive, &no_neg, Label::kNear);
  __ neg(scratch);
  __ bind(&no_neg);

  __ bind(&while_true);
  __ shr(scratch, 1);
  __ j(not_carry, &no_multiply, Label::kNear);
  __ mulsd(double_result, double_scratch);
  __ bind(&no_multiply);

  __ mulsd(double_scratch, double_scratch);
  __ j(not_zero, &while_true);

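  // The loop above is binary exponentiation (square-and-multiply) over the
  // absolute value of the exponent; the negative case is handled below by
  // taking 1/result. A minimal C++ sketch of the same algorithm:
  //
  //   double PowInt(double base, int exponent) {
  //     unsigned e = exponent < 0 ? -static_cast<unsigned>(exponent)
  //                               : static_cast<unsigned>(exponent);
  //     double result = 1.0;
  //     while (e != 0) {
  //       if (e & 1) result *= base;  // low bit set: fold base into result
  //       base *= base;               // square once per bit
  //       e >>= 1;
  //     }
  //     return exponent < 0 ? 1.0 / result : result;
  //   }
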
  // scratch has the original value of the exponent - if the exponent is
  // negative, return 1/result.
  __ test(exponent, exponent);
  __ j(positive, &done);
  __ divsd(double_scratch2, double_result);
  __ movsd(double_result, double_scratch2);
  // Test whether result is zero.  Bail out to check for subnormal result.
  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
  __ xorps(double_scratch2, double_scratch2);
  __ ucomisd(double_scratch2, double_result);  // Result cannot be NaN.
  // double_exponent aliased as double_scratch2 has already been overwritten
  // and may not have contained the exponent value in the first place when the
  // exponent is a smi.  We reset it with exponent value before bailing out.
  __ j(not_equal, &done);
  __ cvtsi2sd(double_exponent, exponent);

  // Returning or bailing out.
  Counters* counters = masm->isolate()->counters();
  if (exponent_type_ == ON_STACK) {
    // The arguments are still on the stack.
    __ bind(&call_runtime);
    __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);

    // The stub is called from non-optimized code, which expects the result
    // as heap number in exponent.
    __ bind(&done);
    __ AllocateHeapNumber(eax, scratch, base, &call_runtime);
    __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), double_result);
    __ IncrementCounter(counters->math_pow(), 1);
    __ ret(2 * kPointerSize);
  } else {
    __ bind(&call_runtime);
    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ PrepareCallCFunction(4, scratch);
      __ movdbl(Operand(esp, 0 * kDoubleSize), double_base);
      __ movdbl(Operand(esp, 1 * kDoubleSize), double_exponent);
      __ CallCFunction(
          ExternalReference::power_double_double_function(masm->isolate()), 4);
    }
    // Return value is in st(0) on ia32.
    // Store it into the (fixed) result register.
    __ sub(esp, Immediate(kDoubleSize));
    __ fstp_d(Operand(esp, 0));
    __ movdbl(double_result, Operand(esp, 0));
    __ add(esp, Immediate(kDoubleSize));

    __ bind(&done);
    __ IncrementCounter(counters->math_pow(), 1);
    __ ret(0);
  }
}


void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
  // The key is in edx and the parameter count is in eax.

  // The displacement is used for skipping the frame pointer on the
  // stack. It is the offset of the last parameter (if any) relative
  // to the frame pointer.
  static const int kDisplacement = 1 * kPointerSize;

  // Check that the key is a smi.
  Label slow;
  __ JumpIfNotSmi(edx, &slow, Label::kNear);

  // Check if the calling frame is an arguments adaptor frame.
  Label adaptor;
  __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
  __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
  __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ j(equal, &adaptor, Label::kNear);

  // Check index against formal parameters count limit passed in
  // through register eax. Use unsigned comparison to get negative
  // check for free.
  __ cmp(edx, eax);
  __ j(above_equal, &slow, Label::kNear);

  // Read the argument from the stack and return it.
  STATIC_ASSERT(kSmiTagSize == 1);
  STATIC_ASSERT(kSmiTag == 0);  // Shifting code depends on these.
  __ lea(ebx, Operand(ebp, eax, times_2, 0));
  __ neg(edx);
  __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
  __ ret(0);

  // Arguments adaptor case: Check index against actual arguments
  // limit found in the arguments adaptor frame. Use unsigned
  // comparison to get negative check for free.
  __ bind(&adaptor);
  __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ cmp(edx, ecx);
  __ j(above_equal, &slow, Label::kNear);

  // Read the argument from the stack and return it.
  STATIC_ASSERT(kSmiTagSize == 1);
  STATIC_ASSERT(kSmiTag == 0);  // Shifting code depends on these.
  __ lea(ebx, Operand(ebx, ecx, times_2, 0));
  __ neg(edx);
  __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
  __ ret(0);

  // Slow-case: Handle non-smi or out-of-bounds access to arguments
  // by calling the runtime system.
  __ bind(&slow);
  __ pop(ebx);  // Return address.
  __ push(edx);
  __ push(ebx);
  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
}
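
// A note on the addressing in the fast paths above: both the key and the
// count are smis, i.e. the untagged value shifted left by one, so scaling a
// smi operand by times_2 multiplies the untagged value by kPointerSize. The
// sequence (sketched in assembly, registers as used above)
//
//   lea ebx, [frame_base + count*4]          ; smi count, times_2 scale
//   neg edx                                  ; edx = -key (still a smi)
//   mov eax, [ebx + edx*4 + kDisplacement]   ; smi key, times_2 scale
//
// reads the slot count - key entries up from the frame base, which is where
// that argument lives since arguments are laid out from the high end.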


void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
  // esp[0] : return address
  // esp[4] : number of parameters
  // esp[8] : receiver displacement
  // esp[12] : function

  // Check if the calling frame is an arguments adaptor frame.
  Label runtime;
  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
  __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
  __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ j(not_equal, &runtime, Label::kNear);

  // Patch the arguments.length and the parameters pointer.
  __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ mov(Operand(esp, 1 * kPointerSize), ecx);
  __ lea(edx, Operand(edx, ecx, times_2,
                      StandardFrameConstants::kCallerSPOffset));
  __ mov(Operand(esp, 2 * kPointerSize), edx);

  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
}


void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
  // esp[0] : return address
  // esp[4] : number of parameters (tagged)
  // esp[8] : receiver displacement
  // esp[12] : function

  // ebx = parameter count (tagged)
  __ mov(ebx, Operand(esp, 1 * kPointerSize));

  // Check if the calling frame is an arguments adaptor frame.
  // TODO(rossberg): Factor out some of the bits that are shared with the other
  // Generate* functions.
  Label runtime;
  Label adaptor_frame, try_allocate;
  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
  __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
  __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ j(equal, &adaptor_frame, Label::kNear);

  // No adaptor, parameter count = argument count.
  __ mov(ecx, ebx);
  __ jmp(&try_allocate, Label::kNear);

  // We have an adaptor frame. Patch the parameters pointer.
  __ bind(&adaptor_frame);
  __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ lea(edx, Operand(edx, ecx, times_2,
                      StandardFrameConstants::kCallerSPOffset));
  __ mov(Operand(esp, 2 * kPointerSize), edx);

  // ebx = parameter count (tagged)
  // ecx = argument count (tagged)
  // esp[4] = parameter count (tagged)
  // esp[8] = address of receiver argument
  // Compute the mapped parameter count = min(ebx, ecx) in ebx.
  __ cmp(ebx, ecx);
  __ j(less_equal, &try_allocate, Label::kNear);
  __ mov(ebx, ecx);

  __ bind(&try_allocate);

  // Save mapped parameter count.
  __ push(ebx);

  // Compute the sizes of backing store, parameter map, and arguments object.
  // 1. Parameter map, has 2 extra words containing context and backing store.
  const int kParameterMapHeaderSize =
      FixedArray::kHeaderSize + 2 * kPointerSize;
  Label no_parameter_map;
  __ test(ebx, ebx);
  __ j(zero, &no_parameter_map, Label::kNear);
  __ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize));
  __ bind(&no_parameter_map);

  // 2. Backing store.
  __ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));

  // 3. Arguments object.
  __ add(ebx, Immediate(Heap::kArgumentsObjectSize));

  // Do the allocation of all three objects in one go.
  __ AllocateInNewSpace(ebx, eax, edx, edi, &runtime, TAG_OBJECT);

  // eax = address of new object(s) (tagged)
  // ecx = argument count (tagged)
  // esp[0] = mapped parameter count (tagged)
  // esp[8] = parameter count (tagged)
  // esp[12] = address of receiver argument
  // Get the arguments boilerplate from the current (global) context into edi.
  Label has_mapped_parameters, copy;
  __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
  __ mov(ebx, Operand(esp, 0 * kPointerSize));
  __ test(ebx, ebx);
  __ j(not_zero, &has_mapped_parameters, Label::kNear);
  __ mov(edi, Operand(edi,
         Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX)));
  __ jmp(&copy, Label::kNear);

  __ bind(&has_mapped_parameters);
  __ mov(edi, Operand(edi,
            Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX)));
  __ bind(&copy);

  // eax = address of new object (tagged)
  // ebx = mapped parameter count (tagged)
  // ecx = argument count (tagged)
  // edi = address of boilerplate object (tagged)
  // esp[0] = mapped parameter count (tagged)
  // esp[8] = parameter count (tagged)
  // esp[12] = address of receiver argument
  // Copy the JS object part.
  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
    __ mov(edx, FieldOperand(edi, i));
    __ mov(FieldOperand(eax, i), edx);
  }

  // Set up the callee in-object property.
  STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
  __ mov(edx, Operand(esp, 4 * kPointerSize));
  __ mov(FieldOperand(eax, JSObject::kHeaderSize +
                      Heap::kArgumentsCalleeIndex * kPointerSize),
         edx);

  // Use the length (smi tagged) and set that as an in-object property too.
  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
  __ mov(FieldOperand(eax, JSObject::kHeaderSize +
                      Heap::kArgumentsLengthIndex * kPointerSize),
         ecx);

  // Set up the elements pointer in the allocated arguments object.
  // If we allocated a parameter map, edi will point there, otherwise to the
  // backing store.
  __ lea(edi, Operand(eax, Heap::kArgumentsObjectSize));
  __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);

  // eax = address of new object (tagged)
  // ebx = mapped parameter count (tagged)
  // ecx = argument count (tagged)
  // edi = address of parameter map or backing store (tagged)
  // esp[0] = mapped parameter count (tagged)
  // esp[8] = parameter count (tagged)
  // esp[12] = address of receiver argument
  // Free a register.
  __ push(eax);

  // Initialize parameter map. If there are no mapped arguments, we're done.
  Label skip_parameter_map;
  __ test(ebx, ebx);
  __ j(zero, &skip_parameter_map);

  __ mov(FieldOperand(edi, FixedArray::kMapOffset),
         Immediate(FACTORY->non_strict_arguments_elements_map()));
  __ lea(eax, Operand(ebx, reinterpret_cast<intptr_t>(Smi::FromInt(2))));
  __ mov(FieldOperand(edi, FixedArray::kLengthOffset), eax);
  __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 0 * kPointerSize), esi);
  __ lea(eax, Operand(edi, ebx, times_2, kParameterMapHeaderSize));
  __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 1 * kPointerSize), eax);

  // Copy the parameter slots and the holes in the arguments.
  // We need to fill in mapped_parameter_count slots. They index the context,
  // where parameters are stored in reverse order, at
  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
  // The mapped parameters thus need to get indices
  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
  // We loop from right to left.
  Label parameters_loop, parameters_test;
  __ push(ecx);
  __ mov(eax, Operand(esp, 2 * kPointerSize));
  __ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
  __ add(ebx, Operand(esp, 4 * kPointerSize));
  __ sub(ebx, eax);
  __ mov(ecx, FACTORY->the_hole_value());
  __ mov(edx, edi);
  __ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
  // eax = loop variable (tagged)
  // ebx = mapping index (tagged)
  // ecx = the hole value
  // edx = address of parameter map (tagged)
  // edi = address of backing store (tagged)
  // esp[0] = argument count (tagged)
  // esp[4] = address of new object (tagged)
  // esp[8] = mapped parameter count (tagged)
  // esp[16] = parameter count (tagged)
  // esp[20] = address of receiver argument
  __ jmp(&parameters_test, Label::kNear);

  __ bind(&parameters_loop);
  __ sub(eax, Immediate(Smi::FromInt(1)));
  __ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx);
  __ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx);
  __ add(ebx, Immediate(Smi::FromInt(1)));
  __ bind(&parameters_test);
  __ test(eax, eax);
  __ j(not_zero, &parameters_loop, Label::kNear);
  __ pop(ecx);

  __ bind(&skip_parameter_map);

  // ecx = argument count (tagged)
  // edi = address of backing store (tagged)
  // esp[0] = address of new object (tagged)
  // esp[4] = mapped parameter count (tagged)
  // esp[12] = parameter count (tagged)
  // esp[16] = address of receiver argument
  // Copy arguments header and remaining slots (if there are any).
  __ mov(FieldOperand(edi, FixedArray::kMapOffset),
         Immediate(FACTORY->fixed_array_map()));
  __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);

  Label arguments_loop, arguments_test;
  __ mov(ebx, Operand(esp, 1 * kPointerSize));
  __ mov(edx, Operand(esp, 4 * kPointerSize));
  __ sub(edx, ebx);  // Is there a smarter way to do negative scaling?
  __ sub(edx, ebx);
  __ jmp(&arguments_test, Label::kNear);

  __ bind(&arguments_loop);
  __ sub(edx, Immediate(kPointerSize));
  __ mov(eax, Operand(edx, 0));
  __ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax);
  __ add(ebx, Immediate(Smi::FromInt(1)));

  __ bind(&arguments_test);
  __ cmp(ebx, ecx);
  __ j(less, &arguments_loop, Label::kNear);

  // Restore.
  __ pop(eax);  // Address of arguments object.
  __ pop(ebx);  // Parameter count.

  // Return and remove the on-stack parameters.
  __ ret(3 * kPointerSize);

  // Do the runtime call to allocate the arguments object.
  __ bind(&runtime);
  __ pop(eax);  // Remove saved parameter count.
  __ mov(Operand(esp, 1 * kPointerSize), ecx);  // Patch argument count.
  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
}
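
// For orientation, the fast path above allocates three objects contiguously
// (this is a summary of the code, not new behavior):
//
//   arguments object - length and callee in-object properties
//   parameter map    - header plus [context, backing store] followed by one
//                      slot per mapped parameter holding the context index
//                      that aliases it; only present when
//                      min(parameter count, argument count) > 0
//   backing store    - FixedArray with the argument values (holes where a
//                      parameter is mapped through the context)
//
// For example, function f(a, b, c) called with two arguments gets a mapped
// parameter count of 2: a and b alias context slots, and arguments.length
// is 2.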


void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
  // esp[0] : return address
  // esp[4] : number of parameters
  // esp[8] : receiver displacement
  // esp[12] : function

  // Check if the calling frame is an arguments adaptor frame.
  Label adaptor_frame, try_allocate, runtime;
  __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
  __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
  __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ j(equal, &adaptor_frame, Label::kNear);

  // Get the length from the frame.
  __ mov(ecx, Operand(esp, 1 * kPointerSize));
  __ jmp(&try_allocate, Label::kNear);

  // Patch the arguments.length and the parameters pointer.
  __ bind(&adaptor_frame);
  __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ mov(Operand(esp, 1 * kPointerSize), ecx);
  __ lea(edx, Operand(edx, ecx, times_2,
                      StandardFrameConstants::kCallerSPOffset));
  __ mov(Operand(esp, 2 * kPointerSize), edx);

  // Try the new space allocation. Start out with computing the size of
  // the arguments object and the elements array.
  Label add_arguments_object;
  __ bind(&try_allocate);
  __ test(ecx, ecx);
  __ j(zero, &add_arguments_object, Label::kNear);
  __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
  __ bind(&add_arguments_object);
  __ add(ecx, Immediate(Heap::kArgumentsObjectSizeStrict));

  // Do the allocation of both objects in one go.
  __ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);

  // Get the arguments boilerplate from the current (global) context.
  __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
  const int offset =
      Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
  __ mov(edi, Operand(edi, offset));

  // Copy the JS object part.
  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
    __ mov(ebx, FieldOperand(edi, i));
    __ mov(FieldOperand(eax, i), ebx);
  }

  // Get the length (smi tagged) and set that as an in-object property too.
  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
  __ mov(ecx, Operand(esp, 1 * kPointerSize));
  __ mov(FieldOperand(eax, JSObject::kHeaderSize +
                      Heap::kArgumentsLengthIndex * kPointerSize),
         ecx);

  // If there are no actual arguments, we're done.
  Label done;
  __ test(ecx, ecx);
  __ j(zero, &done, Label::kNear);

  // Get the parameters pointer from the stack.
  __ mov(edx, Operand(esp, 2 * kPointerSize));

  // Set up the elements pointer in the allocated arguments object and
  // initialize the header in the elements fixed array.
  __ lea(edi, Operand(eax, Heap::kArgumentsObjectSizeStrict));
  __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
  __ mov(FieldOperand(edi, FixedArray::kMapOffset),
         Immediate(FACTORY->fixed_array_map()));

  __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
  // Untag the length for the loop below.
  __ SmiUntag(ecx);

  // Copy the fixed array slots.
  Label loop;
  __ bind(&loop);
  __ mov(ebx, Operand(edx, -1 * kPointerSize));  // Skip receiver.
  __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
  __ add(edi, Immediate(kPointerSize));
  __ sub(edx, Immediate(kPointerSize));
  __ dec(ecx);
  __ j(not_zero, &loop);

  // Return and remove the on-stack parameters.
  __ bind(&done);
  __ ret(3 * kPointerSize);

  // Do the runtime call to allocate the arguments object.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
}


void RegExpExecStub::Generate(MacroAssembler* masm) {
  // Jump directly to the runtime if native RegExp support was not selected
  // at compile time, or if entry into generated regexp code is otherwise
  // disabled (by a runtime switch or at compilation).
#ifdef V8_INTERPRETED_REGEXP
  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else  // V8_INTERPRETED_REGEXP

  // Stack frame on entry.
  //  esp[0]: return address
  //  esp[4]: last_match_info (expected JSArray)
  //  esp[8]: previous index
  //  esp[12]: subject string
  //  esp[16]: JSRegExp object

  static const int kLastMatchInfoOffset = 1 * kPointerSize;
  static const int kPreviousIndexOffset = 2 * kPointerSize;
  static const int kSubjectOffset = 3 * kPointerSize;
  static const int kJSRegExpOffset = 4 * kPointerSize;

  Label runtime, invoke_regexp;

  // Ensure that a RegExp stack is allocated.
  ExternalReference address_of_regexp_stack_memory_address =
      ExternalReference::address_of_regexp_stack_memory_address(
          masm->isolate());
  ExternalReference address_of_regexp_stack_memory_size =
      ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
  __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
  __ test(ebx, ebx);
  __ j(zero, &runtime);

  // Check that the first argument is a JSRegExp object.
  __ mov(eax, Operand(esp, kJSRegExpOffset));
  STATIC_ASSERT(kSmiTag == 0);
  __ JumpIfSmi(eax, &runtime);
  __ CmpObjectType(eax, JS_REGEXP_TYPE, ecx);
  __ j(not_equal, &runtime);
  // Check that the RegExp has been compiled (data contains a fixed array).
  __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
  if (FLAG_debug_code) {
    __ test(ecx, Immediate(kSmiTagMask));
    __ Check(not_zero, "Unexpected type for RegExp data, FixedArray expected");
    __ CmpObjectType(ecx, FIXED_ARRAY_TYPE, ebx);
    __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
  }

  // ecx: RegExp data (FixedArray)
  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
  __ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset));
  __ cmp(ebx, Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
  __ j(not_equal, &runtime);

  // ecx: RegExp data (FixedArray)
  // Check that the number of captures fits in the static offsets vector
  // buffer.
  __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
  // Calculate number of capture registers (number_of_captures + 1) * 2. This
  // uses the assumption that smis are 2 * their untagged value: edx holds
  // 2 * number_of_captures, so adding 2 yields (number_of_captures + 1) * 2.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
  __ add(edx, Immediate(2));  // edx was a smi.
  // Check that the static offsets vector buffer is large enough.
  __ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize);
  __ j(above, &runtime);

  // ecx: RegExp data (FixedArray)
  // edx: Number of capture registers
  // Check that the second argument is a string.
  __ mov(eax, Operand(esp, kSubjectOffset));
  __ JumpIfSmi(eax, &runtime);
  Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
  __ j(NegateCondition(is_string), &runtime);
  // Get the length of the string to ebx.
  __ mov(ebx, FieldOperand(eax, String::kLengthOffset));

  // ebx: Length of subject string as a smi
  // ecx: RegExp data (FixedArray)
  // edx: Number of capture registers
  // Check that the third argument is a positive smi less than the subject
  // string length. A negative value will be greater (unsigned comparison).
  __ mov(eax, Operand(esp, kPreviousIndexOffset));
  __ JumpIfNotSmi(eax, &runtime);
  __ cmp(eax, ebx);
  __ j(above_equal, &runtime);

  // ecx: RegExp data (FixedArray)
  // edx: Number of capture registers
  // Check that the fourth object is a JSArray object.
  __ mov(eax, Operand(esp, kLastMatchInfoOffset));
  __ JumpIfSmi(eax, &runtime);
  __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
  __ j(not_equal, &runtime);
  // Check that the JSArray is in fast case.
  __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
  __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset));
  Factory* factory = masm->isolate()->factory();
  __ cmp(eax, factory->fixed_array_map());
  __ j(not_equal, &runtime);
  // Check that the last match info has space for the capture registers and the
  // additional information.
  __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
  __ SmiUntag(eax);
  __ add(edx, Immediate(RegExpImpl::kLastMatchOverhead));
  __ cmp(edx, eax);
  __ j(greater, &runtime);

  // Reset offset for possibly sliced string.
  __ Set(edi, Immediate(0));
  // ecx: RegExp data (FixedArray)
  // Check the representation and encoding of the subject string.
  Label seq_ascii_string, seq_two_byte_string, check_code;
  __ mov(eax, Operand(esp, kSubjectOffset));
  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
  // First check for flat two byte string.
  __ and_(ebx, kIsNotStringMask |
               kStringRepresentationMask |
               kStringEncodingMask |
               kShortExternalStringMask);
  STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
  __ j(zero, &seq_two_byte_string, Label::kNear);
  // Any other flat string must be a flat ASCII string.  None of the following
  // string type tests will succeed if subject is not a string or a short
  // external string.
  __ and_(ebx, Immediate(kIsNotStringMask |
                         kStringRepresentationMask |
                         kShortExternalStringMask));
  __ j(zero, &seq_ascii_string, Label::kNear);

  // ebx: whether subject is a string and if yes, its string representation
  // Check for flat cons string or sliced string.
  // A flat cons string is a cons string where the second part is the empty
  // string. In that case the subject string is just the first part of the cons
  // string. Also in this case the first part of the cons string is known to be
  // a sequential string or an external string.
  // In the case of a sliced string its offset has to be taken into account.
  Label cons_string, external_string, check_encoding;
  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
  __ cmp(ebx, Immediate(kExternalStringTag));
  __ j(less, &cons_string);
  __ j(equal, &external_string);

  // Catch non-string subject or short external string.
  STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
  __ test(ebx, Immediate(kIsNotStringMask | kShortExternalStringTag));
  __ j(not_zero, &runtime);

  // String is sliced.
  __ mov(edi, FieldOperand(eax, SlicedString::kOffsetOffset));
  __ mov(eax, FieldOperand(eax, SlicedString::kParentOffset));
  // edi: offset of sliced string, smi-tagged.
  // eax: parent string.
  __ jmp(&check_encoding, Label::kNear);
  // String is a cons string, check whether it is flat.
  __ bind(&cons_string);
  __ cmp(FieldOperand(eax, ConsString::kSecondOffset), factory->empty_string());
  __ j(not_equal, &runtime);
  __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
  __ bind(&check_encoding);
  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
  // eax: first part of cons string or parent of sliced string.
  // ebx: map of first part of cons string or map of parent of sliced string.
  // Is first part of cons or parent of slice a flat two byte string?
  __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
            kStringRepresentationMask | kStringEncodingMask);
  STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
  __ j(zero, &seq_two_byte_string, Label::kNear);
  // Any other flat string must be sequential ASCII or external.
  __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
            kStringRepresentationMask);
  __ j(not_zero, &external_string);

  __ bind(&seq_ascii_string);
  // eax: subject string (flat ASCII)
  // ecx: RegExp data (FixedArray)
  __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
  __ Set(ecx, Immediate(1));  // Type is ASCII.
  __ jmp(&check_code, Label::kNear);

  __ bind(&seq_two_byte_string);
  // eax: subject string (flat two byte)
  // ecx: RegExp data (FixedArray)
  __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
  __ Set(ecx, Immediate(0));  // Type is two byte.

  __ bind(&check_code);
  // Check that the irregexp code has been generated for the actual string
  // encoding. If it has, the field contains a code object; otherwise it
  // contains a smi (code flushing support).
  __ JumpIfSmi(edx, &runtime);

  // eax: subject string
  // edx: code
  // ecx: encoding of subject string (1 if ASCII, 0 if two_byte);
  // Load used arguments before starting to push arguments for call to native
  // RegExp code to avoid handling changing stack height.
  __ mov(ebx, Operand(esp, kPreviousIndexOffset));
  __ SmiUntag(ebx);  // Previous index from smi.

  // eax: subject string
  // ebx: previous index
  // edx: code
  // ecx: encoding of subject string (1 if ASCII 0 if two_byte);
  // All checks done. Now push arguments for native regexp code.
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->regexp_entry_native(), 1);

  // Isolates: note we add an additional parameter here (isolate pointer).
  static const int kRegExpExecuteArguments = 8;
  __ EnterApiExitFrame(kRegExpExecuteArguments);

  // Argument 8: Pass current isolate address.
  __ mov(Operand(esp, 7 * kPointerSize),
         Immediate(ExternalReference::isolate_address()));

  // Argument 7: Indicate that this is a direct call from JavaScript.
  __ mov(Operand(esp, 6 * kPointerSize), Immediate(1));

  // Argument 6: Start (high end) of backtracking stack memory area.
  __ mov(esi, Operand::StaticVariable(address_of_regexp_stack_memory_address));
  __ add(esi, Operand::StaticVariable(address_of_regexp_stack_memory_size));
  __ mov(Operand(esp, 5 * kPointerSize), esi);

  // Argument 5: static offsets vector buffer.
  __ mov(Operand(esp, 4 * kPointerSize),
         Immediate(ExternalReference::address_of_static_offsets_vector(
             masm->isolate())));

  // Argument 2: Previous index.
  __ mov(Operand(esp, 1 * kPointerSize), ebx);

  // Argument 1: Original subject string.
  // The original subject is in the previous stack frame. Therefore we have to
  // use ebp, which points exactly to one pointer size below the previous esp.
  // (Because creating a new stack frame pushes the previous ebp onto the stack
  // and thereby moves up esp by one kPointerSize.)
  __ mov(esi, Operand(ebp, kSubjectOffset + kPointerSize));
  __ mov(Operand(esp, 0 * kPointerSize), esi);

  // esi: original subject string
  // eax: underlying subject string
  // ebx: previous index
  // ecx: encoding of subject string (1 if ASCII 0 if two_byte);
  // edx: code
  // Argument 4: End of string data
  // Argument 3: Start of string data
  // Prepare start and end index of the input.
  // Load the length from the original sliced string if that is the case.
  __ mov(esi, FieldOperand(esi, String::kLengthOffset));
  __ add(esi, edi);  // Calculate input end wrt offset.
  __ SmiUntag(edi);
  __ add(ebx, edi);  // Calculate input start wrt offset.

  // ebx: start index of the input string
  // esi: end index of the input string
  Label setup_two_byte, setup_rest;
  __ test(ecx, ecx);
  __ j(zero, &setup_two_byte, Label::kNear);
  __ SmiUntag(esi);
  __ lea(ecx, FieldOperand(eax, esi, times_1, SeqAsciiString::kHeaderSize));
  __ mov(Operand(esp, 3 * kPointerSize), ecx);  // Argument 4.
  __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
  __ mov(Operand(esp, 2 * kPointerSize), ecx);  // Argument 3.
  __ jmp(&setup_rest, Label::kNear);

  __ bind(&setup_two_byte);
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize == 1);  // esi is a smi (length * 2), which equals
                                    // the string's byte length here.
  __ lea(ecx, FieldOperand(eax, esi, times_1, SeqTwoByteString::kHeaderSize));
  __ mov(Operand(esp, 3 * kPointerSize), ecx);  // Argument 4.
  __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
  __ mov(Operand(esp, 2 * kPointerSize), ecx);  // Argument 3.

  __ bind(&setup_rest);

  // Locate the code entry and call it.
  __ add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
  __ call(edx);

  // Drop arguments and come back to JS mode.
  __ LeaveApiExitFrame();

  // Check the result.
  Label success;
  __ cmp(eax, NativeRegExpMacroAssembler::SUCCESS);
  __ j(equal, &success);
  Label failure;
  __ cmp(eax, NativeRegExpMacroAssembler::FAILURE);
  __ j(equal, &failure);
  __ cmp(eax, NativeRegExpMacroAssembler::EXCEPTION);
  // If the result is not EXCEPTION, it can only be RETRY. Handle that in the
  // runtime system.
  __ j(not_equal, &runtime);
  // The result is now known to be EXCEPTION. If there is no pending exception
  // yet, a stack overflow (on the backtrack stack) was detected in RegExp
  // code, but the exception has not been created yet. Handle that in the
  // runtime system.
  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
  ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
                                      masm->isolate());
  __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
  __ mov(eax, Operand::StaticVariable(pending_exception));
  __ cmp(edx, eax);
  __ j(equal, &runtime);
  // For exception, throw the exception again.

  // Clear the pending exception variable.
  __ mov(Operand::StaticVariable(pending_exception), edx);

  // Special handling of termination exceptions, which are uncatchable
  // by JavaScript code.
  __ cmp(eax, factory->termination_exception());
  Label throw_termination_exception;
  __ j(equal, &throw_termination_exception, Label::kNear);

  // Handle normal exception by following handler chain.
  __ Throw(eax);

  __ bind(&throw_termination_exception);
  __ ThrowUncatchable(eax);

  __ bind(&failure);
  // For failure to match, return null.
  __ mov(eax, factory->null_value());
  __ ret(4 * kPointerSize);

  // Load RegExp data.
  __ bind(&success);
  __ mov(eax, Operand(esp, kJSRegExpOffset));
  __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
  __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
  // Calculate number of capture registers (number_of_captures + 1) * 2.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
  __ add(edx, Immediate(2));  // edx was a smi.

  // edx: Number of capture registers
  // Load last_match_info which is still known to be a fast case JSArray.
  __ mov(eax, Operand(esp, kLastMatchInfoOffset));
  __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));

  // ebx: last_match_info backing store (FixedArray)
  // edx: number of capture registers
  // Store the capture count.
  __ SmiTag(edx);  // Number of capture registers to smi.
  __ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx);
  __ SmiUntag(edx);  // Number of capture registers back from smi.
  // Store last subject and last input.
  __ mov(eax, Operand(esp, kSubjectOffset));
  __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
  __ RecordWriteField(ebx,
                      RegExpImpl::kLastSubjectOffset,
                      eax,
                      edi,
                      kDontSaveFPRegs);
  __ mov(eax, Operand(esp, kSubjectOffset));
  __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
  __ RecordWriteField(ebx,
                      RegExpImpl::kLastInputOffset,
                      eax,
                      edi,
                      kDontSaveFPRegs);

  // Get the static offsets vector filled by the native regexp code.
  ExternalReference address_of_static_offsets_vector =
      ExternalReference::address_of_static_offsets_vector(masm->isolate());
  __ mov(ecx, Immediate(address_of_static_offsets_vector));

  // ebx: last_match_info backing store (FixedArray)
  // ecx: offsets vector
  // edx: number of capture registers
  Label next_capture, done;
  // Capture register counter starts from number of capture registers and
  // counts down until wrapping after zero.
  __ bind(&next_capture);
  __ sub(edx, Immediate(1));
  __ j(negative, &done, Label::kNear);
  // Read the value from the static offsets vector buffer.
  __ mov(edi, Operand(ecx, edx, times_int_size, 0));
  __ SmiTag(edi);
  // Store the smi value in the last match info.
  __ mov(FieldOperand(ebx,
                      edx,
                      times_pointer_size,
                      RegExpImpl::kFirstCaptureOffset),
         edi);
  __ jmp(&next_capture);
  __ bind(&done);

  // Return last match info.
  __ mov(eax, Operand(esp, kLastMatchInfoOffset));
  __ ret(4 * kPointerSize);

  // External string.  Short external strings have already been ruled out.
  // eax: subject string (expected to be external)
  // ebx: scratch
  __ bind(&external_string);
  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ test_b(ebx, kIsIndirectStringMask);
    __ Assert(zero, "external string expected, but not found");
  }
  __ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
  // Move the pointer so that offset-wise, it looks like a sequential string.
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
  __ sub(eax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ test_b(ebx, kStringEncodingMask);
  __ j(not_zero, &seq_ascii_string);
  __ jmp(&seq_two_byte_string);

  // Do the runtime call to execute the regexp.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#endif  // V8_INTERPRETED_REGEXP
}


void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
  const int kMaxInlineLength = 100;
  Label slowcase;
  Label done;
  __ mov(ebx, Operand(esp, kPointerSize * 3));
  __ JumpIfNotSmi(ebx, &slowcase);
  __ cmp(ebx, Immediate(Smi::FromInt(kMaxInlineLength)));
  __ j(above, &slowcase);
  // Smi-tagging is equivalent to multiplying by 2.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize == 1);
  // Allocate RegExpResult followed by FixedArray with size in ebx.
  // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
  // Elements:  [Map][Length][..elements..]
  __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
                        times_half_pointer_size,
                        ebx,  // In: Number of elements (times 2, being a smi)
                        eax,  // Out: Start of allocation (tagged).
                        ecx,  // Out: End of allocation.
                        edx,  // Scratch register
                        &slowcase,
                        TAG_OBJECT);
  // eax: Start of allocated area, object-tagged.

  // Set JSArray map to global.regexp_result_map().
  // Set empty properties FixedArray.
  // Set elements to point to FixedArray allocated right after the JSArray.
  // Interleave operations for better latency.
  __ mov(edx, ContextOperand(esi, Context::GLOBAL_INDEX));
  Factory* factory = masm->isolate()->factory();
  __ mov(ecx, Immediate(factory->empty_fixed_array()));
  __ lea(ebx, Operand(eax, JSRegExpResult::kSize));
  __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalContextOffset));
  __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
  __ mov(edx, ContextOperand(edx, Context::REGEXP_RESULT_MAP_INDEX));
  __ mov(FieldOperand(eax, HeapObject::kMapOffset), edx);

  // Set input, index and length fields from arguments.
  __ mov(ecx, Operand(esp, kPointerSize * 1));
  __ mov(FieldOperand(eax, JSRegExpResult::kInputOffset), ecx);
  __ mov(ecx, Operand(esp, kPointerSize * 2));
  __ mov(FieldOperand(eax, JSRegExpResult::kIndexOffset), ecx);
  __ mov(ecx, Operand(esp, kPointerSize * 3));
  __ mov(FieldOperand(eax, JSArray::kLengthOffset), ecx);

  // Fill out the elements FixedArray.
  // eax: JSArray.
  // ebx: FixedArray.
  // ecx: Number of elements in array, as smi.

  // Set map.
  __ mov(FieldOperand(ebx, HeapObject::kMapOffset),
         Immediate(factory->fixed_array_map()));
  // Set length.
  __ mov(FieldOperand(ebx, FixedArray::kLengthOffset), ecx);
  // Fill contents of fixed-array with the-hole.
  __ SmiUntag(ecx);
  __ mov(edx, Immediate(factory->the_hole_value()));
  __ lea(ebx, FieldOperand(ebx, FixedArray::kHeaderSize));
  // Fill fixed array elements with hole.
  // eax: JSArray.
  // ecx: Number of elements to fill.
  // ebx: Start of elements in FixedArray.
  // edx: the hole.
  Label loop;
  __ test(ecx, ecx);
  __ bind(&loop);
  __ j(less_equal, &done, Label::kNear);  // Jump if ecx is negative or zero.
  __ sub(ecx, Immediate(1));
  __ mov(Operand(ebx, ecx, times_pointer_size, 0), edx);
  __ jmp(&loop);

  __ bind(&done);
  __ ret(3 * kPointerSize);

  __ bind(&slowcase);
  __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
}
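
// A note on the fill loop above: the conditional branch at the top consumes
// flags set by the explicit `test ecx, ecx` on the first iteration and by
// `sub ecx, 1` on every later one (mov and jmp leave the flags untouched),
// so the loop needs no separate compare. In pseudo-C:
//
//   while (ecx > 0) {        // j(less_equal, &done) exits on ecx <= 0
//     --ecx;                 // sub updates the flags for the next test
//     elements[ecx] = hole;  // fill from the top down
//   }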
4107 
4108 
GenerateLookupNumberStringCache(MacroAssembler * masm,Register object,Register result,Register scratch1,Register scratch2,bool object_is_smi,Label * not_found)4109 void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
4110                                                          Register object,
4111                                                          Register result,
4112                                                          Register scratch1,
4113                                                          Register scratch2,
4114                                                          bool object_is_smi,
4115                                                          Label* not_found) {
4116   // Use of registers. Register result is used as a temporary.
4117   Register number_string_cache = result;
4118   Register mask = scratch1;
4119   Register scratch = scratch2;
4120 
4121   // Load the number string cache.
4122   ExternalReference roots_array_start =
4123       ExternalReference::roots_array_start(masm->isolate());
4124   __ mov(scratch, Immediate(Heap::kNumberStringCacheRootIndex));
4125   __ mov(number_string_cache,
4126          Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
4127   // Make the hash mask from the length of the number string cache. It
4128   // contains two elements (number and string) for each cache entry.
4129   __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
4130   __ shr(mask, kSmiTagSize + 1);  // Untag length and divide it by two.
4131   __ sub(mask, Immediate(1));  // Make mask.
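  // Illustrative arithmetic: with kSmiTagSize == 1 the three instructions
  // above compute mask = (tagged_length >> 2) - 1 == (length / 2) - 1;
  // e.g. a 128-slot cache holds 64 (number, string) pairs and mask == 63.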
4132 
4133   // Calculate the entry in the number string cache. The hash value in the
4134   // number string cache for smis is just the smi value, and the hash for
4135   // doubles is the xor of the upper and lower words. See
4136   // Heap::GetNumberStringCache.
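  // Rough C sketch of the hash computed below (names are illustrative):
  //   uint32_t hash = is_smi ? smi_value              // untagged smi value
  //                          : double_lo ^ double_hi; // xor of the two words
  //   uint32_t index = hash & mask;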
4137   Label smi_hash_calculated;
4138   Label load_result_from_cache;
4139   if (object_is_smi) {
4140     __ mov(scratch, object);
4141     __ SmiUntag(scratch);
4142   } else {
4143     Label not_smi;
4144     STATIC_ASSERT(kSmiTag == 0);
4145     __ JumpIfNotSmi(object, &not_smi, Label::kNear);
4146     __ mov(scratch, object);
4147     __ SmiUntag(scratch);
4148     __ jmp(&smi_hash_calculated, Label::kNear);
4149     __ bind(&not_smi);
4150     __ cmp(FieldOperand(object, HeapObject::kMapOffset),
4151            masm->isolate()->factory()->heap_number_map());
4152     __ j(not_equal, not_found);
4153     STATIC_ASSERT(8 == kDoubleSize);
4154     __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
4155     __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
4156     // Object is heap number and hash is now in scratch. Calculate cache index.
4157     __ and_(scratch, mask);
4158     Register index = scratch;
4159     Register probe = mask;
4160     __ mov(probe,
4161            FieldOperand(number_string_cache,
4162                         index,
4163                         times_twice_pointer_size,
4164                         FixedArray::kHeaderSize));
4165     __ JumpIfSmi(probe, not_found);
4166     if (CpuFeatures::IsSupported(SSE2)) {
4167       CpuFeatures::Scope fscope(SSE2);
4168       __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
4169       __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
4170       __ ucomisd(xmm0, xmm1);
4171     } else {
4172       __ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
4173       __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
4174       __ FCmp();
4175     }
4176     __ j(parity_even, not_found);  // Bail out if NaN is involved.
4177     __ j(not_equal, not_found);  // The cache did not contain this value.
4178     __ jmp(&load_result_from_cache, Label::kNear);
4179   }
4180 
4181   __ bind(&smi_hash_calculated);
4182   // Object is smi and hash is now in scratch. Calculate cache index.
4183   __ and_(scratch, mask);
4184   Register index = scratch;
4185   // Check if the entry is the smi we are looking for.
4186   __ cmp(object,
4187          FieldOperand(number_string_cache,
4188                       index,
4189                       times_twice_pointer_size,
4190                       FixedArray::kHeaderSize));
4191   __ j(not_equal, not_found);
4192 
4193   // Get the result from the cache.
4194   __ bind(&load_result_from_cache);
4195   __ mov(result,
4196          FieldOperand(number_string_cache,
4197                       index,
4198                       times_twice_pointer_size,
4199                       FixedArray::kHeaderSize + kPointerSize));
4200   Counters* counters = masm->isolate()->counters();
4201   __ IncrementCounter(counters->number_to_string_native(), 1);
4202 }
4203 
4204 
4205 void NumberToStringStub::Generate(MacroAssembler* masm) {
4206   Label runtime;
4207 
4208   __ mov(ebx, Operand(esp, kPointerSize));
4209 
4210   // Generate code to lookup number in the number string cache.
4211   GenerateLookupNumberStringCache(masm, ebx, eax, ecx, edx, false, &runtime);
4212   __ ret(1 * kPointerSize);
4213 
4214   __ bind(&runtime);
4215   // Handle number to string in the runtime system if not found in the cache.
4216   __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
4217 }
4218 
4219 
4220 static int NegativeComparisonResult(Condition cc) {
4221   ASSERT(cc != equal);
4222   ASSERT((cc == less) || (cc == less_equal)
4223       || (cc == greater) || (cc == greater_equal));
4224   return (cc == greater || cc == greater_equal) ? LESS : GREATER;
4225 }
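// Example: for cc == less, the stub returns GREATER (a positive smi) on the
// NaN and undefined paths below, so the caller's 'less' test on the result
// fails and the unordered comparison comes out false, as ECMAScript requires.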
4226 
4227 void CompareStub::Generate(MacroAssembler* masm) {
4228   ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
4229 
4230   Label check_unequal_objects;
4231 
4232   // Compare two smis if required.
4233   if (include_smi_compare_) {
4234     Label non_smi, smi_done;
4235     __ mov(ecx, edx);
4236     __ or_(ecx, eax);
4237     __ JumpIfNotSmi(ecx, &non_smi, Label::kNear);
4238     __ sub(edx, eax);  // Return on the result of the subtraction.
4239     __ j(no_overflow, &smi_done, Label::kNear);
4240     __ not_(edx);  // Correct sign in case of overflow. edx is never 0 here.
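    // Sketch of the overflow fix-up above: edx - eax can only overflow when
    // the operands have opposite signs, in which case the true difference
    // has the sign of edx; 'not edx' (i.e. -edx - 1) flips the wrong sign
    // bit and keeps the value non-zero, and callers only use sign and zero.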
4241     __ bind(&smi_done);
4242     __ mov(eax, edx);
4243     __ ret(0);
4244     __ bind(&non_smi);
4245   } else if (FLAG_debug_code) {
4246     __ mov(ecx, edx);
4247     __ or_(ecx, eax);
4248     __ test(ecx, Immediate(kSmiTagMask));
4249     __ Assert(not_zero, "Unexpected smi operands.");
4250   }
4251 
4252   // NOTICE! This code is only reached after a smi-fast-case check, so
4253   // it is certain that at least one operand isn't a smi.
4254 
4255   // Identical objects can be compared fast, but there are some tricky cases
4256   // for NaN and undefined.
4257   {
4258     Label not_identical;
4259     __ cmp(eax, edx);
4260     __ j(not_equal, &not_identical);
4261 
4262     if (cc_ != equal) {
4263       // Check for undefined.  undefined OP undefined is false even though
4264       // undefined == undefined.
4265       Label check_for_nan;
4266       __ cmp(edx, masm->isolate()->factory()->undefined_value());
4267       __ j(not_equal, &check_for_nan, Label::kNear);
4268       __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
4269       __ ret(0);
4270       __ bind(&check_for_nan);
4271     }
4272 
4273     // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
4274     // so we do the second best thing - test it ourselves.
4275     // Note: if cc_ != equal, never_nan_nan_ is not used.
4276     if (never_nan_nan_ && (cc_ == equal)) {
4277       __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
4278       __ ret(0);
4279     } else {
4280       Label heap_number;
4281       __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
4282              Immediate(masm->isolate()->factory()->heap_number_map()));
4283       __ j(equal, &heap_number, Label::kNear);
4284       if (cc_ != equal) {
4285         // Call runtime on identical JSObjects.  Otherwise return equal.
4286         __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
4287         __ j(above_equal, &not_identical);
4288       }
4289       __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
4290       __ ret(0);
4291 
4292       __ bind(&heap_number);
4293       // It is a heap number, so return non-equal if it's NaN and equal if
4294       // it's not NaN.
4295       // The representation of NaN values has all exponent bits (52..62) set,
4296       // and not all mantissa bits (0..51) clear.
4297       // We only accept QNaNs, which have bit 51 set.
4298       // Read top bits of double representation (second word of value).
4299 
4300       // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
4301       // all bits in the mask are set. We only need to check the word
4302       // that contains the exponent and high bit of the mantissa.
4303       STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
4304       __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
4305       __ Set(eax, Immediate(0));
4306       // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
4307       // bits.
4308       __ add(edx, edx);
4309       __ cmp(edx, kQuietNaNHighBitsMask << 1);
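      // Worked example, assuming kQuietNaNHighBitsMask == 0x7ff80000: the
      // canonical quiet NaN has high word 0x7ff80000, which doubles to
      // 0xfff00000 and compares above_equal; 1.0 has high word 0x3ff00000,
      // which doubles to 0x7fe00000 and compares below.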
4310       if (cc_ == equal) {
4311         STATIC_ASSERT(EQUAL != 1);
4312         __ setcc(above_equal, eax);
4313         __ ret(0);
4314       } else {
4315         Label nan;
4316         __ j(above_equal, &nan, Label::kNear);
4317         __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
4318         __ ret(0);
4319         __ bind(&nan);
4320         __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
4321         __ ret(0);
4322       }
4323     }
4324 
4325     __ bind(&not_identical);
4326   }
4327 
4328   // Strict equality can quickly decide whether objects are equal.
4329   // Non-strict object equality is slower, so it is handled later in the stub.
4330   if (cc_ == equal && strict_) {
4331     Label slow;  // Fallthrough label.
4332     Label not_smis;
4333     // If we're doing a strict equality comparison, we don't have to do
4334     // type conversion, so we generate code to do fast comparison for objects
4335     // and oddballs. Non-smi numbers and strings still go through the usual
4336     // slow-case code.
4337     // If either is a Smi (we know that not both are), then they can only
4338     // be equal if the other is a HeapNumber. If so, use the slow case.
4339     STATIC_ASSERT(kSmiTag == 0);
4340     ASSERT_EQ(0, Smi::FromInt(0));
4341     __ mov(ecx, Immediate(kSmiTagMask));
4342     __ and_(ecx, eax);
4343     __ test(ecx, edx);
4344     __ j(not_zero, &not_smis, Label::kNear);
4345     // One operand is a smi.
4346 
4347     // Check whether the non-smi is a heap number.
4348     STATIC_ASSERT(kSmiTagMask == 1);
4349     // ecx still holds eax & kSmiTag, which is either zero or one.
4350     __ sub(ecx, Immediate(0x01));
4351     __ mov(ebx, edx);
4352     __ xor_(ebx, eax);
4353     __ and_(ebx, ecx);  // ebx holds either 0 or eax ^ edx.
4354     __ xor_(ebx, eax);
4355     // if eax was smi, ebx is now edx, else eax.
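    // Branch-free select, sketched in C: with ecx == (eax & 1) - 1 (all
    // ones when eax is a smi, zero otherwise), the sequence above computes
    //   ebx = eax ^ ((eax ^ edx) & ecx);
    // which yields edx when eax is the smi and eax otherwise.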
4356 
4357     // Check if the non-smi operand is a heap number.
4358     __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
4359            Immediate(masm->isolate()->factory()->heap_number_map()));
4360     // If heap number, handle it in the slow case.
4361     __ j(equal, &slow, Label::kNear);
4362     // Return non-equal (ebx is not zero)
4363     __ mov(eax, ebx);
4364     __ ret(0);
4365 
4366     __ bind(&not_smis);
4367     // If either operand is a JSObject or an oddball value, then they are not
4368     // equal since their pointers are different.
4369     // There is no test for undetectability in strict equality.
4370 
4371     // Get the type of the first operand.
4372     // If the first object is a JS object, we have done pointer comparison.
4373     Label first_non_object;
4374     STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
4375     __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
4376     __ j(below, &first_non_object, Label::kNear);
4377 
4378     // Return non-zero (eax is not zero)
4379     Label return_not_equal;
4380     STATIC_ASSERT(kHeapObjectTag != 0);
4381     __ bind(&return_not_equal);
4382     __ ret(0);
4383 
4384     __ bind(&first_non_object);
4385     // Check for oddballs: true, false, null, undefined.
4386     __ CmpInstanceType(ecx, ODDBALL_TYPE);
4387     __ j(equal, &return_not_equal);
4388 
4389     __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ecx);
4390     __ j(above_equal, &return_not_equal);
4391 
4392     // Check for oddballs: true, false, null, undefined.
4393     __ CmpInstanceType(ecx, ODDBALL_TYPE);
4394     __ j(equal, &return_not_equal);
4395 
4396     // Fall through to the general case.
4397     __ bind(&slow);
4398   }
4399 
4400   // Generate the number comparison code.
4401   if (include_number_compare_) {
4402     Label non_number_comparison;
4403     Label unordered;
4404     if (CpuFeatures::IsSupported(SSE2)) {
4405       CpuFeatures::Scope use_sse2(SSE2);
4406       CpuFeatures::Scope use_cmov(CMOV);
4407 
4408       FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
4409       __ ucomisd(xmm0, xmm1);
4410 
4411       // Don't base result on EFLAGS when a NaN is involved.
4412       __ j(parity_even, &unordered, Label::kNear);
4413       // Return a result of -1, 0, or 1, based on EFLAGS.
4414       __ mov(eax, 0);  // equal
4415       __ mov(ecx, Immediate(Smi::FromInt(1)));
4416       __ cmov(above, eax, ecx);
4417       __ mov(ecx, Immediate(Smi::FromInt(-1)));
4418       __ cmov(below, eax, ecx);
4419       __ ret(0);
4420     } else {
4421       FloatingPointHelper::CheckFloatOperands(
4422           masm, &non_number_comparison, ebx);
4423       FloatingPointHelper::LoadFloatOperand(masm, eax);
4424       FloatingPointHelper::LoadFloatOperand(masm, edx);
4425       __ FCmp();
4426 
4427       // Don't base result on EFLAGS when a NaN is involved.
4428       __ j(parity_even, &unordered, Label::kNear);
4429 
4430       Label below_label, above_label;
4431       // Return a result of -1, 0, or 1, based on EFLAGS.
4432       __ j(below, &below_label, Label::kNear);
4433       __ j(above, &above_label, Label::kNear);
4434 
4435       __ Set(eax, Immediate(0));
4436       __ ret(0);
4437 
4438       __ bind(&below_label);
4439       __ mov(eax, Immediate(Smi::FromInt(-1)));
4440       __ ret(0);
4441 
4442       __ bind(&above_label);
4443       __ mov(eax, Immediate(Smi::FromInt(1)));
4444       __ ret(0);
4445     }
4446 
4447     // If one of the numbers was NaN, then the result is always false.
4448     // Note that cc_ is never not_equal here.
4449     __ bind(&unordered);
4450     ASSERT(cc_ != not_equal);
4451     if (cc_ == less || cc_ == less_equal) {
4452       __ mov(eax, Immediate(Smi::FromInt(1)));
4453     } else {
4454       __ mov(eax, Immediate(Smi::FromInt(-1)));
4455     }
4456     __ ret(0);
4457 
4458     // The number comparison code did not provide a valid result.
4459     __ bind(&non_number_comparison);
4460   }
4461 
4462   // Fast negative check for symbol-to-symbol equality.
4463   Label check_for_strings;
4464   if (cc_ == equal) {
4465     BranchIfNonSymbol(masm, &check_for_strings, eax, ecx);
4466     BranchIfNonSymbol(masm, &check_for_strings, edx, ecx);
4467 
4468     // We've already checked for object identity, so if both operands
4469     // are symbols they aren't equal. Register eax already holds a
4470     // non-zero value, which indicates not equal, so just return.
4471     __ ret(0);
4472   }
4473 
4474   __ bind(&check_for_strings);
4475 
4476   __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx,
4477                                          &check_unequal_objects);
4478 
4479   // Inline comparison of ASCII strings.
4480   if (cc_ == equal) {
4481     StringCompareStub::GenerateFlatAsciiStringEquals(masm,
4482                                                      edx,
4483                                                      eax,
4484                                                      ecx,
4485                                                      ebx);
4486   } else {
4487     StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
4488                                                        edx,
4489                                                        eax,
4490                                                        ecx,
4491                                                        ebx,
4492                                                        edi);
4493   }
4494 #ifdef DEBUG
4495   __ Abort("Unexpected fall-through from string comparison");
4496 #endif
4497 
4498   __ bind(&check_unequal_objects);
4499   if (cc_ == equal && !strict_) {
4500     // Non-strict equality.  Objects are unequal if
4501     // they are both JSObjects and not undetectable,
4502     // and their pointers are different.
4503     Label not_both_objects;
4504     Label return_unequal;
4505     // At most one is a smi, so we can test for smi by adding the two.
4506     // A smi plus a heap object has the low bit set, a heap object plus
4507     // a heap object has the low bit clear.
4508     STATIC_ASSERT(kSmiTag == 0);
4509     STATIC_ASSERT(kSmiTagMask == 1);
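    // Tag arithmetic example: smis have low bit 0 and heap objects low bit 1
    // (kHeapObjectTag), so with at most one smi the sum eax + edx has its
    // low bit set only in the smi + heap object case: 0 + 1 = 1, while
    // 1 + 1 = 2 leaves the low bit clear.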
4510     __ lea(ecx, Operand(eax, edx, times_1, 0));
4511     __ test(ecx, Immediate(kSmiTagMask));
4512     __ j(not_zero, &not_both_objects, Label::kNear);
4513     __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
4514     __ j(below, &not_both_objects, Label::kNear);
4515     __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ebx);
4516     __ j(below, &not_both_objects, Label::kNear);
4517     // We do not bail out after this point.  Both are JSObjects, and
4518     // they are equal if and only if both are undetectable.
4519     // The and of the undetectable flags is 1 if and only if they are equal.
4520     __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
4521               1 << Map::kIsUndetectable);
4522     __ j(zero, &return_unequal, Label::kNear);
4523     __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
4524               1 << Map::kIsUndetectable);
4525     __ j(zero, &return_unequal, Label::kNear);
4526     // The objects are both undetectable, so they both compare as the value
4527     // undefined, and are equal.
4528     __ Set(eax, Immediate(EQUAL));
4529     __ bind(&return_unequal);
4530     // Return non-equal by returning the non-zero object pointer in eax,
4531     // or return equal if we fell through to here.
4532     __ ret(0);
4533     __ bind(&not_both_objects);
4534   }
4535 
4536   // Push arguments below the return address.
4537   __ pop(ecx);
4538   __ push(edx);
4539   __ push(eax);
4540 
4541   // Figure out which native to call and setup the arguments.
4542   Builtins::JavaScript builtin;
4543   if (cc_ == equal) {
4544     builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
4545   } else {
4546     builtin = Builtins::COMPARE;
4547     __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
4548   }
4549 
4550   // Restore return address on the stack.
4551   __ push(ecx);
4552 
4553   // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
4554   // tagged as a small integer.
4555   __ InvokeBuiltin(builtin, JUMP_FUNCTION);
4556 }
4557 
4558 
4559 void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
4560                                     Label* label,
4561                                     Register object,
4562                                     Register scratch) {
4563   __ JumpIfSmi(object, label);
4564   __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
4565   __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
4566   __ and_(scratch, kIsSymbolMask | kIsNotStringMask);
4567   __ cmp(scratch, kSymbolTag | kStringTag);
4568   __ j(not_equal, label);
4569 }
4570 
4571 
4572 void StackCheckStub::Generate(MacroAssembler* masm) {
4573   __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
4574 }
4575 
4576 
4577 void InterruptStub::Generate(MacroAssembler* masm) {
4578   __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
4579 }
4580 
4581 
4582 static void GenerateRecordCallTarget(MacroAssembler* masm) {
4583   // Cache the called function in a global property cell.  Cache states
4584   // are uninitialized, monomorphic (indicated by a JSFunction), and
4585   // megamorphic.
4586   // ebx : cache cell for call target
4587   // edi : the function to call
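  //
  // Sketch of the cache-cell state machine implemented below (illustrative):
  //   uninitialized --(first call with fn)--> monomorphic(fn)
  //   monomorphic(fn) --(call with a different fn)--> megamorphic
  //   megamorphic --(any call)--> megamorphic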
4588   Isolate* isolate = masm->isolate();
4589   Label initialize, done;
4590 
4591   // Load the cache state into ecx.
4592   __ mov(ecx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
4593 
4594   // A monomorphic cache hit or an already megamorphic state: invoke the
4595   // function without changing the state.
4596   __ cmp(ecx, edi);
4597   __ j(equal, &done, Label::kNear);
4598   __ cmp(ecx, Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
4599   __ j(equal, &done, Label::kNear);
4600 
4601   // A monomorphic miss (i.e., the cache is not uninitialized) goes
4602   // megamorphic.
4603   __ cmp(ecx, Immediate(TypeFeedbackCells::UninitializedSentinel(isolate)));
4604   __ j(equal, &initialize, Label::kNear);
4605   // MegamorphicSentinel is an immortal immovable object (undefined) so no
4606   // write-barrier is needed.
4607   __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
4608          Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
4609   __ jmp(&done, Label::kNear);
4610 
4611   // An uninitialized cache is patched with the function.
4612   __ bind(&initialize);
4613   __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), edi);
4614   // No need for a write barrier here - cells are rescanned.
4615 
4616   __ bind(&done);
4617 }
4618 
4619 
4620 void CallFunctionStub::Generate(MacroAssembler* masm) {
4621   // ebx : cache cell for call target
4622   // edi : the function to call
4623   Isolate* isolate = masm->isolate();
4624   Label slow, non_function;
4625 
4626   // The receiver might implicitly be the global object. This is
4627   // indicated by passing the hole as the receiver to the call
4628   // function stub.
4629   if (ReceiverMightBeImplicit()) {
4630     Label receiver_ok;
4631     // Get the receiver from the stack.
4632     // +1 ~ return address
4633     __ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize));
4634     // Call as function is indicated with the hole.
4635     __ cmp(eax, isolate->factory()->the_hole_value());
4636     __ j(not_equal, &receiver_ok, Label::kNear);
4637     // Patch the receiver on the stack with the global receiver object.
4638     __ mov(ecx, GlobalObjectOperand());
4639     __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
4640     __ mov(Operand(esp, (argc_ + 1) * kPointerSize), ecx);
4641     __ bind(&receiver_ok);
4642   }
4643 
4644   // Check that the function really is a JavaScript function.
4645   __ JumpIfSmi(edi, &non_function);
4646   // Goto slow case if we do not have a function.
4647   __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
4648   __ j(not_equal, &slow);
4649 
4650   if (RecordCallTarget()) {
4651     GenerateRecordCallTarget(masm);
4652   }
4653 
4654   // Fast-case: Just invoke the function.
4655   ParameterCount actual(argc_);
4656 
4657   if (ReceiverMightBeImplicit()) {
4658     Label call_as_function;
4659     __ cmp(eax, isolate->factory()->the_hole_value());
4660     __ j(equal, &call_as_function);
4661     __ InvokeFunction(edi,
4662                       actual,
4663                       JUMP_FUNCTION,
4664                       NullCallWrapper(),
4665                       CALL_AS_METHOD);
4666     __ bind(&call_as_function);
4667   }
4668   __ InvokeFunction(edi,
4669                     actual,
4670                     JUMP_FUNCTION,
4671                     NullCallWrapper(),
4672                     CALL_AS_FUNCTION);
4673 
4674   // Slow-case: Non-function called.
4675   __ bind(&slow);
4676   if (RecordCallTarget()) {
4677     // If there is a call target cache, mark it megamorphic in the
4678     // non-function case.  MegamorphicSentinel is an immortal immovable
4679     // object (undefined) so no write barrier is needed.
4680     __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
4681            Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
4682   }
4683   // Check for function proxy.
4684   __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
4685   __ j(not_equal, &non_function);
4686   __ pop(ecx);
4687   __ push(edi);  // put proxy as additional argument under return address
4688   __ push(ecx);
4689   __ Set(eax, Immediate(argc_ + 1));
4690   __ Set(ebx, Immediate(0));
4691   __ SetCallKind(ecx, CALL_AS_FUNCTION);
4692   __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
4693   {
4694     Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
4695     __ jmp(adaptor, RelocInfo::CODE_TARGET);
4696   }
4697 
4698   // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
4699   // of the original receiver from the call site).
4700   __ bind(&non_function);
4701   __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edi);
4702   __ Set(eax, Immediate(argc_));
4703   __ Set(ebx, Immediate(0));
4704   __ SetCallKind(ecx, CALL_AS_METHOD);
4705   __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
4706   Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
4707   __ jmp(adaptor, RelocInfo::CODE_TARGET);
4708 }
4709 
4710 
4711 void CallConstructStub::Generate(MacroAssembler* masm) {
4712   // eax : number of arguments
4713   // ebx : cache cell for call target
4714   // edi : constructor function
4715   Label slow, non_function_call;
4716 
4717   // Check that function is not a smi.
4718   __ JumpIfSmi(edi, &non_function_call);
4719   // Check that function is a JSFunction.
4720   __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
4721   __ j(not_equal, &slow);
4722 
4723   if (RecordCallTarget()) {
4724     GenerateRecordCallTarget(masm);
4725   }
4726 
4727   // Jump to the function-specific construct stub.
4728   __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
4729   __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kConstructStubOffset));
4730   __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
4731   __ jmp(ebx);
4732 
4733   // edi: called object
4734   // eax: number of arguments
4735   // ecx: object map
4736   Label do_call;
4737   __ bind(&slow);
4738   __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
4739   __ j(not_equal, &non_function_call);
4740   __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
4741   __ jmp(&do_call);
4742 
4743   __ bind(&non_function_call);
4744   __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
4745   __ bind(&do_call);
4746   // Set expected number of arguments to zero (not changing eax).
4747   __ Set(ebx, Immediate(0));
4748   Handle<Code> arguments_adaptor =
4749       masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
4750   __ SetCallKind(ecx, CALL_AS_METHOD);
4751   __ jmp(arguments_adaptor, RelocInfo::CODE_TARGET);
4752 }
4753 
4754 
4755 bool CEntryStub::NeedsImmovableCode() {
4756   return false;
4757 }
4758 
4759 
4760 bool CEntryStub::IsPregenerated() {
4761   return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
4762           result_size_ == 1;
4763 }
4764 
4765 
4766 void CodeStub::GenerateStubsAheadOfTime() {
4767   CEntryStub::GenerateAheadOfTime();
4768   StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
4769   // It is important that the store buffer overflow stubs are generated first.
4770   RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
4771 }
4772 
4773 
4774 void CodeStub::GenerateFPStubs() {
4775   CEntryStub save_doubles(1, kSaveFPRegs);
4776   Handle<Code> code = save_doubles.GetCode();
4777   code->set_is_pregenerated(true);
4778   code->GetIsolate()->set_fp_stubs_generated(true);
4779 }
4780 
4781 
4782 void CEntryStub::GenerateAheadOfTime() {
4783   CEntryStub stub(1, kDontSaveFPRegs);
4784   Handle<Code> code = stub.GetCode();
4785   code->set_is_pregenerated(true);
4786 }
4787 
4788 
4789 void CEntryStub::GenerateCore(MacroAssembler* masm,
4790                               Label* throw_normal_exception,
4791                               Label* throw_termination_exception,
4792                               Label* throw_out_of_memory_exception,
4793                               bool do_gc,
4794                               bool always_allocate_scope) {
4795   // eax: result parameter for PerformGC, if any
4796   // ebx: pointer to C function  (C callee-saved)
4797   // ebp: frame pointer  (restored after C call)
4798   // esp: stack pointer  (restored after C call)
4799   // edi: number of arguments including receiver  (C callee-saved)
4800   // esi: pointer to the first argument (C callee-saved)
4801 
4802   // Result returned in eax, or eax+edx if result_size_ is 2.
4803 
4804   // Check stack alignment.
4805   if (FLAG_debug_code) {
4806     __ CheckStackAlignment();
4807   }
4808 
4809   if (do_gc) {
4810     // Pass failure code returned from last attempt as first argument to
4811     // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
4812     // stack alignment is known to be correct. This function takes one argument
4813     // which is passed on the stack, and we know that the stack has been
4814     // prepared to pass at least one argument.
4815     __ mov(Operand(esp, 0 * kPointerSize), eax);  // Result.
4816     __ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
4817   }
4818 
4819   ExternalReference scope_depth =
4820       ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
4821   if (always_allocate_scope) {
4822     __ inc(Operand::StaticVariable(scope_depth));
4823   }
4824 
4825   // Call C function.
4826   __ mov(Operand(esp, 0 * kPointerSize), edi);  // argc.
4827   __ mov(Operand(esp, 1 * kPointerSize), esi);  // argv.
4828   __ mov(Operand(esp, 2 * kPointerSize),
4829          Immediate(ExternalReference::isolate_address()));
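  // In C terms this is, roughly, a cdecl call fn(argc, argv, isolate): all
  // three arguments passed on the stack, callee address in ebx.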
4830   __ call(ebx);
4831   // Result is in eax or edx:eax - do not destroy these registers!
4832 
4833   if (always_allocate_scope) {
4834     __ dec(Operand::StaticVariable(scope_depth));
4835   }
4836 
4837   // Make sure we're not trying to return 'the hole' from the runtime
4838   // call as this may lead to crashes in the IC code later.
4839   if (FLAG_debug_code) {
4840     Label okay;
4841     __ cmp(eax, masm->isolate()->factory()->the_hole_value());
4842     __ j(not_equal, &okay, Label::kNear);
4843     __ int3();
4844     __ bind(&okay);
4845   }
4846 
4847   // Check for failure result.
4848   Label failure_returned;
4849   STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
4850   __ lea(ecx, Operand(eax, 1));
4851   // Lower 2 bits of ecx are 0 iff eax has failure tag.
4852   __ test(ecx, Immediate(kFailureTagMask));
4853   __ j(zero, &failure_returned);
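  // Worked example of the check above: failure objects have their two low
  // tag bits set (per the STATIC_ASSERT), so eax == ...11 makes ecx end in
  // 00 and the test sees zero; smis (...0) and heap objects (...01) leave a
  // low bit set in ecx.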
4854 
4855   ExternalReference pending_exception_address(
4856       Isolate::kPendingExceptionAddress, masm->isolate());
4857 
4858   // Check that there is no pending exception, otherwise we
4859   // should have returned some failure value.
4860   if (FLAG_debug_code) {
4861     __ push(edx);
4862     __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
4863     Label okay;
4864     __ cmp(edx, Operand::StaticVariable(pending_exception_address));
4865     // Cannot use Check here as it attempts to generate a call into the runtime.
4866     __ j(equal, &okay, Label::kNear);
4867     __ int3();
4868     __ bind(&okay);
4869     __ pop(edx);
4870   }
4871 
4872   // Exit the JavaScript to C++ exit frame.
4873   __ LeaveExitFrame(save_doubles_ == kSaveFPRegs);
4874   __ ret(0);
4875 
4876   // Handling of failure.
4877   __ bind(&failure_returned);
4878 
4879   Label retry;
4880   // If the returned failure is RETRY_AFTER_GC, continue at the retry label.
4881   STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
4882   __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
4883   __ j(zero, &retry, Label::kNear);
4884 
4885   // Special handling of out of memory exceptions.
4886   __ cmp(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
4887   __ j(equal, throw_out_of_memory_exception);
4888 
4889   // Retrieve the pending exception and clear the variable.
4890   __ mov(eax, Operand::StaticVariable(pending_exception_address));
4891   __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
4892   __ mov(Operand::StaticVariable(pending_exception_address), edx);
4893 
4894   // Special handling of termination exceptions which are uncatchable
4895   // by JavaScript code.
4896   __ cmp(eax, masm->isolate()->factory()->termination_exception());
4897   __ j(equal, throw_termination_exception);
4898 
4899   // Handle normal exception.
4900   __ jmp(throw_normal_exception);
4901 
4902   // Retry.
4903   __ bind(&retry);
4904 }
4905 
4906 
4907 void CEntryStub::Generate(MacroAssembler* masm) {
4908   // eax: number of arguments including receiver
4909   // ebx: pointer to C function  (C callee-saved)
4910   // ebp: frame pointer  (restored after C call)
4911   // esp: stack pointer  (restored after C call)
4912   // esi: current context (C callee-saved)
4913   // edi: JS function of the caller (C callee-saved)
4914 
4915   // NOTE: Invocations of builtins may return failure objects instead
4916   // of a proper result. The builtin entry handles this by performing
4917   // a garbage collection and retrying the builtin (twice).
4918 
4919   // Enter the exit frame that transitions from JavaScript to C++.
4920   __ EnterExitFrame(save_doubles_ == kSaveFPRegs);
4921 
4922   // eax: result parameter for PerformGC, if any (setup below)
4923   // ebx: pointer to builtin function  (C callee-saved)
4924   // ebp: frame pointer  (restored after C call)
4925   // esp: stack pointer  (restored after C call)
4926   // edi: number of arguments including receiver (C callee-saved)
4927   // esi: argv pointer (C callee-saved)
4928 
4929   Label throw_normal_exception;
4930   Label throw_termination_exception;
4931   Label throw_out_of_memory_exception;
4932 
4933   // Call into the runtime system.
4934   GenerateCore(masm,
4935                &throw_normal_exception,
4936                &throw_termination_exception,
4937                &throw_out_of_memory_exception,
4938                false,
4939                false);
4940 
4941   // Do space-specific GC and retry runtime call.
4942   GenerateCore(masm,
4943                &throw_normal_exception,
4944                &throw_termination_exception,
4945                &throw_out_of_memory_exception,
4946                true,
4947                false);
4948 
4949   // Do full GC and retry runtime call one final time.
4950   Failure* failure = Failure::InternalError();
4951   __ mov(eax, Immediate(reinterpret_cast<int32_t>(failure)));
4952   GenerateCore(masm,
4953                &throw_normal_exception,
4954                &throw_termination_exception,
4955                &throw_out_of_memory_exception,
4956                true,
4957                true);
4958 
4959   __ bind(&throw_out_of_memory_exception);
4960   // Set external caught exception to false.
4961   Isolate* isolate = masm->isolate();
4962   ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
4963                                     isolate);
4964   __ mov(Operand::StaticVariable(external_caught), Immediate(false));
4965 
4966   // Set pending exception and eax to out of memory exception.
4967   ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
4968                                       isolate);
4969   __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
4970   __ mov(Operand::StaticVariable(pending_exception), eax);
4971   // Fall through to the next label.
4972 
4973   __ bind(&throw_termination_exception);
4974   __ ThrowUncatchable(eax);
4975 
4976   __ bind(&throw_normal_exception);
4977   __ Throw(eax);
4978 }
4979 
4980 
4981 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
4982   Label invoke, handler_entry, exit;
4983   Label not_outermost_js, not_outermost_js_2;
4984 
4985   // Set up frame.
4986   __ push(ebp);
4987   __ mov(ebp, esp);
4988 
4989   // Push marker in two places.
4990   int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
4991   __ push(Immediate(Smi::FromInt(marker)));  // context slot
4992   __ push(Immediate(Smi::FromInt(marker)));  // function slot
4993   // Save callee-saved registers (C calling conventions).
4994   __ push(edi);
4995   __ push(esi);
4996   __ push(ebx);
4997 
4998   // Save copies of the top frame descriptor on the stack.
4999   ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, masm->isolate());
5000   __ push(Operand::StaticVariable(c_entry_fp));
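  // Stack layout at this point (sketch; offsets relative to ebp):
  //   +4 : return address
  //    0 : saved ebp
  //   -4 : entry-frame marker (context slot)
  //   -8 : entry-frame marker (function slot)
  //  -12 : saved edi,  -16 : saved esi,  -20 : saved ebx
  //  -24 : saved c_entry_fp  <- esp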
5001 
5002   // If this is the outermost JS call, set js_entry_sp value.
5003   ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress,
5004                                 masm->isolate());
5005   __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
5006   __ j(not_equal, &not_outermost_js, Label::kNear);
5007   __ mov(Operand::StaticVariable(js_entry_sp), ebp);
5008   __ push(Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
5009   Label cont;
5010   __ jmp(&cont, Label::kNear);
5011   __ bind(&not_outermost_js);
5012   __ push(Immediate(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
5013   __ bind(&cont);
5014 
5015   // Jump to a faked try block that does the invoke, with a faked catch
5016   // block that sets the pending exception.
5017   __ jmp(&invoke);
5018   __ bind(&handler_entry);
5019   handler_offset_ = handler_entry.pos();
5020   // Caught exception: Store result (exception) in the pending exception
5021   // field in the JSEnv and return a failure sentinel.
5022   ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
5023                                       masm->isolate());
5024   __ mov(Operand::StaticVariable(pending_exception), eax);
5025   __ mov(eax, reinterpret_cast<int32_t>(Failure::Exception()));
5026   __ jmp(&exit);
5027 
5028   // Invoke: Link this frame into the handler chain.  There's only one
5029   // handler block in this code object, so its index is 0.
5030   __ bind(&invoke);
5031   __ PushTryHandler(StackHandler::JS_ENTRY, 0);
5032 
5033   // Clear any pending exceptions.
5034   __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
5035   __ mov(Operand::StaticVariable(pending_exception), edx);
5036 
5037   // Fake a receiver (NULL).
5038   __ push(Immediate(0));  // receiver
5039 
5040   // Invoke the function by calling through JS entry trampoline builtin and
5041   // pop the faked function when we return. Notice that we cannot store a
5042   // reference to the trampoline code directly in this stub, because the
5043   // builtin stubs may not have been generated yet.
5044   if (is_construct) {
5045     ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
5046                                       masm->isolate());
5047     __ mov(edx, Immediate(construct_entry));
5048   } else {
5049     ExternalReference entry(Builtins::kJSEntryTrampoline,
5050                             masm->isolate());
5051     __ mov(edx, Immediate(entry));
5052   }
5053   __ mov(edx, Operand(edx, 0));  // deref address
5054   __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
5055   __ call(edx);
5056 
5057   // Unlink this frame from the handler chain.
5058   __ PopTryHandler();
5059 
5060   __ bind(&exit);
5061   // Check if the current stack frame is marked as the outermost JS frame.
5062   __ pop(ebx);
5063   __ cmp(ebx, Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
5064   __ j(not_equal, &not_outermost_js_2);
5065   __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
5066   __ bind(&not_outermost_js_2);
5067 
5068   // Restore the top frame descriptor from the stack.
5069   __ pop(Operand::StaticVariable(ExternalReference(
5070       Isolate::kCEntryFPAddress,
5071       masm->isolate())));
5072 
5073   // Restore callee-saved registers (C calling conventions).
5074   __ pop(ebx);
5075   __ pop(esi);
5076   __ pop(edi);
5077   __ add(esp, Immediate(2 * kPointerSize));  // remove markers
5078 
5079   // Restore frame pointer and return.
5080   __ pop(ebp);
5081   __ ret(0);
5082 }
5083 
5084 
5085 // Generate stub code for instanceof.
5086 // This code can patch the inlined cache at an instanceof call site,
5087 // which looks like this.
5088 //
5089 //   81 ff XX XX XX XX   cmp    edi, <the hole, patched to a map>
5090 //   75 0a               jne    <some near label>
5091 //   b8 XX XX XX XX      mov    eax, <the hole, patched to either true or false>
5092 //
5093 // If call site patching is requested the stack will have the delta from the
5094 // return address to the cmp instruction just below the return address. This
5095 // also means that call site patching can only take place with arguments in
5096 // registers. TOS looks like this when call site patching is requested
5097 //
5098 //   esp[0] : return address
5099 //   esp[4] : delta from return address to cmp instruction
5100 //
5101 void InstanceofStub::Generate(MacroAssembler* masm) {
5102   // Call site inlining and patching implies arguments in registers.
5103   ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
5104 
5105   // Fixed register usage throughout the stub.
5106   Register object = eax;  // Object (lhs).
5107   Register map = ebx;  // Map of the object.
5108   Register function = edx;  // Function (rhs).
5109   Register prototype = edi;  // Prototype of the function.
5110   Register scratch = ecx;
5111 
5112   // Constants describing the call site code to patch.
5113   static const int kDeltaToCmpImmediate = 2;
5114   static const int kDeltaToMov = 8;
5115   static const int kDeltaToMovImmediate = 9;
5116   static const int8_t kCmpEdiOperandByte1 = BitCast<int8_t, uint8_t>(0x3b);
5117   static const int8_t kCmpEdiOperandByte2 = BitCast<int8_t, uint8_t>(0x3d);
5118   static const int8_t kMovEaxImmediateByte = BitCast<int8_t, uint8_t>(0xb8);
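  // Sketch of how the deltas map onto the patched sequence; the bytes
  // asserted below encode 'cmp edi, [disp32]', which is 6 bytes like the
  // '81 ff' immediate form shown above, so the deltas fit either encoding:
  //   +0: 3b 3d           cmp edi, [disp32]
  //   +2: XX XX XX XX     map operand          <- kDeltaToCmpImmediate
  //   +6: 75 0a           jne <near label>
  //   +8: b8              mov eax, imm32       <- kDeltaToMov
  //   +9: XX XX XX XX     true/false object    <- kDeltaToMovImmediate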
5119 
5120   ExternalReference roots_array_start =
5121       ExternalReference::roots_array_start(masm->isolate());
5122 
5123   ASSERT_EQ(object.code(), InstanceofStub::left().code());
5124   ASSERT_EQ(function.code(), InstanceofStub::right().code());
5125 
5126   // Get the object and function - they are always both needed.
5127   Label slow, not_js_object;
5128   if (!HasArgsInRegisters()) {
5129     __ mov(object, Operand(esp, 2 * kPointerSize));
5130     __ mov(function, Operand(esp, 1 * kPointerSize));
5131   }
5132 
5133   // Check that the left hand is a JS object.
5134   __ JumpIfSmi(object, &not_js_object);
5135   __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
5136 
5137   // If there is a call site cache don't look in the global cache, but do the
5138   // real lookup and update the call site cache.
5139   if (!HasCallSiteInlineCheck()) {
5140     // Look up the function and the map in the instanceof cache.
5141     Label miss;
5142     __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
5143     __ cmp(function, Operand::StaticArray(scratch,
5144                                           times_pointer_size,
5145                                           roots_array_start));
5146     __ j(not_equal, &miss, Label::kNear);
5147     __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
5148     __ cmp(map, Operand::StaticArray(
5149         scratch, times_pointer_size, roots_array_start));
5150     __ j(not_equal, &miss, Label::kNear);
5151     __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
5152     __ mov(eax, Operand::StaticArray(
5153         scratch, times_pointer_size, roots_array_start));
5154     __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
5155     __ bind(&miss);
5156   }
5157 
5158   // Get the prototype of the function.
5159   __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
5160 
5161   // Check that the function prototype is a JS object.
5162   __ JumpIfSmi(prototype, &slow);
5163   __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
5164 
5165   // Update the global instanceof or call site inlined cache with the current
5166   // map and function. The cached answer will be set when it is known below.
5167   if (!HasCallSiteInlineCheck()) {
5168     __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
5169     __ mov(Operand::StaticArray(
5170         scratch, times_pointer_size, roots_array_start), map);
5171     __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
5172     __ mov(Operand::StaticArray(
5173         scratch, times_pointer_size, roots_array_start), function);
5174   } else {
5175     // The constants for the code patching assume that no push
5176     // instructions were emitted at the call site.
5177     ASSERT(HasArgsInRegisters());
5178     // Get return address and delta to inlined map check.
5179     __ mov(scratch, Operand(esp, 0 * kPointerSize));
5180     __ sub(scratch, Operand(esp, 1 * kPointerSize));
5181     if (FLAG_debug_code) {
5182       __ cmpb(Operand(scratch, 0), kCmpEdiOperandByte1);
5183       __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 1)");
5184       __ cmpb(Operand(scratch, 1), kCmpEdiOperandByte2);
5185       __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 2)");
5186     }
5187     __ mov(scratch, Operand(scratch, kDeltaToCmpImmediate));
5188     __ mov(Operand(scratch, 0), map);
5189   }
5190 
5191   // Loop through the prototype chain of the object looking for the function
5192   // prototype.
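  // JS-level sketch: for 'o instanceof F', walk o.[[Prototype]],
  // o.[[Prototype]].[[Prototype]], ... answering true on reaching
  // F.prototype and false on reaching null.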
5193   __ mov(scratch, FieldOperand(map, Map::kPrototypeOffset));
5194   Label loop, is_instance, is_not_instance;
5195   __ bind(&loop);
5196   __ cmp(scratch, prototype);
5197   __ j(equal, &is_instance, Label::kNear);
5198   Factory* factory = masm->isolate()->factory();
5199   __ cmp(scratch, Immediate(factory->null_value()));
5200   __ j(equal, &is_not_instance, Label::kNear);
5201   __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
5202   __ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
5203   __ jmp(&loop);
5204 
5205   __ bind(&is_instance);
5206   if (!HasCallSiteInlineCheck()) {
5207     __ Set(eax, Immediate(0));
5208     __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
5209     __ mov(Operand::StaticArray(scratch,
5210                                 times_pointer_size, roots_array_start), eax);
5211   } else {
5212     // Get return address and delta to inlined map check.
5213     __ mov(eax, factory->true_value());
5214     __ mov(scratch, Operand(esp, 0 * kPointerSize));
5215     __ sub(scratch, Operand(esp, 1 * kPointerSize));
5216     if (FLAG_debug_code) {
5217       __ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
5218       __ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
5219     }
5220     __ mov(Operand(scratch, kDeltaToMovImmediate), eax);
5221     if (!ReturnTrueFalseObject()) {
5222       __ Set(eax, Immediate(0));
5223     }
5224   }
5225   __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
5226 
5227   __ bind(&is_not_instance);
5228   if (!HasCallSiteInlineCheck()) {
5229     __ Set(eax, Immediate(Smi::FromInt(1)));
5230     __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
5231     __ mov(Operand::StaticArray(
5232         scratch, times_pointer_size, roots_array_start), eax);
5233   } else {
5234     // Get return address and delta to inlined map check.
5235     __ mov(eax, factory->false_value());
5236     __ mov(scratch, Operand(esp, 0 * kPointerSize));
5237     __ sub(scratch, Operand(esp, 1 * kPointerSize));
5238     if (FLAG_debug_code) {
5239       __ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
5240       __ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
5241     }
5242     __ mov(Operand(scratch, kDeltaToMovImmediate), eax);
5243     if (!ReturnTrueFalseObject()) {
5244       __ Set(eax, Immediate(Smi::FromInt(1)));
5245     }
5246   }
5247   __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
5248 
5249   Label object_not_null, object_not_null_or_smi;
5250   __ bind(&not_js_object);
5251   // Before the null, smi and string value checks, check that the rhs is a
5252   // function; for a non-function rhs an exception needs to be thrown.
5253   __ JumpIfSmi(function, &slow, Label::kNear);
5254   __ CmpObjectType(function, JS_FUNCTION_TYPE, scratch);
5255   __ j(not_equal, &slow, Label::kNear);
5256 
5257   // Null is not an instance of anything.
5258   __ cmp(object, factory->null_value());
5259   __ j(not_equal, &object_not_null, Label::kNear);
5260   __ Set(eax, Immediate(Smi::FromInt(1)));
5261   __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
5262 
5263   __ bind(&object_not_null);
5264   // Smi values are not instances of anything.
5265   __ JumpIfNotSmi(object, &object_not_null_or_smi, Label::kNear);
5266   __ Set(eax, Immediate(Smi::FromInt(1)));
5267   __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
5268 
5269   __ bind(&object_not_null_or_smi);
5270   // String values are not instances of anything.
5271   Condition is_string = masm->IsObjectStringType(object, scratch, scratch);
5272   __ j(NegateCondition(is_string), &slow, Label::kNear);
5273   __ Set(eax, Immediate(Smi::FromInt(1)));
5274   __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
5275 
5276   // Slow-case: Go through the JavaScript implementation.
5277   __ bind(&slow);
5278   if (!ReturnTrueFalseObject()) {
5279     // Tail call the builtin which returns 0 or 1.
5280     if (HasArgsInRegisters()) {
5281       // Push arguments below return address.
5282       __ pop(scratch);
5283       __ push(object);
5284       __ push(function);
5285       __ push(scratch);
5286     }
5287     __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
5288   } else {
5289     // Call the builtin and convert 0/1 to true/false.
5290     {
5291       FrameScope scope(masm, StackFrame::INTERNAL);
5292       __ push(object);
5293       __ push(function);
5294       __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
5295     }
5296     Label true_value, done;
5297     __ test(eax, eax);
5298     __ j(zero, &true_value, Label::kNear);
5299     __ mov(eax, factory->false_value());
5300     __ jmp(&done, Label::kNear);
5301     __ bind(&true_value);
5302     __ mov(eax, factory->true_value());
5303     __ bind(&done);
5304     __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
5305   }
5306 }
5307 
5308 
5309 Register InstanceofStub::left() { return eax; }
5310 
5311 
5312 Register InstanceofStub::right() { return edx; }
5313 
5314 
5315 int CompareStub::MinorKey() {
5316   // Encode the three parameters in a unique 16 bit value. To avoid duplicate
5317   // stubs the never NaN NaN condition is only taken into account if the
5318   // condition is equals.
5319   ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
5320   ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
5321   return ConditionField::encode(static_cast<unsigned>(cc_))
5322          | RegisterField::encode(false)   // lhs_ and rhs_ are not used
5323          | StrictField::encode(strict_)
5324          | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
5325          | IncludeNumberCompareField::encode(include_number_compare_)
5326          | IncludeSmiCompareField::encode(include_smi_compare_);
5327 }
5328 
5329 
5330 // Unfortunately you have to run without snapshots to see most of these
5331 // names in the profile since most compare stubs end up in the snapshot.
5332 void CompareStub::PrintName(StringStream* stream) {
5333   ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
5334   const char* cc_name;
5335   switch (cc_) {
5336     case less: cc_name = "LT"; break;
5337     case greater: cc_name = "GT"; break;
5338     case less_equal: cc_name = "LE"; break;
5339     case greater_equal: cc_name = "GE"; break;
5340     case equal: cc_name = "EQ"; break;
5341     case not_equal: cc_name = "NE"; break;
5342     default: cc_name = "UnknownCondition"; break;
5343   }
5344   bool is_equality = cc_ == equal || cc_ == not_equal;
5345   stream->Add("CompareStub_%s", cc_name);
5346   if (strict_ && is_equality) stream->Add("_STRICT");
5347   if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
5348   if (!include_number_compare_) stream->Add("_NO_NUMBER");
5349   if (!include_smi_compare_) stream->Add("_NO_SMI");
5350 }
5351 
5352 
5353 // -------------------------------------------------------------------------
5354 // StringCharCodeAtGenerator
5355 
5356 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
5357   // If the receiver is a smi trigger the non-string case.
5358   STATIC_ASSERT(kSmiTag == 0);
5359   __ JumpIfSmi(object_, receiver_not_string_);
5360 
5361   // Fetch the instance type of the receiver into result register.
5362   __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
5363   __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
5364   // If the receiver is not a string trigger the non-string case.
5365   __ test(result_, Immediate(kIsNotStringMask));
5366   __ j(not_zero, receiver_not_string_);
5367 
5368   // If the index is non-smi trigger the non-smi case.
5369   STATIC_ASSERT(kSmiTag == 0);
5370   __ JumpIfNotSmi(index_, &index_not_smi_);
5371   __ bind(&got_smi_index_);
5372 
5373   // Check for index out of range.
5374   __ cmp(index_, FieldOperand(object_, String::kLengthOffset));
5375   __ j(above_equal, index_out_of_range_);
5376 
5377   __ SmiUntag(index_);
5378 
5379   Factory* factory = masm->isolate()->factory();
5380   StringCharLoadGenerator::Generate(
5381       masm, factory, object_, index_, result_, &call_runtime_);
5382 
5383   __ SmiTag(result_);
5384   __ bind(&exit_);
5385 }
5386 
5387 
5388 void StringCharCodeAtGenerator::GenerateSlow(
5389     MacroAssembler* masm,
5390     const RuntimeCallHelper& call_helper) {
5391   __ Abort("Unexpected fallthrough to CharCodeAt slow case");
5392 
5393   // Index is not a smi.
5394   __ bind(&index_not_smi_);
5395   // If index is a heap number, try converting it to an integer.
5396   __ CheckMap(index_,
5397               masm->isolate()->factory()->heap_number_map(),
5398               index_not_number_,
5399               DONT_DO_SMI_CHECK);
5400   call_helper.BeforeCall(masm);
5401   __ push(object_);
5402   __ push(index_);  // Consumed by runtime conversion function.
5403   if (index_flags_ == STRING_INDEX_IS_NUMBER) {
5404     __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
5405   } else {
5406     ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
5407     // NumberToSmi discards numbers that are not exact integers.
5408     __ CallRuntime(Runtime::kNumberToSmi, 1);
5409   }
5410   if (!index_.is(eax)) {
5411     // Save the conversion result before the pop instructions below
5412     // have a chance to overwrite it.
5413     __ mov(index_, eax);
5414   }
5415   __ pop(object_);
5416   // Reload the instance type.
5417   __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
5418   __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
5419   call_helper.AfterCall(masm);
5420   // If index is still not a smi, it must be out of range.
5421   STATIC_ASSERT(kSmiTag == 0);
5422   __ JumpIfNotSmi(index_, index_out_of_range_);
5423   // Otherwise, return to the fast path.
5424   __ jmp(&got_smi_index_);
5425 
5426   // Call runtime. We get here when the receiver is a string and the
5427   // index is a number, but the code of getting the actual character
5428   // is too complex (e.g., when the string needs to be flattened).
5429   __ bind(&call_runtime_);
5430   call_helper.BeforeCall(masm);
5431   __ push(object_);
5432   __ SmiTag(index_);
5433   __ push(index_);
5434   __ CallRuntime(Runtime::kStringCharCodeAt, 2);
5435   if (!result_.is(eax)) {
5436     __ mov(result_, eax);
5437   }
5438   call_helper.AfterCall(masm);
5439   __ jmp(&exit_);
5440 
5441   __ Abort("Unexpected fallthrough from CharCodeAt slow case");
5442 }
5443 
5444 
5445 // -------------------------------------------------------------------------
5446 // StringCharFromCodeGenerator
5447 
5448 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
5449   // Fast case of Heap::LookupSingleCharacterStringFromCode.
5450   STATIC_ASSERT(kSmiTag == 0);
5451   STATIC_ASSERT(kSmiShiftSize == 0);
5452   ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
5453   __ test(code_,
5454           Immediate(kSmiTagMask |
5455                     ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
5456   __ j(not_zero, &slow_case_);
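  // Worked example: with String::kMaxAsciiCharCode == 0x7f and
  // kSmiTagSize == 1, the mask is 1 | 0xffffff00 == 0xffffff01, so the test
  // is zero only for a smi (tag bit clear) whose untagged value is <= 127.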
5457 
5458   Factory* factory = masm->isolate()->factory();
5459   __ Set(result_, Immediate(factory->single_character_string_cache()));
5460   STATIC_ASSERT(kSmiTag == 0);
5461   STATIC_ASSERT(kSmiTagSize == 1);
5462   STATIC_ASSERT(kSmiShiftSize == 0);
5463   // At this point the code register contains a smi-tagged ASCII char code.
5464   __ mov(result_, FieldOperand(result_,
5465                                code_, times_half_pointer_size,
5466                                FixedArray::kHeaderSize));
5467   __ cmp(result_, factory->undefined_value());
5468   __ j(equal, &slow_case_);
5469   __ bind(&exit_);
5470 }
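
// The single test at the top of GenerateFast checks two things at once:
// that code_ is a smi (low tag bit clear) and that the untagged value is
// within the one-byte ASCII range. A C++ sketch of the same predicate,
// assuming the ia32 smi encoding (value << 1, tag bit 0); the names here
// are illustrative, not V8 API:
static bool IsAsciiCharCodeSmiSketch(uint32_t tagged_value) {
  const uint32_t kTagMaskSketch = 1;      // kSmiTagMask
  const uint32_t kMaxAsciiSketch = 0x7f;  // String::kMaxAsciiCharCode
  // Any set bit in the combined mask means "not a smi" or "out of range".
  return (tagged_value & (kTagMaskSketch | (~kMaxAsciiSketch << 1))) == 0;
}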
5471 
5472 
5473 void StringCharFromCodeGenerator::GenerateSlow(
5474     MacroAssembler* masm,
5475     const RuntimeCallHelper& call_helper) {
5476   __ Abort("Unexpected fallthrough to CharFromCode slow case");
5477 
5478   __ bind(&slow_case_);
5479   call_helper.BeforeCall(masm);
5480   __ push(code_);
5481   __ CallRuntime(Runtime::kCharFromCode, 1);
5482   if (!result_.is(eax)) {
5483     __ mov(result_, eax);
5484   }
5485   call_helper.AfterCall(masm);
5486   __ jmp(&exit_);
5487 
5488   __ Abort("Unexpected fallthrough from CharFromCode slow case");
5489 }
5490 
5491 
5492 // -------------------------------------------------------------------------
5493 // StringCharAtGenerator
5494 
5495 void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
5496   char_code_at_generator_.GenerateFast(masm);
5497   char_from_code_generator_.GenerateFast(masm);
5498 }
5499 
5500 
5501 void StringCharAtGenerator::GenerateSlow(
5502     MacroAssembler* masm,
5503     const RuntimeCallHelper& call_helper) {
5504   char_code_at_generator_.GenerateSlow(masm, call_helper);
5505   char_from_code_generator_.GenerateSlow(masm, call_helper);
5506 }
5507 
5508 
5509 void StringAddStub::Generate(MacroAssembler* masm) {
5510   Label call_runtime, call_builtin;
5511   Builtins::JavaScript builtin_id = Builtins::ADD;
5512 
5513   // Load the two arguments.
5514   __ mov(eax, Operand(esp, 2 * kPointerSize));  // First argument.
5515   __ mov(edx, Operand(esp, 1 * kPointerSize));  // Second argument.
5516 
5517   // Make sure that both arguments are strings if not known in advance.
5518   if (flags_ == NO_STRING_ADD_FLAGS) {
5519     __ JumpIfSmi(eax, &call_runtime);
5520     __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx);
5521     __ j(above_equal, &call_runtime);
5522 
5523     // First argument is a string; test the second.
5524     __ JumpIfSmi(edx, &call_runtime);
5525     __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx);
5526     __ j(above_equal, &call_runtime);
5527   } else {
5528     // Here at least one of the arguments is definitely a string.
5529     // We convert the one that is not known to be a string.
5530     if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
5531       ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
5532       GenerateConvertArgument(masm, 2 * kPointerSize, eax, ebx, ecx, edi,
5533                               &call_builtin);
5534       builtin_id = Builtins::STRING_ADD_RIGHT;
5535     } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
5536       ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
5537       GenerateConvertArgument(masm, 1 * kPointerSize, edx, ebx, ecx, edi,
5538                               &call_builtin);
5539       builtin_id = Builtins::STRING_ADD_LEFT;
5540     }
5541   }
5542 
5543   // Both arguments are strings.
5544   // eax: first string
5545   // edx: second string
5546   // Check if either of the strings is empty. In that case return the other.
5547   Label second_not_zero_length, both_not_zero_length;
5548   __ mov(ecx, FieldOperand(edx, String::kLengthOffset));
5549   STATIC_ASSERT(kSmiTag == 0);
5550   __ test(ecx, ecx);
5551   __ j(not_zero, &second_not_zero_length, Label::kNear);
5552   // Second string is empty, result is first string which is already in eax.
5553   Counters* counters = masm->isolate()->counters();
5554   __ IncrementCounter(counters->string_add_native(), 1);
5555   __ ret(2 * kPointerSize);
5556   __ bind(&second_not_zero_length);
5557   __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
5558   STATIC_ASSERT(kSmiTag == 0);
5559   __ test(ebx, ebx);
5560   __ j(not_zero, &both_not_zero_length, Label::kNear);
5561   // First string is empty, result is second string which is in edx.
5562   __ mov(eax, edx);
5563   __ IncrementCounter(counters->string_add_native(), 1);
5564   __ ret(2 * kPointerSize);
5565 
5566   // Both strings are non-empty.
5567   // eax: first string
5568   // ebx: length of first string as a smi
5569   // ecx: length of second string as a smi
5570   // edx: second string
5571   // Look at the length of the result of adding the two strings.
5572   Label string_add_flat_result, longer_than_two;
5573   __ bind(&both_not_zero_length);
5574   __ add(ebx, ecx);
5575   STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength);
5576   // Handle exceptionally long strings in the runtime system.
5577   __ j(overflow, &call_runtime);
5578   // Use the symbol table when adding two one-character strings, as it
5579   // helps later optimizations to return a symbol here.
5580   __ cmp(ebx, Immediate(Smi::FromInt(2)));
5581   __ j(not_equal, &longer_than_two);
5582 
5583   // Check that both strings are non-external ASCII strings.
5584   __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx, &call_runtime);
5585 
5586   // Get the two characters forming the new string.
5587   __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
5588   __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
5589 
5590   // Try to look up the two-character string in the symbol table. If it is
5591   // not found, just allocate a new one.
5592   Label make_two_character_string, make_two_character_string_no_reload;
5593   StringHelper::GenerateTwoCharacterSymbolTableProbe(
5594       masm, ebx, ecx, eax, edx, edi,
5595       &make_two_character_string_no_reload, &make_two_character_string);
5596   __ IncrementCounter(counters->string_add_native(), 1);
5597   __ ret(2 * kPointerSize);
5598 
5599   // Allocate a two character string.
5600   __ bind(&make_two_character_string);
5601   // Reload the arguments.
5602   __ mov(eax, Operand(esp, 2 * kPointerSize));  // First argument.
5603   __ mov(edx, Operand(esp, 1 * kPointerSize));  // Second argument.
5604   // Get the two characters forming the new string.
5605   __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
5606   __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
5607   __ bind(&make_two_character_string_no_reload);
5608   __ IncrementCounter(counters->string_add_make_two_char(), 1);
5609   __ AllocateAsciiString(eax, 2, edi, edx, &call_runtime);
5610   // Pack both characters in ebx.
5611   __ shl(ecx, kBitsPerByte);
5612   __ or_(ebx, ecx);
5613   // Set the characters in the new string.
5614   __ mov_w(FieldOperand(eax, SeqAsciiString::kHeaderSize), ebx);
5615   __ IncrementCounter(counters->string_add_native(), 1);
5616   __ ret(2 * kPointerSize);
5617 
5618   __ bind(&longer_than_two);
5619   // Check if resulting string will be flat.
5620   __ cmp(ebx, Immediate(Smi::FromInt(ConsString::kMinLength)));
5621   __ j(below, &string_add_flat_result);
5622 
5623   // If the result is not supposed to be flat, allocate a cons string
5624   // object. If both strings are ASCII, the result is an ASCII cons string.
5625   Label non_ascii, allocated, ascii_data;
5626   __ mov(edi, FieldOperand(eax, HeapObject::kMapOffset));
5627   __ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset));
5628   __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
5629   __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
5630   __ and_(ecx, edi);
5631   STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
5632   STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
5633   __ test(ecx, Immediate(kStringEncodingMask));
5634   __ j(zero, &non_ascii);
5635   __ bind(&ascii_data);
5636   // Allocate an ASCII cons string.
5637   __ AllocateAsciiConsString(ecx, edi, no_reg, &call_runtime);
5638   __ bind(&allocated);
5639   // Fill the fields of the cons string.
5640   if (FLAG_debug_code) __ AbortIfNotSmi(ebx);
5641   __ mov(FieldOperand(ecx, ConsString::kLengthOffset), ebx);
5642   __ mov(FieldOperand(ecx, ConsString::kHashFieldOffset),
5643          Immediate(String::kEmptyHashField));
5644   __ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax);
5645   __ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx);
5646   __ mov(eax, ecx);
5647   __ IncrementCounter(counters->string_add_native(), 1);
5648   __ ret(2 * kPointerSize);
5649   __ bind(&non_ascii);
5650   // At least one of the strings is two-byte. Check whether it happens
5651   // to contain only ASCII characters.
5652   // ecx: first instance type AND second instance type.
5653   // edi: second instance type.
5654   __ test(ecx, Immediate(kAsciiDataHintMask));
5655   __ j(not_zero, &ascii_data);
5656   __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
5657   __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
5658   __ xor_(edi, ecx);
5659   STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
5660   __ and_(edi, kAsciiStringTag | kAsciiDataHintTag);
5661   __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag);
5662   __ j(equal, &ascii_data);
5663   // Allocate a two byte cons string.
5664   __ AllocateTwoByteConsString(ecx, edi, no_reg, &call_runtime);
5665   __ jmp(&allocated);
5666 
5667   // We cannot encounter sliced strings or cons strings here since:
5668   STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
5669   // Handle creating a flat result from either external or sequential strings.
5670   // Locate the first character of each string.
5671   // eax: first string
5672   // ebx: length of resulting flat string as a smi
5673   // edx: second string
5674   Label first_prepared, second_prepared;
5675   Label first_is_sequential, second_is_sequential;
5676   __ bind(&string_add_flat_result);
5677   __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
5678   __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
5679   // ecx: instance type of first string
5680   STATIC_ASSERT(kSeqStringTag == 0);
5681   __ test_b(ecx, kStringRepresentationMask);
5682   __ j(zero, &first_is_sequential, Label::kNear);
5683   // Rule out short external string and load string resource.
5684   STATIC_ASSERT(kShortExternalStringTag != 0);
5685   __ test_b(ecx, kShortExternalStringMask);
5686   __ j(not_zero, &call_runtime);
5687   __ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
5688   STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
5689   __ jmp(&first_prepared, Label::kNear);
5690   __ bind(&first_is_sequential);
5691   __ add(eax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
5692   __ bind(&first_prepared);
5693 
5694   __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
5695   __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
5696   // Check whether both strings have same encoding.
5697   // edi: instance type of second string
5698   __ xor_(ecx, edi);
5699   __ test_b(ecx, kStringEncodingMask);
5700   __ j(not_zero, &call_runtime);
5701   STATIC_ASSERT(kSeqStringTag == 0);
5702   __ test_b(edi, kStringRepresentationMask);
5703   __ j(zero, &second_is_sequential, Label::kNear);
5704   // Rule out short external string and load string resource.
5705   STATIC_ASSERT(kShortExternalStringTag != 0);
5706   __ test_b(edi, kShortExternalStringMask);
5707   __ j(not_zero, &call_runtime);
5708   __ mov(edx, FieldOperand(edx, ExternalString::kResourceDataOffset));
5709   STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
5710   __ jmp(&second_prepared, Label::kNear);
5711   __ bind(&second_is_sequential);
5712   __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
5713   __ bind(&second_prepared);
5714 
5715   // Push the addresses of both strings' first characters onto the stack.
5716   __ push(edx);
5717   __ push(eax);
5718 
5719   Label non_ascii_string_add_flat_result, call_runtime_drop_two;
5720   // edi: instance type of second string
5721   // First string and second string have the same encoding.
5722   STATIC_ASSERT(kTwoByteStringTag == 0);
5723   __ test_b(edi, kStringEncodingMask);
5724   __ j(zero, &non_ascii_string_add_flat_result);
5725 
5726   // Both strings are ASCII strings.
5727   // ebx: length of resulting flat string as a smi
5728   __ SmiUntag(ebx);
5729   __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &call_runtime_drop_two);
5730   // eax: result string
5731   __ mov(ecx, eax);
5732   // Locate first character of result.
5733   __ add(ecx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
5734   // Load first argument's length and first character location.  Account for
5735   // values currently on the stack when fetching arguments from it.
5736   __ mov(edx, Operand(esp, 4 * kPointerSize));
5737   __ mov(edi, FieldOperand(edx, String::kLengthOffset));
5738   __ SmiUntag(edi);
5739   __ pop(edx);
5740   // eax: result string
5741   // ecx: first character of result
5742   // edx: first char of first argument
5743   // edi: length of first argument
5744   StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
5745   // Load second argument's length and first character location.  Account for
5746   // values currently on the stack when fetching arguments from it.
5747   __ mov(edx, Operand(esp, 2 * kPointerSize));
5748   __ mov(edi, FieldOperand(edx, String::kLengthOffset));
5749   __ SmiUntag(edi);
5750   __ pop(edx);
5751   // eax: result string
5752   // ecx: next character of result
5753   // edx: first char of second argument
5754   // edi: length of second argument
5755   StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
5756   __ IncrementCounter(counters->string_add_native(), 1);
5757   __ ret(2 * kPointerSize);
5758 
5759   // Handle creating a flat two byte result.
5760   // eax: first string - known to be two byte
5761   // ebx: length of resulting flat string as a smi
5762   // edx: second string
5763   __ bind(&non_ascii_string_add_flat_result);
5764   // Both strings are two byte strings.
5765   __ SmiUntag(ebx);
5766   __ AllocateTwoByteString(eax, ebx, ecx, edx, edi, &call_runtime_drop_two);
5767   // eax: result string
5768   __ mov(ecx, eax);
5769   // Locate first character of result.
5770   __ add(ecx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
5771   // Load second argument's length and first character location.  Account for
5772   // values currently on the stack when fetching arguments from it.
5773   __ mov(edx, Operand(esp, 4 * kPointerSize));
5774   __ mov(edi, FieldOperand(edx, String::kLengthOffset));
5775   __ SmiUntag(edi);
5776   __ pop(edx);
5777   // eax: result string
5778   // ecx: first character of result
5779   // edx: first char of first argument
5780   // edi: length of first argument
5781   StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
5782   // Load second argument's length and first character location.  Account for
5783   // values currently on the stack when fetching arguments from it.
5784   __ mov(edx, Operand(esp, 2 * kPointerSize));
5785   __ mov(edi, FieldOperand(edx, String::kLengthOffset));
5786   __ SmiUntag(edi);
5787   __ pop(edx);
5788   // eax: result string
5789   // ecx: next character of result
5790   // edx: first char of second argument
5791   // edi: length of second argument
5792   StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
5793   __ IncrementCounter(counters->string_add_native(), 1);
5794   __ ret(2 * kPointerSize);
5795 
5796   // Recover stack pointer before jumping to runtime.
5797   __ bind(&call_runtime_drop_two);
5798   __ Drop(2);
5799   // Just jump to runtime to add the two strings.
5800   __ bind(&call_runtime);
5801   __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
5802 
5803   if (call_builtin.is_linked()) {
5804     __ bind(&call_builtin);
5805     __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
5806   }
5807 }
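
// The fast paths above boil down to a small strategy choice on the two
// lengths. A condensed C++ sketch (hypothetical helper, not V8 API; the
// length-overflow and non-sequential/non-ASCII cases additionally fall
// back to the runtime):
enum AddStrategySketch {
  RETURN_LEFT,      // right string is empty
  RETURN_RIGHT,     // left string is empty
  TWO_CHAR_LOOKUP,  // probe the symbol table for a two-char result
  MAKE_CONS,        // O(1) cons string for long results
  FLATTEN_COPY      // short result: copy both parts into one flat string
};
static AddStrategySketch ChooseStringAddStrategy(int left_len, int right_len,
                                                 int min_cons_length) {
  if (right_len == 0) return RETURN_LEFT;
  if (left_len == 0) return RETURN_RIGHT;
  int result_len = left_len + right_len;
  if (result_len == 2) return TWO_CHAR_LOOKUP;
  if (result_len >= min_cons_length) return MAKE_CONS;
  return FLATTEN_COPY;
}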
5808 
5809 
5810 void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
5811                                             int stack_offset,
5812                                             Register arg,
5813                                             Register scratch1,
5814                                             Register scratch2,
5815                                             Register scratch3,
5816                                             Label* slow) {
5817   // First check if the argument is already a string.
5818   Label not_string, done;
5819   __ JumpIfSmi(arg, &not_string);
5820   __ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1);
5821   __ j(below, &done);
5822 
5823   // Check the number to string cache.
5824   Label not_cached;
5825   __ bind(&not_string);
5826   // Puts the cached result into scratch1.
5827   NumberToStringStub::GenerateLookupNumberStringCache(masm,
5828                                                       arg,
5829                                                       scratch1,
5830                                                       scratch2,
5831                                                       scratch3,
5832                                                       false,
5833                                                       &not_cached);
5834   __ mov(arg, scratch1);
5835   __ mov(Operand(esp, stack_offset), arg);
5836   __ jmp(&done);
5837 
5838   // Check if the argument is a safe string wrapper.
5839   __ bind(&not_cached);
5840   __ JumpIfSmi(arg, slow);
5841   __ CmpObjectType(arg, JS_VALUE_TYPE, scratch1);  // map -> scratch1.
5842   __ j(not_equal, slow);
5843   __ test_b(FieldOperand(scratch1, Map::kBitField2Offset),
5844             1 << Map::kStringWrapperSafeForDefaultValueOf);
5845   __ j(zero, slow);
5846   __ mov(arg, FieldOperand(arg, JSValue::kValueOffset));
5847   __ mov(Operand(esp, stack_offset), arg);
5848 
5849   __ bind(&done);
5850 }
5851 
5852 
5853 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
5854                                           Register dest,
5855                                           Register src,
5856                                           Register count,
5857                                           Register scratch,
5858                                           bool ascii) {
5859   Label loop;
5860   __ bind(&loop);
5861   // This loop just copies one character at a time, as it is only used for very
5862   // short strings.
5863   if (ascii) {
5864     __ mov_b(scratch, Operand(src, 0));
5865     __ mov_b(Operand(dest, 0), scratch);
5866     __ add(src, Immediate(1));
5867     __ add(dest, Immediate(1));
5868   } else {
5869     __ mov_w(scratch, Operand(src, 0));
5870     __ mov_w(Operand(dest, 0), scratch);
5871     __ add(src, Immediate(2));
5872     __ add(dest, Immediate(2));
5873   }
5874   __ sub(count, Immediate(1));
5875   __ j(not_zero, &loop);
5876 }
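
// For reference, the loop above is equivalent to this straightforward
// per-character copy (a sketch; `ascii` selects one-byte vs. two-byte
// characters, exactly as in the generated code):
static void CopyCharsSketch(unsigned char* dest, const unsigned char* src,
                            int count, bool ascii) {
  const int char_size = ascii ? 1 : 2;
  for (int i = 0; i < count; i++) {
    for (int b = 0; b < char_size; b++) dest[b] = src[b];
    dest += char_size;
    src += char_size;
  }
}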
5877 
5878 
5879 void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
5880                                              Register dest,
5881                                              Register src,
5882                                              Register count,
5883                                              Register scratch,
5884                                              bool ascii) {
5885   // Copy characters using rep movs of doublewords.
5886   // The destination is aligned on a 4 byte boundary because we are
5887   // copying to the beginning of a newly allocated string.
5888   ASSERT(dest.is(edi));  // rep movs destination
5889   ASSERT(src.is(esi));  // rep movs source
5890   ASSERT(count.is(ecx));  // rep movs count
5891   ASSERT(!scratch.is(dest));
5892   ASSERT(!scratch.is(src));
5893   ASSERT(!scratch.is(count));
5894 
5895   // Nothing to do for zero characters.
5896   Label done;
5897   __ test(count, count);
5898   __ j(zero, &done);
5899 
5900   // Make count the number of bytes to copy.
5901   if (!ascii) {
5902     __ shl(count, 1);
5903   }
5904 
5905   // Don't enter the rep movs if there are fewer than 4 bytes to copy.
5906   Label last_bytes;
5907   __ test(count, Immediate(~3));
5908   __ j(zero, &last_bytes, Label::kNear);
5909 
5910   // Copy from esi to edi using the rep movs instruction.
5911   __ mov(scratch, count);
5912   __ sar(count, 2);  // Number of doublewords to copy.
5913   __ cld();
5914   __ rep_movs();
5915 
5916   // Find number of bytes left.
5917   __ mov(count, scratch);
5918   __ and_(count, 3);
5919 
5920   // Check if there are more bytes to copy.
5921   __ bind(&last_bytes);
5922   __ test(count, count);
5923   __ j(zero, &done);
5924 
5925   // Copy remaining characters.
5926   Label loop;
5927   __ bind(&loop);
5928   __ mov_b(scratch, Operand(src, 0));
5929   __ mov_b(Operand(dest, 0), scratch);
5930   __ add(src, Immediate(1));
5931   __ add(dest, Immediate(1));
5932   __ sub(count, Immediate(1));
5933   __ j(not_zero, &loop);
5934 
5935   __ bind(&done);
5936 }
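
// The REP variant amounts to: convert the character count to a byte count
// (doubling it for two-byte strings), copy whole 4-byte doublewords first,
// then finish the remaining 0-3 bytes one at a time. A C++ sketch of that
// strategy (not the emitted code itself):
static void CopyBytesRepSketch(unsigned char* dest, const unsigned char* src,
                               unsigned byte_count) {
  unsigned dwords = byte_count >> 2;  // models `rep movsd`
  for (unsigned i = 0; i < dwords; i++) {
    for (int b = 0; b < 4; b++) *dest++ = *src++;
  }
  for (unsigned i = 0; i < (byte_count & 3); i++) *dest++ = *src++;
}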
5937 
5938 
5939 void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
5940                                                         Register c1,
5941                                                         Register c2,
5942                                                         Register scratch1,
5943                                                         Register scratch2,
5944                                                         Register scratch3,
5945                                                         Label* not_probed,
5946                                                         Label* not_found) {
5947   // Register scratch3 is the general scratch register in this function.
5948   Register scratch = scratch3;
5949 
5950   // Make sure that neither character is a digit, as such strings have a
5951   // different hash algorithm. Don't try to look for these in the symbol table.
5952   Label not_array_index;
5953   __ mov(scratch, c1);
5954   __ sub(scratch, Immediate(static_cast<int>('0')));
5955   __ cmp(scratch, Immediate(static_cast<int>('9' - '0')));
5956   __ j(above, &not_array_index, Label::kNear);
5957   __ mov(scratch, c2);
5958   __ sub(scratch, Immediate(static_cast<int>('0')));
5959   __ cmp(scratch, Immediate(static_cast<int>('9' - '0')));
5960   __ j(below_equal, not_probed);
5961 
5962   __ bind(&not_array_index);
5963   // Calculate the two character string hash.
5964   Register hash = scratch1;
5965   GenerateHashInit(masm, hash, c1, scratch);
5966   GenerateHashAddCharacter(masm, hash, c2, scratch);
5967   GenerateHashGetHash(masm, hash, scratch);
5968 
5969   // Collect the two characters in a register.
5970   Register chars = c1;
5971   __ shl(c2, kBitsPerByte);
5972   __ or_(chars, c2);
5973 
5974   // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5975   // hash:  hash of two character string.
5976 
5977   // Load the symbol table.
5978   Register symbol_table = c2;
5979   ExternalReference roots_array_start =
5980       ExternalReference::roots_array_start(masm->isolate());
5981   __ mov(scratch, Immediate(Heap::kSymbolTableRootIndex));
5982   __ mov(symbol_table,
5983          Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
5984 
5985   // Calculate capacity mask from the symbol table capacity.
5986   Register mask = scratch2;
5987   __ mov(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
5988   __ SmiUntag(mask);
5989   __ sub(mask, Immediate(1));
5990 
5991   // Registers
5992   // chars:        two character string, char 1 in byte 0 and char 2 in byte 1.
5993   // hash:         hash of two character string
5994   // symbol_table: symbol table
5995   // mask:         capacity mask
5996   // scratch:      -
5997 
5998   // Perform a number of probes in the symbol table.
5999   static const int kProbes = 4;
6000   Label found_in_symbol_table;
6001   Label next_probe[kProbes], next_probe_pop_mask[kProbes];
6002   Register candidate = scratch;  // Scratch register contains candidate.
6003   for (int i = 0; i < kProbes; i++) {
6004     // Calculate entry in symbol table.
6005     __ mov(scratch, hash);
6006     if (i > 0) {
6007       __ add(scratch, Immediate(SymbolTable::GetProbeOffset(i)));
6008     }
6009     __ and_(scratch, mask);
6010 
6011     // Load the entry from the symbol table.
6012     STATIC_ASSERT(SymbolTable::kEntrySize == 1);
6013     __ mov(candidate,
6014            FieldOperand(symbol_table,
6015                         scratch,
6016                         times_pointer_size,
6017                         SymbolTable::kElementsStartOffset));
6018 
6019     // If entry is undefined no string with this hash can be found.
6020     Factory* factory = masm->isolate()->factory();
6021     __ cmp(candidate, factory->undefined_value());
6022     __ j(equal, not_found);
6023     __ cmp(candidate, factory->the_hole_value());
6024     __ j(equal, &next_probe[i]);
6025 
6026     // If length is not 2 the string is not a candidate.
6027     __ cmp(FieldOperand(candidate, String::kLengthOffset),
6028            Immediate(Smi::FromInt(2)));
6029     __ j(not_equal, &next_probe[i]);
6030 
6031     // As we are out of registers save the mask on the stack and use that
6032     // register as a temporary.
6033     __ push(mask);
6034     Register temp = mask;
6035 
6036     // Check that the candidate is a non-external ASCII string.
6037     __ mov(temp, FieldOperand(candidate, HeapObject::kMapOffset));
6038     __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
6039     __ JumpIfInstanceTypeIsNotSequentialAscii(
6040         temp, temp, &next_probe_pop_mask[i]);
6041 
6042     // Check if the two characters match.
6043     __ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
6044     __ and_(temp, 0x0000ffff);
6045     __ cmp(chars, temp);
6046     __ j(equal, &found_in_symbol_table);
6047     __ bind(&next_probe_pop_mask[i]);
6048     __ pop(mask);
6049     __ bind(&next_probe[i]);
6050   }
6051 
6052   // No matching 2 character string found by probing.
6053   __ jmp(not_found);
6054 
6055   // Scratch register contains result when we fall through to here.
6056   Register result = candidate;
6057   __ bind(&found_in_symbol_table);
6058   __ pop(mask);  // Pop saved mask from the stack.
6059   if (!result.is(eax)) {
6060     __ mov(eax, result);
6061   }
6062 }
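
// The probe loop above is open addressing with triangular probe offsets
// (the SymbolTable::GetProbeOffset calls). A toy C++ model of the same
// lookup, assuming a table of packed 16-bit character pairs where 0xFFFF
// marks an empty slot and 0xFFFE a deleted one (the real table stores
// undefined and the-hole sentinels, and also checks length and
// representation before comparing characters):
static int ProbeTwoCharSketch(const unsigned short* table, unsigned mask,
                              unsigned hash, unsigned short chars) {
  for (unsigned i = 0; i < 4; i++) {  // kProbes
    unsigned entry = (hash + i * (i + 1) / 2) & mask;
    unsigned short candidate = table[entry];
    if (candidate == 0xFFFF) return -1;  // empty: string cannot be present
    if (candidate == 0xFFFE) continue;   // deleted: keep probing
    if (candidate == chars) return static_cast<int>(entry);
  }
  return -1;  // give up after kProbes probes; caller allocates a new string
}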
6063 
6064 
6065 void StringHelper::GenerateHashInit(MacroAssembler* masm,
6066                                     Register hash,
6067                                     Register character,
6068                                     Register scratch) {
6069   // hash = (seed + character) + ((seed + character) << 10);
6070   if (Serializer::enabled()) {
6071     ExternalReference roots_array_start =
6072         ExternalReference::roots_array_start(masm->isolate());
6073     __ mov(scratch, Immediate(Heap::kHashSeedRootIndex));
6074     __ mov(scratch, Operand::StaticArray(scratch,
6075                                          times_pointer_size,
6076                                          roots_array_start));
6077     __ SmiUntag(scratch);
6078     __ add(scratch, character);
6079     __ mov(hash, scratch);
6080     __ shl(scratch, 10);
6081     __ add(hash, scratch);
6082   } else {
6083     int32_t seed = masm->isolate()->heap()->HashSeed();
6084     __ lea(scratch, Operand(character, seed));
6085     __ shl(scratch, 10);
6086     __ lea(hash, Operand(scratch, character, times_1, seed));
6087   }
6088   // hash ^= hash >> 6;
6089   __ mov(scratch, hash);
6090   __ shr(scratch, 6);
6091   __ xor_(hash, scratch);
6092 }
6093 
6094 
6095 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
6096                                             Register hash,
6097                                             Register character,
6098                                             Register scratch) {
6099   // hash += character;
6100   __ add(hash, character);
6101   // hash += hash << 10;
6102   __ mov(scratch, hash);
6103   __ shl(scratch, 10);
6104   __ add(hash, scratch);
6105   // hash ^= hash >> 6;
6106   __ mov(scratch, hash);
6107   __ shr(scratch, 6);
6108   __ xor_(hash, scratch);
6109 }
6110 
6111 
6112 void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
6113                                        Register hash,
6114                                        Register scratch) {
6115   // hash += hash << 3;
6116   __ mov(scratch, hash);
6117   __ shl(scratch, 3);
6118   __ add(hash, scratch);
6119   // hash ^= hash >> 11;
6120   __ mov(scratch, hash);
6121   __ shr(scratch, 11);
6122   __ xor_(hash, scratch);
6123   // hash += hash << 15;
6124   __ mov(scratch, hash);
6125   __ shl(scratch, 15);
6126   __ add(hash, scratch);
6127 
6128   __ and_(hash, String::kHashBitMask);
6129 
6130   // if (hash == 0) hash = 27;
6131   Label hash_not_zero;
6132   __ j(not_zero, &hash_not_zero, Label::kNear);
6133   __ mov(hash, Immediate(StringHasher::kZeroHash));
6134   __ bind(&hash_not_zero);
6135 }
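
// Taken together, the three hash helpers above implement V8's seeded
// one-at-a-time string hash. A scalar C++ sketch mirroring the comments
// (the 30-bit mask stands in for String::kHashBitMask and is an assumption
// of this sketch; the zero-hash replacement value 27 is taken from the
// code above):
static uint32_t StringHashSketch(const unsigned char* chars, int length,
                                 uint32_t seed) {
  uint32_t hash = seed;
  for (int i = 0; i < length; i++) {  // GenerateHashInit / AddCharacter
    hash += chars[i];
    hash += hash << 10;
    hash ^= hash >> 6;
  }
  hash += hash << 3;                  // GenerateHashGetHash
  hash ^= hash >> 11;
  hash += hash << 15;
  hash &= (1u << 30) - 1;             // String::kHashBitMask (assumed width)
  return hash == 0 ? 27 : hash;       // StringHasher::kZeroHash
}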
6136 
6137 
6138 void SubStringStub::Generate(MacroAssembler* masm) {
6139   Label runtime;
6140 
6141   // Stack frame on entry.
6142   //  esp[0]: return address
6143   //  esp[4]: to
6144   //  esp[8]: from
6145   //  esp[12]: string
6146 
6147   // Make sure first argument is a string.
6148   __ mov(eax, Operand(esp, 3 * kPointerSize));
6149   STATIC_ASSERT(kSmiTag == 0);
6150   __ JumpIfSmi(eax, &runtime);
6151   Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
6152   __ j(NegateCondition(is_string), &runtime);
6153 
6154   // eax: string
6155   // ebx: instance type
6156 
6157   // Calculate length of sub string using the smi values.
6158   __ mov(ecx, Operand(esp, 1 * kPointerSize));  // To index.
6159   __ JumpIfNotSmi(ecx, &runtime);
6160   __ mov(edx, Operand(esp, 2 * kPointerSize));  // From index.
6161   __ JumpIfNotSmi(edx, &runtime);
6162   __ sub(ecx, edx);
6163   __ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
6164   Label not_original_string;
6165   __ j(not_equal, &not_original_string, Label::kNear);
6166   Counters* counters = masm->isolate()->counters();
6167   __ IncrementCounter(counters->sub_string_native(), 1);
6168   __ ret(3 * kPointerSize);
6169   __ bind(&not_original_string);
6170 
6171   // eax: string
6172   // ebx: instance type
6173   // ecx: sub string length (smi)
6174   // edx: from index (smi)
6175   // Deal with different string types: update the index if necessary
6176   // and put the underlying string into edi.
6177   Label underlying_unpacked, sliced_string, seq_or_external_string;
6178   // If the string is not indirect, it can only be sequential or external.
6179   STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
6180   STATIC_ASSERT(kIsIndirectStringMask != 0);
6181   __ test(ebx, Immediate(kIsIndirectStringMask));
6182   __ j(zero, &seq_or_external_string, Label::kNear);
6183 
6184   Factory* factory = masm->isolate()->factory();
6185   __ test(ebx, Immediate(kSlicedNotConsMask));
6186   __ j(not_zero, &sliced_string, Label::kNear);
6187   // Cons string.  Check whether it is flat, then fetch first part.
6188   // Flat cons strings have an empty second part.
6189   __ cmp(FieldOperand(eax, ConsString::kSecondOffset),
6190          factory->empty_string());
6191   __ j(not_equal, &runtime);
6192   __ mov(edi, FieldOperand(eax, ConsString::kFirstOffset));
6193   // Update instance type.
6194   __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
6195   __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
6196   __ jmp(&underlying_unpacked, Label::kNear);
6197 
6198   __ bind(&sliced_string);
6199   // Sliced string.  Fetch parent and adjust start index by offset.
6200   __ add(edx, FieldOperand(eax, SlicedString::kOffsetOffset));
6201   __ mov(edi, FieldOperand(eax, SlicedString::kParentOffset));
6202   // Update instance type.
6203   __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
6204   __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
6205   __ jmp(&underlying_unpacked, Label::kNear);
6206 
6207   __ bind(&seq_or_external_string);
6208   // Sequential or external string.  Just move string to the expected register.
6209   __ mov(edi, eax);
6210 
6211   __ bind(&underlying_unpacked);
6212 
6213   if (FLAG_string_slices) {
6214     Label copy_routine;
6215     // edi: underlying subject string
6216     // ebx: instance type of underlying subject string
6217     // edx: adjusted start index (smi)
6218     // ecx: length (smi)
6219     __ cmp(ecx, Immediate(Smi::FromInt(SlicedString::kMinLength)));
6220     // Short slice.  Copy instead of slicing.
6221     __ j(less, &copy_routine);
6222     // Allocate new sliced string.  At this point we do not reload the instance
6223     // type including the string encoding because we simply rely on the info
6224     // provided by the original string.  It does not matter if the original
6225     // string's encoding is wrong because we always have to recheck encoding of
6226     // the newly created string's parent anyway, due to externalized strings.
6227     Label two_byte_slice, set_slice_header;
6228     STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
6229     STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
6230     __ test(ebx, Immediate(kStringEncodingMask));
6231     __ j(zero, &two_byte_slice, Label::kNear);
6232     __ AllocateAsciiSlicedString(eax, ebx, no_reg, &runtime);
6233     __ jmp(&set_slice_header, Label::kNear);
6234     __ bind(&two_byte_slice);
6235     __ AllocateTwoByteSlicedString(eax, ebx, no_reg, &runtime);
6236     __ bind(&set_slice_header);
6237     __ mov(FieldOperand(eax, SlicedString::kLengthOffset), ecx);
6238     __ mov(FieldOperand(eax, SlicedString::kHashFieldOffset),
6239            Immediate(String::kEmptyHashField));
6240     __ mov(FieldOperand(eax, SlicedString::kParentOffset), edi);
6241     __ mov(FieldOperand(eax, SlicedString::kOffsetOffset), edx);
6242     __ IncrementCounter(counters->sub_string_native(), 1);
6243     __ ret(3 * kPointerSize);
6244 
6245     __ bind(&copy_routine);
6246   }
6247 
6248   // edi: underlying subject string
6249   // ebx: instance type of underlying subject string
6250   // edx: adjusted start index (smi)
6251   // ecx: length (smi)
6252   // The subject string can only be an external or a sequential string of
6253   // either encoding at this point.
6254   Label two_byte_sequential, runtime_drop_two, sequential_string;
6255   STATIC_ASSERT(kExternalStringTag != 0);
6256   STATIC_ASSERT(kSeqStringTag == 0);
6257   __ test_b(ebx, kExternalStringTag);
6258   __ j(zero, &sequential_string);
6259 
6260   // Handle external string.
6261   // Rule out short external strings.
6262   STATIC_CHECK(kShortExternalStringTag != 0);
6263   __ test_b(ebx, kShortExternalStringMask);
6264   __ j(not_zero, &runtime);
6265   __ mov(edi, FieldOperand(edi, ExternalString::kResourceDataOffset));
6266   // Move the pointer so that offset-wise, it looks like a sequential string.
6267   STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
6268   __ sub(edi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6269 
6270   __ bind(&sequential_string);
6271   // Stash away (adjusted) index and (underlying) string.
6272   __ push(edx);
6273   __ push(edi);
6274   __ SmiUntag(ecx);
6275   STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
6276   __ test_b(ebx, kStringEncodingMask);
6277   __ j(zero, &two_byte_sequential);
6278 
6279   // Sequential ASCII string.  Allocate the result.
6280   __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
6281 
6282   // eax: result string
6283   // ecx: result string length
6284   __ mov(edx, esi);  // esi used by following code.
6285   // Locate first character of result.
6286   __ mov(edi, eax);
6287   __ add(edi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6288   // Load string argument and locate character of sub string start.
6289   __ pop(esi);
6290   __ pop(ebx);
6291   __ SmiUntag(ebx);
6292   __ lea(esi, FieldOperand(esi, ebx, times_1, SeqAsciiString::kHeaderSize));
6293 
6294   // eax: result string
6295   // ecx: result length
6296   // edx: original value of esi
6297   // edi: first character of result
6298   // esi: character of sub string start
6299   StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true);
6300   __ mov(esi, edx);  // Restore esi.
6301   __ IncrementCounter(counters->sub_string_native(), 1);
6302   __ ret(3 * kPointerSize);
6303 
6304   __ bind(&two_byte_sequential);
6305   // Sequential two-byte string.  Allocate the result.
6306   __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
6307 
6308   // eax: result string
6309   // ecx: result string length
6310   __ mov(edx, esi);  // esi used by following code.
6311   // Locate first character of result.
6312   __ mov(edi, eax);
6313   __ add(edi,
6314          Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6315   // Load string argument and locate character of sub string start.
6316   __ pop(esi);
6317   __ pop(ebx);
6318   // As from is a smi it is already multiplied by 2, which matches the
6319   // size of a two-byte character.
6320   STATIC_ASSERT(kSmiTag == 0);
6321   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
6322   __ lea(esi, FieldOperand(esi, ebx, times_1, SeqTwoByteString::kHeaderSize));
6323 
6324   // eax: result string
6325   // ecx: result length
6326   // edx: original value of esi
6327   // edi: first character of result
6328   // esi: character of sub string start
6329   StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
6330   __ mov(esi, edx);  // Restore esi.
6331   __ IncrementCounter(counters->sub_string_native(), 1);
6332   __ ret(3 * kPointerSize);
6333 
6334   // Drop pushed values on the stack before tail call.
6335   __ bind(&runtime_drop_two);
6336   __ Drop(2);
6337 
6338   // Just jump to runtime to create the sub string.
6339   __ bind(&runtime);
6340   __ TailCallRuntime(Runtime::kSubString, 3, 1);
6341 }
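
// In outline, the stub chooses among three strategies. A condensed C++
// sketch (hypothetical helper; the slice threshold is
// SlicedString::kMinLength, and slices must be enabled via
// FLAG_string_slices for MAKE_SLICE to apply):
enum SubStringStrategySketch { RETURN_ORIGINAL, MAKE_SLICE, COPY_CHARS };
static SubStringStrategySketch ChooseSubStringStrategy(int from, int to,
                                                       int string_length,
                                                       int min_slice_length,
                                                       bool slices_enabled) {
  const int length = to - from;
  if (length == string_length) return RETURN_ORIGINAL;  // whole string
  if (slices_enabled && length >= min_slice_length) return MAKE_SLICE;
  return COPY_CHARS;  // short result: cheaper to copy than to hold a parent
}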
6342 
6343 
6344 void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
6345                                                       Register left,
6346                                                       Register right,
6347                                                       Register scratch1,
6348                                                       Register scratch2) {
6349   Register length = scratch1;
6350 
6351   // Compare lengths.
6352   Label strings_not_equal, check_zero_length;
6353   __ mov(length, FieldOperand(left, String::kLengthOffset));
6354   __ cmp(length, FieldOperand(right, String::kLengthOffset));
6355   __ j(equal, &check_zero_length, Label::kNear);
6356   __ bind(&strings_not_equal);
6357   __ Set(eax, Immediate(Smi::FromInt(NOT_EQUAL)));
6358   __ ret(0);
6359 
6360   // Check if the length is zero.
6361   Label compare_chars;
6362   __ bind(&check_zero_length);
6363   STATIC_ASSERT(kSmiTag == 0);
6364   __ test(length, length);
6365   __ j(not_zero, &compare_chars, Label::kNear);
6366   __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
6367   __ ret(0);
6368 
6369   // Compare characters.
6370   __ bind(&compare_chars);
6371   GenerateAsciiCharsCompareLoop(masm, left, right, length, scratch2,
6372                                 &strings_not_equal, Label::kNear);
6373 
6374   // Characters are equal.
6375   __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
6376   __ ret(0);
6377 }
6378 
6379 
6380 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
6381                                                         Register left,
6382                                                         Register right,
6383                                                         Register scratch1,
6384                                                         Register scratch2,
6385                                                         Register scratch3) {
6386   Counters* counters = masm->isolate()->counters();
6387   __ IncrementCounter(counters->string_compare_native(), 1);
6388 
6389   // Find minimum length.
6390   Label left_shorter;
6391   __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
6392   __ mov(scratch3, scratch1);
6393   __ sub(scratch3, FieldOperand(right, String::kLengthOffset));
6394 
6395   Register length_delta = scratch3;
6396 
6397   __ j(less_equal, &left_shorter, Label::kNear);
6398   // Right string is shorter. Change scratch1 to be length of right string.
6399   __ sub(scratch1, length_delta);
6400   __ bind(&left_shorter);
6401 
6402   Register min_length = scratch1;
6403 
6404   // If either length is zero, just compare lengths.
6405   Label compare_lengths;
6406   __ test(min_length, min_length);
6407   __ j(zero, &compare_lengths, Label::kNear);
6408 
6409   // Compare characters.
6410   Label result_not_equal;
6411   GenerateAsciiCharsCompareLoop(masm, left, right, min_length, scratch2,
6412                                 &result_not_equal, Label::kNear);
6413 
6414   // Compare lengths: strings up to min-length are equal.
6415   __ bind(&compare_lengths);
6416   __ test(length_delta, length_delta);
6417   __ j(not_zero, &result_not_equal, Label::kNear);
6418 
6419   // Result is EQUAL.
6420   STATIC_ASSERT(EQUAL == 0);
6421   STATIC_ASSERT(kSmiTag == 0);
6422   __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
6423   __ ret(0);
6424 
6425   Label result_greater;
6426   __ bind(&result_not_equal);
6427   __ j(greater, &result_greater, Label::kNear);
6428 
6429   // Result is LESS.
6430   __ Set(eax, Immediate(Smi::FromInt(LESS)));
6431   __ ret(0);
6432 
6433   // Result is GREATER.
6434   __ bind(&result_greater);
6435   __ Set(eax, Immediate(Smi::FromInt(GREATER)));
6436   __ ret(0);
6437 }
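
// The control flow above is the classic lexicographic compare: scan the
// common prefix, and if it is identical, order by length. A C++ sketch
// returning LESS (-1), EQUAL (0), or GREATER (1):
static int CompareFlatSketch(const unsigned char* left, int left_len,
                             const unsigned char* right, int right_len) {
  const int min_length = left_len < right_len ? left_len : right_len;
  for (int i = 0; i < min_length; i++) {
    if (left[i] != right[i]) return left[i] < right[i] ? -1 : 1;
  }
  if (left_len == right_len) return 0;
  return left_len < right_len ? -1 : 1;
}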
6438 
6439 
6440 void StringCompareStub::GenerateAsciiCharsCompareLoop(
6441     MacroAssembler* masm,
6442     Register left,
6443     Register right,
6444     Register length,
6445     Register scratch,
6446     Label* chars_not_equal,
6447     Label::Distance chars_not_equal_near) {
6448   // Change index to run from -length to -1 by adding length to the string
6449   // start. This means that the loop ends when the index reaches zero,
6450   // which doesn't need an additional compare.
6451   __ SmiUntag(length);
6452   __ lea(left,
6453          FieldOperand(left, length, times_1, SeqAsciiString::kHeaderSize));
6454   __ lea(right,
6455          FieldOperand(right, length, times_1, SeqAsciiString::kHeaderSize));
6456   __ neg(length);
6457   Register index = length;  // index = -length;
6458 
6459   // Compare loop.
6460   Label loop;
6461   __ bind(&loop);
6462   __ mov_b(scratch, Operand(left, index, times_1, 0));
6463   __ cmpb(scratch, Operand(right, index, times_1, 0));
6464   __ j(not_equal, chars_not_equal, chars_not_equal_near);
6465   __ inc(index);
6466   __ j(not_zero, &loop);
6467 }
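
// The same negative-index trick expressed in C++: point both cursors just
// past the compared region and count an index up from -length towards
// zero, so the loop condition doubles as the termination test (a sketch,
// not the stub itself):
static bool AsciiCharsEqualSketch(const unsigned char* left,
                                  const unsigned char* right, int length) {
  left += length;
  right += length;
  for (int index = -length; index != 0; index++) {
    if (left[index] != right[index]) return false;
  }
  return true;
}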
6468 
6469 
6470 void StringCompareStub::Generate(MacroAssembler* masm) {
6471   Label runtime;
6472 
6473   // Stack frame on entry.
6474   //  esp[0]: return address
6475   //  esp[4]: right string
6476   //  esp[8]: left string
6477 
6478   __ mov(edx, Operand(esp, 2 * kPointerSize));  // left
6479   __ mov(eax, Operand(esp, 1 * kPointerSize));  // right
6480 
6481   Label not_same;
6482   __ cmp(edx, eax);
6483   __ j(not_equal, &not_same, Label::kNear);
6484   STATIC_ASSERT(EQUAL == 0);
6485   STATIC_ASSERT(kSmiTag == 0);
6486   __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
6487   __ IncrementCounter(masm->isolate()->counters()->string_compare_native(), 1);
6488   __ ret(2 * kPointerSize);
6489 
6490   __ bind(&not_same);
6491 
6492   // Check that both objects are sequential ASCII strings.
6493   __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);
6494 
6495   // Compare flat ASCII strings.
6496   // Drop arguments from the stack.
6497   __ pop(ecx);
6498   __ add(esp, Immediate(2 * kPointerSize));
6499   __ push(ecx);
6500   GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
6501 
6502   // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
6503   // tagged as a small integer.
6504   __ bind(&runtime);
6505   __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
6506 }
6507 
6508 
6509 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
6510   ASSERT(state_ == CompareIC::SMIS);
6511   Label miss;
6512   __ mov(ecx, edx);
6513   __ or_(ecx, eax);
6514   __ JumpIfNotSmi(ecx, &miss, Label::kNear);
6515 
6516   if (GetCondition() == equal) {
6517     // For equality we do not care about the sign of the result.
6518     __ sub(eax, edx);
6519   } else {
6520     Label done;
6521     __ sub(edx, eax);
6522     __ j(no_overflow, &done, Label::kNear);
6523     // Correct sign of result in case of overflow.
6524     __ not_(edx);
6525     __ bind(&done);
6526     __ mov(eax, edx);
6527   }
6528   __ ret(0);
6529 
6530   __ bind(&miss);
6531   GenerateMiss(masm);
6532 }
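
// For ordered compares the stub returns edx - eax and, on signed overflow,
// flips all bits of the result: when the subtraction overflows, the sign
// bit is the opposite of the true ordering, and `not` (~x == -x - 1)
// yields a value with the correct sign. A C++ illustration using a wider
// type to avoid undefined 32-bit overflow (two's complement truncation
// assumed, as on ia32):
static int32_t SmiCompareSketch(int32_t left, int32_t right) {
  int64_t wide = static_cast<int64_t>(left) - right;  // exact difference
  int32_t narrow = static_cast<int32_t>(wide);        // what `sub` leaves
  if (wide == narrow) return narrow;                  // no overflow
  return ~narrow;  // overflow: bitwise-not restores the correct sign
}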
6533 
6534 
6535 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
6536   ASSERT(state_ == CompareIC::HEAP_NUMBERS);
6537 
6538   Label generic_stub;
6539   Label unordered, maybe_undefined1, maybe_undefined2;
6540   Label miss;
6541   __ mov(ecx, edx);
6542   __ and_(ecx, eax);
6543   __ JumpIfSmi(ecx, &generic_stub, Label::kNear);
6544 
6545   __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
6546   __ j(not_equal, &maybe_undefined1, Label::kNear);
6547   __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
6548   __ j(not_equal, &maybe_undefined2, Label::kNear);
6549 
6550   // Inline the double comparison, falling back to the general compare
6551   // stub if NaN is involved or SSE2 or CMOV is unsupported.
6552   if (CpuFeatures::IsSupported(SSE2) && CpuFeatures::IsSupported(CMOV)) {
6553     CpuFeatures::Scope scope1(SSE2);
6554     CpuFeatures::Scope scope2(CMOV);
6555 
6556     // Load left and right operand
6557     __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
6558     __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
6559 
6560     // Compare operands
6561     __ ucomisd(xmm0, xmm1);
6562 
6563     // Don't base result on EFLAGS when a NaN is involved.
6564     __ j(parity_even, &unordered, Label::kNear);
6565 
6566     // Return a result of -1, 0, or 1, based on EFLAGS.
6567     // Use a mov, because xor would clobber the flags register.
6568     __ mov(eax, 0);  // equal
6569     __ mov(ecx, Immediate(Smi::FromInt(1)));
6570     __ cmov(above, eax, ecx);
6571     __ mov(ecx, Immediate(Smi::FromInt(-1)));
6572     __ cmov(below, eax, ecx);
6573     __ ret(0);
6574   }
6575 
6576   __ bind(&unordered);
6577   CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
6578   __ bind(&generic_stub);
6579   __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
6580 
6581   __ bind(&maybe_undefined1);
6582   if (Token::IsOrderedRelationalCompareOp(op_)) {
6583     __ cmp(eax, Immediate(masm->isolate()->factory()->undefined_value()));
6584     __ j(not_equal, &miss);
6585     __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
6586     __ j(not_equal, &maybe_undefined2, Label::kNear);
6587     __ jmp(&unordered);
6588   }
6589 
6590   __ bind(&maybe_undefined2);
6591   if (Token::IsOrderedRelationalCompareOp(op_)) {
6592     __ cmp(edx, Immediate(masm->isolate()->factory()->undefined_value()));
6593     __ j(equal, &unordered);
6594   }
6595 
6596   __ bind(&miss);
6597   GenerateMiss(masm);
6598 }
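
// The SSE2 path maps onto this logic: an unordered (NaN) comparison bails
// out to the generic stub; otherwise -1, 0, or 1 is materialized with
// conditional moves. A C++ sketch of the fast path:
static int DoubleCompareSketch(double left, double right, bool* unordered) {
  *unordered = (left != left) || (right != right);  // NaN => parity_even
  if (*unordered) return 0;  // caller takes the generic/slow path instead
  if (left > right) return 1;   // cmov(above)
  if (left < right) return -1;  // cmov(below)
  return 0;                     // equal
}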
6599 
6600 
6601 void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
6602   ASSERT(state_ == CompareIC::SYMBOLS);
6603   ASSERT(GetCondition() == equal);
6604 
6605   // Registers containing left and right operands respectively.
6606   Register left = edx;
6607   Register right = eax;
6608   Register tmp1 = ecx;
6609   Register tmp2 = ebx;
6610 
6611   // Check that both operands are heap objects.
6612   Label miss;
6613   __ mov(tmp1, left);
6614   STATIC_ASSERT(kSmiTag == 0);
6615   __ and_(tmp1, right);
6616   __ JumpIfSmi(tmp1, &miss, Label::kNear);
6617 
6618   // Check that both operands are symbols.
6619   __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
6620   __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
6621   __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
6622   __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
6623   STATIC_ASSERT(kSymbolTag != 0);
6624   __ and_(tmp1, tmp2);
6625   __ test(tmp1, Immediate(kIsSymbolMask));
6626   __ j(zero, &miss, Label::kNear);
6627 
6628   // Symbols are compared by identity.
6629   Label done;
6630   __ cmp(left, right);
6631   // Make sure eax is non-zero. At this point input operands are
6632   // guaranteed to be non-zero.
6633   ASSERT(right.is(eax));
6634   __ j(not_equal, &done, Label::kNear);
6635   STATIC_ASSERT(EQUAL == 0);
6636   STATIC_ASSERT(kSmiTag == 0);
6637   __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
6638   __ bind(&done);
6639   __ ret(0);
6640 
6641   __ bind(&miss);
6642   GenerateMiss(masm);
6643 }
6644 
6645 
6646 void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
6647   ASSERT(state_ == CompareIC::STRINGS);
6648   Label miss;
6649 
6650   bool equality = Token::IsEqualityOp(op_);
6651 
6652   // Registers containing left and right operands respectively.
6653   Register left = edx;
6654   Register right = eax;
6655   Register tmp1 = ecx;
6656   Register tmp2 = ebx;
6657   Register tmp3 = edi;
6658 
6659   // Check that both operands are heap objects.
6660   __ mov(tmp1, left);
6661   STATIC_ASSERT(kSmiTag == 0);
6662   __ and_(tmp1, right);
6663   __ JumpIfSmi(tmp1, &miss);
6664 
6665   // Check that both operands are strings. This leaves the instance
6666   // types loaded in tmp1 and tmp2.
6667   __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
6668   __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
6669   __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
6670   __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
6671   __ mov(tmp3, tmp1);
6672   STATIC_ASSERT(kNotStringTag != 0);
6673   __ or_(tmp3, tmp2);
6674   __ test(tmp3, Immediate(kIsNotStringMask));
6675   __ j(not_zero, &miss);
6676 
6677   // Fast check for identical strings.
6678   Label not_same;
6679   __ cmp(left, right);
6680   __ j(not_equal, &not_same, Label::kNear);
6681   STATIC_ASSERT(EQUAL == 0);
6682   STATIC_ASSERT(kSmiTag == 0);
6683   __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
6684   __ ret(0);
6685 
6686   // Handle not identical strings.
6687   __ bind(&not_same);
6688 
6689   // Check that both strings are symbols. If they are, we're done because
6690   // we already know they are not identical. But in the case of a
6691   // non-equality compare, we still need to determine the order.
6692   if (equality) {
6693     Label do_compare;
6694     STATIC_ASSERT(kSymbolTag != 0);
6695     __ and_(tmp1, tmp2);
6696     __ test(tmp1, Immediate(kIsSymbolMask));
6697     __ j(zero, &do_compare, Label::kNear);
6698     // Make sure eax is non-zero. At this point input operands are
6699     // guaranteed to be non-zero.
6700     ASSERT(right.is(eax));
6701     __ ret(0);
6702     __ bind(&do_compare);
6703   }
6704 
6705   // Check that both strings are sequential ASCII.
6706   Label runtime;
6707   __ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);
6708 
6709   // Compare flat ASCII strings. Returns when done.
6710   if (equality) {
6711     StringCompareStub::GenerateFlatAsciiStringEquals(
6712         masm, left, right, tmp1, tmp2);
6713   } else {
6714     StringCompareStub::GenerateCompareFlatAsciiStrings(
6715         masm, left, right, tmp1, tmp2, tmp3);
6716   }
6717 
6718   // Handle more complex cases in runtime.
6719   __ bind(&runtime);
6720   __ pop(tmp1);  // Return address.
6721   __ push(left);
6722   __ push(right);
6723   __ push(tmp1);
6724   if (equality) {
6725     __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
6726   } else {
6727     __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
6728   }
6729 
6730   __ bind(&miss);
6731   GenerateMiss(masm);
6732 }
6733 
6734 
6735 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
6736   ASSERT(state_ == CompareIC::OBJECTS);
6737   Label miss;
6738   __ mov(ecx, edx);
6739   __ and_(ecx, eax);
6740   __ JumpIfSmi(ecx, &miss, Label::kNear);
6741 
6742   __ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
6743   __ j(not_equal, &miss, Label::kNear);
6744   __ CmpObjectType(edx, JS_OBJECT_TYPE, ecx);
6745   __ j(not_equal, &miss, Label::kNear);
6746 
6747   ASSERT(GetCondition() == equal);
6748   __ sub(eax, edx);
6749   __ ret(0);
6750 
6751   __ bind(&miss);
6752   GenerateMiss(masm);
6753 }
6754 
6755 
6756 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
6757   Label miss;
6758   __ mov(ecx, edx);
6759   __ and_(ecx, eax);
6760   __ JumpIfSmi(ecx, &miss, Label::kNear);
6761 
6762   __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
6763   __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
6764   __ cmp(ecx, known_map_);
6765   __ j(not_equal, &miss, Label::kNear);
6766   __ cmp(ebx, known_map_);
6767   __ j(not_equal, &miss, Label::kNear);
6768 
6769   __ sub(eax, edx);
6770   __ ret(0);
6771 
6772   __ bind(&miss);
6773   GenerateMiss(masm);
6774 }
6775 
6776 
6777 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
6778   {
6779     // Call the runtime system in a fresh internal frame.
6780     ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
6781                                                masm->isolate());
6782     FrameScope scope(masm, StackFrame::INTERNAL);
6783     __ push(edx);  // Preserve edx and eax.
6784     __ push(eax);
6785     __ push(edx);  // And also use them as the arguments.
6786     __ push(eax);
6787     __ push(Immediate(Smi::FromInt(op_)));
6788     __ CallExternalReference(miss, 3);
6789     // Compute the entry point of the rewritten stub.
6790     __ lea(edi, FieldOperand(eax, Code::kHeaderSize));
6791     __ pop(eax);
6792     __ pop(edx);
6793   }
6794 
6795   // Do a tail call to the rewritten stub.
6796   __ jmp(edi);
6797 }
6798 
6799 
6800 // Helper function used to check that the dictionary doesn't contain
6801 // the property. This function may return false negatives, so miss_label
6802 // must always call a backup property check that is complete.
6803 // This function is safe to call if the receiver has fast properties.
6804 // Name must be a symbol and receiver must be a heap object.
6805 void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
6806                                                         Label* miss,
6807                                                         Label* done,
6808                                                         Register properties,
6809                                                         Handle<String> name,
6810                                                         Register r0) {
6811   ASSERT(name->IsSymbol());
6812 
  // If the names of the slots in the range from 1 to kProbes - 1 for the
  // hash value are not equal to the name, and the kProbes-th slot is not
  // used (its name is the undefined value), then the hash table is
  // guaranteed not to contain the property. This holds even if some slots
  // hold deleted properties (their names are the hole value).
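  //
  // Roughly, each inlined probe implements the following sketch (hash,
  // mask, and entries are illustrative names, not actual fields):
  //
  //   entry = (hash + probe_offset(i)) & mask;
  //   if (entries[entry] == undefined) goto done;  // Absence proven.
  //   if (entries[entry] == name) goto miss;       // Property exists.
  //   if (entries[entry] is a non-symbol) goto miss;
  //   // Otherwise (a hole or a different symbol): keep probing.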
  for (int i = 0; i < kInlinedProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    Register index = r0;
    // Capacity is a smi whose value is a power of two.
    __ mov(index, FieldOperand(properties, kCapacityOffset));
    __ dec(index);
    __ and_(index,
            Immediate(Smi::FromInt(name->Hash() +
                                   StringDictionary::GetProbeOffset(i))));

    // Scale the index by multiplying by the entry size.
    ASSERT(StringDictionary::kEntrySize == 3);
    __ lea(index, Operand(index, index, times_2, 0));  // index *= 3.
    Register entity_name = r0;
    // Finding undefined at this index means the name is not in the table.
    ASSERT_EQ(kSmiTagSize, 1);
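    // The index is a smi (value << kSmiTagSize), so scaling it by
    // times_half_pointer_size below produces value * kPointerSize,
    // the byte offset of the entry.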
    __ mov(entity_name, Operand(properties, index, times_half_pointer_size,
                                kElementsStartOffset - kHeapObjectTag));
    __ cmp(entity_name, masm->isolate()->factory()->undefined_value());
    __ j(equal, done);

    // Stop if we found the property.
    __ cmp(entity_name, Handle<String>(name));
    __ j(equal, miss);

    Label the_hole;
    // Check for the hole and skip over it.
    __ cmp(entity_name, masm->isolate()->factory()->the_hole_value());
    __ j(equal, &the_hole, Label::kNear);

    // Check whether the entry name is a non-symbol.
    __ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
    __ test_b(FieldOperand(entity_name, Map::kInstanceTypeOffset),
              kIsSymbolMask);
    __ j(zero, miss);
    __ bind(&the_hole);
  }

  StringDictionaryLookupStub stub(properties,
                                  r0,
                                  r0,
                                  StringDictionaryLookupStub::NEGATIVE_LOOKUP);
  __ push(Immediate(Handle<Object>(name)));
  __ push(Immediate(name->Hash()));
  __ CallStub(&stub);
  __ test(r0, r0);
  __ j(not_zero, miss);
  __ jmp(done);
}


// Probe the string dictionary in the |elements| register. Jump to the
// |done| label if a property with the given name is found, leaving the
// index into the dictionary in |r0|. Jump to the |miss| label
// otherwise.
void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
                                                        Label* miss,
                                                        Label* done,
                                                        Register elements,
                                                        Register name,
                                                        Register r0,
                                                        Register r1) {
  ASSERT(!elements.is(r0));
  ASSERT(!elements.is(r1));
  ASSERT(!name.is(r0));
  ASSERT(!name.is(r1));

  // Assert that name contains a string.
  if (FLAG_debug_code) __ AbortIfNotString(name);

  __ mov(r1, FieldOperand(elements, kCapacityOffset));
  __ shr(r1, kSmiTagSize);  // Convert smi to int.
  __ dec(r1);

  // Generate an unrolled loop that performs a few probes before
  // giving up. Measurements done on Gmail indicate that 2 probes
  // cover ~93% of loads from dictionaries.
  for (int i = 0; i < kInlinedProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    __ mov(r0, FieldOperand(name, String::kHashFieldOffset));
    __ shr(r0, String::kHashShift);
    if (i > 0) {
      __ add(r0, Immediate(StringDictionary::GetProbeOffset(i)));
    }
    __ and_(r0, r1);

    // Scale the index by multiplying by the entry size.
    ASSERT(StringDictionary::kEntrySize == 3);
    __ lea(r0, Operand(r0, r0, times_2, 0));  // r0 = r0 * 3

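    // Unlike the negative lookup above, r0 here holds an untagged index
    // (the capacity was untagged into r1), so times_4 below scales it
    // directly by kPointerSize.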
    // Check if the key is identical to the name.
    __ cmp(name, Operand(elements,
                         r0,
                         times_4,
                         kElementsStartOffset - kHeapObjectTag));
    __ j(equal, done);
  }

  StringDictionaryLookupStub stub(elements,
                                  r1,
                                  r0,
                                  POSITIVE_LOOKUP);
  __ push(name);
  __ mov(r0, FieldOperand(name, String::kHashFieldOffset));
  __ shr(r0, String::kHashShift);
  __ push(r0);
  __ CallStub(&stub);

  __ test(r1, r1);
  __ j(zero, miss);
  __ jmp(done);
}


void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
  // we cannot call anything that could cause a GC from this stub.
  // Stack frame on entry:
  //  esp[0 * kPointerSize]: return address.
  //  esp[1 * kPointerSize]: key's hash.
  //  esp[2 * kPointerSize]: key.
  // Registers:
  //  dictionary_: StringDictionary to probe.
  //  result_: used as scratch.
  //  index_: will hold the index of the entry if the lookup is successful.
  //          It might alias with result_.
  // Returns:
  //  result_ is zero if lookup failed, non-zero otherwise.

  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;

  Register scratch = result_;

  __ mov(scratch, FieldOperand(dictionary_, kCapacityOffset));
  __ dec(scratch);
  __ SmiUntag(scratch);
  __ push(scratch);
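  // Stack after this push:
  //  esp[0 * kPointerSize]: capacity mask.
  //  esp[1 * kPointerSize]: return address.
  //  esp[2 * kPointerSize]: key's hash.
  //  esp[3 * kPointerSize]: key.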

  // If the names of the slots in the range from 1 to kProbes - 1 for the
  // hash value are not equal to the name, and the kProbes-th slot is not
  // used (its name is the undefined value), then the hash table is
  // guaranteed not to contain the property. This holds even if some slots
  // hold deleted properties (their names are the hole value).
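  // The first kInlinedProbes probes were already emitted inline by
  // GenerateNegativeLookup/GeneratePositiveLookup, so this loop picks up
  // where they left off.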
  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    __ mov(scratch, Operand(esp, 2 * kPointerSize));
    if (i > 0) {
      __ add(scratch, Immediate(StringDictionary::GetProbeOffset(i)));
    }
    __ and_(scratch, Operand(esp, 0));

    // Scale the index by multiplying by the entry size.
    ASSERT(StringDictionary::kEntrySize == 3);
    __ lea(index_, Operand(scratch, scratch, times_2, 0));  // index *= 3.

    // Finding undefined at this index means the name is not in the table.
    ASSERT_EQ(kSmiTagSize, 1);
    __ mov(scratch, Operand(dictionary_,
                            index_,
                            times_pointer_size,
                            kElementsStartOffset - kHeapObjectTag));
    __ cmp(scratch, masm->isolate()->factory()->undefined_value());
    __ j(equal, &not_in_dictionary);

    // Stop if we found the property.
    __ cmp(scratch, Operand(esp, 3 * kPointerSize));
    __ j(equal, &in_dictionary);

    if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
      // If we hit a non-symbol key during negative lookup we have to
      // bail out, as this key might be equal to the key we are looking
      // for.

      // Check whether the entry name is a non-symbol.
      __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
      __ test_b(FieldOperand(scratch, Map::kInstanceTypeOffset),
                kIsSymbolMask);
      __ j(zero, &maybe_in_dictionary);
    }
  }

  __ bind(&maybe_in_dictionary);
  // If we are doing negative lookup, then probing failure should be
  // treated as a lookup success. For positive lookup, probing failure
  // should be treated as lookup failure.
  if (mode_ == POSITIVE_LOOKUP) {
    __ mov(result_, Immediate(0));
    __ Drop(1);
    __ ret(2 * kPointerSize);
  }

  __ bind(&in_dictionary);
  __ mov(result_, Immediate(1));
  __ Drop(1);
  __ ret(2 * kPointerSize);

  __ bind(&not_in_dictionary);
  __ mov(result_, Immediate(0));
  __ Drop(1);
  __ ret(2 * kPointerSize);
}


struct AheadOfTimeWriteBarrierStubList {
  Register object, value, address;
  RememberedSetAction action;
};


#define REG(Name) { kRegister_ ## Name ## _Code }
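// For example, REG(ebx) expands to { kRegister_ebx_Code }, a braced
// initializer for a Register, which lets the static table below be
// populated at compile time.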

static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
  // Used in RegExpExecStub.
  { REG(ebx), REG(eax), REG(edi), EMIT_REMEMBERED_SET },
  // Used in CompileArrayPushCall.
  { REG(ebx), REG(ecx), REG(edx), EMIT_REMEMBERED_SET },
  { REG(ebx), REG(edi), REG(edx), OMIT_REMEMBERED_SET },
  // Used in CompileStoreGlobal and CallFunctionStub.
  { REG(ebx), REG(ecx), REG(edx), OMIT_REMEMBERED_SET },
  // Used in StoreStubCompiler::CompileStoreField and
  // KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
  { REG(edx), REG(ecx), REG(ebx), EMIT_REMEMBERED_SET },
  // GenerateStoreField calls the stub with two different permutations of
  // registers.  This is the second.
  { REG(ebx), REG(ecx), REG(edx), EMIT_REMEMBERED_SET },
  // StoreIC::GenerateNormal via GenerateDictionaryStore.
  { REG(ebx), REG(edi), REG(edx), EMIT_REMEMBERED_SET },
  // KeyedStoreIC::GenerateGeneric.
  { REG(ebx), REG(edx), REG(ecx), EMIT_REMEMBERED_SET },
  // KeyedStoreStubCompiler::GenerateStoreFastElement.
  { REG(edi), REG(ebx), REG(ecx), EMIT_REMEMBERED_SET },
  { REG(edx), REG(edi), REG(ebx), EMIT_REMEMBERED_SET },
  // ElementsTransitionGenerator::GenerateSmiOnlyToObject
  // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
  // and ElementsTransitionGenerator::GenerateDoubleToObject
  { REG(edx), REG(ebx), REG(edi), EMIT_REMEMBERED_SET },
  { REG(edx), REG(ebx), REG(edi), OMIT_REMEMBERED_SET },
  // ElementsTransitionGenerator::GenerateDoubleToObject
  { REG(eax), REG(edx), REG(esi), EMIT_REMEMBERED_SET },
  { REG(edx), REG(eax), REG(edi), EMIT_REMEMBERED_SET },
  // StoreArrayLiteralElementStub::Generate.
  { REG(ebx), REG(eax), REG(ecx), EMIT_REMEMBERED_SET },
  // Null termination.
  { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET }
};

#undef REG

bool RecordWriteStub::IsPregenerated() {
  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
       !entry->object.is(no_reg);
       entry++) {
    if (object_.is(entry->object) &&
        value_.is(entry->value) &&
        address_.is(entry->address) &&
        remembered_set_action_ == entry->action &&
        save_fp_regs_mode_ == kDontSaveFPRegs) {
      return true;
    }
  }
  return false;
}


void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
  StoreBufferOverflowStub stub1(kDontSaveFPRegs);
  stub1.GetCode()->set_is_pregenerated(true);

  CpuFeatures::TryForceFeatureScope scope(SSE2);
  if (CpuFeatures::IsSupported(SSE2)) {
    StoreBufferOverflowStub stub2(kSaveFPRegs);
    stub2.GetCode()->set_is_pregenerated(true);
  }
}


void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
       !entry->object.is(no_reg);
       entry++) {
    RecordWriteStub stub(entry->object,
                         entry->value,
                         entry->address,
                         entry->action,
                         kDontSaveFPRegs);
    stub.GetCode()->set_is_pregenerated(true);
  }
}


// Takes the input in 3 registers: address_, value_, and object_.  A pointer
// to the value has just been written into the object, and now this stub
// makes sure we keep the GC informed.  The word in the object where the
// value has been written is in the address register.
void RecordWriteStub::Generate(MacroAssembler* masm) {
  Label skip_to_incremental_noncompacting;
  Label skip_to_incremental_compacting;

  // The first two instructions are generated with labels so as to get the
  // offset fixed up correctly by the bind(Label*) call.  We patch them back
  // and forth between compare instructions (nops in this position) and the
  // real branches when we start and stop incremental heap marking.
  __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
  __ jmp(&skip_to_incremental_compacting, Label::kFar);
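  // The kNear jmp assembles to 2 bytes and the kFar jmp to 5, which is why
  // the nop patches at the end of this function go at offsets 0 and 2.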

  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
    __ RememberedSetHelper(object_,
                           address_,
                           value_,
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ ret(0);
  }

  __ bind(&skip_to_incremental_noncompacting);
  GenerateIncremental(masm, INCREMENTAL);

  __ bind(&skip_to_incremental_compacting);
  GenerateIncremental(masm, INCREMENTAL_COMPACTION);

  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
  masm->set_byte_at(0, kTwoByteNopInstruction);
  masm->set_byte_at(2, kFiveByteNopInstruction);
}


void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
  regs_.Save(masm);

  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
    Label dont_need_remembered_set;

    __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
    __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
                           regs_.scratch0(),
                           &dont_need_remembered_set);

    __ CheckPageFlag(regs_.object(),
                     regs_.scratch0(),
                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
                     not_zero,
                     &dont_need_remembered_set);

    // First notify the incremental marker if necessary, then update the
    // remembered set.
    CheckNeedsToInformIncrementalMarker(
        masm,
        kUpdateRememberedSetOnNoNeedToInformIncrementalMarker,
        mode);
    InformIncrementalMarker(masm, mode);
    regs_.Restore(masm);
    __ RememberedSetHelper(object_,
                           address_,
                           value_,
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);

    __ bind(&dont_need_remembered_set);
  }

  CheckNeedsToInformIncrementalMarker(
      masm,
      kReturnOnNoNeedToInformIncrementalMarker,
      mode);
  InformIncrementalMarker(masm, mode);
  regs_.Restore(masm);
  __ ret(0);
}


void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
  int argument_count = 3;
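  // The three C arguments are the object, the slot address (when compacting)
  // or the value (when only marking), and the isolate.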
  __ PrepareCallCFunction(argument_count, regs_.scratch0());
  __ mov(Operand(esp, 0 * kPointerSize), regs_.object());
  if (mode == INCREMENTAL_COMPACTION) {
    __ mov(Operand(esp, 1 * kPointerSize), regs_.address());  // Slot.
  } else {
    ASSERT(mode == INCREMENTAL);
    __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
    __ mov(Operand(esp, 1 * kPointerSize), regs_.scratch0());  // Value.
  }
  __ mov(Operand(esp, 2 * kPointerSize),
         Immediate(ExternalReference::isolate_address()));

  AllowExternalCallThatCantCauseGC scope(masm);
  if (mode == INCREMENTAL_COMPACTION) {
    __ CallCFunction(
        ExternalReference::incremental_evacuation_record_write_function(
            masm->isolate()),
        argument_count);
  } else {
    ASSERT(mode == INCREMENTAL);
    __ CallCFunction(
        ExternalReference::incremental_marking_record_write_function(
            masm->isolate()),
        argument_count);
  }
  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
}


void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
    MacroAssembler* masm,
    OnNoNeedToInformIncrementalMarker on_no_need,
    Mode mode) {
  Label object_is_black, need_incremental, need_incremental_pop_object;

  // Let's look at the color of the object:  if it is not black, we don't
  // have to inform the incremental marker.
  __ JumpIfBlack(regs_.object(),
                 regs_.scratch0(),
                 regs_.scratch1(),
                 &object_is_black,
                 Label::kNear);

  regs_.Restore(masm);
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object_,
                           address_,
                           value_,
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ ret(0);
  }

  __ bind(&object_is_black);

  // Get the value from the slot.
  __ mov(regs_.scratch0(), Operand(regs_.address(), 0));

  if (mode == INCREMENTAL_COMPACTION) {
    Label ensure_not_white;

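    // Record the slot only if the value lives on an evacuation candidate
    // page and the object's page does not skip slot recording; otherwise
    // making sure the value is not white (below) is sufficient.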
    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
                     regs_.scratch1(),  // Scratch.
                     MemoryChunk::kEvacuationCandidateMask,
                     zero,
                     &ensure_not_white,
                     Label::kNear);

    __ CheckPageFlag(regs_.object(),
                     regs_.scratch1(),  // Scratch.
                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
                     not_zero,
                     &ensure_not_white,
                     Label::kNear);

    __ jmp(&need_incremental);

    __ bind(&ensure_not_white);
  }

  // We need an extra register for this, so we push the object register
  // temporarily.
  __ push(regs_.object());
  __ EnsureNotWhite(regs_.scratch0(),  // The value.
                    regs_.scratch1(),  // Scratch.
                    regs_.object(),  // Scratch.
                    &need_incremental_pop_object,
                    Label::kNear);
  __ pop(regs_.object());

  regs_.Restore(masm);
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object_,
                           address_,
                           value_,
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ ret(0);
  }

  __ bind(&need_incremental_pop_object);
  __ pop(regs_.object());

  __ bind(&need_incremental);

  // Fall through when we need to inform the incremental marker.
}


void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- eax    : element value to store
  //  -- ebx    : array literal
  //  -- edi    : map of array literal
  //  -- ecx    : element index as smi
  //  -- edx    : array literal index in function
  //  -- esp[0] : return address
  // -----------------------------------

  Label element_done;
  Label double_elements;
  Label smi_element;
  Label slow_elements;
  Label slow_elements_from_double;
  Label fast_elements;

  __ CheckFastElements(edi, &double_elements);

  // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
  __ JumpIfSmi(eax, &smi_element);
  __ CheckFastSmiOnlyElements(edi, &fast_elements, Label::kNear);

  // Storing into the array literal requires an elements transition. Call
  // into the runtime.

  __ bind(&slow_elements);
  __ pop(edi);  // Pop the return address and remember to put it back later
                // for the tail call.
  __ push(ebx);
  __ push(ecx);
  __ push(eax);
  __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
  __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
  __ push(edx);
  __ push(edi);  // Push the return address back so that the tail call
                 // returns to the right place.
  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);

  __ bind(&slow_elements_from_double);
  __ pop(edx);
  __ jmp(&slow_elements);

  // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
  __ bind(&fast_elements);
  __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
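  // ecx is a smi (index << 1), so scaling it by times_half_pointer_size
  // below yields index * kPointerSize, the element's byte offset.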
  __ lea(ecx, FieldOperand(ebx, ecx, times_half_pointer_size,
                           FixedArrayBase::kHeaderSize));
  __ mov(Operand(ecx, 0), eax);
  // Update the write barrier for the array store.
  __ RecordWrite(ebx, ecx, eax,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 OMIT_SMI_CHECK);
  __ ret(0);

  // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
  // FAST_ELEMENTS, and value is Smi.
  __ bind(&smi_element);
  __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
  __ mov(FieldOperand(ebx, ecx, times_half_pointer_size,
                      FixedArrayBase::kHeaderSize), eax);
  __ ret(0);

  // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
  __ bind(&double_elements);

  __ push(edx);
  __ mov(edx, FieldOperand(ebx, JSObject::kElementsOffset));
  __ StoreNumberToDoubleElements(eax,
                                 edx,
                                 ecx,
                                 edi,
                                 xmm0,
                                 &slow_elements_from_double,
                                 false);
  __ pop(edx);
  __ ret(0);
}

#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_IA32