// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_X64)

#include "bootstrapper.h"
#include "code-stubs.h"
#include "regexp-macro-assembler.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

void ToNumberStub::Generate(MacroAssembler* masm) {
  // The ToNumber stub takes one argument in rax.
  NearLabel check_heap_number, call_builtin;
  __ SmiTest(rax);
  __ j(not_zero, &check_heap_number);
  __ Ret();

  __ bind(&check_heap_number);
  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
                 Heap::kHeapNumberMapRootIndex);
  __ j(not_equal, &call_builtin);
  __ Ret();

  __ bind(&call_builtin);
  __ pop(rcx);  // Pop return address.
  __ push(rax);
  __ push(rcx);  // Push return address.
  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
}
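
// For orientation, the fast path above behaves like this C++ sketch
// (illustrative only; IsSmi/IsHeapNumber/InvokeToNumberBuiltin stand in
// for the real tag and map checks and are not helpers in this file):
//
//   Object* ToNumberFastPath(Object* arg) {
//     if (IsSmi(arg)) return arg;         // Smis are already numbers.
//     if (IsHeapNumber(arg)) return arg;  // So are heap numbers.
//     return InvokeToNumberBuiltin(arg);  // Everything else goes slow.
//   }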


void FastNewClosureStub::Generate(MacroAssembler* masm) {
  // Create a new closure from the given function info in new
  // space. Set the context to the current context in rsi.
  Label gc;
  __ AllocateInNewSpace(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT);

  // Get the function info from the stack.
  __ movq(rdx, Operand(rsp, 1 * kPointerSize));

  int map_index = strict_mode_ == kStrictMode
      ? Context::STRICT_MODE_FUNCTION_MAP_INDEX
      : Context::FUNCTION_MAP_INDEX;

  // Compute the function map in the current global context and set that
  // as the map of the allocated object.
  __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
  __ movq(rcx, Operand(rcx, Context::SlotOffset(map_index)));
  __ movq(FieldOperand(rax, JSObject::kMapOffset), rcx);

  // Initialize the rest of the function. We don't have to update the
  // write barrier because the allocated object is in new space.
  __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
  __ LoadRoot(rcx, Heap::kTheHoleValueRootIndex);
  __ LoadRoot(rdi, Heap::kUndefinedValueRootIndex);
  __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
  __ movq(FieldOperand(rax, JSObject::kElementsOffset), rbx);
  __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), rcx);
  __ movq(FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset), rdx);
  __ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi);
  __ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx);
  __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset), rdi);

  // Initialize the code pointer in the function to be the one
  // found in the shared function info object.
  __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
  __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
  __ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx);

  // Return and remove the on-stack parameter.
  __ ret(1 * kPointerSize);

  // Create a new closure through the slower runtime call.
  __ bind(&gc);
  __ pop(rcx);  // Temporarily remove return address.
  __ pop(rdx);
  __ push(rsi);
  __ push(rdx);
  __ PushRoot(Heap::kFalseValueRootIndex);
  __ push(rcx);  // Restore return address.
  __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
}


void FastNewContextStub::Generate(MacroAssembler* masm) {
  // Try to allocate the context in new space.
  Label gc;
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
  __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
                        rax, rbx, rcx, &gc, TAG_OBJECT);

  // Get the function from the stack.
  __ movq(rcx, Operand(rsp, 1 * kPointerSize));

  // Set up the object header.
  __ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex);
  __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
  __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));

  // Set up the fixed slots.
  __ Set(rbx, 0);  // Set to NULL.
  __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx);
  __ movq(Operand(rax, Context::SlotOffset(Context::FCONTEXT_INDEX)), rax);
  __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rbx);
  __ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx);

  // Copy the global object from the surrounding context.
  __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_INDEX)), rbx);

  // Initialize the rest of the slots to undefined.
  __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
    __ movq(Operand(rax, Context::SlotOffset(i)), rbx);
  }

  // Return and remove the on-stack parameter.
  __ movq(rsi, rax);
  __ ret(1 * kPointerSize);

  // Need to collect. Call into runtime system.
  __ bind(&gc);
  __ TailCallRuntime(Runtime::kNewContext, 1, 1);
}
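
// A worked example of the allocation size above, under assumed values
// (illustrative only): with Context::MIN_CONTEXT_SLOTS == 5,
// kPointerSize == 8 and a two-word (16-byte) FixedArray header, a context
// for a function with slots_ == 3 has length == 8 and therefore needs
// 8 * 8 + 16 == 80 bytes in new space.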


void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  //
  // [rsp + kPointerSize]: constant elements.
  // [rsp + (2 * kPointerSize)]: literal index.
  // [rsp + (3 * kPointerSize)]: literals array.

  // All sizes here are multiples of kPointerSize.
  int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
  int size = JSArray::kSize + elements_size;

  // Load boilerplate object into rcx and check if we need to create a
  // boilerplate.
  Label slow_case;
  __ movq(rcx, Operand(rsp, 3 * kPointerSize));
  __ movq(rax, Operand(rsp, 2 * kPointerSize));
  SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
  __ movq(rcx,
          FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
  __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
  __ j(equal, &slow_case);

  if (FLAG_debug_code) {
    const char* message;
    Heap::RootListIndex expected_map_index;
    if (mode_ == CLONE_ELEMENTS) {
      message = "Expected (writable) fixed array";
      expected_map_index = Heap::kFixedArrayMapRootIndex;
    } else {
      ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
      message = "Expected copy-on-write fixed array";
      expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
    }
    __ push(rcx);
    __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
    __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
                   expected_map_index);
    __ Assert(equal, message);
    __ pop(rcx);
  }

  // Allocate both the JS array and the elements array in one big
  // allocation. This avoids multiple limit checks.
  __ AllocateInNewSpace(size, rax, rbx, rdx, &slow_case, TAG_OBJECT);

  // Copy the JS array part.
  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
    if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
      __ movq(rbx, FieldOperand(rcx, i));
      __ movq(FieldOperand(rax, i), rbx);
    }
  }

  if (length_ > 0) {
    // Get hold of the elements array of the boilerplate and set up the
    // elements pointer in the resulting object.
    __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
    __ lea(rdx, Operand(rax, JSArray::kSize));
    __ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx);

    // Copy the elements array.
    for (int i = 0; i < elements_size; i += kPointerSize) {
      __ movq(rbx, FieldOperand(rcx, i));
      __ movq(FieldOperand(rdx, i), rbx);
    }
  }

  // Return and remove the on-stack parameters.
  __ ret(3 * kPointerSize);

  __ bind(&slow_case);
  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
}
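
// A worked example of the size computation above, under assumed x64 field
// sizes (illustrative only): with a 32-byte JSArray and a 16-byte
// FixedArray header, length_ == 3 gives elements_size == 16 + 3 * 8 == 40
// and size == 32 + 40 == 72 bytes, allocated and limit-checked once, with
// the elements array placed directly after the JSArray at rax + JSArray::kSize.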


void ToBooleanStub::Generate(MacroAssembler* masm) {
  NearLabel false_result, true_result, not_string;
  __ movq(rax, Operand(rsp, 1 * kPointerSize));

  // 'null' => false.
  __ CompareRoot(rax, Heap::kNullValueRootIndex);
  __ j(equal, &false_result);

  // Get the map and type of the heap object.
  // We don't use CmpObjectType because we manipulate the type field.
  __ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
  __ movzxbq(rcx, FieldOperand(rdx, Map::kInstanceTypeOffset));

  // Undetectable => false.
  __ movzxbq(rbx, FieldOperand(rdx, Map::kBitFieldOffset));
  __ and_(rbx, Immediate(1 << Map::kIsUndetectable));
  __ j(not_zero, &false_result);

  // JavaScript object => true.
  __ cmpq(rcx, Immediate(FIRST_JS_OBJECT_TYPE));
  __ j(above_equal, &true_result);

  // String value => false iff empty.
  __ cmpq(rcx, Immediate(FIRST_NONSTRING_TYPE));
  __ j(above_equal, &not_string);
  __ movq(rdx, FieldOperand(rax, String::kLengthOffset));
  __ SmiTest(rdx);
  __ j(zero, &false_result);
  __ jmp(&true_result);

  __ bind(&not_string);
  __ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
  __ j(not_equal, &true_result);
  // HeapNumber => false iff +0, -0, or NaN.
  // These three cases set the zero flag when compared to zero using ucomisd.
  __ xorpd(xmm0, xmm0);
  __ ucomisd(xmm0, FieldOperand(rax, HeapNumber::kValueOffset));
  __ j(zero, &false_result);
  // Fall through to |true_result|.

  // Return 1/0 for true/false in rax.
  __ bind(&true_result);
  __ Set(rax, 1);
  __ ret(1 * kPointerSize);
  __ bind(&false_result);
  __ Set(rax, 0);
  __ ret(1 * kPointerSize);
}
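
// The decision tree above matches this C++ sketch (illustrative only;
// IsNull/IsUndetectable/IsJSObject/IsString/IsHeapNumber stand in for the
// real map and instance-type checks and are not helpers in this file):
//
//   bool ToBooleanFastPath(Object* v) {
//     if (IsNull(v)) return false;
//     if (IsUndetectable(v)) return false;     // E.g. document.all.
//     if (IsJSObject(v)) return true;
//     if (IsString(v)) return StringLength(v) != 0;
//     if (IsHeapNumber(v)) {
//       double d = HeapNumberValue(v);
//       return !(d == 0.0 || std::isnan(d));   // +0, -0 and NaN => false.
//     }
//     return true;                             // Remaining heap objects.
//   }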


class FloatingPointHelper : public AllStatic {
 public:
  // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
  // If the operands are not both numbers, jump to not_numbers.
  // Leaves rdx and rax unchanged.  SmiOperands assumes both are smis.
  // NumberOperands assumes both are smis or heap numbers.
  static void LoadSSE2SmiOperands(MacroAssembler* masm);
  static void LoadSSE2NumberOperands(MacroAssembler* masm);
  static void LoadSSE2UnknownOperands(MacroAssembler* masm,
                                      Label* not_numbers);

  // Takes the operands in rdx and rax and loads them as integers in rax
  // and rcx.
  static void LoadAsIntegers(MacroAssembler* masm,
                             Label* operand_conversion_failure,
                             Register heap_number_map);
  // As above, but we know the operands to be numbers. In that case,
  // conversion can't fail.
  static void LoadNumbersAsIntegers(MacroAssembler* masm);

  // Tries to convert two values to smis losslessly.
  // This fails if either argument is neither a Smi nor a HeapNumber,
  // or if it's a HeapNumber with a value that can't be converted
  // losslessly to a Smi. In that case, control transitions to the
  // on_not_smis label.
  // On success, either control goes to the on_success label (if one is
  // provided), or it falls through at the end of the code (if on_success
  // is NULL).
  // On success, both first and second hold Smi tagged values.
  // One of first or second must be non-Smi when entering.
  static void NumbersToSmis(MacroAssembler* masm,
                            Register first,
                            Register second,
                            Register scratch1,
                            Register scratch2,
                            Register scratch3,
                            Label* on_success,
                            Label* on_not_smis);
};


Handle<Code> GetTypeRecordingBinaryOpStub(int key,
    TRBinaryOpIC::TypeInfo type_info,
    TRBinaryOpIC::TypeInfo result_type_info) {
  TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
  return stub.GetCode();
}


void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
  __ pop(rcx);  // Save return address.
  __ push(rdx);
  __ push(rax);
  // Left and right arguments are now on top.
  // Push this stub's key. Although the operation and the type info are
  // encoded into the key, the encoding is opaque, so push them too.
  __ Push(Smi::FromInt(MinorKey()));
  __ Push(Smi::FromInt(op_));
  __ Push(Smi::FromInt(operands_type_));

  __ push(rcx);  // Push return address.

  // Patch the caller to an appropriate specialized stub and return the
  // operation result to the caller of the stub.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch),
                        masm->isolate()),
      5,
      1);
}


void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
  switch (operands_type_) {
    case TRBinaryOpIC::UNINITIALIZED:
      GenerateTypeTransition(masm);
      break;
    case TRBinaryOpIC::SMI:
      GenerateSmiStub(masm);
      break;
    case TRBinaryOpIC::INT32:
      UNREACHABLE();
      // The int32 case is identical to the Smi case.  We avoid creating this
      // ic state on x64.
      break;
    case TRBinaryOpIC::HEAP_NUMBER:
      GenerateHeapNumberStub(masm);
      break;
    case TRBinaryOpIC::ODDBALL:
      GenerateOddballStub(masm);
      break;
    case TRBinaryOpIC::STRING:
      GenerateStringStub(masm);
      break;
    case TRBinaryOpIC::GENERIC:
      GenerateGeneric(masm);
      break;
    default:
      UNREACHABLE();
  }
}


const char* TypeRecordingBinaryOpStub::GetName() {
  if (name_ != NULL) return name_;
  const int kMaxNameLength = 100;
  name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
      kMaxNameLength);
  if (name_ == NULL) return "OOM";
  const char* op_name = Token::Name(op_);
  const char* overwrite_name;
  switch (mode_) {
    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
    default: overwrite_name = "UnknownOverwrite"; break;
  }

  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
               "TypeRecordingBinaryOpStub_%s_%s_%s",
               op_name,
               overwrite_name,
               TRBinaryOpIC::GetName(operands_type_));
  return name_;
}


void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
    Label* slow,
    SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {

  // Arguments to TypeRecordingBinaryOpStub are in rdx and rax.
  Register left = rdx;
  Register right = rax;

  // We only generate heapnumber answers for overflowing calculations
  // for the four basic arithmetic operations and logical right shift by 0.
  bool generate_inline_heapnumber_results =
      (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) &&
      (op_ == Token::ADD || op_ == Token::SUB ||
       op_ == Token::MUL || op_ == Token::DIV || op_ == Token::SHR);

  // Smi check of both operands.  If op is BIT_OR, the check is delayed
  // until after the OR operation.
  Label not_smis;
  Label use_fp_on_smis;
  Label fail;

  if (op_ != Token::BIT_OR) {
    Comment smi_check_comment(masm, "-- Smi check arguments");
    __ JumpIfNotBothSmi(left, right, &not_smis);
  }

  Label smi_values;
  __ bind(&smi_values);
  // Perform the operation.
  Comment perform_smi(masm, "-- Perform smi operation");
  switch (op_) {
    case Token::ADD:
      ASSERT(right.is(rax));
      __ SmiAdd(right, right, left, &use_fp_on_smis);  // ADD is commutative.
      break;

    case Token::SUB:
      __ SmiSub(left, left, right, &use_fp_on_smis);
      __ movq(rax, left);
      break;

    case Token::MUL:
      ASSERT(right.is(rax));
      __ SmiMul(right, right, left, &use_fp_on_smis);  // MUL is commutative.
      break;

    case Token::DIV:
      // SmiDiv will not accept left in rdx or right in rax.
      left = rcx;
      right = rbx;
      __ movq(rbx, rax);
      __ movq(rcx, rdx);
      __ SmiDiv(rax, left, right, &use_fp_on_smis);
      break;

    case Token::MOD:
      // SmiMod will not accept left in rdx or right in rax.
      left = rcx;
      right = rbx;
      __ movq(rbx, rax);
      __ movq(rcx, rdx);
      __ SmiMod(rax, left, right, &use_fp_on_smis);
      break;

    case Token::BIT_OR: {
      ASSERT(right.is(rax));
      __ SmiOrIfSmis(right, right, left, &not_smis);  // BIT_OR is commutative.
      break;
    }
    case Token::BIT_XOR:
      ASSERT(right.is(rax));
      __ SmiXor(right, right, left);  // BIT_XOR is commutative.
      break;

    case Token::BIT_AND:
      ASSERT(right.is(rax));
      __ SmiAnd(right, right, left);  // BIT_AND is commutative.
      break;

    case Token::SHL:
      __ SmiShiftLeft(left, left, right);
      __ movq(rax, left);
      break;

    case Token::SAR:
      __ SmiShiftArithmeticRight(left, left, right);
      __ movq(rax, left);
      break;

    case Token::SHR:
      __ SmiShiftLogicalRight(left, left, right, &use_fp_on_smis);
      __ movq(rax, left);
      break;

    default:
      UNREACHABLE();
  }

  // 5. Emit return of result in rax.  Some operations have registers pushed.
  __ ret(0);

  if (use_fp_on_smis.is_linked()) {
    // 6. For some operations emit inline code to perform floating point
    //    operations on known smis (e.g., if the result of the operation
    //    overflowed the smi range).
    __ bind(&use_fp_on_smis);
    if (op_ == Token::DIV || op_ == Token::MOD) {
      // Restore left and right to rdx and rax.
      __ movq(rdx, rcx);
      __ movq(rax, rbx);
    }

    if (generate_inline_heapnumber_results) {
      __ AllocateHeapNumber(rcx, rbx, slow);
      Comment perform_float(masm, "-- Perform float operation on smis");
      if (op_ == Token::SHR) {
        __ SmiToInteger32(left, left);
        __ cvtqsi2sd(xmm0, left);
      } else {
        FloatingPointHelper::LoadSSE2SmiOperands(masm);
        switch (op_) {
        case Token::ADD: __ addsd(xmm0, xmm1); break;
        case Token::SUB: __ subsd(xmm0, xmm1); break;
        case Token::MUL: __ mulsd(xmm0, xmm1); break;
        case Token::DIV: __ divsd(xmm0, xmm1); break;
        default: UNREACHABLE();
        }
      }
      __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
      __ movq(rax, rcx);
      __ ret(0);
    } else {
      __ jmp(&fail);
    }
  }

  // 7. Non-smi operands reach the end of the code generated by
  //    GenerateSmiCode, and fall through to subsequent code,
  //    with the operands in rdx and rax.
  //    But first we check if non-smi values are HeapNumbers holding
  //    values that could be smi.
  __ bind(&not_smis);
  Comment done_comment(masm, "-- Enter non-smi code");
  FloatingPointHelper::NumbersToSmis(masm, left, right, rbx, rdi, rcx,
                                     &smi_values, &fail);
  __ jmp(&smi_values);
  __ bind(&fail);
}


void TypeRecordingBinaryOpStub::GenerateFloatingPointCode(
    MacroAssembler* masm,
    Label* allocation_failure,
    Label* non_numeric_failure) {
  switch (op_) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV: {
      FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure);

      switch (op_) {
        case Token::ADD: __ addsd(xmm0, xmm1); break;
        case Token::SUB: __ subsd(xmm0, xmm1); break;
        case Token::MUL: __ mulsd(xmm0, xmm1); break;
        case Token::DIV: __ divsd(xmm0, xmm1); break;
        default: UNREACHABLE();
      }
      GenerateHeapResultAllocation(masm, allocation_failure);
      __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
      __ ret(0);
      break;
    }
    case Token::MOD: {
      // For MOD we jump to the allocation_failure label, to call runtime.
      __ jmp(allocation_failure);
      break;
    }
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SAR:
    case Token::SHL:
    case Token::SHR: {
      Label non_smi_shr_result;
      Register heap_number_map = r9;
      __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
      FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure,
                                          heap_number_map);
      switch (op_) {
        case Token::BIT_OR:  __ orl(rax, rcx); break;
        case Token::BIT_AND: __ andl(rax, rcx); break;
        case Token::BIT_XOR: __ xorl(rax, rcx); break;
        case Token::SAR: __ sarl_cl(rax); break;
        case Token::SHL: __ shll_cl(rax); break;
        case Token::SHR: {
          __ shrl_cl(rax);
          // Check if result is negative. This can only happen for a shift
          // by zero.
          __ testl(rax, rax);
          __ j(negative, &non_smi_shr_result);
          break;
        }
        default: UNREACHABLE();
      }
      STATIC_ASSERT(kSmiValueSize == 32);
      // Tag smi result and return.
      __ Integer32ToSmi(rax, rax);
      __ Ret();

      // Logical shift right can produce an unsigned int32 that is not
      // an int32, and so is not in the smi range.  Allocate a heap number
      // in that case.
      if (op_ == Token::SHR) {
        __ bind(&non_smi_shr_result);
        Label allocation_failed;
        __ movl(rbx, rax);  // rbx holds result value (uint32 value as int64).
        // Allocate heap number in new space.
        // Not using AllocateHeapNumber macro in order to reuse
        // already loaded heap_number_map.
        __ AllocateInNewSpace(HeapNumber::kSize,
                              rax,
                              rdx,
                              no_reg,
                              &allocation_failed,
                              TAG_OBJECT);
        // Set the map.
        if (FLAG_debug_code) {
          __ AbortIfNotRootValue(heap_number_map,
                                 Heap::kHeapNumberMapRootIndex,
                                 "HeapNumberMap register clobbered.");
        }
        __ movq(FieldOperand(rax, HeapObject::kMapOffset),
                heap_number_map);
        __ cvtqsi2sd(xmm0, rbx);
        __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
        __ Ret();

        __ bind(&allocation_failed);
        // We need tagged values in rdx and rax for the following code,
        // not int32 in rax and rcx.
        __ Integer32ToSmi(rax, rcx);
        __ Integer32ToSmi(rdx, rbx);
        __ jmp(allocation_failure);
      }
      break;
    }
    default: UNREACHABLE(); break;
  }
  // No fall-through from this generated code.
  if (FLAG_debug_code) {
    __ Abort("Unexpected fall-through in "
             "TypeRecordingBinaryStub::GenerateFloatingPointCode.");
  }
}
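
// The SHR special case above exists because JavaScript's >>> is unsigned:
// with a 32-bit smi payload, a result above INT32_MAX cannot be tagged.
// A sketch of the condition in C++ (illustrative only):
//
//   uint32_t result = static_cast<uint32_t>(lhs) >> (shift & 31);
//   if (static_cast<int32_t>(result) < 0) {
//     // E.g. (-1) >>> 0 == 4294967295: too big for an int32 smi payload,
//     // so the stub boxes it in a freshly allocated HeapNumber instead.
//   }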


void TypeRecordingBinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) {
  ASSERT(op_ == Token::ADD);
  NearLabel left_not_string, call_runtime;

  // Registers containing left and right operands respectively.
  Register left = rdx;
  Register right = rax;

  // Test if left operand is a string.
  __ JumpIfSmi(left, &left_not_string);
  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
  __ j(above_equal, &left_not_string);
  StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
  GenerateRegisterArgsPush(masm);
  __ TailCallStub(&string_add_left_stub);

  // Left operand is not a string, test right.
  __ bind(&left_not_string);
  __ JumpIfSmi(right, &call_runtime);
  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
  __ j(above_equal, &call_runtime);

  StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
  GenerateRegisterArgsPush(masm);
  __ TailCallStub(&string_add_right_stub);

  // Neither argument is a string.
  __ bind(&call_runtime);
}


void TypeRecordingBinaryOpStub::GenerateCallRuntimeCode(MacroAssembler* masm) {
  GenerateRegisterArgsPush(masm);
  switch (op_) {
    case Token::ADD:
      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
      break;
    case Token::SUB:
      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
      break;
    case Token::MUL:
      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
      break;
    case Token::DIV:
      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
      break;
    case Token::MOD:
      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
      break;
    case Token::BIT_OR:
      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
      break;
    case Token::BIT_AND:
      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
      break;
    case Token::BIT_XOR:
      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
      break;
    case Token::SAR:
      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
      break;
    case Token::SHL:
      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
      break;
    case Token::SHR:
      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
      break;
    default:
      UNREACHABLE();
  }
}


void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
  Label call_runtime;
  if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
      result_type_ == TRBinaryOpIC::SMI) {
    // Only allow smi results.
    GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS);
  } else {
    // Allow heap number result and don't make a transition if a heap number
    // cannot be allocated.
    GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
  }

  // Code falls through if the result is not returned as either a smi or heap
  // number.
  GenerateTypeTransition(masm);

  if (call_runtime.is_linked()) {
    __ bind(&call_runtime);
    GenerateCallRuntimeCode(masm);
  }
}


void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
  ASSERT(operands_type_ == TRBinaryOpIC::STRING);
  ASSERT(op_ == Token::ADD);
  GenerateStringAddCode(masm);
  // Try to add the arguments as strings; otherwise, transition to the
  // generic TRBinaryOpIC type.
  GenerateTypeTransition(masm);
}


void TypeRecordingBinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
  Label call_runtime;

  if (op_ == Token::ADD) {
    // Handle string addition here, because it is the only operation
    // that does not do a ToNumber conversion on the operands.
    GenerateStringAddCode(masm);
  }

  // Convert oddball arguments to numbers.
  NearLabel check, done;
  __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
  __ j(not_equal, &check);
  if (Token::IsBitOp(op_)) {
    __ xor_(rdx, rdx);
  } else {
    __ LoadRoot(rdx, Heap::kNanValueRootIndex);
  }
  __ jmp(&done);
  __ bind(&check);
  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
  __ j(not_equal, &done);
  if (Token::IsBitOp(op_)) {
    __ xor_(rax, rax);
  } else {
    __ LoadRoot(rax, Heap::kNanValueRootIndex);
  }
  __ bind(&done);

  GenerateHeapNumberStub(masm);
}


void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
  Label gc_required, not_number;
  GenerateFloatingPointCode(masm, &gc_required, &not_number);

  __ bind(&not_number);
  GenerateTypeTransition(masm);

  __ bind(&gc_required);
  GenerateCallRuntimeCode(masm);
}


void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
  Label call_runtime, call_string_add_or_runtime;

  GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);

  GenerateFloatingPointCode(masm, &call_runtime, &call_string_add_or_runtime);

  __ bind(&call_string_add_or_runtime);
  if (op_ == Token::ADD) {
    GenerateStringAddCode(masm);
  }

  __ bind(&call_runtime);
  GenerateCallRuntimeCode(masm);
}


void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
    MacroAssembler* masm,
    Label* alloc_failure) {
  Label skip_allocation;
  OverwriteMode mode = mode_;
  switch (mode) {
    case OVERWRITE_LEFT: {
      // If the argument in rdx is already an object, we skip the
      // allocation of a heap number.
      __ JumpIfNotSmi(rdx, &skip_allocation);
      // Allocate a heap number for the result. Keep rax and rdx intact
      // for the possible runtime call.
      __ AllocateHeapNumber(rbx, rcx, alloc_failure);
      // Now rdx can be overwritten losing one of the arguments as we are
      // now done and will not need it any more.
      __ movq(rdx, rbx);
      __ bind(&skip_allocation);
      // Use the object in rdx as the result holder.
      __ movq(rax, rdx);
      break;
    }
    case OVERWRITE_RIGHT:
      // If the argument in rax is already an object, we skip the
      // allocation of a heap number.
      __ JumpIfNotSmi(rax, &skip_allocation);
      // Fall through!
    case NO_OVERWRITE:
      // Allocate a heap number for the result. Keep rax and rdx intact
      // for the possible runtime call.
      __ AllocateHeapNumber(rbx, rcx, alloc_failure);
      // Now rax can be overwritten losing one of the arguments as we are
      // now done and will not need it any more.
      __ movq(rax, rbx);
      __ bind(&skip_allocation);
      break;
    default: UNREACHABLE();
  }
}


void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
  __ pop(rcx);
  __ push(rdx);
  __ push(rax);
  __ push(rcx);
}


void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
  // TAGGED case:
  //   Input:
  //     rsp[8]: argument (should be number).
  //     rsp[0]: return address.
  //   Output:
  //     rax: tagged double result.
  // UNTAGGED case:
  //   Input:
  //     rsp[0]: return address.
  //     xmm1: untagged double input argument.
  //   Output:
  //     xmm1: untagged double result.

  Label runtime_call;
  Label runtime_call_clear_stack;
  Label skip_cache;
  const bool tagged = (argument_type_ == TAGGED);
  if (tagged) {
    NearLabel input_not_smi;
    NearLabel loaded;
    // Test that rax is a number.
    __ movq(rax, Operand(rsp, kPointerSize));
    __ JumpIfNotSmi(rax, &input_not_smi);
    // Input is a smi. Untag and load it onto the FPU stack.
    // Then load the bits of the double into rbx.
    __ SmiToInteger32(rax, rax);
    __ subq(rsp, Immediate(kDoubleSize));
    __ cvtlsi2sd(xmm1, rax);
    __ movsd(Operand(rsp, 0), xmm1);
    __ movq(rbx, xmm1);
    __ movq(rdx, xmm1);
    __ fld_d(Operand(rsp, 0));
    __ addq(rsp, Immediate(kDoubleSize));
    __ jmp(&loaded);

    __ bind(&input_not_smi);
    // Check if input is a HeapNumber.
    __ LoadRoot(rbx, Heap::kHeapNumberMapRootIndex);
    __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
    __ j(not_equal, &runtime_call);
    // Input is a HeapNumber. Push it on the FPU stack and load its
    // bits into rbx.
    __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
    __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
    __ movq(rdx, rbx);

    __ bind(&loaded);
  } else {  // UNTAGGED.
    __ movq(rbx, xmm1);
    __ movq(rdx, xmm1);
  }

  // ST[0] == double value, if TAGGED.
  // rbx = bits of double value.
  // rdx = also bits of double value.
  // Compute hash (h is 32 bits, bits are 64 and the shifts are arithmetic):
  //   h = h0 = bits ^ (bits >> 32);
  //   h ^= h >> 16;
  //   h ^= h >> 8;
  //   h = h & (cacheSize - 1);
  // or h = (h0 ^ (h0 >> 8) ^ (h0 >> 16) ^ (h0 >> 24)) & (cacheSize - 1)
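  //
  // A plain C++ rendering of the same hash, as a reference sketch
  // (illustrative only; kCacheSize stands for
  // TranscendentalCache::SubCache::kCacheSize, a power of two as asserted
  // below, and the final mask keeps only bits unaffected by the
  // arithmetic-vs-logical shift difference):
  //
  //   uint32_t Hash(uint64_t bits) {
  //     uint32_t h = static_cast<uint32_t>(bits ^ (bits >> 32));
  //     h ^= h >> 16;
  //     h ^= h >> 8;
  //     return h & (kCacheSize - 1);
  //   }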
  __ sar(rdx, Immediate(32));
  __ xorl(rdx, rbx);
  __ movl(rcx, rdx);
  __ movl(rax, rdx);
  __ movl(rdi, rdx);
  __ sarl(rdx, Immediate(8));
  __ sarl(rcx, Immediate(16));
  __ sarl(rax, Immediate(24));
  __ xorl(rcx, rdx);
  __ xorl(rax, rdi);
  __ xorl(rcx, rax);
  ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
  __ andl(rcx, Immediate(TranscendentalCache::SubCache::kCacheSize - 1));

  // ST[0] == double value.
  // rbx = bits of double value.
  // rcx = TranscendentalCache::hash(double value).
  ExternalReference cache_array =
      ExternalReference::transcendental_cache_array_address(masm->isolate());
  __ movq(rax, cache_array);
  int cache_array_index =
      type_ * sizeof(Isolate::Current()->transcendental_cache()->caches_[0]);
  __ movq(rax, Operand(rax, cache_array_index));
  // rax points to the cache for the type type_.
  // If NULL, the cache hasn't been initialized yet, so go through runtime.
  __ testq(rax, rax);
  __ j(zero, &runtime_call_clear_stack);  // Only clears stack if TAGGED.
#ifdef DEBUG
  // Check that the layout of cache elements match expectations.
  {  // NOLINT - doesn't like a single brace on a line.
    TranscendentalCache::SubCache::Element test_elem[2];
    char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
    char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
    char* elem_in0  = reinterpret_cast<char*>(&(test_elem[0].in[0]));
    char* elem_in1  = reinterpret_cast<char*>(&(test_elem[0].in[1]));
    char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
    // Two uint_32's and a pointer per element.
    CHECK_EQ(16, static_cast<int>(elem2_start - elem_start));
    CHECK_EQ(0, static_cast<int>(elem_in0 - elem_start));
    CHECK_EQ(kIntSize, static_cast<int>(elem_in1 - elem_start));
    CHECK_EQ(2 * kIntSize, static_cast<int>(elem_out - elem_start));
  }
#endif
  // Find the address of the rcx'th entry in the cache, i.e., &rax[rcx*16].
  __ addl(rcx, rcx);
  __ lea(rcx, Operand(rax, rcx, times_8, 0));
  // Check if cache matches: Double value is stored in uint32_t[2] array.
  NearLabel cache_miss;
  __ cmpq(rbx, Operand(rcx, 0));
  __ j(not_equal, &cache_miss);
  // Cache hit!
  __ movq(rax, Operand(rcx, 2 * kIntSize));
  if (tagged) {
    __ fstp(0);  // Clear FPU stack.
    __ ret(kPointerSize);
  } else {  // UNTAGGED.
    __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
    __ Ret();
  }

  __ bind(&cache_miss);
  // Update cache with new value.
  if (tagged) {
    __ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
  } else {  // UNTAGGED.
    __ AllocateHeapNumber(rax, rdi, &skip_cache);
    __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
    __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
  }
  GenerateOperation(masm);
  __ movq(Operand(rcx, 0), rbx);
  __ movq(Operand(rcx, 2 * kIntSize), rax);
  __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
  if (tagged) {
    __ ret(kPointerSize);
  } else {  // UNTAGGED.
    __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
    __ Ret();

    // Skip cache and return answer directly, only in untagged case.
    __ bind(&skip_cache);
    __ subq(rsp, Immediate(kDoubleSize));
    __ movsd(Operand(rsp, 0), xmm1);
    __ fld_d(Operand(rsp, 0));
    GenerateOperation(masm);
    __ fstp_d(Operand(rsp, 0));
    __ movsd(xmm1, Operand(rsp, 0));
    __ addq(rsp, Immediate(kDoubleSize));
    // We return the value in xmm1 without adding it to the cache, but
    // we cause a scavenging GC so that future allocations will succeed.
    __ EnterInternalFrame();
    // Allocate an unused object bigger than a HeapNumber.
    __ Push(Smi::FromInt(2 * kDoubleSize));
    __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
    __ LeaveInternalFrame();
    __ Ret();
  }

  // Call runtime, doing whatever allocation and cleanup is necessary.
  if (tagged) {
    __ bind(&runtime_call_clear_stack);
    __ fstp(0);
    __ bind(&runtime_call);
    __ TailCallExternalReference(
        ExternalReference(RuntimeFunction(), masm->isolate()), 1, 1);
  } else {  // UNTAGGED.
    __ bind(&runtime_call_clear_stack);
    __ bind(&runtime_call);
    __ AllocateHeapNumber(rax, rdi, &skip_cache);
    __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
    __ EnterInternalFrame();
    __ push(rax);
    __ CallRuntime(RuntimeFunction(), 1);
    __ LeaveInternalFrame();
    __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
    __ Ret();
  }
}


Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
  switch (type_) {
    // Add more cases when necessary.
    case TranscendentalCache::SIN: return Runtime::kMath_sin;
    case TranscendentalCache::COS: return Runtime::kMath_cos;
    case TranscendentalCache::LOG: return Runtime::kMath_log;
    default:
      UNIMPLEMENTED();
      return Runtime::kAbort;
  }
}


void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
  // Registers:
  // rax: Newly allocated HeapNumber, which must be preserved.
  // rbx: Bits of input double. Must be preserved.
  // rcx: Pointer to cache entry. Must be preserved.
  // st(0): Input double.
  Label done;
  if (type_ == TranscendentalCache::SIN || type_ == TranscendentalCache::COS) {
    // Both fsin and fcos require arguments in the range +/-2^63 and
    // return NaN for infinities and NaN. They can share all code except
    // the actual fsin/fcos operation.
    Label in_range;
    // If argument is outside the range -2^63..2^63, fsin/cos doesn't
    // work. We must reduce it to the appropriate range.
    __ movq(rdi, rbx);
    // Move exponent and sign bits to low bits.
    __ shr(rdi, Immediate(HeapNumber::kMantissaBits));
    // Remove sign bit.
    __ andl(rdi, Immediate((1 << HeapNumber::kExponentBits) - 1));
    int supported_exponent_limit = (63 + HeapNumber::kExponentBias);
    __ cmpl(rdi, Immediate(supported_exponent_limit));
    __ j(below, &in_range);
    // Check for infinity and NaN. Both return NaN for sin.
    __ cmpl(rdi, Immediate(0x7ff));
    NearLabel non_nan_result;
    __ j(not_equal, &non_nan_result);
    // Input is +/-Infinity or NaN. Result is NaN.
    __ fstp(0);
    __ LoadRoot(kScratchRegister, Heap::kNanValueRootIndex);
    __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
    __ jmp(&done);

    __ bind(&non_nan_result);

    // Use fprem1 to restrict the argument to the range +/-2*PI.
    __ movq(rdi, rax);  // Save rax before using fnstsw_ax.
    __ fldpi();
    __ fadd(0);
    __ fld(1);
    // FPU Stack: input, 2*pi, input.
    {
      Label no_exceptions;
      __ fwait();
      __ fnstsw_ax();
      // Clear if Illegal Operand or Zero Division exceptions are set.
      __ testl(rax, Immediate(5));  // #IO and #ZD flags of FPU status word.
      __ j(zero, &no_exceptions);
      __ fnclex();
      __ bind(&no_exceptions);
    }

    // Compute st(0) % st(1).
    {
      NearLabel partial_remainder_loop;
      __ bind(&partial_remainder_loop);
      __ fprem1();
      __ fwait();
      __ fnstsw_ax();
      __ testl(rax, Immediate(0x400));  // Check C2 bit of FPU status word.
      // If C2 is set, computation only has partial result. Loop to
      // continue computation.
      __ j(not_zero, &partial_remainder_loop);
    }
    // FPU Stack: input, 2*pi, input % 2*pi
    __ fstp(2);
    // FPU Stack: input % 2*pi, 2*pi,
    __ fstp(0);
    // FPU Stack: input % 2*pi
    __ movq(rax, rdi);  // Restore rax, pointer to the new HeapNumber.
    __ bind(&in_range);
    switch (type_) {
      case TranscendentalCache::SIN:
        __ fsin();
        break;
      case TranscendentalCache::COS:
        __ fcos();
        break;
      default:
        UNREACHABLE();
    }
    __ bind(&done);
  } else {
    ASSERT(type_ == TranscendentalCache::LOG);
    __ fldln2();
    __ fxch();
    __ fyl2x();
  }
}
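
// A hedged C++ sketch of the exponent test used above to decide whether
// fsin/fcos may be applied directly (the helper name is illustrative;
// constants follow the IEEE-754 double layout used by HeapNumber):
//
//   bool InFsinFcosRange(uint64_t bits) {
//     // Biased exponent: the 11 bits just below the sign bit.
//     uint32_t exponent = static_cast<uint32_t>(bits >> 52) & 0x7ff;
//     // fsin/fcos handle |x| < 2^63, i.e. unbiased exponent below 63.
//     return exponent < 63 + 1023;
//   }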


// Get the integer part of a heap number.
// Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx.
void IntegerConvert(MacroAssembler* masm,
                    Register result,
                    Register source) {
  // Result may be rcx. If result and source are the same register, source will
  // be overwritten.
  ASSERT(!result.is(rdi) && !result.is(rbx));
  // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use
  // cvttsd2si (32-bit version) directly.
  Register double_exponent = rbx;
  Register double_value = rdi;
  NearLabel done, exponent_63_plus;
  // Get double and extract exponent.
  __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
  // Clear result preemptively, in case we need to return zero.
  __ xorl(result, result);
  __ movq(xmm0, double_value);  // Save copy in xmm0 in case we need it there.
  // Double to remove sign bit, shift exponent down to least significant bits,
  // and subtract bias to get the unshifted, unbiased exponent.
  __ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
  __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
  __ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
  // Check whether the exponent is too big for a 63 bit unsigned integer.
  __ cmpl(double_exponent, Immediate(63));
  __ j(above_equal, &exponent_63_plus);
  // Handle exponent range 0..62.
  __ cvttsd2siq(result, xmm0);
  __ jmp(&done);

  __ bind(&exponent_63_plus);
  // Exponent negative or 63+.
  __ cmpl(double_exponent, Immediate(83));
  // If the exponent is negative or above 83, the number contains no
  // significant bits in the range 0..2^31, so the result is zero; the
  // result register was already cleared above.
  __ j(above, &done);

  // Exponent in range 63..83.
  // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely
  // the least significant exponent-52 bits.

  // Negate low bits of mantissa if value is negative.
  __ addq(double_value, double_value);  // Move sign bit to carry.
  __ sbbl(result, result);  // And convert carry to -1 in result register.
  // If the value is negative, compute (double_value - 1) ^ -1; otherwise
  // compute (double_value - 0) ^ 0, which leaves the value unchanged.
  __ addl(double_value, result);
  // Do xor in opposite directions depending on where we want the result
  // (depending on whether result is rcx or not).

  if (result.is(rcx)) {
    __ xorl(double_value, result);
    // Left shift mantissa by (exponent - mantissabits - 1) to save the
    // bits that have positional values below 2^32 (the extra -1 comes from the
    // doubling done above to move the sign bit into the carry flag).
    __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
    __ shll_cl(double_value);
    __ movl(result, double_value);
  } else {
    // As the then-branch, but move double-value to result before shifting.
    __ xorl(result, double_value);
    __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
    __ shll_cl(result);
  }

  __ bind(&done);
}
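
// A hedged C++ model of IntegerConvert above (illustrative only; callers
// consume the low 32 bits, so the model returns the integer part modulo
// 2^32 as a signed value):
//
//   int32_t IntegerConvertModel(double d) {
//     if (!std::isfinite(d)) return 0;  // The exponent check rejects these.
//     double t = std::trunc(d);
//     uint32_t u = static_cast<uint32_t>(
//         std::fmod(std::fabs(t), 4294967296.0));  // Bits below 2^32.
//     return static_cast<int32_t>(t < 0 ? 0u - u : u);  // Two's complement.
//   }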


// Input: rdx, rax are the left and right objects of a bit op.
// Output: rax, rcx are left and right integers for a bit op.
void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
  // Check float operands.
  Label done;
  Label rax_is_smi;
  Label rax_is_object;
  Label rdx_is_object;

  __ JumpIfNotSmi(rdx, &rdx_is_object);
  __ SmiToInteger32(rdx, rdx);
  __ JumpIfSmi(rax, &rax_is_smi);

  __ bind(&rax_is_object);
  IntegerConvert(masm, rcx, rax);  // Uses rdi, rcx and rbx.
  __ jmp(&done);

  __ bind(&rdx_is_object);
  IntegerConvert(masm, rdx, rdx);  // Uses rdi, rcx and rbx.
  __ JumpIfNotSmi(rax, &rax_is_object);
  __ bind(&rax_is_smi);
  __ SmiToInteger32(rcx, rax);

  __ bind(&done);
  __ movl(rax, rdx);
}


// Input: rdx, rax are the left and right objects of a bit op.
// Output: rax, rcx are left and right integers for a bit op.
// Jump to conversion_failure: rdx and rax are unchanged.
void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
                                         Label* conversion_failure,
                                         Register heap_number_map) {
  // Check float operands.
  Label arg1_is_object, check_undefined_arg1;
  Label arg2_is_object, check_undefined_arg2;
  Label load_arg2, done;

  __ JumpIfNotSmi(rdx, &arg1_is_object);
  __ SmiToInteger32(r8, rdx);
  __ jmp(&load_arg2);

  // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
  __ bind(&check_undefined_arg1);
  __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
  __ j(not_equal, conversion_failure);
  __ Set(r8, 0);
  __ jmp(&load_arg2);

  __ bind(&arg1_is_object);
  __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
  __ j(not_equal, &check_undefined_arg1);
  // Get the untagged integer version of the rdx heap number in r8.
  IntegerConvert(masm, r8, rdx);

  // Here r8 has the untagged integer, rax has a Smi or a heap number.
  __ bind(&load_arg2);
  // Test if arg2 is a Smi.
  __ JumpIfNotSmi(rax, &arg2_is_object);
  __ SmiToInteger32(rcx, rax);
  __ jmp(&done);

  // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
  __ bind(&check_undefined_arg2);
  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
  __ j(not_equal, conversion_failure);
  __ Set(rcx, 0);
  __ jmp(&done);

  __ bind(&arg2_is_object);
  __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
  __ j(not_equal, &check_undefined_arg2);
  // Get the untagged integer version of the rax heap number in rcx.
  IntegerConvert(masm, rcx, rax);
  __ bind(&done);
  __ movl(rax, r8);
}


void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
  __ SmiToInteger32(kScratchRegister, rdx);
  __ cvtlsi2sd(xmm0, kScratchRegister);
  __ SmiToInteger32(kScratchRegister, rax);
  __ cvtlsi2sd(xmm1, kScratchRegister);
}


void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) {
  Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done;
  // Load operand in rdx into xmm0.
  __ JumpIfSmi(rdx, &load_smi_rdx);
  __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
  // Load operand in rax into xmm1.
  __ JumpIfSmi(rax, &load_smi_rax);
  __ bind(&load_nonsmi_rax);
  __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
  __ jmp(&done);

  __ bind(&load_smi_rdx);
  __ SmiToInteger32(kScratchRegister, rdx);
  __ cvtlsi2sd(xmm0, kScratchRegister);
  __ JumpIfNotSmi(rax, &load_nonsmi_rax);

  __ bind(&load_smi_rax);
  __ SmiToInteger32(kScratchRegister, rax);
  __ cvtlsi2sd(xmm1, kScratchRegister);

  __ bind(&done);
}


void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
                                                  Label* not_numbers) {
  Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
  // Load operand in rdx into xmm0, or branch to not_numbers.
  __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
  __ JumpIfSmi(rdx, &load_smi_rdx);
  __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
  __ j(not_equal, not_numbers);  // Argument in rdx is not a number.
  __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
  // Load operand in rax into xmm1, or branch to not_numbers.
  __ JumpIfSmi(rax, &load_smi_rax);

  __ bind(&load_nonsmi_rax);
  __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
  __ j(not_equal, not_numbers);
  __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
  __ jmp(&done);

  __ bind(&load_smi_rdx);
  __ SmiToInteger32(kScratchRegister, rdx);
  __ cvtlsi2sd(xmm0, kScratchRegister);
  __ JumpIfNotSmi(rax, &load_nonsmi_rax);

  __ bind(&load_smi_rax);
  __ SmiToInteger32(kScratchRegister, rax);
  __ cvtlsi2sd(xmm1, kScratchRegister);
  __ bind(&done);
}
1377 
1378 
NumbersToSmis(MacroAssembler * masm,Register first,Register second,Register scratch1,Register scratch2,Register scratch3,Label * on_success,Label * on_not_smis)1379 void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
1380                                         Register first,
1381                                         Register second,
1382                                         Register scratch1,
1383                                         Register scratch2,
1384                                         Register scratch3,
1385                                         Label* on_success,
1386                                         Label* on_not_smis)   {
1387   Register heap_number_map = scratch3;
1388   Register smi_result = scratch1;
1389   Label done;
1390 
1391   __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
1392 
1393   NearLabel first_smi, check_second;
1394   __ JumpIfSmi(first, &first_smi);
1395   __ cmpq(FieldOperand(first, HeapObject::kMapOffset), heap_number_map);
1396   __ j(not_equal, on_not_smis);
1397   // Convert HeapNumber to smi if possible.
1398   __ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset));
1399   __ movq(scratch2, xmm0);
1400   __ cvttsd2siq(smi_result, xmm0);
1401   // Check if conversion was successful by converting back and
1402   // comparing to the original double's bits.
1403   __ cvtlsi2sd(xmm1, smi_result);
1404   __ movq(kScratchRegister, xmm1);
1405   __ cmpq(scratch2, kScratchRegister);
1406   __ j(not_equal, on_not_smis);
1407   __ Integer32ToSmi(first, smi_result);
1408 
1409   __ bind(&check_second);
1410   __ JumpIfSmi(second, (on_success != NULL) ? on_success : &done);
1411   __ bind(&first_smi);
1412   if (FLAG_debug_code) {
1413     // Second should be non-smi if we get here.
1414     __ AbortIfSmi(second);
1415   }
1416   __ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map);
1417   __ j(not_equal, on_not_smis);
1418   // Convert second to smi, if possible.
1419   __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset));
1420   __ movq(scratch2, xmm0);
1421   __ cvttsd2siq(smi_result, xmm0);
1422   __ cvtlsi2sd(xmm1, smi_result);
1423   __ movq(kScratchRegister, xmm1);
1424   __ cmpq(scratch2, kScratchRegister);
1425   __ j(not_equal, on_not_smis);
1426   __ Integer32ToSmi(second, smi_result);
1427   if (on_success != NULL) {
1428     __ jmp(on_success);
1429   } else {
1430     __ bind(&done);
1431   }
1432 }
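
// The sequence above - cvttsd2siq to truncate, cvtlsi2sd to convert back,
// then a raw 64-bit compare of the two bit patterns - is the standard test
// for whether a double is exactly a small integer: 1.5, NaN, out-of-range
// values and -0.0 all fail the bit compare. A standalone sketch of the same
// idea in plain C++ (illustrative only, not V8 code; the common union
// type-punning idiom stands in for reading the raw bits):
static bool DoubleToInt32Exact(double value, int* out) {
  union Bits { double d; unsigned long long u; };  // assumes 64-bit ull
  int candidate = static_cast<int>(value);  // truncate; like cvttsd2si this
                                            // yields garbage for NaN and
                                            // out-of-range, which the bit
                                            // compare below then rejects
  Bits original, roundtrip;
  original.d = value;
  roundtrip.d = static_cast<double>(candidate);  // convert back
  if (original.u != roundtrip.u) return false;   // conversion was lossy
  *out = candidate;
  return true;
}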
1433 
1434 
1435 void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
1436   Label slow, done;
1437 
1438   if (op_ == Token::SUB) {
1439     if (include_smi_code_) {
1440       // Check whether the value is a smi.
1441       Label try_float;
1442       __ JumpIfNotSmi(rax, &try_float);
1443       if (negative_zero_ == kIgnoreNegativeZero) {
1444         __ SmiCompare(rax, Smi::FromInt(0));
1445         __ j(equal, &done);
1446       }
1447       __ SmiNeg(rax, rax, &done);
1448       __ jmp(&slow);  // zero, if not handled above, and Smi::kMinValue.
1449 
1450       // Try floating point case.
1451       __ bind(&try_float);
1452     } else if (FLAG_debug_code) {
1453       __ AbortIfSmi(rax);
1454     }
1455 
1456     __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
1457                    Heap::kHeapNumberMapRootIndex);
1458     __ j(not_equal, &slow);
1459     // Operand is a float, negate its value by flipping sign bit.
1460     __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
1461     __ Set(kScratchRegister, 0x01);
1462     __ shl(kScratchRegister, Immediate(63));
1463     __ xor_(rdx, kScratchRegister);  // Flip sign.
1464     // rdx is value to store.
1465     if (overwrite_ == UNARY_OVERWRITE) {
1466       __ movq(FieldOperand(rax, HeapNumber::kValueOffset), rdx);
1467     } else {
1468       __ AllocateHeapNumber(rcx, rbx, &slow);
1469       // rcx: allocated 'empty' number
1470       __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
1471       __ movq(rax, rcx);
1472     }
1473   } else if (op_ == Token::BIT_NOT) {
1474     if (include_smi_code_) {
1475       Label try_float;
1476       __ JumpIfNotSmi(rax, &try_float);
1477       __ SmiNot(rax, rax);
1478       __ jmp(&done);
1479       // Try floating point case.
1480       __ bind(&try_float);
1481     } else if (FLAG_debug_code) {
1482       __ AbortIfSmi(rax);
1483     }
1484 
1485     // Check if the operand is a heap number.
1486     __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
1487                    Heap::kHeapNumberMapRootIndex);
1488     __ j(not_equal, &slow);
1489 
1490     // Convert the heap number in rax to an untagged integer in rcx.
1491     IntegerConvert(masm, rax, rax);
1492 
1493     // Do the bitwise operation and smi tag the result.
1494     __ notl(rax);
1495     __ Integer32ToSmi(rax, rax);
1496   }
1497 
1498   // Return from the stub.
1499   __ bind(&done);
1500   __ StubReturn(1);
1501 
1502   // Handle the slow case by jumping to the JavaScript builtin.
1503   __ bind(&slow);
1504   __ pop(rcx);  // pop return address
1505   __ push(rax);
1506   __ push(rcx);  // push return address
1507   switch (op_) {
1508     case Token::SUB:
1509       __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
1510       break;
1511     case Token::BIT_NOT:
1512       __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
1513       break;
1514     default:
1515       UNREACHABLE();
1516   }
1517 }
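
// The stub negates a heap number by XOR-ing bit 63, the IEEE-754 sign bit,
// rather than doing arithmetic; note that this maps 0.0 to -0.0, which is
// why the smi fast path above routes zero to the slow case when negative
// zero must be respected. A standalone sketch (illustrative, not V8 code):
static double NegateBySignBit(double value) {
  union Bits { double d; unsigned long long u; } bits;  // assumes 64-bit ull
  bits.d = value;
  bits.u ^= 1ULL << 63;  // flip the sign bit: 1.5 -> -1.5, 0.0 -> -0.0
  return bits.d;
}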
1518 
1519 
1520 void MathPowStub::Generate(MacroAssembler* masm) {
1521   // Registers are used as follows:
1522   // rdx = base
1523   // rax = exponent
1524   // rcx = temporary, result
1525 
1526   Label allocate_return, call_runtime;
1527 
1528   // Load input parameters.
1529   __ movq(rdx, Operand(rsp, 2 * kPointerSize));
1530   __ movq(rax, Operand(rsp, 1 * kPointerSize));
1531 
1532   // Save 1 in xmm3 - we need this several times later on.
1533   __ Set(rcx, 1);
1534   __ cvtlsi2sd(xmm3, rcx);
1535 
1536   Label exponent_nonsmi;
1537   Label base_nonsmi;
1538   // If the exponent is a heap number go to that specific case.
1539   __ JumpIfNotSmi(rax, &exponent_nonsmi);
1540   __ JumpIfNotSmi(rdx, &base_nonsmi);
1541 
1542   // Optimized version when both exponent and base are smis.
1543   Label powi;
1544   __ SmiToInteger32(rdx, rdx);
1545   __ cvtlsi2sd(xmm0, rdx);
1546   __ jmp(&powi);
1547   // Exponent is a smi and base is a heap number.
1548   __ bind(&base_nonsmi);
1549   __ CompareRoot(FieldOperand(rdx, HeapObject::kMapOffset),
1550                  Heap::kHeapNumberMapRootIndex);
1551   __ j(not_equal, &call_runtime);
1552 
1553   __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
1554 
1555   // Optimized version of pow if exponent is a smi.
1556   // xmm0 contains the base.
1557   __ bind(&powi);
1558   __ SmiToInteger32(rax, rax);
1559 
1560   // Save the exponent in rdx (the base register); we check its sign later.
1561   // We know that base and exponent are in different registers.
1562   __ movq(rdx, rax);
1563 
1564   // Get absolute value of exponent.
1565   NearLabel no_neg;
1566   __ cmpl(rax, Immediate(0));
1567   __ j(greater_equal, &no_neg);
1568   __ negl(rax);
1569   __ bind(&no_neg);
1570 
1571   // Load xmm1 with 1.
1572   __ movsd(xmm1, xmm3);
1573   NearLabel while_true;
1574   NearLabel no_multiply;
1575 
1576   __ bind(&while_true);
1577   __ shrl(rax, Immediate(1));
1578   __ j(not_carry, &no_multiply);
1579   __ mulsd(xmm1, xmm0);
1580   __ bind(&no_multiply);
1581   __ mulsd(xmm0, xmm0);
1582   __ j(not_zero, &while_true);
1583 
1584   // rdx now holds the original exponent - if the exponent is negative,
1585   // return 1/result.
1586   __ testl(rdx, rdx);
1587   __ j(positive, &allocate_return);
1588   // Special case if xmm1 has reached infinity.
1589   __ divsd(xmm3, xmm1);
1590   __ movsd(xmm1, xmm3);
1591   __ xorpd(xmm0, xmm0);
1592   __ ucomisd(xmm0, xmm1);
1593   __ j(equal, &call_runtime);
1594 
1595   __ jmp(&allocate_return);
1596 
1597   // Exponent (or both) is a heap number - no matter what, we should now
1598   // work on doubles.
1599   __ bind(&exponent_nonsmi);
1600   __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
1601                  Heap::kHeapNumberMapRootIndex);
1602   __ j(not_equal, &call_runtime);
1603   __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
1604   // Test if exponent is nan.
1605   __ ucomisd(xmm1, xmm1);
1606   __ j(parity_even, &call_runtime);
1607 
1608   NearLabel base_not_smi;
1609   NearLabel handle_special_cases;
1610   __ JumpIfNotSmi(rdx, &base_not_smi);
1611   __ SmiToInteger32(rdx, rdx);
1612   __ cvtlsi2sd(xmm0, rdx);
1613   __ jmp(&handle_special_cases);
1614 
1615   __ bind(&base_not_smi);
1616   __ CompareRoot(FieldOperand(rdx, HeapObject::kMapOffset),
1617                  Heap::kHeapNumberMapRootIndex);
1618   __ j(not_equal, &call_runtime);
1619   __ movl(rcx, FieldOperand(rdx, HeapNumber::kExponentOffset));
1620   __ andl(rcx, Immediate(HeapNumber::kExponentMask));
1621   __ cmpl(rcx, Immediate(HeapNumber::kExponentMask));
1622   // base is NaN or +/-Infinity
1623   __ j(greater_equal, &call_runtime);
1624   __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
1625 
1626   // base is in xmm0 and exponent is in xmm1.
1627   __ bind(&handle_special_cases);
1628   NearLabel not_minus_half;
1629   // Test for -0.5.
1630   // Load xmm2 with -0.5.
1631   __ movq(rcx, V8_UINT64_C(0xBFE0000000000000), RelocInfo::NONE);
1632   __ movq(xmm2, rcx);
1633   // xmm2 now has -0.5.
1634   __ ucomisd(xmm2, xmm1);
1635   __ j(not_equal, &not_minus_half);
1636 
1637   // Calculates reciprocal of square root.
1638   // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
1639   __ xorpd(xmm1, xmm1);
1640   __ addsd(xmm1, xmm0);
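  // ((-0.0) + (+0.0) is +0.0 under the default rounding mode, so the
  // xorpd/addsd pair replaces a -0 base with +0 before sqrtsd sees it;
  // the same pattern is used for the 0.5 case below.)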
1641   __ sqrtsd(xmm1, xmm1);
1642   __ divsd(xmm3, xmm1);
1643   __ movsd(xmm1, xmm3);
1644   __ jmp(&allocate_return);
1645 
1646   // Test for 0.5.
1647   __ bind(&not_minus_half);
1648   // Load xmm2 with 0.5.
1649   // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
1650   __ addsd(xmm2, xmm3);
1651   // xmm2 now has 0.5.
1652   __ ucomisd(xmm2, xmm1);
1653   __ j(not_equal, &call_runtime);
1654   // Calculates square root.
1655   // sqrtsd returns -0 when input is -0.  ECMA spec requires +0.
1656   __ xorpd(xmm1, xmm1);
1657   __ addsd(xmm1, xmm0);
1658   __ sqrtsd(xmm1, xmm1);
1659 
1660   __ bind(&allocate_return);
1661   __ AllocateHeapNumber(rcx, rax, &call_runtime);
1662   __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm1);
1663   __ movq(rax, rcx);
1664   __ ret(2 * kPointerSize);
1665 
1666   __ bind(&call_runtime);
1667   __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
1668 }
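
// The powi loop above is binary exponentiation: shrl shifts the lowest
// exponent bit into the carry flag, the result picks up a factor of the
// current base whenever that bit is 1, and the base is squared once per
// bit. A standalone sketch of the same algorithm (illustrative, not V8
// code):
static double PowSmallInt(double base, int exponent) {
  unsigned n = exponent < 0 ? 0u - static_cast<unsigned>(exponent)
                            : static_cast<unsigned>(exponent);
  double result = 1.0;
  while (n != 0) {
    if (n & 1) result *= base;  // bit shifted out into the carry flag
    base *= base;               // square once per exponent bit
    n >>= 1;
  }
  // Negative exponent: return 1/result. (The stub additionally bails out to
  // the runtime when result overflowed to infinity, i.e. 1/result == 0.)
  return exponent < 0 ? 1.0 / result : result;
}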
1669 
1670 
1671 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
1672   // The key is in rdx and the parameter count is in rax.
1673 
1674   // The displacement is used for skipping the frame pointer on the
1675   // stack. It is the offset of the last parameter (if any) relative
1676   // to the frame pointer.
1677   static const int kDisplacement = 1 * kPointerSize;
1678 
1679   // Check that the key is a smi.
1680   Label slow;
1681   __ JumpIfNotSmi(rdx, &slow);
1682 
1683   // Check if the calling frame is an arguments adaptor frame.  We look at the
1684   // context offset, and if the frame is not a regular one, then we find a
1685   // Smi instead of the context.  We can't use SmiCompare here, because that
1686   // only works for comparing two smis.
1687   Label adaptor;
1688   __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
1689   __ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
1690          Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
1691   __ j(equal, &adaptor);
1692 
1693   // Check index against formal parameters count limit passed in
1694   // through register rax. Use unsigned comparison to get negative
1695   // check for free.
1696   __ cmpq(rdx, rax);
1697   __ j(above_equal, &slow);
1698 
1699   // Read the argument from the stack and return it.
1700   SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
1701   __ lea(rbx, Operand(rbp, index.reg, index.scale, 0));
1702   index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
1703   __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
1704   __ Ret();
1705 
1706   // Arguments adaptor case: Check index against actual arguments
1707   // limit found in the arguments adaptor frame. Use unsigned
1708   // comparison to get negative check for free.
1709   __ bind(&adaptor);
1710   __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
1711   __ cmpq(rdx, rcx);
1712   __ j(above_equal, &slow);
1713 
1714   // Read the argument from the stack and return it.
1715   index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2);
1716   __ lea(rbx, Operand(rbx, index.reg, index.scale, 0));
1717   index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
1718   __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
1719   __ Ret();
1720 
1721   // Slow-case: Handle non-smi or out-of-bounds access to arguments
1722   // by calling the runtime system.
1723   __ bind(&slow);
1724   __ pop(rbx);  // Return address.
1725   __ push(rdx);
1726   __ push(rbx);
1727   __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
1728 }
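
// "Unsigned comparison to get negative check for free": casting a possibly
// negative index to unsigned makes every negative value compare above any
// valid count, so a single branch covers both bounds checks. A standalone
// sketch (illustrative, not V8 code):
static bool IndexInBounds(int index, int count) {
  // Equivalent to (index >= 0 && index < count) with one compare.
  return static_cast<unsigned>(index) < static_cast<unsigned>(count);
}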
1729 
1730 
1731 void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
1732   // rsp[0] : return address
1733   // rsp[8] : number of parameters
1734   // rsp[16] : receiver displacement
1735   // rsp[24] : function
1736 
1737   // The displacement is used for skipping the return address and the
1738   // frame pointer on the stack. It is the offset of the last
1739   // parameter (if any) relative to the frame pointer.
1740   static const int kDisplacement = 2 * kPointerSize;
1741 
1742   // Check if the calling frame is an arguments adaptor frame.
1743   Label adaptor_frame, try_allocate, runtime;
1744   __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
1745   __ Cmp(Operand(rdx, StandardFrameConstants::kContextOffset),
1746          Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
1747   __ j(equal, &adaptor_frame);
1748 
1749   // Get the length from the frame.
1750   __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
1751   __ jmp(&try_allocate);
1752 
1753   // Patch the arguments.length and the parameters pointer.
1754   __ bind(&adaptor_frame);
1755   __ SmiToInteger32(rcx,
1756                     Operand(rdx,
1757                             ArgumentsAdaptorFrameConstants::kLengthOffset));
1758   // Space on stack must already hold a smi.
1759   __ Integer32ToSmiField(Operand(rsp, 1 * kPointerSize), rcx);
1760   // Do not clobber the length index for the indexing operation since
1761   // it is used to compute the size for allocation later.
1762   __ lea(rdx, Operand(rdx, rcx, times_pointer_size, kDisplacement));
1763   __ movq(Operand(rsp, 2 * kPointerSize), rdx);
1764 
1765   // Try the new space allocation. Start out with computing the size of
1766   // the arguments object and the elements array.
1767   Label add_arguments_object;
1768   __ bind(&try_allocate);
1769   __ testl(rcx, rcx);
1770   __ j(zero, &add_arguments_object);
1771   __ leal(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
1772   __ bind(&add_arguments_object);
1773   __ addl(rcx, Immediate(GetArgumentsObjectSize()));
1774 
1775   // Do the allocation of both objects in one go.
1776   __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
1777 
1778   // Get the arguments boilerplate from the current (global) context.
1779   __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
1780   __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
1781   __ movq(rdi, Operand(rdi,
1782                        Context::SlotOffset(GetArgumentsBoilerplateIndex())));
1783 
1784   // Copy the JS object part.
1785   STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
1786   __ movq(kScratchRegister, FieldOperand(rdi, 0 * kPointerSize));
1787   __ movq(rdx, FieldOperand(rdi, 1 * kPointerSize));
1788   __ movq(rbx, FieldOperand(rdi, 2 * kPointerSize));
1789   __ movq(FieldOperand(rax, 0 * kPointerSize), kScratchRegister);
1790   __ movq(FieldOperand(rax, 1 * kPointerSize), rdx);
1791   __ movq(FieldOperand(rax, 2 * kPointerSize), rbx);
1792 
1793   if (type_ == NEW_NON_STRICT) {
1794     // Setup the callee in-object property.
1795     ASSERT(Heap::kArgumentsCalleeIndex == 1);
1796     __ movq(kScratchRegister, Operand(rsp, 3 * kPointerSize));
1797     __ movq(FieldOperand(rax, JSObject::kHeaderSize +
1798                               Heap::kArgumentsCalleeIndex * kPointerSize),
1799             kScratchRegister);
1800   }
1801 
1802   // Get the length (smi tagged) and set that as an in-object property too.
1803   ASSERT(Heap::kArgumentsLengthIndex == 0);
1804   __ movq(rcx, Operand(rsp, 1 * kPointerSize));
1805   __ movq(FieldOperand(rax, JSObject::kHeaderSize +
1806                             Heap::kArgumentsLengthIndex * kPointerSize),
1807           rcx);
1808 
1809   // If there are no actual arguments, we're done.
1810   Label done;
1811   __ SmiTest(rcx);
1812   __ j(zero, &done);
1813 
1814   // Get the parameters pointer from the stack and untag the length.
1815   __ movq(rdx, Operand(rsp, 2 * kPointerSize));
1816 
1817   // Setup the elements pointer in the allocated arguments object and
1818   // initialize the header in the elements fixed array.
1819   __ lea(rdi, Operand(rax, GetArgumentsObjectSize()));
1820   __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
1821   __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
1822   __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
1823   __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
1824   __ SmiToInteger32(rcx, rcx);  // Untag length for the loop below.
1825 
1826   // Copy the fixed array slots.
1827   Label loop;
1828   __ bind(&loop);
1829   __ movq(kScratchRegister, Operand(rdx, -1 * kPointerSize));  // Skip receiver.
1830   __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister);
1831   __ addq(rdi, Immediate(kPointerSize));
1832   __ subq(rdx, Immediate(kPointerSize));
1833   __ decl(rcx);
1834   __ j(not_zero, &loop);
1835 
1836   // Return and remove the on-stack parameters.
1837   __ bind(&done);
1838   __ ret(3 * kPointerSize);
1839 
1840   // Do the runtime call to allocate the arguments object.
1841   __ bind(&runtime);
1842   __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
1843 }
1844 
1845 
1846 void RegExpExecStub::Generate(MacroAssembler* masm) {
1847   // Jump straight to the runtime system if native RegExp is not selected
1848   // at compile time, or if entering generated regexp code is disabled by
1849   // the runtime flag.
1850 #ifdef V8_INTERPRETED_REGEXP
1851   __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
1852 #else  // V8_INTERPRETED_REGEXP
1853   if (!FLAG_regexp_entry_native) {
1854     __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
1855     return;
1856   }
1857 
1858   // Stack frame on entry.
1859   //  rsp[0]: return address
1860   //  rsp[8]: last_match_info (expected JSArray)
1861   //  rsp[16]: previous index
1862   //  rsp[24]: subject string
1863   //  rsp[32]: JSRegExp object
1864 
1865   static const int kLastMatchInfoOffset = 1 * kPointerSize;
1866   static const int kPreviousIndexOffset = 2 * kPointerSize;
1867   static const int kSubjectOffset = 3 * kPointerSize;
1868   static const int kJSRegExpOffset = 4 * kPointerSize;
1869 
1870   Label runtime;
1871   // Ensure that a RegExp stack is allocated.
1872   Isolate* isolate = masm->isolate();
1873   ExternalReference address_of_regexp_stack_memory_address =
1874       ExternalReference::address_of_regexp_stack_memory_address(isolate);
1875   ExternalReference address_of_regexp_stack_memory_size =
1876       ExternalReference::address_of_regexp_stack_memory_size(isolate);
1877   __ Load(kScratchRegister, address_of_regexp_stack_memory_size);
1878   __ testq(kScratchRegister, kScratchRegister);
1879   __ j(zero, &runtime);
1880 
1881 
1882   // Check that the first argument is a JSRegExp object.
1883   __ movq(rax, Operand(rsp, kJSRegExpOffset));
1884   __ JumpIfSmi(rax, &runtime);
1885   __ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister);
1886   __ j(not_equal, &runtime);
1887   // Check that the RegExp has been compiled (data contains a fixed array).
1888   __ movq(rax, FieldOperand(rax, JSRegExp::kDataOffset));
1889   if (FLAG_debug_code) {
1890     Condition is_smi = masm->CheckSmi(rax);
1891     __ Check(NegateCondition(is_smi),
1892         "Unexpected type for RegExp data, FixedArray expected");
1893     __ CmpObjectType(rax, FIXED_ARRAY_TYPE, kScratchRegister);
1894     __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
1895   }
1896 
1897   // rax: RegExp data (FixedArray)
1898   // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
1899   __ SmiToInteger32(rbx, FieldOperand(rax, JSRegExp::kDataTagOffset));
1900   __ cmpl(rbx, Immediate(JSRegExp::IRREGEXP));
1901   __ j(not_equal, &runtime);
1902 
1903   // rax: RegExp data (FixedArray)
1904   // Check that the number of captures fits in the static offsets vector
1905   __ SmiToInteger32(rdx,
1906                     FieldOperand(rax, JSRegExp::kIrregexpCaptureCountOffset));
1907   // Calculate number of capture registers (number_of_captures + 1) * 2.
1908   __ leal(rdx, Operand(rdx, rdx, times_1, 2));
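  // (leal rdx, [rdx + rdx*1 + 2] computes rdx * 2 + 2 - two registers per
  // capture plus two for the match itself - in a single instruction.)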
1909   // Check that the static offsets vector buffer is large enough.
1910   __ cmpl(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize));
1911   __ j(above, &runtime);
1912 
1913   // rax: RegExp data (FixedArray)
1914   // rdx: Number of capture registers
1915   // Check that the second argument is a string.
1916   __ movq(rdi, Operand(rsp, kSubjectOffset));
1917   __ JumpIfSmi(rdi, &runtime);
1918   Condition is_string = masm->IsObjectStringType(rdi, rbx, rbx);
1919   __ j(NegateCondition(is_string), &runtime);
1920 
1921   // rdi: Subject string.
1922   // rax: RegExp data (FixedArray).
1923   // rdx: Number of capture registers.
1924   // Check that the third argument is a non-negative smi less than the string
1925   // length. A negative value will be greater (unsigned comparison).
1926   __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
1927   __ JumpIfNotSmi(rbx, &runtime);
1928   __ SmiCompare(rbx, FieldOperand(rdi, String::kLengthOffset));
1929   __ j(above_equal, &runtime);
1930 
1931   // rax: RegExp data (FixedArray)
1932   // rdx: Number of capture registers
1933   // Check that the fourth object is a JSArray object.
1934   __ movq(rdi, Operand(rsp, kLastMatchInfoOffset));
1935   __ JumpIfSmi(rdi, &runtime);
1936   __ CmpObjectType(rdi, JS_ARRAY_TYPE, kScratchRegister);
1937   __ j(not_equal, &runtime);
1938   // Check that the JSArray is in fast case.
1939   __ movq(rbx, FieldOperand(rdi, JSArray::kElementsOffset));
1940   __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
1941   __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
1942                  Heap::kFixedArrayMapRootIndex);
1943   __ j(not_equal, &runtime);
1944   // Check that the last match info has space for the capture registers and the
1945   // additional information. Ensure no overflow in add.
1946   STATIC_ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
1947   __ SmiToInteger32(rdi, FieldOperand(rbx, FixedArray::kLengthOffset));
1948   __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
1949   __ cmpl(rdx, rdi);
1950   __ j(greater, &runtime);
1951 
1952   // rax: RegExp data (FixedArray)
1953   // Check the representation and encoding of the subject string.
1954   NearLabel seq_ascii_string, seq_two_byte_string, check_code;
1955   __ movq(rdi, Operand(rsp, kSubjectOffset));
1956   __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
1957   __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
1958   // First check for flat two byte string.
1959   __ andb(rbx, Immediate(
1960       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask));
1961   STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
1962   __ j(zero, &seq_two_byte_string);
1963   // Any other flat string must be a flat ascii string.
1964   __ testb(rbx, Immediate(kIsNotStringMask | kStringRepresentationMask));
1965   __ j(zero, &seq_ascii_string);
1966 
1967   // Check for flat cons string.
1968   // A flat cons string is a cons string where the second part is the empty
1969   // string. In that case the subject string is just the first part of the cons
1970   // string. Also in this case the first part of the cons string is known to be
1971   // a sequential string or an external string.
1972   STATIC_ASSERT(kExternalStringTag != 0);
1973   STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
1974   __ testb(rbx, Immediate(kIsNotStringMask | kExternalStringTag));
1975   __ j(not_zero, &runtime);
1976   // String is a cons string.
1977   __ CompareRoot(FieldOperand(rdi, ConsString::kSecondOffset),
1978                  Heap::kEmptyStringRootIndex);
1979   __ j(not_equal, &runtime);
1980   __ movq(rdi, FieldOperand(rdi, ConsString::kFirstOffset));
1981   __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
1982   // String is a cons string with empty second part.
1983   // rdi: first part of cons string.
1984   // rbx: map of first part of cons string.
1985   // Is first part a flat two byte string?
1986   __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
1987            Immediate(kStringRepresentationMask | kStringEncodingMask));
1988   STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
1989   __ j(zero, &seq_two_byte_string);
1990   // Any other flat string must be ascii.
1991   __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
1992            Immediate(kStringRepresentationMask));
1993   __ j(not_zero, &runtime);
1994 
1995   __ bind(&seq_ascii_string);
1996   // rdi: subject string (sequential ascii)
1997   // rax: RegExp data (FixedArray)
1998   __ movq(r11, FieldOperand(rax, JSRegExp::kDataAsciiCodeOffset));
1999   __ Set(rcx, 1);  // Type is ascii.
2000   __ jmp(&check_code);
2001 
2002   __ bind(&seq_two_byte_string);
2003   // rdi: subject string (flat two-byte)
2004   // rax: RegExp data (FixedArray)
2005   __ movq(r11, FieldOperand(rax, JSRegExp::kDataUC16CodeOffset));
2006   __ Set(rcx, 0);  // Type is two byte.
2007 
2008   __ bind(&check_code);
2009   // Check that the irregexp code has been generated for the actual string
2010   // encoding. If it has, the field contains a code object; otherwise it
2011   // contains the hole.
2012   __ CmpObjectType(r11, CODE_TYPE, kScratchRegister);
2013   __ j(not_equal, &runtime);
2014 
2015   // rdi: subject string
2016   // rcx: encoding of subject string (1 if ascii, 0 if two_byte);
2017   // r11: code
2018   // Load used arguments before starting to push arguments for call to native
2019   // RegExp code to avoid handling changing stack height.
2020   __ SmiToInteger64(rbx, Operand(rsp, kPreviousIndexOffset));
2021 
2022   // rdi: subject string
2023   // rbx: previous index
2024   // rcx: encoding of subject string (1 if ascii, 0 if two_byte);
2025   // r11: code
2026   // All checks done. Now push arguments for native regexp code.
2027   Counters* counters = masm->isolate()->counters();
2028   __ IncrementCounter(counters->regexp_entry_native(), 1);
2029 
2030   // Isolates: note we add an additional parameter here (isolate pointer).
2031   static const int kRegExpExecuteArguments = 8;
2032   int argument_slots_on_stack =
2033       masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments);
2034   __ EnterApiExitFrame(argument_slots_on_stack);
2035 
2036   // Argument 8: Pass current isolate address.
2037   // __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
2038   //     Immediate(ExternalReference::isolate_address()));
2039   __ LoadAddress(kScratchRegister, ExternalReference::isolate_address());
2040   __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
2041           kScratchRegister);
2042 
2043   // Argument 7: Indicate that this is a direct call from JavaScript.
2044   __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize),
2045           Immediate(1));
2046 
2047   // Argument 6: Start (high end) of backtracking stack memory area.
2048   __ movq(kScratchRegister, address_of_regexp_stack_memory_address);
2049   __ movq(r9, Operand(kScratchRegister, 0));
2050   __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
2051   __ addq(r9, Operand(kScratchRegister, 0));
2052   // Argument 6 passed in r9 on Linux and on the stack on Windows.
2053 #ifdef _WIN64
2054   __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r9);
2055 #endif
2056 
2057   // Argument 5: static offsets vector buffer.
2058   __ LoadAddress(r8,
2059                  ExternalReference::address_of_static_offsets_vector(isolate));
2060   // Argument 5 passed in r8 on Linux and on the stack on Windows.
2061 #ifdef _WIN64
2062   __ movq(Operand(rsp, (argument_slots_on_stack - 4) * kPointerSize), r8);
2063 #endif
2064 
2065   // First four arguments are passed in registers on both Linux and Windows.
2066 #ifdef _WIN64
2067   Register arg4 = r9;
2068   Register arg3 = r8;
2069   Register arg2 = rdx;
2070   Register arg1 = rcx;
2071 #else
2072   Register arg4 = rcx;
2073   Register arg3 = rdx;
2074   Register arg2 = rsi;
2075   Register arg1 = rdi;
2076 #endif
2077 
2078   // Keep track of aliasing between argX defined above and the registers used.
2079   // rdi: subject string
2080   // rbx: previous index
2081   // rcx: encoding of subject string (1 if ascii, 0 if two_byte);
2082   // r11: code
2083 
2084   // Argument 4: End of string data
2085   // Argument 3: Start of string data
2086   NearLabel setup_two_byte, setup_rest;
2087   __ testb(rcx, rcx);  // Last use of rcx as encoding of subject string.
2088   __ j(zero, &setup_two_byte);
2089   __ SmiToInteger32(rcx, FieldOperand(rdi, String::kLengthOffset));
2090   __ lea(arg4, FieldOperand(rdi, rcx, times_1, SeqAsciiString::kHeaderSize));
2091   __ lea(arg3, FieldOperand(rdi, rbx, times_1, SeqAsciiString::kHeaderSize));
2092   __ jmp(&setup_rest);
2093   __ bind(&setup_two_byte);
2094   __ SmiToInteger32(rcx, FieldOperand(rdi, String::kLengthOffset));
2095   __ lea(arg4, FieldOperand(rdi, rcx, times_2, SeqTwoByteString::kHeaderSize));
2096   __ lea(arg3, FieldOperand(rdi, rbx, times_2, SeqTwoByteString::kHeaderSize));
2097 
2098   __ bind(&setup_rest);
2099   // Argument 2: Previous index.
2100   __ movq(arg2, rbx);
2101 
2102   // Argument 1: Subject string.
2103 #ifdef _WIN64
2104   __ movq(arg1, rdi);
2105 #else
2106   // Already there in AMD64 calling convention.
2107   ASSERT(arg1.is(rdi));
2108 #endif
2109 
2110   // Locate the code entry and call it.
2111   __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
2112   __ call(r11);
2113 
2114   __ LeaveApiExitFrame();
2115 
2116   // Check the result.
2117   NearLabel success;
2118   Label exception;
2119   __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::SUCCESS));
2120   __ j(equal, &success);
2121   __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
2122   __ j(equal, &exception);
2123   __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
2124   // If none of the above, it can only be retry.
2125   // Handle that in the runtime system.
2126   __ j(not_equal, &runtime);
2127 
2128   // For failure return null.
2129   __ LoadRoot(rax, Heap::kNullValueRootIndex);
2130   __ ret(4 * kPointerSize);
2131 
2132   // Load RegExp data.
2133   __ bind(&success);
2134   __ movq(rax, Operand(rsp, kJSRegExpOffset));
2135   __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
2136   __ SmiToInteger32(rax,
2137                     FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
2138   // Calculate number of capture registers (number_of_captures + 1) * 2.
2139   __ leal(rdx, Operand(rax, rax, times_1, 2));
2140 
2141   // rdx: Number of capture registers
2142   // Load last_match_info which is still known to be a fast case JSArray.
2143   __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
2144   __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));
2145 
2146   // rbx: last_match_info backing store (FixedArray)
2147   // rdx: number of capture registers
2148   // Store the capture count.
2149   __ Integer32ToSmi(kScratchRegister, rdx);
2150   __ movq(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
2151           kScratchRegister);
2152   // Store last subject and last input.
2153   __ movq(rax, Operand(rsp, kSubjectOffset));
2154   __ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
2155   __ movq(rcx, rbx);
2156   __ RecordWrite(rcx, RegExpImpl::kLastSubjectOffset, rax, rdi);
2157   __ movq(rax, Operand(rsp, kSubjectOffset));
2158   __ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
2159   __ movq(rcx, rbx);
2160   __ RecordWrite(rcx, RegExpImpl::kLastInputOffset, rax, rdi);
2161 
2162   // Get the static offsets vector filled by the native regexp code.
2163   __ LoadAddress(rcx,
2164                  ExternalReference::address_of_static_offsets_vector(isolate));
2165 
2166   // rbx: last_match_info backing store (FixedArray)
2167   // rcx: offsets vector
2168   // rdx: number of capture registers
2169   NearLabel next_capture, done;
2170   // Capture register counter starts from number of capture registers and
2171   // counts down until wrapping after zero.
2172   __ bind(&next_capture);
2173   __ subq(rdx, Immediate(1));
2174   __ j(negative, &done);
2175   // Read the value from the static offsets vector buffer and make it a smi.
2176   __ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
2177   __ Integer32ToSmi(rdi, rdi);
2178   // Store the smi value in the last match info.
2179   __ movq(FieldOperand(rbx,
2180                        rdx,
2181                        times_pointer_size,
2182                        RegExpImpl::kFirstCaptureOffset),
2183           rdi);
2184   __ jmp(&next_capture);
2185   __ bind(&done);
2186 
2187   // Return last match info.
2188   __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
2189   __ ret(4 * kPointerSize);
2190 
2191   __ bind(&exception);
2192   // The result must now be an exception. If there is no pending exception,
2193   // a stack overflow (on the backtrack stack) was detected in RegExp code,
2194   // but the exception was not created yet. Handle that in the runtime system.
2195   // TODO(592): Rerun the RegExp to get the stack overflow exception.
2196   ExternalReference pending_exception_address(
2197       Isolate::k_pending_exception_address, isolate);
2198   Operand pending_exception_operand =
2199       masm->ExternalOperand(pending_exception_address, rbx);
2200   __ movq(rax, pending_exception_operand);
2201   __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
2202   __ cmpq(rax, rdx);
2203   __ j(equal, &runtime);
2204   __ movq(pending_exception_operand, rdx);
2205 
2206   __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
2207   NearLabel termination_exception;
2208   __ j(equal, &termination_exception);
2209   __ Throw(rax);
2210 
2211   __ bind(&termination_exception);
2212   __ ThrowUncatchable(TERMINATION, rax);
2213 
2214   // Do the runtime call to execute the regexp.
2215   __ bind(&runtime);
2216   __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
2217 #endif  // V8_INTERPRETED_REGEXP
2218 }
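
// The subject-string classification above leans on the bit layout of string
// instance types: masking with kIsNotStringMask | kStringRepresentationMask
// | kStringEncodingMask leaves 0 exactly for sequential two-byte strings,
// and dropping the encoding bit from the mask accepts any sequential
// string. A standalone sketch with hypothetical mask values (illustrative
// only; these are not V8's real constants):
static const unsigned kSketchNotStringMask = 0x80;       // set for non-strings
static const unsigned kSketchRepresentationMask = 0x03;  // 0 == sequential
static const unsigned kSketchEncodingMask = 0x04;        // 0 == two-byte

static bool SketchIsSeqTwoByteString(unsigned instance_type) {
  return (instance_type & (kSketchNotStringMask | kSketchRepresentationMask |
                           kSketchEncodingMask)) == 0;
}

static bool SketchIsSeqString(unsigned instance_type) {
  return (instance_type & (kSketchNotStringMask |
                           kSketchRepresentationMask)) == 0;
}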
2219 
2220 
2221 void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
2222   const int kMaxInlineLength = 100;
2223   Label slowcase;
2224   Label done;
2225   __ movq(r8, Operand(rsp, kPointerSize * 3));
2226   __ JumpIfNotSmi(r8, &slowcase);
2227   __ SmiToInteger32(rbx, r8);
2228   __ cmpl(rbx, Immediate(kMaxInlineLength));
2229   __ j(above, &slowcase);
2230   // Smi-tagging is equivalent to multiplying by 2.
2231   STATIC_ASSERT(kSmiTag == 0);
2232   STATIC_ASSERT(kSmiTagSize == 1);
2233   // Allocate RegExpResult followed by FixedArray with size in rbx.
2234   // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
2235   // Elements:  [Map][Length][..elements..]
2236   __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
2237                         times_pointer_size,
2238                         rbx,  // In: Number of elements.
2239                         rax,  // Out: Start of allocation (tagged).
2240                         rcx,  // Out: End of allocation.
2241                         rdx,  // Scratch register
2242                         &slowcase,
2243                         TAG_OBJECT);
2244   // rax: Start of allocated area, object-tagged.
2245   // rbx: Number of array elements as int32.
2246   // r8: Number of array elements as smi.
2247 
2248   // Set JSArray map to global.regexp_result_map().
2249   __ movq(rdx, ContextOperand(rsi, Context::GLOBAL_INDEX));
2250   __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalContextOffset));
2251   __ movq(rdx, ContextOperand(rdx, Context::REGEXP_RESULT_MAP_INDEX));
2252   __ movq(FieldOperand(rax, HeapObject::kMapOffset), rdx);
2253 
2254   // Set empty properties FixedArray.
2255   __ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex);
2256   __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), kScratchRegister);
2257 
2258   // Set elements to point to FixedArray allocated right after the JSArray.
2259   __ lea(rcx, Operand(rax, JSRegExpResult::kSize));
2260   __ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx);
2261 
2262   // Set input, index and length fields from arguments.
2263   __ movq(r8, Operand(rsp, kPointerSize * 1));
2264   __ movq(FieldOperand(rax, JSRegExpResult::kInputOffset), r8);
2265   __ movq(r8, Operand(rsp, kPointerSize * 2));
2266   __ movq(FieldOperand(rax, JSRegExpResult::kIndexOffset), r8);
2267   __ movq(r8, Operand(rsp, kPointerSize * 3));
2268   __ movq(FieldOperand(rax, JSArray::kLengthOffset), r8);
2269 
2270   // Fill out the elements FixedArray.
2271   // rax: JSArray.
2272   // rcx: FixedArray.
2273   // rbx: Number of elements in array as int32.
2274 
2275   // Set map.
2276   __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
2277   __ movq(FieldOperand(rcx, HeapObject::kMapOffset), kScratchRegister);
2278   // Set length.
2279   __ Integer32ToSmi(rdx, rbx);
2280   __ movq(FieldOperand(rcx, FixedArray::kLengthOffset), rdx);
2281   // Fill contents of fixed-array with the-hole.
2282   __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
2283   __ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize));
2284   // Fill fixed array elements with hole.
2285   // rax: JSArray.
2286   // rbx: Number of elements in array that remains to be filled, as int32.
2287   // rcx: Start of elements in FixedArray.
2288   // rdx: the hole.
2289   Label loop;
2290   __ testl(rbx, rbx);
2291   __ bind(&loop);
2292   __ j(less_equal, &done);  // Jump if rbx is negative or zero.
2293   __ subl(rbx, Immediate(1));
2294   __ movq(Operand(rcx, rbx, times_pointer_size, 0), rdx);
2295   __ jmp(&loop);
2296 
2297   __ bind(&done);
2298   __ ret(3 * kPointerSize);
2299 
2300   __ bind(&slowcase);
2301   __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
2302 }
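
// "Smi-tagging is equivalent to multiplying by 2" refers to the one-bit,
// zero-valued smi tag asserted above. A standalone sketch of that 32-bit
// style representation (illustrative; V8's x64 port actually keeps the
// 32-bit payload in the upper half of the word, which is why the code above
// untags with SmiToInteger32 before using the length):
static int SketchSmiTag(int value) { return value << 1; }  // low tag bit 0
static int SketchSmiUntag(int smi) { return smi >> 1; }    // arithmetic shift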
2303 
2304 
2305 void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
2306                                                          Register object,
2307                                                          Register result,
2308                                                          Register scratch1,
2309                                                          Register scratch2,
2310                                                          bool object_is_smi,
2311                                                          Label* not_found) {
2312   // Register usage: result is also used as a temporary.
2313   Register number_string_cache = result;
2314   Register mask = scratch1;
2315   Register scratch = scratch2;
2316 
2317   // Load the number string cache.
2318   __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
2319 
2320   // Make the hash mask from the length of the number string cache. It
2321   // contains two elements (number and string) for each cache entry.
2322   __ SmiToInteger32(
2323       mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
2324   __ shrl(mask, Immediate(1));
2325   __ subq(mask, Immediate(1));  // Make mask.
2326 
2327   // Calculate the entry in the number string cache. The hash value in the
2328   // number string cache for smis is just the smi value, and the hash for
2329   // doubles is the xor of the upper and lower words. See
2330   // Heap::GetNumberStringCache.
2331   Label is_smi;
2332   Label load_result_from_cache;
2333   if (!object_is_smi) {
2334     __ JumpIfSmi(object, &is_smi);
2335     __ CheckMap(object, FACTORY->heap_number_map(), not_found, true);
2336 
2337     STATIC_ASSERT(8 == kDoubleSize);
2338     __ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
2339     __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
2340     GenerateConvertHashCodeToIndex(masm, scratch, mask);
2341 
2342     Register index = scratch;
2343     Register probe = mask;
2344     __ movq(probe,
2345             FieldOperand(number_string_cache,
2346                          index,
2347                          times_1,
2348                          FixedArray::kHeaderSize));
2349     __ JumpIfSmi(probe, not_found);
2350     __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
2351     __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
2352     __ ucomisd(xmm0, xmm1);
2353     __ j(parity_even, not_found);  // Bail out if NaN is involved.
2354     __ j(not_equal, not_found);  // The cache did not contain this value.
2355     __ jmp(&load_result_from_cache);
2356   }
2357 
2358   __ bind(&is_smi);
2359   __ SmiToInteger32(scratch, object);
2360   GenerateConvertHashCodeToIndex(masm, scratch, mask);
2361 
2362   Register index = scratch;
2363   // Check if the entry is the smi we are looking for.
2364   __ cmpq(object,
2365           FieldOperand(number_string_cache,
2366                        index,
2367                        times_1,
2368                        FixedArray::kHeaderSize));
2369   __ j(not_equal, not_found);
2370 
2371   // Get the result from the cache.
2372   __ bind(&load_result_from_cache);
2373   __ movq(result,
2374           FieldOperand(number_string_cache,
2375                        index,
2376                        times_1,
2377                        FixedArray::kHeaderSize + kPointerSize));
2378   Counters* counters = masm->isolate()->counters();
2379   __ IncrementCounter(counters->number_to_string_native(), 1);
2380 }
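
// The lookup above mirrors Heap::GetNumberStringCache's hash: a smi hashes
// to its own integer value, a heap number to the xor of the two 32-bit
// halves of its IEEE-754 bits, masked to the table size. A standalone
// sketch of the heap-number case (illustrative, not V8 code):
static unsigned HeapNumberCacheHash(double value, unsigned mask) {
  union Bits { double d; unsigned long long u; } bits;  // assumes 64-bit ull
  bits.d = value;
  unsigned low = static_cast<unsigned>(bits.u);
  unsigned high = static_cast<unsigned>(bits.u >> 32);
  return (low ^ high) & mask;  // mask == (number of entries - 1)
}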
2381 
2382 
2383 void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm,
2384                                                         Register hash,
2385                                                         Register mask) {
2386   __ and_(hash, mask);
2387   // Each entry in string cache consists of two pointer sized fields,
2388   // but times_twice_pointer_size (multiplication by 16) scale factor
2389   // is not supported by addrmode on x64 platform.
2390   // So we have to premultiply entry index before lookup.
2391   __ shl(hash, Immediate(kPointerSizeLog2 + 1));
2392 }
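
// A standalone sketch of the conversion above (illustrative, not V8 code):
// each cache entry is two pointer-sized fields - 16 bytes on x64 - and x64
// addressing modes cannot scale by 16, hence the premultiplied byte offset.
static unsigned CacheEntryByteOffset(unsigned hash, unsigned mask) {
  const unsigned kSketchPointerSizeLog2 = 3;  // 8-byte pointers on x64
  return (hash & mask) << (kSketchPointerSizeLog2 + 1);  // * 2 fields * 8
}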
2393 
2394 
2395 void NumberToStringStub::Generate(MacroAssembler* masm) {
2396   Label runtime;
2397 
2398   __ movq(rbx, Operand(rsp, kPointerSize));
2399 
2400   // Generate code to lookup number in the number string cache.
2401   GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, false, &runtime);
2402   __ ret(1 * kPointerSize);
2403 
2404   __ bind(&runtime);
2405   // Handle number to string in the runtime system if not found in the cache.
2406   __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
2407 }
2408 
2409 
2410 static int NegativeComparisonResult(Condition cc) {
2411   ASSERT(cc != equal);
2412   ASSERT((cc == less) || (cc == less_equal)
2413       || (cc == greater) || (cc == greater_equal));
2414   return (cc == greater || cc == greater_equal) ? LESS : GREATER;
2415 }
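
// (I.e. the canned answer that makes the comparison come out false: LESS
// for cc == greater/greater_equal, GREATER for cc == less/less_equal.
// That is the outcome required when an operand is NaN or undefined, for
// which every relational comparison is false.)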
2416 
2417 
2418 void CompareStub::Generate(MacroAssembler* masm) {
2419   ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
2420 
2421   Label check_unequal_objects, done;
2422 
2423   // Compare two smis if required.
2424   if (include_smi_compare_) {
2425     Label non_smi, smi_done;
2426     __ JumpIfNotBothSmi(rax, rdx, &non_smi);
2427     __ subq(rdx, rax);
2428     __ j(no_overflow, &smi_done);
2429     __ not_(rdx);  // Correct sign in case of overflow. rdx cannot be 0 here.
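    // (When rdx - rax overflows, the sign of the difference is the opposite
    // of the true result's; notq flips the sign bit and leaves a non-zero
    // value, which is all the stub's callers inspect.)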
2430     __ bind(&smi_done);
2431     __ movq(rax, rdx);
2432     __ ret(0);
2433     __ bind(&non_smi);
2434   } else if (FLAG_debug_code) {
2435     Label ok;
2436     __ JumpIfNotSmi(rdx, &ok);
2437     __ JumpIfNotSmi(rax, &ok);
2438     __ Abort("CompareStub: smi operands");
2439     __ bind(&ok);
2440   }
2441 
2442   // The compare stub returns a positive, negative, or zero 64-bit integer
2443   // value in rax, corresponding to the result of comparing the two inputs.
2444   // NOTICE! This code is only reached after a smi-fast-case check, so
2445   // it is certain that at least one operand isn't a smi.
2446 
2447   // Two identical objects are equal unless they are both NaN or undefined.
2448   {
2449     NearLabel not_identical;
2450     __ cmpq(rax, rdx);
2451     __ j(not_equal, &not_identical);
2452 
2453     if (cc_ != equal) {
2454       // Check for undefined.  undefined OP undefined is false even though
2455       // undefined == undefined.
2456       NearLabel check_for_nan;
2457       __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
2458       __ j(not_equal, &check_for_nan);
2459       __ Set(rax, NegativeComparisonResult(cc_));
2460       __ ret(0);
2461       __ bind(&check_for_nan);
2462     }
2463 
2464     // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
2465     // so we do the second best thing - test it ourselves.
2466     // Note: if cc_ != equal, never_nan_nan_ is not used.
2467     // We cannot set rax to EQUAL until just before return because
2468     // rax must be unchanged on jump to not_identical.
2469 
2470     if (never_nan_nan_ && (cc_ == equal)) {
2471       __ Set(rax, EQUAL);
2472       __ ret(0);
2473     } else {
2474       NearLabel heap_number;
2475       // If it's not a heap number, return equal for the (in)equality operator.
2476       __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
2477              FACTORY->heap_number_map());
2478       __ j(equal, &heap_number);
2479       if (cc_ != equal) {
2480         // Call runtime on identical JSObjects.  Otherwise return equal.
2481         __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
2482         __ j(above_equal, &not_identical);
2483       }
2484       __ Set(rax, EQUAL);
2485       __ ret(0);
2486 
2487       __ bind(&heap_number);
2488       // It is a heap number, so return equal if it's not NaN.
2489       // For NaN, return 1 for every condition except greater and
2490       // greater-equal.  Return -1 for them, so the comparison yields
2491       // false for all conditions except not-equal.
2492       __ Set(rax, EQUAL);
2493       __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
2494       __ ucomisd(xmm0, xmm0);
2495       __ setcc(parity_even, rax);
2496       // rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
2497       if (cc_ == greater_equal || cc_ == greater) {
2498         __ neg(rax);
2499       }
2500       __ ret(0);
2501     }
2502 
2503     __ bind(&not_identical);
2504   }
2505 
2506   if (cc_ == equal) {  // Both strict and non-strict.
2507     Label slow;  // Fallthrough label.
2508 
2509     // If we're doing a strict equality comparison, we don't have to do
2510     // type conversion, so we generate code to do fast comparison for objects
2511     // and oddballs. Non-smi numbers and strings still go through the usual
2512     // slow-case code.
2513     if (strict_) {
2514       // If either is a Smi (we know that not both are), then they can only
2515       // be equal if the other is a HeapNumber. If so, use the slow case.
2516       {
2517         Label not_smis;
2518         __ SelectNonSmi(rbx, rax, rdx, &not_smis);
2519 
2520         // Check if the non-smi operand is a heap number.
2521         __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
2522                FACTORY->heap_number_map());
2523         // If heap number, handle it in the slow case.
2524         __ j(equal, &slow);
2525         // Return non-equal.  ebx (the lower half of rbx) is not zero.
2526         __ movq(rax, rbx);
2527         __ ret(0);
2528 
2529         __ bind(&not_smis);
2530       }
2531 
2532       // If either operand is a JSObject or an oddball value, then they are not
2533       // equal, since their pointers are different.
2534       // There is no test for undetectability in strict equality.
2535 
2536       // If the first object is a JS object, we have done pointer comparison.
2537       STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
2538       NearLabel first_non_object;
2539       __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
2540       __ j(below, &first_non_object);
2541       // Return non-zero (eax, the low half of rax, is not zero).
2542       Label return_not_equal;
2543       STATIC_ASSERT(kHeapObjectTag != 0);
2544       __ bind(&return_not_equal);
2545       __ ret(0);
2546 
2547       __ bind(&first_non_object);
2548       // Check for oddballs: true, false, null, undefined.
2549       __ CmpInstanceType(rcx, ODDBALL_TYPE);
2550       __ j(equal, &return_not_equal);
2551 
2552       __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
2553       __ j(above_equal, &return_not_equal);
2554 
2555       // Check for oddballs: true, false, null, undefined.
2556       __ CmpInstanceType(rcx, ODDBALL_TYPE);
2557       __ j(equal, &return_not_equal);
2558 
2559       // Fall through to the general case.
2560     }
2561     __ bind(&slow);
2562   }
2563 
2564   // Generate the number comparison code.
2565   if (include_number_compare_) {
2566     Label non_number_comparison;
2567     NearLabel unordered;
2568     FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
2569     __ xorl(rax, rax);
2570     __ xorl(rcx, rcx);
2571     __ ucomisd(xmm0, xmm1);
2572 
2573     // Don't base result on EFLAGS when a NaN is involved.
2574     __ j(parity_even, &unordered);
2575     // Return a result of -1, 0, or 1, based on EFLAGS.
2576     __ setcc(above, rax);
2577     __ setcc(below, rcx);
2578     __ subq(rax, rcx);
2579     __ ret(0);
2580 
2581     // If one of the numbers was NaN, then the result is always false.
2582     // The cc is never not-equal.
2583     __ bind(&unordered);
2584     ASSERT(cc_ != not_equal);
2585     if (cc_ == less || cc_ == less_equal) {
2586       __ Set(rax, 1);
2587     } else {
2588       __ Set(rax, -1);
2589     }
2590     __ ret(0);
2591 
2592     // The number comparison code did not provide a valid result.
2593     __ bind(&non_number_comparison);
2594   }
2595 
2596   // Fast negative check for symbol-to-symbol equality.
2597   Label check_for_strings;
2598   if (cc_ == equal) {
2599     BranchIfNonSymbol(masm, &check_for_strings, rax, kScratchRegister);
2600     BranchIfNonSymbol(masm, &check_for_strings, rdx, kScratchRegister);
2601 
2602     // We've already checked for object identity, so if both operands
2603     // are symbols they aren't equal. Register eax (not rax) already holds a
2604     // non-zero value, which indicates not equal, so just return.
2605     __ ret(0);
2606   }
2607 
2608   __ bind(&check_for_strings);
2609 
2610   __ JumpIfNotBothSequentialAsciiStrings(
2611       rdx, rax, rcx, rbx, &check_unequal_objects);
2612 
2613   // Inline comparison of ascii strings.
2614   StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
2615                                                      rdx,
2616                                                      rax,
2617                                                      rcx,
2618                                                      rbx,
2619                                                      rdi,
2620                                                      r8);
2621 
2622 #ifdef DEBUG
2623   __ Abort("Unexpected fall-through from string comparison");
2624 #endif
2625 
2626   __ bind(&check_unequal_objects);
2627   if (cc_ == equal && !strict_) {
2628     // Not strict equality.  Objects are unequal if
2629     // they are both JSObjects and not undetectable,
2630     // and their pointers are different.
2631     NearLabel not_both_objects, return_unequal;
2632     // At most one is a smi, so we can test for smi by adding the two.
2633     // A smi plus a heap object has the low bit set, a heap object plus
2634     // a heap object has the low bit clear.
2635     STATIC_ASSERT(kSmiTag == 0);
2636     STATIC_ASSERT(kSmiTagMask == 1);
2637     __ lea(rcx, Operand(rax, rdx, times_1, 0));
2638     __ testb(rcx, Immediate(kSmiTagMask));
2639     __ j(not_zero, &not_both_objects);
2640     __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rbx);
2641     __ j(below, &not_both_objects);
2642     __ CmpObjectType(rdx, FIRST_JS_OBJECT_TYPE, rcx);
2643     __ j(below, &not_both_objects);
2644     __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
2645              Immediate(1 << Map::kIsUndetectable));
2646     __ j(zero, &return_unequal);
2647     __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
2648              Immediate(1 << Map::kIsUndetectable));
2649     __ j(zero, &return_unequal);
2650     // The objects are both undetectable, so they both compare as the value
2651     // undefined, and are equal.
2652     __ Set(rax, EQUAL);
2653     __ bind(&return_unequal);
2654     // Return non-equal by returning the non-zero object pointer in rax,
2655     // or return equal if we fell through to here.
2656     __ ret(0);
2657     __ bind(&not_both_objects);
2658   }
2659 
2660   // Push arguments below the return address to prepare jump to builtin.
2661   __ pop(rcx);
2662   __ push(rdx);
2663   __ push(rax);
2664 
2665   // Figure out which native to call and setup the arguments.
2666   Builtins::JavaScript builtin;
2667   if (cc_ == equal) {
2668     builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
2669   } else {
2670     builtin = Builtins::COMPARE;
2671     __ Push(Smi::FromInt(NegativeComparisonResult(cc_)));
2672   }
2673 
2674   // Restore return address on the stack.
2675   __ push(rcx);
2676 
2677   // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
2678   // tagged as a small integer.
2679   __ InvokeBuiltin(builtin, JUMP_FUNCTION);
2680 }
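
// The number comparison above materializes -1/0/1 without branches:
// setcc(above) and setcc(below) each produce a 0-or-1 byte, and their
// difference is the answer. The same branch-free idiom in plain C++
// (illustrative, not V8 code; NaN operands are excluded here, as the stub
// dispatches the unordered case separately first):
static int ThreeWayCompare(double a, double b) {
  return (a > b) - (a < b);  // 1 if a > b, -1 if a < b, 0 if equal
}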
2681 
2682 
2683 void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
2684                                     Label* label,
2685                                     Register object,
2686                                     Register scratch) {
2687   __ JumpIfSmi(object, label);
2688   __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
2689   __ movzxbq(scratch,
2690              FieldOperand(scratch, Map::kInstanceTypeOffset));
2691   // Ensure that no non-strings have the symbol bit set.
2692   STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
2693   STATIC_ASSERT(kSymbolTag != 0);
2694   __ testb(scratch, Immediate(kIsSymbolMask));
2695   __ j(zero, label);
2696 }
2697 
2698 
2699 void StackCheckStub::Generate(MacroAssembler* masm) {
2700   __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
2701 }
2702 
2703 
2704 void CallFunctionStub::Generate(MacroAssembler* masm) {
2705   Label slow;
2706 
2707   // If the receiver might be a value (string, number or boolean) check for this
2708   // and box it if it is.
2709   if (ReceiverMightBeValue()) {
2710     // Get the receiver from the stack.
2711     // +1 ~ return address
2712     Label receiver_is_value, receiver_is_js_object;
2713     __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
2714 
2715     // Check if receiver is a smi (which is a number value).
2716     __ JumpIfSmi(rax, &receiver_is_value);
2717 
2718     // Check if the receiver is a valid JS object.
2719     __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rdi);
2720     __ j(above_equal, &receiver_is_js_object);
2721 
2722     // Call the runtime to box the value.
2723     __ bind(&receiver_is_value);
2724     __ EnterInternalFrame();
2725     __ push(rax);
2726     __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
2727     __ LeaveInternalFrame();
2728     __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rax);
2729 
2730     __ bind(&receiver_is_js_object);
2731   }
2732 
2733   // Get the function to call from the stack.
2734   // +2 ~ receiver, return address
2735   __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
2736 
2737   // Check that the function really is a JavaScript function.
2738   __ JumpIfSmi(rdi, &slow);
2739   // Goto slow case if we do not have a function.
2740   __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
2741   __ j(not_equal, &slow);
2742 
2743   // Fast-case: Just invoke the function.
2744   ParameterCount actual(argc_);
2745   __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
2746 
2747   // Slow-case: Non-function called.
2748   __ bind(&slow);
2749   // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
2750   // of the original receiver from the call site).
2751   __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi);
2752   __ Set(rax, argc_);
2753   __ Set(rbx, 0);
2754   __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
2755   Handle<Code> adaptor =
2756       Isolate::Current()->builtins()->ArgumentsAdaptorTrampoline();
2757   __ Jump(adaptor, RelocInfo::CODE_TARGET);
2758 }
2759 
2760 
2761 bool CEntryStub::NeedsImmovableCode() {
2762   return false;
2763 }
2764 
2765 
2766 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
2767   // Throw exception in rax.
2768   __ Throw(rax);
2769 }
2770 
2771 
2772 void CEntryStub::GenerateCore(MacroAssembler* masm,
2773                               Label* throw_normal_exception,
2774                               Label* throw_termination_exception,
2775                               Label* throw_out_of_memory_exception,
2776                               bool do_gc,
2777                               bool always_allocate_scope) {
2778   // rax: result parameter for PerformGC, if any.
2779   // rbx: pointer to C function  (C callee-saved).
2780   // rbp: frame pointer  (restored after C call).
2781   // rsp: stack pointer  (restored after C call).
2782   // r14: number of arguments including receiver (C callee-saved).
2783   // r15: pointer to the first argument (C callee-saved).
2784   //      This pointer is reused in LeaveExitFrame(), so it is stored in a
2785   //      callee-saved register.
2786 
2787   // Simple results are returned in rax (both AMD64 and Win64 conventions).
2788   // On Win64, complex results are written to the address passed as the first
2789   // argument; the AMD64 convention returns a struct of two pointers in rax+rdx.
2790 
2791   // Check stack alignment.
2792   if (FLAG_debug_code) {
2793     __ CheckStackAlignment();
2794   }
2795 
2796   if (do_gc) {
2797     // Pass failure code returned from last attempt as first argument to
2798     // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
2799     // stack is known to be aligned. This function takes one argument which is
2800   // passed in a register.
2801 #ifdef _WIN64
2802     __ movq(rcx, rax);
2803 #else  // _WIN64
2804     __ movq(rdi, rax);
2805 #endif
2806     __ movq(kScratchRegister,
2807             FUNCTION_ADDR(Runtime::PerformGC),
2808             RelocInfo::RUNTIME_ENTRY);
2809     __ call(kScratchRegister);
2810   }
2811 
2812   ExternalReference scope_depth =
2813       ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
2814   if (always_allocate_scope) {
2815     Operand scope_depth_operand = masm->ExternalOperand(scope_depth);
2816     __ incl(scope_depth_operand);
2817   }
2818 
2819   // Call C function.
2820 #ifdef _WIN64
2821   // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9
2822   // Store Arguments object on stack, below the 4 WIN64 ABI parameter slots.
2823   __ movq(StackSpaceOperand(0), r14);  // argc.
2824   __ movq(StackSpaceOperand(1), r15);  // argv.
2825   if (result_size_ < 2) {
2826     // Pass a pointer to the Arguments object as the first argument.
2827     // Return result in single register (rax).
2828     __ lea(rcx, StackSpaceOperand(0));
2829     __ LoadAddress(rdx, ExternalReference::isolate_address());
2830   } else {
2831     ASSERT_EQ(2, result_size_);
2832     // Pass a pointer to the result location as the first argument.
2833     __ lea(rcx, StackSpaceOperand(2));
2834     // Pass a pointer to the Arguments object as the second argument.
2835     __ lea(rdx, StackSpaceOperand(0));
2836     __ LoadAddress(r8, ExternalReference::isolate_address());
2837   }
2838 
2839 #else  // _WIN64
2840   // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
2841   __ movq(rdi, r14);  // argc.
2842   __ movq(rsi, r15);  // argv.
2843   __ movq(rdx, ExternalReference::isolate_address());
2844 #endif
2845   __ call(rbx);
2846   // Result is in rax - do not destroy this register!
2847 
2848   if (always_allocate_scope) {
2849     Operand scope_depth_operand = masm->ExternalOperand(scope_depth);
2850     __ decl(scope_depth_operand);
2851   }
2852 
2853   // Check for failure result.
2854   Label failure_returned;
2855   STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
2856 #ifdef _WIN64
2857   // If the return value is on the stack, read it into rax and rdx.
2858   if (result_size_ > 1) {
2859     ASSERT_EQ(2, result_size_);
2860     // Read result values stored on stack. Result is stored
2861     // above the four argument mirror slots and the two
2862     // Arguments object slots.
2863     __ movq(rax, Operand(rsp, 6 * kPointerSize));
2864     __ movq(rdx, Operand(rsp, 7 * kPointerSize));
2865   }
2866 #endif
2867   __ lea(rcx, Operand(rax, 1));
2868   // Lower 2 bits of rcx are 0 iff rax has failure tag.
2869   __ testl(rcx, Immediate(kFailureTagMask));
2870   __ j(zero, &failure_returned);
2871 
2872   // Exit the JavaScript to C++ exit frame.
2873   __ LeaveExitFrame(save_doubles_);
2874   __ ret(0);
2875 
2876   // Handling of failure.
2877   __ bind(&failure_returned);
2878 
2879   NearLabel retry;
2880   // If the returned exception is RETRY_AFTER_GC, continue at the retry label.
2881   STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
2882   __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
2883   __ j(zero, &retry);
2884 
2885   // Special handling of out of memory exceptions.
2886   __ movq(kScratchRegister, Failure::OutOfMemoryException(), RelocInfo::NONE);
2887   __ cmpq(rax, kScratchRegister);
2888   __ j(equal, throw_out_of_memory_exception);
2889 
2890   // Retrieve the pending exception and clear the variable.
2891   ExternalReference pending_exception_address(
2892       Isolate::k_pending_exception_address, masm->isolate());
2893   Operand pending_exception_operand =
2894       masm->ExternalOperand(pending_exception_address);
2895   __ movq(rax, pending_exception_operand);
2896   __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
2897   __ movq(pending_exception_operand, rdx);
2898 
2899   // Special handling of termination exceptions which are uncatchable
2900   // by JavaScript code.
2901   __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
2902   __ j(equal, throw_termination_exception);
2903 
2904   // Handle normal exception.
2905   __ jmp(throw_normal_exception);
2906 
2907   // Retry.
2908   __ bind(&retry);
2909 }
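
// A note on the failure check above: failure objects are tagged words whose
// low kFailureTagSize bits are all ones, so adding one clears exactly those
// bits. A minimal C++ sketch of the predicate (illustrative only, not code
// the stub calls; assumes kFailureTagMask == (1 << kFailureTagSize) - 1):
//
//   static bool HasFailureTag(uintptr_t value) {
//     // Mirrors: lea rcx, [rax + 1]; testl rcx, kFailureTagMask; j(zero, ...)
//     return ((value + 1) & kFailureTagMask) == 0;
//   }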
2910 
2911 
2912 void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
2913                                           UncatchableExceptionType type) {
2914   __ ThrowUncatchable(type, rax);
2915 }
2916 
2917 
2918 void CEntryStub::Generate(MacroAssembler* masm) {
2919   // rax: number of arguments including receiver
2920   // rbx: pointer to C function  (C callee-saved)
2921   // rbp: frame pointer of calling JS frame (restored after C call)
2922   // rsp: stack pointer  (restored after C call)
2923   // rsi: current context (restored)
2924 
2925   // NOTE: Invocations of builtins may return failure objects
2926   // instead of a proper result. The builtin entry handles
2927   // this by performing a garbage collection and retrying the
2928   // builtin once.
2929 
2930   // Enter the exit frame that transitions from JavaScript to C++.
2931 #ifdef _WIN64
2932   int arg_stack_space = (result_size_ < 2 ? 2 : 4);
2933 #else
2934   int arg_stack_space = 0;
2935 #endif
2936   __ EnterExitFrame(arg_stack_space, save_doubles_);
2937 
2938   // rax: Holds the context at this point, but should not be used.
2939   //      On entry to code generated by GenerateCore, it must hold
2940   //      a failure result if the do_gc argument to GenerateCore
2941   //      is true.  This failure result can be the result of code
2942   //      generated by a previous call to GenerateCore.  The value
2943   //      of rax is then passed to Runtime::PerformGC.
2944   // rbx: pointer to builtin function  (C callee-saved).
2945   // rbp: frame pointer of exit frame  (restored after C call).
2946   // rsp: stack pointer (restored after C call).
2947   // r14: number of arguments including receiver (C callee-saved).
2948   // r15: argv pointer (C callee-saved).
2949 
2950   Label throw_normal_exception;
2951   Label throw_termination_exception;
2952   Label throw_out_of_memory_exception;
2953 
2954   // Call into the runtime system.
2955   GenerateCore(masm,
2956                &throw_normal_exception,
2957                &throw_termination_exception,
2958                &throw_out_of_memory_exception,
2959                false,
2960                false);
2961 
2962   // Do space-specific GC and retry runtime call.
2963   GenerateCore(masm,
2964                &throw_normal_exception,
2965                &throw_termination_exception,
2966                &throw_out_of_memory_exception,
2967                true,
2968                false);
2969 
2970   // Do full GC and retry runtime call one final time.
2971   Failure* failure = Failure::InternalError();
2972   __ movq(rax, failure, RelocInfo::NONE);
2973   GenerateCore(masm,
2974                &throw_normal_exception,
2975                &throw_termination_exception,
2976                &throw_out_of_memory_exception,
2977                true,
2978                true);
2979 
2980   __ bind(&throw_out_of_memory_exception);
2981   GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
2982 
2983   __ bind(&throw_termination_exception);
2984   GenerateThrowUncatchable(masm, TERMINATION);
2985 
2986   __ bind(&throw_normal_exception);
2987   GenerateThrowTOS(masm);
2988 }
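
// The three GenerateCore calls above implement the following retry protocol,
// shown as a hedged sketch (Call, IsRetryAfterGC, CollectGarbage and
// CollectAllGarbage are illustrative names, not real helpers):
//
//   Object* result = Call(builtin);           // first attempt, no GC
//   if (IsRetryAfterGC(result)) {
//     CollectGarbage(result);                 // GC in the failing space
//     result = Call(builtin);                 // second attempt
//     if (IsRetryAfterGC(result)) {
//       CollectAllGarbage();                  // full GC, always allocate
//       result = Call(builtin);               // final attempt
//     }
//   }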
2989 
2990 
2991 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
2992   Label invoke, exit;
2993 #ifdef ENABLE_LOGGING_AND_PROFILING
2994   Label not_outermost_js, not_outermost_js_2;
2995 #endif
2996   {  // NOLINT. Scope block confuses linter.
2997     MacroAssembler::NoRootArrayScope uninitialized_root_register(masm);
2998     // Set up the frame.
2999     __ push(rbp);
3000     __ movq(rbp, rsp);
3001 
3002     // Push the stack frame type marker twice.
3003     int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
3004     // Scratch register is neither callee-save, nor an argument register on any
3005     // platform. It's free to use at this point.
3006     // Cannot use smi-register for loading yet.
3007     __ movq(kScratchRegister,
3008             reinterpret_cast<uint64_t>(Smi::FromInt(marker)),
3009             RelocInfo::NONE);
3010     __ push(kScratchRegister);  // context slot
3011     __ push(kScratchRegister);  // function slot
3012     // Save callee-saved registers (X64/Win64 calling conventions).
3013     __ push(r12);
3014     __ push(r13);
3015     __ push(r14);
3016     __ push(r15);
3017 #ifdef _WIN64
3018     __ push(rdi);  // Only callee-saved in Win64 ABI, argument in AMD64 ABI.
3019     __ push(rsi);  // Only callee-saved in Win64 ABI, argument in AMD64 ABI.
3020 #endif
3021     __ push(rbx);
3022     // TODO(X64): On Win64, if we ever use XMM6-XMM15, the low 64 bits are
3023     // callee-saved as well.
3024 
3025     // Set up the roots and smi constant registers.
3026     // Needs to be done before any further smi loads.
3027     __ InitializeSmiConstantRegister();
3028     __ InitializeRootRegister();
3029   }
3030 
3031   Isolate* isolate = masm->isolate();
3032 
3033   // Save copies of the top frame descriptor on the stack.
3034   ExternalReference c_entry_fp(Isolate::k_c_entry_fp_address, isolate);
3035   {
3036     Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
3037     __ push(c_entry_fp_operand);
3038   }
3039 
3040 #ifdef ENABLE_LOGGING_AND_PROFILING
3041   // If this is the outermost JS call, set js_entry_sp value.
3042   ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address, isolate);
3043   __ Load(rax, js_entry_sp);
3044   __ testq(rax, rax);
3045   __ j(not_zero, &not_outermost_js);
3046   __ Push(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
3047   __ movq(rax, rbp);
3048   __ Store(js_entry_sp, rax);
3049   Label cont;
3050   __ jmp(&cont);
3051   __ bind(&not_outermost_js);
3052   __ Push(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME));
3053   __ bind(&cont);
3054 #endif
3055 
3056   // Call a faked try-block that does the invoke.
3057   __ call(&invoke);
3058 
3059   // Caught exception: Store result (exception) in the pending
3060   // exception field in the JSEnv and return a failure sentinel.
3061   ExternalReference pending_exception(Isolate::k_pending_exception_address,
3062                                       isolate);
3063   __ Store(pending_exception, rax);
3064   __ movq(rax, Failure::Exception(), RelocInfo::NONE);
3065   __ jmp(&exit);
3066 
3067   // Invoke: Link this frame into the handler chain.
3068   __ bind(&invoke);
3069   __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
3070 
3071   // Clear any pending exceptions.
3072   __ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
3073   __ Store(pending_exception, rax);
3074 
3075   // Fake a receiver (NULL).
3076   __ push(Immediate(0));  // receiver
3077 
3078   // Invoke the function by calling through JS entry trampoline
3079   // builtin and pop the faked function when we return. We load the address
3080   // from an external reference instead of inlining the call target address
3081   // directly in the code, because the builtin stubs may not have been
3082   // generated yet at the time this code is generated.
3083   if (is_construct) {
3084     ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
3085                                       isolate);
3086     __ Load(rax, construct_entry);
3087   } else {
3088     ExternalReference entry(Builtins::kJSEntryTrampoline, isolate);
3089     __ Load(rax, entry);
3090   }
3091   __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
3092   __ call(kScratchRegister);
3093 
3094   // Unlink this frame from the handler chain.
3095   __ PopTryHandler();
3096 
3097   __ bind(&exit);
3098 #ifdef ENABLE_LOGGING_AND_PROFILING
3099   // Check if the current stack frame is marked as the outermost JS frame.
3100   __ pop(rbx);
3101   __ Cmp(rbx, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
3102   __ j(not_equal, &not_outermost_js_2);
3103   __ movq(kScratchRegister, js_entry_sp);
3104   __ movq(Operand(kScratchRegister, 0), Immediate(0));
3105   __ bind(&not_outermost_js_2);
3106 #endif
3107 
3108   // Restore the top frame descriptor from the stack.
3109   { Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
3110     __ pop(c_entry_fp_operand);
3111   }
3112 
3113   // Restore callee-saved registers (X64 conventions).
3114   __ pop(rbx);
3115 #ifdef _WIN64
3116   // Callee-saved in Win64 ABI, arguments/volatile in AMD64 ABI.
3117   __ pop(rsi);
3118   __ pop(rdi);
3119 #endif
3120   __ pop(r15);
3121   __ pop(r14);
3122   __ pop(r13);
3123   __ pop(r12);
3124   __ addq(rsp, Immediate(2 * kPointerSize));  // remove markers
3125 
3126   // Restore frame pointer and return.
3127   __ pop(rbp);
3128   __ ret(0);
3129 }
3130 
3131 
3132 void InstanceofStub::Generate(MacroAssembler* masm) {
3133   // Implements "value instanceof function" operator.
3134   // Expected input state with no inline cache:
3135   //   rsp[0] : return address
3136   //   rsp[1] : function pointer
3137   //   rsp[2] : value
3138   // Expected input state with an inline one-element cache:
3139   //   rsp[0] : return address
3140   //   rsp[1] : offset from return address to location of inline cache
3141   //   rsp[2] : function pointer
3142   //   rsp[3] : value
3143   // Returns a bitwise zero to indicate that the value
3144   // is an instance of the function and anything else to
3145   // indicate that the value is not an instance.
3146 
3147   static const int kOffsetToMapCheckValue = 2;
3148   static const int kOffsetToResultValue = 18;
3149   // The last 4 bytes of the instruction sequence
3150   //   movq(rdi, FieldOperand(rax, HeapObject::kMapOffset))
3151   //   Move(kScratchRegister, FACTORY->the_hole_value())
3152   // in front of the hole value address.
3153   static const unsigned int kWordBeforeMapCheckValue = 0xBA49FF78;
3154   // The last 4 bytes of the instruction sequence
3155   //   __ j(not_equal, &cache_miss);
3156   //   __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
3157   // before the offset of the hole value in the root array.
3158   static const unsigned int kWordBeforeResultValue = 0x458B4909;
3159   // Only the inline check flag is supported on X64.
3160   ASSERT(flags_ == kNoFlags || HasCallSiteInlineCheck());
3161   int extra_stack_space = HasCallSiteInlineCheck() ? kPointerSize : 0;
3162 
3163   // Get the object - go slow case if it's a smi.
3164   Label slow;
3165 
3166   __ movq(rax, Operand(rsp, 2 * kPointerSize + extra_stack_space));
3167   __ JumpIfSmi(rax, &slow);
3168 
3169   // Check that the left hand side is a JS object. Leave its map in rax.
3170   __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);
3171   __ j(below, &slow);
3172   __ CmpInstanceType(rax, LAST_JS_OBJECT_TYPE);
3173   __ j(above, &slow);
3174 
3175   // Get the prototype of the function.
3176   __ movq(rdx, Operand(rsp, 1 * kPointerSize + extra_stack_space));
3177   // rdx is function, rax is map.
3178 
3179   // If there is a call site cache, don't look in the global cache, but do
3180   // the real lookup and update the call site cache.
3181   if (!HasCallSiteInlineCheck()) {
3182     // Look up the function and the map in the instanceof cache.
3183     NearLabel miss;
3184     __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
3185     __ j(not_equal, &miss);
3186     __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex);
3187     __ j(not_equal, &miss);
3188     __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
3189     __ ret(2 * kPointerSize);
3190     __ bind(&miss);
3191   }
3192 
3193   __ TryGetFunctionPrototype(rdx, rbx, &slow);
3194 
3195   // Check that the function prototype is a JS object.
3196   __ JumpIfSmi(rbx, &slow);
3197   __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister);
3198   __ j(below, &slow);
3199   __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
3200   __ j(above, &slow);
3201 
3202   // Register mapping:
3203   //   rax is object map.
3204   //   rdx is function.
3205   //   rbx is function prototype.
3206   if (!HasCallSiteInlineCheck()) {
3207     __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
3208     __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
3209   } else {
3210     __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
3211     __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
3212     __ movq(Operand(kScratchRegister, kOffsetToMapCheckValue), rax);
3213     if (FLAG_debug_code) {
3214       __ movl(rdi, Immediate(kWordBeforeMapCheckValue));
3215       __ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi);
3216       __ Assert(equal, "InstanceofStub unexpected call site cache (check).");
3217     }
3218   }
3219 
3220   __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
3221 
3222   // Loop through the prototype chain looking for the function prototype.
3223   NearLabel loop, is_instance, is_not_instance;
3224   __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
3225   __ bind(&loop);
3226   __ cmpq(rcx, rbx);
3227   __ j(equal, &is_instance);
3228   __ cmpq(rcx, kScratchRegister);
3229   // The code at is_not_instance assumes that kScratchRegister contains a
3230   // non-zero GCable value (the null object in this case).
3231   __ j(equal, &is_not_instance);
3232   __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
3233   __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
3234   __ jmp(&loop);
3235 
3236   __ bind(&is_instance);
3237   if (!HasCallSiteInlineCheck()) {
3238     __ xorl(rax, rax);
3239     // Store bitwise zero in the cache.  This is a Smi in GC terms.
3240     STATIC_ASSERT(kSmiTag == 0);
3241     __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
3242   } else {
3243     // Store offset of true in the root array at the inline check site.
3244     ASSERT((Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias
3245         == 0xB0 - 0x100);
3246     __ movl(rax, Immediate(0xB0));  // TrueValue is at -10 * kPointerSize.
3247     __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
3248     __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
3249     __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
3250     if (FLAG_debug_code) {
3251       __ movl(rax, Immediate(kWordBeforeResultValue));
3252       __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
3253       __ Assert(equal, "InstanceofStub unexpected call site cache (mov).");
3254     }
3255     __ Set(rax, 0);
3256   }
3257   __ ret(2 * kPointerSize + extra_stack_space);
3258 
3259   __ bind(&is_not_instance);
3260   if (!HasCallSiteInlineCheck()) {
3261     // We have to store a non-zero value in the cache.
3262     __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
3263   } else {
3264     // Store offset of false in the root array at the inline check site.
3265     ASSERT((Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias
3266         == 0xB8 - 0x100);
3267     __ movl(rax, Immediate(0xB8));  // FalseValue is at -9 * kPointerSize.
3268     __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
3269     __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
3270     __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
3271     if (FLAG_debug_code) {
3272       __ movl(rax, Immediate(kWordBeforeResultValue));
3273       __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
3274       __ Assert(equal, "InstanceofStub unexpected call site cache (mov).");
3275     }
3276   }
3277   __ ret(2 * kPointerSize + extra_stack_space);
3278 
3279   // Slow-case: Go through the JavaScript implementation.
3280   __ bind(&slow);
3281   if (HasCallSiteInlineCheck()) {
3282     // Remove extra value from the stack.
3283     __ pop(rcx);
3284     __ pop(rax);
3285     __ push(rcx);
3286   }
3287   __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
3288 }
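
// For reference, the prototype-chain walk above corresponds roughly to the
// following C++ (an illustrative sketch with simplified accessors; the stub
// itself works directly on map words and root-array constants):
//
//   static bool IsInstance(JSObject* object, Object* function_prototype,
//                          Object* null_value) {
//     for (Object* p = object->map()->prototype();
//          p != null_value;
//          p = HeapObject::cast(p)->map()->prototype()) {
//       if (p == function_prototype) return true;
//     }
//     return false;
//   }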
3289 
3290 
3291 // Passing arguments in registers is not supported.
3292 Register InstanceofStub::left() { return no_reg; }
3293 
3294 
3295 Register InstanceofStub::right() { return no_reg; }
3296 
3297 
3298 int CompareStub::MinorKey() {
3299   // Encode the three parameters in a unique 16 bit value. To avoid duplicate
3300   // stubs, the never-NaN-NaN condition is only taken into account if the
3301   // condition is equals.
3302   ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
3303   ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
3304   return ConditionField::encode(static_cast<unsigned>(cc_))
3305          | RegisterField::encode(false)    // lhs_ and rhs_ are not used
3306          | StrictField::encode(strict_)
3307          | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
3308          | IncludeNumberCompareField::encode(include_number_compare_)
3309          | IncludeSmiCompareField::encode(include_smi_compare_);
3310 }
3311 
3312 
3313 // Unfortunately you have to run without snapshots to see most of these
3314 // names in the profile since most compare stubs end up in the snapshot.
3315 const char* CompareStub::GetName() {
3316   ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
3317 
3318   if (name_ != NULL) return name_;
3319   const int kMaxNameLength = 100;
3320   name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
3321       kMaxNameLength);
3322   if (name_ == NULL) return "OOM";
3323 
3324   const char* cc_name;
3325   switch (cc_) {
3326     case less: cc_name = "LT"; break;
3327     case greater: cc_name = "GT"; break;
3328     case less_equal: cc_name = "LE"; break;
3329     case greater_equal: cc_name = "GE"; break;
3330     case equal: cc_name = "EQ"; break;
3331     case not_equal: cc_name = "NE"; break;
3332     default: cc_name = "UnknownCondition"; break;
3333   }
3334 
3335   const char* strict_name = "";
3336   if (strict_ && (cc_ == equal || cc_ == not_equal)) {
3337     strict_name = "_STRICT";
3338   }
3339 
3340   const char* never_nan_nan_name = "";
3341   if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) {
3342     never_nan_nan_name = "_NO_NAN";
3343   }
3344 
3345   const char* include_number_compare_name = "";
3346   if (!include_number_compare_) {
3347     include_number_compare_name = "_NO_NUMBER";
3348   }
3349 
3350   const char* include_smi_compare_name = "";
3351   if (!include_smi_compare_) {
3352     include_smi_compare_name = "_NO_SMI";
3353   }
3354 
3355   OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
3356                "CompareStub_%s%s%s%s",
3357                cc_name,
3358                strict_name,
3359                never_nan_nan_name,
3360                include_number_compare_name,
3361                include_smi_compare_name);
3362   return name_;
3363 }
3364 
3365 
3366 // -------------------------------------------------------------------------
3367 // StringCharCodeAtGenerator
3368 
3369 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
3370   Label flat_string;
3371   Label ascii_string;
3372   Label got_char_code;
3373 
3374   // If the receiver is a smi trigger the non-string case.
3375   __ JumpIfSmi(object_, receiver_not_string_);
3376 
3377   // Fetch the instance type of the receiver into result register.
3378   __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
3379   __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
3380   // If the receiver is not a string trigger the non-string case.
3381   __ testb(result_, Immediate(kIsNotStringMask));
3382   __ j(not_zero, receiver_not_string_);
3383 
3384   // If the index is non-smi trigger the non-smi case.
3385   __ JumpIfNotSmi(index_, &index_not_smi_);
3386 
3387   // Put smi-tagged index into scratch register.
3388   __ movq(scratch_, index_);
3389   __ bind(&got_smi_index_);
3390 
3391   // Check for index out of range.
3392   __ SmiCompare(scratch_, FieldOperand(object_, String::kLengthOffset));
3393   __ j(above_equal, index_out_of_range_);
3394 
3395   // We need special handling for non-flat strings.
3396   STATIC_ASSERT(kSeqStringTag == 0);
3397   __ testb(result_, Immediate(kStringRepresentationMask));
3398   __ j(zero, &flat_string);
3399 
3400   // Handle non-flat strings.
3401   __ testb(result_, Immediate(kIsConsStringMask));
3402   __ j(zero, &call_runtime_);
3403 
3404   // ConsString.
3405   // Check whether the right hand side is the empty string (i.e. if
3406   // this is really a flat string in a cons string). If that is not
3407   // the case we would rather go to the runtime system now to flatten
3408   // the string.
3409   __ CompareRoot(FieldOperand(object_, ConsString::kSecondOffset),
3410                  Heap::kEmptyStringRootIndex);
3411   __ j(not_equal, &call_runtime_);
3412   // Get the first of the two strings and load its instance type.
3413   __ movq(object_, FieldOperand(object_, ConsString::kFirstOffset));
3414   __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
3415   __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
3416   // If the first cons component is also non-flat, then go to runtime.
3417   STATIC_ASSERT(kSeqStringTag == 0);
3418   __ testb(result_, Immediate(kStringRepresentationMask));
3419   __ j(not_zero, &call_runtime_);
3420 
3421   // Check for 1-byte or 2-byte string.
3422   __ bind(&flat_string);
3423   STATIC_ASSERT(kAsciiStringTag != 0);
3424   __ testb(result_, Immediate(kStringEncodingMask));
3425   __ j(not_zero, &ascii_string);
3426 
3427   // 2-byte string.
3428   // Load the 2-byte character code into the result register.
3429   __ SmiToInteger32(scratch_, scratch_);
3430   __ movzxwl(result_, FieldOperand(object_,
3431                                    scratch_, times_2,
3432                                    SeqTwoByteString::kHeaderSize));
3433   __ jmp(&got_char_code);
3434 
3435   // ASCII string.
3436   // Load the byte into the result register.
3437   __ bind(&ascii_string);
3438   __ SmiToInteger32(scratch_, scratch_);
3439   __ movzxbl(result_, FieldOperand(object_,
3440                                    scratch_, times_1,
3441                                    SeqAsciiString::kHeaderSize));
3442   __ bind(&got_char_code);
3443   __ Integer32ToSmi(result_, result_);
3444   __ bind(&exit_);
3445 }
3446 
3447 
3448 void StringCharCodeAtGenerator::GenerateSlow(
3449     MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
3450   __ Abort("Unexpected fallthrough to CharCodeAt slow case");
3451 
3452   // Index is not a smi.
3453   __ bind(&index_not_smi_);
3454   // If index is a heap number, try converting it to an integer.
3455   __ CheckMap(index_, FACTORY->heap_number_map(), index_not_number_, true);
3456   call_helper.BeforeCall(masm);
3457   __ push(object_);
3458   __ push(index_);
3459   __ push(index_);  // Consumed by runtime conversion function.
3460   if (index_flags_ == STRING_INDEX_IS_NUMBER) {
3461     __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
3462   } else {
3463     ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
3464     // NumberToSmi discards numbers that are not exact integers.
3465     __ CallRuntime(Runtime::kNumberToSmi, 1);
3466   }
3467   if (!scratch_.is(rax)) {
3468     // Save the conversion result before the pop instructions below
3469     // have a chance to overwrite it.
3470     __ movq(scratch_, rax);
3471   }
3472   __ pop(index_);
3473   __ pop(object_);
3474   // Reload the instance type.
3475   __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
3476   __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
3477   call_helper.AfterCall(masm);
3478   // If index is still not a smi, it must be out of range.
3479   __ JumpIfNotSmi(scratch_, index_out_of_range_);
3480   // Otherwise, return to the fast path.
3481   __ jmp(&got_smi_index_);
3482 
3483   // Call runtime. We get here when the receiver is a string and the
3484   // index is a number, but the code for getting the actual character
3485   // is too complex (e.g., when the string needs to be flattened).
3486   __ bind(&call_runtime_);
3487   call_helper.BeforeCall(masm);
3488   __ push(object_);
3489   __ push(index_);
3490   __ CallRuntime(Runtime::kStringCharCodeAt, 2);
3491   if (!result_.is(rax)) {
3492     __ movq(result_, rax);
3493   }
3494   call_helper.AfterCall(masm);
3495   __ jmp(&exit_);
3496 
3497   __ Abort("Unexpected fallthrough from CharCodeAt slow case");
3498 }
3499 
3500 
3501 // -------------------------------------------------------------------------
3502 // StringCharFromCodeGenerator
3503 
3504 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
3505   // Fast case of Heap::LookupSingleCharacterStringFromCode.
3506   __ JumpIfNotSmi(code_, &slow_case_);
3507   __ SmiCompare(code_, Smi::FromInt(String::kMaxAsciiCharCode));
3508   __ j(above, &slow_case_);
3509 
3510   __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
3511   SmiIndex index = masm->SmiToIndex(kScratchRegister, code_, kPointerSizeLog2);
3512   __ movq(result_, FieldOperand(result_, index.reg, index.scale,
3513                                 FixedArray::kHeaderSize));
3514   __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
3515   __ j(equal, &slow_case_);
3516   __ bind(&exit_);
3517 }
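
// The fast case above is just a cache lookup; in outline (a sketch of the
// same logic, not code the generator calls):
//
//   if (!code->IsSmi()) goto slow_case;
//   int ch = Smi::cast(code)->value();
//   if (ch > String::kMaxAsciiCharCode) goto slow_case;
//   Object* result = single_character_string_cache->get(ch);
//   if (result == undefined_value) goto slow_case;  // not cached yet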
3518 
3519 
3520 void StringCharFromCodeGenerator::GenerateSlow(
3521     MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
3522   __ Abort("Unexpected fallthrough to CharFromCode slow case");
3523 
3524   __ bind(&slow_case_);
3525   call_helper.BeforeCall(masm);
3526   __ push(code_);
3527   __ CallRuntime(Runtime::kCharFromCode, 1);
3528   if (!result_.is(rax)) {
3529     __ movq(result_, rax);
3530   }
3531   call_helper.AfterCall(masm);
3532   __ jmp(&exit_);
3533 
3534   __ Abort("Unexpected fallthrough from CharFromCode slow case");
3535 }
3536 
3537 
3538 // -------------------------------------------------------------------------
3539 // StringCharAtGenerator
3540 
3541 void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
3542   char_code_at_generator_.GenerateFast(masm);
3543   char_from_code_generator_.GenerateFast(masm);
3544 }
3545 
3546 
3547 void StringCharAtGenerator::GenerateSlow(
3548     MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
3549   char_code_at_generator_.GenerateSlow(masm, call_helper);
3550   char_from_code_generator_.GenerateSlow(masm, call_helper);
3551 }
3552 
3553 
3554 void StringAddStub::Generate(MacroAssembler* masm) {
3555   Label string_add_runtime, call_builtin;
3556   Builtins::JavaScript builtin_id = Builtins::ADD;
3557 
3558   // Load the two arguments.
3559   __ movq(rax, Operand(rsp, 2 * kPointerSize));  // First argument (left).
3560   __ movq(rdx, Operand(rsp, 1 * kPointerSize));  // Second argument (right).
3561 
3562   // Make sure that both arguments are strings if not known in advance.
3563   if (flags_ == NO_STRING_ADD_FLAGS) {
3564     Condition is_smi;
3565     is_smi = masm->CheckSmi(rax);
3566     __ j(is_smi, &string_add_runtime);
3567     __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8);
3568     __ j(above_equal, &string_add_runtime);
3569 
3570     // First argument is a string, test the second.
3571     is_smi = masm->CheckSmi(rdx);
3572     __ j(is_smi, &string_add_runtime);
3573     __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
3574     __ j(above_equal, &string_add_runtime);
3575   } else {
3576     // Here at least one of the arguments is definitely a string.
3577     // We convert the one that is not known to be a string.
3578     if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
3579       ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
3580       GenerateConvertArgument(masm, 2 * kPointerSize, rax, rbx, rcx, rdi,
3581                               &call_builtin);
3582       builtin_id = Builtins::STRING_ADD_RIGHT;
3583     } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
3584       ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
3585       GenerateConvertArgument(masm, 1 * kPointerSize, rdx, rbx, rcx, rdi,
3586                               &call_builtin);
3587       builtin_id = Builtins::STRING_ADD_LEFT;
3588     }
3589   }
3590 
3591   // Both arguments are strings.
3592   // rax: first string
3593   // rdx: second string
3594   // Check if either of the strings is empty. In that case, return the other.
3595   NearLabel second_not_zero_length, both_not_zero_length;
3596   __ movq(rcx, FieldOperand(rdx, String::kLengthOffset));
3597   __ SmiTest(rcx);
3598   __ j(not_zero, &second_not_zero_length);
3599   // Second string is empty, result is first string which is already in rax.
3600   Counters* counters = masm->isolate()->counters();
3601   __ IncrementCounter(counters->string_add_native(), 1);
3602   __ ret(2 * kPointerSize);
3603   __ bind(&second_not_zero_length);
3604   __ movq(rbx, FieldOperand(rax, String::kLengthOffset));
3605   __ SmiTest(rbx);
3606   __ j(not_zero, &both_not_zero_length);
3607   // First string is empty, result is second string which is in rdx.
3608   __ movq(rax, rdx);
3609   __ IncrementCounter(counters->string_add_native(), 1);
3610   __ ret(2 * kPointerSize);
3611 
3612   // Both strings are non-empty.
3613   // rax: first string
3614   // rbx: length of first string
3615   // rcx: length of second string
3616   // rdx: second string
3617   // r8: map of first string (if flags_ == NO_STRING_ADD_FLAGS)
3618   // r9: map of second string (if flags_ == NO_STRING_ADD_FLAGS)
3619   Label string_add_flat_result, longer_than_two;
3620   __ bind(&both_not_zero_length);
3621 
3622   // If the arguments were known to be strings, the maps were not loaded into
3623   // r8 and r9 by the code above.
3624   if (flags_ != NO_STRING_ADD_FLAGS) {
3625     __ movq(r8, FieldOperand(rax, HeapObject::kMapOffset));
3626     __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
3627   }
3628   // Get the instance types of the two strings as they will be needed soon.
3629   __ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset));
3630   __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
3631 
3632   // Look at the length of the result of adding the two strings.
3633   STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2);
3634   __ SmiAdd(rbx, rbx, rcx);
3635   // Use the symbol table when adding two one-character strings, as it
3636   // helps later optimizations to return a symbol here.
3637   __ SmiCompare(rbx, Smi::FromInt(2));
3638   __ j(not_equal, &longer_than_two);
3639 
3640   // Check that both strings are non-external ascii strings.
3641   __ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx,
3642                                                   &string_add_runtime);
3643 
3644   // Get the two characters forming the sub string.
3645   __ movzxbq(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
3646   __ movzxbq(rcx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
3647 
3648   // Try to look up the two-character string in the symbol table. If it is
3649   // not found, just allocate a new one.
3650   Label make_two_character_string, make_flat_ascii_string;
3651   StringHelper::GenerateTwoCharacterSymbolTableProbe(
3652       masm, rbx, rcx, r14, r11, rdi, r15, &make_two_character_string);
3653   __ IncrementCounter(counters->string_add_native(), 1);
3654   __ ret(2 * kPointerSize);
3655 
3656   __ bind(&make_two_character_string);
3657   __ Set(rbx, 2);
3658   __ jmp(&make_flat_ascii_string);
3659 
3660   __ bind(&longer_than_two);
3661   // Check if resulting string will be flat.
3662   __ SmiCompare(rbx, Smi::FromInt(String::kMinNonFlatLength));
3663   __ j(below, &string_add_flat_result);
3664   // Handle exceptionally long strings in the runtime system.
3665   STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
3666   __ SmiCompare(rbx, Smi::FromInt(String::kMaxLength));
3667   __ j(above, &string_add_runtime);
3668 
3669   // If result is not supposed to be flat, allocate a cons string object. If
3670   // both strings are ascii the result is an ascii cons string.
3671   // rax: first string
3672   // rbx: length of resulting flat string
3673   // rdx: second string
3674   // r8: instance type of first string
3675   // r9: instance type of second string
3676   Label non_ascii, allocated, ascii_data;
3677   __ movl(rcx, r8);
3678   __ and_(rcx, r9);
3679   STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
3680   __ testl(rcx, Immediate(kAsciiStringTag));
3681   __ j(zero, &non_ascii);
3682   __ bind(&ascii_data);
3683   // Allocate an ascii cons string.
3684   __ AllocateAsciiConsString(rcx, rdi, no_reg, &string_add_runtime);
3685   __ bind(&allocated);
3686   // Fill the fields of the cons string.
3687   __ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
3688   __ movq(FieldOperand(rcx, ConsString::kHashFieldOffset),
3689           Immediate(String::kEmptyHashField));
3690   __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
3691   __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
3692   __ movq(rax, rcx);
3693   __ IncrementCounter(counters->string_add_native(), 1);
3694   __ ret(2 * kPointerSize);
3695   __ bind(&non_ascii);
3696   // At least one of the strings is two-byte. Check whether it happens
3697   // to contain only ascii characters.
3698   // rcx: first instance type AND second instance type.
3699   // r8: first instance type.
3700   // r9: second instance type.
3701   __ testb(rcx, Immediate(kAsciiDataHintMask));
3702   __ j(not_zero, &ascii_data);
3703   __ xor_(r8, r9);
3704   STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
3705   __ andb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
3706   __ cmpb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
3707   __ j(equal, &ascii_data);
3708   // Allocate a two byte cons string.
3709   __ AllocateConsString(rcx, rdi, no_reg, &string_add_runtime);
3710   __ jmp(&allocated);
3711 
3712   // Handle creating a flat result. First check that both strings are not
3713   // external strings.
3714   // rax: first string
3715   // rbx: length of resulting flat string as smi
3716   // rdx: second string
3717   // r8: instance type of first string
3718   // r9: instance type of second string
3719   __ bind(&string_add_flat_result);
3720   __ SmiToInteger32(rbx, rbx);
3721   __ movl(rcx, r8);
3722   __ and_(rcx, Immediate(kStringRepresentationMask));
3723   __ cmpl(rcx, Immediate(kExternalStringTag));
3724   __ j(equal, &string_add_runtime);
3725   __ movl(rcx, r9);
3726   __ and_(rcx, Immediate(kStringRepresentationMask));
3727   __ cmpl(rcx, Immediate(kExternalStringTag));
3728   __ j(equal, &string_add_runtime);
3729   // Now check if both strings are ascii strings.
3730   // rax: first string
3731   // rbx: length of resulting flat string
3732   // rdx: second string
3733   // r8: instance type of first string
3734   // r9: instance type of second string
3735   Label non_ascii_string_add_flat_result;
3736   STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
3737   __ testl(r8, Immediate(kAsciiStringTag));
3738   __ j(zero, &non_ascii_string_add_flat_result);
3739   __ testl(r9, Immediate(kAsciiStringTag));
3740   __ j(zero, &string_add_runtime);
3741 
3742   __ bind(&make_flat_ascii_string);
3743   // Both strings are ascii strings. As they are short they are both flat.
3744   __ AllocateAsciiString(rcx, rbx, rdi, r14, r11, &string_add_runtime);
3745   // rcx: result string
3746   __ movq(rbx, rcx);
3747   // Locate first character of result.
3748   __ addq(rcx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
3749   // Locate first character of first argument
3750   __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
3751   __ addq(rax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
3752   // rax: first char of first argument
3753   // rbx: result string
3754   // rcx: first character of result
3755   // rdx: second string
3756   // rdi: length of first argument
3757   StringHelper::GenerateCopyCharacters(masm, rcx, rax, rdi, true);
3758   // Locate first character of second argument.
3759   __ SmiToInteger32(rdi, FieldOperand(rdx, String::kLengthOffset));
3760   __ addq(rdx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
3761   // rbx: result string
3762   // rcx: next character of result
3763   // rdx: first char of second argument
3764   // rdi: length of second argument
3765   StringHelper::GenerateCopyCharacters(masm, rcx, rdx, rdi, true);
3766   __ movq(rax, rbx);
3767   __ IncrementCounter(counters->string_add_native(), 1);
3768   __ ret(2 * kPointerSize);
3769 
3770   // Handle creating a flat two byte result.
3771   // rax: first string - known to be two byte
3772   // rbx: length of resulting flat string
3773   // rdx: second string
3774   // r8: instance type of first string
3775   // r9: instance type of second string
3776   __ bind(&non_ascii_string_add_flat_result);
3777   __ and_(r9, Immediate(kAsciiStringTag));
3778   __ j(not_zero, &string_add_runtime);
3779   // Both strings are two byte strings. As they are short they are both
3780   // flat.
3781   __ AllocateTwoByteString(rcx, rbx, rdi, r14, r11, &string_add_runtime);
3782   // rcx: result string
3783   __ movq(rbx, rcx);
3784   // Locate first character of result.
3785   __ addq(rcx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
3786   // Locate first character of first argument.
3787   __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
3788   __ addq(rax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
3789   // rax: first char of first argument
3790   // rbx: result string
3791   // rcx: first character of result
3792   // rdx: second argument
3793   // rdi: length of first argument
3794   StringHelper::GenerateCopyCharacters(masm, rcx, rax, rdi, false);
3795   // Locate first character of second argument.
3796   __ SmiToInteger32(rdi, FieldOperand(rdx, String::kLengthOffset));
3797   __ addq(rdx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
3798   // rbx: result string
3799   // rcx: next character of result
3800   // rdx: first char of second argument
3801   // rdi: length of second argument
3802   StringHelper::GenerateCopyCharacters(masm, rcx, rdx, rdi, false);
3803   __ movq(rax, rbx);
3804   __ IncrementCounter(counters->string_add_native(), 1);
3805   __ ret(2 * kPointerSize);
3806 
3807   // Just jump to runtime to add the two strings.
3808   __ bind(&string_add_runtime);
3809   __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
3810 
3811   if (call_builtin.is_linked()) {
3812     __ bind(&call_builtin);
3813     __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
3814   }
3815 }
3816 
3817 
3818 void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
3819                                             int stack_offset,
3820                                             Register arg,
3821                                             Register scratch1,
3822                                             Register scratch2,
3823                                             Register scratch3,
3824                                             Label* slow) {
3825   // First check if the argument is already a string.
3826   Label not_string, done;
3827   __ JumpIfSmi(arg, &not_string);
3828   __ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1);
3829   __ j(below, &done);
3830 
3831   // Check the number to string cache.
3832   Label not_cached;
3833   __ bind(&not_string);
3834   // Puts the cached result into scratch1.
3835   NumberToStringStub::GenerateLookupNumberStringCache(masm,
3836                                                       arg,
3837                                                       scratch1,
3838                                                       scratch2,
3839                                                       scratch3,
3840                                                       false,
3841                                                       &not_cached);
3842   __ movq(arg, scratch1);
3843   __ movq(Operand(rsp, stack_offset), arg);
3844   __ jmp(&done);
3845 
3846   // Check if the argument is a safe string wrapper.
3847   __ bind(&not_cached);
3848   __ JumpIfSmi(arg, slow);
3849   __ CmpObjectType(arg, JS_VALUE_TYPE, scratch1);  // map -> scratch1.
3850   __ j(not_equal, slow);
3851   __ testb(FieldOperand(scratch1, Map::kBitField2Offset),
3852            Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
3853   __ j(zero, slow);
3854   __ movq(arg, FieldOperand(arg, JSValue::kValueOffset));
3855   __ movq(Operand(rsp, stack_offset), arg);
3856 
3857   __ bind(&done);
3858 }
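
// In outline, the conversion above tries three ways to obtain a string before
// falling back to the builtin (a sketch; IsSafeStringWrapper stands in for
// the Map::kStringWrapperSafeForDefaultValueOf bit test):
//
//   if (IsString(arg)) { /* done */ }
//   else if (NumberToStringCacheLookup(arg, &cached)) arg = cached;
//   else if (IsSafeStringWrapper(arg)) arg = JSValue::cast(arg)->value();
//   else goto slow;  // let the builtin do the conversion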
3859 
3860 
3861 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
3862                                           Register dest,
3863                                           Register src,
3864                                           Register count,
3865                                           bool ascii) {
3866   Label loop;
3867   __ bind(&loop);
3868   // This loop just copies one character at a time, as it is only used for very
3869   // short strings.
3870   if (ascii) {
3871     __ movb(kScratchRegister, Operand(src, 0));
3872     __ movb(Operand(dest, 0), kScratchRegister);
3873     __ incq(src);
3874     __ incq(dest);
3875   } else {
3876     __ movzxwl(kScratchRegister, Operand(src, 0));
3877     __ movw(Operand(dest, 0), kScratchRegister);
3878     __ addq(src, Immediate(2));
3879     __ addq(dest, Immediate(2));
3880   }
3881   __ decl(count);
3882   __ j(not_zero, &loop);
3883 }
3884 
3885 
3886 void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
3887                                              Register dest,
3888                                              Register src,
3889                                              Register count,
3890                                              bool ascii) {
3891   // Copy characters using rep movs of quadwords. Copy any remaining bytes
3892   // one at a time after running rep movs; the destination is not aligned
3893   // before starting rep movs.
3894   // Count is a positive int32; dest and src are character pointers.
3895   ASSERT(dest.is(rdi));  // rep movs destination
3896   ASSERT(src.is(rsi));  // rep movs source
3897   ASSERT(count.is(rcx));  // rep movs count
3898 
3899   // Nothing to do for zero characters.
3900   NearLabel done;
3901   __ testl(count, count);
3902   __ j(zero, &done);
3903 
3904   // Make count the number of bytes to copy.
3905   if (!ascii) {
3906     STATIC_ASSERT(2 == sizeof(uc16));
3907     __ addl(count, count);
3908   }
3909 
3910   // Don't enter the rep movs if there are fewer than 8 bytes to copy.
3911   NearLabel last_bytes;
3912   __ testl(count, Immediate(~7));
3913   __ j(zero, &last_bytes);
3914 
3915   // Copy from rsi to rdi using the rep movs instruction.
3916   __ movl(kScratchRegister, count);
3917   __ shr(count, Immediate(3));  // Number of quadwords to copy.
3918   __ repmovsq();
3919 
3920   // Find number of bytes left.
3921   __ movl(count, kScratchRegister);
3922   __ and_(count, Immediate(7));
3923 
3924   // Check if there are more bytes to copy.
3925   __ bind(&last_bytes);
3926   __ testl(count, count);
3927   __ j(zero, &done);
3928 
3929   // Copy remaining characters.
3930   Label loop;
3931   __ bind(&loop);
3932   __ movb(kScratchRegister, Operand(src, 0));
3933   __ movb(Operand(dest, 0), kScratchRegister);
3934   __ incq(src);
3935   __ incq(dest);
3936   __ decl(count);
3937   __ j(not_zero, &loop);
3938 
3939   __ bind(&done);
3940 }
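
// The copy strategy above in plain C++ (a minimal sketch; the stub works on
// raw byte pointers in rsi/rdi with the byte count in rcx):
//
//   static void CopyBytes(uint8_t* dest, const uint8_t* src, uint32_t count) {
//     for (uint32_t n = count >> 3; n > 0; n--) {  // the rep movsq part
//       memcpy(dest, src, 8);                      // one quadword at a time
//       dest += 8;
//       src += 8;
//     }
//     for (uint32_t n = count & 7; n > 0; n--) {   // remaining tail bytes
//       *dest++ = *src++;
//     }
//   }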
3941 
3942 void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
3943                                                         Register c1,
3944                                                         Register c2,
3945                                                         Register scratch1,
3946                                                         Register scratch2,
3947                                                         Register scratch3,
3948                                                         Register scratch4,
3949                                                         Label* not_found) {
3950   // Register scratch3 is the general scratch register in this function.
3951   Register scratch = scratch3;
3952 
3953   // Make sure that both characters are not digits, as such strings have a
3954   // different hash algorithm. Don't try to look for these in the symbol table.
3955   NearLabel not_array_index;
3956   __ leal(scratch, Operand(c1, -'0'));
3957   __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
3958   __ j(above, &not_array_index);
3959   __ leal(scratch, Operand(c2, -'0'));
3960   __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
3961   __ j(below_equal, not_found);
3962 
3963   __ bind(&not_array_index);
3964   // Calculate the two character string hash.
3965   Register hash = scratch1;
3966   GenerateHashInit(masm, hash, c1, scratch);
3967   GenerateHashAddCharacter(masm, hash, c2, scratch);
3968   GenerateHashGetHash(masm, hash, scratch);
3969 
3970   // Collect the two characters in a register.
3971   Register chars = c1;
3972   __ shl(c2, Immediate(kBitsPerByte));
3973   __ orl(chars, c2);
3974 
3975   // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
3976   // hash:  hash of two character string.
3977 
3978   // Load the symbol table.
3979   Register symbol_table = c2;
3980   __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
3981 
3982   // Calculate capacity mask from the symbol table capacity.
3983   Register mask = scratch2;
3984   __ SmiToInteger32(mask,
3985                     FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
3986   __ decl(mask);
3987 
3988   Register map = scratch4;
3989 
3990   // Registers
3991   // chars:        two character string, char 1 in byte 0 and char 2 in byte 1.
3992   // hash:         hash of two character string (32-bit int)
3993   // symbol_table: symbol table
3994   // mask:         capacity mask (32-bit int)
3995   // map:          -
3996   // scratch:      -
3997 
3998   // Perform a number of probes in the symbol table.
3999   static const int kProbes = 4;
4000   Label found_in_symbol_table;
4001   Label next_probe[kProbes];
4002   for (int i = 0; i < kProbes; i++) {
4003     // Calculate entry in symbol table.
4004     __ movl(scratch, hash);
4005     if (i > 0) {
4006       __ addl(scratch, Immediate(SymbolTable::GetProbeOffset(i)));
4007     }
4008     __ andl(scratch, mask);
4009 
4010     // Load the entry from the symbol table.
4011     Register candidate = scratch;  // Scratch register contains candidate.
4012     STATIC_ASSERT(SymbolTable::kEntrySize == 1);
4013     __ movq(candidate,
4014             FieldOperand(symbol_table,
4015                          scratch,
4016                          times_pointer_size,
4017                          SymbolTable::kElementsStartOffset));
4018 
4019     // If entry is undefined no string with this hash can be found.
4020     NearLabel is_string;
4021     __ CmpObjectType(candidate, ODDBALL_TYPE, map);
4022     __ j(not_equal, &is_string);
4023 
4024     __ CompareRoot(candidate, Heap::kUndefinedValueRootIndex);
4025     __ j(equal, not_found);
4026     // Must be null (deleted entry).
4027     __ jmp(&next_probe[i]);
4028 
4029     __ bind(&is_string);
4030 
4031     // If length is not 2 the string is not a candidate.
4032     __ SmiCompare(FieldOperand(candidate, String::kLengthOffset),
4033                   Smi::FromInt(2));
4034     __ j(not_equal, &next_probe[i]);
4035 
4036     // We use kScratchRegister as a temporary register on the assumption that
4037     // JumpIfInstanceTypeIsNotSequentialAscii does not use it implicitly.
4038     Register temp = kScratchRegister;
4039 
4040     // Check that the candidate is a non-external ascii string.
4041     __ movzxbl(temp, FieldOperand(map, Map::kInstanceTypeOffset));
4042     __ JumpIfInstanceTypeIsNotSequentialAscii(
4043         temp, temp, &next_probe[i]);
4044 
4045     // Check if the two characters match.
4046     __ movl(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
4047     __ andl(temp, Immediate(0x0000ffff));
4048     __ cmpl(chars, temp);
4049     __ j(equal, &found_in_symbol_table);
4050     __ bind(&next_probe[i]);
4051   }
4052 
4053   // No matching 2 character string found by probing.
4054   __ jmp(not_found);
4055 
4056   // Scratch register contains result when we fall through to here.
4057   Register result = scratch;
4058   __ bind(&found_in_symbol_table);
4059   if (!result.is(rax)) {
4060     __ movq(rax, result);
4061   }
4062 }
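
// The probe sequence above, in outline (illustrative pseudo-C++; ElementAt is
// a hypothetical accessor, and GetProbeOffset(0) is zero, which is why the
// first iteration adds no offset):
//
//   for (int i = 0; i < kProbes; i++) {
//     int entry = (hash + SymbolTable::GetProbeOffset(i)) & mask;
//     Object* candidate = table->ElementAt(entry);
//     if (candidate == undefined_value) return not_found;  // hash absent
//     if (candidate == null_value) continue;               // deleted entry
//     if (IsSequentialAsciiStringOfLengthTwo(candidate) &&
//         FirstTwoCharacters(candidate) == chars) return candidate;
//   }
//   return not_found;  // all probes missed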
4063 
4064 
4065 void StringHelper::GenerateHashInit(MacroAssembler* masm,
4066                                     Register hash,
4067                                     Register character,
4068                                     Register scratch) {
4069   // hash = character + (character << 10);
4070   __ movl(hash, character);
4071   __ shll(hash, Immediate(10));
4072   __ addl(hash, character);
4073   // hash ^= hash >> 6;
4074   __ movl(scratch, hash);
4075   __ sarl(scratch, Immediate(6));
4076   __ xorl(hash, scratch);
4077 }
4078 
4079 
4080 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
4081                                             Register hash,
4082                                             Register character,
4083                                             Register scratch) {
4084   // hash += character;
4085   __ addl(hash, character);
4086   // hash += hash << 10;
4087   __ movl(scratch, hash);
4088   __ shll(scratch, Immediate(10));
4089   __ addl(hash, scratch);
4090   // hash ^= hash >> 6;
4091   __ movl(scratch, hash);
4092   __ sarl(scratch, Immediate(6));
4093   __ xorl(hash, scratch);
4094 }
4095 
4096 
4097 void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
4098                                        Register hash,
4099                                        Register scratch) {
4100   // hash += hash << 3;
4101   __ leal(hash, Operand(hash, hash, times_8, 0));
4102   // hash ^= hash >> 11;
4103   __ movl(scratch, hash);
4104   __ sarl(scratch, Immediate(11));
4105   __ xorl(hash, scratch);
4106   // hash += hash << 15;
4107   __ movl(scratch, hash);
4108   __ shll(scratch, Immediate(15));
4109   __ addl(hash, scratch);
4110 
4111   // if (hash == 0) hash = 27;
4112   Label hash_not_zero;
4113   __ j(not_zero, &hash_not_zero);
4114   __ Set(hash, 27);
4115   __ bind(&hash_not_zero);
4116 }
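
// Taken together, the three hash helpers above compute a Jenkins-style
// one-at-a-time hash of the characters. A rough scalar equivalent (a
// sketch only; the signed >> mirrors the sarl instructions above):
//
//   int32_t hash = c0 + (c0 << 10);      // GenerateHashInit
//   hash ^= hash >> 6;
//   for each following character c {     // GenerateHashAddCharacter
//     hash += c;
//     hash += hash << 10;
//     hash ^= hash >> 6;
//   }
//   hash += hash << 3;                   // GenerateHashGetHash
//   hash ^= hash >> 11;
//   hash += hash << 15;
//   if (hash == 0) hash = 27;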


void SubStringStub::Generate(MacroAssembler* masm) {
  Label runtime;

  // Stack frame on entry.
  //  rsp[0]: return address
  //  rsp[8]: to
  //  rsp[16]: from
  //  rsp[24]: string

  const int kToOffset = 1 * kPointerSize;
  const int kFromOffset = kToOffset + kPointerSize;
  const int kStringOffset = kFromOffset + kPointerSize;
  const int kArgumentsSize = (kStringOffset + kPointerSize) - kToOffset;
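  // kArgumentsSize works out to 3 * kPointerSize, the size of the three
  // arguments above; the ret instructions below use it to drop the
  // arguments when returning.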

  // Make sure the first argument is a string.
  __ movq(rax, Operand(rsp, kStringOffset));
  STATIC_ASSERT(kSmiTag == 0);
  __ testl(rax, Immediate(kSmiTagMask));
  __ j(zero, &runtime);
  Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
  __ j(NegateCondition(is_string), &runtime);

  // rax: string
  // rbx: instance type
  // Calculate the length of the sub string using the smi values.
  Label result_longer_than_two;
  __ movq(rcx, Operand(rsp, kToOffset));
  __ movq(rdx, Operand(rsp, kFromOffset));
  __ JumpUnlessBothNonNegativeSmi(rcx, rdx, &runtime);

  __ SmiSub(rcx, rcx, rdx);  // Overflow doesn't happen; both are non-negative.
  __ cmpq(FieldOperand(rax, String::kLengthOffset), rcx);
  Label return_rax;
  __ j(equal, &return_rax);
  // Special handling of sub-strings of length 1 and 2. One character strings
  // are handled in the runtime system (looked up in the single character
  // cache). Two character strings are looked up in the symbol table.
  __ SmiToInteger32(rcx, rcx);
  __ cmpl(rcx, Immediate(2));
  __ j(greater, &result_longer_than_two);
  __ j(less, &runtime);

  // Sub string of length 2 requested.
  // rax: string
  // rbx: instance type
  // rcx: sub string length (value is 2)
  // rdx: from index (smi)
  __ JumpIfInstanceTypeIsNotSequentialAscii(rbx, rbx, &runtime);

  // Get the two characters forming the sub string.
  __ SmiToInteger32(rdx, rdx);  // From index is no longer a smi.
  __ movzxbq(rbx, FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize));
  __ movzxbq(rcx,
             FieldOperand(rax, rdx, times_1, SeqAsciiString::kHeaderSize + 1));

  // Try to look up the two character string in the symbol table.
  Label make_two_character_string;
  StringHelper::GenerateTwoCharacterSymbolTableProbe(
      masm, rbx, rcx, rax, rdx, rdi, r14, &make_two_character_string);
  __ ret(3 * kPointerSize);

  __ bind(&make_two_character_string);
  // Set up registers for allocating the two character string.
  __ movq(rax, Operand(rsp, kStringOffset));
  __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
  __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
  __ Set(rcx, 2);

  __ bind(&result_longer_than_two);

  // rax: string
  // rbx: instance type
  // rcx: result string length
  // Check for a flat ascii string.
  Label non_ascii_flat;
  __ JumpIfInstanceTypeIsNotSequentialAscii(rbx, rbx, &non_ascii_flat);

  // Allocate the result.
  __ AllocateAsciiString(rax, rcx, rbx, rdx, rdi, &runtime);

  // rax: result string
  // rcx: result string length
  __ movq(rdx, rsi);  // rsi is used by the code that follows.
  // Locate the first character of the result.
  __ lea(rdi, FieldOperand(rax, SeqAsciiString::kHeaderSize));
  // Load the string argument and locate the start character of the sub string.
  __ movq(rsi, Operand(rsp, kStringOffset));
  __ movq(rbx, Operand(rsp, kFromOffset));
  {
    SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_1);
    __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
                        SeqAsciiString::kHeaderSize - kHeapObjectTag));
  }

  // rax: result string
  // rcx: result length
  // rdx: original value of rsi
  // rdi: first character of result
  // rsi: character of sub string start
  StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
  __ movq(rsi, rdx);  // Restore rsi.
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->sub_string_native(), 1);
  __ ret(kArgumentsSize);

  __ bind(&non_ascii_flat);
  // rax: string
  // rbx: instance type & (kStringRepresentationMask | kStringEncodingMask)
  // rcx: result string length
  // Check for a sequential two byte string.
  __ cmpb(rbx, Immediate(kSeqStringTag | kTwoByteStringTag));
  __ j(not_equal, &runtime);

  // Allocate the result.
  __ AllocateTwoByteString(rax, rcx, rbx, rdx, rdi, &runtime);

  // rax: result string
  // rcx: result string length
  __ movq(rdx, rsi);  // rsi is used by the code that follows.
  // Locate the first character of the result.
  __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
  // Load the string argument and locate the start character of the sub string.
  __ movq(rsi, Operand(rsp, kStringOffset));
  __ movq(rbx, Operand(rsp, kFromOffset));
  {
    SmiIndex smi_as_index = masm->SmiToIndex(rbx, rbx, times_2);
    // SeqAsciiString::kHeaderSize has the same value as
    // SeqTwoByteString::kHeaderSize, so this addresses the two byte
    // string correctly.
    __ lea(rsi, Operand(rsi, smi_as_index.reg, smi_as_index.scale,
                        SeqAsciiString::kHeaderSize - kHeapObjectTag));
  }

  // rax: result string
  // rcx: result length
  // rdx: original value of rsi
  // rdi: first character of result
  // rsi: character of sub string start
  StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
  __ movq(rsi, rdx);  // Restore rsi.

  __ bind(&return_rax);
  __ IncrementCounter(counters->sub_string_native(), 1);
  __ ret(kArgumentsSize);

  // Just jump to the runtime to create the sub string.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kSubString, 3, 1);
}
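
// Fast-path dispatch of the stub above, in rough pseudocode
// (illustrative only):
//
//   if (!IsString(string) || !AreNonNegativeSmis(to, from)) goto runtime;
//   len = to - from;
//   if (len == string.length) return string;     // Whole string requested.
//   if (len < 2) goto runtime;                   // Uses the runtime caches.
//   if (len == 2) return two-char symbol table lookup / new string;
//   if (flat ascii) copy bytes into a new ascii string;
//   else if (flat two byte) copy characters into a new two byte string;
//   else goto runtime;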


void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                                        Register left,
                                                        Register right,
                                                        Register scratch1,
                                                        Register scratch2,
                                                        Register scratch3,
                                                        Register scratch4) {
  // Ensure that you can always subtract a string length from a
  // non-negative number (e.g. another length).
  STATIC_ASSERT(String::kMaxLength < 0x7fffffff);

  // Find the minimum length and the length difference.
  __ movq(scratch1, FieldOperand(left, String::kLengthOffset));
  __ movq(scratch4, scratch1);
  __ SmiSub(scratch4,
            scratch4,
            FieldOperand(right, String::kLengthOffset));
  // Register scratch4 now holds left.length - right.length.
  const Register length_difference = scratch4;
  NearLabel left_shorter;
  __ j(less, &left_shorter);
  // The right string isn't longer than the left one.
  // Get the right string's length by subtracting the (non-negative)
  // difference from the left string's length.
  __ SmiSub(scratch1, scratch1, length_difference);
  __ bind(&left_shorter);
  // Register scratch1 now holds Min(left.length, right.length).
  const Register min_length = scratch1;

  NearLabel compare_lengths;
  // If min-length is zero, go directly to comparing lengths.
  __ SmiTest(min_length);
  __ j(zero, &compare_lengths);

  __ SmiToInteger32(min_length, min_length);

  // Registers scratch2 and scratch3 are free.
  NearLabel result_not_equal;
  Label loop;
  {
    // Check characters 0 .. min_length - 1 in a loop.
    // Use scratch3 as the loop index, min_length as the limit and
    // scratch2 for computation.
    const Register index = scratch3;
    __ Set(index, 0);  // Index into strings.
    __ bind(&loop);
    // Compare characters.
    // TODO(lrn): Could we load more than one character at a time?
    __ movb(scratch2, FieldOperand(left,
                                   index,
                                   times_1,
                                   SeqAsciiString::kHeaderSize));
    // Increment the index and use a -1 displacement on the next load to
    // give the previous load extra time to complete.
    __ addl(index, Immediate(1));
    __ cmpb(scratch2, FieldOperand(right,
                                   index,
                                   times_1,
                                   SeqAsciiString::kHeaderSize - 1));
    __ j(not_equal, &result_not_equal);
    __ cmpl(index, min_length);
    __ j(not_equal, &loop);
  }
  // Completed the loop without finding different characters.
  // Compare the lengths (difference precomputed above).
  __ bind(&compare_lengths);
  __ SmiTest(length_difference);
  __ j(not_zero, &result_not_equal);

  // Result is EQUAL.
  __ Move(rax, Smi::FromInt(EQUAL));
  __ ret(0);

  NearLabel result_greater;
  __ bind(&result_not_equal);
  // Unequal result, from either a character or the length comparison.
  __ j(greater, &result_greater);

  // Result is LESS.
  __ Move(rax, Smi::FromInt(LESS));
  __ ret(0);

  // Result is GREATER.
  __ bind(&result_greater);
  __ Move(rax, Smi::FromInt(GREATER));
  __ ret(0);
}
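
// In C terms, the flat ascii comparison above is roughly the following
// (illustrative only):
//
//   int min_length = Min(left.length, right.length);
//   for (int i = 0; i < min_length; i++) {
//     if (left[i] != right[i]) return left[i] < right[i] ? LESS : GREATER;
//   }
//   if (left.length == right.length) return EQUAL;
//   return left.length < right.length ? LESS : GREATER;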


void StringCompareStub::Generate(MacroAssembler* masm) {
  Label runtime;

  // Stack frame on entry.
  //  rsp[0]: return address
  //  rsp[8]: right string
  //  rsp[16]: left string

  __ movq(rdx, Operand(rsp, 2 * kPointerSize));  // left
  __ movq(rax, Operand(rsp, 1 * kPointerSize));  // right

  // Check for identity.
  NearLabel not_same;
  __ cmpq(rdx, rax);
  __ j(not_equal, &not_same);
  __ Move(rax, Smi::FromInt(EQUAL));
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->string_compare_native(), 1);
  __ ret(2 * kPointerSize);

  __ bind(&not_same);

  // Check that both are sequential ASCII strings.
  __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime);

  // Inline comparison of ascii strings.
  __ IncrementCounter(counters->string_compare_native(), 1);
  // Drop the two string arguments from the stack while preserving the
  // return address, so that GenerateCompareFlatAsciiStrings can return
  // with ret(0).
  __ pop(rcx);
  __ addq(rsp, Immediate(2 * kPointerSize));
  __ push(rcx);
  GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8);

  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
  // tagged as a small integer.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}


void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::SMIS);
  NearLabel miss;
  __ JumpIfNotBothSmi(rdx, rax, &miss);

  if (GetCondition() == equal) {
    // For equality we do not care about the sign of the result.
    __ subq(rax, rdx);
  } else {
    NearLabel done;
    __ subq(rdx, rax);
    __ j(no_overflow, &done);
    // Correct the sign of the result in case of overflow.
    __ SmiNot(rdx, rdx);
    __ bind(&done);
    __ movq(rax, rdx);
  }
  __ ret(0);

  __ bind(&miss);
  GenerateMiss(masm);
}
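
// The else-branch above relies on a sign trick: rdx - rax can overflow
// when the operands have opposite signs, and then the sign bit of the
// result is exactly inverted. SmiNot (bitwise complement) flips the sign
// bit back while keeping the value nonzero, so the caller still gets a
// correctly signed result. Roughly:
//
//   int64_t diff = left - right;    // Tagged subtraction; may overflow.
//   if (overflowed) diff = ~diff;   // Restore the correct sign.
//   return diff;                    // < 0, == 0 or > 0 as required.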


void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::HEAP_NUMBERS);

  NearLabel generic_stub;
  NearLabel unordered;
  NearLabel miss;
  Condition either_smi = masm->CheckEitherSmi(rax, rdx);
  __ j(either_smi, &generic_stub);

  __ CmpObjectType(rax, HEAP_NUMBER_TYPE, rcx);
  __ j(not_equal, &miss);
  __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
  __ j(not_equal, &miss);

  // Load the left and right operands.
  __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
  __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));

  // Compare the operands.
  __ ucomisd(xmm0, xmm1);

  // Don't base the result on EFLAGS when a NaN is involved.
  __ j(parity_even, &unordered);

  // Return a result of -1, 0, or 1, based on EFLAGS.
  // A mov is used because xor would destroy the flag register.
  __ movl(rax, Immediate(0));
  __ movl(rcx, Immediate(0));
  __ setcc(above, rax);  // Set rax to 1 if above (CF and ZF both clear).
  __ sbbq(rax, rcx);  // Subtract 1 if below (i.e. carry set).
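  // ucomisd sets the flags as follows:
  //   left > right   =>  ZF = 0, CF = 0  ("above")
  //   left < right   =>  CF = 1          ("below")
  //   left == right  =>  ZF = 1, CF = 0
  // setcc(above) therefore puts 1 in rax only in the "greater" case, and
  // sbbq subtracts the carry flag from it, yielding 1, 0 or -1.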
  __ ret(0);

  __ bind(&unordered);

  CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
  __ bind(&generic_stub);
  __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::OBJECTS);
  NearLabel miss;
  Condition either_smi = masm->CheckEitherSmi(rdx, rax);
  __ j(either_smi, &miss);

  __ CmpObjectType(rax, JS_OBJECT_TYPE, rcx);
  __ j(not_equal, &miss);
  __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
  __ j(not_equal, &miss);

  ASSERT(GetCondition() == equal);
  __ subq(rax, rdx);
  __ ret(0);

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
  // Save the operand registers on the stack, keeping the return address
  // on top.
  __ pop(rcx);   // Return address.
  __ push(rdx);
  __ push(rax);
  __ push(rcx);  // Return address back on top.
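  // Stack layout after the save sequence above:
  //   rsp[0]  : return address
  //   rsp[8]  : rax (one operand)
  //   rsp[16] : rdx (the other operand)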

  // Call the runtime system in a fresh internal frame.
  ExternalReference miss =
      ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
  __ EnterInternalFrame();
  __ push(rdx);
  __ push(rax);
  __ Push(Smi::FromInt(op_));
  __ CallExternalReference(miss, 3);
  __ LeaveInternalFrame();

  // Compute the entry point of the rewritten stub.
  __ lea(rdi, FieldOperand(rax, Code::kHeaderSize));

  // Restore the saved operands and put the return address back on top.
  __ pop(rcx);
  __ pop(rax);
  __ pop(rdx);
  __ push(rcx);

  // Do a tail call to the rewritten stub.
  __ jmp(rdi);
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64