// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_X64

#include "src/codegen.h"
#include "src/macro-assembler.h"

namespace v8 {
namespace internal {

// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  DCHECK(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  DCHECK(masm->has_frame());
  masm->set_has_frame(false);
}


#define __ masm.


UnaryMathFunction CreateExpFunction() {
  if (!FLAG_fast_math) return &std::exp;
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &std::exp;
  ExternalReference::InitializeMathExpData();

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  // xmm0: raw double input.
  XMMRegister input = xmm0;
  XMMRegister result = xmm1;
  __ pushq(rax);
  __ pushq(rbx);

  MathExpGenerator::EmitMathExp(&masm, input, result, xmm2, rax, rbx);

  __ popq(rbx);
  __ popq(rax);
  __ movsd(xmm0, result);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  CpuFeatures::FlushICache(buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
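
// Illustrative use of the returned pointer (a sketch, not code from this
// file; the actual install site lives elsewhere in V8):
//   UnaryMathFunction fast_exp = CreateExpFunction();
//   double y = fast_exp(1.0);  // ~2.718281828, or std::exp if fast math is off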


UnaryMathFunction CreateSqrtFunction() {
  size_t actual_size;
  // Allocate buffer in executable space.
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &std::sqrt;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  // xmm0: raw double input.
  // Move double input into registers.
  __ sqrtsd(xmm0, xmm0);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  CpuFeatures::FlushICache(buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
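
// The generated body is just `sqrtsd xmm0, xmm0; ret`: since both the
// System V and Win64 ABIs pass the first double argument and return the
// double result in xmm0, the whole stub behaves like this C++ sketch:
//   double fast_sqrt(double x) { return std::sqrt(x); }  // one hardware insn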


#ifdef _WIN64
typedef double (*ModuloFunction)(double, double);
// Define custom fmod implementation.
ModuloFunction CreateModuloFunction() {
  size_t actual_size;
  byte* buffer = static_cast<byte*>(
      base::OS::Allocate(Assembler::kMinimalBufferSize, &actual_size, true));
  CHECK(buffer);
  Assembler masm(NULL, buffer, static_cast<int>(actual_size));
  // Generated code is put into a fixed, unmovable, buffer, and not into
  // the V8 heap. We can't, and don't, refer to any relocatable addresses
  // (e.g. the JavaScript nan-object).

  // Windows 64 ABI passes double arguments in xmm0, xmm1 and
  // returns result in xmm0.
  // Argument backing space is allocated on the stack above
  // the return address.

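  // Stack layout on entry (kRegisterSize is 8 on x64; the Win64 "home" slots
  // above the return address are reused here as scratch space):
  //   [rsp + 0]  return address
  //   [rsp + 8]  home slot of 1st argument  <- x (from xmm0) spilled below
  //   [rsp + 16] home slot of 2nd argument  <- y (from xmm1) spilled below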
  // Compute x mod y.
  // Load y and x (use argument backing store as temporary storage).
  __ movsd(Operand(rsp, kRegisterSize * 2), xmm1);
  __ movsd(Operand(rsp, kRegisterSize), xmm0);
  __ fld_d(Operand(rsp, kRegisterSize * 2));
  __ fld_d(Operand(rsp, kRegisterSize));

  // Clear exception flags before operation.
  {
    Label no_exceptions;
    __ fwait();
    __ fnstsw_ax();
    // Clear if Illegal Operand or Zero Division exceptions are set.
    __ testb(rax, Immediate(5));
    __ j(zero, &no_exceptions);
    __ fnclex();
    __ bind(&no_exceptions);
  }
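  // (In the x87 status word, bit 0 is the Invalid Operation flag and bit 2
  // is the Zero Divide flag, so the mask 5 == 0b101 tests exactly those two.)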

  // Compute st(0) % st(1)
  {
    Label partial_remainder_loop;
    __ bind(&partial_remainder_loop);
    __ fprem();
    __ fwait();
    __ fnstsw_ax();
    __ testl(rax, Immediate(0x400 /* C2 */));
    // If C2 is set, computation only has partial result. Loop to
    // continue computation.
    __ j(not_zero, &partial_remainder_loop);
  }

  Label valid_result;
  Label return_result;
  // If Invalid Operand or Zero Division exceptions are set,
  // return NaN.
  __ testb(rax, Immediate(5));
  __ j(zero, &valid_result);
  __ fstp(0);  // Drop result in st(0).
  int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);
  __ movq(rcx, kNaNValue);
  __ movq(Operand(rsp, kRegisterSize), rcx);
  __ movsd(xmm0, Operand(rsp, kRegisterSize));
  __ jmp(&return_result);

  // If result is valid, return that.
  __ bind(&valid_result);
  __ fstp_d(Operand(rsp, kRegisterSize));
  __ movsd(xmm0, Operand(rsp, kRegisterSize));

  // Clean up FPU stack and exceptions and return xmm0.
  __ bind(&return_result);
  __ fstp(0);  // Unload y.

  Label clear_exceptions;
  __ testb(rax, Immediate(0x3f /* Any Exception */));
  __ j(not_zero, &clear_exceptions);
  __ ret(0);
  __ bind(&clear_exceptions);
  __ fnclex();
  __ ret(0);

  CodeDesc desc;
  masm.GetCode(&desc);
  base::OS::ProtectCode(buffer, actual_size);
  // Call the function from C++ through this pointer.
  return FUNCTION_CAST<ModuloFunction>(buffer);
}
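
// Illustrative call through the returned pointer (a sketch; the actual call
// site is in V8's runtime, not this file):
//   ModuloFunction mod = CreateModuloFunction();
//   double r = mod(5.5, 2.0);  // 1.5, matching C fmod semantics via fprem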

#endif

#undef __

// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* allocation_memento_found) {
  // Return address is on the stack.
  Register scratch = rdi;
  DCHECK(!AreAliased(receiver, key, value, target_map, scratch));

  if (mode == TRACK_ALLOCATION_SITE) {
    DCHECK(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(
        receiver, scratch, allocation_memento_found);
  }

  // Set transitioned map.
  __ movp(FieldOperand(receiver, HeapObject::kMapOffset), target_map);
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}
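
// A map-change-only transition covers cases like FAST_SMI_ELEMENTS ->
// FAST_ELEMENTS, where the FixedArray backing store already holds valid
// tagged values and only the map word changes. Illustrative JS trigger
// (an assumption about a typical call site, not taken from this file):
//   var a = [1, 2, 3];  // smi-only elements
//   a[0] = {};          // elements kind generalizes; backing store is reused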


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Return address is on the stack.
  DCHECK(receiver.is(rdx));
  DCHECK(key.is(rcx));
  DCHECK(value.is(rax));
  DCHECK(target_map.is(rbx));

  Label allocated, new_backing_store, only_change_map, done;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
  __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
  __ j(equal, &only_change_map);

  __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
  if (kPointerSize == kDoubleSize) {
    // Check backing store for COW-ness. For COW arrays we have to
    // allocate a new backing store.
    __ CompareRoot(FieldOperand(r8, HeapObject::kMapOffset),
                   Heap::kFixedCOWArrayMapRootIndex);
    __ j(equal, &new_backing_store);
  } else {
    // For the x32 port we have to allocate a new backing store, as the SMI
    // size is not equal to the double size.
    DCHECK(kDoubleSize == 2 * kPointerSize);
    __ jmp(&new_backing_store);
  }

  // Check if the backing store is in new-space. If not, we need to allocate
  // a new one since the old one is in pointer-space.
  // If in new space, we can reuse the old backing store because it is
  // the same size.
  __ JumpIfNotInNewSpace(r8, rdi, &new_backing_store);

  __ movp(r14, r8);  // Destination array equals source array.

  // r8 : source FixedArray
  // r9 : elements array length
  // r14: destination FixedDoubleArray
  // Set backing store's map.
  __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
  __ movp(FieldOperand(r14, HeapObject::kMapOffset), rdi);

  __ bind(&allocated);
  // Set transitioned map.
  __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
  __ RecordWriteField(rdx,
                      HeapObject::kMapOffset,
                      rbx,
                      rdi,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Convert smis to doubles and holes to hole NaNs.  The Array's length
  // remains unchanged.
  STATIC_ASSERT(FixedDoubleArray::kLengthOffset == FixedArray::kLengthOffset);
  STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);

  Label loop, entry, convert_hole;
  __ movq(r15, bit_cast<int64_t, uint64_t>(kHoleNanInt64));
  // r15: the-hole NaN
  __ jmp(&entry);

  // Allocate new backing store.
  __ bind(&new_backing_store);
  __ leap(rdi, Operand(r9, times_8, FixedArray::kHeaderSize));
  __ Allocate(rdi, r14, r11, r15, fail, TAG_OBJECT);
  // Set backing store's map.
  __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
  __ movp(FieldOperand(r14, HeapObject::kMapOffset), rdi);
  // Set receiver's backing store.
  __ movp(FieldOperand(rdx, JSObject::kElementsOffset), r14);
  __ movp(r11, r14);
  __ RecordWriteField(rdx,
                      JSObject::kElementsOffset,
                      r11,
                      r15,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Set backing store's length.
  __ Integer32ToSmi(r11, r9);
  __ movp(FieldOperand(r14, FixedDoubleArray::kLengthOffset), r11);
  __ jmp(&allocated);

  __ bind(&only_change_map);
  // Set transitioned map.
  __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
  __ RecordWriteField(rdx,
                      HeapObject::kMapOffset,
                      rbx,
                      rdi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&done);

  // Conversion loop.
  __ bind(&loop);
  __ movp(rbx,
          FieldOperand(r8, r9, times_pointer_size, FixedArray::kHeaderSize));
  // r9 : current element's index
  // rbx: current element (smi-tagged)
  __ JumpIfNotSmi(rbx, &convert_hole);
  __ SmiToInteger32(rbx, rbx);
  __ Cvtlsi2sd(xmm0, rbx);
  __ movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize),
           xmm0);
  __ jmp(&entry);
  __ bind(&convert_hole);

  if (FLAG_debug_code) {
    __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
    __ Assert(equal, kObjectFoundInSmiOnlyArray);
  }

  __ movq(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), r15);
  __ bind(&entry);
  __ decp(r9);
  __ j(not_sign, &loop);

  __ bind(&done);
}
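
// Illustrative JS trigger for the smi -> double elements transition (an
// assumption about a typical call site, not taken from this file):
//   var a = [1, 2, 3];  // FAST_SMI_ELEMENTS
//   a[0] = 1.5;         // kind becomes FAST_DOUBLE_ELEMENTS; the smis are
//                       // unboxed into a FixedDoubleArray by the code above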


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Return address is on the stack.
  DCHECK(receiver.is(rdx));
  DCHECK(key.is(rcx));
  DCHECK(value.is(rax));
  DCHECK(target_map.is(rbx));

  Label loop, entry, convert_hole, gc_required, only_change_map;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
  __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
  __ j(equal, &only_change_map);

  __ Push(rax);

  __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
  __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
  // r8 : source FixedDoubleArray
  // r9 : number of elements
  __ leap(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
  __ Allocate(rdi, r11, r14, r15, &gc_required, TAG_OBJECT);
  // r11: destination FixedArray
  __ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex);
  __ movp(FieldOperand(r11, HeapObject::kMapOffset), rdi);
  __ Integer32ToSmi(r14, r9);
  __ movp(FieldOperand(r11, FixedArray::kLengthOffset), r14);

  // Prepare for conversion loop.
  __ movq(rsi, bit_cast<int64_t, uint64_t>(kHoleNanInt64));
  __ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
  // rsi: the-hole NaN
  // rdi: pointer to the-hole
  __ jmp(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ Pop(rax);
  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
  __ jmp(fail);

  // Box doubles into heap numbers.
  __ bind(&loop);
  __ movq(r14, FieldOperand(r8,
                            r9,
                            times_8,
                            FixedDoubleArray::kHeaderSize));
  // r9 : current element's index
  // r14: current element
  __ cmpq(r14, rsi);
  __ j(equal, &convert_hole);

  // Non-hole double, copy value into a heap number.
  __ AllocateHeapNumber(rax, r15, &gc_required);
  // rax: new heap number
  __ movq(FieldOperand(rax, HeapNumber::kValueOffset), r14);
  __ movp(FieldOperand(r11,
                       r9,
                       times_pointer_size,
                       FixedArray::kHeaderSize),
          rax);
  __ movp(r15, r9);
  __ RecordWriteArray(r11,
                      rax,
                      r15,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&entry, Label::kNear);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ movp(FieldOperand(r11,
                       r9,
                       times_pointer_size,
                       FixedArray::kHeaderSize),
          rdi);

  __ bind(&entry);
  __ decp(r9);
  __ j(not_sign, &loop);

  // Replace receiver's backing store with newly created and filled FixedArray.
  __ movp(FieldOperand(rdx, JSObject::kElementsOffset), r11);
  __ RecordWriteField(rdx,
                      JSObject::kElementsOffset,
                      r11,
                      r15,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ Pop(rax);
  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));

  __ bind(&only_change_map);
  // Set transitioned map.
  __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
  __ RecordWriteField(rdx,
                      HeapObject::kMapOffset,
                      rbx,
                      rdi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}
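
// Illustrative JS trigger for the double -> object elements transition (an
// assumption about a typical call site, not taken from this file):
//   var a = [1.5, 2.5];  // FAST_DOUBLE_ELEMENTS, unboxed doubles
//   a[0] = "x";          // kind generalizes; each double above is boxed
//                        // into a fresh HeapNumber in a new FixedArray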


void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ movp(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ testb(result, Immediate(kIsIndirectStringMask));
  __ j(zero, &check_sequential, Label::kNear);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ testb(result, Immediate(kSlicedNotConsMask));
  __ j(zero, &cons_string, Label::kNear);

  // Handle slices.
  Label indirect_string_loaded;
  __ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset));
  __ addp(index, result);
  __ movp(string, FieldOperand(string, SlicedString::kParentOffset));
  __ jmp(&indirect_string_loaded, Label::kNear);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
                 Heap::kempty_stringRootIndex);
  __ j(not_equal, call_runtime);
  __ movp(string, FieldOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ movp(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label seq_string;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ testb(result, Immediate(kStringRepresentationMask));
  __ j(zero, &seq_string, Label::kNear);

  // Handle external strings.
  Label one_byte_external, done;
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ testb(result, Immediate(kIsIndirectStringMask));
    __ Assert(zero, kExternalStringExpectedButNotFound);
  }
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ testb(result, Immediate(kShortExternalStringTag));
  __ j(not_zero, call_runtime);
  // Check encoding.
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ testb(result, Immediate(kStringEncodingMask));
  __ movp(result, FieldOperand(string, ExternalString::kResourceDataOffset));
  __ j(not_equal, &one_byte_external, Label::kNear);
  // Two-byte string.
  __ movzxwl(result, Operand(result, index, times_2, 0));
  __ jmp(&done, Label::kNear);
  __ bind(&one_byte_external);
  // One-byte string.
  __ movzxbl(result, Operand(result, index, times_1, 0));
  __ jmp(&done, Label::kNear);

  // Dispatch on the encoding: one-byte or two-byte.
  Label one_byte;
  __ bind(&seq_string);
  STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
  __ testb(result, Immediate(kStringEncodingMask));
  __ j(not_zero, &one_byte, Label::kNear);

  // Two-byte string.
  // Load the two-byte character code into the result register.
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
  __ movzxwl(result, FieldOperand(string,
                                  index,
                                  times_2,
                                  SeqTwoByteString::kHeaderSize));
  __ jmp(&done, Label::kNear);

  // One-byte string.
  // Load the byte into the result register.
  __ bind(&one_byte);
  __ movzxbl(result, FieldOperand(string,
                                  index,
                                  times_1,
                                  SeqOneByteString::kHeaderSize));
  __ bind(&done);
}
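
// The code above is, in effect, the fast path of a character load such as
// String.prototype.charCodeAt: it unwraps slices and flat cons strings, then
// reads one character from a sequential or external string, and falls back
// to the runtime for anything else. Rough JS equivalent of what a caller
// ends up computing (a sketch):
//   var c = "hello".charCodeAt(1);  // 101, a movzxbl load for one-byte data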


void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
                                   XMMRegister input,
                                   XMMRegister result,
                                   XMMRegister double_scratch,
                                   Register temp1,
                                   Register temp2) {
  DCHECK(!input.is(result));
  DCHECK(!input.is(double_scratch));
  DCHECK(!result.is(double_scratch));
  DCHECK(!temp1.is(temp2));
  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
  DCHECK(!masm->serializer_enabled());  // External references not serializable.

  Label done;

  __ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
  __ movsd(double_scratch, Operand(kScratchRegister, 0 * kDoubleSize));
  __ xorpd(result, result);
  __ ucomisd(double_scratch, input);
  __ j(above_equal, &done);
  __ ucomisd(input, Operand(kScratchRegister, 1 * kDoubleSize));
  __ movsd(result, Operand(kScratchRegister, 2 * kDoubleSize));
  __ j(above_equal, &done);
  __ movsd(double_scratch, Operand(kScratchRegister, 3 * kDoubleSize));
  __ movsd(result, Operand(kScratchRegister, 4 * kDoubleSize));
  __ mulsd(double_scratch, input);
  __ addsd(double_scratch, result);
  __ movq(temp2, double_scratch);
  __ subsd(double_scratch, result);
  __ movsd(result, Operand(kScratchRegister, 6 * kDoubleSize));
  __ leaq(temp1, Operand(temp2, 0x1ff800));
  __ andq(temp2, Immediate(0x7ff));
  __ shrq(temp1, Immediate(11));
  __ mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize));
  __ Move(kScratchRegister, ExternalReference::math_exp_log_table());
  __ shlq(temp1, Immediate(52));
  __ orq(temp1, Operand(kScratchRegister, temp2, times_8, 0));
  __ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
  __ subsd(double_scratch, input);
  __ movsd(input, double_scratch);
  __ subsd(result, double_scratch);
  __ mulsd(input, double_scratch);
  __ mulsd(result, input);
  __ movq(input, temp1);
  __ mulsd(result, Operand(kScratchRegister, 7 * kDoubleSize));
  __ subsd(result, double_scratch);
  __ addsd(result, Operand(kScratchRegister, 8 * kDoubleSize));
  __ mulsd(result, input);

  __ bind(&done);
}
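
// High-level shape of the reduction above (a sketch; the exact constant-table
// layout is defined by InitializeMathExpData elsewhere, not in this file):
// the input x is first range-checked against under/overflow bounds, then
// exp(x) is split as 2^(n/2048) * exp(r) with |r| small. The andq(0x7ff)
// selects one of 2048 log-table entries, the shrq(11)/shlq(52) pair builds
// the IEEE-754 exponent bits of the power-of-two factor, and exp(r) is
// approximated by a short polynomial in double_scratch.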

#undef __


CodeAgingHelper::CodeAgingHelper() {
  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
  // The sequence of instructions that is patched out for aging code is the
  // following boilerplate stack-building prologue that is found both in
  // FUNCTION and OPTIMIZED_FUNCTION code:
  CodePatcher patcher(young_sequence_.start(), young_sequence_.length());
  patcher.masm()->pushq(rbp);
  patcher.masm()->movp(rbp, rsp);
  patcher.masm()->Push(rsi);
  patcher.masm()->Push(rdi);
}
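
// In machine terms the young sequence is the standard frame-building prologue
// (pushq rbp; movp rbp, rsp; push rsi; push rdi). PatchPlatformCodeAge below
// overwrites it with a short call to an age stub, which is why IsOld() can
// simply test for kCallOpcode at the start of the sequence.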


#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
  return *candidate == kCallOpcode;
}
#endif


bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
  return result;
}


void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(isolate, sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    sequence++;  // Skip the kCallOpcode byte
    Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
        Assembler::kCallTargetAddressOffset;
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}


void Code::PatchPlatformCodeAge(Isolate* isolate,
                                byte* sequence,
                                Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  if (age == kNoAgeCodeAge) {
    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
    CpuFeatures::FlushICache(sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(sequence, young_length);
    patcher.masm()->call(stub->instruction_start());
    patcher.masm()->Nop(
        kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
  }
}


Operand StackArgumentsAccessor::GetArgumentOperand(int index) {
  DCHECK(index >= 0);
  int receiver = (receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER) ? 1 : 0;
  int displacement_to_last_argument = base_reg_.is(rsp) ?
      kPCOnStackSize : kFPOnStackSize + kPCOnStackSize;
  displacement_to_last_argument += extra_displacement_to_last_argument_;
  if (argument_count_reg_.is(no_reg)) {
    // argument[0] is at base_reg_ + displacement_to_last_argument +
    // (argument_count_immediate_ + receiver - 1) * kPointerSize.
    DCHECK(argument_count_immediate_ + receiver > 0);
    return Operand(base_reg_, displacement_to_last_argument +
        (argument_count_immediate_ + receiver - 1 - index) * kPointerSize);
  } else {
    // argument[0] is at base_reg_ + displacement_to_last_argument +
    // argument_count_reg_ * times_pointer_size + (receiver - 1) * kPointerSize.
    return Operand(base_reg_, argument_count_reg_, times_pointer_size,
        displacement_to_last_argument + (receiver - 1 - index) * kPointerSize);
  }
}
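
// Worked example for the immediate-count case (illustrative assumptions:
// base_reg_ = rsp, ARGUMENTS_CONTAIN_RECEIVER, argument_count_immediate_ = 2,
// no extra displacement, kPointerSize == kPCOnStackSize == 8):
//   [rsp + 0]  return address
//   [rsp + 8]  GetArgumentOperand(2)  // last argument
//   [rsp + 16] GetArgumentOperand(1)
//   [rsp + 24] GetArgumentOperand(0)  // the receiver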


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64