// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/ppc/codegen-ppc.h"

#if V8_TARGET_ARCH_PPC

#include "src/codegen.h"
#include "src/macro-assembler.h"
#include "src/ppc/simulator-ppc.h"

namespace v8 {
namespace internal {


#define __ masm.


#if defined(USE_SIMULATOR)
byte* fast_exp_ppc_machine_code = nullptr;
double fast_exp_simulator(double x, Isolate* isolate) {
  return Simulator::current(isolate)
      ->CallFPReturnsDouble(fast_exp_ppc_machine_code, x, 0);
}
#endif

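// Generates a fast exp() stub and returns it as a C-callable function
// pointer, or nullptr if code-space allocation fails. A hypothetical call
// site (illustrative only, not part of this file) would look like:
//
//   UnaryMathFunctionWithIsolate fast_exp = CreateExpFunction(isolate);
//   double y = (fast_exp != nullptr) ? fast_exp(x, isolate) : std::exp(x);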
UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return nullptr;
  ExternalReference::InitializeMathExpData();

  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);

  {
    DoubleRegister input = d1;
    DoubleRegister result = d2;
    DoubleRegister double_scratch1 = d3;
    DoubleRegister double_scratch2 = d4;
    Register temp1 = r7;
    Register temp2 = r8;
    Register temp3 = r9;

    // Called from C.
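    // (On ABIs that use function descriptors, e.g. AIX and big-endian
    // PPC64 ELFv1, a C function pointer addresses a descriptor rather than
    // the first instruction, so one is emitted ahead of the entry point.)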
    __ function_descriptor();

    __ Push(temp3, temp2, temp1);
    MathExpGenerator::EmitMathExp(&masm, input, result, double_scratch1,
                                  double_scratch2, temp1, temp2, temp3);
    __ Pop(temp3, temp2, temp1);
    __ fmr(d1, result);
    __ Ret();
  }

  CodeDesc desc;
  masm.GetCode(&desc);
#if !ABI_USES_FUNCTION_DESCRIPTORS
  DCHECK(!RelocInfo::RequiresRelocation(desc));
#endif

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);

#if !defined(USE_SIMULATOR)
  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#else
  fast_exp_ppc_machine_code = buffer;
  return &fast_exp_simulator;
#endif
}


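// Generates a fast sqrt() stub (a single fsqrt instruction between the
// parameter and result moves), or nullptr under the simulator and on
// allocation failure; callers are assumed to fall back to the C library
// sqrt when nullptr is returned.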
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
  return nullptr;
#else
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return nullptr;

  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);

  // Called from C.
  __ function_descriptor();

  __ MovFromFloatParameter(d1);
  __ fsqrt(d1, d1);
  __ MovToFloatResult(d1);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
#if !ABI_USES_FUNCTION_DESCRIPTORS
  DCHECK(!RelocInfo::RequiresRelocation(desc));
#endif

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}

#undef __


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  DCHECK(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  DCHECK(masm->has_frame());
  masm->set_has_frame(false);
}


// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

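// Map-only elements transition: the backing store is unchanged, so only the
// receiver's map is replaced (with a write barrier), after bailing out to
// |allocation_memento_found| when allocation-site tracking is active.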
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm, Register receiver, Register key, Register value,
    Register target_map, AllocationSiteMode mode,
    Label* allocation_memento_found) {
  Register scratch_elements = r7;
  DCHECK(!AreAliased(receiver, key, value, target_map, scratch_elements));

  if (mode == TRACK_ALLOCATION_SITE) {
    DCHECK(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(receiver, scratch_elements,
                                         allocation_memento_found);
  }

  // Set transitioned map.
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, r11,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


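// Converts the receiver's smi-only FixedArray backing store into a freshly
// allocated FixedDoubleArray: each smi is untagged and stored as a double
// (e.g. smi 5 becomes 5.0), and each hole is stored as the canonical hole
// NaN bit pattern so it remains recognizable as a hole.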
void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm, Register receiver, Register key, Register value,
    Register target_map, AllocationSiteMode mode, Label* fail) {
  // lr contains the return address.
  Label loop, entry, convert_hole, only_change_map, done;
  Register elements = r7;
  Register length = r8;
  Register array = r9;
  Register array_end = array;

  // target_map parameter can be clobbered.
  Register scratch1 = target_map;
  Register scratch2 = r10;
  Register scratch3 = r11;
  Register scratch4 = r14;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map, elements, length, array,
                     scratch2));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ beq(&only_change_map);

  __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // length: number of elements (smi-tagged)

  // Allocate new FixedDoubleArray.
  __ SmiToDoubleArrayOffset(scratch3, length);
  __ addi(scratch3, scratch3, Operand(FixedDoubleArray::kHeaderSize));
  __ Allocate(scratch3, array, scratch4, scratch2, fail, DOUBLE_ALIGNMENT);
  // array: destination FixedDoubleArray, not tagged as heap object.
  // elements: source FixedArray.

  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
  __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
  __ StoreP(scratch2, MemOperand(array, HeapObject::kMapOffset));

  // Update receiver's map.
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ addi(scratch1, array, Operand(kHeapObjectTag));
  __ StoreP(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset), r0);
  __ RecordWriteField(receiver, JSObject::kElementsOffset, scratch1, scratch2,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Prepare for conversion loop.
  __ addi(scratch1, elements,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ addi(scratch2, array, Operand(FixedDoubleArray::kHeaderSize));
  __ SmiToDoubleArrayOffset(array_end, length);
  __ add(array_end, scratch2, array_end);
// Repurpose registers no longer in use.
#if V8_TARGET_ARCH_PPC64
  Register hole_int64 = elements;
  __ mov(hole_int64, Operand(kHoleNanInt64));
#else
  Register hole_lower = elements;
  Register hole_upper = length;
  __ mov(hole_lower, Operand(kHoleNanLower32));
  __ mov(hole_upper, Operand(kHoleNanUpper32));
#endif
  // scratch1: begin of source FixedArray element fields, not tagged
  // hole_lower: kHoleNanLower32 (or hole_int64: kHoleNanInt64 on PPC64)
  // hole_upper: kHoleNanUpper32
  // array_end: end of destination FixedDoubleArray, not tagged
  // scratch2: begin of FixedDoubleArray element fields, not tagged

  __ b(&entry);

  __ bind(&only_change_map);
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ b(&done);

  // Convert and copy elements.
  __ bind(&loop);
  __ LoadP(scratch3, MemOperand(scratch1));
  __ addi(scratch1, scratch1, Operand(kPointerSize));
  // scratch3: current element
  __ UntagAndJumpIfNotSmi(scratch3, scratch3, &convert_hole);

  // Normal smi, convert to double and store.
  __ ConvertIntToDouble(scratch3, d0);
  __ stfd(d0, MemOperand(scratch2, 0));
  __ addi(scratch2, scratch2, Operand(8));
  __ b(&entry);

  // Hole found, store the-hole NaN.
  __ bind(&convert_hole);
  if (FLAG_debug_code) {
    __ LoadP(scratch3, MemOperand(scratch1, -kPointerSize));
    __ CompareRoot(scratch3, Heap::kTheHoleValueRootIndex);
    __ Assert(eq, kObjectFoundInSmiOnlyArray);
  }
#if V8_TARGET_ARCH_PPC64
  __ std(hole_int64, MemOperand(scratch2, 0));
#else
  __ stw(hole_upper, MemOperand(scratch2, Register::kExponentOffset));
  __ stw(hole_lower, MemOperand(scratch2, Register::kMantissaOffset));
#endif
  __ addi(scratch2, scratch2, Operand(8));

  __ bind(&entry);
  __ cmp(scratch2, array_end);
  __ blt(&loop);

  __ bind(&done);
}


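// The reverse transition: converts the receiver's FixedDoubleArray backing
// store into a FixedArray, boxing each double in a newly allocated
// HeapNumber and turning hole NaNs back into the-hole. The new array is
// pre-filled with the-hole so the GC never observes a partly initialized
// array if a HeapNumber allocation triggers the |gc_required| bailout.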
void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm, Register receiver, Register key, Register value,
    Register target_map, AllocationSiteMode mode, Label* fail) {
  // Register lr contains the return address.
  Label loop, convert_hole, gc_required, only_change_map;
  Register elements = r7;
  Register array = r9;
  Register length = r8;
  Register scratch = r10;
  Register scratch3 = r11;
  Register hole_value = r14;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map, elements, array, length,
                     scratch));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ beq(&only_change_map);

  __ Push(target_map, receiver, key, value);
  __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // elements: source FixedDoubleArray
  // length: number of elements (smi-tagged)

  // Allocate new FixedArray.
  // Re-use value and target_map registers, as they have been saved on the
  // stack.
  Register array_size = value;
  Register allocate_scratch = target_map;
  __ li(array_size, Operand(FixedDoubleArray::kHeaderSize));
  __ SmiToPtrArrayOffset(r0, length);
  __ add(array_size, array_size, r0);
  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
              NO_ALLOCATION_FLAGS);
  // array: destination FixedArray, not tagged as heap object
  // Set destination FixedArray's length and map.
  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
  __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
  __ StoreP(scratch, MemOperand(array, HeapObject::kMapOffset));
  __ addi(array, array, Operand(kHeapObjectTag));

  // Prepare for conversion loop.
  Register src_elements = elements;
  Register dst_elements = target_map;
  Register dst_end = length;
  Register heap_number_map = scratch;
  __ addi(src_elements, elements,
          Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
  __ SmiToPtrArrayOffset(length, length);
  __ LoadRoot(hole_value, Heap::kTheHoleValueRootIndex);

  Label initialization_loop, loop_done;
  __ ShiftRightImm(r0, length, Operand(kPointerSizeLog2), SetRC);
  __ beq(&loop_done, cr0);

  // Allocating heap numbers in the loop below can fail and cause a jump to
  // gc_required. We can't leave a partly initialized FixedArray behind,
  // so pessimistically fill it with holes now.
  __ mtctr(r0);
  __ addi(dst_elements, array,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
  __ bind(&initialization_loop);
  __ StorePU(hole_value, MemOperand(dst_elements, kPointerSize));
  __ bdnz(&initialization_loop);

  __ addi(dst_elements, array,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(dst_end, dst_elements, length);
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  // Offsetted addresses are used in src_elements to fully take advantage of
  // post-indexing: inside the loop, src_elements is advanced past each
  // element right after its exponent word is read, so later accesses to the
  // same element use negative offsets.
  // dst_elements: begin of destination FixedArray element fields, not tagged
  // src_elements: begin of source FixedDoubleArray element fields, not tagged
  // dst_end: end of destination FixedArray, not tagged
  // array: destination FixedArray
  // hole_value: the-hole pointer
  // heap_number_map: heap number map
  __ b(&loop);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ Pop(target_map, receiver, key, value);
  __ b(fail);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ StoreP(hole_value, MemOperand(dst_elements));
  __ addi(dst_elements, dst_elements, Operand(kPointerSize));
  __ cmpl(dst_elements, dst_end);
  __ bge(&loop_done);

  __ bind(&loop);
  Register upper_bits = key;
  __ lwz(upper_bits, MemOperand(src_elements, Register::kExponentOffset));
  __ addi(src_elements, src_elements, Operand(kDoubleSize));
  // upper_bits: current element's upper 32 bits
  // src_elements: address of next element's upper 32 bits
  __ Cmpi(upper_bits, Operand(kHoleNanUpper32), r0);
  __ beq(&convert_hole);

  // Non-hole double, copy value into a heap number.
  Register heap_number = receiver;
  Register scratch2 = value;
  __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
                        &gc_required);
  // heap_number: new heap number
#if V8_TARGET_ARCH_PPC64
  __ ld(scratch2, MemOperand(src_elements, -kDoubleSize));
  // Subtract tag for std.
  __ addi(upper_bits, heap_number, Operand(-kHeapObjectTag));
  __ std(scratch2, MemOperand(upper_bits, HeapNumber::kValueOffset));
#else
  __ lwz(scratch2,
         MemOperand(src_elements, Register::kMantissaOffset - kDoubleSize));
  __ lwz(upper_bits,
         MemOperand(src_elements, Register::kExponentOffset - kDoubleSize));
  __ stw(scratch2, FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
  __ stw(upper_bits, FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
#endif
  __ mr(scratch2, dst_elements);
  __ StoreP(heap_number, MemOperand(dst_elements));
  __ addi(dst_elements, dst_elements, Operand(kPointerSize));
  __ RecordWrite(array, scratch2, heap_number, kLRHasNotBeenSaved,
                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ cmpl(dst_elements, dst_end);
  __ blt(&loop);
  __ bind(&loop_done);

  __ Pop(target_map, receiver, key, value);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ StoreP(array, FieldMemOperand(receiver, JSObject::kElementsOffset), r0);
  __ RecordWriteField(receiver, JSObject::kElementsOffset, array, scratch,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  __ bind(&only_change_map);
  // Update receiver's map.
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


// Assume ip can be used as a scratch register below.
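// Loads the character at |index| from |string| into |result|: slices and
// flat cons strings are first unwrapped to the underlying string, then the
// load dispatches on sequential vs. external representation and one- vs.
// two-byte encoding, bailing out to |call_runtime| for anything else.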
void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
                                       Register index, Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ lbz(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ andi(r0, result, Operand(kIsIndirectStringMask));
  __ beq(&check_sequential, cr0);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ mov(ip, Operand(kSlicedNotConsMask));
  __ and_(r0, result, ip, SetRC);
  __ beq(&cons_string, cr0);

  // Handle slices.
  Label indirect_string_loaded;
  __ LoadP(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
  __ LoadP(string, FieldMemOperand(string, SlicedString::kParentOffset));
  __ SmiUntag(ip, result);
  __ add(index, index, ip);
  __ b(&indirect_string_loaded);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ LoadP(result, FieldMemOperand(string, ConsString::kSecondOffset));
  __ CompareRoot(result, Heap::kempty_stringRootIndex);
  __ bne(call_runtime);
  // Get the first of the two strings and load its instance type.
  __ LoadP(string, FieldMemOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ lbz(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label external_string, check_encoding;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ andi(r0, result, Operand(kStringRepresentationMask));
  __ bne(&external_string, cr0);

  // Prepare sequential strings.
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ addi(string, string,
          Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  __ b(&check_encoding);

  // Handle external strings.
  __ bind(&external_string);
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ andi(r0, result, Operand(kIsIndirectStringMask));
    __ Assert(eq, kExternalStringExpectedButNotFound, cr0);
  }
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ andi(r0, result, Operand(kShortExternalStringMask));
  __ bne(call_runtime, cr0);
  __ LoadP(string,
           FieldMemOperand(string, ExternalString::kResourceDataOffset));

  Label one_byte, done;
  __ bind(&check_encoding);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ andi(r0, result, Operand(kStringEncodingMask));
  __ bne(&one_byte, cr0);
  // Two-byte string.
  __ ShiftLeftImm(result, index, Operand(1));
  __ lhzx(result, MemOperand(string, result));
  __ b(&done);
  __ bind(&one_byte);
  // One-byte string.
  __ lbzx(result, MemOperand(string, index));
  __ bind(&done);
}


static MemOperand ExpConstant(int index, Register base) {
  return MemOperand(base, index * kDoubleSize);
}


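// Computes exp(input) with a table-driven method. The rough scheme, inferred
// from the bit manipulation below (the exact constant-table layout lives in
// ExternalReference::math_exp_constants, not in this file): the input is
// scaled and biased so that an intermediate integer carries the result's
// binary exponent in its upper bits and a table index in its low 11 bits;
// the index selects one of 2048 doubles from math_exp_log_table, the upper
// bits (rebiased by 0x3ff) are OR'ed into the IEEE-754 exponent field at
// bit 52, and a short polynomial in the remaining residual refines the
// product. Out-of-range inputs are handled up front (0, infinity, or NaN).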
void MathExpGenerator::EmitMathExp(MacroAssembler* masm, DoubleRegister input,
                                   DoubleRegister result,
                                   DoubleRegister double_scratch1,
                                   DoubleRegister double_scratch2,
                                   Register temp1, Register temp2,
                                   Register temp3) {
  DCHECK(!input.is(result));
  DCHECK(!input.is(double_scratch1));
  DCHECK(!input.is(double_scratch2));
  DCHECK(!result.is(double_scratch1));
  DCHECK(!result.is(double_scratch2));
  DCHECK(!double_scratch1.is(double_scratch2));
  DCHECK(!temp1.is(temp2));
  DCHECK(!temp1.is(temp3));
  DCHECK(!temp2.is(temp3));
  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
  DCHECK(!masm->serializer_enabled());  // External references not serializable.

  Label zero, infinity, done;

  __ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));

  __ lfd(double_scratch1, ExpConstant(0, temp3));
  __ fcmpu(double_scratch1, input);
  __ fmr(result, input);
  __ bunordered(&done);
  __ bge(&zero);

  __ lfd(double_scratch2, ExpConstant(1, temp3));
  __ fcmpu(input, double_scratch2);
  __ bge(&infinity);

  __ lfd(double_scratch1, ExpConstant(3, temp3));
  __ lfd(result, ExpConstant(4, temp3));
  __ fmul(double_scratch1, double_scratch1, input);
  __ fadd(double_scratch1, double_scratch1, result);
  __ MovDoubleLowToInt(temp2, double_scratch1);
  __ fsub(double_scratch1, double_scratch1, result);
  __ lfd(result, ExpConstant(6, temp3));
  __ lfd(double_scratch2, ExpConstant(5, temp3));
  __ fmul(double_scratch1, double_scratch1, double_scratch2);
  __ fsub(double_scratch1, double_scratch1, input);
  __ fsub(result, result, double_scratch1);
  __ fmul(double_scratch2, double_scratch1, double_scratch1);
  __ fmul(result, result, double_scratch2);
  __ lfd(double_scratch2, ExpConstant(7, temp3));
  __ fmul(result, result, double_scratch2);
  __ fsub(result, result, double_scratch1);
  __ lfd(double_scratch2, ExpConstant(8, temp3));
  __ fadd(result, result, double_scratch2);
  __ srwi(temp1, temp2, Operand(11));
  __ andi(temp2, temp2, Operand(0x7ff));
  __ addi(temp1, temp1, Operand(0x3ff));

  // Must not call ExpConstant() after overwriting temp3!
  __ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
  __ slwi(temp2, temp2, Operand(3));
#if V8_TARGET_ARCH_PPC64
  __ ldx(temp2, MemOperand(temp3, temp2));
  __ sldi(temp1, temp1, Operand(52));
  __ orx(temp2, temp1, temp2);
  __ MovInt64ToDouble(double_scratch1, temp2);
#else
  __ add(ip, temp3, temp2);
  __ lwz(temp3, MemOperand(ip, Register::kExponentOffset));
  __ lwz(temp2, MemOperand(ip, Register::kMantissaOffset));
  __ slwi(temp1, temp1, Operand(20));
  __ orx(temp3, temp1, temp3);
  __ MovInt64ToDouble(double_scratch1, temp3, temp2);
#endif

  __ fmul(result, result, double_scratch1);
  __ b(&done);

  __ bind(&zero);
  __ fmr(result, kDoubleRegZero);
  __ b(&done);

  __ bind(&infinity);
  __ lfd(result, ExpConstant(2, temp3));

  __ bind(&done);
}

#undef __

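// Code aging. Young code starts with the standard frame-setup prologue
// emitted below; PatchPlatformCodeAge replaces that fixed-length sequence
// with a jump to an age stub, and the leading nop marker lets IsOld tell
// the two forms apart.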
CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
  USE(isolate);
  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
  // Since patcher is a large object, allocate it dynamically when needed,
  // to avoid overloading the stack in stress conditions.
  // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
  // the process, before the simulator's ICache is set up.
  base::SmartPointer<CodePatcher> patcher(
      new CodePatcher(isolate, young_sequence_.start(),
                      young_sequence_.length() / Assembler::kInstrSize,
                      CodePatcher::DONT_FLUSH));
  PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
  patcher->masm()->PushFixedFrame(r4);
  patcher->masm()->addi(fp, sp,
                        Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
  for (int i = 0; i < kNoCodeAgeSequenceNops; i++) {
    patcher->masm()->nop();
  }
}


#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
  return Assembler::IsNop(Assembler::instr_at(candidate));
}
#endif


bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
  return result;
}


void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(isolate, sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    Code* code = NULL;
    Address target_address =
        Assembler::target_address_at(sequence + kCodeAgingTargetDelta, code);
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}


void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence, Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  if (age == kNoAgeCodeAge) {
    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
    Assembler::FlushICache(isolate, sequence, young_length);
  } else {
    // FIXED_SEQUENCE
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(isolate, sequence,
                        young_length / Assembler::kInstrSize);
    Assembler::BlockTrampolinePoolScope block_trampoline_pool(patcher.masm());
    intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
    // Don't use Call -- we need to preserve ip and lr.
    // See GenerateMakeCodeYoungAgainCommon for the stub code.
    patcher.masm()->nop();  // Marker to detect the patched sequence (see IsOld).
    patcher.masm()->mov(r3, Operand(target));
    patcher.masm()->Jump(r3);
    for (int i = 0; i < kCodeAgingSequenceNops; i++) {
      patcher.masm()->nop();
    }
  }
}
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_PPC