// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_ARM64

#include "src/codegen.h"
#include "src/macro-assembler.h"
#include "src/arm64/simulator-arm64.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

#if defined(USE_SIMULATOR)
byte* fast_exp_arm64_machine_code = NULL;
double fast_exp_simulator(double x) {
  Simulator* simulator = Simulator::current(Isolate::Current());
  Simulator::CallArgument args[] = {
      Simulator::CallArgument(x),
      Simulator::CallArgument::End()
  };
  return simulator->CallDouble(fast_exp_arm64_machine_code, args);
}
#endif


UnaryMathFunction CreateExpFunction() {
  if (!FLAG_fast_math) return &std::exp;

  // Use the Math.exp implementation in MathExpGenerator::EmitMathExp() to
  // create an AAPCS64-compliant exp() function. This will be faster than the
  // C library's exp() function, but probably less accurate.
  size_t actual_size;
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &std::exp;

  ExternalReference::InitializeMathExpData();
  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
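  // The generated code is called like a normal C function, so make the
  // assembler use the C stack pointer (csp).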
  masm.SetStackPointer(csp);

  // The argument will be in d0 on entry.
  DoubleRegister input = d0;
  // Use other caller-saved registers for all other values.
  DoubleRegister result = d1;
  DoubleRegister double_temp1 = d2;
  DoubleRegister double_temp2 = d3;
  Register temp1 = x10;
  Register temp2 = x11;
  Register temp3 = x12;

  MathExpGenerator::EmitMathExp(&masm, input, result,
                                double_temp1, double_temp2,
                                temp1, temp2, temp3);
  // Move the result to the return register.
  masm.Fmov(d0, result);
  masm.Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(!RelocInfo::RequiresRelocation(desc));

  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);

#if !defined(USE_SIMULATOR)
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
#else
  fast_exp_arm64_machine_code = buffer;
  return &fast_exp_simulator;
#endif
}


UnaryMathFunction CreateSqrtFunction() {
  return &std::sqrt;
}


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  ASSERT(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  ASSERT(masm->has_frame());
  masm->set_has_frame(false);
}


// -------------------------------------------------------------------------
// Code generators

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm, AllocationSiteMode mode,
    Label* allocation_memento_found) {
  // ----------- S t a t e -------------
  //  -- x2    : receiver
  //  -- x3    : target map
  // -----------------------------------
  Register receiver = x2;
  Register map = x3;

  if (mode == TRACK_ALLOCATION_SITE) {
    ASSERT(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11,
                                         allocation_memento_found);
  }

  // Set transitioned map.
  __ Str(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      map,
                      x10,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
  ASM_LOCATION("ElementsTransitionGenerator::GenerateSmiToDouble");
  // ----------- S t a t e -------------
  //  -- lr    : return address
  //  -- x0    : value
  //  -- x1    : key
  //  -- x2    : receiver
  //  -- x3    : target map, scratch for subsequent call
  // -----------------------------------
  Register receiver = x2;
  Register target_map = x3;

  Label gc_required, only_change_map;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  Register elements = x4;
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);

  __ Push(lr);
  Register length = x5;
  __ Ldrsw(length, UntagSmiFieldMemOperand(elements,
                                           FixedArray::kLengthOffset));

  // Allocate new FixedDoubleArray.
  Register array_size = x6;
  Register array = x7;
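  // The new backing store needs length * kDoubleSize bytes plus the array
  // header.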
  __ Lsl(array_size, length, kDoubleSizeLog2);
  __ Add(array_size, array_size, FixedDoubleArray::kHeaderSize);
  __ Allocate(array_size, array, x10, x11, &gc_required, DOUBLE_ALIGNMENT);
  // Register array is non-tagged heap object.

  // Set the destination FixedDoubleArray's length and map.
  Register map_root = x6;
  __ LoadRoot(map_root, Heap::kFixedDoubleArrayMapRootIndex);
  __ SmiTag(x11, length);
  __ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
  __ Str(map_root, MemOperand(array, HeapObject::kMapOffset));

  __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x6,
                      kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ Add(x10, array, kHeapObjectTag);
  __ Str(x10, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver, JSObject::kElementsOffset, x10,
                      x6, kLRHasBeenSaved, kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);

  // Prepare for conversion loop.
  Register src_elements = x10;
  Register dst_elements = x11;
  Register dst_end = x12;
  __ Add(src_elements, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Add(dst_elements, array, FixedDoubleArray::kHeaderSize);
  __ Add(dst_end, dst_elements, Operand(length, LSL, kDoubleSizeLog2));

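  // Preload the canonical hole NaN; it is written for any non-smi source
  // element.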
  FPRegister nan_d = d1;
  __ Fmov(nan_d, rawbits_to_double(kHoleNanInt64));

  Label entry, done;
  __ B(&entry);

  __ Bind(&only_change_map);
  __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x6,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ B(&done);

  // Call into runtime if GC is required.
  __ Bind(&gc_required);
  __ Pop(lr);
  __ B(fail);

  // Iterate over the array, copying and converting smis to doubles. If an
  // element is non-smi, write a hole to the destination.
  {
    Label loop;
    __ Bind(&loop);
    __ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
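    // Untag the smi speculatively; if x13 turns out not to be a smi, the
    // converted value is discarded and the hole NaN is selected instead.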
    __ SmiUntagToDouble(d0, x13, kSpeculativeUntag);
    __ Tst(x13, kSmiTagMask);
    __ Fcsel(d0, d0, nan_d, eq);
    __ Str(d0, MemOperand(dst_elements, kDoubleSize, PostIndex));

    __ Bind(&entry);
    __ Cmp(dst_elements, dst_end);
    __ B(lt, &loop);
  }

  __ Pop(lr);
  __ Bind(&done);
}


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
  ASM_LOCATION("ElementsTransitionGenerator::GenerateDoubleToObject");
  // ----------- S t a t e -------------
  //  -- x0    : value
  //  -- x1    : key
  //  -- x2    : receiver
  //  -- lr    : return address
  //  -- x3    : target map, scratch for subsequent call
  //  -- x4    : scratch (elements)
  // -----------------------------------
  Register value = x0;
  Register key = x1;
  Register receiver = x2;
  Register target_map = x3;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  Label only_change_map;
  Register elements = x4;
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);

  __ Push(lr);
  // TODO(all): These registers may not need to be pushed. Examine
  // RecordWriteStub and check whether it's needed.
  __ Push(target_map, receiver, key, value);
  Register length = x5;
  __ Ldrsw(length, UntagSmiFieldMemOperand(elements,
                                           FixedArray::kLengthOffset));

  // Allocate new FixedArray.
  Register array_size = x6;
  Register array = x7;
  Label gc_required;
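  // The new FixedArray needs length * kPointerSize bytes plus the array
  // header.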
  __ Mov(array_size, FixedDoubleArray::kHeaderSize);
  __ Add(array_size, array_size, Operand(length, LSL, kPointerSizeLog2));
  __ Allocate(array_size, array, x10, x11, &gc_required, NO_ALLOCATION_FLAGS);

  // Set destination FixedArray's length and map.
  Register map_root = x6;
  __ LoadRoot(map_root, Heap::kFixedArrayMapRootIndex);
  __ SmiTag(x11, length);
  __ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
  __ Str(map_root, MemOperand(array, HeapObject::kMapOffset));

  // Prepare for conversion loop.
  Register src_elements = x10;
  Register dst_elements = x11;
  Register dst_end = x12;
  __ Add(src_elements, elements,
         FixedDoubleArray::kHeaderSize - kHeapObjectTag);
  __ Add(dst_elements, array, FixedArray::kHeaderSize);
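  // Tag the new array now so it can be stored into the receiver and passed to
  // the write barrier below.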
  __ Add(array, array, kHeapObjectTag);
  __ Add(dst_end, dst_elements, Operand(length, LSL, kPointerSizeLog2));

  Register the_hole = x14;
  Register heap_num_map = x15;
  __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
  __ LoadRoot(heap_num_map, Heap::kHeapNumberMapRootIndex);

  Label entry;
  __ B(&entry);

  // Call into runtime if GC is required.
  __ Bind(&gc_required);
  __ Pop(value, key, receiver, target_map);
  __ Pop(lr);
  __ B(fail);

  {
    Label loop, convert_hole;
    __ Bind(&loop);
    __ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
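    // The hole is encoded in a FixedDoubleArray as a specific NaN bit pattern
    // (kHoleNanInt64), so compare the raw 64-bit element against it.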
    __ Cmp(x13, kHoleNanInt64);
    __ B(eq, &convert_hole);

    // Non-hole double, copy value into a heap number.
    Register heap_num = x5;
    __ AllocateHeapNumber(heap_num, &gc_required, x6, x4,
                          x13, heap_num_map);
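    // Remember the destination slot in x13: the post-indexed store below
    // advances dst_elements, but the write barrier needs the slot address.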
    __ Mov(x13, dst_elements);
    __ Str(heap_num, MemOperand(dst_elements, kPointerSize, PostIndex));
    __ RecordWrite(array, x13, heap_num, kLRHasBeenSaved, kDontSaveFPRegs,
                   EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);

    __ B(&entry);

    // Replace the-hole NaN with the-hole pointer.
    __ Bind(&convert_hole);
    __ Str(the_hole, MemOperand(dst_elements, kPointerSize, PostIndex));

    __ Bind(&entry);
    __ Cmp(dst_elements, dst_end);
    __ B(lt, &loop);
  }

  __ Pop(value, key, receiver, target_map);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ Str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver, JSObject::kElementsOffset, array, x13,
                      kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ Pop(lr);

  __ Bind(&only_change_map);
  __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x13,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


CodeAgingHelper::CodeAgingHelper() {
  ASSERT(young_sequence_.length() == kNoCodeAgeSequenceLength);
  // The sequence of instructions that is patched out for aging code is the
  // following boilerplate stack-building prologue that is found both in
  // FUNCTION and OPTIMIZED_FUNCTION code:
  PatchingAssembler patcher(young_sequence_.start(),
                            young_sequence_.length() / kInstructionSize);
  // The young sequence is the frame setup code for FUNCTION code types. It is
  // generated by FullCodeGenerator::Generate.
  MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);

#ifdef DEBUG
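  // Pre-generate the aged code sequence so that IsOld() can compare a
  // candidate sequence against it with memcmp.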
  const int length = kCodeAgeStubEntryOffset / kInstructionSize;
  ASSERT(old_sequence_.length() >= kCodeAgeStubEntryOffset);
  PatchingAssembler patcher_old(old_sequence_.start(), length);
  MacroAssembler::EmitCodeAgeSequence(&patcher_old, NULL);
#endif
}


#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
  return memcmp(candidate, old_sequence_.start(), kCodeAgeStubEntryOffset) == 0;
}
#endif


bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  return MacroAssembler::IsYoungSequence(isolate, sequence);
}


void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(isolate, sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    byte* target = sequence + kCodeAgeStubEntryOffset;
    Code* stub = GetCodeFromTargetAddress(Memory::Address_at(target));
    GetCodeAgeAndParity(stub, age, parity);
  }
}


void Code::PatchPlatformCodeAge(Isolate* isolate,
                                byte* sequence,
                                Code::Age age,
                                MarkingParity parity) {
  PatchingAssembler patcher(sequence,
                            kNoCodeAgeSequenceLength / kInstructionSize);
  if (age == kNoAgeCodeAge) {
    MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    MacroAssembler::EmitCodeAgeSequence(&patcher, stub);
  }
}


void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  ASSERT(string.Is64Bits() && index.Is32Bits() && result.Is64Bits());
  // Fetch the instance type of the receiver into result register.
  __ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ TestAndBranchIfAllClear(result, kIsIndirectStringMask, &check_sequential);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ TestAndBranchIfAllClear(result, kSlicedNotConsMask, &cons_string);

  // Handle slices.
  Label indirect_string_loaded;
  __ Ldr(result.W(),
         UntagSmiFieldMemOperand(string, SlicedString::kOffsetOffset));
  __ Ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
  __ Add(index, index, result.W());
  __ B(&indirect_string_loaded);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ Bind(&cons_string);
  __ Ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
  __ JumpIfNotRoot(result, Heap::kempty_stringRootIndex, call_runtime);
  // Get the first of the two strings and load its instance type.
  __ Ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));

  __ Bind(&indirect_string_loaded);
  __ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label external_string, check_encoding;
  __ Bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ TestAndBranchIfAnySet(result, kStringRepresentationMask, &external_string);

  // Prepare sequential strings
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ Add(string, string, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
  __ B(&check_encoding);

  // Handle external strings.
  __ Bind(&external_string);
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ Tst(result, kIsIndirectStringMask);
    __ Assert(eq, kExternalStringExpectedButNotFound);
  }
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  // TestAndBranchIfAnySet can emit Tbnz. Do not use it because call_runtime
  // can be bound far away in deferred code.
  __ Tst(result, kShortExternalStringMask);
  __ B(ne, call_runtime);
  __ Ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));

  Label ascii, done;
  __ Bind(&check_encoding);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ TestAndBranchIfAnySet(result, kStringEncodingMask, &ascii);
  // Two-byte string.
  __ Ldrh(result, MemOperand(string, index, SXTW, 1));
  __ B(&done);
  __ Bind(&ascii);
  // Ascii string.
  __ Ldrb(result, MemOperand(string, index, SXTW));
  __ Bind(&done);
}


static MemOperand ExpConstant(Register base, int index) {
  return MemOperand(base, index * kDoubleSize);
}


void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
                                   DoubleRegister input,
                                   DoubleRegister result,
                                   DoubleRegister double_temp1,
                                   DoubleRegister double_temp2,
                                   Register temp1,
                                   Register temp2,
                                   Register temp3) {
  // TODO(jbramley): There are several instances where fnmsub could be used
  // instead of fmul and fsub. Doing this changes the result, but since this is
  // an estimation anyway, does it matter?

  ASSERT(!AreAliased(input, result,
                     double_temp1, double_temp2,
                     temp1, temp2, temp3));
  ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);

  Label done;
  DoubleRegister double_temp3 = result;
  Register constants = temp3;

  // The algorithm used relies on some magic constants which are initialized in
  // ExternalReference::InitializeMathExpData().

  // Load the address of the start of the array.
  __ Mov(constants, ExternalReference::math_exp_constants(0));

  // We have to do a four-way split here:
  //  - If input <= about -708.4, the output always rounds to zero.
  //  - If input >= about 709.8, the output always rounds to +infinity.
  //  - If the input is NaN, the output is NaN.
  //  - Otherwise, the result needs to be calculated.
  Label result_is_finite_non_zero;
  // Assert that we can load offset 0 (the small input threshold) and offset 1
  // (the large input threshold) with a single ldp.
  ASSERT(kDRegSize == (ExpConstant(constants, 1).offset() -
                       ExpConstant(constants, 0).offset()));
  __ Ldp(double_temp1, double_temp2, ExpConstant(constants, 0));

  __ Fcmp(input, double_temp1);
  __ Fccmp(input, double_temp2, NoFlag, hi);
  // At this point, the condition flags can be in one of five states:
  //   NZCV
  //   1000      -708.4 < input < 709.8    result = exp(input)
  //   0110      input == 709.8            result = +infinity
  //   0010      input > 709.8             result = +infinity
  //   0011      input is NaN              result = input
  //   0000      input <= -708.4           result = +0.0

  // Continue the common case first. 'mi' tests N == 1.
  __ B(&result_is_finite_non_zero, mi);

  // TODO(jbramley): Consider adding a +infinity register for ARM64.
  __ Ldr(double_temp2, ExpConstant(constants, 2));  // Synthesize +infinity.

  // Select between +0.0 and +infinity. 'lo' tests C == 0.
  __ Fcsel(result, fp_zero, double_temp2, lo);
  // Select between {+0.0 or +infinity} and input. 'vc' tests V == 0.
  __ Fcsel(result, result, input, vc);
  __ B(&done);

  // The rest is magic, as described in InitializeMathExpData().
  __ Bind(&result_is_finite_non_zero);

  // Assert that we can load offset 3 and offset 4 with a single ldp.
  ASSERT(kDRegSize == (ExpConstant(constants, 4).offset() -
                       ExpConstant(constants, 3).offset()));
  __ Ldp(double_temp1, double_temp3, ExpConstant(constants, 3));
  __ Fmadd(double_temp1, double_temp1, input, double_temp3);
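  // Keep the low 32 bits of double_temp1; they are used below to derive the
  // table index and the exponent of the result.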
  __ Fmov(temp2.W(), double_temp1.S());
  __ Fsub(double_temp1, double_temp1, double_temp3);

  // Assert that we can load offset 5 and offset 6 with a single ldp.
  ASSERT(kDRegSize == (ExpConstant(constants, 6).offset() -
                       ExpConstant(constants, 5).offset()));
  __ Ldp(double_temp2, double_temp3, ExpConstant(constants, 5));
  // TODO(jbramley): Consider using Fnmsub here.
  __ Fmul(double_temp1, double_temp1, double_temp2);
  __ Fsub(double_temp1, double_temp1, input);

  __ Fmul(double_temp2, double_temp1, double_temp1);
  __ Fsub(double_temp3, double_temp3, double_temp1);
  __ Fmul(double_temp3, double_temp3, double_temp2);

  __ Mov(temp1.W(), Operand(temp2.W(), LSR, 11));

  __ Ldr(double_temp2, ExpConstant(constants, 7));
  // TODO(jbramley): Consider using Fnmsub here.
  __ Fmul(double_temp3, double_temp3, double_temp2);
  __ Fsub(double_temp3, double_temp3, double_temp1);

  // The 8th constant is 1.0, so use an immediate move rather than a load.
  // We can't generate a runtime assertion here as we would need to call Abort
  // in the runtime and we don't have an Isolate when we generate this code.
  __ Fmov(double_temp2, 1.0);
  __ Fadd(double_temp3, double_temp3, double_temp2);

  __ And(temp2, temp2, 0x7ff);
  __ Add(temp1, temp1, 0x3ff);

  // Do the final table lookup.
  __ Mov(temp3, ExternalReference::math_exp_log_table());

  __ Add(temp3, temp3, Operand(temp2, LSL, kDRegSizeLog2));
  __ Ldp(temp2.W(), temp3.W(), MemOperand(temp3));
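  // Insert the computed exponent bits (temp1) into the high word of the table
  // entry and reassemble the two halves into a 64-bit double in temp2.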
  __ Orr(temp1.W(), temp3.W(), Operand(temp1.W(), LSL, 20));
  __ Bfi(temp2, temp1, 32, 32);
  __ Fmov(double_temp1, temp2);

  __ Fmul(result, double_temp3, double_temp1);

  __ Bind(&done);
}

#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM64