// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_X64)

#include "codegen.h"
#include "macro-assembler.h"

namespace v8 {
namespace internal {

// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  ASSERT(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  ASSERT(masm->has_frame());
  masm->set_has_frame(false);
}
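
// Hypothetical call-site sketch (the actual callers live in the shared stub
// code): a generator that needs to call into the runtime brackets the call
// between the two helpers, so the macro assembler knows a frame is in place.
//   helper.BeforeCall(masm);  // enter an INTERNAL frame
//   ...emit the runtime call...
//   helper.AfterCall(masm);   // leave the frame again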


#define __ masm.


UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
  size_t actual_size;
  // Allocate buffer in executable space.
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
                                                 &actual_size,
                                                 true));
  if (buffer == NULL) {
    // Fall back to the library function if the stub cannot be created.
    switch (type) {
      case TranscendentalCache::SIN: return &sin;
      case TranscendentalCache::COS: return &cos;
      case TranscendentalCache::TAN: return &tan;
      case TranscendentalCache::LOG: return &log;
      default: UNIMPLEMENTED();
    }
  }

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  // xmm0: raw double input.
  // Move the double input through rbx onto the x87 stack.
  __ push(rbx);
  __ push(rdi);
  __ movq(rbx, xmm0);
  __ push(rbx);
  __ fld_d(Operand(rsp, 0));
  TranscendentalCacheStub::GenerateOperation(&masm, type);
  // The return value is expected to be in xmm0.
  __ fstp_d(Operand(rsp, 0));
  __ pop(rbx);
  __ movq(xmm0, rbx);
  __ pop(rdi);
  __ pop(rbx);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(desc.reloc_size == 0);

  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
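
// Hypothetical usage sketch (UnaryMathFunction is a double (*)(double)
// typedef declared in codegen.h):
//   UnaryMathFunction fast_log =
//       CreateTranscendentalFunction(TranscendentalCache::LOG);
//   double y = fast_log(2.0);  // ~log(2.0); plain log() on fallback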


UnaryMathFunction CreateSqrtFunction() {
  size_t actual_size;
  // Allocate buffer in executable space.
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
                                                 &actual_size,
                                                 true));
  if (buffer == NULL) return &sqrt;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  // xmm0: raw double input.
  // The input already arrives in xmm0, so a single sqrtsd suffices.
  __ sqrtsd(xmm0, xmm0);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(desc.reloc_size == 0);

  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
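
// Hypothetical usage sketch:
//   UnaryMathFunction fast_sqrt = CreateSqrtFunction();
//   double r = fast_sqrt(2.0);  // ~1.4142...; library sqrt() on fallback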


#ifdef _WIN64
typedef double (*ModuloFunction)(double, double);
// Define custom fmod implementation.
ModuloFunction CreateModuloFunction() {
  size_t actual_size;
  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
                                                 &actual_size,
                                                 true));
  CHECK(buffer);
  Assembler masm(NULL, buffer, static_cast<int>(actual_size));
  // Generated code is put into a fixed, unmovable buffer, and not into
  // the V8 heap. We can't, and don't, refer to any relocatable addresses
  // (e.g. the JavaScript nan-object).

  // The Windows 64 ABI passes double arguments in xmm0 and xmm1 and
  // returns the result in xmm0.
  // Argument backing space is allocated on the stack above
  // the return address.

  // Compute x mod y.
  // Load y and x (use the argument backing store as temporary storage).
  __ movsd(Operand(rsp, kPointerSize * 2), xmm1);
  __ movsd(Operand(rsp, kPointerSize), xmm0);
  __ fld_d(Operand(rsp, kPointerSize * 2));
  __ fld_d(Operand(rsp, kPointerSize));

  // Clear exception flags before the operation.
  {
    Label no_exceptions;
    __ fwait();
    __ fnstsw_ax();
    // Clear if the Invalid Operation or Zero Divide exceptions are set.
    __ testb(rax, Immediate(5));
    __ j(zero, &no_exceptions);
    __ fnclex();
    __ bind(&no_exceptions);
  }

  // Compute st(0) % st(1).
  {
    Label partial_remainder_loop;
    __ bind(&partial_remainder_loop);
    __ fprem();
    __ fwait();
    __ fnstsw_ax();
    __ testl(rax, Immediate(0x400 /* C2 */));
    // If C2 is set, the computation only has a partial result. Loop to
    // continue the computation.
    __ j(not_zero, &partial_remainder_loop);
  }
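
  // In effect, the loop above computes (hypothetical C sketch):
  //   do {
  //     st0 = partial_remainder(st0, st1);  // one fprem reduction step
  //   } while (fpu_status & C2);            // C2 set => result incomplete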

  Label valid_result;
  Label return_result;
  // If the Invalid Operation or Zero Divide exceptions are set,
  // return NaN.
  __ testb(rax, Immediate(5));
  __ j(zero, &valid_result);
  __ fstp(0);  // Drop the result in st(0).
  int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);
  __ movq(rcx, kNaNValue, RelocInfo::NONE);
  __ movq(Operand(rsp, kPointerSize), rcx);
  __ movsd(xmm0, Operand(rsp, kPointerSize));
  __ jmp(&return_result);

  // If the result is valid, return it.
  __ bind(&valid_result);
  __ fstp_d(Operand(rsp, kPointerSize));
  __ movsd(xmm0, Operand(rsp, kPointerSize));

  // Clean up the FPU stack and exceptions and return xmm0.
  __ bind(&return_result);
  __ fstp(0);  // Unload y.

  Label clear_exceptions;
  __ testb(rax, Immediate(0x3f /* Any Exception */));
  __ j(not_zero, &clear_exceptions);
  __ ret(0);
  __ bind(&clear_exceptions);
  __ fnclex();
  __ ret(0);

  CodeDesc desc;
  masm.GetCode(&desc);
  OS::ProtectCode(buffer, actual_size);
  // Call the function from C++ through this pointer.
  return FUNCTION_CAST<ModuloFunction>(buffer);
}
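
// Hypothetical usage sketch (Win64 only, where the hand-rolled stub stands
// in for fmod):
//   ModuloFunction modulo = CreateModuloFunction();
//   double r = modulo(5.5, 2.0);  // ~1.5, i.e. x mod y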

#endif

#undef __

// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
    MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- rax    : value
  //  -- rbx    : target map
  //  -- rcx    : key
  //  -- rdx    : receiver
  //  -- rsp[0] : return address
  // -----------------------------------
  // Set transitioned map.
  __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
  __ RecordWriteField(rdx,
                      HeapObject::kMapOffset,
                      rbx,
                      rdi,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}
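
// In effect (sketch): the receiver keeps its backing store and only its map
// changes, roughly receiver->set_map(target_map) plus the write barrier for
// the map field.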


void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
    MacroAssembler* masm, Label* fail) {
  // ----------- S t a t e -------------
  //  -- rax    : value
  //  -- rbx    : target map
  //  -- rcx    : key
  //  -- rdx    : receiver
  //  -- rsp[0] : return address
  // -----------------------------------
  // The fail label is reached only if a new backing store must be allocated
  // and the allocation requires a GC.
  Label allocated, new_backing_store, only_change_map, done;

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
  __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
  __ j(equal, &only_change_map);

  // Check the backing store for COW-ness. For COW arrays we have to
  // allocate a new backing store.
  __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
  __ CompareRoot(FieldOperand(r8, HeapObject::kMapOffset),
                 Heap::kFixedCOWArrayMapRootIndex);
  __ j(equal, &new_backing_store);
  // Check if the backing store is in new-space. If not, we need to allocate
  // a new one since the old one is in pointer-space.
  // If in new space, we can reuse the old backing store because it is
  // the same size.
  __ JumpIfNotInNewSpace(r8, rdi, &new_backing_store);

  __ movq(r14, r8);  // Destination array equals source array.

  // r8 : source FixedArray
  // r9 : elements array length
  // r14: destination FixedDoubleArray
  // Set the backing store's map.
  __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
  __ movq(FieldOperand(r14, HeapObject::kMapOffset), rdi);

  __ bind(&allocated);
  // Set transitioned map.
  __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
  __ RecordWriteField(rdx,
                      HeapObject::kMapOffset,
                      rbx,
                      rdi,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Convert smis to doubles and holes to hole NaNs. The array's length
  // remains unchanged.
  STATIC_ASSERT(FixedDoubleArray::kLengthOffset == FixedArray::kLengthOffset);
  STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);
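
  // Hypothetical sketch of the conversion loop emitted below:
  //   for (i = length - 1; i >= 0; i--) {
  //     dst[i] = is_smi(src[i]) ? static_cast<double>(untag(src[i]))
  //                             : hole_nan_bits;
  //   }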

  Label loop, entry, convert_hole;
  __ movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE);
  // r15: the-hole NaN
  __ jmp(&entry);

  // Allocate new backing store.
  __ bind(&new_backing_store);
  __ lea(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
  __ AllocateInNewSpace(rdi, r14, r11, r15, fail, TAG_OBJECT);
  // Set the backing store's map.
  __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
  __ movq(FieldOperand(r14, HeapObject::kMapOffset), rdi);
  // Set the receiver's backing store.
  __ movq(FieldOperand(rdx, JSObject::kElementsOffset), r14);
  __ movq(r11, r14);
  __ RecordWriteField(rdx,
                      JSObject::kElementsOffset,
                      r11,
                      r15,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Set the backing store's length.
  __ Integer32ToSmi(r11, r9);
  __ movq(FieldOperand(r14, FixedDoubleArray::kLengthOffset), r11);
  __ jmp(&allocated);

  __ bind(&only_change_map);
  // Set transitioned map.
  __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
  __ RecordWriteField(rdx,
                      HeapObject::kMapOffset,
                      rbx,
                      rdi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&done);

  // Conversion loop.
  __ bind(&loop);
  __ movq(rbx,
          FieldOperand(r8, r9, times_8, FixedArray::kHeaderSize));
  // r9 : current element's index
  // rbx: current element (smi-tagged)
  __ JumpIfNotSmi(rbx, &convert_hole);
  __ SmiToInteger32(rbx, rbx);
  __ cvtlsi2sd(xmm0, rbx);
  __ movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize),
           xmm0);
  __ jmp(&entry);
  __ bind(&convert_hole);

  if (FLAG_debug_code) {
    __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
    __ Assert(equal, "object found in smi-only array");
  }

  __ movq(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), r15);
  __ bind(&entry);
  __ decq(r9);
  __ j(not_sign, &loop);

  __ bind(&done);
}


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm, Label* fail) {
  // ----------- S t a t e -------------
  //  -- rax    : value
  //  -- rbx    : target map
  //  -- rcx    : key
  //  -- rdx    : receiver
  //  -- rsp[0] : return address
  // -----------------------------------
  Label loop, entry, convert_hole, gc_required, only_change_map;

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
  __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
  __ j(equal, &only_change_map);

  __ push(rax);

  __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
  __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
  // r8 : source FixedDoubleArray
  // r9 : number of elements
  __ lea(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
  __ AllocateInNewSpace(rdi, r11, r14, r15, &gc_required, TAG_OBJECT);
  // r11: destination FixedArray
  __ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex);
  __ movq(FieldOperand(r11, HeapObject::kMapOffset), rdi);
  __ Integer32ToSmi(r14, r9);
  __ movq(FieldOperand(r11, FixedArray::kLengthOffset), r14);

  // Prepare for conversion loop.
  __ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE);
  __ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
  // rsi: the-hole NaN
  // rdi: pointer to the-hole
  __ jmp(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ pop(rax);
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
  __ jmp(fail);

  // Box doubles into heap numbers.
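  // Hypothetical sketch of the loop emitted below:
  //   for (i = length - 1; i >= 0; i--) {
  //     dst[i] = (bits(src[i]) == hole_nan_bits)
  //                  ? the_hole
  //                  : AllocateHeapNumber(src[i]);  // may hit gc_required
  //   }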
  __ bind(&loop);
  __ movq(r14, FieldOperand(r8,
                            r9,
                            times_pointer_size,
                            FixedDoubleArray::kHeaderSize));
  // r9 : current element's index
  // r14: current element
  __ cmpq(r14, rsi);
  __ j(equal, &convert_hole);

  // Non-hole double, copy value into a heap number.
  __ AllocateHeapNumber(rax, r15, &gc_required);
  // rax: new heap number
  __ movq(FieldOperand(rax, HeapNumber::kValueOffset), r14);
  __ movq(FieldOperand(r11,
                       r9,
                       times_pointer_size,
                       FixedArray::kHeaderSize),
          rax);
  __ movq(r15, r9);
  __ RecordWriteArray(r11,
                      rax,
                      r15,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&entry, Label::kNear);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ movq(FieldOperand(r11,
                       r9,
                       times_pointer_size,
                       FixedArray::kHeaderSize),
          rdi);

  __ bind(&entry);
  __ decq(r9);
  __ j(not_sign, &loop);

  // Replace receiver's backing store with newly created and filled FixedArray.
  __ movq(FieldOperand(rdx, JSObject::kElementsOffset), r11);
  __ RecordWriteField(rdx,
                      JSObject::kElementsOffset,
                      r11,
                      r15,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ pop(rax);
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));

  __ bind(&only_change_map);
  // Set transitioned map.
  __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
  __ RecordWriteField(rdx,
                      HeapObject::kMapOffset,
                      rbx,
                      rdi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
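  // Hypothetical sketch of the dispatch emitted below:
  //   resolve a slice or flat cons to its underlying string, adjusting
  //       index for slices;
  //   if (external) result = resource_data[index];  // short externals go
  //                                                 // to the runtime
  //   else          result = seq_chars[index];
  //   (byte vs. two-byte load is chosen by the encoding bit)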
  // Fetch the instance type of the receiver into the result register.
  __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ testb(result, Immediate(kIsIndirectStringMask));
  __ j(zero, &check_sequential, Label::kNear);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ testb(result, Immediate(kSlicedNotConsMask));
  __ j(zero, &cons_string, Label::kNear);

  // Handle slices.
  Label indirect_string_loaded;
  __ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset));
  __ addq(index, result);
  __ movq(string, FieldOperand(string, SlicedString::kParentOffset));
  __ jmp(&indirect_string_loaded, Label::kNear);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
                 Heap::kEmptyStringRootIndex);
  __ j(not_equal, call_runtime);
  __ movq(string, FieldOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label seq_string;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ testb(result, Immediate(kStringRepresentationMask));
  __ j(zero, &seq_string, Label::kNear);

  // Handle external strings.
  Label ascii_external, done;
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ testb(result, Immediate(kIsIndirectStringMask));
    __ Assert(zero, "external string expected, but not found");
  }
  // Rule out short external strings.
  STATIC_CHECK(kShortExternalStringTag != 0);
  __ testb(result, Immediate(kShortExternalStringTag));
  __ j(not_zero, call_runtime);
  // Check encoding.
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ testb(result, Immediate(kStringEncodingMask));
  __ movq(result, FieldOperand(string, ExternalString::kResourceDataOffset));
  __ j(not_equal, &ascii_external, Label::kNear);
  // Two-byte string.
  __ movzxwl(result, Operand(result, index, times_2, 0));
  __ jmp(&done, Label::kNear);
  __ bind(&ascii_external);
  // ASCII string.
  __ movzxbl(result, Operand(result, index, times_1, 0));
  __ jmp(&done, Label::kNear);

  // Dispatch on the encoding: ASCII or two-byte.
  Label ascii;
  __ bind(&seq_string);
  STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
  __ testb(result, Immediate(kStringEncodingMask));
  __ j(not_zero, &ascii, Label::kNear);

  // Two-byte string.
  // Load the two-byte character code into the result register.
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
  __ movzxwl(result, FieldOperand(string,
                                  index,
                                  times_2,
                                  SeqTwoByteString::kHeaderSize));
  __ jmp(&done, Label::kNear);

  // ASCII string.
  // Load the byte into the result register.
  __ bind(&ascii);
  __ movzxbl(result, FieldOperand(string,
                                  index,
                                  times_1,
                                  SeqAsciiString::kHeaderSize));
  __ bind(&done);
}

#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64