// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_X64

#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
#include "src/x64/code-stubs-x64.h"

namespace v8 {
namespace internal {

static void InitializeArrayConstructorDescriptor(
    Isolate* isolate, CodeStubDescriptor* descriptor,
    int constant_stack_parameter_count) {
  Address deopt_handler = Runtime::FunctionForId(
      Runtime::kArrayConstructor)->entry;

  if (constant_stack_parameter_count == 0) {
    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE);
  } else {
    descriptor->Initialize(rax, deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE);
  }
}


static void InitializeInternalArrayConstructorDescriptor(
    Isolate* isolate, CodeStubDescriptor* descriptor,
    int constant_stack_parameter_count) {
  Address deopt_handler = Runtime::FunctionForId(
      Runtime::kInternalArrayConstructor)->entry;

  if (constant_stack_parameter_count == 0) {
    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE);
  } else {
    descriptor->Initialize(rax, deopt_handler, constant_stack_parameter_count,
                           JS_FUNCTION_STUB_MODE);
  }
}


void ArrayNoArgumentConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
}


void ArraySingleArgumentConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
}


void ArrayNArgumentsConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
}


void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
}


void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
}


void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
    CodeStubDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
}


#define __ ACCESS_MASM(masm)


void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
                                               ExternalReference miss) {
  // Update the static counter each time a new code stub is generated.
  isolate()->counters()->code_stubs()->Increment();

  CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
  int param_count = descriptor.GetRegisterParameterCount();
  {
    // Call the runtime system in a fresh internal frame.
    FrameScope scope(masm, StackFrame::INTERNAL);
    DCHECK(param_count == 0 ||
           rax.is(descriptor.GetRegisterParameter(param_count - 1)));
    // Push arguments
    for (int i = 0; i < param_count; ++i) {
      __ Push(descriptor.GetRegisterParameter(i));
    }
    __ CallExternalReference(miss, param_count);
  }

  __ Ret();
}


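// Handles overflow of the write barrier's store buffer: spills the
// caller-saved registers, calls out to the C++ overflow handler, and
// restores them afterwards.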
void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
  __ PushCallerSaved(save_doubles() ? kSaveFPRegs : kDontSaveFPRegs);
  const int argument_count = 1;
  __ PrepareCallCFunction(argument_count);
  __ LoadAddress(arg_reg_1,
                 ExternalReference::isolate_address(isolate()));

  AllowExternalCallThatCantCauseGC scope(masm);
  __ CallCFunction(
      ExternalReference::store_buffer_overflow_function(isolate()),
      argument_count);
  __ PopCallerSaved(save_doubles() ? kSaveFPRegs : kDontSaveFPRegs);
  __ ret(0);
}


class FloatingPointHelper : public AllStatic {
 public:
  enum ConvertUndefined {
    CONVERT_UNDEFINED_TO_ZERO,
    BAILOUT_ON_UNDEFINED
  };
  // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
  // If the operands are not both numbers, jump to not_numbers.
  // Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
  // NumberOperands assumes both are smis or heap numbers.
  static void LoadSSE2UnknownOperands(MacroAssembler* masm,
                                      Label* not_numbers);
};


void DoubleToIStub::Generate(MacroAssembler* masm) {
  Register input_reg = this->source();
  Register final_result_reg = this->destination();
  DCHECK(is_truncating());

  Label check_negative, process_64_bits, done;

  int double_offset = offset();

  // Account for return address and saved regs if input is rsp.
  if (input_reg.is(rsp)) double_offset += 3 * kRegisterSize;

  MemOperand mantissa_operand(MemOperand(input_reg, double_offset));
  MemOperand exponent_operand(MemOperand(input_reg,
                                         double_offset + kDoubleSize / 2));

  Register scratch1;
  Register scratch_candidates[3] = { rbx, rdx, rdi };
  for (int i = 0; i < 3; i++) {
    scratch1 = scratch_candidates[i];
    if (!final_result_reg.is(scratch1) && !input_reg.is(scratch1)) break;
  }

  // Since we must use rcx for shifts below, use some other register (rax)
  // to calculate the result if rcx is the requested return register.
  Register result_reg = final_result_reg.is(rcx) ? rax : final_result_reg;
  // Save rcx if it isn't the return register and therefore volatile, or if it
  // is the return register, then save the temp register we use in its stead
  // for the result.
  Register save_reg = final_result_reg.is(rcx) ? rax : rcx;
  __ pushq(scratch1);
  __ pushq(save_reg);

  bool stash_exponent_copy = !input_reg.is(rsp);
  __ movl(scratch1, mantissa_operand);
  __ Movsd(xmm0, mantissa_operand);
  __ movl(rcx, exponent_operand);
  if (stash_exponent_copy) __ pushq(rcx);

  __ andl(rcx, Immediate(HeapNumber::kExponentMask));
  __ shrl(rcx, Immediate(HeapNumber::kExponentShift));
  __ leal(result_reg, MemOperand(rcx, -HeapNumber::kExponentBias));
  __ cmpl(result_reg, Immediate(HeapNumber::kMantissaBits));
  __ j(below, &process_64_bits);

  // Result is entirely in lower 32-bits of mantissa
  int delta = HeapNumber::kExponentBias + Double::kPhysicalSignificandSize;
  __ subl(rcx, Immediate(delta));
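  // Here delta == exponent bias + 52, so after the subtraction rcx holds the
  // unbiased exponent minus the significand width: how far the low 32
  // mantissa bits must be shifted left to produce the low 32 bits of the
  // integer value. A shift count above 31 moves every significant bit out of
  // the low word, so the truncated 32-bit result is 0.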
  __ xorl(result_reg, result_reg);
  __ cmpl(rcx, Immediate(31));
  __ j(above, &done);
  __ shll_cl(scratch1);
  __ jmp(&check_negative);

  __ bind(&process_64_bits);
  __ Cvttsd2siq(result_reg, xmm0);
  __ jmp(&done, Label::kNear);

  // If the double was negative, negate the integer result.
  __ bind(&check_negative);
  __ movl(result_reg, scratch1);
  __ negl(result_reg);
  if (stash_exponent_copy) {
    __ cmpl(MemOperand(rsp, 0), Immediate(0));
  } else {
    __ cmpl(exponent_operand, Immediate(0));
  }
  __ cmovl(greater, result_reg, scratch1);
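  // The sign bit lives in the exponent word, so the signed compare of that
  // word against zero distinguishes positive doubles (keep scratch1 as is)
  // from negative ones (keep the negated copy computed above).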

  // Restore registers
  __ bind(&done);
  if (stash_exponent_copy) {
    __ addp(rsp, Immediate(kDoubleSize));
  }
  if (!final_result_reg.is(result_reg)) {
    DCHECK(final_result_reg.is(rcx));
    __ movl(final_result_reg, result_reg);
  }
  __ popq(save_reg);
  __ popq(scratch1);
  __ ret(0);
}


void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
                                                  Label* not_numbers) {
  Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
  // Load operand in rdx into xmm0, or branch to not_numbers.
  __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
  __ JumpIfSmi(rdx, &load_smi_rdx);
  __ cmpp(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
  __ j(not_equal, not_numbers);  // Argument in rdx is not a number.
  __ Movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
  // Load operand in rax into xmm1, or branch to not_numbers.
  __ JumpIfSmi(rax, &load_smi_rax);

  __ bind(&load_nonsmi_rax);
  __ cmpp(FieldOperand(rax, HeapObject::kMapOffset), rcx);
  __ j(not_equal, not_numbers);
  __ Movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
  __ jmp(&done);

  __ bind(&load_smi_rdx);
  __ SmiToInteger32(kScratchRegister, rdx);
  __ Cvtlsi2sd(xmm0, kScratchRegister);
  __ JumpIfNotSmi(rax, &load_nonsmi_rax);

  __ bind(&load_smi_rax);
  __ SmiToInteger32(kScratchRegister, rax);
  __ Cvtlsi2sd(xmm1, kScratchRegister);
  __ bind(&done);
}


void MathPowStub::Generate(MacroAssembler* masm) {
  const Register exponent = MathPowTaggedDescriptor::exponent();
  DCHECK(exponent.is(rdx));
  const Register base = rax;
  const Register scratch = rcx;
  const XMMRegister double_result = xmm3;
  const XMMRegister double_base = xmm2;
  const XMMRegister double_exponent = xmm1;
  const XMMRegister double_scratch = xmm4;
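
  // Note on modes: the stub is specialized on exponent_type(). ON_STACK
  // takes base and exponent as tagged values on the stack, TAGGED takes a
  // tagged exponent in a register, and INTEGER expects an already untagged
  // int32 exponent; these correspond to the branches below.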

  Label call_runtime, done, exponent_not_smi, int_exponent;

  // Save 1 in double_result - we need this several times later on.
  __ movp(scratch, Immediate(1));
  __ Cvtlsi2sd(double_result, scratch);

  if (exponent_type() == ON_STACK) {
    Label base_is_smi, unpack_exponent;
    // The exponent and base are supplied as arguments on the stack.
    // This can only happen if the stub is called from non-optimized code.
    // Load input parameters from stack.
    StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
    __ movp(base, args.GetArgumentOperand(0));
    __ movp(exponent, args.GetArgumentOperand(1));
    __ JumpIfSmi(base, &base_is_smi, Label::kNear);
    __ CompareRoot(FieldOperand(base, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);
    __ j(not_equal, &call_runtime);

    __ Movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset));
    __ jmp(&unpack_exponent, Label::kNear);

    __ bind(&base_is_smi);
    __ SmiToInteger32(base, base);
    __ Cvtlsi2sd(double_base, base);
    __ bind(&unpack_exponent);

    __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
    __ SmiToInteger32(exponent, exponent);
    __ jmp(&int_exponent);

    __ bind(&exponent_not_smi);
    __ CompareRoot(FieldOperand(exponent, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);
    __ j(not_equal, &call_runtime);
    __ Movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
  } else if (exponent_type() == TAGGED) {
    __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
    __ SmiToInteger32(exponent, exponent);
    __ jmp(&int_exponent);

    __ bind(&exponent_not_smi);
    __ Movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
  }

  if (exponent_type() != INTEGER) {
    Label fast_power, try_arithmetic_simplification;
    // Detect integer exponents stored as double.
    __ DoubleToI(exponent, double_exponent, double_scratch,
                 TREAT_MINUS_ZERO_AS_ZERO, &try_arithmetic_simplification,
                 &try_arithmetic_simplification,
                 &try_arithmetic_simplification);
    __ jmp(&int_exponent);

    __ bind(&try_arithmetic_simplification);
    __ Cvttsd2si(exponent, double_exponent);
    // Skip to runtime if possibly NaN (indicated by the indefinite integer).
    __ cmpl(exponent, Immediate(0x1));
    __ j(overflow, &call_runtime);
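    // Cvttsd2si returns 0x80000000 (the "indefinite integer") for NaN and
    // out-of-range inputs; comparing it against 1 is the only case that sets
    // the overflow flag here, since it computes INT_MIN - 1.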

    if (exponent_type() == ON_STACK) {
      // Detect square root case. Crankshaft detects constant +/-0.5 at
      // compile time and uses DoMathPowHalf instead. We then skip this check
      // for non-constant cases of +/-0.5 as these hardly occur.
      Label continue_sqrt, continue_rsqrt, not_plus_half;
      // Test for 0.5.
      // Load double_scratch with 0.5.
      __ movq(scratch, V8_UINT64_C(0x3FE0000000000000));
      __ Movq(double_scratch, scratch);
      // Already ruled out NaNs for exponent.
      __ Ucomisd(double_scratch, double_exponent);
      __ j(not_equal, &not_plus_half, Label::kNear);

      // Calculates square root of base. Check for the special case of
      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
      // According to IEEE-754, double-precision -Infinity has the highest
      // 12 bits set and the lowest 52 bits cleared.
      __ movq(scratch, V8_UINT64_C(0xFFF0000000000000));
      __ Movq(double_scratch, scratch);
      __ Ucomisd(double_scratch, double_base);
      // Comparing -Infinity with NaN results in "unordered", which sets the
      // zero flag as if both were equal. However, it also sets the carry flag.
      __ j(not_equal, &continue_sqrt, Label::kNear);
      __ j(carry, &continue_sqrt, Label::kNear);

      // Set result to Infinity in the special case.
      __ Xorpd(double_result, double_result);
      __ Subsd(double_result, double_scratch);
      __ jmp(&done);

      __ bind(&continue_sqrt);
      // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
      __ Xorpd(double_scratch, double_scratch);
      __ Addsd(double_scratch, double_base);  // Convert -0 to 0.
      __ Sqrtsd(double_result, double_scratch);
      __ jmp(&done);

      // Test for -0.5.
      __ bind(&not_plus_half);
      // Load double_scratch with -0.5 by subtracting 1.
      __ Subsd(double_scratch, double_result);
      // Already ruled out NaNs for exponent.
      __ Ucomisd(double_scratch, double_exponent);
      __ j(not_equal, &fast_power, Label::kNear);

      // Calculates reciprocal of square root of base. Check for the special
      // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
      // According to IEEE-754, double-precision -Infinity has the highest
      // 12 bits set and the lowest 52 bits cleared.
      __ movq(scratch, V8_UINT64_C(0xFFF0000000000000));
      __ Movq(double_scratch, scratch);
      __ Ucomisd(double_scratch, double_base);
      // Comparing -Infinity with NaN results in "unordered", which sets the
      // zero flag as if both were equal. However, it also sets the carry flag.
      __ j(not_equal, &continue_rsqrt, Label::kNear);
      __ j(carry, &continue_rsqrt, Label::kNear);

      // Set result to 0 in the special case.
      __ Xorpd(double_result, double_result);
      __ jmp(&done);

      __ bind(&continue_rsqrt);
      // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
      __ Xorpd(double_exponent, double_exponent);
      __ Addsd(double_exponent, double_base);  // Convert -0 to +0.
      __ Sqrtsd(double_exponent, double_exponent);
      __ Divsd(double_result, double_exponent);
      __ jmp(&done);
    }

    // Using FPU instructions to calculate power.
    Label fast_power_failed;
    __ bind(&fast_power);
    __ fnclex();  // Clear flags to catch exceptions later.
    // Transfer (B)ase and (E)xponent onto the FPU register stack.
    __ subp(rsp, Immediate(kDoubleSize));
    __ Movsd(Operand(rsp, 0), double_exponent);
    __ fld_d(Operand(rsp, 0));  // E
    __ Movsd(Operand(rsp, 0), double_base);
    __ fld_d(Operand(rsp, 0));  // B, E

    // Exponent is in st(1) and base is in st(0)
    // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
    // FYL2X calculates st(1) * log2(st(0))
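    // Note: F2XM1 below is only defined for arguments in (-1, 1), which is
    // why X is first split into rnd(X) and the fraction X - rnd(X); FSCALE
    // then recombines the pieces by multiplying 2^(X - rnd(X)) by 2^rnd(X).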
    __ fyl2x();    // X
    __ fld(0);     // X, X
    __ frndint();  // rnd(X), X
    __ fsub(1);    // rnd(X), X-rnd(X)
    __ fxch(1);    // X - rnd(X), rnd(X)
    // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
    __ f2xm1();    // 2^(X-rnd(X)) - 1, rnd(X)
    __ fld1();     // 1, 2^(X-rnd(X)) - 1, rnd(X)
    __ faddp(1);   // 2^(X-rnd(X)), rnd(X)
    // FSCALE calculates st(0) * 2^st(1)
    __ fscale();   // 2^X, rnd(X)
    __ fstp(1);
    // Bail out to runtime in case of exceptions in the status word.
    __ fnstsw_ax();
    __ testb(rax, Immediate(0x5F));  // Check for all but precision exception.
    __ j(not_zero, &fast_power_failed, Label::kNear);
    __ fstp_d(Operand(rsp, 0));
    __ Movsd(double_result, Operand(rsp, 0));
    __ addp(rsp, Immediate(kDoubleSize));
    __ jmp(&done);

    __ bind(&fast_power_failed);
    __ fninit();
    __ addp(rsp, Immediate(kDoubleSize));
    __ jmp(&call_runtime);
  }

  // Calculate power with integer exponent.
  __ bind(&int_exponent);
  const XMMRegister double_scratch2 = double_exponent;
  // Back up exponent as we need to check if exponent is negative later.
  __ movp(scratch, exponent);  // Back up exponent.
  __ Movsd(double_scratch, double_base);  // Back up base.
  __ Movsd(double_scratch2, double_result);  // Load double_exponent with 1.

  // Get absolute value of exponent.
  Label no_neg, while_true, while_false;
  __ testl(scratch, scratch);
  __ j(positive, &no_neg, Label::kNear);
  __ negl(scratch);
  __ bind(&no_neg);

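  // The loop below is exponentiation by squaring: each shift consumes one
  // exponent bit (LSB first) while double_scratch holds successive squarings
  // of the base. For example, scratch == 13 (0b1101) multiplies the set
  // bits' powers together as b^1 * b^4 * b^8 == b^13.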
  __ j(zero, &while_false, Label::kNear);
  __ shrl(scratch, Immediate(1));
  // Above condition means CF==0 && ZF==0. This means that the
  // bit that has been shifted out is 0 and the result is not 0.
  __ j(above, &while_true, Label::kNear);
  __ Movsd(double_result, double_scratch);
  __ j(zero, &while_false, Label::kNear);

  __ bind(&while_true);
  __ shrl(scratch, Immediate(1));
  __ Mulsd(double_scratch, double_scratch);
  __ j(above, &while_true, Label::kNear);
  __ Mulsd(double_result, double_scratch);
  __ j(not_zero, &while_true);

  __ bind(&while_false);
  // If the exponent is negative, return 1/result.
  __ testl(exponent, exponent);
  __ j(greater, &done);
  __ Divsd(double_scratch2, double_result);
  __ Movsd(double_result, double_scratch2);
  // Test whether result is zero. Bail out to check for subnormal result.
  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
  __ Xorpd(double_scratch2, double_scratch2);
  __ Ucomisd(double_scratch2, double_result);
  // double_exponent aliased as double_scratch2 has already been overwritten
  // and may not have contained the exponent value in the first place when the
  // input was a smi. We reset it with exponent value before bailing out.
  __ j(not_equal, &done);
  __ Cvtlsi2sd(double_exponent, exponent);

  // Returning or bailing out.
  Counters* counters = isolate()->counters();
  if (exponent_type() == ON_STACK) {
    // The arguments are still on the stack.
    __ bind(&call_runtime);
    __ TailCallRuntime(Runtime::kMathPowRT);

    // The stub is called from non-optimized code, which expects the result
    // as heap number in rax.
    __ bind(&done);
    __ AllocateHeapNumber(rax, rcx, &call_runtime);
    __ Movsd(FieldOperand(rax, HeapNumber::kValueOffset), double_result);
    __ IncrementCounter(counters->math_pow(), 1);
    __ ret(2 * kPointerSize);
  } else {
    __ bind(&call_runtime);
    // Move base to the correct argument register. Exponent is already in xmm1.
    __ Movsd(xmm0, double_base);
    DCHECK(double_exponent.is(xmm1));
    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ PrepareCallCFunction(2);
      __ CallCFunction(
          ExternalReference::power_double_double_function(isolate()), 2);
    }
    // Return value is in xmm0.
    __ Movsd(double_result, xmm0);

    __ bind(&done);
    __ IncrementCounter(counters->math_pow(), 1);
    __ ret(0);
  }
}


void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
  Label miss;
  Register receiver = LoadDescriptor::ReceiverRegister();
  // Ensure that the vector and slot registers won't be clobbered before
  // calling the miss handler.
  DCHECK(!AreAliased(r8, r9, LoadWithVectorDescriptor::VectorRegister(),
                     LoadDescriptor::SlotRegister()));

  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r8,
                                                          r9, &miss);
  __ bind(&miss);
  PropertyAccessCompiler::TailCallBuiltin(
      masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
}


void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
  // The key is in rdx and the parameter count is in rax.
  DCHECK(rdx.is(ArgumentsAccessReadDescriptor::index()));
  DCHECK(rax.is(ArgumentsAccessReadDescriptor::parameter_count()));

  // Check that the key is a smi.
  Label slow;
  __ JumpIfNotSmi(rdx, &slow);

  // Check if the calling frame is an arguments adaptor frame. We look at the
  // context offset, and if the frame is not a regular one, then we find a
  // Smi instead of the context. We can't use SmiCompare here, because that
  // only works for comparing two smis.
  Label adaptor;
  __ movp(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
  __ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ j(equal, &adaptor);

  // Check index against formal parameters count limit passed in
  // through register rax. Use unsigned comparison to get negative
  // check for free.
  __ cmpp(rdx, rax);
  __ j(above_equal, &slow);
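  // (A negative smi key, reinterpreted as an unsigned word, is larger than
  // any valid parameter count, so the single unsigned compare above rejects
  // negative indices as well.)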

  // Read the argument from the stack and return it.
  __ SmiSub(rax, rax, rdx);
  __ SmiToInteger32(rax, rax);
  StackArgumentsAccessor args(rbp, rax, ARGUMENTS_DONT_CONTAIN_RECEIVER);
  __ movp(rax, args.GetArgumentOperand(0));
  __ Ret();

  // Arguments adaptor case: Check index against actual arguments
  // limit found in the arguments adaptor frame. Use unsigned
  // comparison to get negative check for free.
  __ bind(&adaptor);
  __ movp(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ cmpp(rdx, rcx);
  __ j(above_equal, &slow);

  // Read the argument from the stack and return it.
  __ SmiSub(rcx, rcx, rdx);
  __ SmiToInteger32(rcx, rcx);
  StackArgumentsAccessor adaptor_args(rbx, rcx,
                                      ARGUMENTS_DONT_CONTAIN_RECEIVER);
  __ movp(rax, adaptor_args.GetArgumentOperand(0));
  __ Ret();

  // Slow-case: Handle non-smi or out-of-bounds access to arguments
  // by calling the runtime system.
  __ bind(&slow);
  __ PopReturnAddressTo(rbx);
  __ Push(rdx);
  __ PushReturnAddressFrom(rbx);
  __ TailCallRuntime(Runtime::kArguments);
}


void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
  // rcx : number of parameters (tagged)
  // rdx : parameters pointer
  // rdi : function
  // rsp[0] : return address
  // Registers used over the whole function:
  //  rbx: the mapped parameter count (untagged)
  //  rax: the allocated object (tagged).
  Factory* factory = isolate()->factory();

  DCHECK(rdi.is(ArgumentsAccessNewDescriptor::function()));
  DCHECK(rcx.is(ArgumentsAccessNewDescriptor::parameter_count()));
  DCHECK(rdx.is(ArgumentsAccessNewDescriptor::parameter_pointer()));

  __ SmiToInteger64(rbx, rcx);
  // rbx = parameter count (untagged)

  // Check if the calling frame is an arguments adaptor frame.
  Label adaptor_frame, try_allocate, runtime;
  __ movp(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
  __ movp(r8, Operand(rax, StandardFrameConstants::kContextOffset));
  __ Cmp(r8, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ j(equal, &adaptor_frame);

  // No adaptor, parameter count = argument count.
  __ movp(r11, rbx);
  __ jmp(&try_allocate, Label::kNear);

  // We have an adaptor frame. Patch the parameters pointer.
  __ bind(&adaptor_frame);
  __ SmiToInteger64(
      r11, Operand(rax, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ leap(rdx, Operand(rax, r11, times_pointer_size,
                       StandardFrameConstants::kCallerSPOffset));

  // rbx = parameter count (untagged)
  // r11 = argument count (untagged)
  // Compute the mapped parameter count = min(rbx, r11) in rbx.
  __ cmpp(rbx, r11);
  __ j(less_equal, &try_allocate, Label::kNear);
  __ movp(rbx, r11);

  __ bind(&try_allocate);

  // Compute the sizes of backing store, parameter map, and arguments object.
  // 1. Parameter map, has 2 extra words containing context and backing store.
  const int kParameterMapHeaderSize =
      FixedArray::kHeaderSize + 2 * kPointerSize;
  Label no_parameter_map;
  __ xorp(r8, r8);
  __ testp(rbx, rbx);
  __ j(zero, &no_parameter_map, Label::kNear);
  __ leap(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
  __ bind(&no_parameter_map);

  // 2. Backing store.
  __ leap(r8, Operand(r8, r11, times_pointer_size, FixedArray::kHeaderSize));

  // 3. Arguments object.
  __ addp(r8, Immediate(Heap::kSloppyArgumentsObjectSize));
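
  // Layout of the single allocation, in order (all tagged once done):
  //   [ arguments object | parameter map (only if rbx > 0) | backing store ]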

  // Do the allocation of all three objects in one go.
  __ Allocate(r8, rax, r9, no_reg, &runtime, TAG_OBJECT);

  // rax = address of new object(s) (tagged)
  // r11 = argument count (untagged)
  // Get the arguments map from the current native context into r9.
  Label has_mapped_parameters, instantiate;
  __ movp(r9, NativeContextOperand());
  __ testp(rbx, rbx);
  __ j(not_zero, &has_mapped_parameters, Label::kNear);

  const int kIndex = Context::SLOPPY_ARGUMENTS_MAP_INDEX;
  __ movp(r9, Operand(r9, Context::SlotOffset(kIndex)));
  __ jmp(&instantiate, Label::kNear);

  const int kAliasedIndex = Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX;
  __ bind(&has_mapped_parameters);
  __ movp(r9, Operand(r9, Context::SlotOffset(kAliasedIndex)));
  __ bind(&instantiate);

  // rax = address of new object (tagged)
  // rbx = mapped parameter count (untagged)
  // r11 = argument count (untagged)
  // r9 = address of arguments map (tagged)
  __ movp(FieldOperand(rax, JSObject::kMapOffset), r9);
  __ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex);
  __ movp(FieldOperand(rax, JSObject::kPropertiesOffset), kScratchRegister);
  __ movp(FieldOperand(rax, JSObject::kElementsOffset), kScratchRegister);

  // Set up the callee in-object property.
  STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
  __ AssertNotSmi(rdi);
  __ movp(FieldOperand(rax, JSObject::kHeaderSize +
                                Heap::kArgumentsCalleeIndex * kPointerSize),
          rdi);

  // Use the length (smi tagged) and set that as an in-object property too.
  // Note: r11 is tagged from here on.
  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
  __ Integer32ToSmi(r11, r11);
  __ movp(FieldOperand(rax, JSObject::kHeaderSize +
                                Heap::kArgumentsLengthIndex * kPointerSize),
          r11);

  // Set up the elements pointer in the allocated arguments object.
  // If we allocated a parameter map, rdi will point there, otherwise to the
  // backing store.
  __ leap(rdi, Operand(rax, Heap::kSloppyArgumentsObjectSize));
  __ movp(FieldOperand(rax, JSObject::kElementsOffset), rdi);

  // rax = address of new object (tagged)
  // rbx = mapped parameter count (untagged)
  // r11 = argument count (tagged)
  // rdi = address of parameter map or backing store (tagged)

  // Initialize parameter map. If there are no mapped arguments, we're done.
  Label skip_parameter_map;
  __ testp(rbx, rbx);
  __ j(zero, &skip_parameter_map);

  __ LoadRoot(kScratchRegister, Heap::kSloppyArgumentsElementsMapRootIndex);
  // rbx contains the untagged argument count. Add 2 and tag to write.
  __ movp(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
  __ Integer64PlusConstantToSmi(r9, rbx, 2);
  __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), r9);
  __ movp(FieldOperand(rdi, FixedArray::kHeaderSize + 0 * kPointerSize), rsi);
  __ leap(r9, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
  __ movp(FieldOperand(rdi, FixedArray::kHeaderSize + 1 * kPointerSize), r9);

  // Copy the parameter slots and the holes in the arguments.
  // We need to fill in mapped_parameter_count slots. They index the context,
  // where parameters are stored in reverse order, at
  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
  // The mapped parameters thus need to get indices
  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
  // We loop from right to left.
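  // For example, with parameter_count == 3 and a mapped count of 2, the loop
  // writes context indices MIN_CONTEXT_SLOTS+2 and MIN_CONTEXT_SLOTS+1, in
  // that order.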
  Label parameters_loop, parameters_test;

  // Load tagged parameter count into r9.
  __ Integer32ToSmi(r9, rbx);
  __ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
  __ addp(r8, rcx);
  __ subp(r8, r9);
  __ movp(rcx, rdi);
  __ leap(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
  __ SmiToInteger64(r9, r9);
  // r9 = loop variable (untagged)
  // r8 = mapping index (tagged)
  // rcx = address of parameter map (tagged)
  // rdi = address of backing store (tagged)
  __ jmp(&parameters_test, Label::kNear);

  __ bind(&parameters_loop);
  __ subp(r9, Immediate(1));
  __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
  __ movp(FieldOperand(rcx, r9, times_pointer_size, kParameterMapHeaderSize),
          r8);
  __ movp(FieldOperand(rdi, r9, times_pointer_size, FixedArray::kHeaderSize),
          kScratchRegister);
  __ SmiAddConstant(r8, r8, Smi::FromInt(1));
  __ bind(&parameters_test);
  __ testp(r9, r9);
  __ j(not_zero, &parameters_loop, Label::kNear);

  __ bind(&skip_parameter_map);

  // r11 = argument count (tagged)
  // rdi = address of backing store (tagged)
  // Copy arguments header and remaining slots (if there are any).
  __ Move(FieldOperand(rdi, FixedArray::kMapOffset),
          factory->fixed_array_map());
  __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), r11);

  Label arguments_loop, arguments_test;
  __ movp(r8, rbx);
  // Untag r11 for the loop below.
  __ SmiToInteger64(r11, r11);
  __ leap(kScratchRegister, Operand(r8, times_pointer_size, 0));
  __ subp(rdx, kScratchRegister);
  __ jmp(&arguments_test, Label::kNear);

  __ bind(&arguments_loop);
  __ subp(rdx, Immediate(kPointerSize));
  __ movp(r9, Operand(rdx, 0));
  __ movp(FieldOperand(rdi, r8,
                       times_pointer_size,
                       FixedArray::kHeaderSize),
          r9);
  __ addp(r8, Immediate(1));

  __ bind(&arguments_test);
  __ cmpp(r8, r11);
  __ j(less, &arguments_loop, Label::kNear);

  // Return.
  __ ret(0);

  // Do the runtime call to allocate the arguments object.
  // r11 = argument count (untagged)
  __ bind(&runtime);
  __ Integer32ToSmi(r11, r11);
  __ PopReturnAddressTo(rax);
  __ Push(rdi);  // Push function.
  __ Push(rdx);  // Push parameters pointer.
  __ Push(r11);  // Push parameter count.
  __ PushReturnAddressFrom(rax);
  __ TailCallRuntime(Runtime::kNewSloppyArguments);
}


void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
  // rcx : number of parameters (tagged)
  // rdx : parameters pointer
  // rdi : function
  // rsp[0] : return address

  DCHECK(rdi.is(ArgumentsAccessNewDescriptor::function()));
  DCHECK(rcx.is(ArgumentsAccessNewDescriptor::parameter_count()));
  DCHECK(rdx.is(ArgumentsAccessNewDescriptor::parameter_pointer()));

  // Check if the calling frame is an arguments adaptor frame.
  Label runtime;
  __ movp(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
  __ movp(rax, Operand(rbx, StandardFrameConstants::kContextOffset));
  __ Cmp(rax, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ j(not_equal, &runtime);

  // Patch the arguments.length and the parameters pointer.
  StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
  __ movp(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiToInteger64(rax, rcx);
  __ leap(rdx, Operand(rbx, rax, times_pointer_size,
                       StandardFrameConstants::kCallerSPOffset));

  __ bind(&runtime);
  __ PopReturnAddressTo(rax);
  __ Push(rdi);  // Push function.
  __ Push(rdx);  // Push parameters pointer.
  __ Push(rcx);  // Push parameter count.
  __ PushReturnAddressFrom(rax);
  __ TailCallRuntime(Runtime::kNewSloppyArguments);
}


void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
  // rcx : number of parameters (tagged)
  // rdx : parameters pointer
  // rbx : rest parameter index (tagged)
  // rsp[0] : return address

  // Check if the calling frame is an arguments adaptor frame.
  Label runtime;
  __ movp(r8, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
  __ movp(rax, Operand(r8, StandardFrameConstants::kContextOffset));
  __ Cmp(rax, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ j(not_equal, &runtime);

  // Patch the arguments.length and the parameters pointer.
  StackArgumentsAccessor args(rsp, 4, ARGUMENTS_DONT_CONTAIN_RECEIVER);
  __ movp(rcx, Operand(r8, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiToInteger64(rax, rcx);
  __ leap(rdx, Operand(r8, rax, times_pointer_size,
                       StandardFrameConstants::kCallerSPOffset));

  __ bind(&runtime);
  __ PopReturnAddressTo(rax);
  __ Push(rcx);  // Push number of parameters.
  __ Push(rdx);  // Push parameters pointer.
  __ Push(rbx);  // Push rest parameter index.
  __ PushReturnAddressFrom(rax);
  __ TailCallRuntime(Runtime::kNewRestParam);
}


void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
  // Return address is on the stack.
  Label slow;

  Register receiver = LoadDescriptor::ReceiverRegister();
  Register key = LoadDescriptor::NameRegister();
  Register scratch = rax;
  DCHECK(!scratch.is(receiver) && !scratch.is(key));

  // Check that the key is an array index, that is Uint32.
  STATIC_ASSERT(kSmiValueSize <= 32);
  __ JumpUnlessNonNegativeSmi(key, &slow);

  // Everything is fine, call runtime.
  __ PopReturnAddressTo(scratch);
  __ Push(receiver);  // receiver
  __ Push(key);       // key
  __ PushReturnAddressFrom(scratch);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kLoadElementWithInterceptor);

  __ bind(&slow);
  PropertyAccessCompiler::TailCallBuiltin(
      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
}


void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
  // Return address is on the stack.
  Label miss;

  Register receiver = LoadDescriptor::ReceiverRegister();
  Register index = LoadDescriptor::NameRegister();
  Register scratch = rdi;
  Register result = rax;
  DCHECK(!scratch.is(receiver) && !scratch.is(index));
  DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
         result.is(LoadDescriptor::SlotRegister()));

  // StringCharAtGenerator doesn't use the result register until it's passed
  // the different miss possibilities. If it did, we would have a conflict
  // when FLAG_vector_ics is true.
  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
                                          &miss,  // When not a string.
                                          &miss,  // When not a number.
                                          &miss,  // When index out of range.
                                          STRING_INDEX_IS_ARRAY_INDEX,
                                          RECEIVER_IS_STRING);
  char_at_generator.GenerateFast(masm);
  __ ret(0);

  StubRuntimeCallHelper call_helper;
  char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);

  __ bind(&miss);
  PropertyAccessCompiler::TailCallBuiltin(
      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
}


void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
  // rcx : number of parameters (tagged)
  // rdx : parameters pointer
  // rdi : function
  // rsp[0] : return address

  DCHECK(rdi.is(ArgumentsAccessNewDescriptor::function()));
  DCHECK(rcx.is(ArgumentsAccessNewDescriptor::parameter_count()));
  DCHECK(rdx.is(ArgumentsAccessNewDescriptor::parameter_pointer()));

  // Check if the calling frame is an arguments adaptor frame.
  Label adaptor_frame, try_allocate, runtime;
  __ movp(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
  __ movp(rax, Operand(rbx, StandardFrameConstants::kContextOffset));
  __ Cmp(rax, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ j(equal, &adaptor_frame);

  // Get the length from the frame.
  __ SmiToInteger64(rax, rcx);
  __ jmp(&try_allocate);

  // Patch the arguments.length and the parameters pointer.
  __ bind(&adaptor_frame);
  __ movp(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiToInteger64(rax, rcx);
  __ leap(rdx, Operand(rbx, rax, times_pointer_size,
                       StandardFrameConstants::kCallerSPOffset));

  // Try the new space allocation. Start out with computing the size of
  // the arguments object and the elements array.
  Label add_arguments_object;
  __ bind(&try_allocate);
  __ testp(rax, rax);
  __ j(zero, &add_arguments_object, Label::kNear);
  __ leap(rax, Operand(rax, times_pointer_size, FixedArray::kHeaderSize));
  __ bind(&add_arguments_object);
  __ addp(rax, Immediate(Heap::kStrictArgumentsObjectSize));

  // Do the allocation of both objects in one go.
  __ Allocate(rax, rax, rbx, no_reg, &runtime, TAG_OBJECT);

  // Get the arguments map from the current native context.
  __ movp(rdi, NativeContextOperand());
  __ movp(rdi, ContextOperand(rdi, Context::STRICT_ARGUMENTS_MAP_INDEX));

  __ movp(FieldOperand(rax, JSObject::kMapOffset), rdi);
  __ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex);
  __ movp(FieldOperand(rax, JSObject::kPropertiesOffset), kScratchRegister);
  __ movp(FieldOperand(rax, JSObject::kElementsOffset), kScratchRegister);

  // Get the length (smi tagged) and set that as an in-object property too.
  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
  __ movp(FieldOperand(rax, JSObject::kHeaderSize +
                                Heap::kArgumentsLengthIndex * kPointerSize),
          rcx);

  // If there are no actual arguments, we're done.
  Label done;
  __ testp(rcx, rcx);
  __ j(zero, &done);

  // Set up the elements pointer in the allocated arguments object and
  // initialize the header in the elements fixed array.
  __ leap(rdi, Operand(rax, Heap::kStrictArgumentsObjectSize));
  __ movp(FieldOperand(rax, JSObject::kElementsOffset), rdi);
  __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
  __ movp(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
  __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);

  // Untag the length for the loop below.
  __ SmiToInteger64(rcx, rcx);

  // Copy the fixed array slots.
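  // Note the traversal directions: rdx walks down the stack past the
  // receiver while rdi walks up the fixed array, pairing stack argument i
  // with array element i.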
  Label loop;
  __ bind(&loop);
  __ movp(rbx, Operand(rdx, -1 * kPointerSize));  // Skip receiver.
  __ movp(FieldOperand(rdi, FixedArray::kHeaderSize), rbx);
  __ addp(rdi, Immediate(kPointerSize));
  __ subp(rdx, Immediate(kPointerSize));
  __ decp(rcx);
  __ j(not_zero, &loop);

  // Return.
  __ bind(&done);
  __ ret(0);

  // Do the runtime call to allocate the arguments object.
  __ bind(&runtime);
  __ PopReturnAddressTo(rax);
  __ Push(rdi);  // Push function.
  __ Push(rdx);  // Push parameters pointer.
  __ Push(rcx);  // Push parameter count.
  __ PushReturnAddressFrom(rax);
  __ TailCallRuntime(Runtime::kNewStrictArguments);
}


void RegExpExecStub::Generate(MacroAssembler* masm) {
  // Just jump directly to the runtime if native RegExp is not selected at
  // compile time, or if entering regexp code from generated code has been
  // turned off, either by a runtime switch or at compilation.
#ifdef V8_INTERPRETED_REGEXP
  __ TailCallRuntime(Runtime::kRegExpExec);
#else  // V8_INTERPRETED_REGEXP

  // Stack frame on entry.
  //  rsp[0]  : return address
  //  rsp[8]  : last_match_info (expected JSArray)
  //  rsp[16] : previous index
  //  rsp[24] : subject string
  //  rsp[32] : JSRegExp object

  enum RegExpExecStubArgumentIndices {
    JS_REG_EXP_OBJECT_ARGUMENT_INDEX,
    SUBJECT_STRING_ARGUMENT_INDEX,
    PREVIOUS_INDEX_ARGUMENT_INDEX,
    LAST_MATCH_INFO_ARGUMENT_INDEX,
    REG_EXP_EXEC_ARGUMENT_COUNT
  };

  StackArgumentsAccessor args(rsp, REG_EXP_EXEC_ARGUMENT_COUNT,
                              ARGUMENTS_DONT_CONTAIN_RECEIVER);
  Label runtime;
  // Ensure that a RegExp stack is allocated.
  ExternalReference address_of_regexp_stack_memory_address =
      ExternalReference::address_of_regexp_stack_memory_address(isolate());
  ExternalReference address_of_regexp_stack_memory_size =
      ExternalReference::address_of_regexp_stack_memory_size(isolate());
  __ Load(kScratchRegister, address_of_regexp_stack_memory_size);
  __ testp(kScratchRegister, kScratchRegister);
  __ j(zero, &runtime);

  // Check that the first argument is a JSRegExp object.
  __ movp(rax, args.GetArgumentOperand(JS_REG_EXP_OBJECT_ARGUMENT_INDEX));
  __ JumpIfSmi(rax, &runtime);
  __ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister);
  __ j(not_equal, &runtime);

  // Check that the RegExp has been compiled (data contains a fixed array).
  __ movp(rax, FieldOperand(rax, JSRegExp::kDataOffset));
  if (FLAG_debug_code) {
    Condition is_smi = masm->CheckSmi(rax);
    __ Check(NegateCondition(is_smi),
             kUnexpectedTypeForRegExpDataFixedArrayExpected);
    __ CmpObjectType(rax, FIXED_ARRAY_TYPE, kScratchRegister);
    __ Check(equal, kUnexpectedTypeForRegExpDataFixedArrayExpected);
  }

  // rax: RegExp data (FixedArray)
  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
  __ SmiToInteger32(rbx, FieldOperand(rax, JSRegExp::kDataTagOffset));
  __ cmpl(rbx, Immediate(JSRegExp::IRREGEXP));
  __ j(not_equal, &runtime);

  // rax: RegExp data (FixedArray)
  // Check that the number of captures fit in the static offsets vector buffer.
  __ SmiToInteger32(rdx,
                    FieldOperand(rax, JSRegExp::kIrregexpCaptureCountOffset));
  // Check (number_of_captures + 1) * 2 <= offsets vector size
  // Or     number_of_captures <= offsets vector size / 2 - 1
  STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
  __ cmpl(rdx, Immediate(Isolate::kJSRegexpStaticOffsetsVectorSize / 2 - 1));
  __ j(above, &runtime);

  // Reset offset for possibly sliced string.
  __ Set(r14, 0);
  __ movp(rdi, args.GetArgumentOperand(SUBJECT_STRING_ARGUMENT_INDEX));
  __ JumpIfSmi(rdi, &runtime);
  __ movp(r15, rdi);  // Make a copy of the original subject string.
  __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
  __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
  // rax: RegExp data (FixedArray)
  // rdi: subject string
  // r15: subject string
  // Handle subject string according to its encoding and representation:
  // (1) Sequential two byte?  If yes, go to (9).
  // (2) Sequential one byte?  If yes, go to (6).
  // (3) Anything but sequential or cons?  If yes, go to (7).
  // (4) Cons string.  If the string is flat, replace subject with first string.
  //     Otherwise bailout.
  // (5a) Is subject sequential two byte?  If yes, go to (9).
  // (5b) Is subject external?  If yes, go to (8).
  // (6) One byte sequential.  Load regexp code for one byte.
  // (E) Carry on.
  /// [...]

  // Deferred code at the end of the stub:
  // (7) Not a long external string?  If yes, go to (10).
  // (8) External string.  Make it, offset-wise, look like a sequential string.
  // (8a) Is the external string one byte?  If yes, go to (6).
  // (9) Two byte sequential.  Load regexp code for two byte.  Go to (E).
  // (10) Short external string or not a string?  If yes, bail out to runtime.
  // (11) Sliced string.  Replace subject with parent.  Go to (5a).

  Label seq_one_byte_string /* 6 */, seq_two_byte_string /* 9 */,
      external_string /* 8 */, check_underlying /* 5a */,
      not_seq_nor_cons /* 7 */, check_code /* E */,
      not_long_external /* 10 */;

  // (1) Sequential two byte?  If yes, go to (9).
  __ andb(rbx, Immediate(kIsNotStringMask |
                         kStringRepresentationMask |
                         kStringEncodingMask |
                         kShortExternalStringMask));
  STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
  __ j(zero, &seq_two_byte_string);  // Go to (9).

  // (2) Sequential one byte?  If yes, go to (6).
  // Any other sequential string must be one byte.
  __ andb(rbx, Immediate(kIsNotStringMask |
                         kStringRepresentationMask |
                         kShortExternalStringMask));
  __ j(zero, &seq_one_byte_string, Label::kNear);  // Go to (6).

  // (3) Anything but sequential or cons?  If yes, go to (7).
  // We check whether the subject string is a cons, since sequential strings
  // have already been covered.
  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
  __ cmpp(rbx, Immediate(kExternalStringTag));
  __ j(greater_equal, &not_seq_nor_cons);  // Go to (7).

  // (4) Cons string.  Check that it's flat.
  // Replace subject with first string and reload instance type.
  __ CompareRoot(FieldOperand(rdi, ConsString::kSecondOffset),
                 Heap::kempty_stringRootIndex);
  __ j(not_equal, &runtime);
  __ movp(rdi, FieldOperand(rdi, ConsString::kFirstOffset));
  __ bind(&check_underlying);
  __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
  __ movp(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));

  // (5a) Is subject sequential two byte?  If yes, go to (9).
  __ testb(rbx, Immediate(kStringRepresentationMask | kStringEncodingMask));
  STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
  __ j(zero, &seq_two_byte_string);  // Go to (9).
  // (5b) Is subject external?  If yes, go to (8).
  __ testb(rbx, Immediate(kStringRepresentationMask));
  // The underlying external string is never a short external string.
  STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
  STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
  __ j(not_zero, &external_string);  // Go to (8)

  // (6) One byte sequential.  Load regexp code for one byte.
  __ bind(&seq_one_byte_string);
  // rax: RegExp data (FixedArray)
  __ movp(r11, FieldOperand(rax, JSRegExp::kDataOneByteCodeOffset));
  __ Set(rcx, 1);  // Type is one byte.

  // (E) Carry on.  String handling is done.
  __ bind(&check_code);
  // r11: irregexp code
  // Check that the irregexp code has been generated for the actual string
  // encoding. If it has, the field contains a code object; otherwise it
  // contains a smi (code flushing support).
  __ JumpIfSmi(r11, &runtime);

  // rdi: sequential subject string (or look-alike, external string)
  // r15: original subject string
  // rcx: encoding of subject string (1 if one_byte, 0 if two_byte);
  // r11: code
  // Load used arguments before starting to push arguments for the call to
  // native RegExp code, to avoid dealing with the changing stack height.
  // We have to use r15 instead of rdi to load the length because rdi might
  // have been only made to look like a sequential string when it actually
  // is an external string.
  __ movp(rbx, args.GetArgumentOperand(PREVIOUS_INDEX_ARGUMENT_INDEX));
  __ JumpIfNotSmi(rbx, &runtime);
  __ SmiCompare(rbx, FieldOperand(r15, String::kLengthOffset));
  __ j(above_equal, &runtime);
  __ SmiToInteger64(rbx, rbx);

  // rdi: subject string
  // rbx: previous index
  // rcx: encoding of subject string (1 if one_byte 0 if two_byte);
  // r11: code
  // All checks done. Now push arguments for native regexp code.
  Counters* counters = isolate()->counters();
  __ IncrementCounter(counters->regexp_entry_native(), 1);

  // Isolates: note we add an additional parameter here (isolate pointer).
  static const int kRegExpExecuteArguments = 9;
  int argument_slots_on_stack =
      masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments);
  __ EnterApiExitFrame(argument_slots_on_stack);

  // Argument 9: Pass current isolate address.
  __ LoadAddress(kScratchRegister,
                 ExternalReference::isolate_address(isolate()));
  __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kRegisterSize),
          kScratchRegister);

  // Argument 8: Indicate that this is a direct call from JavaScript.
  __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kRegisterSize),
          Immediate(1));

  // Argument 7: Start (high end) of backtracking stack memory area.
  __ Move(kScratchRegister, address_of_regexp_stack_memory_address);
  __ movp(r9, Operand(kScratchRegister, 0));
  __ Move(kScratchRegister, address_of_regexp_stack_memory_size);
  __ addp(r9, Operand(kScratchRegister, 0));
  __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kRegisterSize), r9);

  // Argument 6: Set the number of capture registers to zero to force global
  // regexps to behave as non-global. This does not affect non-global regexps.
  // Argument 6 is passed in r9 on Linux and on the stack on Windows.
#ifdef _WIN64
  __ movq(Operand(rsp, (argument_slots_on_stack - 4) * kRegisterSize),
          Immediate(0));
#else
  __ Set(r9, 0);
#endif

  // Argument 5: static offsets vector buffer.
  __ LoadAddress(
      r8, ExternalReference::address_of_static_offsets_vector(isolate()));
  // Argument 5 passed in r8 on Linux and on the stack on Windows.
#ifdef _WIN64
  __ movq(Operand(rsp, (argument_slots_on_stack - 5) * kRegisterSize), r8);
#endif

  // rdi: subject string
  // rbx: previous index
  // rcx: encoding of subject string (1 if one_byte 0 if two_byte);
  // r11: code
  // r14: slice offset
  // r15: original subject string

  // Argument 2: Previous index.
  __ movp(arg_reg_2, rbx);

  // Argument 4: End of string data
  // Argument 3: Start of string data
  Label setup_two_byte, setup_rest, got_length, length_not_from_slice;
  // Prepare start and end index of the input.
  // Load the length from the original sliced string if that is the case.
  __ addp(rbx, r14);
  __ SmiToInteger32(arg_reg_3, FieldOperand(r15, String::kLengthOffset));
  __ addp(r14, arg_reg_3);  // Using arg3 as scratch.

  // rbx: start index of the input
  // r14: end index of the input
  // r15: original subject string
  __ testb(rcx, rcx);  // Last use of rcx as encoding of subject string.
  __ j(zero, &setup_two_byte, Label::kNear);
  __ leap(arg_reg_4,
          FieldOperand(rdi, r14, times_1, SeqOneByteString::kHeaderSize));
  __ leap(arg_reg_3,
          FieldOperand(rdi, rbx, times_1, SeqOneByteString::kHeaderSize));
  __ jmp(&setup_rest, Label::kNear);
  __ bind(&setup_two_byte);
  __ leap(arg_reg_4,
          FieldOperand(rdi, r14, times_2, SeqTwoByteString::kHeaderSize));
  __ leap(arg_reg_3,
          FieldOperand(rdi, rbx, times_2, SeqTwoByteString::kHeaderSize));
  __ bind(&setup_rest);

  // Argument 1: Original subject string.
  // The original subject is in the previous stack frame. Therefore we have to
  // use rbp, which points exactly to one pointer size below the previous rsp.
  // (Because creating a new stack frame pushes the previous rbp onto the stack
  // and thereby moves up rsp by one kPointerSize.)
  __ movp(arg_reg_1, r15);

  // Locate the code entry and call it.
  __ addp(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
  __ call(r11);

  __ LeaveApiExitFrame(true);

  // Check the result.
  Label success;
  Label exception;
  __ cmpl(rax, Immediate(1));
  // We expect exactly one result since we force the called regexp to behave
  // as non-global.
  __ j(equal, &success, Label::kNear);
  __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
  __ j(equal, &exception);
  __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
  // If none of the above, it can only be retry.
  // Handle that in the runtime system.
  __ j(not_equal, &runtime);

  // For failure return null.
  __ LoadRoot(rax, Heap::kNullValueRootIndex);
  __ ret(REG_EXP_EXEC_ARGUMENT_COUNT * kPointerSize);

  // Load RegExp data.
  __ bind(&success);
  __ movp(rax, args.GetArgumentOperand(JS_REG_EXP_OBJECT_ARGUMENT_INDEX));
  __ movp(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
  __ SmiToInteger32(rax,
                    FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
  // Calculate number of capture registers (number_of_captures + 1) * 2.
  __ leal(rdx, Operand(rax, rax, times_1, 2));
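  // (The lea computes rax * 2 + 2: two registers per capture plus two for
  // the match itself.)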

  // rdx: Number of capture registers
  // Check that the fourth object is a JSArray object.
  __ movp(r15, args.GetArgumentOperand(LAST_MATCH_INFO_ARGUMENT_INDEX));
  __ JumpIfSmi(r15, &runtime);
  __ CmpObjectType(r15, JS_ARRAY_TYPE, kScratchRegister);
  __ j(not_equal, &runtime);
  // Check that the JSArray is in fast case.
  __ movp(rbx, FieldOperand(r15, JSArray::kElementsOffset));
  __ movp(rax, FieldOperand(rbx, HeapObject::kMapOffset));
  __ CompareRoot(rax, Heap::kFixedArrayMapRootIndex);
  __ j(not_equal, &runtime);
  // Check that the last match info has space for the capture registers and the
  // additional information. Ensure no overflow in add.
  STATIC_ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
  __ SmiToInteger32(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
  __ subl(rax, Immediate(RegExpImpl::kLastMatchOverhead));
  __ cmpl(rdx, rax);
  __ j(greater, &runtime);

  // rbx: last_match_info backing store (FixedArray)
  // rdx: number of capture registers
  // Store the capture count.
  __ Integer32ToSmi(kScratchRegister, rdx);
  __ movp(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
          kScratchRegister);
  // Store last subject and last input.
  __ movp(rax, args.GetArgumentOperand(SUBJECT_STRING_ARGUMENT_INDEX));
  __ movp(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
  __ movp(rcx, rax);
  __ RecordWriteField(rbx,
                      RegExpImpl::kLastSubjectOffset,
                      rax,
                      rdi,
                      kDontSaveFPRegs);
  __ movp(rax, rcx);
  __ movp(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
  __ RecordWriteField(rbx,
                      RegExpImpl::kLastInputOffset,
                      rax,
                      rdi,
                      kDontSaveFPRegs);

  // Get the static offsets vector filled by the native regexp code.
  __ LoadAddress(
      rcx, ExternalReference::address_of_static_offsets_vector(isolate()));

  // rbx: last_match_info backing store (FixedArray)
  // rcx: offsets vector
  // rdx: number of capture registers
  Label next_capture, done;
  // Capture register counter starts from number of capture registers and
  // counts down until wrapping after zero.
  __ bind(&next_capture);
  __ subp(rdx, Immediate(1));
  __ j(negative, &done, Label::kNear);
  // Read the value from the static offsets vector buffer and make it a smi.
  __ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
  __ Integer32ToSmi(rdi, rdi);
  // Store the smi value in the last match info.
  __ movp(FieldOperand(rbx,
                       rdx,
                       times_pointer_size,
                       RegExpImpl::kFirstCaptureOffset),
          rdi);
  __ jmp(&next_capture);
  __ bind(&done);

  // Return last match info.
  __ movp(rax, r15);
  __ ret(REG_EXP_EXEC_ARGUMENT_COUNT * kPointerSize);

  __ bind(&exception);
  // Result must now be exception. If there is no pending exception already, a
  // stack overflow (on the backtrack stack) was detected in RegExp code, but
  // the exception has not been created yet. Handle that in the runtime system.
  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
1410 ExternalReference pending_exception_address(
1411 Isolate::kPendingExceptionAddress, isolate());
1412 Operand pending_exception_operand =
1413 masm->ExternalOperand(pending_exception_address, rbx);
1414 __ movp(rax, pending_exception_operand);
1415 __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
1416 __ cmpp(rax, rdx);
1417 __ j(equal, &runtime);
1418
1419 // For exception, throw the exception again.
1420 __ TailCallRuntime(Runtime::kRegExpExecReThrow);
1421
1422 // Do the runtime call to execute the regexp.
1423 __ bind(&runtime);
1424 __ TailCallRuntime(Runtime::kRegExpExec);
1425
1426 // Deferred code for string handling.
1427 // (7) Not a long external string? If yes, go to (10).
1428 __ bind(&not_seq_nor_cons);
1429 // Compare flags are still set from (3).
1430 __ j(greater, &not_long_external, Label::kNear); // Go to (10).
1431
1432 // (8) External string. Short external strings have been ruled out.
1433 __ bind(&external_string);
1434 __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
1435 __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
1436 if (FLAG_debug_code) {
1437 // Assert that we do not have a cons or slice (indirect strings) here.
1438 // Sequential strings have already been ruled out.
1439 __ testb(rbx, Immediate(kIsIndirectStringMask));
1440 __ Assert(zero, kExternalStringExpectedButNotFound);
1441 }
1442 __ movp(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
1443 // Move the pointer so that offset-wise, it looks like a sequential string.
1444 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
1445 __ subp(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
1446 STATIC_ASSERT(kTwoByteStringTag == 0);
1447 // (8a) Is the external string one byte? If yes, go to (6).
1448 __ testb(rbx, Immediate(kStringEncodingMask));
1449 __ j(not_zero, &seq_one_byte_string); // Go to (6).
1450
1451 // rdi: subject string (flat two-byte)
1452 // rax: RegExp data (FixedArray)
1453 // (9) Two byte sequential. Load regexp code for two byte. Go to (E).
1454 __ bind(&seq_two_byte_string);
1455 __ movp(r11, FieldOperand(rax, JSRegExp::kDataUC16CodeOffset));
1456 __ Set(rcx, 0); // Type is two byte.
1457 __ jmp(&check_code); // Go to (E).
1458
1459 // (10) Not a string or a short external string? If yes, bail out to runtime.
1460 __ bind(&not_long_external);
1461 // Catch non-string subject or short external string.
1462 STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
1463 __ testb(rbx, Immediate(kIsNotStringMask | kShortExternalStringMask));
1464 __ j(not_zero, &runtime);
1465
1466 // (11) Sliced string. Replace subject with parent. Go to (5a).
1467 // Load offset into r14 and replace subject string with parent.
1468 __ SmiToInteger32(r14, FieldOperand(rdi, SlicedString::kOffsetOffset));
1469 __ movp(rdi, FieldOperand(rdi, SlicedString::kParentOffset));
1470 __ jmp(&check_underlying);
1471 #endif // V8_INTERPRETED_REGEXP
1472 }
1473
1474
1475 static int NegativeComparisonResult(Condition cc) {
1476 DCHECK(cc != equal);
1477 DCHECK((cc == less) || (cc == less_equal)
1478 || (cc == greater) || (cc == greater_equal));
1479 return (cc == greater || cc == greater_equal) ? LESS : GREATER;
1480 }
1481
1482
1483 static void CheckInputType(MacroAssembler* masm, Register input,
1484 CompareICState::State expected, Label* fail) {
1485 Label ok;
1486 if (expected == CompareICState::SMI) {
1487 __ JumpIfNotSmi(input, fail);
1488 } else if (expected == CompareICState::NUMBER) {
1489 __ JumpIfSmi(input, &ok);
1490 __ CompareMap(input, masm->isolate()->factory()->heap_number_map());
1491 __ j(not_equal, fail);
1492 }
1493 // We could be strict about internalized/non-internalized here, but as long as
1494 // hydrogen doesn't care, the stub doesn't have to care either.
1495 __ bind(&ok);
1496 }
1497
1498
1499 static void BranchIfNotInternalizedString(MacroAssembler* masm,
1500 Label* label,
1501 Register object,
1502 Register scratch) {
1503 __ JumpIfSmi(object, label);
1504 __ movp(scratch, FieldOperand(object, HeapObject::kMapOffset));
1505 __ movzxbp(scratch,
1506 FieldOperand(scratch, Map::kInstanceTypeOffset));
1507 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
1508 __ testb(scratch, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
1509 __ j(not_zero, label);
1510 }
1511
1512
1513 void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
1514 Label runtime_call, check_unequal_objects, done;
1515 Condition cc = GetCondition();
1516 Factory* factory = isolate()->factory();
1517
1518 Label miss;
1519 CheckInputType(masm, rdx, left(), &miss);
1520 CheckInputType(masm, rax, right(), &miss);
1521
1522 // Compare two smis.
1523 Label non_smi, smi_done;
1524 __ JumpIfNotBothSmi(rax, rdx, &non_smi);
1525 __ subp(rdx, rax);
1526 __ j(no_overflow, &smi_done);
1527 __ notp(rdx); // Correct sign in case of overflow. rdx cannot be 0 here.
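// Example: if rdx holds the smallest smi and rax a positive smi, the 64-bit
// subtraction overflows and leaves a positive result with the wrong sign;
// notp flips the sign bit, yielding a correctly negative (non-zero) value.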
1528 __ bind(&smi_done);
1529 __ movp(rax, rdx);
1530 __ ret(0);
1531 __ bind(&non_smi);
1532
1533 // The compare stub returns a positive, negative, or zero 64-bit integer
1534 // value in rax, corresponding to the result of comparing the two inputs.
1535 // NOTICE! This code is only reached after a smi-fast-case check, so
1536 // it is certain that at least one operand isn't a smi.
1537
1538 // Two identical objects are equal unless they are both NaN or undefined.
1539 {
1540 Label not_identical;
1541 __ cmpp(rax, rdx);
1542 __ j(not_equal, &not_identical, Label::kNear);
1543
1544 if (cc != equal) {
1545 // Check for undefined. undefined OP undefined is false even though
1546 // undefined == undefined.
1547 __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
1548 if (is_strong(strength())) {
1549 // In strong mode, this comparison must throw, so call the runtime.
1550 __ j(equal, &runtime_call, Label::kFar);
1551 } else {
1552 Label check_for_nan;
1553 __ j(not_equal, &check_for_nan, Label::kNear);
1554 __ Set(rax, NegativeComparisonResult(cc));
1555 __ ret(0);
1556 __ bind(&check_for_nan);
1557 }
1558 }
1559
1560 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
1561 // so we do the second best thing - test it ourselves.
1562 Label heap_number;
1563 // If it's not a heap number, then return equal for the (in)equality operator.
1564 __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
1565 factory->heap_number_map());
1566 __ j(equal, &heap_number, Label::kNear);
1567 if (cc != equal) {
1568 __ movp(rcx, FieldOperand(rax, HeapObject::kMapOffset));
1569 __ movzxbl(rcx, FieldOperand(rcx, Map::kInstanceTypeOffset));
1570 // Call runtime on identical objects. Otherwise return equal.
1571 __ cmpb(rcx, Immediate(static_cast<uint8_t>(FIRST_JS_RECEIVER_TYPE)));
1572 __ j(above_equal, &runtime_call, Label::kFar);
1573 // Call runtime on identical symbols since we need to throw a TypeError.
1574 __ cmpb(rcx, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
1575 __ j(equal, &runtime_call, Label::kFar);
1576 // Call runtime on identical SIMD values since we must throw a TypeError.
1577 __ cmpb(rcx, Immediate(static_cast<uint8_t>(SIMD128_VALUE_TYPE)));
1578 __ j(equal, &runtime_call, Label::kFar);
1579 if (is_strong(strength())) {
1580 // We have already tested for smis and heap numbers, so if both
1581 // arguments are not strings we must proceed to the slow case.
1582 __ testb(rcx, Immediate(kIsNotStringMask));
1583 __ j(not_zero, &runtime_call, Label::kFar);
1584 }
1585 }
1586 __ Set(rax, EQUAL);
1587 __ ret(0);
1588
1589 __ bind(&heap_number);
1590 // It is a heap number, so return equal if it's not NaN.
1591 // For NaN, return 1 for every condition except greater and
1592 // greater-equal. Return -1 for them, so the comparison yields
1593 // false for all conditions except not-equal.
1594 __ Set(rax, EQUAL);
1595 __ Movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
1596 __ Ucomisd(xmm0, xmm0);
1597 __ setcc(parity_even, rax);
1598 // rax is 0 for equal non-NaN heap numbers, 1 for NaNs.
1599 if (cc == greater_equal || cc == greater) {
1600 __ negp(rax);
1601 }
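// For example, with cc == less a NaN operand leaves rax == 1 (GREATER), so
// "x < y" evaluates to false; with cc == greater it is negated to -1 (LESS),
// so "x > y" is false as well. Only not-equal holds for NaN comparisons.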
1602 __ ret(0);
1603
1604 __ bind(&not_identical);
1605 }
1606
1607 if (cc == equal) { // Both strict and non-strict.
1608 Label slow; // Fallthrough label.
1609
1610 // If we're doing a strict equality comparison, we don't have to do
1611 // type conversion, so we generate code to do fast comparison for objects
1612 // and oddballs. Non-smi numbers and strings still go through the usual
1613 // slow-case code.
1614 if (strict()) {
1615 // If either is a Smi (we know that not both are), then they can only
1616 // be equal if the other is a HeapNumber. If so, use the slow case.
1617 {
1618 Label not_smis;
1619 __ SelectNonSmi(rbx, rax, rdx, &not_smis);
1620
1621 // Check if the non-smi operand is a heap number.
1622 __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
1623 factory->heap_number_map());
1624 // If heap number, handle it in the slow case.
1625 __ j(equal, &slow);
1626 // Return non-equal. ebx (the lower half of rbx) is not zero.
1627 __ movp(rax, rbx);
1628 __ ret(0);
1629
1630 __ bind(&not_smis);
1631 }
1632
1633 // If either operand is a JSObject or an oddball value, then they are not
1634 // equal since their pointers are different.
1635 // There is no test for undetectability in strict equality.
1636
1637 // If the first object is a JS object, we have done pointer comparison.
1638 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
1639 Label first_non_object;
1640 __ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rcx);
1641 __ j(below, &first_non_object, Label::kNear);
1642 // Return non-zero (rax is not zero).
1643 Label return_not_equal;
1644 STATIC_ASSERT(kHeapObjectTag != 0);
1645 __ bind(&return_not_equal);
1646 __ ret(0);
1647
1648 __ bind(&first_non_object);
1649 // Check for oddballs: true, false, null, undefined.
1650 __ CmpInstanceType(rcx, ODDBALL_TYPE);
1651 __ j(equal, &return_not_equal);
1652
1653 __ CmpObjectType(rdx, FIRST_JS_RECEIVER_TYPE, rcx);
1654 __ j(above_equal, &return_not_equal);
1655
1656 // Check for oddballs: true, false, null, undefined.
1657 __ CmpInstanceType(rcx, ODDBALL_TYPE);
1658 __ j(equal, &return_not_equal);
1659
1660 // Fall through to the general case.
1661 }
1662 __ bind(&slow);
1663 }
1664
1665 // Generate the number comparison code.
1666 Label non_number_comparison;
1667 Label unordered;
1668 FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
1669 __ xorl(rax, rax);
1670 __ xorl(rcx, rcx);
1671 __ Ucomisd(xmm0, xmm1);
1672
1673 // Don't base result on EFLAGS when a NaN is involved.
1674 __ j(parity_even, &unordered, Label::kNear);
1675 // Return a result of -1, 0, or 1, based on EFLAGS.
1676 __ setcc(above, rax);
1677 __ setcc(below, rcx);
1678 __ subp(rax, rcx);
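// After ucomisd, "above" (CF == 0 and ZF == 0) and "below" (CF == 1) cannot
// both be set, so rax - rcx is 1, 0, or -1: the GREATER, EQUAL, or LESS
// encoding that callers of this stub expect.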
1679 __ ret(0);
1680
1681 // If one of the numbers was NaN, then the result is always false.
1682 // The cc is never not-equal.
1683 __ bind(&unordered);
1684 DCHECK(cc != not_equal);
1685 if (cc == less || cc == less_equal) {
1686 __ Set(rax, 1);
1687 } else {
1688 __ Set(rax, -1);
1689 }
1690 __ ret(0);
1691
1692 // The number comparison code did not provide a valid result.
1693 __ bind(&non_number_comparison);
1694
1695 // Fast negative check for internalized-to-internalized equality.
1696 Label check_for_strings;
1697 if (cc == equal) {
1698 BranchIfNotInternalizedString(
1699 masm, &check_for_strings, rax, kScratchRegister);
1700 BranchIfNotInternalizedString(
1701 masm, &check_for_strings, rdx, kScratchRegister);
1702
1703 // We've already checked for object identity, so if both operands are
1704 // internalized strings they aren't equal. Register rax already holds a
1705 // non-zero value, which indicates not equal, so just return.
1706 __ ret(0);
1707 }
1708
1709 __ bind(&check_for_strings);
1710
1711 __ JumpIfNotBothSequentialOneByteStrings(rdx, rax, rcx, rbx,
1712 &check_unequal_objects);
1713
1714 // Inline comparison of one-byte strings.
1715 if (cc == equal) {
1716 StringHelper::GenerateFlatOneByteStringEquals(masm, rdx, rax, rcx, rbx);
1717 } else {
1718 StringHelper::GenerateCompareFlatOneByteStrings(masm, rdx, rax, rcx, rbx,
1719 rdi, r8);
1720 }
1721
1722 #ifdef DEBUG
1723 __ Abort(kUnexpectedFallThroughFromStringComparison);
1724 #endif
1725
1726 __ bind(&check_unequal_objects);
1727 if (cc == equal && !strict()) {
1728 // Not strict equality. Objects are unequal if
1729 // they are both JSObjects and not undetectable,
1730 // and their pointers are different.
1731 Label return_unequal;
1732 // At most one is a smi, so we can test for smi by adding the two.
1733 // A smi plus a heap object has the low bit set, a heap object plus
1734 // a heap object has the low bit clear.
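// With kSmiTag == 0 and kHeapObjectTag == 1, smi + heap object gives a low
// bit of 0 + 1 = 1, while heap object + heap object gives 1 + 1 = 2 with
// the low bit clear, so a set bit means one operand was a smi.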
1735 STATIC_ASSERT(kSmiTag == 0);
1736 STATIC_ASSERT(kSmiTagMask == 1);
1737 __ leap(rcx, Operand(rax, rdx, times_1, 0));
1738 __ testb(rcx, Immediate(kSmiTagMask));
1739 __ j(not_zero, &runtime_call, Label::kNear);
1740 __ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rbx);
1741 __ j(below, &runtime_call, Label::kNear);
1742 __ CmpObjectType(rdx, FIRST_JS_RECEIVER_TYPE, rcx);
1743 __ j(below, &runtime_call, Label::kNear);
1744 __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
1745 Immediate(1 << Map::kIsUndetectable));
1746 __ j(zero, &return_unequal, Label::kNear);
1747 __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
1748 Immediate(1 << Map::kIsUndetectable));
1749 __ j(zero, &return_unequal, Label::kNear);
1750 // The objects are both undetectable, so they both compare as the value
1751 // undefined, and are equal.
1752 __ Set(rax, EQUAL);
1753 __ bind(&return_unequal);
1754 // Return non-equal by returning the non-zero object pointer in rax,
1755 // or return equal if we fell through to here.
1756 __ ret(0);
1757 }
1758 __ bind(&runtime_call);
1759
1760 // Push arguments below the return address to prepare jump to builtin.
1761 __ PopReturnAddressTo(rcx);
1762 __ Push(rdx);
1763 __ Push(rax);
1764
1765 // Figure out which native to call and setup the arguments.
1766 if (cc == equal) {
1767 __ PushReturnAddressFrom(rcx);
1768 __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals);
1769 } else {
1770 __ Push(Smi::FromInt(NegativeComparisonResult(cc)));
1771 __ PushReturnAddressFrom(rcx);
1772 __ TailCallRuntime(is_strong(strength()) ? Runtime::kCompare_Strong
1773 : Runtime::kCompare);
1774 }
1775
1776 __ bind(&miss);
1777 GenerateMiss(masm);
1778 }
1779
1780
1781 static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
1782 // rax : number of arguments to the construct function
1783 // rbx : feedback vector
1784 // rdx : slot in feedback vector (Smi)
1785 // rdi : the function to call
1786 FrameScope scope(masm, StackFrame::INTERNAL);
1787
1788 // Number-of-arguments register must be smi-tagged to call out.
1789 __ Integer32ToSmi(rax, rax);
1790 __ Push(rax);
1791 __ Push(rdi);
1792 __ Integer32ToSmi(rdx, rdx);
1793 __ Push(rdx);
1794 __ Push(rbx);
1795
1796 __ CallStub(stub);
1797
1798 __ Pop(rbx);
1799 __ Pop(rdx);
1800 __ Pop(rdi);
1801 __ Pop(rax);
1802 __ SmiToInteger32(rax, rax);
1803 }
1804
1805
1806 static void GenerateRecordCallTarget(MacroAssembler* masm) {
1807 // Cache the called function in a feedback vector slot. Cache states
1808 // are uninitialized, monomorphic (indicated by a JSFunction), and
1809 // megamorphic.
1810 // rax : number of arguments to the construct function
1811 // rbx : feedback vector
1812 // rdx : slot in feedback vector (Smi)
1813 // rdi : the function to call
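// The slot's state machine, as implemented below:
//   uninitialized symbol -> WeakCell(function)   (ordinary function)
//   uninitialized symbol -> AllocationSite       (the Array function)
//   any mismatch later   -> megamorphic symbol   (sticky)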
1814 Isolate* isolate = masm->isolate();
1815 Label initialize, done, miss, megamorphic, not_array_function,
1816 done_no_smi_convert;
1817
1818 // Load the cache state into r11.
1819 __ SmiToInteger32(rdx, rdx);
1820 __ movp(r11,
1821 FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
1822
1823 // A monomorphic cache hit or an already megamorphic state: invoke the
1824 // function without changing the state.
1825 // We don't know if r11 is a WeakCell or a Symbol, but it's harmless to read
1826 // at this position in a symbol (see static asserts in
1827 // type-feedback-vector.h).
1828 Label check_allocation_site;
1829 __ cmpp(rdi, FieldOperand(r11, WeakCell::kValueOffset));
1830 __ j(equal, &done, Label::kFar);
1831 __ CompareRoot(r11, Heap::kmegamorphic_symbolRootIndex);
1832 __ j(equal, &done, Label::kFar);
1833 __ CompareRoot(FieldOperand(r11, HeapObject::kMapOffset),
1834 Heap::kWeakCellMapRootIndex);
1835 __ j(not_equal, &check_allocation_site);
1836
1837 // If the weak cell is cleared, we have a new chance to become monomorphic.
1838 __ CheckSmi(FieldOperand(r11, WeakCell::kValueOffset));
1839 __ j(equal, &initialize);
1840 __ jmp(&megamorphic);
1841
1842 __ bind(&check_allocation_site);
1843 // If we came here, we need to see if we are the array function.
1844 // If we didn't have a matching function, and we didn't find the megamorphic
1845 // sentinel, then we have in the slot either some other function or an
1846 // AllocationSite.
1847 __ CompareRoot(FieldOperand(r11, 0), Heap::kAllocationSiteMapRootIndex);
1848 __ j(not_equal, &miss);
1849
1850 // Make sure the function is the Array() function.
1851 __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r11);
1852 __ cmpp(rdi, r11);
1853 __ j(not_equal, &megamorphic);
1854 __ jmp(&done);
1855
1856 __ bind(&miss);
1857
1858 // A monomorphic miss (i.e., the cache is not uninitialized) goes
1859 // megamorphic.
1860 __ CompareRoot(r11, Heap::kuninitialized_symbolRootIndex);
1861 __ j(equal, &initialize);
1862 // MegamorphicSentinel is an immortal immovable object (undefined) so no
1863 // write-barrier is needed.
1864 __ bind(&megamorphic);
1865 __ Move(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
1866 TypeFeedbackVector::MegamorphicSentinel(isolate));
1867 __ jmp(&done);
1868
1869 // An uninitialized cache is patched with a WeakCell of the function, or with
1870 // an AllocationSite (tracking the ElementsKind) if it is the Array constructor.
1871 __ bind(&initialize);
1872
1873 // Make sure the function is the Array() function.
1874 __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r11);
1875 __ cmpp(rdi, r11);
1876 __ j(not_equal, &not_array_function);
1877
1878 CreateAllocationSiteStub create_stub(isolate);
1879 CallStubInRecordCallTarget(masm, &create_stub);
1880 __ jmp(&done_no_smi_convert);
1881
1882 __ bind(&not_array_function);
1883 CreateWeakCellStub weak_cell_stub(isolate);
1884 CallStubInRecordCallTarget(masm, &weak_cell_stub);
1885 __ jmp(&done_no_smi_convert);
1886
1887 __ bind(&done);
1888 __ Integer32ToSmi(rdx, rdx);
1889
1890 __ bind(&done_no_smi_convert);
1891 }
1892
1893
1894 void CallConstructStub::Generate(MacroAssembler* masm) {
1895 // rax : number of arguments
1896 // rbx : feedback vector
1897 // rdx : slot in feedback vector (Smi)
1898 // rdi : constructor function
1899
1900 Label non_function;
1901 // Check that the constructor is not a smi.
1902 __ JumpIfSmi(rdi, &non_function);
1903 // Check that constructor is a JSFunction.
1904 __ CmpObjectType(rdi, JS_FUNCTION_TYPE, r11);
1905 __ j(not_equal, &non_function);
1906
1907 GenerateRecordCallTarget(masm);
1908
1909 __ SmiToInteger32(rdx, rdx);
1910 Label feedback_register_initialized;
1911 // Put the AllocationSite from the feedback vector into rbx, or undefined.
1912 __ movp(rbx,
1913 FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
1914 __ CompareRoot(FieldOperand(rbx, 0), Heap::kAllocationSiteMapRootIndex);
1915 __ j(equal, &feedback_register_initialized, Label::kNear);
1916 __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
1917 __ bind(&feedback_register_initialized);
1918
1919 __ AssertUndefinedOrAllocationSite(rbx);
1920
1921 // Pass new target to construct stub.
1922 __ movp(rdx, rdi);
1923
1924 // Tail call to the function-specific construct stub (still in the caller
1925 // context at this point).
1926 __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
1927 __ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kConstructStubOffset));
1928 __ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
1929 __ jmp(rcx);
1930
1931 __ bind(&non_function);
1932 __ movp(rdx, rdi);
1933 __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
1934 }
1935
1936
1937 void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
1938 // rdi - function
1939 // rdx - slot id
1940 // rbx - vector
1941 // rcx - allocation site (loaded from vector[slot]).
1942 __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8);
1943 __ cmpp(rdi, r8);
1944 __ j(not_equal, miss);
1945
1946 __ movp(rax, Immediate(arg_count()));
1947
1948 // Increment the call count for monomorphic function calls.
1949 __ SmiAddConstant(FieldOperand(rbx, rdx, times_pointer_size,
1950 FixedArray::kHeaderSize + kPointerSize),
1951 Smi::FromInt(CallICNexus::kCallCountIncrement));
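// The call count lives one element after the feedback slot itself, i.e. at
// vector[slot + 1], hence the extra kPointerSize in the displacement.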
1952
1953 __ movp(rbx, rcx);
1954 __ movp(rdx, rdi);
1955 ArrayConstructorStub stub(masm->isolate(), arg_count());
1956 __ TailCallStub(&stub);
1957 }
1958
1959
1960 void CallICStub::Generate(MacroAssembler* masm) {
1961 // ----------- S t a t e -------------
1962 // -- rdi - function
1963 // -- rdx - slot id
1964 // -- rbx - vector
1965 // -----------------------------------
1966 Isolate* isolate = masm->isolate();
1967 Label extra_checks_or_miss, call, call_function;
1968 int argc = arg_count();
1969 StackArgumentsAccessor args(rsp, argc);
1970 ParameterCount actual(argc);
1971
1972 // The checks. First, does rdi match the recorded monomorphic target?
1973 __ SmiToInteger32(rdx, rdx);
1974 __ movp(rcx,
1975 FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
1976
1977 // We don't know that we have a weak cell. We might have a private symbol
1978 // or an AllocationSite, but the memory is safe to examine.
1979 // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
1980 // FixedArray.
1981 // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
1982 // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
1983 // computed, meaning that it can't appear to be a pointer. If the low bit is
1984 // 0, then hash is computed, but the 0 bit prevents the field from appearing
1985 // to be a pointer.
1986 STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
1987 STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
1988 WeakCell::kValueOffset &&
1989 WeakCell::kValueOffset == Symbol::kHashFieldSlot);
1990
1991 __ cmpp(rdi, FieldOperand(rcx, WeakCell::kValueOffset));
1992 __ j(not_equal, &extra_checks_or_miss);
1993
1994 // The compare above could have been a SMI/SMI comparison. Guard against this
1995 // convincing us that we have a monomorphic JSFunction.
1996 __ JumpIfSmi(rdi, &extra_checks_or_miss);
1997
1998 // Increment the call count for monomorphic function calls.
1999 __ SmiAddConstant(FieldOperand(rbx, rdx, times_pointer_size,
2000 FixedArray::kHeaderSize + kPointerSize),
2001 Smi::FromInt(CallICNexus::kCallCountIncrement));
2002
2003 __ bind(&call_function);
2004 __ Set(rax, argc);
2005 __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode()),
2006 RelocInfo::CODE_TARGET);
2007
2008 __ bind(&extra_checks_or_miss);
2009 Label uninitialized, miss, not_allocation_site;
2010
2011 __ Cmp(rcx, TypeFeedbackVector::MegamorphicSentinel(isolate));
2012 __ j(equal, &call);
2013
2014 // Check if we have an allocation site.
2015 __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
2016 Heap::kAllocationSiteMapRootIndex);
2017 __ j(not_equal, &not_allocation_site);
2018
2019 // We have an allocation site.
2020 HandleArrayCase(masm, &miss);
2021
2022 __ bind(&not_allocation_site);
2023
2024 // The following cases attempt to handle MISS cases without going to the
2025 // runtime.
2026 if (FLAG_trace_ic) {
2027 __ jmp(&miss);
2028 }
2029
2030 __ Cmp(rcx, TypeFeedbackVector::UninitializedSentinel(isolate));
2031 __ j(equal, &uninitialized);
2032
2033 // We are going megamorphic. If the feedback is a JSFunction, it is fine
2034 // to handle it here. More complex cases are dealt with in the runtime.
2035 __ AssertNotSmi(rcx);
2036 __ CmpObjectType(rcx, JS_FUNCTION_TYPE, rcx);
2037 __ j(not_equal, &miss);
2038 __ Move(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
2039 TypeFeedbackVector::MegamorphicSentinel(isolate));
2040
2041 __ bind(&call);
2042 __ Set(rax, argc);
2043 __ Jump(masm->isolate()->builtins()->Call(convert_mode()),
2044 RelocInfo::CODE_TARGET);
2045
2046 __ bind(&uninitialized);
2047
2048 // We are going monomorphic, provided we actually have a JSFunction.
2049 __ JumpIfSmi(rdi, &miss);
2050
2051 // Go to the miss case if we do not have a function.
2052 __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
2053 __ j(not_equal, &miss);
2054
2055 // Make sure the function is not the Array() function, which requires special
2056 // behavior on MISS.
2057 __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, rcx);
2058 __ cmpp(rdi, rcx);
2059 __ j(equal, &miss);
2060
2061 // Make sure the function belongs to the same native context.
2062 __ movp(rcx, FieldOperand(rdi, JSFunction::kContextOffset));
2063 __ movp(rcx, ContextOperand(rcx, Context::NATIVE_CONTEXT_INDEX));
2064 __ cmpp(rcx, NativeContextOperand());
2065 __ j(not_equal, &miss);
2066
2067 // Initialize the call counter.
2068 __ Move(FieldOperand(rbx, rdx, times_pointer_size,
2069 FixedArray::kHeaderSize + kPointerSize),
2070 Smi::FromInt(CallICNexus::kCallCountIncrement));
2071
2072 // Store the function. Use a stub since we need a frame for allocation.
2073 // rbx - vector
2074 // rdx - slot (needs to be in smi form)
2075 // rdi - function
2076 {
2077 FrameScope scope(masm, StackFrame::INTERNAL);
2078 CreateWeakCellStub create_stub(isolate);
2079
2080 __ Integer32ToSmi(rdx, rdx);
2081 __ Push(rdi);
2082 __ CallStub(&create_stub);
2083 __ Pop(rdi);
2084 }
2085
2086 __ jmp(&call_function);
2087
2088 // We are here because tracing is on or we encountered a MISS case we can't
2089 // handle here.
2090 __ bind(&miss);
2091 GenerateMiss(masm);
2092
2093 __ jmp(&call);
2094
2095 // Unreachable
2096 __ int3();
2097 }
2098
2099
2100 void CallICStub::GenerateMiss(MacroAssembler* masm) {
2101 FrameScope scope(masm, StackFrame::INTERNAL);
2102
2103 // Push the receiver and the function and feedback info.
2104 __ Push(rdi);
2105 __ Push(rbx);
2106 __ Integer32ToSmi(rdx, rdx);
2107 __ Push(rdx);
2108
2109 // Call the entry.
2110 __ CallRuntime(Runtime::kCallIC_Miss);
2111
2112 // Move result to rdi and exit the internal frame.
2113 __ movp(rdi, rax);
2114 }
2115
2116
2117 bool CEntryStub::NeedsImmovableCode() {
2118 return false;
2119 }
2120
2121
2122 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
2123 CEntryStub::GenerateAheadOfTime(isolate);
2124 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
2125 StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
2126 // It is important that the store buffer overflow stubs are generated first.
2127 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
2128 CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
2129 CreateWeakCellStub::GenerateAheadOfTime(isolate);
2130 BinaryOpICStub::GenerateAheadOfTime(isolate);
2131 BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
2132 StoreFastElementStub::GenerateAheadOfTime(isolate);
2133 TypeofStub::GenerateAheadOfTime(isolate);
2134 }
2135
2136
2137 void CodeStub::GenerateFPStubs(Isolate* isolate) {
2138 }
2139
2140
2141 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
2142 CEntryStub stub(isolate, 1, kDontSaveFPRegs);
2143 stub.GetCode();
2144 CEntryStub save_doubles(isolate, 1, kSaveFPRegs);
2145 save_doubles.GetCode();
2146 }
2147
2148
2149 void CEntryStub::Generate(MacroAssembler* masm) {
2150 // rax: number of arguments including receiver
2151 // rbx: pointer to C function (C callee-saved)
2152 // rbp: frame pointer of calling JS frame (restored after C call)
2153 // rsp: stack pointer (restored after C call)
2154 // rsi: current context (restored)
2155 //
2156 // If argv_in_register():
2157 // r15: pointer to the first argument
2158
2159 ProfileEntryHookStub::MaybeCallEntryHook(masm);
2160
2161 // Enter the exit frame that transitions from JavaScript to C++.
2162 #ifdef _WIN64
2163 int arg_stack_space = (result_size() < 2 ? 2 : 4);
2164 #else // _WIN64
2165 int arg_stack_space = 0;
2166 #endif // _WIN64
2167 if (argv_in_register()) {
2168 DCHECK(!save_doubles());
2169 __ EnterApiExitFrame(arg_stack_space);
2170 // Move argc into r14 (argv is already in r15).
2171 __ movp(r14, rax);
2172 } else {
2173 __ EnterExitFrame(arg_stack_space, save_doubles());
2174 }
2175
2176 // rbx: pointer to builtin function (C callee-saved).
2177 // rbp: frame pointer of exit frame (restored after C call).
2178 // rsp: stack pointer (restored after C call).
2179 // r14: number of arguments including receiver (C callee-saved).
2180 // r15: argv pointer (C callee-saved).
2181
2182 // Simple results returned in rax (both AMD64 and Win64 calling conventions).
2183 // Complex results must be written to the address passed as the first argument.
2184 // AMD64 calling convention: a struct of two pointers in rax+rdx
2185
2186 // Check stack alignment.
2187 if (FLAG_debug_code) {
2188 __ CheckStackAlignment();
2189 }
2190
2191 // Call C function.
2192 #ifdef _WIN64
2193 // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9.
2194 // Pass argv and argc as two parameters. The arguments object will
2195 // be created by stubs declared by DECLARE_RUNTIME_FUNCTION().
2196 if (result_size() < 2) {
2197 // Pass a pointer to the Arguments object as the first argument.
2198 // Return result in single register (rax).
2199 __ movp(rcx, r14); // argc.
2200 __ movp(rdx, r15); // argv.
2201 __ Move(r8, ExternalReference::isolate_address(isolate()));
2202 } else {
2203 DCHECK_EQ(2, result_size());
2204 // Pass a pointer to the result location as the first argument.
2205 __ leap(rcx, StackSpaceOperand(2));
2206 // Pass a pointer to the Arguments object as the second argument.
2207 __ movp(rdx, r14); // argc.
2208 __ movp(r8, r15); // argv.
2209 __ Move(r9, ExternalReference::isolate_address(isolate()));
2210 }
2211
2212 #else // _WIN64
2213 // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
2214 __ movp(rdi, r14); // argc.
2215 __ movp(rsi, r15); // argv.
2216 __ Move(rdx, ExternalReference::isolate_address(isolate()));
2217 #endif // _WIN64
2218 __ call(rbx);
2219 // Result is in rax - do not destroy this register!
2220
2221 #ifdef _WIN64
2222 // If return value is on the stack, pop it to registers.
2223 if (result_size() > 1) {
2224 DCHECK_EQ(2, result_size());
2225 // Read result values stored on stack. Result is stored
2226 // above the four argument mirror slots and the two
2227 // Arguments object slots.
2228 __ movq(rax, Operand(rsp, 6 * kRegisterSize));
2229 __ movq(rdx, Operand(rsp, 7 * kRegisterSize));
2230 }
2231 #endif // _WIN64
2232
2233 // Check result for exception sentinel.
2234 Label exception_returned;
2235 __ CompareRoot(rax, Heap::kExceptionRootIndex);
2236 __ j(equal, &exception_returned);
2237
2238 // Check that there is no pending exception, otherwise we
2239 // should have returned the exception sentinel.
2240 if (FLAG_debug_code) {
2241 Label okay;
2242 __ LoadRoot(r14, Heap::kTheHoleValueRootIndex);
2243 ExternalReference pending_exception_address(
2244 Isolate::kPendingExceptionAddress, isolate());
2245 Operand pending_exception_operand =
2246 masm->ExternalOperand(pending_exception_address);
2247 __ cmpp(r14, pending_exception_operand);
2248 __ j(equal, &okay, Label::kNear);
2249 __ int3();
2250 __ bind(&okay);
2251 }
2252
2253 // Exit the JavaScript to C++ exit frame.
2254 __ LeaveExitFrame(save_doubles(), !argv_in_register());
2255 __ ret(0);
2256
2257 // Handling of exception.
2258 __ bind(&exception_returned);
2259
2260 ExternalReference pending_handler_context_address(
2261 Isolate::kPendingHandlerContextAddress, isolate());
2262 ExternalReference pending_handler_code_address(
2263 Isolate::kPendingHandlerCodeAddress, isolate());
2264 ExternalReference pending_handler_offset_address(
2265 Isolate::kPendingHandlerOffsetAddress, isolate());
2266 ExternalReference pending_handler_fp_address(
2267 Isolate::kPendingHandlerFPAddress, isolate());
2268 ExternalReference pending_handler_sp_address(
2269 Isolate::kPendingHandlerSPAddress, isolate());
2270
2271 // Ask the runtime for help to determine the handler. This will set rax to
2272 // contain the current pending exception, don't clobber it.
2273 ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
2274 isolate());
2275 {
2276 FrameScope scope(masm, StackFrame::MANUAL);
2277 __ movp(arg_reg_1, Immediate(0)); // argc.
2278 __ movp(arg_reg_2, Immediate(0)); // argv.
2279 __ Move(arg_reg_3, ExternalReference::isolate_address(isolate()));
2280 __ PrepareCallCFunction(3);
2281 __ CallCFunction(find_handler, 3);
2282 }
2283
2284 // Retrieve the handler context, SP and FP.
2285 __ movp(rsi, masm->ExternalOperand(pending_handler_context_address));
2286 __ movp(rsp, masm->ExternalOperand(pending_handler_sp_address));
2287 __ movp(rbp, masm->ExternalOperand(pending_handler_fp_address));
2288
2289 // If the handler is a JS frame, restore the context to the frame. Note that
2290 // rsi will be zero (no context) for non-JS frames.
2291 Label skip;
2292 __ testp(rsi, rsi);
2293 __ j(zero, &skip, Label::kNear);
2294 __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
2295 __ bind(&skip);
2296
2297 // Compute the handler entry address and jump to it.
2298 __ movp(rdi, masm->ExternalOperand(pending_handler_code_address));
2299 __ movp(rdx, masm->ExternalOperand(pending_handler_offset_address));
2300 __ leap(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
2301 __ jmp(rdi);
2302 }
2303
2304
2305 void JSEntryStub::Generate(MacroAssembler* masm) {
2306 Label invoke, handler_entry, exit;
2307 Label not_outermost_js, not_outermost_js_2;
2308
2309 ProfileEntryHookStub::MaybeCallEntryHook(masm);
2310
2311 { // NOLINT. Scope block confuses linter.
2312 MacroAssembler::NoRootArrayScope uninitialized_root_register(masm);
2313 // Set up frame.
2314 __ pushq(rbp);
2315 __ movp(rbp, rsp);
2316
2317 // Push the stack frame type marker twice.
2318 int marker = type();
2319 // Scratch register is neither callee-save nor an argument register on any
2320 // platform. It's free to use at this point.
2321 // Cannot use smi-register for loading yet.
2322 __ Move(kScratchRegister, Smi::FromInt(marker), Assembler::RelocInfoNone());
2323 __ Push(kScratchRegister); // context slot
2324 __ Push(kScratchRegister); // function slot
2325 // Save callee-saved registers (X64/X32/Win64 calling conventions).
2326 __ pushq(r12);
2327 __ pushq(r13);
2328 __ pushq(r14);
2329 __ pushq(r15);
2330 #ifdef _WIN64
2331 __ pushq(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
2332 __ pushq(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
2333 #endif
2334 __ pushq(rbx);
2335
2336 #ifdef _WIN64
2337 // On Win64 XMM6-XMM15 are callee-save
2338 __ subp(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
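// xmm6..xmm15 is ten registers; this assumes kXMMRegistersBlockSize equals
// 10 * kXMMRegisterSize.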
2339 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0), xmm6);
2340 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 1), xmm7);
2341 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 2), xmm8);
2342 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 3), xmm9);
2343 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 4), xmm10);
2344 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 5), xmm11);
2345 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 6), xmm12);
2346 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 7), xmm13);
2347 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 8), xmm14);
2348 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 9), xmm15);
2349 #endif
2350
2351 // Set up the roots and smi constant registers.
2352 // Needs to be done before any further smi loads.
2353 __ InitializeRootRegister();
2354 }
2355
2356 // Save copies of the top frame descriptor on the stack.
2357 ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate());
2358 {
2359 Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
2360 __ Push(c_entry_fp_operand);
2361 }
2362
2363 // If this is the outermost JS call, set js_entry_sp value.
2364 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
2365 __ Load(rax, js_entry_sp);
2366 __ testp(rax, rax);
2367 __ j(not_zero, &not_outermost_js);
2368 __ Push(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
2369 __ movp(rax, rbp);
2370 __ Store(js_entry_sp, rax);
2371 Label cont;
2372 __ jmp(&cont);
2373 __ bind(&not_outermost_js);
2374 __ Push(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME));
2375 __ bind(&cont);
2376
2377 // Jump to a faked try block that does the invoke, with a faked catch
2378 // block that sets the pending exception.
2379 __ jmp(&invoke);
2380 __ bind(&handler_entry);
2381 handler_offset_ = handler_entry.pos();
2382 // Caught exception: Store result (exception) in the pending exception
2383 // field in the JSEnv and return a failure sentinel.
2384 ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
2385 isolate());
2386 __ Store(pending_exception, rax);
2387 __ LoadRoot(rax, Heap::kExceptionRootIndex);
2388 __ jmp(&exit);
2389
2390 // Invoke: Link this frame into the handler chain.
2391 __ bind(&invoke);
2392 __ PushStackHandler();
2393
2394 // Clear any pending exceptions.
2395 __ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
2396 __ Store(pending_exception, rax);
2397
2398 // Fake a receiver (NULL).
2399 __ Push(Immediate(0)); // receiver
2400
2401 // Invoke the function by calling through JS entry trampoline builtin and
2402 // pop the faked function when we return. We load the address from an
2403 // external reference instead of inlining the call target address directly
2404 // in the code, because the builtin stubs may not have been generated yet
2405 // at the time this code is generated.
2406 if (type() == StackFrame::ENTRY_CONSTRUCT) {
2407 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
2408 isolate());
2409 __ Load(rax, construct_entry);
2410 } else {
2411 ExternalReference entry(Builtins::kJSEntryTrampoline, isolate());
2412 __ Load(rax, entry);
2413 }
2414 __ leap(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
2415 __ call(kScratchRegister);
2416
2417 // Unlink this frame from the handler chain.
2418 __ PopStackHandler();
2419
2420 __ bind(&exit);
2421 // Check if the current stack frame is marked as the outermost JS frame.
2422 __ Pop(rbx);
2423 __ Cmp(rbx, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
2424 __ j(not_equal, &not_outermost_js_2);
2425 __ Move(kScratchRegister, js_entry_sp);
2426 __ movp(Operand(kScratchRegister, 0), Immediate(0));
2427 __ bind(&not_outermost_js_2);
2428
2429 // Restore the top frame descriptor from the stack.
2430 { Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
2431 __ Pop(c_entry_fp_operand);
2432 }
2433
2434 // Restore callee-saved registers (X64 conventions).
2435 #ifdef _WIN64
2436 // On Win64 XMM6-XMM15 are callee-save
2437 __ movdqu(xmm6, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0));
2438 __ movdqu(xmm7, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 1));
2439 __ movdqu(xmm8, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 2));
2440 __ movdqu(xmm9, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 3));
2441 __ movdqu(xmm10, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 4));
2442 __ movdqu(xmm11, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 5));
2443 __ movdqu(xmm12, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 6));
2444 __ movdqu(xmm13, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 7));
2445 __ movdqu(xmm14, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 8));
2446 __ movdqu(xmm15, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 9));
2447 __ addp(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
2448 #endif
2449
2450 __ popq(rbx);
2451 #ifdef _WIN64
2452 // Callee-save in Win64 ABI, arguments/volatile in AMD64 ABI.
2453 __ popq(rsi);
2454 __ popq(rdi);
2455 #endif
2456 __ popq(r15);
2457 __ popq(r14);
2458 __ popq(r13);
2459 __ popq(r12);
2460 __ addp(rsp, Immediate(2 * kPointerSize)); // remove markers
2461
2462 // Restore frame pointer and return.
2463 __ popq(rbp);
2464 __ ret(0);
2465 }
2466
2467
2468 void InstanceOfStub::Generate(MacroAssembler* masm) {
2469 Register const object = rdx; // Object (lhs).
2470 Register const function = rax; // Function (rhs).
2471 Register const object_map = rcx; // Map of {object}.
2472 Register const function_map = r8; // Map of {function}.
2473 Register const function_prototype = rdi; // Prototype of {function}.
2474
2475 DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
2476 DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
2477
2478 // Check if {object} is a smi.
2479 Label object_is_smi;
2480 __ JumpIfSmi(object, &object_is_smi, Label::kNear);
2481
2482 // Lookup the {function} and the {object} map in the global instanceof cache.
2483 // Note: This is safe because we clear the global instanceof cache whenever
2484 // we change the prototype of any object.
2485 Label fast_case, slow_case;
2486 __ movp(object_map, FieldOperand(object, HeapObject::kMapOffset));
2487 __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
2488 __ j(not_equal, &fast_case, Label::kNear);
2489 __ CompareRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
2490 __ j(not_equal, &fast_case, Label::kNear);
2491 __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
2492 __ ret(0);
2493
2494 // If {object} is a smi we can safely return false if {function} is a JS
2495 // function, otherwise we have to miss to the runtime and throw an exception.
2496 __ bind(&object_is_smi);
2497 __ JumpIfSmi(function, &slow_case);
2498 __ CmpObjectType(function, JS_FUNCTION_TYPE, function_map);
2499 __ j(not_equal, &slow_case);
2500 __ LoadRoot(rax, Heap::kFalseValueRootIndex);
2501 __ ret(0);
2502
2503 // Fast-case: The {function} must be a valid JSFunction.
2504 __ bind(&fast_case);
2505 __ JumpIfSmi(function, &slow_case);
2506 __ CmpObjectType(function, JS_FUNCTION_TYPE, function_map);
2507 __ j(not_equal, &slow_case);
2508
2509 // Ensure that {function} has an instance prototype.
2510 __ testb(FieldOperand(function_map, Map::kBitFieldOffset),
2511 Immediate(1 << Map::kHasNonInstancePrototype));
2512 __ j(not_zero, &slow_case);
2513
2514 // Get the "prototype" (or initial map) of the {function}.
2515 __ movp(function_prototype,
2516 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2517 __ AssertNotSmi(function_prototype);
2518
2519 // Resolve the prototype if the {function} has an initial map. Afterwards the
2520 // {function_prototype} will be either the JSReceiver prototype object or the
2521 // hole value; the latter means that no instances of the {function} have been
2522 // created so far, hence we should return false.
2523 Label function_prototype_valid;
2524 Register const function_prototype_map = kScratchRegister;
2525 __ CmpObjectType(function_prototype, MAP_TYPE, function_prototype_map);
2526 __ j(not_equal, &function_prototype_valid, Label::kNear);
2527 __ movp(function_prototype,
2528 FieldOperand(function_prototype, Map::kPrototypeOffset));
2529 __ bind(&function_prototype_valid);
2530 __ AssertNotSmi(function_prototype);
2531
2532 // Update the global instanceof cache with the current {object} map and
2533 // {function}. The cached answer will be set when it is known below.
2534 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
2535 __ StoreRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
2536
2537 // Loop through the prototype chain looking for the {function} prototype.
2538 // Assume true, and change to false if not found.
2539 Label done, loop, fast_runtime_fallback;
2540 __ LoadRoot(rax, Heap::kTrueValueRootIndex);
2541 __ bind(&loop);
2542
2543 __ testb(FieldOperand(object_map, Map::kBitFieldOffset),
2544 Immediate(1 << Map::kIsAccessCheckNeeded));
2545 __ j(not_zero, &fast_runtime_fallback, Label::kNear);
2546 __ CmpInstanceType(object_map, JS_PROXY_TYPE);
2547 __ j(equal, &fast_runtime_fallback, Label::kNear);
2548
2549 __ movp(object, FieldOperand(object_map, Map::kPrototypeOffset));
2550 __ cmpp(object, function_prototype);
2551 __ j(equal, &done, Label::kNear);
2552 __ CompareRoot(object, Heap::kNullValueRootIndex);
2553 __ movp(object_map, FieldOperand(object, HeapObject::kMapOffset));
2554 __ j(not_equal, &loop);
2555 __ LoadRoot(rax, Heap::kFalseValueRootIndex);
2556 __ bind(&done);
2557 __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
2558 __ ret(0);
2559
2560 // Found Proxy or access check needed: Call the runtime.
2561 __ bind(&fast_runtime_fallback);
2562 __ PopReturnAddressTo(kScratchRegister);
2563 __ Push(object);
2564 __ Push(function_prototype);
2565 __ PushReturnAddressFrom(kScratchRegister);
2566 // Invalidate the instanceof cache.
2567 __ Move(rax, Smi::FromInt(0));
2568 __ StoreRoot(rax, Heap::kInstanceofCacheFunctionRootIndex);
2569 __ TailCallRuntime(Runtime::kHasInPrototypeChain);
2570
2571 // Slow-case: Call the %InstanceOf runtime function.
2572 __ bind(&slow_case);
2573 __ PopReturnAddressTo(kScratchRegister);
2574 __ Push(object);
2575 __ Push(function);
2576 __ PushReturnAddressFrom(kScratchRegister);
2577 __ TailCallRuntime(Runtime::kInstanceOf);
2578 }
2579
2580
2581 // -------------------------------------------------------------------------
2582 // StringCharCodeAtGenerator
2583
2584 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
2585 // If the receiver is a smi trigger the non-string case.
2586 if (check_mode_ == RECEIVER_IS_UNKNOWN) {
2587 __ JumpIfSmi(object_, receiver_not_string_);
2588
2589 // Fetch the instance type of the receiver into result register.
2590 __ movp(result_, FieldOperand(object_, HeapObject::kMapOffset));
2591 __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
2592 // If the receiver is not a string trigger the non-string case.
2593 __ testb(result_, Immediate(kIsNotStringMask));
2594 __ j(not_zero, receiver_not_string_);
2595 }
2596
2597 // If the index is non-smi trigger the non-smi case.
2598 __ JumpIfNotSmi(index_, &index_not_smi_);
2599 __ bind(&got_smi_index_);
2600
2601 // Check for index out of range.
2602 __ SmiCompare(index_, FieldOperand(object_, String::kLengthOffset));
2603 __ j(above_equal, index_out_of_range_);
2604
2605 __ SmiToInteger32(index_, index_);
2606
2607 StringCharLoadGenerator::Generate(
2608 masm, object_, index_, result_, &call_runtime_);
2609
2610 __ Integer32ToSmi(result_, result_);
2611 __ bind(&exit_);
2612 }
2613
2614
2615 void StringCharCodeAtGenerator::GenerateSlow(
2616 MacroAssembler* masm, EmbedMode embed_mode,
2617 const RuntimeCallHelper& call_helper) {
2618 __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
2619
2620 Factory* factory = masm->isolate()->factory();
2621 // Index is not a smi.
2622 __ bind(&index_not_smi_);
2623 // If index is a heap number, try converting it to an integer.
2624 __ CheckMap(index_,
2625 factory->heap_number_map(),
2626 index_not_number_,
2627 DONT_DO_SMI_CHECK);
2628 call_helper.BeforeCall(masm);
2629 if (embed_mode == PART_OF_IC_HANDLER) {
2630 __ Push(LoadWithVectorDescriptor::VectorRegister());
2631 __ Push(LoadDescriptor::SlotRegister());
2632 }
2633 __ Push(object_);
2634 __ Push(index_); // Consumed by runtime conversion function.
2635 if (index_flags_ == STRING_INDEX_IS_NUMBER) {
2636 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
2637 } else {
2638 DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
2639 // NumberToSmi discards numbers that are not exact integers.
2640 __ CallRuntime(Runtime::kNumberToSmi);
2641 }
2642 if (!index_.is(rax)) {
2643 // Save the conversion result before the pop instructions below
2644 // have a chance to overwrite it.
2645 __ movp(index_, rax);
2646 }
2647 __ Pop(object_);
2648 if (embed_mode == PART_OF_IC_HANDLER) {
2649 __ Pop(LoadDescriptor::SlotRegister());
2650 __ Pop(LoadWithVectorDescriptor::VectorRegister());
2651 }
2652 // Reload the instance type.
2653 __ movp(result_, FieldOperand(object_, HeapObject::kMapOffset));
2654 __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
2655 call_helper.AfterCall(masm);
2656 // If index is still not a smi, it must be out of range.
2657 __ JumpIfNotSmi(index_, index_out_of_range_);
2658 // Otherwise, return to the fast path.
2659 __ jmp(&got_smi_index_);
2660
2661 // Call runtime. We get here when the receiver is a string and the
2662 // index is a number, but the code for getting the actual character
2663 // is too complex (e.g., when the string needs to be flattened).
2664 __ bind(&call_runtime_);
2665 call_helper.BeforeCall(masm);
2666 __ Push(object_);
2667 __ Integer32ToSmi(index_, index_);
2668 __ Push(index_);
2669 __ CallRuntime(Runtime::kStringCharCodeAtRT);
2670 if (!result_.is(rax)) {
2671 __ movp(result_, rax);
2672 }
2673 call_helper.AfterCall(masm);
2674 __ jmp(&exit_);
2675
2676 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
2677 }
2678
2679
2680 // -------------------------------------------------------------------------
2681 // StringCharFromCodeGenerator
2682
2683 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
2684 // Fast case of Heap::LookupSingleCharacterStringFromCode.
2685 __ JumpIfNotSmi(code_, &slow_case_);
2686 __ SmiCompare(code_, Smi::FromInt(String::kMaxOneByteCharCode));
2687 __ j(above, &slow_case_);
2688
2689 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
2690 SmiIndex index = masm->SmiToIndex(kScratchRegister, code_, kPointerSizeLog2);
2691 __ movp(result_, FieldOperand(result_, index.reg, index.scale,
2692 FixedArray::kHeaderSize));
2693 __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
2694 __ j(equal, &slow_case_);
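// The cache is a FixedArray indexed by character code. A hit yields the
// cached one-character string; the undefined sentinel means the string has
// not been materialized yet and must be created in the slow case.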
2695 __ bind(&exit_);
2696 }
2697
2698
2699 void StringCharFromCodeGenerator::GenerateSlow(
2700 MacroAssembler* masm,
2701 const RuntimeCallHelper& call_helper) {
2702 __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
2703
2704 __ bind(&slow_case_);
2705 call_helper.BeforeCall(masm);
2706 __ Push(code_);
2707 __ CallRuntime(Runtime::kStringCharFromCode);
2708 if (!result_.is(rax)) {
2709 __ movp(result_, rax);
2710 }
2711 call_helper.AfterCall(masm);
2712 __ jmp(&exit_);
2713
2714 __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
2715 }
2716
2717
2718 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
2719 Register dest,
2720 Register src,
2721 Register count,
2722 String::Encoding encoding) {
2723 // Nothing to do for zero characters.
2724 Label done;
2725 __ testl(count, count);
2726 __ j(zero, &done, Label::kNear);
2727
2728 // Make count the number of bytes to copy.
2729 if (encoding == String::TWO_BYTE_ENCODING) {
2730 STATIC_ASSERT(2 == sizeof(uc16));
2731 __ addl(count, count);
2732 }
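// From here on, count is a byte count, so the loop below can copy one byte
// per iteration regardless of the string encoding.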
2733
2734 // Copy remaining characters.
2735 Label loop;
2736 __ bind(&loop);
2737 __ movb(kScratchRegister, Operand(src, 0));
2738 __ movb(Operand(dest, 0), kScratchRegister);
2739 __ incp(src);
2740 __ incp(dest);
2741 __ decl(count);
2742 __ j(not_zero, &loop);
2743
2744 __ bind(&done);
2745 }
2746
2747
2748 void SubStringStub::Generate(MacroAssembler* masm) {
2749 Label runtime;
2750
2751 // Stack frame on entry.
2752 // rsp[0] : return address
2753 // rsp[8] : to
2754 // rsp[16] : from
2755 // rsp[24] : string
2756
2757 enum SubStringStubArgumentIndices {
2758 STRING_ARGUMENT_INDEX,
2759 FROM_ARGUMENT_INDEX,
2760 TO_ARGUMENT_INDEX,
2761 SUB_STRING_ARGUMENT_COUNT
2762 };
2763
2764 StackArgumentsAccessor args(rsp, SUB_STRING_ARGUMENT_COUNT,
2765 ARGUMENTS_DONT_CONTAIN_RECEIVER);
2766
2767 // Make sure first argument is a string.
2768 __ movp(rax, args.GetArgumentOperand(STRING_ARGUMENT_INDEX));
2769 STATIC_ASSERT(kSmiTag == 0);
2770 __ testl(rax, Immediate(kSmiTagMask));
2771 __ j(zero, &runtime);
2772 Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
2773 __ j(NegateCondition(is_string), &runtime);
2774
2775 // rax: string
2776 // rbx: instance type
2777 // Calculate length of sub string using the smi values.
2778 __ movp(rcx, args.GetArgumentOperand(TO_ARGUMENT_INDEX));
2779 __ movp(rdx, args.GetArgumentOperand(FROM_ARGUMENT_INDEX));
2780 __ JumpUnlessBothNonNegativeSmi(rcx, rdx, &runtime);
2781
2782 __ SmiSub(rcx, rcx, rdx); // Overflow doesn't happen.
2783 __ cmpp(rcx, FieldOperand(rax, String::kLengthOffset));
2784 Label not_original_string;
2785 // Shorter than original string's length: an actual substring.
2786 __ j(below, &not_original_string, Label::kNear);
2787 // Longer than original string's length or negative: unsafe arguments.
2788 __ j(above, &runtime);
2789 // Return original string.
2790 Counters* counters = isolate()->counters();
2791 __ IncrementCounter(counters->sub_string_native(), 1);
2792 __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
2793 __ bind(&not_original_string);
2794
2795 Label single_char;
2796 __ SmiCompare(rcx, Smi::FromInt(1));
2797 __ j(equal, &single_char);
2798
2799 __ SmiToInteger32(rcx, rcx);
2800
2801 // rax: string
2802 // rbx: instance type
2803 // rcx: sub string length
2804 // rdx: from index (smi)
2805 // Deal with different string types: update the index if necessary
2806 // and put the underlying string into rdi.
2807 Label underlying_unpacked, sliced_string, seq_or_external_string;
2808 // If the string is not indirect, it can only be sequential or external.
2809 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
2810 STATIC_ASSERT(kIsIndirectStringMask != 0);
2811 __ testb(rbx, Immediate(kIsIndirectStringMask));
2812 __ j(zero, &seq_or_external_string, Label::kNear);
2813
2814 __ testb(rbx, Immediate(kSlicedNotConsMask));
2815 __ j(not_zero, &sliced_string, Label::kNear);
2816 // Cons string. Check whether it is flat, then fetch first part.
2817 // Flat cons strings have an empty second part.
2818 __ CompareRoot(FieldOperand(rax, ConsString::kSecondOffset),
2819 Heap::kempty_stringRootIndex);
2820 __ j(not_equal, &runtime);
2821 __ movp(rdi, FieldOperand(rax, ConsString::kFirstOffset));
2822 // Update instance type.
2823 __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
2824 __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
2825 __ jmp(&underlying_unpacked, Label::kNear);
2826
2827 __ bind(&sliced_string);
2828 // Sliced string. Fetch parent and correct start index by offset.
2829 __ addp(rdx, FieldOperand(rax, SlicedString::kOffsetOffset));
2830 __ movp(rdi, FieldOperand(rax, SlicedString::kParentOffset));
2831 // Update instance type.
2832 __ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
2833 __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
2834 __ jmp(&underlying_unpacked, Label::kNear);
2835
2836 __ bind(&seq_or_external_string);
2837 // Sequential or external string. Just move string to the correct register.
2838 __ movp(rdi, rax);
2839
2840 __ bind(&underlying_unpacked);
2841
2842 if (FLAG_string_slices) {
2843 Label copy_routine;
2844 // rdi: underlying subject string
2845 // rbx: instance type of underlying subject string
2846 // rdx: adjusted start index (smi)
2847 // rcx: length
2848 // If coming from the make_two_character_string path, the string
2849 // is too short to be sliced anyway.
2850 __ cmpp(rcx, Immediate(SlicedString::kMinLength));
2851 // Short slice. Copy instead of slicing.
2852 __ j(less, &copy_routine);
2853 // Allocate new sliced string. At this point we do not reload the instance
2854 // type including the string encoding because we simply rely on the info
2855 // provided by the original string. It does not matter if the original
2856 // string's encoding is wrong because we always have to recheck encoding of
2857 // the newly created string's parent anyway due to externalized strings.
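// Note: a sliced string holds no characters of its own; the stores below
// fill in exactly its four header fields: length, hash, parent, offset.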
2858 Label two_byte_slice, set_slice_header;
2859 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
2860 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
2861 __ testb(rbx, Immediate(kStringEncodingMask));
2862 __ j(zero, &two_byte_slice, Label::kNear);
2863 __ AllocateOneByteSlicedString(rax, rbx, r14, &runtime);
2864 __ jmp(&set_slice_header, Label::kNear);
2865 __ bind(&two_byte_slice);
2866 __ AllocateTwoByteSlicedString(rax, rbx, r14, &runtime);
2867 __ bind(&set_slice_header);
2868 __ Integer32ToSmi(rcx, rcx);
2869 __ movp(FieldOperand(rax, SlicedString::kLengthOffset), rcx);
2870 __ movp(FieldOperand(rax, SlicedString::kHashFieldOffset),
2871 Immediate(String::kEmptyHashField));
2872 __ movp(FieldOperand(rax, SlicedString::kParentOffset), rdi);
2873 __ movp(FieldOperand(rax, SlicedString::kOffsetOffset), rdx);
2874 __ IncrementCounter(counters->sub_string_native(), 1);
2875 __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
2876
2877 __ bind(&copy_routine);
2878 }
2879
2880 // rdi: underlying subject string
2881 // rbx: instance type of underlying subject string
2882 // rdx: adjusted start index (smi)
2883 // rcx: length
2884 // The subject string can only be an external or a sequential string of
2885 // either encoding at this point.
2886 Label two_byte_sequential, sequential_string;
2887 STATIC_ASSERT(kExternalStringTag != 0);
2888 STATIC_ASSERT(kSeqStringTag == 0);
2889 __ testb(rbx, Immediate(kExternalStringTag));
2890 __ j(zero, &sequential_string);
2891
2892 // Handle external string.
2893 // Rule out short external strings.
2894 STATIC_ASSERT(kShortExternalStringTag != 0);
2895 __ testb(rbx, Immediate(kShortExternalStringMask));
2896 __ j(not_zero, &runtime);
2897 __ movp(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
2898 // Move the pointer so that offset-wise, it looks like a sequential string.
2899 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
2900 __ subp(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
2901
2902 __ bind(&sequential_string);
2903 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
2904 __ testb(rbx, Immediate(kStringEncodingMask));
2905 __ j(zero, &two_byte_sequential);
2906
2907 // Allocate the result.
2908 __ AllocateOneByteString(rax, rcx, r11, r14, r15, &runtime);
2909
2910 // rax: result string
2911 // rcx: result string length
2912 { // Locate character of sub string start.
2913 SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_1);
2914 __ leap(r14, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
2915 SeqOneByteString::kHeaderSize - kHeapObjectTag));
2916 }
2917 // Locate first character of result.
2918 __ leap(rdi, FieldOperand(rax, SeqOneByteString::kHeaderSize));
2919
2920 // rax: result string
2921 // rcx: result length
2922 // rdi: first character of result
2923 // r14: character of sub string start
2924 StringHelper::GenerateCopyCharacters(
2925 masm, rdi, r14, rcx, String::ONE_BYTE_ENCODING);
2926 __ IncrementCounter(counters->sub_string_native(), 1);
2927 __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
2928
2929 __ bind(&two_byte_sequential);
2930 // Allocate the result.
2931 __ AllocateTwoByteString(rax, rcx, r11, r14, r15, &runtime);
2932
2933 // rax: result string
2934 // rcx: result string length
2935 { // Locate character of sub string start.
2936 SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_2);
2937 __ leap(r14, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
2938 SeqOneByteString::kHeaderSize - kHeapObjectTag));
2939 }
2940 // Locate first character of result.
2941 __ leap(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
2942
2943 // rax: result string
2944 // rcx: result length
2945 // rdi: first character of result
2946 // r14: character of sub string start
2947 StringHelper::GenerateCopyCharacters(
2948 masm, rdi, r14, rcx, String::TWO_BYTE_ENCODING);
2949 __ IncrementCounter(counters->sub_string_native(), 1);
2950 __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
2951
2952 // Just jump to runtime to create the sub string.
2953 __ bind(&runtime);
2954 __ TailCallRuntime(Runtime::kSubString);
2955
2956 __ bind(&single_char);
2957 // rax: string
2958 // rbx: instance type
2959 // rcx: sub string length (smi)
2960 // rdx: from index (smi)
2961 StringCharAtGenerator generator(rax, rdx, rcx, rax, &runtime, &runtime,
2962 &runtime, STRING_INDEX_IS_NUMBER,
2963 RECEIVER_IS_STRING);
2964 generator.GenerateFast(masm);
2965 __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
2966 generator.SkipSlow(masm, &runtime);
2967 }
2968
2969
2970 void ToNumberStub::Generate(MacroAssembler* masm) {
2971 // The ToNumber stub takes one argument in rax.
2972 Label not_smi;
2973 __ JumpIfNotSmi(rax, &not_smi, Label::kNear);
2974 __ Ret();
2975 __ bind(&not_smi);
2976
2977 Label not_heap_number;
2978 __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
2979 Heap::kHeapNumberMapRootIndex);
2980 __ j(not_equal, &not_heap_number, Label::kNear);
2981 __ Ret();
2982 __ bind(&not_heap_number);
2983
2984 Label not_string, slow_string;
2985 __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdi);
2986 // rax: object
2987 // rdi: object map
2988 __ j(above_equal, &not_string, Label::kNear);
2989 // Check if string has a cached array index.
2990 __ testl(FieldOperand(rax, String::kHashFieldOffset),
2991 Immediate(String::kContainsCachedArrayIndexMask));
2992 __ j(not_zero, &slow_string, Label::kNear);
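// Note: a string's hash field can cache its value as an array index;
// IndexFromHash extracts that cached index, avoiding a runtime call.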
2993 __ movl(rax, FieldOperand(rax, String::kHashFieldOffset));
2994 __ IndexFromHash(rax, rax);
2995 __ Ret();
2996 __ bind(&slow_string);
2997 __ PopReturnAddressTo(rcx); // Pop return address.
2998 __ Push(rax); // Push argument.
2999 __ PushReturnAddressFrom(rcx); // Push return address.
3000 __ TailCallRuntime(Runtime::kStringToNumber);
3001 __ bind(&not_string);
3002
3003 Label not_oddball;
3004 __ CmpInstanceType(rdi, ODDBALL_TYPE);
3005 __ j(not_equal, &not_oddball, Label::kNear);
3006 __ movp(rax, FieldOperand(rax, Oddball::kToNumberOffset));
3007 __ Ret();
3008 __ bind(&not_oddball);
3009
3010 __ PopReturnAddressTo(rcx); // Pop return address.
3011 __ Push(rax); // Push argument.
3012 __ PushReturnAddressFrom(rcx); // Push return address.
3013 __ TailCallRuntime(Runtime::kToNumber);
3014 }
3015
3016
3017 void ToLengthStub::Generate(MacroAssembler* masm) {
3018 // The ToLength stub takes one argument in rax.
3019 Label not_smi, positive_smi;
3020 __ JumpIfNotSmi(rax, &not_smi, Label::kNear);
3021 STATIC_ASSERT(kSmiTag == 0);
3022 __ testp(rax, rax);
3023 __ j(greater_equal, &positive_smi, Label::kNear);
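// Note: ToLength clamps negative inputs to zero.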
3024 __ xorl(rax, rax);
3025 __ bind(&positive_smi);
3026 __ Ret();
3027 __ bind(&not_smi);
3028
3029 __ PopReturnAddressTo(rcx); // Pop return address.
3030 __ Push(rax); // Push argument.
3031 __ PushReturnAddressFrom(rcx); // Push return address.
3032 __ TailCallRuntime(Runtime::kToLength);
3033 }
3034
3035
3036 void ToStringStub::Generate(MacroAssembler* masm) {
3037 // The ToString stub takes one argument in rax.
3038 Label is_number;
3039 __ JumpIfSmi(rax, &is_number, Label::kNear);
3040
3041 Label not_string;
3042 __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdi);
3043 // rax: receiver
3044 // rdi: receiver map
3045 __ j(above_equal, &not_string, Label::kNear);
3046 __ Ret();
3047 __ bind(&not_string);
3048
3049 Label not_heap_number;
3050 __ CompareRoot(rdi, Heap::kHeapNumberMapRootIndex);
3051 __ j(not_equal, &not_heap_number, Label::kNear);
3052 __ bind(&is_number);
3053 NumberToStringStub stub(isolate());
3054 __ TailCallStub(&stub);
3055 __ bind(&not_heap_number);
3056
3057 Label not_oddball;
3058 __ CmpInstanceType(rdi, ODDBALL_TYPE);
3059 __ j(not_equal, &not_oddball, Label::kNear);
3060 __ movp(rax, FieldOperand(rax, Oddball::kToStringOffset));
3061 __ Ret();
3062 __ bind(&not_oddball);
3063
3064 __ PopReturnAddressTo(rcx); // Pop return address.
3065 __ Push(rax); // Push argument.
3066 __ PushReturnAddressFrom(rcx); // Push return address.
3067 __ TailCallRuntime(Runtime::kToString);
3068 }
3069
3070
3071 void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
3072 Register left,
3073 Register right,
3074 Register scratch1,
3075 Register scratch2) {
3076 Register length = scratch1;
3077
3078 // Compare lengths.
3079 Label check_zero_length;
3080 __ movp(length, FieldOperand(left, String::kLengthOffset));
3081 __ SmiCompare(length, FieldOperand(right, String::kLengthOffset));
3082 __ j(equal, &check_zero_length, Label::kNear);
3083 __ Move(rax, Smi::FromInt(NOT_EQUAL));
3084 __ ret(0);
3085
3086 // Check if the length is zero.
3087 Label compare_chars;
3088 __ bind(&check_zero_length);
3089 STATIC_ASSERT(kSmiTag == 0);
3090 __ SmiTest(length);
3091 __ j(not_zero, &compare_chars, Label::kNear);
3092 __ Move(rax, Smi::FromInt(EQUAL));
3093 __ ret(0);
3094
3095 // Compare characters.
3096 __ bind(&compare_chars);
3097 Label strings_not_equal;
3098 GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2,
3099 &strings_not_equal, Label::kNear);
3100
3101 // Characters are equal.
3102 __ Move(rax, Smi::FromInt(EQUAL));
3103 __ ret(0);
3104
3105 // Characters are not equal.
3106 __ bind(&strings_not_equal);
3107 __ Move(rax, Smi::FromInt(NOT_EQUAL));
3108 __ ret(0);
3109 }
3110
3111
3112 void StringHelper::GenerateCompareFlatOneByteStrings(
3113 MacroAssembler* masm, Register left, Register right, Register scratch1,
3114 Register scratch2, Register scratch3, Register scratch4) {
3115 // Ensure that you can always subtract a string length from a non-negative
3116 // number (e.g. another length).
3117 STATIC_ASSERT(String::kMaxLength < 0x7fffffff);
3118
3119 // Find minimum length and length difference.
3120 __ movp(scratch1, FieldOperand(left, String::kLengthOffset));
3121 __ movp(scratch4, scratch1);
3122 __ SmiSub(scratch4,
3123 scratch4,
3124 FieldOperand(right, String::kLengthOffset));
3125 // Register scratch4 now holds left.length - right.length.
3126 const Register length_difference = scratch4;
3127 Label left_shorter;
3128 __ j(less, &left_shorter, Label::kNear);
3129 // The right string isn't longer than the left one.
3130 // Get the right string's length by subtracting the (non-negative) difference
3131 // from the left string's length.
3132 __ SmiSub(scratch1, scratch1, length_difference);
3133 __ bind(&left_shorter);
3134 // Register scratch1 now holds Min(left.length, right.length).
3135 const Register min_length = scratch1;
3136
3137 Label compare_lengths;
3138 // If min-length is zero, go directly to comparing lengths.
3139 __ SmiTest(min_length);
3140 __ j(zero, &compare_lengths, Label::kNear);
3141
3142 // Compare loop.
3143 Label result_not_equal;
3144 GenerateOneByteCharsCompareLoop(
3145 masm, left, right, min_length, scratch2, &result_not_equal,
3146 // In debug-code mode, SmiTest below might push
3147 // the target label outside the near range.
3148 Label::kFar);
3149
3150 // Completed loop without finding different characters.
3151 // Compare lengths (precomputed).
3152 __ bind(&compare_lengths);
3153 __ SmiTest(length_difference);
3154 Label length_not_equal;
3155 __ j(not_zero, &length_not_equal, Label::kNear);
3156
3157 // Result is EQUAL.
3158 __ Move(rax, Smi::FromInt(EQUAL));
3159 __ ret(0);
3160
3161 Label result_greater;
3162 Label result_less;
3163 __ bind(&length_not_equal);
3164 __ j(greater, &result_greater, Label::kNear);
3165 __ jmp(&result_less, Label::kNear);
3166 __ bind(&result_not_equal);
3167 // Unequal comparison of left to right, either character or length.
3168 __ j(above, &result_greater, Label::kNear);
3169 __ bind(&result_less);
3170
3171 // Result is LESS.
3172 __ Move(rax, Smi::FromInt(LESS));
3173 __ ret(0);
3174
3175 // Result is GREATER.
3176 __ bind(&result_greater);
3177 __ Move(rax, Smi::FromInt(GREATER));
3178 __ ret(0);
3179 }
3180
3181
3182 void StringHelper::GenerateOneByteCharsCompareLoop(
3183 MacroAssembler* masm, Register left, Register right, Register length,
3184 Register scratch, Label* chars_not_equal, Label::Distance near_jump) {
3185 // Change index to run from -length to -1 by adding length to string
3186 // start. This means that loop ends when index reaches zero, which
3187 // doesn't need an additional compare.
3188 __ SmiToInteger32(length, length);
3189 __ leap(left,
3190 FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize));
3191 __ leap(right,
3192 FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize));
3193 __ negq(length);
3194 Register index = length; // index = -length;
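// Note: for length == 4, left and right now point one past their last
// characters and index runs -4, -3, -2, -1; the incq below sets ZF
// exactly when index reaches zero, so no separate bound check is needed.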
3195
3196 // Compare loop.
3197 Label loop;
3198 __ bind(&loop);
3199 __ movb(scratch, Operand(left, index, times_1, 0));
3200 __ cmpb(scratch, Operand(right, index, times_1, 0));
3201 __ j(not_equal, chars_not_equal, near_jump);
3202 __ incq(index);
3203 __ j(not_zero, &loop);
3204 }
3205
3206
3207 void StringCompareStub::Generate(MacroAssembler* masm) {
3208 // ----------- S t a t e -------------
3209 // -- rdx : left string
3210 // -- rax : right string
3211 // -- rsp[0] : return address
3212 // -----------------------------------
3213 __ AssertString(rdx);
3214 __ AssertString(rax);
3215
3216 // Check for identity.
3217 Label not_same;
3218 __ cmpp(rdx, rax);
3219 __ j(not_equal, &not_same, Label::kNear);
3220 __ Move(rax, Smi::FromInt(EQUAL));
3221 __ IncrementCounter(isolate()->counters()->string_compare_native(), 1);
3222 __ Ret();
3223
3224 __ bind(&not_same);
3225
3226 // Check that both are sequential one-byte strings.
3227 Label runtime;
3228 __ JumpIfNotBothSequentialOneByteStrings(rdx, rax, rcx, rbx, &runtime);
3229
3230 // Inline comparison of one-byte strings.
3231 __ IncrementCounter(isolate()->counters()->string_compare_native(), 1);
3232 StringHelper::GenerateCompareFlatOneByteStrings(masm, rdx, rax, rcx, rbx, rdi,
3233 r8);
3234
3235 // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
3236 // tagged as a small integer.
3237 __ bind(&runtime);
3238 __ PopReturnAddressTo(rcx);
3239 __ Push(rdx);
3240 __ Push(rax);
3241 __ PushReturnAddressFrom(rcx);
3242 __ TailCallRuntime(Runtime::kStringCompare);
3243 }
3244
3245
3246 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
3247 // ----------- S t a t e -------------
3248 // -- rdx : left
3249 // -- rax : right
3250 // -- rsp[0] : return address
3251 // -----------------------------------
3252
3253 // Load rcx with the allocation site. We stick an undefined dummy value here
3254 // and replace it with the real allocation site later when we instantiate this
3255 // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
3256 __ Move(rcx, handle(isolate()->heap()->undefined_value()));
3257
3258 // Make sure that we actually patched the allocation site.
3259 if (FLAG_debug_code) {
3260 __ testb(rcx, Immediate(kSmiTagMask));
3261 __ Assert(not_equal, kExpectedAllocationSite);
3262 __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset),
3263 isolate()->factory()->allocation_site_map());
3264 __ Assert(equal, kExpectedAllocationSite);
3265 }
3266
3267 // Tail call into the stub that handles binary operations with allocation
3268 // sites.
3269 BinaryOpWithAllocationSiteStub stub(isolate(), state());
3270 __ TailCallStub(&stub);
3271 }
3272
3273
3274 void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
3275 DCHECK_EQ(CompareICState::BOOLEAN, state());
3276 Label miss;
3277 Label::Distance const miss_distance =
3278 masm->emit_debug_code() ? Label::kFar : Label::kNear;
3279
3280 __ JumpIfSmi(rdx, &miss, miss_distance);
3281 __ movp(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
3282 __ JumpIfSmi(rax, &miss, miss_distance);
3283 __ movp(rbx, FieldOperand(rax, HeapObject::kMapOffset));
3284 __ JumpIfNotRoot(rcx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
3285 __ JumpIfNotRoot(rbx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
3286 if (op() != Token::EQ_STRICT && is_strong(strength())) {
3287 __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
3288 } else {
3289 if (!Token::IsEqualityOp(op())) {
3290 __ movp(rax, FieldOperand(rax, Oddball::kToNumberOffset));
3291 __ AssertSmi(rax);
3292 __ movp(rdx, FieldOperand(rdx, Oddball::kToNumberOffset));
3293 __ AssertSmi(rdx);
3294 __ pushq(rax);
3295 __ movq(rax, rdx);
3296 __ popq(rdx);
3297 }
3298 __ subp(rax, rdx);
3299 __ Ret();
3300 }
3301
3302 __ bind(&miss);
3303 GenerateMiss(masm);
3304 }
3305
3306
3307 void CompareICStub::GenerateSmis(MacroAssembler* masm) {
3308 DCHECK(state() == CompareICState::SMI);
3309 Label miss;
3310 __ JumpIfNotBothSmi(rdx, rax, &miss, Label::kNear);
3311
3312 if (GetCondition() == equal) {
3313 // For equality we do not care about the sign of the result.
3314 __ subp(rax, rdx);
3315 } else {
3316 Label done;
3317 __ subp(rdx, rax);
3318 __ j(no_overflow, &done, Label::kNear);
3319 // Correct sign of result in case of overflow.
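// Note: notp computes ~rdx == -rdx - 1, which flips the sign bit without
// risking a second overflow (negp would overflow for INT64_MIN); only the
// sign of the result matters to the caller here.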
3320 __ notp(rdx);
3321 __ bind(&done);
3322 __ movp(rax, rdx);
3323 }
3324 __ ret(0);
3325
3326 __ bind(&miss);
3327 GenerateMiss(masm);
3328 }
3329
3330
3331 void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
3332 DCHECK(state() == CompareICState::NUMBER);
3333
3334 Label generic_stub;
3335 Label unordered, maybe_undefined1, maybe_undefined2;
3336 Label miss;
3337
3338 if (left() == CompareICState::SMI) {
3339 __ JumpIfNotSmi(rdx, &miss);
3340 }
3341 if (right() == CompareICState::SMI) {
3342 __ JumpIfNotSmi(rax, &miss);
3343 }
3344
3345 // Load left and right operand.
3346 Label done, left, left_smi, right_smi;
3347 __ JumpIfSmi(rax, &right_smi, Label::kNear);
3348 __ CompareMap(rax, isolate()->factory()->heap_number_map());
3349 __ j(not_equal, &maybe_undefined1, Label::kNear);
3350 __ Movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
3351 __ jmp(&left, Label::kNear);
3352 __ bind(&right_smi);
3353 __ SmiToInteger32(rcx, rax); // Can't clobber rax yet.
3354 __ Cvtlsi2sd(xmm1, rcx);
3355
3356 __ bind(&left);
3357 __ JumpIfSmi(rdx, &left_smi, Label::kNear);
3358 __ CompareMap(rdx, isolate()->factory()->heap_number_map());
3359 __ j(not_equal, &maybe_undefined2, Label::kNear);
3360 __ Movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
3361 __ jmp(&done);
3362 __ bind(&left_smi);
3363 __ SmiToInteger32(rcx, rdx); // Can't clobber rdx yet.
3364 __ Cvtlsi2sd(xmm0, rcx);
3365
3366 __ bind(&done);
3367 // Compare operands
3368 __ Ucomisd(xmm0, xmm1);
3369
3370 // Don't base result on EFLAGS when a NaN is involved.
3371 __ j(parity_even, &unordered, Label::kNear);
3372
3373 // Return a result of -1, 0, or 1, based on EFLAGS.
3374 // Clear with mov rather than xor, because xor would clobber the flags.
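// Note: the flags set by Ucomisd survive the movs and setcc, so:
//   above -> rax = 1, CF = 0, sbb leaves  1 (GREATER)
//   equal -> rax = 0, CF = 0, sbb leaves  0 (EQUAL)
//   below -> rax = 0, CF = 1, sbb yields -1 (LESS)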
3375 __ movl(rax, Immediate(0));
3376 __ movl(rcx, Immediate(0));
3377 __ setcc(above, rax); // Add one to zero if carry clear and not equal.
3378 __ sbbp(rax, rcx); // Subtract one if below (aka. carry set).
3379 __ ret(0);
3380
3381 __ bind(&unordered);
3382 __ bind(&generic_stub);
3383 CompareICStub stub(isolate(), op(), strength(), CompareICState::GENERIC,
3384 CompareICState::GENERIC, CompareICState::GENERIC);
3385 __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
3386
3387 __ bind(&maybe_undefined1);
3388 if (Token::IsOrderedRelationalCompareOp(op())) {
3389 __ Cmp(rax, isolate()->factory()->undefined_value());
3390 __ j(not_equal, &miss);
3391 __ JumpIfSmi(rdx, &unordered);
3392 __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
3393 __ j(not_equal, &maybe_undefined2, Label::kNear);
3394 __ jmp(&unordered);
3395 }
3396
3397 __ bind(&maybe_undefined2);
3398 if (Token::IsOrderedRelationalCompareOp(op())) {
3399 __ Cmp(rdx, isolate()->factory()->undefined_value());
3400 __ j(equal, &unordered);
3401 }
3402
3403 __ bind(&miss);
3404 GenerateMiss(masm);
3405 }
3406
3407
3408 void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
3409 DCHECK(state() == CompareICState::INTERNALIZED_STRING);
3410 DCHECK(GetCondition() == equal);
3411
3412 // Registers containing left and right operands respectively.
3413 Register left = rdx;
3414 Register right = rax;
3415 Register tmp1 = rcx;
3416 Register tmp2 = rbx;
3417
3418 // Check that both operands are heap objects.
3419 Label miss;
3420 Condition cond = masm->CheckEitherSmi(left, right, tmp1);
3421 __ j(cond, &miss, Label::kNear);
3422
3423 // Check that both operands are internalized strings.
3424 __ movp(tmp1, FieldOperand(left, HeapObject::kMapOffset));
3425 __ movp(tmp2, FieldOperand(right, HeapObject::kMapOffset));
3426 __ movzxbp(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
3427 __ movzxbp(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
3428 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3429 __ orp(tmp1, tmp2);
3430 __ testb(tmp1, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
3431 __ j(not_zero, &miss, Label::kNear);
3432
3433 // Internalized strings are compared by identity.
3434 Label done;
3435 __ cmpp(left, right);
3436 // Make sure rax is non-zero. At this point input operands are
3437 // guaranteed to be non-zero.
3438 DCHECK(right.is(rax));
3439 __ j(not_equal, &done, Label::kNear);
3440 STATIC_ASSERT(EQUAL == 0);
3441 STATIC_ASSERT(kSmiTag == 0);
3442 __ Move(rax, Smi::FromInt(EQUAL));
3443 __ bind(&done);
3444 __ ret(0);
3445
3446 __ bind(&miss);
3447 GenerateMiss(masm);
3448 }
3449
3450
3451 void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
3452 DCHECK(state() == CompareICState::UNIQUE_NAME);
3453 DCHECK(GetCondition() == equal);
3454
3455 // Registers containing left and right operands respectively.
3456 Register left = rdx;
3457 Register right = rax;
3458 Register tmp1 = rcx;
3459 Register tmp2 = rbx;
3460
3461 // Check that both operands are heap objects.
3462 Label miss;
3463 Condition cond = masm->CheckEitherSmi(left, right, tmp1);
3464 __ j(cond, &miss, Label::kNear);
3465
3466 // Check that both operands are unique names. This leaves the instance
3467 // types loaded in tmp1 and tmp2.
3468 __ movp(tmp1, FieldOperand(left, HeapObject::kMapOffset));
3469 __ movp(tmp2, FieldOperand(right, HeapObject::kMapOffset));
3470 __ movzxbp(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
3471 __ movzxbp(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
3472
3473 __ JumpIfNotUniqueNameInstanceType(tmp1, &miss, Label::kNear);
3474 __ JumpIfNotUniqueNameInstanceType(tmp2, &miss, Label::kNear);
3475
3476 // Unique names are compared by identity.
3477 Label done;
3478 __ cmpp(left, right);
3479 // Make sure rax is non-zero. At this point input operands are
3480 // guaranteed to be non-zero.
3481 DCHECK(right.is(rax));
3482 __ j(not_equal, &done, Label::kNear);
3483 STATIC_ASSERT(EQUAL == 0);
3484 STATIC_ASSERT(kSmiTag == 0);
3485 __ Move(rax, Smi::FromInt(EQUAL));
3486 __ bind(&done);
3487 __ ret(0);
3488
3489 __ bind(&miss);
3490 GenerateMiss(masm);
3491 }
3492
3493
3494 void CompareICStub::GenerateStrings(MacroAssembler* masm) {
3495 DCHECK(state() == CompareICState::STRING);
3496 Label miss;
3497
3498 bool equality = Token::IsEqualityOp(op());
3499
3500 // Registers containing left and right operands respectively.
3501 Register left = rdx;
3502 Register right = rax;
3503 Register tmp1 = rcx;
3504 Register tmp2 = rbx;
3505 Register tmp3 = rdi;
3506
3507 // Check that both operands are heap objects.
3508 Condition cond = masm->CheckEitherSmi(left, right, tmp1);
3509 __ j(cond, &miss);
3510
3511 // Check that both operands are strings. This leaves the instance
3512 // types loaded in tmp1 and tmp2.
3513 __ movp(tmp1, FieldOperand(left, HeapObject::kMapOffset));
3514 __ movp(tmp2, FieldOperand(right, HeapObject::kMapOffset));
3515 __ movzxbp(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
3516 __ movzxbp(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
3517 __ movp(tmp3, tmp1);
3518 STATIC_ASSERT(kNotStringTag != 0);
3519 __ orp(tmp3, tmp2);
3520 __ testb(tmp3, Immediate(kIsNotStringMask));
3521 __ j(not_zero, &miss);
3522
3523 // Fast check for identical strings.
3524 Label not_same;
3525 __ cmpp(left, right);
3526 __ j(not_equal, &not_same, Label::kNear);
3527 STATIC_ASSERT(EQUAL == 0);
3528 STATIC_ASSERT(kSmiTag == 0);
3529 __ Move(rax, Smi::FromInt(EQUAL));
3530 __ ret(0);
3531
3532 // Handle not identical strings.
3533 __ bind(&not_same);
3534
3535 // Check that both strings are internalized strings. If they are, we're done
3536 // because we already know they are not identical. We also know they are both
3537 // strings.
3538 if (equality) {
3539 Label do_compare;
3540 STATIC_ASSERT(kInternalizedTag == 0);
3541 __ orp(tmp1, tmp2);
3542 __ testb(tmp1, Immediate(kIsNotInternalizedMask));
3543 __ j(not_zero, &do_compare, Label::kNear);
3544 // Make sure rax is non-zero. At this point input operands are
3545 // guaranteed to be non-zero.
3546 DCHECK(right.is(rax));
3547 __ ret(0);
3548 __ bind(&do_compare);
3549 }
3550
3551 // Check that both strings are sequential one-byte.
3552 Label runtime;
3553 __ JumpIfNotBothSequentialOneByteStrings(left, right, tmp1, tmp2, &runtime);
3554
3555 // Compare flat one-byte strings. Returns when done.
3556 if (equality) {
3557 StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1,
3558 tmp2);
3559 } else {
3560 StringHelper::GenerateCompareFlatOneByteStrings(
3561 masm, left, right, tmp1, tmp2, tmp3, kScratchRegister);
3562 }
3563
3564 // Handle more complex cases in runtime.
3565 __ bind(&runtime);
3566 __ PopReturnAddressTo(tmp1);
3567 __ Push(left);
3568 __ Push(right);
3569 __ PushReturnAddressFrom(tmp1);
3570 if (equality) {
3571 __ TailCallRuntime(Runtime::kStringEquals);
3572 } else {
3573 __ TailCallRuntime(Runtime::kStringCompare);
3574 }
3575
3576 __ bind(&miss);
3577 GenerateMiss(masm);
3578 }
3579
3580
3581 void CompareICStub::GenerateReceivers(MacroAssembler* masm) {
3582 DCHECK_EQ(CompareICState::RECEIVER, state());
3583 Label miss;
3584 Condition either_smi = masm->CheckEitherSmi(rdx, rax);
3585 __ j(either_smi, &miss, Label::kNear);
3586
3587 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
3588 __ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rcx);
3589 __ j(below, &miss, Label::kNear);
3590 __ CmpObjectType(rdx, FIRST_JS_RECEIVER_TYPE, rcx);
3591 __ j(below, &miss, Label::kNear);
3592
3593 DCHECK_EQ(equal, GetCondition());
3594 __ subp(rax, rdx);
3595 __ ret(0);
3596
3597 __ bind(&miss);
3598 GenerateMiss(masm);
3599 }
3600
3601
3602 void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
3603 Label miss;
3604 Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
3605 Condition either_smi = masm->CheckEitherSmi(rdx, rax);
3606 __ j(either_smi, &miss, Label::kNear);
3607
3608 __ GetWeakValue(rdi, cell);
3609 __ cmpp(FieldOperand(rdx, HeapObject::kMapOffset), rdi);
3610 __ j(not_equal, &miss, Label::kNear);
3611 __ cmpp(FieldOperand(rax, HeapObject::kMapOffset), rdi);
3612 __ j(not_equal, &miss, Label::kNear);
3613
3614 if (Token::IsEqualityOp(op())) {
3615 __ subp(rax, rdx);
3616 __ ret(0);
3617 } else if (is_strong(strength())) {
3618 __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion);
3619 } else {
3620 __ PopReturnAddressTo(rcx);
3621 __ Push(rdx);
3622 __ Push(rax);
3623 __ Push(Smi::FromInt(NegativeComparisonResult(GetCondition())));
3624 __ PushReturnAddressFrom(rcx);
3625 __ TailCallRuntime(Runtime::kCompare);
3626 }
3627
3628 __ bind(&miss);
3629 GenerateMiss(masm);
3630 }
3631
3632
3633 void CompareICStub::GenerateMiss(MacroAssembler* masm) {
3634 {
3635 // Call the runtime system in a fresh internal frame.
3636 FrameScope scope(masm, StackFrame::INTERNAL);
3637 __ Push(rdx);
3638 __ Push(rax);
3639 __ Push(rdx);
3640 __ Push(rax);
3641 __ Push(Smi::FromInt(op()));
3642 __ CallRuntime(Runtime::kCompareIC_Miss);
3643
3644 // Compute the entry point of the rewritten stub.
3645 __ leap(rdi, FieldOperand(rax, Code::kHeaderSize));
3646 __ Pop(rax);
3647 __ Pop(rdx);
3648 }
3649
3650 // Do a tail call to the rewritten stub.
3651 __ jmp(rdi);
3652 }
3653
3654
3655 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
3656 Label* miss,
3657 Label* done,
3658 Register properties,
3659 Handle<Name> name,
3660 Register r0) {
3661 DCHECK(name->IsUniqueName());
3662 // If the names of the slots probed for the hash value in probes 1 to
3663 // kProbes - 1 are not equal to the name, and the kProbes-th slot is
3664 // unused (its name is the undefined value), then the hash table is
3665 // guaranteed not to contain the property. This holds even if some slots
3666 // hold deleted properties (their names are the hole value).
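// Note: undefined marks a never-used slot and ends the probe chain, while
// the hole marks a deleted slot and probing continues past it; both cases
// are handled inside the loop below.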
3667 for (int i = 0; i < kInlinedProbes; i++) {
3668 // r0 points to properties hash.
3669 // Compute the masked index: (hash + i + i * i) & mask.
3670 Register index = r0;
3671 // Capacity is smi 2^n.
3672 __ SmiToInteger32(index, FieldOperand(properties, kCapacityOffset));
3673 __ decl(index);
3674 __ andp(index,
3675 Immediate(name->Hash() + NameDictionary::GetProbeOffset(i)));
3676
3677 // Scale the index by multiplying by the entry size.
3678 STATIC_ASSERT(NameDictionary::kEntrySize == 3);
3679 __ leap(index, Operand(index, index, times_2, 0)); // index *= 3.
3680
3681 Register entity_name = r0;
3682 // Having undefined at this place means the name is not contained.
3683 STATIC_ASSERT(kSmiTagSize == 1);
3684 __ movp(entity_name, Operand(properties,
3685 index,
3686 times_pointer_size,
3687 kElementsStartOffset - kHeapObjectTag));
3688 __ Cmp(entity_name, masm->isolate()->factory()->undefined_value());
3689 __ j(equal, done);
3690
3691 // Stop if found the property.
3692 __ Cmp(entity_name, Handle<Name>(name));
3693 __ j(equal, miss);
3694
3695 Label good;
3696 // Check for the hole and skip.
3697 __ CompareRoot(entity_name, Heap::kTheHoleValueRootIndex);
3698 __ j(equal, &good, Label::kNear);
3699
3700 // Check if the entry name is not a unique name.
3701 __ movp(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
3702 __ JumpIfNotUniqueNameInstanceType(
3703 FieldOperand(entity_name, Map::kInstanceTypeOffset), miss);
3704 __ bind(&good);
3705 }
3706
3707 NameDictionaryLookupStub stub(masm->isolate(), properties, r0, r0,
3708 NEGATIVE_LOOKUP);
3709 __ Push(Handle<Object>(name));
3710 __ Push(Immediate(name->Hash()));
3711 __ CallStub(&stub);
3712 __ testp(r0, r0);
3713 __ j(not_zero, miss);
3714 __ jmp(done);
3715 }
3716
3717
3718 // Probe the name dictionary in the |elements| register. Jump to the
3719 // |done| label if a property with the given name is found, leaving the
3720 // index into the dictionary in |r1|. Jump to the |miss| label
3721 // otherwise.
3722 void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
3723 Label* miss,
3724 Label* done,
3725 Register elements,
3726 Register name,
3727 Register r0,
3728 Register r1) {
3729 DCHECK(!elements.is(r0));
3730 DCHECK(!elements.is(r1));
3731 DCHECK(!name.is(r0));
3732 DCHECK(!name.is(r1));
3733
3734 __ AssertName(name);
3735
3736 __ SmiToInteger32(r0, FieldOperand(elements, kCapacityOffset));
3737 __ decl(r0);
3738
3739 for (int i = 0; i < kInlinedProbes; i++) {
3740 // Compute the masked index: (hash + i + i * i) & mask.
3741 __ movl(r1, FieldOperand(name, Name::kHashFieldOffset));
3742 __ shrl(r1, Immediate(Name::kHashShift));
3743 if (i > 0) {
3744 __ addl(r1, Immediate(NameDictionary::GetProbeOffset(i)));
3745 }
3746 __ andp(r1, r0);
3747
3748 // Scale the index by multiplying by the entry size.
3749 STATIC_ASSERT(NameDictionary::kEntrySize == 3);
3750 __ leap(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
3751
3752 // Check if the key is identical to the name.
3753 __ cmpp(name, Operand(elements, r1, times_pointer_size,
3754 kElementsStartOffset - kHeapObjectTag));
3755 __ j(equal, done);
3756 }
3757
3758 NameDictionaryLookupStub stub(masm->isolate(), elements, r0, r1,
3759 POSITIVE_LOOKUP);
3760 __ Push(name);
3761 __ movl(r0, FieldOperand(name, Name::kHashFieldOffset));
3762 __ shrl(r0, Immediate(Name::kHashShift));
3763 __ Push(r0);
3764 __ CallStub(&stub);
3765
3766 __ testp(r0, r0);
3767 __ j(zero, miss);
3768 __ jmp(done);
3769 }
3770
3771
3772 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
3773 // This stub overrides SometimesSetsUpAFrame() to return false. That means
3774 // we cannot call anything that could cause a GC from this stub.
3775 // Stack frame on entry:
3776 // rsp[0 * kPointerSize] : return address.
3777 // rsp[1 * kPointerSize] : key's hash.
3778 // rsp[2 * kPointerSize] : key.
3779 // Registers:
3780 // dictionary_: NameDictionary to probe.
3781 // result_: used as scratch.
3782 // index_: will hold the index of the entry if the lookup is successful;
3783 // might alias with result_.
3784 // Returns:
3785 // result_ is zero if the lookup failed, non-zero otherwise.
3786
3787 Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
3788
3789 Register scratch = result();
3790
3791 __ SmiToInteger32(scratch, FieldOperand(dictionary(), kCapacityOffset));
3792 __ decl(scratch);
3793 __ Push(scratch);
3794
3795 // If the names of the slots probed for the hash value in probes 1 to
3796 // kProbes - 1 are not equal to the name, and the kProbes-th slot is
3797 // unused (its name is the undefined value), then the hash table is
3798 // guaranteed not to contain the property. This holds even if some slots
3799 // hold deleted properties (their names are the hole value).
3800 StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER,
3801 kPointerSize);
3802 for (int i = kInlinedProbes; i < kTotalProbes; i++) {
3803 // Compute the masked index: (hash + i + i * i) & mask.
3804 __ movp(scratch, args.GetArgumentOperand(1));
3805 if (i > 0) {
3806 __ addl(scratch, Immediate(NameDictionary::GetProbeOffset(i)));
3807 }
3808 __ andp(scratch, Operand(rsp, 0));
3809
3810 // Scale the index by multiplying by the entry size.
3811 STATIC_ASSERT(NameDictionary::kEntrySize == 3);
3812 __ leap(index(), Operand(scratch, scratch, times_2, 0)); // index *= 3.
3813
3814 // Having undefined at this place means the name is not contained.
3815 __ movp(scratch, Operand(dictionary(), index(), times_pointer_size,
3816 kElementsStartOffset - kHeapObjectTag));
3817
3818 __ Cmp(scratch, isolate()->factory()->undefined_value());
3819 __ j(equal, &not_in_dictionary);
3820
3821 // Stop if found the property.
3822 __ cmpp(scratch, args.GetArgumentOperand(0));
3823 __ j(equal, &in_dictionary);
3824
3825 if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
3826 // If we hit a key that is not a unique name during negative
3827 // lookup we have to bailout as this key might be equal to the
3828 // key we are looking for.
3829
3830 // Check if the entry name is not a unique name.
3831 __ movp(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
3832 __ JumpIfNotUniqueNameInstanceType(
3833 FieldOperand(scratch, Map::kInstanceTypeOffset),
3834 &maybe_in_dictionary);
3835 }
3836 }
3837
3838 __ bind(&maybe_in_dictionary);
3839 // If we are doing negative lookup then probing failure should be
3840 // treated as a lookup success. For positive lookup probing failure
3841 // should be treated as lookup failure.
3842 if (mode() == POSITIVE_LOOKUP) {
3843 __ movp(scratch, Immediate(0));
3844 __ Drop(1);
3845 __ ret(2 * kPointerSize);
3846 }
3847
3848 __ bind(&in_dictionary);
3849 __ movp(scratch, Immediate(1));
3850 __ Drop(1);
3851 __ ret(2 * kPointerSize);
3852
3853 __ bind(&not_in_dictionary);
3854 __ movp(scratch, Immediate(0));
3855 __ Drop(1);
3856 __ ret(2 * kPointerSize);
3857 }
3858
3859
3860 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
3861 Isolate* isolate) {
3862 StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
3863 stub1.GetCode();
3864 StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
3865 stub2.GetCode();
3866 }
3867
3868
3869 // Takes the input in 3 registers: address_, value_, and object_. A pointer
3870 // to the value has just been written into the object; now this stub makes
3871 // sure we keep the GC informed. The word in the object where the value has
3872 // been written is in the address register.
3873 void RecordWriteStub::Generate(MacroAssembler* masm) {
3874 Label skip_to_incremental_noncompacting;
3875 Label skip_to_incremental_compacting;
3876
3877 // The first two instructions are generated with labels so as to get the
3878 // offset fixed up correctly by the bind(Label*) call. We patch them back
3879 // and forth between compare instructions (nops in this position) and the
3880 // real branches when we start and stop incremental heap marking.
3881 // See RecordWriteStub::Patch for details.
3882 __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
3883 __ jmp(&skip_to_incremental_compacting, Label::kFar);
3884
3885 if (remembered_set_action() == EMIT_REMEMBERED_SET) {
3886 __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
3887 MacroAssembler::kReturnAtEnd);
3888 } else {
3889 __ ret(0);
3890 }
3891
3892 __ bind(&skip_to_incremental_noncompacting);
3893 GenerateIncremental(masm, INCREMENTAL);
3894
3895 __ bind(&skip_to_incremental_compacting);
3896 GenerateIncremental(masm, INCREMENTAL_COMPACTION);
3897
3898 // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
3899 // Will be checked in IncrementalMarking::ActivateGeneratedStub.
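// Note: the bytes at offsets 0 and 2 are the opcodes of the two jumps
// emitted at the top of this stub (a 2-byte near jump followed by a
// 5-byte jump); turning them into nops makes the freshly generated stub
// fall through to the store-buffer-only path.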
3900 masm->set_byte_at(0, kTwoByteNopInstruction);
3901 masm->set_byte_at(2, kFiveByteNopInstruction);
3902 }
3903
3904
3905 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
3906 regs_.Save(masm);
3907
3908 if (remembered_set_action() == EMIT_REMEMBERED_SET) {
3909 Label dont_need_remembered_set;
3910
3911 __ movp(regs_.scratch0(), Operand(regs_.address(), 0));
3912 __ JumpIfNotInNewSpace(regs_.scratch0(),
3913 regs_.scratch0(),
3914 &dont_need_remembered_set);
3915
3916 __ CheckPageFlag(regs_.object(),
3917 regs_.scratch0(),
3918 1 << MemoryChunk::SCAN_ON_SCAVENGE,
3919 not_zero,
3920 &dont_need_remembered_set);
3921
3922 // First notify the incremental marker if necessary, then update the
3923 // remembered set.
3924 CheckNeedsToInformIncrementalMarker(
3925 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
3926 InformIncrementalMarker(masm);
3927 regs_.Restore(masm);
3928 __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
3929 MacroAssembler::kReturnAtEnd);
3930
3931 __ bind(&dont_need_remembered_set);
3932 }
3933
3934 CheckNeedsToInformIncrementalMarker(
3935 masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
3936 InformIncrementalMarker(masm);
3937 regs_.Restore(masm);
3938 __ ret(0);
3939 }
3940
3941
3942 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
3943 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
3944 Register address =
3945 arg_reg_1.is(regs_.address()) ? kScratchRegister : regs_.address();
3946 DCHECK(!address.is(regs_.object()));
3947 DCHECK(!address.is(arg_reg_1));
3948 __ Move(address, regs_.address());
3949 __ Move(arg_reg_1, regs_.object());
3950 // TODO(gc) Can we just set address arg2 in the beginning?
3951 __ Move(arg_reg_2, address);
3952 __ LoadAddress(arg_reg_3,
3953 ExternalReference::isolate_address(isolate()));
3954 int argument_count = 3;
3955
3956 AllowExternalCallThatCantCauseGC scope(masm);
3957 __ PrepareCallCFunction(argument_count);
3958 __ CallCFunction(
3959 ExternalReference::incremental_marking_record_write_function(isolate()),
3960 argument_count);
3961 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
3962 }
3963
3964
3965 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
3966 MacroAssembler* masm,
3967 OnNoNeedToInformIncrementalMarker on_no_need,
3968 Mode mode) {
3969 Label on_black;
3970 Label need_incremental;
3971 Label need_incremental_pop_object;
3972
3973 __ movp(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
3974 __ andp(regs_.scratch0(), regs_.object());
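// Note: scratch0 now holds the start of the object's page (its
// MemoryChunk header), where the write barrier counter lives.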
3975 __ movp(regs_.scratch1(),
3976 Operand(regs_.scratch0(),
3977 MemoryChunk::kWriteBarrierCounterOffset));
3978 __ subp(regs_.scratch1(), Immediate(1));
3979 __ movp(Operand(regs_.scratch0(),
3980 MemoryChunk::kWriteBarrierCounterOffset),
3981 regs_.scratch1());
3982 __ j(negative, &need_incremental);
3983
3984 // Let's look at the color of the object: If it is not black we don't have
3985 // to inform the incremental marker.
3986 __ JumpIfBlack(regs_.object(),
3987 regs_.scratch0(),
3988 regs_.scratch1(),
3989 &on_black,
3990 Label::kNear);
3991
3992 regs_.Restore(masm);
3993 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
3994 __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
3995 MacroAssembler::kReturnAtEnd);
3996 } else {
3997 __ ret(0);
3998 }
3999
4000 __ bind(&on_black);
4001
4002 // Get the value from the slot.
4003 __ movp(regs_.scratch0(), Operand(regs_.address(), 0));
4004
4005 if (mode == INCREMENTAL_COMPACTION) {
4006 Label ensure_not_white;
4007
4008 __ CheckPageFlag(regs_.scratch0(), // Contains value.
4009 regs_.scratch1(), // Scratch.
4010 MemoryChunk::kEvacuationCandidateMask,
4011 zero,
4012 &ensure_not_white,
4013 Label::kNear);
4014
4015 __ CheckPageFlag(regs_.object(),
4016 regs_.scratch1(), // Scratch.
4017 MemoryChunk::kSkipEvacuationSlotsRecordingMask,
4018 zero,
4019 &need_incremental);
4020
4021 __ bind(&ensure_not_white);
4022 }
4023
4024 // We need an extra register for this, so we push the object register
4025 // temporarily.
4026 __ Push(regs_.object());
4027 __ JumpIfWhite(regs_.scratch0(), // The value.
4028 regs_.scratch1(), // Scratch.
4029 regs_.object(), // Scratch.
4030 &need_incremental_pop_object, Label::kNear);
4031 __ Pop(regs_.object());
4032
4033 regs_.Restore(masm);
4034 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4035 __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
4036 MacroAssembler::kReturnAtEnd);
4037 } else {
4038 __ ret(0);
4039 }
4040
4041 __ bind(&need_incremental_pop_object);
4042 __ Pop(regs_.object());
4043
4044 __ bind(&need_incremental);
4045
4046 // Fall through when we need to inform the incremental marker.
4047 }
4048
4049
4050 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
4051 CEntryStub ces(isolate(), 1, kSaveFPRegs);
4052 __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
4053 int parameter_count_offset =
4054 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
4055 __ movp(rbx, MemOperand(rbp, parameter_count_offset));
4056 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
4057 __ PopReturnAddressTo(rcx);
4058 int additional_offset =
4059 function_mode() == JS_FUNCTION_STUB_MODE ? kPointerSize : 0;
4060 __ leap(rsp, MemOperand(rsp, rbx, times_pointer_size, additional_offset));
4061 __ jmp(rcx); // Return to IC Miss stub, continuation still on stack.
4062 }
4063
4064
4065 void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
4066 __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
4067 LoadICStub stub(isolate(), state());
4068 stub.GenerateForTrampoline(masm);
4069 }
4070
4071
4072 void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
4073 __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
4074 KeyedLoadICStub stub(isolate(), state());
4075 stub.GenerateForTrampoline(masm);
4076 }
4077
4078
4079 static void HandleArrayCases(MacroAssembler* masm, Register feedback,
4080 Register receiver_map, Register scratch1,
4081 Register scratch2, Register scratch3,
4082 bool is_polymorphic, Label* miss) {
4083 // feedback initially contains the feedback array
4084 Label next_loop, prepare_next;
4085 Label start_polymorphic;
4086
4087 Register counter = scratch1;
4088 Register length = scratch2;
4089 Register cached_map = scratch3;
4090
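// Note: the feedback array stores (weak cell of map, handler) pairs back
// to back; elements 0 and 1 are checked inline here, and the loop below
// scans any remaining pairs two elements at a time.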
4091 __ movp(cached_map, FieldOperand(feedback, FixedArray::OffsetOfElementAt(0)));
4092 __ cmpp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
4093 __ j(not_equal, &start_polymorphic);
4094
4095 // Found; now call the handler.
4096 Register handler = feedback;
4097 __ movp(handler, FieldOperand(feedback, FixedArray::OffsetOfElementAt(1)));
4098 __ leap(handler, FieldOperand(handler, Code::kHeaderSize));
4099 __ jmp(handler);
4100
4101 // Polymorphic, we have to loop from 2 to N
4102 __ bind(&start_polymorphic);
4103 __ SmiToInteger32(length, FieldOperand(feedback, FixedArray::kLengthOffset));
4104 if (!is_polymorphic) {
4105 // If the IC could be monomorphic we have to make sure we don't go past the
4106 // end of the feedback array.
4107 __ cmpl(length, Immediate(2));
4108 __ j(equal, miss);
4109 }
4110 __ movl(counter, Immediate(2));
4111
4112 __ bind(&next_loop);
4113 __ movp(cached_map, FieldOperand(feedback, counter, times_pointer_size,
4114 FixedArray::kHeaderSize));
4115 __ cmpp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
4116 __ j(not_equal, &prepare_next);
4117 __ movp(handler, FieldOperand(feedback, counter, times_pointer_size,
4118 FixedArray::kHeaderSize + kPointerSize));
4119 __ leap(handler, FieldOperand(handler, Code::kHeaderSize));
4120 __ jmp(handler);
4121
4122 __ bind(&prepare_next);
4123 __ addl(counter, Immediate(2));
4124 __ cmpl(counter, length);
4125 __ j(less, &next_loop);
4126
4127 // We exhausted our array of map handler pairs.
4128 __ jmp(miss);
4129 }
4130
4131
4132 static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
4133 Register receiver_map, Register feedback,
4134 Register vector, Register integer_slot,
4135 Label* compare_map, Label* load_smi_map,
4136 Label* try_array) {
4137 __ JumpIfSmi(receiver, load_smi_map);
4138 __ movp(receiver_map, FieldOperand(receiver, 0));
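// Note: offset 0 is HeapObject::kMapOffset, i.e. this loads the map.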
4139
4140 __ bind(compare_map);
4141 __ cmpp(receiver_map, FieldOperand(feedback, WeakCell::kValueOffset));
4142 __ j(not_equal, try_array);
4143 Register handler = feedback;
4144 __ movp(handler, FieldOperand(vector, integer_slot, times_pointer_size,
4145 FixedArray::kHeaderSize + kPointerSize));
4146 __ leap(handler, FieldOperand(handler, Code::kHeaderSize));
4147 __ jmp(handler);
4148 }
4149
4150
4151 void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
4152
4153
4154 void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
4155 GenerateImpl(masm, true);
4156 }
4157
4158
4159 void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
4160 Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // rdx
4161 Register name = LoadWithVectorDescriptor::NameRegister(); // rcx
4162 Register vector = LoadWithVectorDescriptor::VectorRegister(); // rbx
4163 Register slot = LoadWithVectorDescriptor::SlotRegister(); // rax
4164 Register feedback = rdi;
4165 Register integer_slot = r8;
4166 Register receiver_map = r9;
4167
4168 __ SmiToInteger32(integer_slot, slot);
4169 __ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
4170 FixedArray::kHeaderSize));
4171
4172 // Try to quickly handle the monomorphic case without knowing for sure
4173 // if we have a weak cell in feedback. We do know it's safe to look
4174 // at WeakCell::kValueOffset.
4175 Label try_array, load_smi_map, compare_map;
4176 Label not_array, miss;
4177 HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector,
4178 integer_slot, &compare_map, &load_smi_map, &try_array);
4179
4180 // Is it a fixed array?
4181 __ bind(&try_array);
4182 __ CompareRoot(FieldOperand(feedback, 0), Heap::kFixedArrayMapRootIndex);
4183 __ j(not_equal, &not_array);
4184 HandleArrayCases(masm, feedback, receiver_map, integer_slot, r11, r15, true,
4185 &miss);
4186
4187 __ bind(&not_array);
4188 __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
4189 __ j(not_equal, &miss);
4190 Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
4191 Code::ComputeHandlerFlags(Code::LOAD_IC));
4192 masm->isolate()->stub_cache()->GenerateProbe(
4193 masm, Code::LOAD_IC, code_flags, receiver, name, feedback, no_reg);
4194
4195 __ bind(&miss);
4196 LoadIC::GenerateMiss(masm);
4197
4198 __ bind(&load_smi_map);
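// Note: smi receivers have no map; vector ICs record their feedback under
// the heap number map, so that map is used for the comparison.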
4199 __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
4200 __ jmp(&compare_map);
4201 }
4202
4203
4204 void KeyedLoadICStub::Generate(MacroAssembler* masm) {
4205 GenerateImpl(masm, false);
4206 }
4207
4208
4209 void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
4210 GenerateImpl(masm, true);
4211 }
4212
4213
4214 void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
4215 Register receiver = LoadWithVectorDescriptor::ReceiverRegister(); // rdx
4216 Register key = LoadWithVectorDescriptor::NameRegister(); // rcx
4217 Register vector = LoadWithVectorDescriptor::VectorRegister(); // rbx
4218 Register slot = LoadWithVectorDescriptor::SlotRegister(); // rax
4219 Register feedback = rdi;
4220 Register integer_slot = r8;
4221 Register receiver_map = r9;
4222
4223 __ SmiToInteger32(integer_slot, slot);
4224 __ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
4225 FixedArray::kHeaderSize));
4226
4227 // Try to quickly handle the monomorphic case without knowing for sure
4228 // if we have a weak cell in feedback. We do know it's safe to look
4229 // at WeakCell::kValueOffset.
4230 Label try_array, load_smi_map, compare_map;
4231 Label not_array, miss;
4232 HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector,
4233 integer_slot, &compare_map, &load_smi_map, &try_array);
4234
4235 __ bind(&try_array);
4236 // Is it a fixed array?
4237 __ CompareRoot(FieldOperand(feedback, 0), Heap::kFixedArrayMapRootIndex);
4238 __ j(not_equal, &not_array);
4239
4240 // We have a polymorphic element handler.
4241 Label polymorphic, try_poly_name;
4242 __ bind(&polymorphic);
4243 HandleArrayCases(masm, feedback, receiver_map, integer_slot, r11, r15, true,
4244 &miss);
4245
4246 __ bind(&not_array);
4247 // Is it generic?
4248 __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
4249 __ j(not_equal, &try_poly_name);
4250 Handle<Code> megamorphic_stub =
4251 KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
4252 __ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);
4253
4254 __ bind(&try_poly_name);
4255 // We might have a name in feedback, and a fixed array in the next slot.
4256 __ cmpp(key, feedback);
4257 __ j(not_equal, &miss);
4258 // If the name comparison succeeded, we know we have a fixed array with
4259 // at least one map/handler pair.
4260 __ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
4261 FixedArray::kHeaderSize + kPointerSize));
4262 HandleArrayCases(masm, feedback, receiver_map, integer_slot, r11, r15, false,
4263 &miss);
4264
4265 __ bind(&miss);
4266 KeyedLoadIC::GenerateMiss(masm);
4267
4268 __ bind(&load_smi_map);
4269 __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
4270 __ jmp(&compare_map);
4271 }
4272
4273
4274 void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
4275 __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
4276 VectorStoreICStub stub(isolate(), state());
4277 stub.GenerateForTrampoline(masm);
4278 }
4279
4280
4281 void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
4282 __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
4283 VectorKeyedStoreICStub stub(isolate(), state());
4284 stub.GenerateForTrampoline(masm);
4285 }
4286
4287
4288 void VectorStoreICStub::Generate(MacroAssembler* masm) {
4289 GenerateImpl(masm, false);
4290 }
4291
4292
4293 void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
4294 GenerateImpl(masm, true);
4295 }
4296
4297
4298 void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
4299 Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // rdx
4300 Register key = VectorStoreICDescriptor::NameRegister(); // rcx
4301 Register vector = VectorStoreICDescriptor::VectorRegister(); // rbx
4302 Register slot = VectorStoreICDescriptor::SlotRegister(); // rdi
4303 DCHECK(VectorStoreICDescriptor::ValueRegister().is(rax)); // rax
4304 Register feedback = r8;
4305 Register integer_slot = r9;
4306 Register receiver_map = r11;
4307 DCHECK(!AreAliased(feedback, integer_slot, vector, slot, receiver_map));
4308
4309 __ SmiToInteger32(integer_slot, slot);
4310 __ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
4311 FixedArray::kHeaderSize));
4312
4313 // Try to quickly handle the monomorphic case without knowing for sure
4314 // if we have a weak cell in feedback. We do know it's safe to look
4315 // at WeakCell::kValueOffset.
4316 Label try_array, load_smi_map, compare_map;
4317 Label not_array, miss;
4318 HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector,
4319 integer_slot, &compare_map, &load_smi_map, &try_array);
4320
4321 // Is it a fixed array?
4322 __ bind(&try_array);
4323 __ CompareRoot(FieldOperand(feedback, 0), Heap::kFixedArrayMapRootIndex);
4324 __ j(not_equal, &not_array);
4325 HandleArrayCases(masm, feedback, receiver_map, integer_slot, r14, r15, true,
4326 &miss);
4327
4328 __ bind(&not_array);
4329 __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
4330 __ j(not_equal, &miss);
4331
4332 Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
4333 Code::ComputeHandlerFlags(Code::STORE_IC));
4334 masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, code_flags,
4335 receiver, key, feedback, no_reg);
4336
4337 __ bind(&miss);
4338 StoreIC::GenerateMiss(masm);
4339
4340 __ bind(&load_smi_map);
4341 __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
4342 __ jmp(&compare_map);
4343 }
4344
4345
4346 void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) {
4347 GenerateImpl(masm, false);
4348 }
4349
4350
4351 void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
4352 GenerateImpl(masm, true);
4353 }
4354
4355
4356 static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
4357 Register receiver_map,
4358 Register feedback, Register scratch,
4359 Register scratch1,
4360 Register scratch2, Label* miss) {
4361 // feedback initially contains the feedback array
4362 Label next, next_loop, prepare_next;
4363 Label transition_call;
4364
4365 Register cached_map = scratch;
4366 Register counter = scratch1;
4367 Register length = scratch2;
4368
4369 // Polymorphic, we have to loop from 0 to N - 1
4370 __ movp(counter, Immediate(0));
4371 __ movp(length, FieldOperand(feedback, FixedArray::kLengthOffset));
4372 __ SmiToInteger32(length, length);
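// Keyed-store feedback is grouped in triples per map: (weak cell of the
// receiver map, transition map weak cell or undefined, handler code), which
// is why the loop below strides by 3.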
4373
4374 __ bind(&next_loop);
4375 __ movp(cached_map, FieldOperand(feedback, counter, times_pointer_size,
4376 FixedArray::kHeaderSize));
4377 __ cmpp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
4378 __ j(not_equal, &prepare_next);
4379 __ movp(cached_map, FieldOperand(feedback, counter, times_pointer_size,
4380 FixedArray::kHeaderSize + kPointerSize));
4381 __ CompareRoot(cached_map, Heap::kUndefinedValueRootIndex);
4382 __ j(not_equal, &transition_call);
4383 __ movp(feedback, FieldOperand(feedback, counter, times_pointer_size,
4384 FixedArray::kHeaderSize + 2 * kPointerSize));
4385 __ leap(feedback, FieldOperand(feedback, Code::kHeaderSize));
4386 __ jmp(feedback);
4387
4388 __ bind(&transition_call);
4389 DCHECK(receiver_map.is(VectorStoreTransitionDescriptor::MapRegister()));
4390 __ movp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
4391 // The weak cell may have been cleared.
4392 __ JumpIfSmi(receiver_map, miss);
4393 // Get the handler in value.
4394 __ movp(feedback, FieldOperand(feedback, counter, times_pointer_size,
4395 FixedArray::kHeaderSize + 2 * kPointerSize));
4396 __ leap(feedback, FieldOperand(feedback, Code::kHeaderSize));
4397 __ jmp(feedback);
4398
4399 __ bind(&prepare_next);
4400 __ addl(counter, Immediate(3));
4401 __ cmpl(counter, length);
4402 __ j(less, &next_loop);
4403
4404 // We exhausted our array of map handler pairs.
4405 __ jmp(miss);
4406 }
4407
4408
4409 void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
4410 Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // rdx
4411 Register key = VectorStoreICDescriptor::NameRegister(); // rcx
4412 Register vector = VectorStoreICDescriptor::VectorRegister(); // rbx
4413 Register slot = VectorStoreICDescriptor::SlotRegister(); // rdi
4414 DCHECK(VectorStoreICDescriptor::ValueRegister().is(rax)); // rax
4415 Register feedback = r8;
4416 Register integer_slot = r9;
4417 Register receiver_map = r11;
4418 DCHECK(!AreAliased(feedback, integer_slot, vector, slot, receiver_map));
4419
4420 __ SmiToInteger32(integer_slot, slot);
4421 __ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
4422 FixedArray::kHeaderSize));
4423
4424 // Try to quickly handle the monomorphic case without knowing for sure
4425 // if we have a weak cell in feedback. We do know it's safe to look
4426 // at WeakCell::kValueOffset.
4427 Label try_array, load_smi_map, compare_map;
4428 Label not_array, miss;
4429 HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector,
4430 integer_slot, &compare_map, &load_smi_map, &try_array);
4431
4432 // Is it a fixed array?
4433 __ bind(&try_array);
4434 __ CompareRoot(FieldOperand(feedback, 0), Heap::kFixedArrayMapRootIndex);
4435 __ j(not_equal, &not_array);
4436 HandlePolymorphicKeyedStoreCase(masm, receiver_map, feedback, integer_slot,
4437 r15, r14, &miss);
4438
4439 __ bind(&not_array);
4440 Label try_poly_name;
4441 __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
4442 __ j(not_equal, &try_poly_name);
4443
4444 Handle<Code> megamorphic_stub =
4445 KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
4446 __ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);
4447
4448 __ bind(&try_poly_name);
4449 // We might have a name in feedback, and a fixed array in the next slot.
4450 __ cmpp(key, feedback);
4451 __ j(not_equal, &miss);
4452 // If the name comparison succeeded, we know we have a fixed array with
4453 // at least one map/handler pair.
4454 __ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
4455 FixedArray::kHeaderSize + kPointerSize));
4456 HandleArrayCases(masm, feedback, receiver_map, integer_slot, r14, r15, false,
4457 &miss);
4458
4459 __ bind(&miss);
4460 KeyedStoreIC::GenerateMiss(masm);
4461
4462 __ bind(&load_smi_map);
4463 __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
4464 __ jmp(&compare_map);
4465 }
4466
4467
4468 void CallICTrampolineStub::Generate(MacroAssembler* masm) {
4469 __ EmitLoadTypeFeedbackVector(rbx);
4470 CallICStub stub(isolate(), state());
4471 __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
4472 }
4473
4474
4475 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
4476 if (masm->isolate()->function_entry_hook() != NULL) {
4477 ProfileEntryHookStub stub(masm->isolate());
4478 masm->CallStub(&stub);
4479 }
4480 }
4481
4482
4483 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
4484 // This stub can be called from essentially anywhere, so it needs to save
4485 // all volatile and callee-save registers.
4486 const size_t kNumSavedRegisters = 2;
4487 __ pushq(arg_reg_1);
4488 __ pushq(arg_reg_2);
4489
4490 // Calculate the original stack pointer and store it in the second arg.
4491 __ leap(arg_reg_2,
4492 Operand(rsp, kNumSavedRegisters * kRegisterSize + kPCOnStackSize));
4493
4494 // Calculate the calling function's address and store it in the first arg.
4495 __ movp(arg_reg_1, Operand(rsp, kNumSavedRegisters * kRegisterSize));
4496 __ subp(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
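// The word above the two saved registers is the return address of the call
// into this stub; subtracting the short call's length yields the address of
// that call site, which identifies the instrumented function.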
4497
4498 // Save the remainder of the volatile registers.
4499 masm->PushCallerSaved(kSaveFPRegs, arg_reg_1, arg_reg_2);
4500
4501 // Call the entry hook function.
4502 __ Move(rax, FUNCTION_ADDR(isolate()->function_entry_hook()),
4503 Assembler::RelocInfoNone());
4504
4505 AllowExternalCallThatCantCauseGC scope(masm);
4506
4507 const int kArgumentCount = 2;
4508 __ PrepareCallCFunction(kArgumentCount);
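// Conceptually this calls the embedder-supplied hook as a plain C function,
//   void Hook(uintptr_t function, uintptr_t return_addr_location);
// (the v8::FunctionEntryHook signature; the name "Hook" is only illustrative).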
4509 __ CallCFunction(rax, kArgumentCount);
4510
4511 // Restore volatile regs.
4512 masm->PopCallerSaved(kSaveFPRegs, arg_reg_1, arg_reg_2);
4513 __ popq(arg_reg_2);
4514 __ popq(arg_reg_1);
4515
4516 __ Ret();
4517 }
4518
4519
4520 template<class T>
4521 static void CreateArrayDispatch(MacroAssembler* masm,
4522 AllocationSiteOverrideMode mode) {
4523 if (mode == DISABLE_ALLOCATION_SITES) {
4524 T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
4525 __ TailCallStub(&stub);
4526 } else if (mode == DONT_OVERRIDE) {
4527 int last_index = GetSequenceIndexFromFastElementsKind(
4528 TERMINAL_FAST_ELEMENTS_KIND);
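// Emit a cmp/jump ladder over every fast ElementsKind; a match tail-calls
// the stub specialized for that kind.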
4529 for (int i = 0; i <= last_index; ++i) {
4530 Label next;
4531 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4532 __ cmpl(rdx, Immediate(kind));
4533 __ j(not_equal, &next);
4534 T stub(masm->isolate(), kind);
4535 __ TailCallStub(&stub);
4536 __ bind(&next);
4537 }
4538
4539 // If we reached this point there is a problem.
4540 __ Abort(kUnexpectedElementsKindInArrayConstructor);
4541 } else {
4542 UNREACHABLE();
4543 }
4544 }
4545
4546
4547 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
4548 AllocationSiteOverrideMode mode) {
4549 // rbx - allocation site (if mode != DISABLE_ALLOCATION_SITES)
4550 // rdx - kind (if mode != DISABLE_ALLOCATION_SITES)
4551 // rax - number of arguments
4552 // rdi - constructor?
4553 // rsp[0] - return address
4554 // rsp[8] - last argument
4555 Handle<Object> undefined_sentinel(
4556 masm->isolate()->heap()->undefined_value(),
4557 masm->isolate());
4558
4559 Label normal_sequence;
4560 if (mode == DONT_OVERRIDE) {
4561 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
4562 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
4563 STATIC_ASSERT(FAST_ELEMENTS == 2);
4564 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
4565 STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
4566 STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
4567
4568 // Is the low bit set? If so, the kind is already holey, which is what we want.
4569 __ testb(rdx, Immediate(1));
4570 __ j(not_zero, &normal_sequence);
4571 }
4572
4573 // look at the first argument
4574 StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
4575 __ movp(rcx, args.GetArgumentOperand(0));
4576 __ testp(rcx, rcx);
4577 __ j(zero, &normal_sequence);
4578
4579 if (mode == DISABLE_ALLOCATION_SITES) {
4580 ElementsKind initial = GetInitialFastElementsKind();
4581 ElementsKind holey_initial = GetHoleyElementsKind(initial);
4582
4583 ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
4584 holey_initial,
4585 DISABLE_ALLOCATION_SITES);
4586 __ TailCallStub(&stub_holey);
4587
4588 __ bind(&normal_sequence);
4589 ArraySingleArgumentConstructorStub stub(masm->isolate(),
4590 initial,
4591 DISABLE_ALLOCATION_SITES);
4592 __ TailCallStub(&stub);
4593 } else if (mode == DONT_OVERRIDE) {
4594 // We are going to create a holey array, but our kind is non-holey.
4595 // Fix kind and retry (only if we have an allocation site in the slot).
4596 __ incl(rdx);
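// Thanks to the ordering asserted above, packed -> holey is a +1
// transition, e.g. FAST_ELEMENTS (2) + 1 == FAST_HOLEY_ELEMENTS (3).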
4597
4598 if (FLAG_debug_code) {
4599 Handle<Map> allocation_site_map =
4600 masm->isolate()->factory()->allocation_site_map();
4601 __ Cmp(FieldOperand(rbx, 0), allocation_site_map);
4602 __ Assert(equal, kExpectedAllocationSite);
4603 }
4604
4605 // Save the resulting elements kind in type info. We can't simply store the
4606 // new kind in the AllocationSite::transition_info field, because elements
4607 // kind occupies only a portion of the field; the upper bits must be left alone.
4608 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
4609 __ SmiAddConstant(FieldOperand(rbx, AllocationSite::kTransitionInfoOffset),
4610 Smi::FromInt(kFastElementsKindPackedToHoley));
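// kFastElementsKindPackedToHoley is that same +1 distance, so the Smi add
// above bumps only the ElementsKindBits portion (shift 0) of
// transition_info and leaves the upper bits intact.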
4611
4612 __ bind(&normal_sequence);
4613 int last_index = GetSequenceIndexFromFastElementsKind(
4614 TERMINAL_FAST_ELEMENTS_KIND);
4615 for (int i = 0; i <= last_index; ++i) {
4616 Label next;
4617 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4618 __ cmpl(rdx, Immediate(kind));
4619 __ j(not_equal, &next);
4620 ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
4621 __ TailCallStub(&stub);
4622 __ bind(&next);
4623 }
4624
4625 // If we reached this point there is a problem.
4626 __ Abort(kUnexpectedElementsKindInArrayConstructor);
4627 } else {
4628 UNREACHABLE();
4629 }
4630 }
4631
4632
4633 template<class T>
4634 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
4635 int to_index = GetSequenceIndexFromFastElementsKind(
4636 TERMINAL_FAST_ELEMENTS_KIND);
4637 for (int i = 0; i <= to_index; ++i) {
4638 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4639 T stub(isolate, kind);
4640 stub.GetCode();
4641 if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
4642 T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
4643 stub1.GetCode();
4644 }
4645 }
4646 }
4647
4648
4649 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
4650 ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
4651 isolate);
4652 ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
4653 isolate);
4654 ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
4655 isolate);
4656 }
4657
4658
4659 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
4660 Isolate* isolate) {
4661 ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
4662 for (int i = 0; i < 2; i++) {
4663 // For internal arrays we only need a few things
4664 InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
4665 stubh1.GetCode();
4666 InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
4667 stubh2.GetCode();
4668 InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
4669 stubh3.GetCode();
4670 }
4671 }
4672
4673
4674 void ArrayConstructorStub::GenerateDispatchToArrayStub(
4675 MacroAssembler* masm,
4676 AllocationSiteOverrideMode mode) {
4677 if (argument_count() == ANY) {
4678 Label not_zero_case, not_one_case;
4679 __ testp(rax, rax);
4680 __ j(not_zero, &not_zero_case);
4681 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4682
4683 __ bind(&not_zero_case);
4684 __ cmpl(rax, Immediate(1));
4685 __ j(greater, &not_one_case);
4686 CreateArrayDispatchOneArgument(masm, mode);
4687
4688 __ bind(&not_one_case);
4689 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
4690 } else if (argument_count() == NONE) {
4691 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4692 } else if (argument_count() == ONE) {
4693 CreateArrayDispatchOneArgument(masm, mode);
4694 } else if (argument_count() == MORE_THAN_ONE) {
4695 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
4696 } else {
4697 UNREACHABLE();
4698 }
4699 }
4700
4701
4702 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
4703 // ----------- S t a t e -------------
4704 // -- rax : argc
4705 // -- rbx : AllocationSite or undefined
4706 // -- rdi : constructor
4707 // -- rdx : new target
4708 // -- rsp[0] : return address
4709 // -- rsp[8] : last argument
4710 // -----------------------------------
4711 if (FLAG_debug_code) {
4712 // The array construct code is only set for the global and natives
4713 // builtin Array functions which always have maps.
4714
4715 // Initial map for the builtin Array function should be a map.
4716 __ movp(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
4717 // A Smi check catches both a NULL pointer and a Smi, since kSmiTag is zero.
4718 STATIC_ASSERT(kSmiTag == 0);
4719 Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
4720 __ Check(not_smi, kUnexpectedInitialMapForArrayFunction);
4721 __ CmpObjectType(rcx, MAP_TYPE, rcx);
4722 __ Check(equal, kUnexpectedInitialMapForArrayFunction);
4723
4724 // We should either have undefined in rbx or a valid AllocationSite
4725 __ AssertUndefinedOrAllocationSite(rbx);
4726 }
4727
4728 // Enter the context of the Array function.
4729 __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
4730
4731 Label subclassing;
4732 __ cmpp(rdi, rdx);
4733 __ j(not_equal, &subclassing);
4734
4735 Label no_info;
4736 // If the feedback vector is the undefined value, call an array constructor
4737 // that doesn't use AllocationSites.
4738 __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
4739 __ j(equal, &no_info);
4740
4741 // Only look at the lower 16 bits of the transition info.
4742 __ movp(rdx, FieldOperand(rbx, AllocationSite::kTransitionInfoOffset));
4743 __ SmiToInteger32(rdx, rdx);
4744 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
4745 __ andp(rdx, Immediate(AllocationSite::ElementsKindBits::kMask));
4746 GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
4747
4748 __ bind(&no_info);
4749 GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
4750
4751 // Subclassing
4752 __ bind(&subclassing);
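// new.target != constructor: defer to Runtime::kNewArray. The constructor
// (stored into the receiver slot), new.target and the AllocationSite travel
// as three extra arguments, hence argc is adjusted by 3 below.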
4753 switch (argument_count()) {
4754 case ANY:
4755 case MORE_THAN_ONE: {
4756 StackArgumentsAccessor args(rsp, rax);
4757 __ movp(args.GetReceiverOperand(), rdi);
4758 __ addp(rax, Immediate(3));
4759 break;
4760 }
4761 case NONE: {
4762 StackArgumentsAccessor args(rsp, 0);
4763 __ movp(args.GetReceiverOperand(), rdi);
4764 __ Set(rax, 3);
4765 break;
4766 }
4767 case ONE: {
4768 StackArgumentsAccessor args(rsp, 1);
4769 __ movp(args.GetReceiverOperand(), rdi);
4770 __ Set(rax, 4);
4771 break;
4772 }
4773 }
4774 __ PopReturnAddressTo(rcx);
4775 __ Push(rdx);
4776 __ Push(rbx);
4777 __ PushReturnAddressFrom(rcx);
4778 __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate()));
4779 }
4780
4781
4782 void InternalArrayConstructorStub::GenerateCase(
4783 MacroAssembler* masm, ElementsKind kind) {
4784 Label not_zero_case, not_one_case;
4785 Label normal_sequence;
4786
4787 __ testp(rax, rax);
4788 __ j(not_zero, &not_zero_case);
4789 InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
4790 __ TailCallStub(&stub0);
4791
4792 __ bind(&not_zero_case);
4793 __ cmpl(rax, Immediate(1));
4794 __ j(greater, &not_one_case);
4795
4796 if (IsFastPackedElementsKind(kind)) {
4797 // We might need to create a holey array:
4798 // look at the first argument.
4799 StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
4800 __ movp(rcx, args.GetArgumentOperand(0));
4801 __ testp(rcx, rcx);
4802 __ j(zero, &normal_sequence);
4803
4804 InternalArraySingleArgumentConstructorStub
4805 stub1_holey(isolate(), GetHoleyElementsKind(kind));
4806 __ TailCallStub(&stub1_holey);
4807 }
4808
4809 __ bind(&normal_sequence);
4810 InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
4811 __ TailCallStub(&stub1);
4812
4813 __ bind(&not_one_case);
4814 InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
4815 __ TailCallStub(&stubN);
4816 }
4817
4818
4819 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
4820 // ----------- S t a t e -------------
4821 // -- rax : argc
4822 // -- rdi : constructor
4823 // -- rsp[0] : return address
4824 // -- rsp[8] : last argument
4825 // -----------------------------------
4826
4827 if (FLAG_debug_code) {
4828 // The array construct code is only set for the global and natives
4829 // builtin Array functions which always have maps.
4830
4831 // Initial map for the builtin Array function should be a map.
4832 __ movp(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
4833 // A Smi check catches both a NULL pointer and a Smi, since kSmiTag is zero.
4834 STATIC_ASSERT(kSmiTag == 0);
4835 Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
4836 __ Check(not_smi, kUnexpectedInitialMapForArrayFunction);
4837 __ CmpObjectType(rcx, MAP_TYPE, rcx);
4838 __ Check(equal, kUnexpectedInitialMapForArrayFunction);
4839 }
4840
4841 // Figure out the right elements kind
4842 __ movp(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
4843
4844 // Load the map's "bit field 2" into rcx. We only need the low byte,
4845 // but the following masking takes care of that anyway.
4846 __ movzxbp(rcx, FieldOperand(rcx, Map::kBitField2Offset));
4847 // Retrieve elements_kind from bit field 2.
4848 __ DecodeField<Map::ElementsKindBits>(rcx);
4849
4850 if (FLAG_debug_code) {
4851 Label done;
4852 __ cmpl(rcx, Immediate(FAST_ELEMENTS));
4853 __ j(equal, &done);
4854 __ cmpl(rcx, Immediate(FAST_HOLEY_ELEMENTS));
4855 __ Assert(equal,
4856 kInvalidElementsKindForInternalArrayOrInternalPackedArray);
4857 __ bind(&done);
4858 }
4859
4860 Label fast_elements_case;
4861 __ cmpl(rcx, Immediate(FAST_ELEMENTS));
4862 __ j(equal, &fast_elements_case);
4863 GenerateCase(masm, FAST_HOLEY_ELEMENTS);
4864
4865 __ bind(&fast_elements_case);
4866 GenerateCase(masm, FAST_ELEMENTS);
4867 }
4868
4869
4870 void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
4871 Register context_reg = rsi;
4872 Register slot_reg = rbx;
4873 Register result_reg = rax;
4874 Label slow_case;
4875
4876 // Go up context chain to the script context.
4877 for (int i = 0; i < depth(); ++i) {
4878 __ movp(rdi, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
4879 context_reg = rdi;
4880 }
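// context_reg now designates the script context; its slots hold the
// PropertyCells that back script-scope global variables.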
4881
4882 // Load the PropertyCell value at the specified slot.
4883 __ movp(result_reg, ContextOperand(context_reg, slot_reg));
4884 __ movp(result_reg, FieldOperand(result_reg, PropertyCell::kValueOffset));
4885
4886 // Check that value is not the_hole.
4887 __ CompareRoot(result_reg, Heap::kTheHoleValueRootIndex);
4888 __ j(equal, &slow_case, Label::kNear);
4889 __ Ret();
4890
4891 // Fallback to the runtime.
4892 __ bind(&slow_case);
4893 __ Integer32ToSmi(slot_reg, slot_reg);
4894 __ PopReturnAddressTo(kScratchRegister);
4895 __ Push(slot_reg);
4896 __ Push(kScratchRegister);
4897 __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
4898 }
4899
4900
4901 void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) {
4902 Register context_reg = rsi;
4903 Register slot_reg = rbx;
4904 Register value_reg = rax;
4905 Register cell_reg = r8;
4906 Register cell_details_reg = rdx;
4907 Register cell_value_reg = r9;
4908 Label fast_heapobject_case, fast_smi_case, slow_case;
4909
4910 if (FLAG_debug_code) {
4911 __ CompareRoot(value_reg, Heap::kTheHoleValueRootIndex);
4912 __ Check(not_equal, kUnexpectedValue);
4913 }
4914
4915 // Go up context chain to the script context.
4916 for (int i = 0; i < depth(); ++i) {
4917 __ movp(rdi, ContextOperand(context_reg, Context::PREVIOUS_INDEX));
4918 context_reg = rdi;
4919 }
4920
4921 // Load the PropertyCell at the specified slot.
4922 __ movp(cell_reg, ContextOperand(context_reg, slot_reg));
4923
4924 // Load PropertyDetails for the cell (actually only the cell_type, kind and
4925 // READ_ONLY bit of attributes).
4926 __ SmiToInteger32(cell_details_reg,
4927 FieldOperand(cell_reg, PropertyCell::kDetailsOffset));
4928 __ andl(cell_details_reg,
4929 Immediate(PropertyDetails::PropertyCellTypeField::kMask |
4930 PropertyDetails::KindField::kMask |
4931 PropertyDetails::kAttributesReadOnlyMask));
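// Only the cell type, kind and READ_ONLY bits survive the mask, so the
// equality checks below can compare against exact encode(...) combinations
// without interference from other attribute bits.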
4932
4933 // Check if PropertyCell holds mutable data.
4934 Label not_mutable_data;
4935 __ cmpl(cell_details_reg,
4936 Immediate(PropertyDetails::PropertyCellTypeField::encode(
4937 PropertyCellType::kMutable) |
4938 PropertyDetails::KindField::encode(kData)));
4939 __ j(not_equal, &not_mutable_data);
4940 __ JumpIfSmi(value_reg, &fast_smi_case);
4941 __ bind(&fast_heapobject_case);
4942 __ movp(FieldOperand(cell_reg, PropertyCell::kValueOffset), value_reg);
4943 __ RecordWriteField(cell_reg, PropertyCell::kValueOffset, value_reg,
4944 cell_value_reg, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
4945 OMIT_SMI_CHECK);
4946 // RecordWriteField clobbers the value register, so we need to reload.
4947 __ movp(value_reg, FieldOperand(cell_reg, PropertyCell::kValueOffset));
4948 __ Ret();
4949 __ bind(&not_mutable_data);
4950
4951 // Check if PropertyCell value matches the new value (relevant for Constant,
4952 // ConstantType and Undefined cells).
4953 Label not_same_value;
4954 __ movp(cell_value_reg, FieldOperand(cell_reg, PropertyCell::kValueOffset));
4955 __ cmpp(cell_value_reg, value_reg);
4956 __ j(not_equal, &not_same_value,
4957 FLAG_debug_code ? Label::kFar : Label::kNear);
4958 // Make sure the PropertyCell is not marked READ_ONLY.
4959 __ testl(cell_details_reg,
4960 Immediate(PropertyDetails::kAttributesReadOnlyMask));
4961 __ j(not_zero, &slow_case);
4962 if (FLAG_debug_code) {
4963 Label done;
4964 // This can only be true for Constant, ConstantType and Undefined cells,
4965 // because we never store the_hole via this stub.
4966 __ cmpl(cell_details_reg,
4967 Immediate(PropertyDetails::PropertyCellTypeField::encode(
4968 PropertyCellType::kConstant) |
4969 PropertyDetails::KindField::encode(kData)));
4970 __ j(equal, &done);
4971 __ cmpl(cell_details_reg,
4972 Immediate(PropertyDetails::PropertyCellTypeField::encode(
4973 PropertyCellType::kConstantType) |
4974 PropertyDetails::KindField::encode(kData)));
4975 __ j(equal, &done);
4976 __ cmpl(cell_details_reg,
4977 Immediate(PropertyDetails::PropertyCellTypeField::encode(
4978 PropertyCellType::kUndefined) |
4979 PropertyDetails::KindField::encode(kData)));
4980 __ Check(equal, kUnexpectedValue);
4981 __ bind(&done);
4982 }
4983 __ Ret();
4984 __ bind(&not_same_value);
4985
4986 // Check if PropertyCell contains data with constant type (and is not
4987 // READ_ONLY).
4988 __ cmpl(cell_details_reg,
4989 Immediate(PropertyDetails::PropertyCellTypeField::encode(
4990 PropertyCellType::kConstantType) |
4991 PropertyDetails::KindField::encode(kData)));
4992 __ j(not_equal, &slow_case, Label::kNear);
4993
4994 // Now either both old and new values must be SMIs or both must be heap
4995 // objects with same map.
4996 Label value_is_heap_object;
4997 __ JumpIfNotSmi(value_reg, &value_is_heap_object, Label::kNear);
4998 __ JumpIfNotSmi(cell_value_reg, &slow_case, Label::kNear);
4999 // Old and new values are SMIs, no need for a write barrier here.
5000 __ bind(&fast_smi_case);
5001 __ movp(FieldOperand(cell_reg, PropertyCell::kValueOffset), value_reg);
5002 __ Ret();
5003 __ bind(&value_is_heap_object);
5004 __ JumpIfSmi(cell_value_reg, &slow_case, Label::kNear);
5005 Register cell_value_map_reg = cell_value_reg;
5006 __ movp(cell_value_map_reg,
5007 FieldOperand(cell_value_reg, HeapObject::kMapOffset));
5008 __ cmpp(cell_value_map_reg, FieldOperand(value_reg, HeapObject::kMapOffset));
5009 __ j(equal, &fast_heapobject_case);
5010
5011 // Fallback to the runtime.
5012 __ bind(&slow_case);
5013 __ Integer32ToSmi(slot_reg, slot_reg);
5014 __ PopReturnAddressTo(kScratchRegister);
5015 __ Push(slot_reg);
5016 __ Push(value_reg);
5017 __ Push(kScratchRegister);
5018 __ TailCallRuntime(is_strict(language_mode())
5019 ? Runtime::kStoreGlobalViaContext_Strict
5020 : Runtime::kStoreGlobalViaContext_Sloppy);
5021 }
5022
5023
5024 static int Offset(ExternalReference ref0, ExternalReference ref1) {
5025 int64_t offset = (ref0.address() - ref1.address());
5026 // Check that the difference fits into an int.
5027 DCHECK(static_cast<int>(offset) == offset);
5028 return static_cast<int>(offset);
5029 }
5030
5031
5032 // Prepares the stack for arguments (alignment and so on). The WIN64 calling
5033 // convention requires the pointer to the return value slot to be passed in
5034 // rcx (rcx must be preserved until CallApiFunctionAndReturn). Saves the
5035 // context (rsi). Clobbers rax. Allocates arg_stack_space * kPointerSize
5036 // inside the exit frame (not GCed), accessible via StackSpaceOperand.
5037 static void PrepareCallApiFunction(MacroAssembler* masm, int arg_stack_space) {
5038 __ EnterApiExitFrame(arg_stack_space);
5039 }
5040
5041
5042 // Calls an API function. Allocates HandleScope, extracts returned value
5043 // from handle and propagates exceptions. Clobbers r14, r15, rbx and
5044 // caller-save registers. Restores context. On return removes
5045 // stack_space * kPointerSize (GCed).
5046 static void CallApiFunctionAndReturn(MacroAssembler* masm,
5047 Register function_address,
5048 ExternalReference thunk_ref,
5049 Register thunk_last_arg, int stack_space,
5050 Operand* stack_space_operand,
5051 Operand return_value_operand,
5052 Operand* context_restore_operand) {
5053 Label prologue;
5054 Label promote_scheduled_exception;
5055 Label delete_allocated_handles;
5056 Label leave_exit_frame;
5057 Label write_back;
5058
5059 Isolate* isolate = masm->isolate();
5060 Factory* factory = isolate->factory();
5061 ExternalReference next_address =
5062 ExternalReference::handle_scope_next_address(isolate);
5063 const int kNextOffset = 0;
5064 const int kLimitOffset = Offset(
5065 ExternalReference::handle_scope_limit_address(isolate), next_address);
5066 const int kLevelOffset = Offset(
5067 ExternalReference::handle_scope_level_address(isolate), next_address);
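// The next/limit/level fields sit at fixed distances from
// handle_scope_next_address, so the two Offset() deltas above let a single
// base register reach all three below.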
5068 ExternalReference scheduled_exception_address =
5069 ExternalReference::scheduled_exception_address(isolate);
5070
5071 DCHECK(rdx.is(function_address) || r8.is(function_address));
5072 // Allocate HandleScope in callee-save registers.
5073 Register prev_next_address_reg = r14;
5074 Register prev_limit_reg = rbx;
5075 Register base_reg = r15;
5076 __ Move(base_reg, next_address);
5077 __ movp(prev_next_address_reg, Operand(base_reg, kNextOffset));
5078 __ movp(prev_limit_reg, Operand(base_reg, kLimitOffset));
5079 __ addl(Operand(base_reg, kLevelOffset), Immediate(1));
5080
5081 if (FLAG_log_timer_events) {
5082 FrameScope frame(masm, StackFrame::MANUAL);
5083 __ PushSafepointRegisters();
5084 __ PrepareCallCFunction(1);
5085 __ LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate));
5086 __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
5087 1);
5088 __ PopSafepointRegisters();
5089 }
5090
5091 Label profiler_disabled;
5092 Label end_profiler_check;
5093 __ Move(rax, ExternalReference::is_profiling_address(isolate));
5094 __ cmpb(Operand(rax, 0), Immediate(0));
5095 __ j(zero, &profiler_disabled);
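// Profiler enabled: route the call through the thunk so the profiler can
// bracket the callback; the real callback address is handed over in
// thunk_last_arg.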
5096
5097 // Third parameter is the address of the actual getter function.
5098 __ Move(thunk_last_arg, function_address);
5099 __ Move(rax, thunk_ref);
5100 __ jmp(&end_profiler_check);
5101
5102 __ bind(&profiler_disabled);
5103 // Profiling is disabled: call the api function directly.
5104 __ Move(rax, function_address);
5105
5106 __ bind(&end_profiler_check);
5107
5108 // Call the api function!
5109 __ call(rax);
5110
5111 if (FLAG_log_timer_events) {
5112 FrameScope frame(masm, StackFrame::MANUAL);
5113 __ PushSafepointRegisters();
5114 __ PrepareCallCFunction(1);
5115 __ LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate));
5116 __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
5117 1);
5118 __ PopSafepointRegisters();
5119 }
5120
5121 // Load the value from ReturnValue
5122 __ movp(rax, return_value_operand);
5123 __ bind(&prologue);
5124
5125 // No more valid handles (the result handle was the last one). Restore
5126 // previous handle scope.
5127 __ subl(Operand(base_reg, kLevelOffset), Immediate(1));
5128 __ movp(Operand(base_reg, kNextOffset), prev_next_address_reg);
5129 __ cmpp(prev_limit_reg, Operand(base_reg, kLimitOffset));
5130 __ j(not_equal, &delete_allocated_handles);
5131
5132 // Leave the API exit frame.
5133 __ bind(&leave_exit_frame);
5134 bool restore_context = context_restore_operand != NULL;
5135 if (restore_context) {
5136 __ movp(rsi, *context_restore_operand);
5137 }
5138 if (stack_space_operand != nullptr) {
5139 __ movp(rbx, *stack_space_operand);
5140 }
5141 __ LeaveApiExitFrame(!restore_context);
5142
5143 // Check if the function scheduled an exception.
5144 __ Move(rdi, scheduled_exception_address);
5145 __ Cmp(Operand(rdi, 0), factory->the_hole_value());
5146 __ j(not_equal, &promote_scheduled_exception);
5147
5148 #if DEBUG
5149 // Check if the function returned a valid JavaScript value.
5150 Label ok;
5151 Register return_value = rax;
5152 Register map = rcx;
5153
5154 __ JumpIfSmi(return_value, &ok, Label::kNear);
5155 __ movp(map, FieldOperand(return_value, HeapObject::kMapOffset));
5156
5157 __ CmpInstanceType(map, LAST_NAME_TYPE);
5158 __ j(below_equal, &ok, Label::kNear);
5159
5160 __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
5161 __ j(above_equal, &ok, Label::kNear);
5162
5163 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
5164 __ j(equal, &ok, Label::kNear);
5165
5166 __ CompareRoot(return_value, Heap::kUndefinedValueRootIndex);
5167 __ j(equal, &ok, Label::kNear);
5168
5169 __ CompareRoot(return_value, Heap::kTrueValueRootIndex);
5170 __ j(equal, &ok, Label::kNear);
5171
5172 __ CompareRoot(return_value, Heap::kFalseValueRootIndex);
5173 __ j(equal, &ok, Label::kNear);
5174
5175 __ CompareRoot(return_value, Heap::kNullValueRootIndex);
5176 __ j(equal, &ok, Label::kNear);
5177
5178 __ Abort(kAPICallReturnedInvalidObject);
5179
5180 __ bind(&ok);
5181 #endif
5182
5183 if (stack_space_operand != nullptr) {
5184 DCHECK_EQ(stack_space, 0);
5185 __ PopReturnAddressTo(rcx);
5186 __ addq(rsp, rbx);
5187 __ jmp(rcx);
5188 } else {
5189 __ ret(stack_space * kPointerSize);
5190 }
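// A compile-time stack_space can use "ret n", which pops n extra bytes after
// the return address; the dynamic case has no such form, so the return
// address is popped, rsp adjusted by rbx, and control transferred manually.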
5191
5192 // Re-throw by promoting a scheduled exception.
5193 __ bind(&promote_scheduled_exception);
5194 __ TailCallRuntime(Runtime::kPromoteScheduledException);
5195
5196 // HandleScope limit has changed. Delete allocated extensions.
5197 __ bind(&delete_allocated_handles);
5198 __ movp(Operand(base_reg, kLimitOffset), prev_limit_reg);
5199 __ movp(prev_limit_reg, rax);
5200 __ LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate));
5201 __ LoadAddress(rax,
5202 ExternalReference::delete_handle_scope_extensions(isolate));
5203 __ call(rax);
5204 __ movp(rax, prev_limit_reg);
5205 __ jmp(&leave_exit_frame);
5206 }
5207
5208
5209 static void CallApiFunctionStubHelper(MacroAssembler* masm,
5210 const ParameterCount& argc,
5211 bool return_first_arg,
5212 bool call_data_undefined) {
5213 // ----------- S t a t e -------------
5214 // -- rdi : callee
5215 // -- rbx : call_data
5216 // -- rcx : holder
5217 // -- rdx : api_function_address
5218 // -- rsi : context
5219 // -- rax : number of arguments if argc is a register
5220 // -- rsp[0] : return address
5221 // -- rsp[8] : last argument
5222 // -- ...
5223 // -- rsp[argc * 8] : first argument
5224 // -- rsp[(argc + 1) * 8] : receiver
5225 // -----------------------------------
5226
5227 Register callee = rdi;
5228 Register call_data = rbx;
5229 Register holder = rcx;
5230 Register api_function_address = rdx;
5231 Register context = rsi;
5232 Register return_address = r8;
5233
5234 typedef FunctionCallbackArguments FCA;
5235
5236 STATIC_ASSERT(FCA::kContextSaveIndex == 6);
5237 STATIC_ASSERT(FCA::kCalleeIndex == 5);
5238 STATIC_ASSERT(FCA::kDataIndex == 4);
5239 STATIC_ASSERT(FCA::kReturnValueOffset == 3);
5240 STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
5241 STATIC_ASSERT(FCA::kIsolateIndex == 1);
5242 STATIC_ASSERT(FCA::kHolderIndex == 0);
5243 STATIC_ASSERT(FCA::kArgsLength == 7);
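// The pushes below build this block in reverse index order (context first,
// holder last), so the holder lands at the lowest address, index 0.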
5244
5245 DCHECK(argc.is_immediate() || rax.is(argc.reg()));
5246
5247 __ PopReturnAddressTo(return_address);
5248
5249 // context save
5250 __ Push(context);
5251
5252 // callee
5253 __ Push(callee);
5254
5255 // call data
5256 __ Push(call_data);
5257 Register scratch = call_data;
5258 if (!call_data_undefined) {
5259 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
5260 }
5261 // return value
5262 __ Push(scratch);
5263 // return value default
5264 __ Push(scratch);
5265 // isolate
5266 __ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
5267 __ Push(scratch);
5268 // holder
5269 __ Push(holder);
5270
5271 __ movp(scratch, rsp);
5272 // Push return address back on stack.
5273 __ PushReturnAddressFrom(return_address);
5274
5275 // load context from callee
5276 __ movp(context, FieldOperand(callee, JSFunction::kContextOffset));
5277
5278 // Allocate the v8::Arguments structure in the arguments' space since
5279 // it's not controlled by GC.
5280 const int kApiStackSpace = 4;
5281
5282 PrepareCallApiFunction(masm, kApiStackSpace);
5283
5284 // FunctionCallbackInfo::implicit_args_.
5285 __ movp(StackSpaceOperand(0), scratch);
5286 if (argc.is_immediate()) {
5287 __ addp(scratch, Immediate((argc.immediate() + FCA::kArgsLength - 1) *
5288 kPointerSize));
5289 // FunctionCallbackInfo::values_.
5290 __ movp(StackSpaceOperand(1), scratch);
5291 // FunctionCallbackInfo::length_.
5292 __ Set(StackSpaceOperand(2), argc.immediate());
5293 // FunctionCallbackInfo::is_construct_call_.
5294 __ Set(StackSpaceOperand(3), 0);
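// With an immediate argc, values_ points at the first JS argument, which
// sits (argc + kArgsLength - 1) words above implicit_args_ since arguments
// were pushed in reverse order.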
5295 } else {
5296 __ leap(scratch, Operand(scratch, argc.reg(), times_pointer_size,
5297 (FCA::kArgsLength - 1) * kPointerSize));
5298 // FunctionCallbackInfo::values_.
5299 __ movp(StackSpaceOperand(1), scratch);
5300 // FunctionCallbackInfo::length_.
5301 __ movp(StackSpaceOperand(2), argc.reg());
5302 // FunctionCallbackInfo::is_construct_call_.
5303 __ leap(argc.reg(), Operand(argc.reg(), times_pointer_size,
5304 (FCA::kArgsLength + 1) * kPointerSize));
5305 __ movp(StackSpaceOperand(3), argc.reg());
5306 }
5307
5308 #if defined(__MINGW64__) || defined(_WIN64)
5309 Register arguments_arg = rcx;
5310 Register callback_arg = rdx;
5311 #else
5312 Register arguments_arg = rdi;
5313 Register callback_arg = rsi;
5314 #endif
5315
5316 // It's okay if api_function_address == callback_arg
5317 // but not arguments_arg
5318 DCHECK(!api_function_address.is(arguments_arg));
5319
5320 // v8::InvocationCallback's argument.
5321 __ leap(arguments_arg, StackSpaceOperand(0));
5322
5323 ExternalReference thunk_ref =
5324 ExternalReference::invoke_function_callback(masm->isolate());
5325
5326 // Accessor for FunctionCallbackInfo and first js arg.
5327 StackArgumentsAccessor args_from_rbp(rbp, FCA::kArgsLength + 1,
5328 ARGUMENTS_DONT_CONTAIN_RECEIVER);
5329 Operand context_restore_operand = args_from_rbp.GetArgumentOperand(
5330 FCA::kArgsLength - FCA::kContextSaveIndex);
5331 Operand is_construct_call_operand = StackSpaceOperand(3);
5332 Operand return_value_operand = args_from_rbp.GetArgumentOperand(
5333 return_first_arg ? 0 : FCA::kArgsLength - FCA::kReturnValueOffset);
5334 int stack_space = 0;
5335 Operand* stack_space_operand = &is_construct_call_operand;
5336 if (argc.is_immediate()) {
5337 stack_space = argc.immediate() + FCA::kArgsLength + 1;
5338 stack_space_operand = nullptr;
5339 }
5340 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, callback_arg,
5341 stack_space, stack_space_operand,
5342 return_value_operand, &context_restore_operand);
5343 }
5344
5345
5346 void CallApiFunctionStub::Generate(MacroAssembler* masm) {
5347 bool call_data_undefined = this->call_data_undefined();
5348 CallApiFunctionStubHelper(masm, ParameterCount(rax), false,
5349 call_data_undefined);
5350 }
5351
5352
5353 void CallApiAccessorStub::Generate(MacroAssembler* masm) {
5354 bool is_store = this->is_store();
5355 int argc = this->argc();
5356 bool call_data_undefined = this->call_data_undefined();
5357 CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
5358 call_data_undefined);
5359 }
5360
5361
5362 void CallApiGetterStub::Generate(MacroAssembler* masm) {
5363 // ----------- S t a t e -------------
5364 // -- rsp[0] : return address
5365 // -- rsp[8] : name
5366 // -- rsp[16 - kArgsLength*8] : PropertyCallbackArguments object
5367 // -- ...
5368 // -- r8 : api_function_address
5369 // -----------------------------------
5370
5371 #if defined(__MINGW64__) || defined(_WIN64)
5372 Register getter_arg = r8;
5373 Register accessor_info_arg = rdx;
5374 Register name_arg = rcx;
5375 #else
5376 Register getter_arg = rdx;
5377 Register accessor_info_arg = rsi;
5378 Register name_arg = rdi;
5379 #endif
5380 Register api_function_address = ApiGetterDescriptor::function_address();
5381 DCHECK(api_function_address.is(r8));
5382 Register scratch = rax;
5383
5384 // v8::Arguments::values_ and handler for name.
5385 const int kStackSpace = PropertyCallbackArguments::kArgsLength + 1;
5386
5387 // Allocate v8::AccessorInfo in non-GCed stack space.
5388 const int kArgStackSpace = 1;
5389
5390 __ leap(name_arg, Operand(rsp, kPCOnStackSize));
5391
5392 PrepareCallApiFunction(masm, kArgStackSpace);
5393 __ leap(scratch, Operand(name_arg, 1 * kPointerSize));
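// name_arg (set up above) addresses the name handle just above the return
// address; scratch now points one word past it, at the start of the
// PropertyCallbackArguments block.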
5394
5395 // v8::PropertyAccessorInfo::args_.
5396 __ movp(StackSpaceOperand(0), scratch);
5397
5398 // The context register (rsi) has been saved in PrepareCallApiFunction and
5399 // could be used to pass arguments.
5400 __ leap(accessor_info_arg, StackSpaceOperand(0));
5401
5402 ExternalReference thunk_ref =
5403 ExternalReference::invoke_accessor_getter_callback(isolate());
5404
5405 // It's okay if api_function_address == getter_arg
5406 // but not accessor_info_arg or name_arg
5407 DCHECK(!api_function_address.is(accessor_info_arg) &&
5408 !api_function_address.is(name_arg));
5409
5410 // The name handler is counted as an argument.
5411 StackArgumentsAccessor args(rbp, PropertyCallbackArguments::kArgsLength);
5412 Operand return_value_operand = args.GetArgumentOperand(
5413 PropertyCallbackArguments::kArgsLength - 1 -
5414 PropertyCallbackArguments::kReturnValueOffset);
5415 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, getter_arg,
5416 kStackSpace, nullptr, return_value_operand, NULL);
5417 }
5418
5419
5420 #undef __
5421
5422 } // namespace internal
5423 } // namespace v8
5424
5425 #endif // V8_TARGET_ARCH_X64
5426