// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_IA32

#include "src/crankshaft/ia32/lithium-codegen-ia32.h"

#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/crankshaft/hydrogen-osr.h"
#include "src/deoptimizer.h"
#include "src/ia32/frames-ia32.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {

// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
class SafepointGenerator final : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) {}
  virtual ~SafepointGenerator() {}

  void BeforeCall(int call_size) const override {}

  void AfterCall() const override {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // MANUAL indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::MANUAL);

  return GeneratePrologue() &&
         GenerateBody() &&
         GenerateDeferredCode() &&
         GenerateJumpTable() &&
         GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetTotalFrameSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
    Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
  }
}


#ifdef _MSC_VER
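// On Windows, the OS grows the stack one page at a time via a guard page, so
// moving esp past an untouched page faults. After the prologue lowers esp,
// the loop below touches one word in every 4 KB page of the newly reserved
// area (in descending address order) so each guard page is hit in turn. The
// value stored (eax) is irrelevant; only the write itself matters.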
void LCodeGen::MakeSureStackPagesMapped(int offset) {
  const int kPageSize = 4 * KB;
  for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
    __ mov(Operand(esp, offset), eax);
  }
}
#endif


void LCodeGen::SaveCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ movsd(MemOperand(esp, count * kDoubleSize),
             XMMRegister::from_code(save_iterator.Current()));
    save_iterator.Advance();
    count++;
  }
}


void LCodeGen::RestoreCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  int count = 0;
  while (!save_iterator.Done()) {
    __ movsd(XMMRegister::from_code(save_iterator.Current()),
             MemOperand(esp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    DCHECK(!frame_is_built_);
    frame_is_built_ = true;
    if (info()->IsStub()) {
      __ StubPrologue(StackFrame::STUB);
    } else {
      __ Prologue(info()->GeneratePreagedPrologue());
    }
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  DCHECK(slots != 0 || !info()->IsOptimizing());
  if (slots > 0) {
    __ sub(Operand(esp), Immediate(slots * kPointerSize));
#ifdef _MSC_VER
    MakeSureStackPagesMapped(slots * kPointerSize);
#endif
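    // In debug builds, fill the reserved slots with the zap value so stale
    // reads stand out. eax counts down from `slots`; the store targets
    // esp + 4 * eax, which walks exactly the `slots` slots above the saved
    // eax at esp.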
    if (FLAG_debug_code) {
      __ push(eax);
      __ mov(Operand(eax), Immediate(slots));
      Label loop;
      __ bind(&loop);
      __ mov(MemOperand(esp, eax, times_4, 0), Immediate(kSlotsZapValue));
      __ dec(eax);
      __ j(not_zero, &loop);
      __ pop(eax);
    }

    if (info()->saves_caller_doubles()) SaveCallerDoubles();
  }
  return !is_aborted();
}


void LCodeGen::DoPrologue(LPrologue* instr) {
  Comment(";;; Prologue begin");

  // Possibly allocate a local context.
  if (info_->scope()->NeedsContext()) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is still in edi.
    int slots = info_->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
    Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
    if (info()->scope()->is_script_scope()) {
      __ push(edi);
      __ Push(info()->scope()->scope_info());
      __ CallRuntime(Runtime::kNewScriptContext);
      deopt_mode = Safepoint::kLazyDeopt;
    } else {
      if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
        FastNewFunctionContextStub stub(isolate());
        __ mov(FastNewFunctionContextDescriptor::SlotsRegister(),
               Immediate(slots));
        __ CallStub(&stub);
        // Result of FastNewFunctionContextStub is always in new space.
        need_write_barrier = false;
      } else {
        __ push(edi);
        __ CallRuntime(Runtime::kNewFunctionContext);
      }
    }
    RecordSafepoint(deopt_mode);

    // Context is returned in eax. It replaces the context passed to us.
    // It's saved in the stack and kept live in esi.
    __ mov(esi, eax);
    __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax);

    // Copy parameters into context if necessary.
    int num_parameters = info()->scope()->num_parameters();
    int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0;
    for (int i = first_parameter; i < num_parameters; i++) {
      Variable* var = (i == -1) ? info()->scope()->receiver()
                                : info()->scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
                               (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ mov(eax, Operand(ebp, parameter_offset));
        // Store it in the context.
        int context_offset = Context::SlotOffset(var->index());
        __ mov(Operand(esi, context_offset), eax);
        // Update the write barrier. This clobbers eax and ebx.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(esi,
                                    context_offset,
                                    eax,
                                    ebx,
                                    kDontSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(esi, eax, &done, Label::kNear);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  Comment(";;; Prologue end");
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if
  // there are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 0);
  __ sub(esp, Immediate(slots * kPointerSize));
}


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}


void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {}


bool LCodeGen::GenerateJumpTable() {
  if (!jump_table_.length()) return !is_aborted();

  Label needs_frame;
  Comment(";;; -------------------- Jump table --------------------");

  for (int i = 0; i < jump_table_.length(); i++) {
    Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
    __ bind(&table_entry->label);
    Address entry = table_entry->address;
    DeoptComment(table_entry->deopt_info);
    if (table_entry->needs_frame) {
      DCHECK(!info()->saves_caller_doubles());
      __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
      __ call(&needs_frame);
    } else {
      if (info()->saves_caller_doubles()) RestoreCallerDoubles();
      __ call(entry, RelocInfo::RUNTIME_ENTRY);
    }
  }
  if (needs_frame.is_linked()) {
    __ bind(&needs_frame);
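    // Shared trampoline: each deopt site without a frame pushed its deopt
    // entry address and then call'ed here, so the stack holds that entry
    // address plus our return address. Build a minimal stub frame in place
    // and use ret to jump to the entry address with no register clobbered.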
    /* stack layout
       3: entry address
       2: return address  <-- esp
       1: garbage
       0: garbage
    */
    __ push(MemOperand(esp, 0));                 // Copy return address.
    __ push(MemOperand(esp, 2 * kPointerSize));  // Copy entry address.

    /* stack layout
       3: entry address
       2: return address
       1: return address
       0: entry address  <-- esp
    */
    __ mov(MemOperand(esp, 3 * kPointerSize), ebp);  // Save ebp.
    // Fill ebp with the right stack frame address.
    __ lea(ebp, MemOperand(esp, 3 * kPointerSize));

    // This variant of deopt can only be used with stubs. Since we don't
    // have a function pointer to install in the stack frame that we're
    // building, install a special marker there instead.
    DCHECK(info()->IsStub());
    __ mov(MemOperand(esp, 2 * kPointerSize),
           Immediate(Smi::FromInt(StackFrame::STUB)));

    /* stack layout
       3: old ebp
       2: stub marker
       1: return address
       0: entry address  <-- esp
    */
    __ ret(0);  // Call the continuation without clobbering registers.
  }
  return !is_aborted();
}


bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(value->position());

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        // Build the frame in such a way that esi isn't trashed.
        __ push(ebp);  // Caller's frame pointer.
        __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
        __ lea(ebp, Operand(esp, TypedFrameConstants::kFixedFrameSizeFromFp));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        __ bind(code->done());
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        frame_is_built_ = false;
        __ mov(esp, ebp);
        __ pop(ebp);
      }
      __ jmp(code->exit());
    }
  }

  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
    // For lazy deoptimization we need space to patch a call after every call.
    // Ensure there is always space for such patching, even if the code ends
    // in a call.
    int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
    while (masm()->pc_offset() < target_offset) {
      masm()->nop();
    }
  }
  safepoints_.Emit(masm(), GetTotalFrameSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int code) const {
  return Register::from_code(code);
}


XMMRegister LCodeGen::ToDoubleRegister(int code) const {
  return XMMRegister::from_code(code);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  DCHECK(op->IsRegister());
  return ToRegister(op->index());
}


XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}


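// On 32-bit targets a Smi is the value shifted left by one with a zero tag
// bit, so the reinterpret_cast in the SmiOrTagged case below simply yields
// value << 1. External references are returned as their raw 32-bit address.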
int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  if (r.IsExternal()) {
    return reinterpret_cast<int32_t>(
        constant->ExternalReferenceValue().address());
  }
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  DCHECK(r.IsSmiOrTagged());
  return reinterpret_cast<int32_t>(Smi::FromInt(value));
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}


ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasExternalReferenceValue());
  return constant->ExternalReferenceValue();
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


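// Maps an argument's (negative) spill index to an esp-relative offset when no
// frame has been built: e.g. index -1 gives -(-1 + 1) * kPointerSize +
// kPCOnStackSize == kPCOnStackSize, the slot just above the return address.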
static int ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
  return -(index + 1) * kPointerSize + kPCOnStackSize;
}


Operand LCodeGen::ToOperand(LOperand* op) const {
  if (op->IsRegister()) return Operand(ToRegister(op));
  if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return Operand(ebp, FrameSlotToFPOffset(op->index()));
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack-pointer.
    return Operand(esp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}


Operand LCodeGen::HighOperand(LOperand* op) {
  DCHECK(op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return Operand(ebp, FrameSlotToFPOffset(op->index()) + kPointerSize);
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack-pointer.
    return Operand(
        esp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
  }
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();

  WriteTranslation(environment->outer(), translation);
  WriteTranslationFrame(environment, translation);

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(
        environment, translation, value, environment->HasTaggedValueAt(i),
        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
  }
}


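// Field values of dematerialized (captured or arguments) objects are stored
// in the environment after its first translation_size values. When the
// materialization marker is hit below, those fields are found at offset
// translation_size + dematerialized_index and emitted by recursing;
// duplicate objects become back-references to the first occurrence.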
void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    int index = op->index();
    if (is_tagged) {
      translation->StoreStackSlot(index);
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(index);
    } else {
      translation->StoreInt32StackSlot(index);
    }
  } else if (op->IsDoubleStackSlot()) {
    int index = op->index();
    translation->StoreDoubleStackSlot(index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    XMMRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  DCHECK(instr != NULL);
  __ call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallRuntime(const Runtime::Function* fun,
                           int argc,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);
  DCHECK(instr->HasPointerMap());

  __ CallRuntime(fun, argc, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);

  DCHECK(info()->is_calling());
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    if (!ToRegister(context).is(esi)) {
      __ mov(esi, ToRegister(context));
    }
  } else if (context->IsStackSlot()) {
    __ mov(esi, ToOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ LoadObject(esi, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}

void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);

  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);

  DCHECK(info()->is_calling());
}


void LCodeGen::RegisterEnvironmentForDeoptimization(
    LEnvironment* environment, Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}

void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
                            DeoptimizeReason deopt_reason,
                            Deoptimizer::BailoutType bailout_type) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (DeoptEveryNTimes()) {
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;
    __ pushfd();
    __ push(eax);
    __ mov(eax, Operand::StaticVariable(count));
    __ sub(eax, Immediate(1));
    __ j(not_zero, &no_deopt, Label::kNear);
    if (FLAG_trap_on_deopt) __ int3();
    __ mov(eax, Immediate(FLAG_deopt_every_n_times));
    __ mov(Operand::StaticVariable(count), eax);
    __ pop(eax);
    __ popfd();
    DCHECK(frame_is_built_);
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
    __ bind(&no_deopt);
    __ mov(Operand::StaticVariable(count), eax);
    __ pop(eax);
    __ popfd();
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label done;
    if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
    __ int3();
    __ bind(&done);
  }

  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);

  DCHECK(info()->IsStub() || frame_is_built_);
  if (cc == no_condition && frame_is_built_) {
    DeoptComment(deopt_info);
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
                                            !frame_is_built_);
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (FLAG_trace_deopt || isolate()->is_profiling() ||
        jump_table_.is_empty() ||
        !table_entry.IsEquivalentTo(jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    if (cc == no_condition) {
      __ jmp(&jump_table_.last().label);
    } else {
      __ j(cc, &jump_table_.last().label);
    }
  }
}

void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
                            DeoptimizeReason deopt_reason) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(cc, instr, deopt_reason, bailout_type);
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  DCHECK(kind == expected_safepoint_kind_);
  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint =
      safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode);
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}


void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ test(dividend, dividend);
    __ j(not_sign, &dividend_is_not_negative, Label::kNear);
    // Note that this is correct even for kMinInt operands.
    __ neg(dividend);
    __ and_(dividend, mask);
    __ neg(dividend);
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
    }
    __ jmp(&done, Label::kNear);
  }

  __ bind(&dividend_is_not_negative);
  __ and_(dividend, mask);
  __ bind(&done);
}


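// Computes dividend % divisor with a magic-number multiply: TruncatingDiv
// leaves trunc(dividend / |divisor|) in edx, so the remainder is
// dividend - edx * |divisor|. The remainder's sign follows the dividend, so
// the divisor's sign is irrelevant; e.g. 7 % -3: trunc(7 / 3) = 2 and
// 7 - 2 * 3 = 1.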
void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(ToRegister(instr->result()).is(eax));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
    return;
  }

  __ TruncatingDiv(dividend, Abs(divisor));
  __ imul(edx, edx, Abs(divisor));
  __ mov(eax, dividend);
  __ sub(eax, edx);

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ j(not_zero, &remainder_not_zero, Label::kNear);
    __ cmp(dividend, Immediate(0));
    DeoptimizeIf(less, instr, DeoptimizeReason::kMinusZero);
    __ bind(&remainder_not_zero);
  }
}


void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();

  Register left_reg = ToRegister(instr->left());
  DCHECK(left_reg.is(eax));
  Register right_reg = ToRegister(instr->right());
  DCHECK(!right_reg.is(eax));
  DCHECK(!right_reg.is(edx));
  Register result_reg = ToRegister(instr->result());
  DCHECK(result_reg.is(edx));

  Label done;
  // Check for x % 0, idiv would signal a divide error. We have to
  // deopt in this case because we can't return a NaN.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    __ test(right_reg, Operand(right_reg));
    DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
  }

  // Check for kMinInt % -1, idiv would signal a divide error. We
  // have to deopt if we care about -0, because we can't return that.
  if (hmod->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ cmp(left_reg, kMinInt);
    __ j(not_equal, &no_overflow_possible, Label::kNear);
    __ cmp(right_reg, -1);
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(equal, instr, DeoptimizeReason::kMinusZero);
    } else {
      __ j(not_equal, &no_overflow_possible, Label::kNear);
      __ Move(result_reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    }
    __ bind(&no_overflow_possible);
  }

  // Sign extend dividend in eax into edx:eax.
  __ cdq();

  // If we care about -0, test if the dividend is <0 and the result is 0.
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label positive_left;
    __ test(left_reg, Operand(left_reg));
    __ j(not_sign, &positive_left, Label::kNear);
    __ idiv(right_reg);
    __ test(result_reg, Operand(result_reg));
    DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
    __ jmp(&done, Label::kNear);
    __ bind(&positive_left);
  }
  __ idiv(right_reg);
  __ bind(&done);
}


void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ test(dividend, dividend);
    DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    __ cmp(dividend, kMinInt);
    DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ test(dividend, Immediate(mask));
    DeoptimizeIf(not_zero, instr, DeoptimizeReason::kLostPrecision);
  }
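  // Truncating shift: a negative dividend needs a bias of 2^shift - 1 added
  // before the arithmetic shift. sar fills result with the sign (all ones
  // iff negative), shr turns that into the bias (2^shift - 1 or 0), and the
  // final sar divides; e.g. dividend = -5, |divisor| = 4: (-5 + 3) >> 2 = -1.
  // For shift == 1, result still equals the dividend, so shr by 31 extracts
  // the sign bit directly as the one-bit bias.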
  __ Move(result, dividend);
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (shift > 0) {
    // The arithmetic shift is always OK, the 'if' is an optimization only.
    if (shift > 1) __ sar(result, 31);
    __ shr(result, 32 - shift);
    __ add(result, dividend);
    __ sar(result, shift);
  }
  if (divisor < 0) __ neg(result);
}


void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(ToRegister(instr->result()).is(edx));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ test(dividend, dividend);
    DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
  }

  __ TruncatingDiv(dividend, Abs(divisor));
  if (divisor < 0) __ neg(edx);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    __ mov(eax, edx);
    __ imul(eax, eax, divisor);
    __ sub(eax, dividend);
    DeoptimizeIf(not_equal, instr, DeoptimizeReason::kLostPrecision);
  }
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  Register remainder = ToRegister(instr->temp());
  DCHECK(dividend.is(eax));
  DCHECK(remainder.is(edx));
  DCHECK(ToRegister(instr->result()).is(eax));
  DCHECK(!divisor.is(eax));
  DCHECK(!divisor.is(edx));

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ test(divisor, divisor);
    DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ test(dividend, dividend);
    __ j(not_zero, &dividend_not_zero, Label::kNear);
    __ test(divisor, divisor);
    DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    Label dividend_not_min_int;
    __ cmp(dividend, kMinInt);
    __ j(not_zero, &dividend_not_min_int, Label::kNear);
    __ cmp(divisor, -1);
    DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
    __ bind(&dividend_not_min_int);
  }

  // Sign extend to edx (= remainder).
  __ cdq();
  __ idiv(divisor);

  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    // Deoptimize if remainder is not 0.
    __ test(remainder, remainder);
    DeoptimizeIf(not_zero, instr, DeoptimizeReason::kLostPrecision);
  }
}


void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  if (divisor == 1) return;
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ sar(dividend, shift);
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
  __ neg(dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
  }

  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
      DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
    }
    return;
  }

  // If the negation could not overflow, simply shifting is OK.
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ sar(dividend, shift);
    return;
  }

  Label not_kmin_int, done;
  __ j(no_overflow, &not_kmin_int, Label::kNear);
  __ mov(dividend, Immediate(kMinInt / divisor));
  __ jmp(&done, Label::kNear);
  __ bind(&not_kmin_int);
  __ sar(dividend, shift);
  __ bind(&done);
}


void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(ToRegister(instr->result()).is(edx));

  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ test(dividend, dividend);
    DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(dividend, Abs(divisor));
    if (divisor < 0) __ neg(edx);
    return;
  }

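  // Otherwise floored and truncated division differ by one exactly when the
  // division is inexact and the operands' signs differ. The fixup below uses
  // floor(n / d) == trunc((n + 1) / d) - 1 for n < 0 < d (and
  // trunc((n - 1) / d) - 1 for d < 0 < n); e.g. floor(-5 / 3) ==
  // trunc(-4 / 3) - 1 == -2.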
  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister(instr->temp3());
  DCHECK(!temp.is(dividend) && !temp.is(eax) && !temp.is(edx));
  Label needs_adjustment, done;
  __ cmp(dividend, Immediate(0));
  __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
  __ TruncatingDiv(dividend, Abs(divisor));
  if (divisor < 0) __ neg(edx);
  __ jmp(&done, Label::kNear);
  __ bind(&needs_adjustment);
  __ lea(temp, Operand(dividend, divisor > 0 ? 1 : -1));
  __ TruncatingDiv(temp, Abs(divisor));
  if (divisor < 0) __ neg(edx);
  __ dec(edx);
  __ bind(&done);
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  Register remainder = ToRegister(instr->temp());
  Register result = ToRegister(instr->result());
  DCHECK(dividend.is(eax));
  DCHECK(remainder.is(edx));
  DCHECK(result.is(eax));
  DCHECK(!divisor.is(eax));
  DCHECK(!divisor.is(edx));

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ test(divisor, divisor);
    DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ test(dividend, dividend);
    __ j(not_zero, &dividend_not_zero, Label::kNear);
    __ test(divisor, divisor);
    DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    Label dividend_not_min_int;
    __ cmp(dividend, kMinInt);
    __ j(not_zero, &dividend_not_min_int, Label::kNear);
    __ cmp(divisor, -1);
    DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
    __ bind(&dividend_not_min_int);
  }

  // Sign extend to edx (= remainder).
  __ cdq();
  __ idiv(divisor);

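  // idiv truncates toward zero; adjust to flooring. If the remainder is
  // nonzero and its sign (== the dividend's) differs from the divisor's,
  // the xor makes the scratch negative, sar 31 turns it into -1, and the
  // add decrements the quotient by one.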
  Label done;
  __ test(remainder, remainder);
  __ j(zero, &done, Label::kNear);
  __ xor_(remainder, divisor);
  __ sar(remainder, 31);
  __ add(result, remainder);
  __ bind(&done);
}


void LCodeGen::DoMulI(LMulI* instr) {
  Register left = ToRegister(instr->left());
  LOperand* right = instr->right();

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ mov(ToRegister(instr->temp()), left);
  }

  if (right->IsConstantOperand()) {
    // Try strength reductions on the multiplication.
    // All replacement instructions are at most as long as the imul
    // and have better latency.
    int constant = ToInteger32(LConstantOperand::cast(right));
    if (constant == -1) {
      __ neg(left);
    } else if (constant == 0) {
      __ xor_(left, Operand(left));
    } else if (constant == 2) {
      __ add(left, Operand(left));
    } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
      // If we know that the multiplication can't overflow, it's safe to
      // use instructions that don't set the overflow flag for the
      // multiplication.
      switch (constant) {
        case 1:
          // Do nothing.
          break;
        case 3:
          __ lea(left, Operand(left, left, times_2, 0));
          break;
        case 4:
          __ shl(left, 2);
          break;
        case 5:
          __ lea(left, Operand(left, left, times_4, 0));
          break;
        case 8:
          __ shl(left, 3);
          break;
        case 9:
          __ lea(left, Operand(left, left, times_8, 0));
          break;
        case 16:
          __ shl(left, 4);
          break;
        default:
          __ imul(left, left, constant);
          break;
      }
    } else {
      __ imul(left, left, constant);
    }
  } else {
    if (instr->hydrogen()->representation().IsSmi()) {
      __ SmiUntag(left);
    }
    __ imul(left, ToOperand(right));
  }

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
  }

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Bail out if the result is supposed to be negative zero.
    Label done;
    __ test(left, Operand(left));
    __ j(not_zero, &done, Label::kNear);
    if (right->IsConstantOperand()) {
      if (ToInteger32(LConstantOperand::cast(right)) < 0) {
        DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
      } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
        __ cmp(ToRegister(instr->temp()), Immediate(0));
        DeoptimizeIf(less, instr, DeoptimizeReason::kMinusZero);
      }
    } else {
      // Test the non-zero operand for negative sign.
      __ or_(ToRegister(instr->temp()), ToOperand(right));
      DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
    }
    __ bind(&done);
  }
}


void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  DCHECK(left->Equals(instr->result()));
  DCHECK(left->IsRegister());

  if (right->IsConstantOperand()) {
    int32_t right_operand =
        ToRepresentation(LConstantOperand::cast(right),
                         instr->hydrogen()->representation());
    switch (instr->op()) {
      case Token::BIT_AND:
        __ and_(ToRegister(left), right_operand);
        break;
      case Token::BIT_OR:
        __ or_(ToRegister(left), right_operand);
        break;
      case Token::BIT_XOR:
        if (right_operand == int32_t(~0)) {
          __ not_(ToRegister(left));
        } else {
          __ xor_(ToRegister(left), right_operand);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    switch (instr->op()) {
      case Token::BIT_AND:
        __ and_(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_OR:
        __ or_(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_XOR:
        __ xor_(ToRegister(left), ToOperand(right));
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  DCHECK(left->Equals(instr->result()));
  DCHECK(left->IsRegister());
  if (right->IsRegister()) {
    DCHECK(ToRegister(right).is(ecx));

    switch (instr->op()) {
      case Token::ROR:
        __ ror_cl(ToRegister(left));
        break;
      case Token::SAR:
        __ sar_cl(ToRegister(left));
        break;
      case Token::SHR:
        __ shr_cl(ToRegister(left));
        if (instr->can_deopt()) {
          __ test(ToRegister(left), ToRegister(left));
          DeoptimizeIf(sign, instr, DeoptimizeReason::kNegativeValue);
        }
        break;
      case Token::SHL:
        __ shl_cl(ToRegister(left));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    int value = ToInteger32(LConstantOperand::cast(right));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count == 0 && instr->can_deopt()) {
          __ test(ToRegister(left), ToRegister(left));
          DeoptimizeIf(sign, instr, DeoptimizeReason::kNegativeValue);
        } else {
          __ ror(ToRegister(left), shift_count);
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ sar(ToRegister(left), shift_count);
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ shr(ToRegister(left), shift_count);
        } else if (instr->can_deopt()) {
          __ test(ToRegister(left), ToRegister(left));
          DeoptimizeIf(sign, instr, DeoptimizeReason::kNegativeValue);
        }
        break;
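      // For a Smi-valued SHL that can deopt, shift by count - 1 first and
      // let SmiTag (on ia32 an add of the register to itself, i.e. the last
      // shift by one) set the overflow flag, catching results that no
      // longer fit in a Smi.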
      case Token::SHL:
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi() &&
              instr->can_deopt()) {
            if (shift_count != 1) {
              __ shl(ToRegister(left), shift_count - 1);
            }
            __ SmiTag(ToRegister(left));
            DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
          } else {
            __ shl(ToRegister(left), shift_count);
          }
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  DCHECK(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
    __ sub(ToOperand(left),
           ToImmediate(right, instr->hydrogen()->representation()));
  } else {
    __ sub(ToRegister(left), ToOperand(right));
  }
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
  }
}


void LCodeGen::DoConstantI(LConstantI* instr) {
  __ Move(ToRegister(instr->result()), Immediate(instr->value()));
}


void LCodeGen::DoConstantS(LConstantS* instr) {
  __ Move(ToRegister(instr->result()), Immediate(instr->value()));
}


void LCodeGen::DoConstantD(LConstantD* instr) {
  uint64_t const bits = instr->bits();
  uint32_t const lower = static_cast<uint32_t>(bits);
  uint32_t const upper = static_cast<uint32_t>(bits >> 32);
  DCHECK(instr->result()->IsDoubleRegister());

  XMMRegister result = ToDoubleRegister(instr->result());
  if (bits == 0u) {
    __ xorps(result, result);
  } else {
    Register temp = ToRegister(instr->temp());
    if (CpuFeatures::IsSupported(SSE4_1)) {
      CpuFeatureScope scope2(masm(), SSE4_1);
      if (lower != 0) {
        __ Move(temp, Immediate(lower));
        __ movd(result, Operand(temp));
        __ Move(temp, Immediate(upper));
        __ pinsrd(result, Operand(temp), 1);
      } else {
        __ xorps(result, result);
        __ Move(temp, Immediate(upper));
        __ pinsrd(result, Operand(temp), 1);
      }
    } else {
      __ Move(temp, Immediate(upper));
      __ movd(result, Operand(temp));
      __ psllq(result, 32);
      if (lower != 0u) {
        XMMRegister xmm_scratch = double_scratch0();
        __ Move(temp, Immediate(lower));
        __ movd(xmm_scratch, Operand(temp));
        __ orps(result, xmm_scratch);
      }
    }
  }
}


void LCodeGen::DoConstantE(LConstantE* instr) {
  __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value()));
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Register reg = ToRegister(instr->result());
  Handle<Object> object = instr->value(isolate());
  AllowDeferredHandleDereference smi_check;
  __ LoadObject(reg, object);
}


Operand LCodeGen::BuildSeqStringOperand(Register string,
                                        LOperand* index,
                                        String::Encoding encoding) {
  if (index->IsConstantOperand()) {
    int offset = ToRepresentation(LConstantOperand::cast(index),
                                  Representation::Integer32());
    if (encoding == String::TWO_BYTE_ENCODING) {
      offset *= kUC16Size;
    }
    STATIC_ASSERT(kCharSize == 1);
    return FieldOperand(string, SeqString::kHeaderSize + offset);
  }
  return FieldOperand(
      string, ToRegister(index),
      encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
      SeqString::kHeaderSize);
}


void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register result = ToRegister(instr->result());
  Register string = ToRegister(instr->string());

  if (FLAG_debug_code) {
    __ push(string);
    __ mov(string, FieldOperand(string, HeapObject::kMapOffset));
    __ movzx_b(string, FieldOperand(string, Map::kInstanceTypeOffset));

    __ and_(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ cmp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
                                 ? one_byte_seq_type : two_byte_seq_type));
    __ Check(equal, kUnexpectedStringType);
    __ pop(string);
  }

  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ movzx_b(result, operand);
  } else {
    __ movzx_w(result, operand);
  }
}


void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());

  if (FLAG_debug_code) {
    Register value = ToRegister(instr->value());
    Register index = ToRegister(instr->index());
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    int encoding_mask =
        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
            ? one_byte_seq_type : two_byte_seq_type;
    __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
  }

  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (instr->value()->IsConstantOperand()) {
    int value = ToRepresentation(LConstantOperand::cast(instr->value()),
                                 Representation::Integer32());
    DCHECK_LE(0, value);
    if (encoding == String::ONE_BYTE_ENCODING) {
      DCHECK_LE(value, String::kMaxOneByteCharCode);
      __ mov_b(operand, static_cast<int8_t>(value));
    } else {
      DCHECK_LE(value, String::kMaxUtf16CodeUnit);
      __ mov_w(operand, static_cast<int16_t>(value));
    }
  } else {
    Register value = ToRegister(instr->value());
    if (encoding == String::ONE_BYTE_ENCODING) {
      __ mov_b(operand, value);
    } else {
      __ mov_w(operand, value);
    }
  }
}


void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();

  if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
    if (right->IsConstantOperand()) {
      int32_t offset = ToRepresentation(LConstantOperand::cast(right),
                                        instr->hydrogen()->representation());
      __ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset));
    } else {
      Operand address(ToRegister(left), ToRegister(right), times_1, 0);
      __ lea(ToRegister(instr->result()), address);
    }
  } else {
    if (right->IsConstantOperand()) {
      __ add(ToOperand(left),
             ToImmediate(right, instr->hydrogen()->representation()));
    } else {
      __ add(ToRegister(left), ToOperand(right));
    }
    if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
      DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
    }
  }
}


void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  DCHECK(left->Equals(instr->result()));
  HMathMinMax::Operation operation = instr->hydrogen()->operation();
  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
    Label return_left;
    Condition condition = (operation == HMathMinMax::kMathMin)
        ? less_equal
        : greater_equal;
    if (right->IsConstantOperand()) {
      Operand left_op = ToOperand(left);
      Immediate immediate = ToImmediate(LConstantOperand::cast(instr->right()),
                                        instr->hydrogen()->representation());
      __ cmp(left_op, immediate);
      __ j(condition, &return_left, Label::kNear);
      __ mov(left_op, immediate);
    } else {
      Register left_reg = ToRegister(left);
      Operand right_op = ToOperand(right);
      __ cmp(left_reg, right_op);
      __ j(condition, &return_left, Label::kNear);
      __ mov(left_reg, right_op);
    }
    __ bind(&return_left);
  } else {
    DCHECK(instr->hydrogen()->representation().IsDouble());
    Label check_nan_left, check_zero, return_left, return_right;
    Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
    XMMRegister left_reg = ToDoubleRegister(left);
    XMMRegister right_reg = ToDoubleRegister(right);
    __ ucomisd(left_reg, right_reg);
    __ j(parity_even, &check_nan_left, Label::kNear);  // At least one NaN.
    __ j(equal, &check_zero, Label::kNear);            // left == right.
    __ j(condition, &return_left, Label::kNear);
    __ jmp(&return_right, Label::kNear);

    __ bind(&check_zero);
    XMMRegister xmm_scratch = double_scratch0();
    __ xorps(xmm_scratch, xmm_scratch);
    __ ucomisd(left_reg, xmm_scratch);
    __ j(not_equal, &return_left, Label::kNear);  // left == right != 0.
    // At this point, both left and right are either 0 or -0.
1677 if (operation == HMathMinMax::kMathMin) {
1678 __ orpd(left_reg, right_reg);
1679 } else {
1680 // Since we operate on +0 and/or -0, addsd and andsd have the same effect.
1681 __ addsd(left_reg, right_reg);
1682 }
1683 __ jmp(&return_left, Label::kNear);
1684
1685 __ bind(&check_nan_left);
1686 __ ucomisd(left_reg, left_reg); // NaN check.
1687 __ j(parity_even, &return_left, Label::kNear); // left == NaN.
1688 __ bind(&return_right);
1689 __ movaps(left_reg, right_reg);
1690
1691 __ bind(&return_left);
1692 }
1693 }
1694
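// [Editorial note, not in the original V8 source] ucomisd cannot tell +0
// from -0, hence the bit tricks in the zero case: for min, orpd ORs the
// sign bits, so min(+0, -0) == -0; for max, addsd yields (+0) + (-0) == +0
// under IEEE-754 round-to-nearest, which is exactly max(+0, -0).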

void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  XMMRegister left = ToDoubleRegister(instr->left());
  XMMRegister right = ToDoubleRegister(instr->right());
  XMMRegister result = ToDoubleRegister(instr->result());
  switch (instr->op()) {
    case Token::ADD:
      if (CpuFeatures::IsSupported(AVX)) {
        CpuFeatureScope scope(masm(), AVX);
        __ vaddsd(result, left, right);
      } else {
        DCHECK(result.is(left));
        __ addsd(left, right);
      }
      break;
    case Token::SUB:
      if (CpuFeatures::IsSupported(AVX)) {
        CpuFeatureScope scope(masm(), AVX);
        __ vsubsd(result, left, right);
      } else {
        DCHECK(result.is(left));
        __ subsd(left, right);
      }
      break;
    case Token::MUL:
      if (CpuFeatures::IsSupported(AVX)) {
        CpuFeatureScope scope(masm(), AVX);
        __ vmulsd(result, left, right);
      } else {
        DCHECK(result.is(left));
        __ mulsd(left, right);
      }
      break;
    case Token::DIV:
      if (CpuFeatures::IsSupported(AVX)) {
        CpuFeatureScope scope(masm(), AVX);
        __ vdivsd(result, left, right);
      } else {
        DCHECK(result.is(left));
        __ divsd(left, right);
      }
      // Don't delete this mov. It may improve performance on some CPUs,
      // when there is a (v)mulsd depending on the result.
      __ movaps(result, result);
      break;
    case Token::MOD: {
      // Pass two doubles as arguments on the stack.
      __ PrepareCallCFunction(4, eax);
      __ movsd(Operand(esp, 0 * kDoubleSize), left);
      __ movsd(Operand(esp, 1 * kDoubleSize), right);
      __ CallCFunction(
          ExternalReference::mod_two_doubles_operation(isolate()),
          4);

      // Return value is in st(0) on ia32.
      // Store it into the result register.
      __ sub(Operand(esp), Immediate(kDoubleSize));
      __ fstp_d(Operand(esp, 0));
      __ movsd(result, Operand(esp, 0));
      __ add(Operand(esp), Immediate(kDoubleSize));
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}

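// [Editorial note, not in the original V8 source] On ia32 the C calling
// convention passes arguments on the stack and returns doubles in x87
// st(0). Two doubles occupy four machine words, hence the four slots
// reserved by PrepareCallCFunction, and the result must round-trip through
// memory (fstp_d, then movsd) to land in an XMM register.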

void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->left()).is(edx));
  DCHECK(ToRegister(instr->right()).is(eax));
  DCHECK(ToRegister(instr->result()).is(eax));

  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
  CallCode(code, RelocInfo::CODE_TARGET, instr);
}


template <class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();

  if (right_block == left_block || cc == no_condition) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
  } else if (right_block == next_block) {
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
  } else {
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
    __ jmp(chunk_->GetAssemblyLabel(right_block));
  }
}

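// [Editorial note, not in the original V8 source] EmitBranch exploits block
// layout: if the false block is emitted next, one "j cc, true" suffices and
// the false path falls through; if the true block is next, the condition is
// negated instead. Only when neither successor follows does it cost both a
// conditional and an unconditional jump.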

template <class InstrType>
void LCodeGen::EmitTrueBranch(InstrType instr, Condition cc) {
  int true_block = instr->TrueDestination(chunk_);
  if (cc == no_condition) {
    __ jmp(chunk_->GetAssemblyLabel(true_block));
  } else {
    __ j(cc, chunk_->GetAssemblyLabel(true_block));
  }
}


template <class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
  int false_block = instr->FalseDestination(chunk_);
  if (cc == no_condition) {
    __ jmp(chunk_->GetAssemblyLabel(false_block));
  } else {
    __ j(cc, chunk_->GetAssemblyLabel(false_block));
  }
}


void LCodeGen::DoBranch(LBranch* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsSmiOrInteger32()) {
    Register reg = ToRegister(instr->value());
    __ test(reg, Operand(reg));
    EmitBranch(instr, not_zero);
  } else if (r.IsDouble()) {
    DCHECK(!info()->IsStub());
    XMMRegister reg = ToDoubleRegister(instr->value());
    XMMRegister xmm_scratch = double_scratch0();
    __ xorps(xmm_scratch, xmm_scratch);
    __ ucomisd(reg, xmm_scratch);
    EmitBranch(instr, not_equal);
  } else {
    DCHECK(r.IsTagged());
    Register reg = ToRegister(instr->value());
    HType type = instr->hydrogen()->value()->type();
    if (type.IsBoolean()) {
      DCHECK(!info()->IsStub());
      __ cmp(reg, factory()->true_value());
      EmitBranch(instr, equal);
    } else if (type.IsSmi()) {
      DCHECK(!info()->IsStub());
      __ test(reg, Operand(reg));
      EmitBranch(instr, not_equal);
    } else if (type.IsJSArray()) {
      DCHECK(!info()->IsStub());
      EmitBranch(instr, no_condition);
    } else if (type.IsHeapNumber()) {
      DCHECK(!info()->IsStub());
      XMMRegister xmm_scratch = double_scratch0();
      __ xorps(xmm_scratch, xmm_scratch);
      __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
      EmitBranch(instr, not_equal);
    } else if (type.IsString()) {
      DCHECK(!info()->IsStub());
      __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
      EmitBranch(instr, not_equal);
    } else {
      ToBooleanHints expected = instr->hydrogen()->expected_input_types();
      if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;

      if (expected & ToBooleanHint::kUndefined) {
        // undefined -> false.
        __ cmp(reg, factory()->undefined_value());
        __ j(equal, instr->FalseLabel(chunk_));
      }
      if (expected & ToBooleanHint::kBoolean) {
        // true -> true.
        __ cmp(reg, factory()->true_value());
        __ j(equal, instr->TrueLabel(chunk_));
        // false -> false.
        __ cmp(reg, factory()->false_value());
        __ j(equal, instr->FalseLabel(chunk_));
      }
      if (expected & ToBooleanHint::kNull) {
        // 'null' -> false.
        __ cmp(reg, factory()->null_value());
        __ j(equal, instr->FalseLabel(chunk_));
      }

      if (expected & ToBooleanHint::kSmallInteger) {
        // Smis: 0 -> false, all other -> true.
        __ test(reg, Operand(reg));
        __ j(equal, instr->FalseLabel(chunk_));
        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
      } else if (expected & ToBooleanHint::kNeedsMap) {
        // If we need a map later and have a Smi -> deopt.
        __ test(reg, Immediate(kSmiTagMask));
        DeoptimizeIf(zero, instr, DeoptimizeReason::kSmi);
      }

      Register map = no_reg;  // Keep the compiler happy.
      if (expected & ToBooleanHint::kNeedsMap) {
        map = ToRegister(instr->temp());
        DCHECK(!map.is(reg));
        __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));

        if (expected & ToBooleanHint::kCanBeUndetectable) {
          // Undetectable -> false.
          __ test_b(FieldOperand(map, Map::kBitFieldOffset),
                    Immediate(1 << Map::kIsUndetectable));
          __ j(not_zero, instr->FalseLabel(chunk_));
        }
      }

      if (expected & ToBooleanHint::kReceiver) {
        // spec object -> true.
        __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
        __ j(above_equal, instr->TrueLabel(chunk_));
      }

      if (expected & ToBooleanHint::kString) {
        // String value -> false iff empty.
        Label not_string;
        __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
        __ j(above_equal, &not_string, Label::kNear);
        __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
        __ j(not_zero, instr->TrueLabel(chunk_));
        __ jmp(instr->FalseLabel(chunk_));
        __ bind(&not_string);
      }

      if (expected & ToBooleanHint::kSymbol) {
        // Symbol value -> true.
        __ CmpInstanceType(map, SYMBOL_TYPE);
        __ j(equal, instr->TrueLabel(chunk_));
      }

      if (expected & ToBooleanHint::kSimdValue) {
        // SIMD value -> true.
        __ CmpInstanceType(map, SIMD128_VALUE_TYPE);
        __ j(equal, instr->TrueLabel(chunk_));
      }

      if (expected & ToBooleanHint::kHeapNumber) {
        // heap number -> false iff +0, -0, or NaN.
        Label not_heap_number;
        __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
               factory()->heap_number_map());
        __ j(not_equal, &not_heap_number, Label::kNear);
        XMMRegister xmm_scratch = double_scratch0();
        __ xorps(xmm_scratch, xmm_scratch);
        __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
        __ j(zero, instr->FalseLabel(chunk_));
        __ jmp(instr->TrueLabel(chunk_));
        __ bind(&not_heap_number);
      }

      if (expected != ToBooleanHint::kAny) {
        // We've seen something for the first time -> deopt.
        // This can only happen if we are not generic already.
        DeoptimizeIf(no_condition, instr, DeoptimizeReason::kUnexpectedObject);
      }
    }
  }
}

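// [Editorial note, not in the original V8 source] The hint-driven checks
// above mirror JS ToBoolean. E.g. for "if (x)": x === "" reaches the false
// label via the string length test, x === -0 via the ucomisd against zero,
// and a value class never recorded by type feedback hits the final deopt so
// the code can be regenerated with wider hints.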

void LCodeGen::EmitGoto(int block) {
  if (!IsNextEmittedBlock(block)) {
    __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
  }
}


void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());
}


Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = no_condition;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = equal;
      break;
    case Token::NE:
    case Token::NE_STRICT:
      cond = not_equal;
      break;
    case Token::LT:
      cond = is_unsigned ? below : less;
      break;
    case Token::GT:
      cond = is_unsigned ? above : greater;
      break;
    case Token::LTE:
      cond = is_unsigned ? below_equal : less_equal;
      break;
    case Token::GTE:
      cond = is_unsigned ? above_equal : greater_equal;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}

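// [Editorial note, not in the original V8 source] "below"/"above" consult
// the carry flag and fit unsigned (and ucomisd) comparisons, while
// "less"/"greater" use sign and overflow. E.g. comparing 0xFFFFFFFF with 1:
// "above" holds when the bits mean uint32 (4294967295 > 1), but "greater"
// fails when they mean int32 (-1 < 1).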

void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  bool is_unsigned =
      instr->is_double() ||
      instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
      instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
  Condition cc = TokenToCondition(instr->op(), is_unsigned);

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
                         ? instr->TrueDestination(chunk_)
                         : instr->FalseDestination(chunk_);
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
      // Don't base result on EFLAGS when a NaN is involved. Instead
      // jump to the false block.
      __ j(parity_even, instr->FalseLabel(chunk_));
    } else {
      if (right->IsConstantOperand()) {
        __ cmp(ToOperand(left),
               ToImmediate(right, instr->hydrogen()->representation()));
      } else if (left->IsConstantOperand()) {
        __ cmp(ToOperand(right),
               ToImmediate(left, instr->hydrogen()->representation()));
        // We commuted the operands, so commute the condition.
        cc = CommuteCondition(cc);
      } else {
        __ cmp(ToRegister(left), ToOperand(right));
      }
    }
    EmitBranch(instr, cc);
  }
}


void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->left());

  if (instr->right()->IsConstantOperand()) {
    Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
    __ CmpObject(left, right);
  } else {
    Operand right = ToOperand(instr->right());
    __ cmp(left, right);
  }
  EmitBranch(instr, equal);
}


void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
  if (instr->hydrogen()->representation().IsTagged()) {
    Register input_reg = ToRegister(instr->object());
    __ cmp(input_reg, factory()->the_hole_value());
    EmitBranch(instr, equal);
    return;
  }

  XMMRegister input_reg = ToDoubleRegister(instr->object());
  __ ucomisd(input_reg, input_reg);
  EmitFalseBranch(instr, parity_odd);

  __ sub(esp, Immediate(kDoubleSize));
  __ movsd(MemOperand(esp, 0), input_reg);

  __ add(esp, Immediate(kDoubleSize));
  int offset = sizeof(kHoleNanUpper32);
  __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
  EmitBranch(instr, equal);
}

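// [Editorial note, not in the original V8 source] In unboxed double fields
// the hole is a dedicated NaN bit pattern whose upper 32 bits equal
// kHoleNanUpper32. A non-NaN input sets parity_odd under ucomisd and is
// rejected immediately; otherwise the double is spilled and only its upper
// word compared, since NaNs produced by ordinary arithmetic carry a
// different payload.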

Condition LCodeGen::EmitIsString(Register input,
                                 Register temp1,
                                 Label* is_not_string,
                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
  if (check_needed == INLINE_SMI_CHECK) {
    __ JumpIfSmi(input, is_not_string);
  }

  Condition cond = masm_->IsObjectStringType(input, temp1, temp1);

  return cond;
}


void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  SmiCheck check_needed =
      instr->hydrogen()->value()->type().IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;

  Condition true_cond = EmitIsString(
      reg, temp, instr->FalseLabel(chunk_), check_needed);

  EmitBranch(instr, true_cond);
}


void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  Operand input = ToOperand(instr->value());

  __ test(input, Immediate(kSmiTagMask));
  EmitBranch(instr, zero);
}


void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    STATIC_ASSERT(kSmiTag == 0);
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }
  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
  __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
            Immediate(1 << Map::kIsUndetectable));
  EmitBranch(instr, not_zero);
}


static Condition ComputeCompareCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return equal;
    case Token::LT:
      return less;
    case Token::GT:
      return greater;
    case Token::LTE:
      return less_equal;
    case Token::GTE:
      return greater_equal;
    default:
      UNREACHABLE();
      return no_condition;
  }
}


void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->left()).is(edx));
  DCHECK(ToRegister(instr->right()).is(eax));

  Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
  CallCode(code, RelocInfo::CODE_TARGET, instr);
  __ CompareRoot(eax, Heap::kTrueValueRootIndex);
  EmitBranch(instr, equal);
}


static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  DCHECK(from == to || to == LAST_TYPE);
  return from;
}


static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return equal;
  if (to == LAST_TYPE) return above_equal;
  if (from == FIRST_TYPE) return below_equal;
  UNREACHABLE();
  return equal;
}


void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }

  __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
  EmitBranch(instr, BranchCondition(instr->hydrogen()));
}

// Branches to a label or falls through with the answer in the z flag. Trashes
// the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String> class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  DCHECK(!input.is(temp));
  DCHECK(!input.is(temp2));
  DCHECK(!temp.is(temp2));
  __ JumpIfSmi(input, is_false);

  __ CmpObjectType(input, FIRST_FUNCTION_TYPE, temp);
  STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
    __ j(above_equal, is_true);
  } else {
    __ j(above_equal, is_false);
  }

  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
  // Check if the constructor in the map is a function.
  __ GetMapConstructor(temp, temp, temp2);
  // Objects with a non-function constructor have class 'Object'.
  __ CmpInstanceType(temp2, JS_FUNCTION_TYPE);
  if (String::Equals(class_name, isolate()->factory()->Object_string())) {
    __ j(not_equal, is_true);
  } else {
    __ j(not_equal, is_false);
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ mov(temp, FieldOperand(temp,
                            SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is internalized since it's a literal.
  // The name in the constructor is internalized because of the way the context
  // is booted. This routine isn't expected to work for random API-created
  // classes and it doesn't have to because you can't access it with natives
  // syntax. Since both sides are internalized it is sufficient to use an
  // identity comparison.
  __ cmp(temp, class_name);
  // End with the answer in the z flag.
}


void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());

  Handle<String> class_name = instr->hydrogen()->class_name();

  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
                  class_name, input, temp, temp2);

  EmitBranch(instr, equal);
}


void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
  EmitBranch(instr, equal);
}


void LCodeGen::DoHasInPrototypeChainAndBranch(
    LHasInPrototypeChainAndBranch* instr) {
  Register const object = ToRegister(instr->object());
  Register const object_map = ToRegister(instr->scratch());
  Register const object_prototype = object_map;
  Register const prototype = ToRegister(instr->prototype());

  // The {object} must be a spec object. It's sufficient to know that {object}
  // is not a smi, since all other non-spec objects have {null} prototypes and
  // will be ruled out below.
  if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
    __ test(object, Immediate(kSmiTagMask));
    EmitFalseBranch(instr, zero);
  }

  // Loop through the {object}'s prototype chain looking for the {prototype}.
  __ mov(object_map, FieldOperand(object, HeapObject::kMapOffset));
  Label loop;
  __ bind(&loop);

  // Deoptimize if the object needs to be access checked.
  __ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
            Immediate(1 << Map::kIsAccessCheckNeeded));
  DeoptimizeIf(not_zero, instr, DeoptimizeReason::kAccessCheck);
  // Deoptimize for proxies.
  __ CmpInstanceType(object_map, JS_PROXY_TYPE);
  DeoptimizeIf(equal, instr, DeoptimizeReason::kProxy);

  __ mov(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
  __ cmp(object_prototype, factory()->null_value());
  EmitFalseBranch(instr, equal);
  __ cmp(object_prototype, prototype);
  EmitTrueBranch(instr, equal);
  __ mov(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset));
  __ jmp(&loop);
}

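// [Editorial note, not in the original V8 source] The loop above is the
// JS-level walk
//   for (p = object.__proto__; p !== null; p = p.__proto__)
//     if (p === prototype) return true;
// with one scratch register doubling as the current map and the prototype
// loaded from it, and deopts for proxies and access-checked objects where
// the walk would be observable.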

void LCodeGen::DoCmpT(LCmpT* instr) {
  Token::Value op = instr->op();

  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = ComputeCompareCondition(op);
  Label true_value, done;
  __ test(eax, Operand(eax));
  __ j(condition, &true_value, Label::kNear);
  __ mov(ToRegister(instr->result()), factory()->false_value());
  __ jmp(&done, Label::kNear);
  __ bind(&true_value);
  __ mov(ToRegister(instr->result()), factory()->true_value());
  __ bind(&done);
}

void LCodeGen::EmitReturn(LReturn* instr) {
  int extra_value_count = 1;

  if (instr->has_constant_parameter_count()) {
    int parameter_count = ToInteger32(instr->constant_parameter_count());
    __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
  } else {
    DCHECK(info()->IsStub());  // Functions would need to drop one more value.
    Register reg = ToRegister(instr->parameter_count());
    // The argument count parameter is a smi.
    __ SmiUntag(reg);
    Register return_addr_reg = reg.is(ecx) ? ebx : ecx;

    // Emit code to restore the stack based on instr->parameter_count().
    __ pop(return_addr_reg);  // Save the return address.
    __ shl(reg, kPointerSizeLog2);
    __ add(esp, reg);
    __ jmp(return_addr_reg);
  }
}

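// [Editorial note, not in the original V8 source] With a dynamic parameter
// count "ret n" is unusable, so the return address is popped into a scratch
// register, esp is advanced past count * kPointerSize of arguments, and an
// indirect jmp resumes the caller. E.g. a count of 3 discards the return
// address plus 12 bytes of arguments.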

void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace && info()->IsOptimizing()) {
    // Preserve the return value on the stack and rely on the runtime call
    // to return the value in the same register. We're leaving the code
    // managed by the register allocator and tearing down the frame, it's
    // safe to write to the context register.
    __ push(eax);
    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
    __ CallRuntime(Runtime::kTraceExit);
  }
  if (info()->saves_caller_doubles()) RestoreCallerDoubles();
  if (NeedsEagerFrame()) {
    __ mov(esp, ebp);
    __ pop(ebp);
  }

  EmitReturn(instr);
}


void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ mov(result, ContextOperand(context, instr->slot_index()));

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ cmp(result, factory()->the_hole_value());
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
    } else {
      Label is_not_hole;
      __ j(not_equal, &is_not_hole, Label::kNear);
      __ mov(result, factory()->undefined_value());
      __ bind(&is_not_hole);
    }
  }
}


void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());

  Label skip_assignment;

  Operand target = ContextOperand(context, instr->slot_index());
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ cmp(target, factory()->the_hole_value());
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
    } else {
      __ j(not_equal, &skip_assignment, Label::kNear);
    }
  }

  __ mov(target, value);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed =
        instr->hydrogen()->value()->type().IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    Register temp = ToRegister(instr->temp());
    int offset = Context::SlotOffset(instr->slot_index());
    __ RecordWriteContextSlot(context,
                              offset,
                              value,
                              temp,
                              kSaveFPRegs,
                              EMIT_REMEMBERED_SET,
                              check_needed);
  }

  __ bind(&skip_assignment);
}


void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    Register result = ToRegister(instr->result());
    MemOperand operand = instr->object()->IsConstantOperand()
                             ? MemOperand::StaticVariable(ToExternalReference(
                                   LConstantOperand::cast(instr->object())))
                             : MemOperand(ToRegister(instr->object()), offset);
    __ Load(result, operand, access.representation());
    return;
  }

  Register object = ToRegister(instr->object());
  if (instr->hydrogen()->representation().IsDouble()) {
    XMMRegister result = ToDoubleRegister(instr->result());
    __ movsd(result, FieldOperand(object, offset));
    return;
  }

  Register result = ToRegister(instr->result());
  if (!access.IsInobject()) {
    __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
    object = result;
  }
  __ Load(result, FieldOperand(object, offset), access.representation());
}


void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
  DCHECK(!operand->IsDoubleRegister());
  if (operand->IsConstantOperand()) {
    Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
    AllowDeferredHandleDereference smi_check;
    if (object->IsSmi()) {
      __ Push(Handle<Smi>::cast(object));
    } else {
      __ PushHeapObject(Handle<HeapObject>::cast(object));
    }
  } else if (operand->IsRegister()) {
    __ push(ToRegister(operand));
  } else {
    __ push(ToOperand(operand));
  }
}


void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register function = ToRegister(instr->function());
  Register temp = ToRegister(instr->temp());
  Register result = ToRegister(instr->result());

  // Get the prototype or initial map from the function.
  __ mov(result,
         FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
  DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);

  // If the function does not have an initial map, we're done.
  Label done;
  __ CmpObjectType(result, MAP_TYPE, temp);
  __ j(not_equal, &done, Label::kNear);

  // Get the prototype from the initial map.
  __ mov(result, FieldOperand(result, Map::kPrototypeOffset));

  // All done.
  __ bind(&done);
}


void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
  Register result = ToRegister(instr->result());
  __ LoadRoot(result, instr->index());
}


void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());
  if (instr->length()->IsConstantOperand() &&
      instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
    int index = (const_length - const_index) + 1;
    __ mov(result, Operand(arguments, index * kPointerSize));
  } else {
    Register length = ToRegister(instr->length());
    Operand index = ToOperand(instr->index());
    // There are two words (the saved frame pointer and the return address)
    // between the frame pointer and the last argument. Subtracting the index
    // from the length accounts for one of them; the extra kPointerSize
    // displacement below accounts for the other.
    __ sub(length, index);
    __ mov(result, Operand(arguments, length, times_4, kPointerSize));
  }
}

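// [Editorial note, not in the original V8 source] Both paths compute the
// same address, arguments + (length - index + 1) * kPointerSize: the first
// branch folds it into a constant displacement, the second uses a scaled
// index. E.g. length == 2 and index == 0 reads the word three slots above
// the arguments pointer, past the two extra words described above.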

void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
  ElementsKind elements_kind = instr->elements_kind();
  LOperand* key = instr->key();
  if (!key->IsConstantOperand() &&
      ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
                                  elements_kind)) {
    __ SmiUntag(ToRegister(key));
  }
  Operand operand(BuildFastArrayOperand(
      instr->elements(),
      key,
      instr->hydrogen()->key()->representation(),
      elements_kind,
      instr->base_offset()));
  if (elements_kind == FLOAT32_ELEMENTS) {
    XMMRegister result(ToDoubleRegister(instr->result()));
    __ movss(result, operand);
    __ cvtss2sd(result, result);
  } else if (elements_kind == FLOAT64_ELEMENTS) {
    __ movsd(ToDoubleRegister(instr->result()), operand);
  } else {
    Register result(ToRegister(instr->result()));
    switch (elements_kind) {
      case INT8_ELEMENTS:
        __ movsx_b(result, operand);
        break;
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
        __ movzx_b(result, operand);
        break;
      case INT16_ELEMENTS:
        __ movsx_w(result, operand);
        break;
      case UINT16_ELEMENTS:
        __ movzx_w(result, operand);
        break;
      case INT32_ELEMENTS:
        __ mov(result, operand);
        break;
      case UINT32_ELEMENTS:
        __ mov(result, operand);
        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
          __ test(result, Operand(result));
          DeoptimizeIf(negative, instr, DeoptimizeReason::kNegativeValue);
        }
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
      case FAST_STRING_WRAPPER_ELEMENTS:
      case SLOW_STRING_WRAPPER_ELEMENTS:
      case NO_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
  if (instr->hydrogen()->RequiresHoleCheck()) {
    Operand hole_check_operand = BuildFastArrayOperand(
        instr->elements(), instr->key(),
        instr->hydrogen()->key()->representation(),
        FAST_DOUBLE_ELEMENTS,
        instr->base_offset() + sizeof(kHoleNanLower32));
    __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
    DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
  }

  Operand double_load_operand = BuildFastArrayOperand(
      instr->elements(),
      instr->key(),
      instr->hydrogen()->key()->representation(),
      FAST_DOUBLE_ELEMENTS,
      instr->base_offset());
  XMMRegister result = ToDoubleRegister(instr->result());
  __ movsd(result, double_load_operand);
}


void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
  Register result = ToRegister(instr->result());

  // Load the result.
  __ mov(result,
         BuildFastArrayOperand(instr->elements(), instr->key(),
                               instr->hydrogen()->key()->representation(),
                               FAST_ELEMENTS, instr->base_offset()));

  // Check for the hole value.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
      __ test(result, Immediate(kSmiTagMask));
      DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotASmi);
    } else {
      __ cmp(result, factory()->the_hole_value());
      DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
    }
  } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
    DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
    Label done;
    __ cmp(result, factory()->the_hole_value());
    __ j(not_equal, &done);
    if (info()->IsStub()) {
      // A stub can safely convert the hole to undefined only if the array
      // protector cell contains (Smi) Isolate::kProtectorValid.
      // Otherwise it needs to bail out.
      __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
      __ cmp(FieldOperand(result, PropertyCell::kValueOffset),
             Immediate(Smi::FromInt(Isolate::kProtectorValid)));
      DeoptimizeIf(not_equal, instr, DeoptimizeReason::kHole);
    }
    __ mov(result, isolate()->factory()->undefined_value());
    __ bind(&done);
  }
}


void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
  if (instr->is_fixed_typed_array()) {
    DoLoadKeyedExternalArray(instr);
  } else if (instr->hydrogen()->representation().IsDouble()) {
    DoLoadKeyedFixedDoubleArray(instr);
  } else {
    DoLoadKeyedFixedArray(instr);
  }
}


Operand LCodeGen::BuildFastArrayOperand(
    LOperand* elements_pointer,
    LOperand* key,
    Representation key_representation,
    ElementsKind elements_kind,
    uint32_t base_offset) {
  Register elements_pointer_reg = ToRegister(elements_pointer);
  int element_shift_size = ElementsKindToShiftSize(elements_kind);
  int shift_size = element_shift_size;
  if (key->IsConstantOperand()) {
    int constant_value = ToInteger32(LConstantOperand::cast(key));
    if (constant_value & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    return Operand(elements_pointer_reg,
                   (constant_value << shift_size) + base_offset);
  } else {
    // Take the tag bit into account while computing the shift size.
    if (key_representation.IsSmi() && (shift_size >= 1)) {
      shift_size -= kSmiTagSize;
    }
    ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
    return Operand(elements_pointer_reg,
                   ToRegister(key),
                   scale_factor,
                   base_offset);
  }
}

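// [Editorial note, not in the original V8 source] A smi key already carries
// a factor of two from its tag, so the scale shrinks by kSmiTagSize. E.g.
// FAST_ELEMENTS has shift size 2 (4-byte slots on ia32): an untagged int32
// key scales by times_4, while a smi key (2 * value in the register) scales
// by times_2 to reach the same slot.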

void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    __ lea(result, Operand(esp, -2 * kPointerSize));
  } else if (instr->hydrogen()->arguments_adaptor()) {
    // Check for arguments adapter frame.
    Label done, adapted;
    __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
    __ mov(result,
           Operand(result, CommonFrameConstants::kContextOrFrameTypeOffset));
    __ cmp(Operand(result),
           Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
    __ j(equal, &adapted, Label::kNear);

    // No arguments adaptor frame.
    __ mov(result, Operand(ebp));
    __ jmp(&done, Label::kNear);

    // Arguments adaptor frame present.
    __ bind(&adapted);
    __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));

    // Result is the frame pointer for the frame if not adapted and for the
    // real frame below the adaptor frame if adapted.
    __ bind(&done);
  } else {
    __ mov(result, Operand(ebp));
  }
}


void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Operand elem = ToOperand(instr->elements());
  Register result = ToRegister(instr->result());

  Label done;

  // If no arguments adaptor frame the number of arguments is fixed.
  __ cmp(ebp, elem);
  __ mov(result, Immediate(scope()->num_parameters()));
  __ j(equal, &done, Label::kNear);

  // Arguments adaptor frame present. Get argument length from there.
  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
  __ mov(result, Operand(result,
                         ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(result);

  // Argument length is in result register.
  __ bind(&done);
}


void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());

  // If the receiver is null or undefined, we have to pass the global
  // object as a receiver to normal functions. Values have to be
  // passed unchanged to builtins and strict-mode functions.
  Label receiver_ok, global_object;
  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
  Register scratch = ToRegister(instr->temp());

  if (!instr->hydrogen()->known_function()) {
    // Do not transform the receiver to object for strict mode
    // functions.
    __ mov(scratch,
           FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
    __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
              Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
    __ j(not_equal, &receiver_ok, dist);

    // Do not transform the receiver to object for builtins.
    __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
              Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
    __ j(not_equal, &receiver_ok, dist);
  }

  // Normal function. Replace undefined or null with global receiver.
  __ cmp(receiver, factory()->null_value());
  __ j(equal, &global_object, Label::kNear);
  __ cmp(receiver, factory()->undefined_value());
  __ j(equal, &global_object, Label::kNear);

  // The receiver should be a JS object.
  __ test(receiver, Immediate(kSmiTagMask));
  DeoptimizeIf(equal, instr, DeoptimizeReason::kSmi);
  __ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, scratch);
  DeoptimizeIf(below, instr, DeoptimizeReason::kNotAJavaScriptObject);

  __ jmp(&receiver_ok, Label::kNear);
  __ bind(&global_object);
  __ mov(receiver, FieldOperand(function, JSFunction::kContextOffset));
  __ mov(receiver, ContextOperand(receiver, Context::NATIVE_CONTEXT_INDEX));
  __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_PROXY_INDEX));
  __ bind(&receiver_ok);
}

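// [Editorial note, not in the original V8 source] This is the sloppy-mode
// receiver coercion. E.g. with
//   function f() { return this; }
//   f.call(null);
// a sloppy f observes the global proxy via the global_object path, whereas
// a strict or native f takes the early receiver_ok exits and sees null
// unchanged.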

void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  DCHECK(receiver.is(eax));  // Used for parameter count.
  DCHECK(function.is(edi));  // Required by InvokeFunction.
  DCHECK(ToRegister(instr->result()).is(eax));

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  __ cmp(length, kArgumentsLimit);
  DeoptimizeIf(above, instr, DeoptimizeReason::kTooManyArguments);

  __ push(receiver);
  __ mov(receiver, length);

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ test(length, Operand(length));
  __ j(zero, &invoke, Label::kNear);
  __ bind(&loop);
  __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
  __ dec(length);
  __ j(not_zero, &loop);

  // Invoke the function.
  __ bind(&invoke);

  InvokeFlag flag = CALL_FUNCTION;
  if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
    DCHECK(!info()->saves_caller_doubles());
    // TODO(ishell): drop current frame before pushing arguments to the stack.
    flag = JUMP_FUNCTION;
    ParameterCount actual(eax);
    // It is safe to use ebx, ecx and edx as scratch registers here given that
    // 1) we are not going to return to caller function anyway,
    // 2) ebx (expected arguments count) and edx (new.target) will be
    //    initialized below.
    PrepareForTailCall(actual, ebx, ecx, edx);
  }

  DCHECK(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
  ParameterCount actual(eax);
  __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
}


void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
  __ int3();
}


void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->value();
  EmitPushTaggedOperand(argument);
}


void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());
}


void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
}


void LCodeGen::DoContext(LContext* instr) {
  Register result = ToRegister(instr->result());
  if (info()->IsOptimizing()) {
    __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
  } else {
    // If there is no frame, the context must be in esi.
    DCHECK(result.is(esi));
  }
}


void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  __ push(Immediate(instr->hydrogen()->pairs()));
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
  __ push(Immediate(instr->hydrogen()->feedback_vector()));
  CallRuntime(Runtime::kDeclareGlobals, instr);
}

void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int formal_parameter_count, int arity,
                                 bool is_tail_call, LInstruction* instr) {
  bool dont_adapt_arguments =
      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
  bool can_invoke_directly =
      dont_adapt_arguments || formal_parameter_count == arity;

  Register function_reg = edi;

  if (can_invoke_directly) {
    // Change context.
    __ mov(esi, FieldOperand(function_reg, JSFunction::kContextOffset));

    // Always initialize new target and number of actual arguments.
    __ mov(edx, factory()->undefined_value());
    __ mov(eax, arity);

    bool is_self_call = function.is_identical_to(info()->closure());

    // Invoke function directly.
    if (is_self_call) {
      Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
      if (is_tail_call) {
        __ Jump(self, RelocInfo::CODE_TARGET);
      } else {
        __ Call(self, RelocInfo::CODE_TARGET);
      }
    } else {
      Operand target = FieldOperand(function_reg, JSFunction::kCodeEntryOffset);
      if (is_tail_call) {
        __ jmp(target);
      } else {
        __ call(target);
      }
    }

    if (!is_tail_call) {
      // Set up deoptimization.
      RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
    }
  } else {
    // We need to adapt arguments.
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(
        this, pointers, Safepoint::kLazyDeopt);
    ParameterCount actual(arity);
    ParameterCount expected(formal_parameter_count);
    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
    __ InvokeFunction(function_reg, expected, actual, flag, generator);
  }
}


void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
  DCHECK(ToRegister(instr->result()).is(eax));

  if (instr->hydrogen()->IsTailCall()) {
    if (NeedsEagerFrame()) __ leave();

    if (instr->target()->IsConstantOperand()) {
      LConstantOperand* target = LConstantOperand::cast(instr->target());
      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      __ jmp(code, RelocInfo::CODE_TARGET);
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
      __ jmp(target);
    }
  } else {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

    if (instr->target()->IsConstantOperand()) {
      LConstantOperand* target = LConstantOperand::cast(instr->target());
      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
      __ call(code, RelocInfo::CODE_TARGET);
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      generator.BeforeCall(__ CallSize(Operand(target)));
      __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
      __ call(target);
    }
    generator.AfterCall();
  }
}


void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
  Register input_reg = ToRegister(instr->value());
  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());
  DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);

  Label slow, allocated, done;
  uint32_t available_regs = eax.bit() | ecx.bit() | edx.bit() | ebx.bit();
  available_regs &= ~input_reg.bit();
  if (instr->context()->IsRegister()) {
    // Make sure that the context isn't overwritten in the AllocateHeapNumber
    // macro below.
    available_regs &= ~ToRegister(instr->context()).bit();
  }

  Register tmp =
      Register::from_code(base::bits::CountTrailingZeros32(available_regs));
  available_regs &= ~tmp.bit();
  Register tmp2 =
      Register::from_code(base::bits::CountTrailingZeros32(available_regs));

  // Preserve the value of all registers.
  PushSafepointRegistersScope scope(this);

  __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it. We do not need to patch the stack since |input| and
  // |result| are the same register and |input| will be restored
  // unchanged by popping safepoint registers.
  __ test(tmp, Immediate(HeapNumber::kSignMask));
  __ j(zero, &done, Label::kNear);

  __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
  __ jmp(&allocated, Label::kNear);

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
                          instr, instr->context());
  // Set the pointer to the new heap number in tmp.
  if (!tmp.is(eax)) __ mov(tmp, eax);
  // Restore input_reg after call to runtime.
  __ LoadFromSafepointRegisterSlot(input_reg, input_reg);

  __ bind(&allocated);
  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
  __ and_(tmp2, ~HeapNumber::kSignMask);
  __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
  __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
  __ StoreToSafepointRegisterSlot(input_reg, tmp);

  __ bind(&done);
}

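// [Editorial note, not in the original V8 source] Tagged Math.abs needs no
// arithmetic: the double is copied into a fresh HeapNumber with the
// IEEE-754 sign bit masked out of the upper word. E.g. -2.5 has upper word
// 0xC0040000; and_(tmp2, ~HeapNumber::kSignMask) turns it into 0x40040000,
// i.e. 2.5.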

void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
  Register input_reg = ToRegister(instr->value());
  __ test(input_reg, Operand(input_reg));
  Label is_positive;
  __ j(not_sign, &is_positive, Label::kNear);
  __ neg(input_reg);  // Sets flags.
  DeoptimizeIf(negative, instr, DeoptimizeReason::kOverflow);
  __ bind(&is_positive);
}


void LCodeGen::DoMathAbs(LMathAbs* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
                                    LMathAbs* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LMathAbs* instr_;
  };

  DCHECK(instr->value()->Equals(instr->result()));
  Representation r = instr->hydrogen()->value()->representation();

  if (r.IsDouble()) {
    XMMRegister scratch = double_scratch0();
    XMMRegister input_reg = ToDoubleRegister(instr->value());
    __ xorps(scratch, scratch);
    __ subsd(scratch, input_reg);
    __ andps(input_reg, scratch);
  } else if (r.IsSmiOrInteger32()) {
    EmitIntegerMathAbs(instr);
  } else {  // Tagged case.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
    Register input_reg = ToRegister(instr->value());
    // Smi check.
    __ JumpIfNotSmi(input_reg, deferred->entry());
    EmitIntegerMathAbs(instr);
    __ bind(deferred->exit());
  }
}

void LCodeGen::DoMathFloorD(LMathFloorD* instr) {
  XMMRegister output_reg = ToDoubleRegister(instr->result());
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  CpuFeatureScope scope(masm(), SSE4_1);
  __ roundsd(output_reg, input_reg, kRoundDown);
}

void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
  XMMRegister xmm_scratch = double_scratch0();
  Register output_reg = ToRegister(instr->result());
  XMMRegister input_reg = ToDoubleRegister(instr->value());

  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope scope(masm(), SSE4_1);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // Deoptimize on negative zero.
      Label non_zero;
      __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
      __ ucomisd(input_reg, xmm_scratch);
      __ j(not_equal, &non_zero, Label::kNear);
      __ movmskpd(output_reg, input_reg);
      __ test(output_reg, Immediate(1));
      DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
      __ bind(&non_zero);
    }
    __ roundsd(xmm_scratch, input_reg, kRoundDown);
    __ cvttsd2si(output_reg, Operand(xmm_scratch));
    // Overflow is signalled with minint.
    __ cmp(output_reg, 0x1);
    DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
  } else {
    Label negative_sign, done;
    // Deoptimize on unordered.
    __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
    __ ucomisd(input_reg, xmm_scratch);
    DeoptimizeIf(parity_even, instr, DeoptimizeReason::kNaN);
    __ j(below, &negative_sign, Label::kNear);

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // Check for negative zero.
      Label positive_sign;
      __ j(above, &positive_sign, Label::kNear);
      __ movmskpd(output_reg, input_reg);
      __ test(output_reg, Immediate(1));
      DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
      __ Move(output_reg, Immediate(0));
      __ jmp(&done, Label::kNear);
      __ bind(&positive_sign);
    }

    // Use truncating instruction (OK because input is positive).
    __ cvttsd2si(output_reg, Operand(input_reg));
    // Overflow is signalled with minint.
    __ cmp(output_reg, 0x1);
    DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
    __ jmp(&done, Label::kNear);

    // Non-zero negative reaches here.
    __ bind(&negative_sign);
    // Truncate, then compare and compensate.
    __ cvttsd2si(output_reg, Operand(input_reg));
    __ Cvtsi2sd(xmm_scratch, output_reg);
    __ ucomisd(input_reg, xmm_scratch);
    __ j(equal, &done, Label::kNear);
    __ sub(output_reg, Immediate(1));
    DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);

    __ bind(&done);
  }
}

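// [Editorial note, not in the original V8 source] The compare-and-compensate
// step exists because cvttsd2si truncates toward zero, which for negative
// inputs is one above the floor. E.g. floor(-1.5): truncation gives -1,
// converting back yields -1.0 != -1.5, so one is subtracted to reach -2;
// floor(-2.0) converts back exactly and needs no adjustment.
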
void LCodeGen::DoMathRoundD(LMathRoundD* instr) {
  XMMRegister xmm_scratch = double_scratch0();
  XMMRegister output_reg = ToDoubleRegister(instr->result());
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  CpuFeatureScope scope(masm(), SSE4_1);
  Label done;
  __ roundsd(output_reg, input_reg, kRoundUp);
  __ Move(xmm_scratch, -0.5);
  __ addsd(xmm_scratch, output_reg);
  __ ucomisd(xmm_scratch, input_reg);
  __ j(below_equal, &done, Label::kNear);
  __ Move(xmm_scratch, 1.0);
  __ subsd(output_reg, xmm_scratch);
  __ bind(&done);
}

void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
  Register output_reg = ToRegister(instr->result());
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  XMMRegister xmm_scratch = double_scratch0();
  XMMRegister input_temp = ToDoubleRegister(instr->temp());
  ExternalReference one_half = ExternalReference::address_of_one_half();
  ExternalReference minus_one_half =
      ExternalReference::address_of_minus_one_half();

  Label done, round_to_zero, below_one_half, do_not_compensate;
  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;

  __ movsd(xmm_scratch, Operand::StaticVariable(one_half));
  __ ucomisd(xmm_scratch, input_reg);
  __ j(above, &below_one_half, Label::kNear);

  // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
  __ addsd(xmm_scratch, input_reg);
  __ cvttsd2si(output_reg, Operand(xmm_scratch));
  // Overflow is signalled with minint.
  __ cmp(output_reg, 0x1);
  DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
  __ jmp(&done, dist);

  __ bind(&below_one_half);
  __ movsd(xmm_scratch, Operand::StaticVariable(minus_one_half));
  __ ucomisd(xmm_scratch, input_reg);
  __ j(below_equal, &round_to_zero, Label::kNear);

  // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
  // compare and compensate.
  __ movaps(input_temp, input_reg);  // Do not alter input_reg.
  __ subsd(input_temp, xmm_scratch);
  __ cvttsd2si(output_reg, Operand(input_temp));
  // Catch minint due to overflow, and to prevent overflow when compensating.
  __ cmp(output_reg, 0x1);
  DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);

  __ Cvtsi2sd(xmm_scratch, output_reg);
  __ ucomisd(xmm_scratch, input_temp);
  __ j(equal, &done, dist);
  __ sub(output_reg, Immediate(1));
  // No overflow because we already ruled out minint.
  __ jmp(&done, dist);

  __ bind(&round_to_zero);
  // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
  // we can ignore the difference between a result of -0 and +0.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // If the sign is positive, we return +0.
    __ movmskpd(output_reg, input_reg);
    __ test(output_reg, Immediate(1));
    DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
  }
  __ Move(output_reg, Immediate(0));
  __ bind(&done);
}

3219
DoMathFround(LMathFround * instr)3220 void LCodeGen::DoMathFround(LMathFround* instr) {
3221 XMMRegister input_reg = ToDoubleRegister(instr->value());
3222 XMMRegister output_reg = ToDoubleRegister(instr->result());
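  // Math.fround: narrow the double to float32 precision and widen it back,
  // rounding to the nearest representable float32 on the way down.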
  __ cvtsd2ss(output_reg, input_reg);
  __ cvtss2sd(output_reg, output_reg);
}


void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
  Operand input = ToOperand(instr->value());
  XMMRegister output = ToDoubleRegister(instr->result());
  __ sqrtsd(output, input);
}


void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  XMMRegister xmm_scratch = double_scratch0();
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  Register scratch = ToRegister(instr->temp());
  DCHECK(ToDoubleRegister(instr->result()).is(input_reg));

  // Note that according to ECMA-262 15.8.2.13:
  // Math.pow(-Infinity, 0.5) == Infinity
  // Math.sqrt(-Infinity) == NaN
  Label done, sqrt;
  // Check base for -Infinity. According to IEEE-754, single-precision
  // -Infinity has the highest 9 bits set and the lowest 23 bits cleared.
  __ mov(scratch, 0xFF800000);
  __ movd(xmm_scratch, scratch);
  __ cvtss2sd(xmm_scratch, xmm_scratch);
  __ ucomisd(input_reg, xmm_scratch);
  // Comparing -Infinity with NaN results in "unordered", which sets the
  // zero flag as if both were equal. However, it also sets the carry flag.
  __ j(not_equal, &sqrt, Label::kNear);
  __ j(carry, &sqrt, Label::kNear);
  // If input is -Infinity, return Infinity.
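  // 0 - (-Infinity) = +Infinity, computed below without loading another
  // constant.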
  __ xorps(input_reg, input_reg);
  __ subsd(input_reg, xmm_scratch);
  __ jmp(&done, Label::kNear);

  // Square root.
  __ bind(&sqrt);
  __ xorps(xmm_scratch, xmm_scratch);
  __ addsd(input_reg, xmm_scratch);  // Convert -0 to +0.
  __ sqrtsd(input_reg, input_reg);
  __ bind(&done);
}


void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.
  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
  DCHECK(!instr->right()->IsDoubleRegister() ||
         ToDoubleRegister(instr->right()).is(xmm1));
  DCHECK(!instr->right()->IsRegister() ||
         ToRegister(instr->right()).is(tagged_exponent));
  DCHECK(ToDoubleRegister(instr->left()).is(xmm2));
  DCHECK(ToDoubleRegister(instr->result()).is(xmm3));

  if (exponent_type.IsSmi()) {
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(tagged_exponent, &no_deopt);
    DCHECK(!ecx.is(tagged_exponent));
    __ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, ecx);
    DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
    __ bind(&no_deopt);
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    MathPowStub stub(isolate(), MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    DCHECK(exponent_type.IsDouble());
    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}


void LCodeGen::DoMathLog(LMathLog* instr) {
  XMMRegister input = ToDoubleRegister(instr->value());
  XMMRegister result = ToDoubleRegister(instr->result());
  // Pass one double as argument on the stack.
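  // (A double occupies two 4-byte argument slots on ia32, hence the arity of
  // 2 in the calls below.)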
  __ PrepareCallCFunction(2, eax);
  __ movsd(Operand(esp, 0 * kDoubleSize), input);
  __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 2);
  // Return value is in st(0) on ia32.
  // Store it into the result register.
  __ sub(esp, Immediate(kDoubleSize));
  __ fstp_d(Operand(esp, 0));
  __ movsd(result, Operand(esp, 0));
  __ add(esp, Immediate(kDoubleSize));
}


void LCodeGen::DoMathClz32(LMathClz32* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

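  // Lzcnt lowers to the LZCNT instruction when the CPU supports it; otherwise
  // the macro assembler emulates it with a BSR-based sequence.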
  __ Lzcnt(result, input);
}

void LCodeGen::DoMathCos(LMathCos* instr) {
  XMMRegister input = ToDoubleRegister(instr->value());
  XMMRegister result = ToDoubleRegister(instr->result());
  // Pass one double as argument on the stack.
  __ PrepareCallCFunction(2, eax);
  __ movsd(Operand(esp, 0 * kDoubleSize), input);
  __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 2);
  // Return value is in st(0) on ia32.
  // Store it into the result register.
  __ sub(esp, Immediate(kDoubleSize));
  __ fstp_d(Operand(esp, 0));
  __ movsd(result, Operand(esp, 0));
  __ add(esp, Immediate(kDoubleSize));
}

void LCodeGen::DoMathSin(LMathSin* instr) {
  XMMRegister input = ToDoubleRegister(instr->value());
  XMMRegister result = ToDoubleRegister(instr->result());
  // Pass one double as argument on the stack.
  __ PrepareCallCFunction(2, eax);
  __ movsd(Operand(esp, 0 * kDoubleSize), input);
  __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 2);
  // Return value is in st(0) on ia32.
  // Store it into the result register.
  __ sub(esp, Immediate(kDoubleSize));
  __ fstp_d(Operand(esp, 0));
  __ movsd(result, Operand(esp, 0));
  __ add(esp, Immediate(kDoubleSize));
}

void LCodeGen::DoMathExp(LMathExp* instr) {
  XMMRegister input = ToDoubleRegister(instr->value());
  XMMRegister result = ToDoubleRegister(instr->result());
  // Pass one double as argument on the stack.
  __ PrepareCallCFunction(2, eax);
  __ movsd(Operand(esp, 0 * kDoubleSize), input);
  __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 2);
  // Return value is in st(0) on ia32.
  // Store it into the result register.
  __ sub(esp, Immediate(kDoubleSize));
  __ fstp_d(Operand(esp, 0));
  __ movsd(result, Operand(esp, 0));
  __ add(esp, Immediate(kDoubleSize));
}

void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
                                  Register scratch1, Register scratch2,
                                  Register scratch3) {
#if DEBUG
  if (actual.is_reg()) {
    DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
  } else {
    DCHECK(!AreAliased(scratch1, scratch2, scratch3));
  }
#endif
  if (FLAG_code_comments) {
    if (actual.is_reg()) {
      Comment(";;; PrepareForTailCall, actual: %s {",
              RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
                  actual.reg().code()));
    } else {
      Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
    }
  }

  // Check if next frame is an arguments adaptor frame.
  Register caller_args_count_reg = scratch1;
  Label no_arguments_adaptor, formal_parameter_count_loaded;
  __ mov(scratch2, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
  __ cmp(Operand(scratch2, StandardFrameConstants::kContextOffset),
         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ j(not_equal, &no_arguments_adaptor, Label::kNear);

  // Drop current frame and load arguments count from arguments adaptor frame.
  __ mov(ebp, scratch2);
  __ mov(caller_args_count_reg,
         Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(caller_args_count_reg);
  __ jmp(&formal_parameter_count_loaded, Label::kNear);

  __ bind(&no_arguments_adaptor);
  // Load caller's formal parameter count.
  __ mov(caller_args_count_reg,
         Immediate(info()->literal()->parameter_count()));

  __ bind(&formal_parameter_count_loaded);
  __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3,
                        ReturnAddressState::kNotOnStack, 0);
  Comment(";;; }");
}

void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  HInvokeFunction* hinstr = instr->hydrogen();
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->function()).is(edi));
  DCHECK(instr->HasPointerMap());

  bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;

  if (is_tail_call) {
    DCHECK(!info()->saves_caller_doubles());
    ParameterCount actual(instr->arity());
    // It is safe to use ebx, ecx and edx as scratch registers here given that
    // 1) we are not going to return to caller function anyway,
    // 2) ebx (expected arguments count) and edx (new.target) will be
    //    initialized below.
    PrepareForTailCall(actual, ebx, ecx, edx);
  }

  Handle<JSFunction> known_function = hinstr->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount actual(instr->arity());
    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
    __ InvokeFunction(edi, no_reg, actual, flag, generator);
  } else {
    CallKnownFunction(known_function, hinstr->formal_parameter_count(),
                      instr->arity(), is_tail_call, instr);
  }
}


void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->constructor()).is(edi));
  DCHECK(ToRegister(instr->result()).is(eax));

  __ Move(eax, Immediate(instr->arity()));
  __ mov(ebx, instr->hydrogen()->site());

  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;
      // A non-zero length argument creates holes, so we may need the holey
      // variant of the elements kind: look at the first argument.
      __ mov(ecx, Operand(esp, 0));
      __ test(ecx, ecx);
      __ j(zero, &packed_case, Label::kNear);

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(isolate(),
                                              holey_kind,
                                              override_mode);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      __ jmp(&done, Label::kNear);
      __ bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
    __ bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(isolate());
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}


void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
}


void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register function = ToRegister(instr->function());
  Register code_object = ToRegister(instr->code_object());
  __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
  __ mov(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
}


void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register result = ToRegister(instr->result());
  Register base = ToRegister(instr->base_object());
  if (instr->offset()->IsConstantOperand()) {
    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
    __ lea(result, Operand(base, ToInteger32(offset)));
  } else {
    Register offset = ToRegister(instr->offset());
    __ lea(result, Operand(base, offset, times_1, 0));
  }
}


void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Representation representation = instr->hydrogen()->field_representation();

  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
    MemOperand operand = instr->object()->IsConstantOperand()
        ? MemOperand::StaticVariable(
            ToExternalReference(LConstantOperand::cast(instr->object())))
        : MemOperand(ToRegister(instr->object()), offset);
    if (instr->value()->IsConstantOperand()) {
      LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
      __ mov(operand, Immediate(ToInteger32(operand_value)));
    } else {
      Register value = ToRegister(instr->value());
      __ Store(value, operand, representation);
    }
    return;
  }

  Register object = ToRegister(instr->object());
  __ AssertNotSmi(object);

  DCHECK(!representation.IsSmi() ||
         !instr->value()->IsConstantOperand() ||
         IsSmi(LConstantOperand::cast(instr->value())));
  if (representation.IsDouble()) {
    DCHECK(access.IsInobject());
    DCHECK(!instr->hydrogen()->has_transition());
    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
    XMMRegister value = ToDoubleRegister(instr->value());
    __ movsd(FieldOperand(object, offset), value);
    return;
  }

  if (instr->hydrogen()->has_transition()) {
    Handle<Map> transition = instr->hydrogen()->transition_map();
    AddDeprecationDependency(transition);
    __ mov(FieldOperand(object, HeapObject::kMapOffset), transition);
    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
      Register temp = ToRegister(instr->temp());
      Register temp_map = ToRegister(instr->temp_map());
      // Update the write barrier for the map field.
      __ RecordWriteForMap(object, transition, temp_map, temp, kSaveFPRegs);
    }
  }

  // Do the store.
  Register write_register = object;
  if (!access.IsInobject()) {
    write_register = ToRegister(instr->temp());
    __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
  }

  MemOperand operand = FieldOperand(write_register, offset);
  if (instr->value()->IsConstantOperand()) {
    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
    if (operand_value->IsRegister()) {
      Register value = ToRegister(operand_value);
      __ Store(value, operand, representation);
    } else if (representation.IsInteger32() || representation.IsExternal()) {
      Immediate immediate = ToImmediate(operand_value, representation);
      DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
      __ mov(operand, immediate);
    } else {
      Handle<Object> handle_value = ToHandle(operand_value);
      DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
      __ mov(operand, handle_value);
    }
  } else {
    Register value = ToRegister(instr->value());
    __ Store(value, operand, representation);
  }

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    Register value = ToRegister(instr->value());
    Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
    // Update the write barrier for the object for in-object properties.
    __ RecordWriteField(write_register,
                        offset,
                        value,
                        temp,
                        kSaveFPRegs,
                        EMIT_REMEMBERED_SET,
                        instr->hydrogen()->SmiCheckForWriteBarrier(),
                        instr->hydrogen()->PointersToHereCheckForValue());
  }
}


void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  Condition cc = instr->hydrogen()->allow_equality() ? above : above_equal;
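  // cc is the condition (on the unsigned comparison of index against length)
  // that triggers a deopt: index > length when equality is allowed, otherwise
  // index >= length.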
  if (instr->index()->IsConstantOperand()) {
    __ cmp(ToOperand(instr->length()),
           ToImmediate(LConstantOperand::cast(instr->index()),
                       instr->hydrogen()->length()->representation()));
    cc = CommuteCondition(cc);
  } else if (instr->length()->IsConstantOperand()) {
    __ cmp(ToOperand(instr->index()),
           ToImmediate(LConstantOperand::cast(instr->length()),
                       instr->hydrogen()->index()->representation()));
  } else {
    __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
  }
  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
    Label done;
    __ j(NegateCondition(cc), &done, Label::kNear);
    __ int3();
    __ bind(&done);
  } else {
    DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds);
  }
}


void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
  ElementsKind elements_kind = instr->elements_kind();
  LOperand* key = instr->key();
  if (!key->IsConstantOperand() &&
      ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
                                  elements_kind)) {
    __ SmiUntag(ToRegister(key));
  }
  Operand operand(BuildFastArrayOperand(
      instr->elements(),
      key,
      instr->hydrogen()->key()->representation(),
      elements_kind,
      instr->base_offset()));
  if (elements_kind == FLOAT32_ELEMENTS) {
    XMMRegister xmm_scratch = double_scratch0();
    __ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value()));
    __ movss(operand, xmm_scratch);
  } else if (elements_kind == FLOAT64_ELEMENTS) {
    __ movsd(operand, ToDoubleRegister(instr->value()));
  } else {
    Register value = ToRegister(instr->value());
    switch (elements_kind) {
      case UINT8_ELEMENTS:
      case INT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
        __ mov_b(operand, value);
        break;
      case UINT16_ELEMENTS:
      case INT16_ELEMENTS:
        __ mov_w(operand, value);
        break;
      case UINT32_ELEMENTS:
      case INT32_ELEMENTS:
        __ mov(operand, value);
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
      case FAST_STRING_WRAPPER_ELEMENTS:
      case SLOW_STRING_WRAPPER_ELEMENTS:
      case NO_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
  Operand double_store_operand = BuildFastArrayOperand(
      instr->elements(),
      instr->key(),
      instr->hydrogen()->key()->representation(),
      FAST_DOUBLE_ELEMENTS,
      instr->base_offset());

  XMMRegister value = ToDoubleRegister(instr->value());

  if (instr->NeedsCanonicalization()) {
    XMMRegister xmm_scratch = double_scratch0();
    // Turn potential sNaN value into qNaN.
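    // (x - +0.0 leaves every other value, including -0, unchanged.)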
    __ xorps(xmm_scratch, xmm_scratch);
    __ subsd(value, xmm_scratch);
  }

  __ movsd(double_store_operand, value);
}


void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
  Register elements = ToRegister(instr->elements());
  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;

  Operand operand = BuildFastArrayOperand(
      instr->elements(),
      instr->key(),
      instr->hydrogen()->key()->representation(),
      FAST_ELEMENTS,
      instr->base_offset());
  if (instr->value()->IsRegister()) {
    __ mov(operand, ToRegister(instr->value()));
  } else {
    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
    if (IsSmi(operand_value)) {
      Immediate immediate = ToImmediate(operand_value, Representation::Smi());
      __ mov(operand, immediate);
    } else {
      DCHECK(!IsInteger32(operand_value));
      Handle<Object> handle_value = ToHandle(operand_value);
      __ mov(operand, handle_value);
    }
  }

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    DCHECK(instr->value()->IsRegister());
    Register value = ToRegister(instr->value());
    DCHECK(!instr->key()->IsConstantOperand());
    SmiCheck check_needed =
        instr->hydrogen()->value()->type().IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    __ lea(key, operand);
    __ RecordWrite(elements,
                   key,
                   value,
                   kSaveFPRegs,
                   EMIT_REMEMBERED_SET,
                   check_needed,
                   instr->hydrogen()->PointersToHereCheckForValue());
  }
}


void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
  // Dispatch by case: external/typed array, fast double array, or fast array.
  if (instr->is_fixed_typed_array()) {
    DoStoreKeyedExternalArray(instr);
  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
    DoStoreKeyedFixedDoubleArray(instr);
  } else {
    DoStoreKeyedFixedArray(instr);
  }
}


void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp = ToRegister(instr->temp());
  Label no_memento_found;
  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
  DeoptimizeIf(equal, instr, DeoptimizeReason::kMementoFound);
  __ bind(&no_memento_found);
}


void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
  class DeferredMaybeGrowElements final : public LDeferredCode {
   public:
    DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LMaybeGrowElements* instr_;
  };

  Register result = eax;
  DeferredMaybeGrowElements* deferred =
      new (zone()) DeferredMaybeGrowElements(this, instr);
  LOperand* key = instr->key();
  LOperand* current_capacity = instr->current_capacity();

  DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
  DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
  DCHECK(key->IsConstantOperand() || key->IsRegister());
  DCHECK(current_capacity->IsConstantOperand() ||
         current_capacity->IsRegister());

  if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
    int32_t constant_capacity =
        ToInteger32(LConstantOperand::cast(current_capacity));
    if (constant_key >= constant_capacity) {
      // Deferred case.
      __ jmp(deferred->entry());
    }
  } else if (key->IsConstantOperand()) {
    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
    __ cmp(ToOperand(current_capacity), Immediate(constant_key));
    __ j(less_equal, deferred->entry());
  } else if (current_capacity->IsConstantOperand()) {
    int32_t constant_capacity =
        ToInteger32(LConstantOperand::cast(current_capacity));
    __ cmp(ToRegister(key), Immediate(constant_capacity));
    __ j(greater_equal, deferred->entry());
  } else {
    __ cmp(ToRegister(key), ToRegister(current_capacity));
    __ j(greater_equal, deferred->entry());
  }

  __ mov(result, ToOperand(instr->elements()));
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register result = eax;
  __ Move(result, Immediate(0));

  // We have to call a stub.
  {
    PushSafepointRegistersScope scope(this);
    if (instr->object()->IsRegister()) {
      __ Move(result, ToRegister(instr->object()));
    } else {
      __ mov(result, ToOperand(instr->object()));
    }

    LOperand* key = instr->key();
    if (key->IsConstantOperand()) {
      LConstantOperand* constant_key = LConstantOperand::cast(key);
      int32_t int_key = ToInteger32(constant_key);
      if (Smi::IsValid(int_key)) {
        __ mov(ebx, Immediate(Smi::FromInt(int_key)));
      } else {
        // We should never get here at runtime because there is a smi check on
        // the key before this point.
        __ int3();
      }
    } else {
      __ Move(ebx, ToRegister(key));
      __ SmiTag(ebx);
    }

    GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
    __ CallStub(&stub);
    RecordSafepointWithLazyDeopt(
        instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    __ StoreToSafepointRegisterSlot(result, result);
  }

  // Deopt on smi, which means the elements array changed to dictionary mode.
  __ test(result, Immediate(kSmiTagMask));
  DeoptimizeIf(equal, instr, DeoptimizeReason::kSmi);
}


void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object_reg = ToRegister(instr->object());

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;
  bool is_simple_map_transition =
      IsSimpleMapChangeTransition(from_kind, to_kind);
  Label::Distance branch_distance =
      is_simple_map_transition ? Label::kNear : Label::kFar;
  __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
  __ j(not_equal, &not_applicable, branch_distance);
  if (is_simple_map_transition) {
    Register new_map_reg = ToRegister(instr->new_map_temp());
    __ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
           Immediate(to_map));
    // Write barrier.
    DCHECK_NOT_NULL(instr->temp());
    __ RecordWriteForMap(object_reg, to_map, new_map_reg,
                         ToRegister(instr->temp()),
                         kDontSaveFPRegs);
  } else {
    DCHECK(ToRegister(instr->context()).is(esi));
    DCHECK(object_reg.is(eax));
    PushSafepointRegistersScope scope(this);
    __ mov(ebx, to_map);
    TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
    __ CallStub(&stub);
    RecordSafepointWithLazyDeopt(
        instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  }
  __ bind(&not_applicable);
}


void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt final : public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen,
                             LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr);

  StringCharLoadGenerator::Generate(masm(),
                                    factory(),
                                    ToRegister(instr->string()),
                                    ToRegister(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Move(result, Immediate(0));

  PushSafepointRegistersScope scope(this);
  __ push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
  if (instr->index()->IsConstantOperand()) {
    Immediate immediate = ToImmediate(LConstantOperand::cast(instr->index()),
                                      Representation::Smi());
    __ push(immediate);
  } else {
    Register index = ToRegister(instr->index());
    __ SmiTag(index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2,
                          instr, instr->context());
  __ AssertSmi(eax);
  __ SmiUntag(eax);
  __ StoreToSafepointRegisterSlot(result, eax);
}


void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode final : public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen,
                               LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredStringCharFromCode(instr_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr);

  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  DCHECK(!char_code.is(result));

  __ cmp(char_code, String::kMaxOneByteCharCode);
  __ j(above, deferred->entry());
  __ Move(result, Immediate(factory()->single_character_string_cache()));
  __ mov(result, FieldOperand(result,
                              char_code, times_pointer_size,
                              FixedArray::kHeaderSize));
  __ cmp(result, factory()->undefined_value());
  __ j(equal, deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Move(result, Immediate(0));

  PushSafepointRegistersScope scope(this);
  __ SmiTag(char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
                          instr->context());
  __ StoreToSafepointRegisterSlot(result, eax);
}


void LCodeGen::DoStringAdd(LStringAdd* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->left()).is(edx));
  DCHECK(ToRegister(instr->right()).is(eax));
  StringAddStub stub(isolate(),
                     instr->hydrogen()->flags(),
                     instr->hydrogen()->pretenure_flag());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();
  DCHECK(input->IsRegister() || input->IsStackSlot());
  DCHECK(output->IsDoubleRegister());
  __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
}


void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();
  __ LoadUint32(ToDoubleRegister(output), ToRegister(input));
}


void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  class DeferredNumberTagI final : public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen,
                       LNumberTagI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredNumberTagIU(
          instr_, instr_->value(), instr_->temp(), SIGNED_INT32);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagI* instr_;
  };

  LOperand* input = instr->value();
  DCHECK(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  DeferredNumberTagI* deferred =
      new(zone()) DeferredNumberTagI(this, instr);
  __ SmiTag(reg);
  __ j(overflow, deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU final : public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredNumberTagIU(
          instr_, instr_->value(), instr_->temp(), UNSIGNED_INT32);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagU* instr_;
  };

  LOperand* input = instr->value();
  DCHECK(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  DeferredNumberTagU* deferred =
      new(zone()) DeferredNumberTagU(this, instr);
  __ cmp(reg, Immediate(Smi::kMaxValue));
  __ j(above, deferred->entry());
  __ SmiTag(reg);
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
                                     LOperand* value,
                                     LOperand* temp,
                                     IntegerSignedness signedness) {
  Label done, slow;
  Register reg = ToRegister(value);
  Register tmp = ToRegister(temp);
  XMMRegister xmm_scratch = double_scratch0();

  if (signedness == SIGNED_INT32) {
    // There was overflow, so bits 30 and 31 of the original integer
    // disagree. Try to allocate a heap number in new space and store
    // the value in there. If that fails, call the runtime system.
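    // Untagging halves the doubled value but leaves the wrong sign bit
    // (a copy of bit 30); the xor below restores the original bit 31.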
    __ SmiUntag(reg);
    __ xor_(reg, 0x80000000);
    __ Cvtsi2sd(xmm_scratch, Operand(reg));
  } else {
    __ LoadUint32(xmm_scratch, reg);
  }

  if (FLAG_inline_new) {
    __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
    __ jmp(&done, Label::kNear);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  {
    // TODO(3095996): Put a valid pointer value in the stack slot where the
    // result register is stored, as this register is in the pointer map, but
    // contains an integer value.
    __ Move(reg, Immediate(0));

    // Preserve the value of all registers.
    PushSafepointRegistersScope scope(this);
    // Reset the context register.
    if (!reg.is(esi)) {
      __ Move(esi, Immediate(0));
    }
    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(reg, eax);
  }

  // Done. Put the value in xmm_scratch into the value of the allocated heap
  // number.
  __ bind(&done);
  __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch);
}


void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD final : public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagD* instr_;
  };

  Register reg = ToRegister(instr->result());

  DeferredNumberTagD* deferred =
      new(zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    Register tmp = ToRegister(instr->temp());
    __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
  } else {
    __ jmp(deferred->entry());
  }
  __ bind(deferred->exit());
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
}


void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ Move(reg, Immediate(0));

  PushSafepointRegistersScope scope(this);
  // Reset the context register.
  if (!reg.is(esi)) {
    __ Move(esi, Immediate(0));
  }
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(reg, eax);
}


void LCodeGen::DoSmiTag(LSmiTag* instr) {
  HChange* hchange = instr->hydrogen();
  Register input = ToRegister(instr->value());
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      hchange->value()->CheckFlag(HValue::kUint32)) {
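    // A uint32 fits in a smi only if it is below 1 << 30, i.e. the top two
    // bits are clear.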
    __ test(input, Immediate(0xc0000000));
    DeoptimizeIf(not_zero, instr, DeoptimizeReason::kOverflow);
  }
  __ SmiTag(input);
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      !hchange->value()->CheckFlag(HValue::kUint32)) {
    DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
  }
}


void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  LOperand* input = instr->value();
  Register result = ToRegister(input);
  DCHECK(input->IsRegister() && input->Equals(instr->result()));
  if (instr->needs_check()) {
    __ test(result, Immediate(kSmiTagMask));
    DeoptimizeIf(not_zero, instr, DeoptimizeReason::kNotASmi);
  } else {
    __ AssertSmi(result);
  }
  __ SmiUntag(result);
}


void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
                                Register temp_reg, XMMRegister result_reg,
                                NumberUntagDMode mode) {
  bool can_convert_undefined_to_nan = instr->truncating();
  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();

  Label convert, load_smi, done;

  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ JumpIfSmi(input_reg, &load_smi, Label::kNear);

    // Heap number map check.
    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
           factory()->heap_number_map());
    if (can_convert_undefined_to_nan) {
      __ j(not_equal, &convert, Label::kNear);
    } else {
      DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
    }

    // Heap number to XMM conversion.
    __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));

    if (deoptimize_on_minus_zero) {
      XMMRegister xmm_scratch = double_scratch0();
      __ xorps(xmm_scratch, xmm_scratch);
      __ ucomisd(result_reg, xmm_scratch);
      __ j(not_zero, &done, Label::kNear);
      __ movmskpd(temp_reg, result_reg);
      __ test_b(temp_reg, Immediate(1));
      DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
    }
    __ jmp(&done, Label::kNear);

    if (can_convert_undefined_to_nan) {
      __ bind(&convert);

      // Convert undefined to NaN.
      __ cmp(input_reg, factory()->undefined_value());
      DeoptimizeIf(not_equal, instr,
                   DeoptimizeReason::kNotAHeapNumberUndefined);

      __ xorpd(result_reg, result_reg);
      __ divsd(result_reg, result_reg);
      __ jmp(&done, Label::kNear);
    }
  } else {
    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
  }

  __ bind(&load_smi);
  // Smi to XMM conversion. Clobbering a temp is faster than re-tagging the
  // input register since we avoid dependencies.
  __ mov(temp_reg, input_reg);
  __ SmiUntag(temp_reg);  // Untag smi before converting to float.
  __ Cvtsi2sd(result_reg, Operand(temp_reg));
  __ bind(&done);
}


void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
  Register input_reg = ToRegister(instr->value());

  // The input was optimistically untagged; revert it.
  STATIC_ASSERT(kSmiTagSize == 1);
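  // The lea computes input_reg * 2 + kHeapObjectTag, rebuilding the original
  // tagged pointer.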
  __ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag));

  if (instr->truncating()) {
    Label truncate;
    Label::Distance truncate_distance =
        DeoptEveryNTimes() ? Label::kFar : Label::kNear;
    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
           factory()->heap_number_map());
    __ j(equal, &truncate, truncate_distance);
    __ push(input_reg);
    __ CmpObjectType(input_reg, ODDBALL_TYPE, input_reg);
    __ pop(input_reg);
    DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotANumberOrOddball);
    __ bind(&truncate);
    __ TruncateHeapNumberToI(input_reg, input_reg);
  } else {
    XMMRegister scratch = ToDoubleRegister(instr->temp());
    DCHECK(!scratch.is(xmm0));
    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
           isolate()->factory()->heap_number_map());
    DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
    __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
    __ cvttsd2si(input_reg, Operand(xmm0));
    __ Cvtsi2sd(scratch, Operand(input_reg));
    __ ucomisd(xmm0, scratch);
    DeoptimizeIf(not_equal, instr, DeoptimizeReason::kLostPrecision);
    DeoptimizeIf(parity_even, instr, DeoptimizeReason::kNaN);
    if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
      __ test(input_reg, Operand(input_reg));
      __ j(not_zero, done);
      __ movmskpd(input_reg, xmm0);
      __ and_(input_reg, 1);
      DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
    }
  }
}


void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI final : public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredTaggedToI(instr_, done()); }
    LInstruction* instr() override { return instr_; }

   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  Register input_reg = ToRegister(input);
  DCHECK(input_reg.is(ToRegister(instr->result())));

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(input_reg);
  } else {
    DeferredTaggedToI* deferred =
        new(zone()) DeferredTaggedToI(this, instr);
    // Optimistically untag the input.
    // If the input is a HeapObject, SmiUntag will set the carry flag.
    STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
    __ SmiUntag(input_reg);
    // Branch to deferred code if the input was tagged.
    // The deferred code will take care of restoring the tag.
    __ j(carry, deferred->entry());
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  LOperand* temp = instr->temp();
  DCHECK(temp->IsRegister());
  LOperand* result = instr->result();
  DCHECK(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  Register temp_reg = ToRegister(temp);

  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  XMMRegister result_reg = ToDoubleRegister(result);
  EmitNumberUntagD(instr, input_reg, temp_reg, result_reg, mode);
}


void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsDoubleRegister());
  LOperand* result = instr->result();
  DCHECK(result->IsRegister());
  Register result_reg = ToRegister(result);

  if (instr->truncating()) {
    XMMRegister input_reg = ToDoubleRegister(input);
    __ TruncateDoubleToI(result_reg, input_reg);
  } else {
    Label lost_precision, is_nan, minus_zero, done;
    XMMRegister input_reg = ToDoubleRegister(input);
    XMMRegister xmm_scratch = double_scratch0();
    Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
    __ DoubleToI(result_reg, input_reg, xmm_scratch,
                 instr->hydrogen()->GetMinusZeroMode(), &lost_precision,
                 &is_nan, &minus_zero, dist);
    __ jmp(&done, dist);
    __ bind(&lost_precision);
    DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision);
    __ bind(&is_nan);
    DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN);
    __ bind(&minus_zero);
    DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
    __ bind(&done);
  }
}


void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsDoubleRegister());
  LOperand* result = instr->result();
  DCHECK(result->IsRegister());
  Register result_reg = ToRegister(result);

  Label lost_precision, is_nan, minus_zero, done;
  XMMRegister input_reg = ToDoubleRegister(input);
  XMMRegister xmm_scratch = double_scratch0();
  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
  __ DoubleToI(result_reg, input_reg, xmm_scratch,
               instr->hydrogen()->GetMinusZeroMode(), &lost_precision, &is_nan,
               &minus_zero, dist);
  __ jmp(&done, dist);
  __ bind(&lost_precision);
  DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision);
  __ bind(&is_nan);
  DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN);
  __ bind(&minus_zero);
  DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
  __ bind(&done);
  __ SmiTag(result_reg);
  DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  __ test(ToOperand(input), Immediate(kSmiTagMask));
  DeoptimizeIf(not_zero, instr, DeoptimizeReason::kNotASmi);
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    LOperand* input = instr->value();
    __ test(ToOperand(input), Immediate(kSmiTagMask));
    DeoptimizeIf(zero, instr, DeoptimizeReason::kSmi);
  }
}


void LCodeGen::DoCheckArrayBufferNotNeutered(
    LCheckArrayBufferNotNeutered* instr) {
  Register view = ToRegister(instr->view());
  Register scratch = ToRegister(instr->scratch());

  __ mov(scratch, FieldOperand(view, JSArrayBufferView::kBufferOffset));
  __ test_b(FieldOperand(scratch, JSArrayBuffer::kBitFieldOffset),
            Immediate(1 << JSArrayBuffer::WasNeutered::kShift));
  DeoptimizeIf(not_zero, instr, DeoptimizeReason::kOutOfBounds);
}


void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(first));

    // If there is only one type in the interval check for equality.
    if (first == last) {
      DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongInstanceType);
    } else {
      DeoptimizeIf(below, instr, DeoptimizeReason::kWrongInstanceType);
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(last));
        DeoptimizeIf(above, instr, DeoptimizeReason::kWrongInstanceType);
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (base::bits::IsPowerOfTwo32(mask)) {
      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
      __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), Immediate(mask));
      DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
                   DeoptimizeReason::kWrongInstanceType);
    } else {
      __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
      __ and_(temp, mask);
      __ cmp(temp, tag);
      DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongInstanceType);
    }
  }
}


void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Handle<HeapObject> object = instr->hydrogen()->object().handle();
  if (instr->hydrogen()->object_in_new_space()) {
    Register reg = ToRegister(instr->value());
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    __ cmp(reg, Operand::ForCell(cell));
  } else {
    Operand operand = ToOperand(instr->value());
    __ cmp(operand, object);
  }
  DeoptimizeIf(not_equal, instr, DeoptimizeReason::kValueMismatch);
}


void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  {
    PushSafepointRegistersScope scope(this);
    __ push(object);
    __ xor_(esi, esi);
    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);

    __ test(eax, Immediate(kSmiTagMask));
  }
  DeoptimizeIf(zero, instr, DeoptimizeReason::kInstanceMigrationFailed);
}


void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps final : public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
    void Generate() override {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    LInstruction* instr() override { return instr_; }

   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->IsStabilityCheck()) {
    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
    for (int i = 0; i < maps->size(); ++i) {
      AddStabilityDependency(maps->at(i).handle());
    }
    return;
  }

  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  Register reg = ToRegister(input);

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->HasMigrationTarget()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
    __ bind(deferred->check_maps());
  }

  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
  Label success;
  for (int i = 0; i < maps->size() - 1; i++) {
    Handle<Map> map = maps->at(i).handle();
    __ CompareMap(reg, map);
    __ j(equal, &success, Label::kNear);
  }

  Handle<Map> map = maps->at(maps->size() - 1).handle();
  __ CompareMap(reg, map);
  if (instr->hydrogen()->HasMigrationTarget()) {
    __ j(not_equal, deferred->entry());
  } else {
    DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongMap);
  }

  __ bind(&success);
}


void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
  XMMRegister xmm_scratch = double_scratch0();
  Register result_reg = ToRegister(instr->result());
  __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  DCHECK(instr->unclamped()->Equals(instr->result()));
  Register value_reg = ToRegister(instr->result());
  __ ClampUint8(value_reg);
}


void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  DCHECK(instr->unclamped()->Equals(instr->result()));
  Register input_reg = ToRegister(instr->unclamped());
  XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
  XMMRegister xmm_scratch = double_scratch0();
  Label is_smi, done, heap_number;

  __ JumpIfSmi(input_reg, &is_smi);

  // Check for heap number.
  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());
  __ j(equal, &heap_number, Label::kNear);

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  __ cmp(input_reg, factory()->undefined_value());
  DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
  __ mov(input_reg, 0);
  __ jmp(&done, Label::kNear);

  // Heap number.
  __ bind(&heap_number);
  __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
  __ jmp(&done, Label::kNear);

  // Smi.
  __ bind(&is_smi);
  __ SmiUntag(input_reg);
  __ ClampUint8(input_reg);
  __ bind(&done);
}
4662
DoAllocate(LAllocate * instr)4663 void LCodeGen::DoAllocate(LAllocate* instr) {
4664 class DeferredAllocate final : public LDeferredCode {
4665 public:
4666 DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
4667 : LDeferredCode(codegen), instr_(instr) { }
4668 void Generate() override { codegen()->DoDeferredAllocate(instr_); }
4669 LInstruction* instr() override { return instr_; }
4670
4671 private:
4672 LAllocate* instr_;
4673 };
4674
4675 DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr);
4676
4677 Register result = ToRegister(instr->result());
4678 Register temp = ToRegister(instr->temp());
4679
4680 // Allocate memory for the object.
4681 AllocationFlags flags = NO_ALLOCATION_FLAGS;
4682 if (instr->hydrogen()->MustAllocateDoubleAligned()) {
4683 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
4684 }
4685 if (instr->hydrogen()->IsOldSpaceAllocation()) {
4686 DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
4687 flags = static_cast<AllocationFlags>(flags | PRETENURE);
4688 }
4689 if (instr->hydrogen()->IsAllocationFoldingDominator()) {
4690 flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
4691 }
4692 DCHECK(!instr->hydrogen()->IsAllocationFolded());
4693
4694 if (instr->size()->IsConstantOperand()) {
4695 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
4696 CHECK(size <= kMaxRegularHeapObjectSize);
4697 __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
4698 } else {
4699 Register size = ToRegister(instr->size());
4700 __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
4701 }
4702
4703 __ bind(deferred->exit());
4704
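  // If requested, overwrite the object's fields (every word except the map
  // slot at offset 0) with one-pointer filler maps, so the heap remains
  // iterable before the real values are stored.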
  if (instr->hydrogen()->MustPrefillWithFiller()) {
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ mov(temp, (size / kPointerSize) - 1);
    } else {
      temp = ToRegister(instr->size());
      __ shr(temp, kPointerSizeLog2);
      __ dec(temp);
    }
    Label loop;
    __ bind(&loop);
    __ mov(FieldOperand(result, temp, times_pointer_size, 0),
           isolate()->factory()->one_pointer_filler_map());
    __ dec(temp);
    __ j(not_zero, &loop);
  }
}

void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
  DCHECK(instr->hydrogen()->IsAllocationFolded());
  DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
  Register result = ToRegister(instr->result());
  Register temp = ToRegister(instr->temp());

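  // A folded allocation simply bumps the top pointer within space that its
  // allocation-folding dominator already reserved, so unlike DoAllocate there
  // is no deferred fallback path.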
  AllocationFlags flags = ALLOCATION_FOLDED;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE);
  }
  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    CHECK(size <= kMaxRegularHeapObjectSize);
    __ FastAllocate(size, result, temp, flags);
  } else {
    Register size = ToRegister(instr->size());
    __ FastAllocate(size, result, temp, flags);
  }
}

void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Move(result, Immediate(Smi::kZero));

  PushSafepointRegistersScope scope(this);
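  // Pass the allocation size to the runtime as a smi, either by tagging the
  // size register in place or by pushing a smi immediate.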
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    DCHECK(!size.is(result));
    __ SmiTag(ToRegister(instr->size()));
    __ push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size >= 0 && size <= Smi::kMaxValue) {
      __ push(Immediate(Smi::FromInt(size)));
    } else {
      // We should never get here at runtime => abort.
      __ int3();
      return;
    }
  }

  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ push(Immediate(Smi::FromInt(flags)));

  CallRuntimeFromDeferred(
      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(result, eax);

  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
    AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
    if (instr->hydrogen()->IsOldSpaceAllocation()) {
      DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
      // Only the PRETENURE bit is relevant for looking up the allocation top.
      allocation_flags =
          static_cast<AllocationFlags>(allocation_flags | PRETENURE);
    }
    // If the allocation-folding dominator's allocate triggered a GC, the
    // allocation happened in the runtime. We have to reset the top pointer
    // to virtually undo the allocation.
    ExternalReference allocation_top =
        AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
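    // The returned object is tagged; strip the tag to obtain the raw top
    // address, write it back, then retag the result.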
    __ sub(eax, Immediate(kHeapObjectTag));
    __ mov(Operand::StaticVariable(allocation_top), eax);
    __ add(eax, Immediate(kHeapObjectTag));
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->value()).is(ebx));
  Label end, do_call;
  Register value_register = ToRegister(instr->value());
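  // Fast path: a smi is always a number, so the generic Typeof stub call can
  // be skipped.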
  __ JumpIfNotSmi(value_register, &do_call);
  __ mov(eax, Immediate(isolate()->factory()->number_string()));
  __ jmp(&end);
  __ bind(&do_call);
  Callable callable = CodeFactory::Typeof(isolate());
  CallCode(callable.code(), RelocInfo::CODE_TARGET, instr);
  __ bind(&end);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Condition final_branch_condition = EmitTypeofIs(instr, input);
  if (final_branch_condition != no_condition) {
    EmitBranch(instr, final_branch_condition);
  }
}


Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);
  Handle<String> type_name = instr->type_literal();
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);
  int next_block = GetNextEmittedBlock();

  Label::Distance true_distance = left_block == next_block ? Label::kNear
                                                           : Label::kFar;
  Label::Distance false_distance = right_block == next_block ? Label::kNear
                                                             : Label::kFar;
  Condition final_branch_condition = no_condition;
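  // Each case below either emits its comparison and returns the condition on
  // which to branch to the true target, or, for an unknown type literal,
  // jumps to the false target directly and leaves no_condition in place.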
  if (String::Equals(type_name, factory()->number_string())) {
    __ JumpIfSmi(input, true_label, true_distance);
    __ cmp(FieldOperand(input, HeapObject::kMapOffset),
           factory()->heap_number_map());
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory()->string_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
    final_branch_condition = below;

  } else if (String::Equals(type_name, factory()->symbol_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, SYMBOL_TYPE, input);
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory()->boolean_string())) {
    __ cmp(input, factory()->true_value());
    __ j(equal, true_label, true_distance);
    __ cmp(input, factory()->false_value());
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory()->undefined_string())) {
    __ cmp(input, factory()->null_value());
    __ j(equal, false_label, false_distance);
    __ JumpIfSmi(input, false_label, false_distance);
    // Check for undetectable objects => true.
    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = not_zero;

  } else if (String::Equals(type_name, factory()->function_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    // Check for callable and not undetectable objects => true.
    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
    __ movzx_b(input, FieldOperand(input, Map::kBitFieldOffset));
    __ and_(input, (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
    __ cmp(input, 1 << Map::kIsCallable);
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory()->object_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ cmp(input, factory()->null_value());
    __ j(equal, true_label, true_distance);
    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
    __ CmpObjectType(input, FIRST_JS_RECEIVER_TYPE, input);
    __ j(below, false_label, false_distance);
    // Check for callable or undetectable objects => false.
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
    final_branch_condition = zero;

  // clang-format off
#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)         \
  } else if (String::Equals(type_name, factory()->type##_string())) { \
    __ JumpIfSmi(input, false_label, false_distance);                 \
    __ cmp(FieldOperand(input, HeapObject::kMapOffset),               \
           factory()->type##_map());                                  \
    final_branch_condition = equal;
  SIMD128_TYPES(SIMD128_TYPE)
#undef SIMD128_TYPE
  // clang-format on

  } else {
    __ jmp(false_label, false_distance);
  }
  return final_branch_condition;
}


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons
  // (the needed return address), even though the implementation of LAZY and
  // EAGER is now identical. When LAZY is eventually completely folded into
  // EAGER, remove the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }
  DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type);
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck final : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStackCheck* instr_;
  };

  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    ExternalReference stack_limit =
        ExternalReference::address_of_stack_limit(isolate());
    __ cmp(esp, Operand::StaticVariable(stack_limit));
    __ j(above_equal, &done, Label::kNear);

    DCHECK(instr->context()->IsRegister());
    DCHECK(ToRegister(instr->context()).is(esi));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    __ bind(&done);
  } else {
    DCHECK(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    ExternalReference stack_limit =
        ExternalReference::address_of_stack_limit(isolate());
    __ cmp(esp, Operand::StaticVariable(stack_limit));
    __ j(below, deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting call and the safepoint in
    // the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  DCHECK(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));

  Label use_cache, call_runtime;
  __ CheckEnumCache(&call_runtime);

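  // The enum cache is usable; the object's map is the result, from which
  // DoForInCacheArray later loads the cache itself.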
  __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
  __ jmp(&use_cache, Label::kNear);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(eax);
  CallRuntime(Runtime::kForInEnumerate, instr);
  __ bind(&use_cache);
}


void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
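  // An EnumLength of zero means there is nothing to enumerate, so the empty
  // fixed array is substituted; otherwise the enum cache is loaded from the
  // map's instance descriptors.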
  __ EnumLength(result, map);
  __ cmp(result, Immediate(Smi::kZero));
  __ j(not_equal, &load_cache, Label::kNear);
  __ mov(result, isolate()->factory()->empty_fixed_array());
  __ jmp(&done, Label::kNear);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ mov(result,
         FieldOperand(result, DescriptorArray::kEnumCacheOffset));
  __ mov(result,
         FieldOperand(result, FixedArray::SizeFor(instr->idx())));
  __ bind(&done);
  __ test(result, result);
  DeoptimizeIf(equal, instr, DeoptimizeReason::kNoCache);
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  __ cmp(ToRegister(instr->map()),
         FieldOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongMap);
}


void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ push(object);
  __ push(index);
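  // Zero the context register; Runtime::kLoadMutableDouble does not need a
  // context.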
  __ xor_(esi, esi);
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(object, eax);
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble final : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register object,
                              Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          object_(object),
          index_(index) {
    }
    void Generate() override {
      codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LLoadFieldByIndex* instr_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(
      this, instr, object, index);

  Label out_of_object, done;
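  // The index is a smi with an extra flag folded in: bit 0 of the untagged
  // value marks a mutable-double load (handled in deferred code), and the
  // sign selects in-object fields (non-negative) versus the out-of-object
  // properties backing store (negative).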
  __ test(index, Immediate(Smi::FromInt(1)));
  __ j(not_zero, deferred->entry());

  __ sar(index, 1);

  __ cmp(index, Immediate(0));
  __ j(less, &out_of_object, Label::kNear);
  __ mov(object, FieldOperand(object,
                              index,
                              times_half_pointer_size,
                              JSObject::kHeaderSize));
  __ jmp(&done, Label::kNear);

  __ bind(&out_of_object);
  __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset));
  __ neg(index);
  // Index is now equal to the out-of-object property index plus 1.
  __ mov(object, FieldOperand(object,
                              index,
                              times_half_pointer_size,
                              FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}

#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_IA32