1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #if V8_TARGET_ARCH_X64
6 
7 #include "src/crankshaft/x64/lithium-codegen-x64.h"
8 
9 #include "src/base/bits.h"
10 #include "src/builtins/builtins-constructor.h"
11 #include "src/code-factory.h"
12 #include "src/code-stubs.h"
13 #include "src/crankshaft/hydrogen-osr.h"
14 #include "src/ic/ic.h"
15 #include "src/ic/stub-cache.h"
16 #include "src/objects-inl.h"
17 
18 namespace v8 {
19 namespace internal {
20 
21 
22 // When invoking builtins, we need to record the safepoint in the middle of
23 // the invoke instruction sequence generated by the macro assembler.
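// The generator is handed to the macro assembler as the CallWrapper of the
// invoke sequence; its AfterCall hook runs immediately after the emitted call
// instruction and records a safepoint for the supplied pointer map.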
24 class SafepointGenerator final : public CallWrapper {
25  public:
26   SafepointGenerator(LCodeGen* codegen,
27                      LPointerMap* pointers,
28                      Safepoint::DeoptMode mode)
29       : codegen_(codegen),
30         pointers_(pointers),
31         deopt_mode_(mode) { }
32   virtual ~SafepointGenerator() {}
33 
34   void BeforeCall(int call_size) const override {}
35 
36   void AfterCall() const override {
37     codegen_->RecordSafepoint(pointers_, deopt_mode_);
38   }
39 
40  private:
41   LCodeGen* codegen_;
42   LPointerMap* pointers_;
43   Safepoint::DeoptMode deopt_mode_;
44 };
45 
46 
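// Conventional V8 shorthand: "__" expands to masm()->, so the code generator
// bodies below read like an assembly listing.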
47 #define __ masm()->
48 
49 bool LCodeGen::GenerateCode() {
50   LPhase phase("Z_Code generation", chunk());
51   DCHECK(is_unused());
52   status_ = GENERATING;
53 
54   // Open a frame scope to indicate that there is a frame on the stack.  The
55   // MANUAL indicates that the scope shouldn't actually generate code to set up
56   // the frame (that is done in GeneratePrologue).
57   FrameScope frame_scope(masm_, StackFrame::MANUAL);
58 
59   return GeneratePrologue() &&
60       GenerateBody() &&
61       GenerateDeferredCode() &&
62       GenerateJumpTable() &&
63       GenerateSafepointTable();
64 }
65 
66 
67 void LCodeGen::FinishCode(Handle<Code> code) {
68   DCHECK(is_done());
69   code->set_stack_slots(GetTotalFrameSlotCount());
70   code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
71   PopulateDeoptimizationData(code);
72 }
73 
74 
75 #ifdef _MSC_VER
76 void LCodeGen::MakeSureStackPagesMapped(int offset) {
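  // Windows grows the stack one guard page at a time, so a frame larger than
  // a page has to touch its pages in order; storing rax into every page of
  // the freshly reserved area keeps them mapped before they are first used.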
77   const int kPageSize = 4 * KB;
78   for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
79     __ movp(Operand(rsp, offset), rax);
80   }
81 }
82 #endif
83 
84 
85 void LCodeGen::SaveCallerDoubles() {
86   DCHECK(info()->saves_caller_doubles());
87   DCHECK(NeedsEagerFrame());
88   Comment(";;; Save clobbered callee double registers");
89   int count = 0;
90   BitVector* doubles = chunk()->allocated_double_registers();
91   BitVector::Iterator save_iterator(doubles);
92   while (!save_iterator.Done()) {
93     __ Movsd(MemOperand(rsp, count * kDoubleSize),
94              XMMRegister::from_code(save_iterator.Current()));
95     save_iterator.Advance();
96     count++;
97   }
98 }
99 
100 
101 void LCodeGen::RestoreCallerDoubles() {
102   DCHECK(info()->saves_caller_doubles());
103   DCHECK(NeedsEagerFrame());
104   Comment(";;; Restore clobbered callee double registers");
105   BitVector* doubles = chunk()->allocated_double_registers();
106   BitVector::Iterator save_iterator(doubles);
107   int count = 0;
108   while (!save_iterator.Done()) {
109     __ Movsd(XMMRegister::from_code(save_iterator.Current()),
110              MemOperand(rsp, count * kDoubleSize));
111     save_iterator.Advance();
112     count++;
113   }
114 }
115 
116 
117 bool LCodeGen::GeneratePrologue() {
118   DCHECK(is_generating());
119 
120   if (info()->IsOptimizing()) {
121     ProfileEntryHookStub::MaybeCallEntryHook(masm_);
122   }
123 
124   info()->set_prologue_offset(masm_->pc_offset());
125   if (NeedsEagerFrame()) {
126     DCHECK(!frame_is_built_);
127     frame_is_built_ = true;
128     if (info()->IsStub()) {
129       __ StubPrologue(StackFrame::STUB);
130     } else {
131       __ Prologue(info()->GeneratePreagedPrologue());
132     }
133   }
134 
135   // Reserve space for the stack slots needed by the code.
136   int slots = GetStackSlotCount();
137   if (slots > 0) {
138     if (FLAG_debug_code) {
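      // In debug builds, fill the freshly reserved slots with kSlotsZapValue
      // so that reads from uninitialized spill slots are easier to spot.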
139       __ subp(rsp, Immediate(slots * kPointerSize));
140 #ifdef _MSC_VER
141       MakeSureStackPagesMapped(slots * kPointerSize);
142 #endif
143       __ Push(rax);
144       __ Set(rax, slots);
145       __ Set(kScratchRegister, kSlotsZapValue);
146       Label loop;
147       __ bind(&loop);
148       __ movp(MemOperand(rsp, rax, times_pointer_size, 0),
149               kScratchRegister);
150       __ decl(rax);
151       __ j(not_zero, &loop);
152       __ Pop(rax);
153     } else {
154       __ subp(rsp, Immediate(slots * kPointerSize));
155 #ifdef _MSC_VER
156       MakeSureStackPagesMapped(slots * kPointerSize);
157 #endif
158     }
159 
160     if (info()->saves_caller_doubles()) {
161       SaveCallerDoubles();
162     }
163   }
164   return !is_aborted();
165 }
166 
167 
168 void LCodeGen::DoPrologue(LPrologue* instr) {
169   Comment(";;; Prologue begin");
170 
171   // Possibly allocate a local context.
172   if (info_->scope()->NeedsContext()) {
173     Comment(";;; Allocate local context");
174     bool need_write_barrier = true;
175     // Argument to NewContext is the function, which is still in rdi.
176     int slots = info_->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
177     Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
178     if (info()->scope()->is_script_scope()) {
179       __ Push(rdi);
180       __ Push(info()->scope()->scope_info());
181       __ CallRuntime(Runtime::kNewScriptContext);
182       deopt_mode = Safepoint::kLazyDeopt;
183     } else {
184       if (slots <=
185           ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
186         Callable callable = CodeFactory::FastNewFunctionContext(
187             isolate(), info()->scope()->scope_type());
188         __ Set(FastNewFunctionContextDescriptor::SlotsRegister(), slots);
189         __ Call(callable.code(), RelocInfo::CODE_TARGET);
190         // Result of FastNewFunctionContextStub is always in new space.
191         need_write_barrier = false;
192       } else {
193         __ Push(rdi);
194         __ Push(Smi::FromInt(info()->scope()->scope_type()));
195         __ CallRuntime(Runtime::kNewFunctionContext);
196       }
197     }
198     RecordSafepoint(deopt_mode);
199 
200     // Context is returned in rax.  It replaces the context passed to us.
201     // It's saved in the stack and kept live in rsi.
202     __ movp(rsi, rax);
203     __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rax);
204 
205     // Copy any necessary parameters into the context.
206     int num_parameters = info()->scope()->num_parameters();
207     int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0;
208     for (int i = first_parameter; i < num_parameters; i++) {
209       Variable* var = (i == -1) ? info()->scope()->receiver()
210                                 : info()->scope()->parameter(i);
211       if (var->IsContextSlot()) {
212         int parameter_offset = StandardFrameConstants::kCallerSPOffset +
213             (num_parameters - 1 - i) * kPointerSize;
214         // Load parameter from stack.
215         __ movp(rax, Operand(rbp, parameter_offset));
216         // Store it in the context.
217         int context_offset = Context::SlotOffset(var->index());
218         __ movp(Operand(rsi, context_offset), rax);
219         // Update the write barrier. This clobbers rax and rbx.
220         if (need_write_barrier) {
221           __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
222         } else if (FLAG_debug_code) {
223           Label done;
224           __ JumpIfInNewSpace(rsi, rax, &done, Label::kNear);
225           __ Abort(kExpectedNewSpaceObject);
226           __ bind(&done);
227         }
228       }
229     }
230     Comment(";;; End allocate local context");
231   }
232 
233   Comment(";;; Prologue end");
234 }
235 
236 
237 void LCodeGen::GenerateOsrPrologue() {
238   // Generate the OSR entry prologue at the first unknown OSR value, or if there
239   // are none, at the OSR entrypoint instruction.
240   if (osr_pc_offset_ >= 0) return;
241 
242   osr_pc_offset_ = masm()->pc_offset();
243 
244   // Adjust the frame size, subsuming the unoptimized frame into the
245   // optimized frame.
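  // For example (illustrative numbers only): if the optimized code needs 10
  // spill slots and the unoptimized frame already accounts for 4 of them,
  // only the remaining 6 * kPointerSize bytes are reserved here.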
246   int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
247   DCHECK(slots >= 0);
248   __ subp(rsp, Immediate(slots * kPointerSize));
249 }
250 
251 
252 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
253   if (instr->IsCall()) {
254     EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
255   }
256   if (!instr->IsLazyBailout() && !instr->IsGap()) {
257     safepoints_.BumpLastLazySafepointIndex();
258   }
259 }
260 
261 
262 void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
263   if (FLAG_debug_code && FLAG_enable_slow_asserts && instr->HasResult() &&
264       instr->hydrogen_value()->representation().IsInteger32() &&
265       instr->result()->IsRegister()) {
266     __ AssertZeroExtended(ToRegister(instr->result()));
267   }
268 
269   if (instr->HasResult() && instr->MustSignExtendResult(chunk())) {
270     // We sign extend the dehoisted key at the definition point when the pointer
271     // size is 64-bit. For x32 port, we sign extend the dehoisted key at the use
272     // points and MustSignExtendResult is always false. We can't use
273     // STATIC_ASSERT here as the pointer size is 32-bit for x32.
274     DCHECK(kPointerSize == kInt64Size);
275     if (instr->result()->IsRegister()) {
276       Register result_reg = ToRegister(instr->result());
277       __ movsxlq(result_reg, result_reg);
278     } else {
279       // Sign extend the 32bit result in the stack slots.
280       DCHECK(instr->result()->IsStackSlot());
281       Operand src = ToOperand(instr->result());
282       __ movsxlq(kScratchRegister, src);
283       __ movq(src, kScratchRegister);
284     }
285   }
286 }
287 
288 
289 bool LCodeGen::GenerateJumpTable() {
290   if (jump_table_.length() == 0) return !is_aborted();
291 
292   Label needs_frame;
293   Comment(";;; -------------------- Jump table --------------------");
294   for (int i = 0; i < jump_table_.length(); i++) {
295     Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
296     __ bind(&table_entry->label);
297     Address entry = table_entry->address;
298     DeoptComment(table_entry->deopt_info);
299     if (table_entry->needs_frame) {
300       DCHECK(!info()->saves_caller_doubles());
301       __ Move(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
302       __ call(&needs_frame);
303     } else {
304       if (info()->saves_caller_doubles()) {
305         DCHECK(info()->IsStub());
306         RestoreCallerDoubles();
307       }
308       __ call(entry, RelocInfo::RUNTIME_ENTRY);
309     }
310   }
311 
312   if (needs_frame.is_linked()) {
313     __ bind(&needs_frame);
314     /* stack layout
315        3: return address  <-- rsp
316        2: garbage
317        1: garbage
318        0: garbage
319     */
320     // Reserve space for stub marker.
321     __ subp(rsp, Immediate(TypedFrameConstants::kFrameTypeSize));
322     __ Push(MemOperand(
323         rsp, TypedFrameConstants::kFrameTypeSize));  // Copy return address.
324     __ Push(kScratchRegister);
325 
326     /* stack layout
327        3: return address
328        2: garbage
329        1: return address
330        0: entry address  <-- rsp
331     */
332 
333     // Create a stack frame.
334     __ movp(MemOperand(rsp, 3 * kPointerSize), rbp);
335     __ leap(rbp, MemOperand(rsp, 3 * kPointerSize));
336 
337     // This variant of deopt can only be used with stubs. Since we don't
338     // have a function pointer to install in the stack frame that we're
339     // building, install a special marker there instead.
340     DCHECK(info()->IsStub());
341     __ movp(MemOperand(rsp, 2 * kPointerSize),
342             Immediate(StackFrame::TypeToMarker(StackFrame::STUB)));
343 
344     /* stack layout
345        3: old rbp
346        2: stub marker
347        1: return address
348        0: entry address  <-- rsp
349     */
350     __ ret(0);
351   }
352 
353   return !is_aborted();
354 }
355 
356 
357 bool LCodeGen::GenerateDeferredCode() {
358   DCHECK(is_generating());
359   if (deferred_.length() > 0) {
360     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
361       LDeferredCode* code = deferred_[i];
362 
363       HValue* value =
364           instructions_->at(code->instruction_index())->hydrogen_value();
365       RecordAndWritePosition(value->position());
366 
367       Comment(";;; <@%d,#%d> "
368               "-------------------- Deferred %s --------------------",
369               code->instruction_index(),
370               code->instr()->hydrogen_value()->id(),
371               code->instr()->Mnemonic());
372       __ bind(code->entry());
373       if (NeedsDeferredFrame()) {
374         Comment(";;; Build frame");
375         DCHECK(!frame_is_built_);
376         DCHECK(info()->IsStub());
377         frame_is_built_ = true;
378         // Build the frame in such a way that rsi isn't trashed.
379         __ pushq(rbp);  // Caller's frame pointer.
380         __ Push(Immediate(StackFrame::TypeToMarker(StackFrame::STUB)));
381         __ leap(rbp, Operand(rsp, TypedFrameConstants::kFixedFrameSizeFromFp));
382         Comment(";;; Deferred code");
383       }
384       code->Generate();
385       if (NeedsDeferredFrame()) {
386         __ bind(code->done());
387         Comment(";;; Destroy frame");
388         DCHECK(frame_is_built_);
389         frame_is_built_ = false;
390         __ movp(rsp, rbp);
391         __ popq(rbp);
392       }
393       __ jmp(code->exit());
394     }
395   }
396 
397   // Deferred code is the last part of the instruction sequence. Mark
398   // the generated code as done unless we bailed out.
399   if (!is_aborted()) status_ = DONE;
400   return !is_aborted();
401 }
402 
403 
404 bool LCodeGen::GenerateSafepointTable() {
405   DCHECK(is_done());
406   safepoints_.Emit(masm(), GetTotalFrameSlotCount());
407   return !is_aborted();
408 }
409 
410 
411 Register LCodeGen::ToRegister(int index) const {
412   return Register::from_code(index);
413 }
414 
415 
416 XMMRegister LCodeGen::ToDoubleRegister(int index) const {
417   return XMMRegister::from_code(index);
418 }
419 
420 
421 Register LCodeGen::ToRegister(LOperand* op) const {
422   DCHECK(op->IsRegister());
423   return ToRegister(op->index());
424 }
425 
426 
427 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
428   DCHECK(op->IsDoubleRegister());
429   return ToDoubleRegister(op->index());
430 }
431 
432 
433 bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
434   return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
435 }
436 
437 
438 bool LCodeGen::IsExternalConstant(LConstantOperand* op) const {
439   return chunk_->LookupLiteralRepresentation(op).IsExternal();
440 }
441 
442 
443 bool LCodeGen::IsDehoistedKeyConstant(LConstantOperand* op) const {
444   return op->IsConstantOperand() &&
445       chunk_->IsDehoistedKey(chunk_->LookupConstant(op));
446 }
447 
448 
449 bool LCodeGen::IsSmiConstant(LConstantOperand* op) const {
450   return chunk_->LookupLiteralRepresentation(op).IsSmi();
451 }
452 
453 
454 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
455   return ToRepresentation(op, Representation::Integer32());
456 }
457 
458 
459 int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
460                                    const Representation& r) const {
461   HConstant* constant = chunk_->LookupConstant(op);
462   int32_t value = constant->Integer32Value();
463   if (r.IsInteger32()) return value;
464   DCHECK(SmiValuesAre31Bits() && r.IsSmiOrTagged());
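  // With 31-bit Smis the tagged value (the payload shifted up by the one-bit
  // tag) still fits in 32 bits, so it can be returned as an int32 immediate.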
465   return static_cast<int32_t>(reinterpret_cast<intptr_t>(Smi::FromInt(value)));
466 }
467 
468 
469 Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
470   HConstant* constant = chunk_->LookupConstant(op);
471   return Smi::FromInt(constant->Integer32Value());
472 }
473 
474 
475 double LCodeGen::ToDouble(LConstantOperand* op) const {
476   HConstant* constant = chunk_->LookupConstant(op);
477   DCHECK(constant->HasDoubleValue());
478   return constant->DoubleValue();
479 }
480 
481 
482 ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
483   HConstant* constant = chunk_->LookupConstant(op);
484   DCHECK(constant->HasExternalReferenceValue());
485   return constant->ExternalReferenceValue();
486 }
487 
488 
489 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
490   HConstant* constant = chunk_->LookupConstant(op);
491   DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
492   return constant->handle(isolate());
493 }
494 
495 
496 static int ArgumentsOffsetWithoutFrame(int index) {
497   DCHECK(index < 0);
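  // Without an eager frame the arguments sit directly above the return
  // address, e.g. index == -1 maps to the slot at rsp + kPCOnStackSize.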
498   return -(index + 1) * kPointerSize + kPCOnStackSize;
499 }
500 
501 
502 Operand LCodeGen::ToOperand(LOperand* op) const {
503   // Does not handle registers. In X64 assembler, plain registers are not
504   // representable as an Operand.
505   DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
506   if (NeedsEagerFrame()) {
507     return Operand(rbp, FrameSlotToFPOffset(op->index()));
508   } else {
509     // Retrieve parameter without eager stack-frame relative to the
510     // stack-pointer.
511     return Operand(rsp, ArgumentsOffsetWithoutFrame(op->index()));
512   }
513 }
514 
515 
516 void LCodeGen::WriteTranslation(LEnvironment* environment,
517                                 Translation* translation) {
518   if (environment == NULL) return;
519 
520   // The translation includes one command per value in the environment.
521   int translation_size = environment->translation_size();
522 
523   WriteTranslation(environment->outer(), translation);
524   WriteTranslationFrame(environment, translation);
525 
526   int object_index = 0;
527   int dematerialized_index = 0;
528   for (int i = 0; i < translation_size; ++i) {
529     LOperand* value = environment->values()->at(i);
530     AddToTranslation(
531         environment, translation, value, environment->HasTaggedValueAt(i),
532         environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
533   }
534 }
535 
536 
537 void LCodeGen::AddToTranslation(LEnvironment* environment,
538                                 Translation* translation,
539                                 LOperand* op,
540                                 bool is_tagged,
541                                 bool is_uint32,
542                                 int* object_index_pointer,
543                                 int* dematerialized_index_pointer) {
544   if (op == LEnvironment::materialization_marker()) {
545     int object_index = (*object_index_pointer)++;
546     if (environment->ObjectIsDuplicateAt(object_index)) {
547       int dupe_of = environment->ObjectDuplicateOfAt(object_index);
548       translation->DuplicateObject(dupe_of);
549       return;
550     }
551     int object_length = environment->ObjectLengthAt(object_index);
552     if (environment->ObjectIsArgumentsAt(object_index)) {
553       translation->BeginArgumentsObject(object_length);
554     } else {
555       translation->BeginCapturedObject(object_length);
556     }
557     int dematerialized_index = *dematerialized_index_pointer;
558     int env_offset = environment->translation_size() + dematerialized_index;
559     *dematerialized_index_pointer += object_length;
560     for (int i = 0; i < object_length; ++i) {
561       LOperand* value = environment->values()->at(env_offset + i);
562       AddToTranslation(environment,
563                        translation,
564                        value,
565                        environment->HasTaggedValueAt(env_offset + i),
566                        environment->HasUint32ValueAt(env_offset + i),
567                        object_index_pointer,
568                        dematerialized_index_pointer);
569     }
570     return;
571   }
572 
573   if (op->IsStackSlot()) {
574     int index = op->index();
575     if (is_tagged) {
576       translation->StoreStackSlot(index);
577     } else if (is_uint32) {
578       translation->StoreUint32StackSlot(index);
579     } else {
580       translation->StoreInt32StackSlot(index);
581     }
582   } else if (op->IsDoubleStackSlot()) {
583     int index = op->index();
584     translation->StoreDoubleStackSlot(index);
585   } else if (op->IsRegister()) {
586     Register reg = ToRegister(op);
587     if (is_tagged) {
588       translation->StoreRegister(reg);
589     } else if (is_uint32) {
590       translation->StoreUint32Register(reg);
591     } else {
592       translation->StoreInt32Register(reg);
593     }
594   } else if (op->IsDoubleRegister()) {
595     XMMRegister reg = ToDoubleRegister(op);
596     translation->StoreDoubleRegister(reg);
597   } else if (op->IsConstantOperand()) {
598     HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
599     int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
600     translation->StoreLiteral(src_index);
601   } else {
602     UNREACHABLE();
603   }
604 }
605 
606 
607 void LCodeGen::CallCodeGeneric(Handle<Code> code,
608                                RelocInfo::Mode mode,
609                                LInstruction* instr,
610                                SafepointMode safepoint_mode,
611                                int argc) {
612   DCHECK(instr != NULL);
613   __ call(code, mode);
614   RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc);
615 
616   // Signal that we don't inline smi code before these stubs in the
617   // optimizing code generator.
618   if (code->kind() == Code::BINARY_OP_IC ||
619       code->kind() == Code::COMPARE_IC) {
620     __ nop();
621   }
622 }
623 
624 
625 void LCodeGen::CallCode(Handle<Code> code,
626                         RelocInfo::Mode mode,
627                         LInstruction* instr) {
628   CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, 0);
629 }
630 
631 
632 void LCodeGen::CallRuntime(const Runtime::Function* function,
633                            int num_arguments,
634                            LInstruction* instr,
635                            SaveFPRegsMode save_doubles) {
636   DCHECK(instr != NULL);
637   DCHECK(instr->HasPointerMap());
638 
639   __ CallRuntime(function, num_arguments, save_doubles);
640 
641   RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
642 }
643 
644 
645 void LCodeGen::LoadContextFromDeferred(LOperand* context) {
646   if (context->IsRegister()) {
647     if (!ToRegister(context).is(rsi)) {
648       __ movp(rsi, ToRegister(context));
649     }
650   } else if (context->IsStackSlot()) {
651     __ movp(rsi, ToOperand(context));
652   } else if (context->IsConstantOperand()) {
653     HConstant* constant =
654         chunk_->LookupConstant(LConstantOperand::cast(context));
655     __ Move(rsi, Handle<Object>::cast(constant->handle(isolate())));
656   } else {
657     UNREACHABLE();
658   }
659 }
660 
661 
662 
663 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
664                                        int argc,
665                                        LInstruction* instr,
666                                        LOperand* context) {
667   LoadContextFromDeferred(context);
668 
669   __ CallRuntimeSaveDoubles(id);
670   RecordSafepointWithRegisters(
671       instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
672 }
673 
674 
675 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
676                                                     Safepoint::DeoptMode mode) {
677   environment->set_has_been_used();
678   if (!environment->HasBeenRegistered()) {
679     // Physical stack frame layout:
680     // -x ............. -4  0 ..................................... y
681     // [incoming arguments] [spill slots] [pushed outgoing arguments]
682 
683     // Layout of the environment:
684     // 0 ..................................................... size-1
685     // [parameters] [locals] [expression stack including arguments]
686 
687     // Layout of the translation:
688     // 0 ........................................................ size - 1 + 4
689     // [expression stack including arguments] [locals] [4 words] [parameters]
690     // |>------------  translation_size ------------<|
691 
692     int frame_count = 0;
693     int jsframe_count = 0;
694     for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
695       ++frame_count;
696       if (e->frame_type() == JS_FUNCTION) {
697         ++jsframe_count;
698       }
699     }
700     Translation translation(&translations_, frame_count, jsframe_count, zone());
701     WriteTranslation(environment, &translation);
702     int deoptimization_index = deoptimizations_.length();
703     int pc_offset = masm()->pc_offset();
704     environment->Register(deoptimization_index,
705                           translation.index(),
706                           (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
707     deoptimizations_.Add(environment, environment->zone());
708   }
709 }
710 
711 void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
712                             DeoptimizeReason deopt_reason,
713                             Deoptimizer::BailoutType bailout_type) {
714   LEnvironment* environment = instr->environment();
715   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
716   DCHECK(environment->HasBeenRegistered());
717   int id = environment->deoptimization_index();
718   Address entry =
719       Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
720   if (entry == NULL) {
721     Abort(kBailoutWasNotPrepared);
722     return;
723   }
724 
725   if (DeoptEveryNTimes()) {
726     ExternalReference count = ExternalReference::stress_deopt_count(isolate());
727     Label no_deopt;
728     __ pushfq();
729     __ pushq(rax);
730     Operand count_operand = masm()->ExternalOperand(count, kScratchRegister);
731     __ movl(rax, count_operand);
732     __ subl(rax, Immediate(1));
733     __ j(not_zero, &no_deopt, Label::kNear);
734     if (FLAG_trap_on_deopt) __ int3();
735     __ movl(rax, Immediate(FLAG_deopt_every_n_times));
736     __ movl(count_operand, rax);
737     __ popq(rax);
738     __ popfq();
739     DCHECK(frame_is_built_);
740     __ call(entry, RelocInfo::RUNTIME_ENTRY);
741     __ bind(&no_deopt);
742     __ movl(count_operand, rax);
743     __ popq(rax);
744     __ popfq();
745   }
746 
747   if (info()->ShouldTrapOnDeopt()) {
748     Label done;
749     if (cc != no_condition) {
750       __ j(NegateCondition(cc), &done, Label::kNear);
751     }
752     __ int3();
753     __ bind(&done);
754   }
755 
756   Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);
757 
758   DCHECK(info()->IsStub() || frame_is_built_);
759   // Go through jump table if we need to handle condition, build frame, or
760   // restore caller doubles.
761   if (cc == no_condition && frame_is_built_ &&
762       !info()->saves_caller_doubles()) {
763     DeoptComment(deopt_info);
764     __ call(entry, RelocInfo::RUNTIME_ENTRY);
765   } else {
766     Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
767                                             !frame_is_built_);
768     // We often have several deopts to the same entry, reuse the last
769     // jump entry if this is the case.
770     if (FLAG_trace_deopt || isolate()->is_profiling() ||
771         jump_table_.is_empty() ||
772         !table_entry.IsEquivalentTo(jump_table_.last())) {
773       jump_table_.Add(table_entry, zone());
774     }
775     if (cc == no_condition) {
776       __ jmp(&jump_table_.last().label);
777     } else {
778       __ j(cc, &jump_table_.last().label);
779     }
780   }
781 }
782 
783 void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
784                             DeoptimizeReason deopt_reason) {
785   Deoptimizer::BailoutType bailout_type = info()->IsStub()
786       ? Deoptimizer::LAZY
787       : Deoptimizer::EAGER;
788   DeoptimizeIf(cc, instr, deopt_reason, bailout_type);
789 }
790 
791 
792 void LCodeGen::RecordSafepointWithLazyDeopt(
793     LInstruction* instr, SafepointMode safepoint_mode, int argc) {
794   if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
795     RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
796   } else {
797     DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
798     RecordSafepointWithRegisters(
799         instr->pointer_map(), argc, Safepoint::kLazyDeopt);
800   }
801 }
802 
803 
804 void LCodeGen::RecordSafepoint(
805     LPointerMap* pointers,
806     Safepoint::Kind kind,
807     int arguments,
808     Safepoint::DeoptMode deopt_mode) {
809   DCHECK(kind == expected_safepoint_kind_);
810 
811   const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
812 
813   Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
814       kind, arguments, deopt_mode);
815   for (int i = 0; i < operands->length(); i++) {
816     LOperand* pointer = operands->at(i);
817     if (pointer->IsStackSlot()) {
818       safepoint.DefinePointerSlot(pointer->index(), zone());
819     } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
820       safepoint.DefinePointerRegister(ToRegister(pointer), zone());
821     }
822   }
823 }
824 
825 
826 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
827                                Safepoint::DeoptMode deopt_mode) {
828   RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
829 }
830 
831 
832 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
833   LPointerMap empty_pointers(zone());
834   RecordSafepoint(&empty_pointers, deopt_mode);
835 }
836 
837 
838 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
839                                             int arguments,
840                                             Safepoint::DeoptMode deopt_mode) {
841   RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
842 }
843 
844 
845 static const char* LabelType(LLabel* label) {
846   if (label->is_loop_header()) return " (loop header)";
847   if (label->is_osr_entry()) return " (OSR entry)";
848   return "";
849 }
850 
851 
852 void LCodeGen::DoLabel(LLabel* label) {
853   Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
854           current_instruction_,
855           label->hydrogen_value()->id(),
856           label->block_id(),
857           LabelType(label));
858   __ bind(label->label());
859   current_block_ = label->block_id();
860   DoGap(label);
861 }
862 
863 
864 void LCodeGen::DoParallelMove(LParallelMove* move) {
865   resolver_.Resolve(move);
866 }
867 
868 
869 void LCodeGen::DoGap(LGap* gap) {
870   for (int i = LGap::FIRST_INNER_POSITION;
871        i <= LGap::LAST_INNER_POSITION;
872        i++) {
873     LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
874     LParallelMove* move = gap->GetParallelMove(inner_pos);
875     if (move != NULL) DoParallelMove(move);
876   }
877 }
878 
879 
880 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
881   DoGap(instr);
882 }
883 
884 
885 void LCodeGen::DoParameter(LParameter* instr) {
886   // Nothing to do.
887 }
888 
889 
890 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
891   GenerateOsrPrologue();
892 }
893 
894 
895 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
896   Register dividend = ToRegister(instr->dividend());
897   int32_t divisor = instr->divisor();
898   DCHECK(dividend.is(ToRegister(instr->result())));
899 
900   // Theoretically, a variation of the branch-free code for integer division by
901   // a power of 2 (calculating the remainder via an additional multiplication
902   // (which gets simplified to an 'and') and subtraction) should be faster, and
903   // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
904   // indicate that positive dividends are heavily favored, so the branching
905   // version performs better.
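  // The mask below is Abs(divisor) - 1. Worked example (illustrative):
  // divisor == -8 gives mask == 7; for dividend == -13 the negative path
  // computes -((13) & 7) == -5, which matches -13 % -8 under JS semantics.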
906   HMod* hmod = instr->hydrogen();
907   int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
908   Label dividend_is_not_negative, done;
909   if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
910     __ testl(dividend, dividend);
911     __ j(not_sign, &dividend_is_not_negative, Label::kNear);
912     // Note that this is correct even for kMinInt operands.
913     __ negl(dividend);
914     __ andl(dividend, Immediate(mask));
915     __ negl(dividend);
916     if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
917       DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
918     }
919     __ jmp(&done, Label::kNear);
920   }
921 
922   __ bind(&dividend_is_not_negative);
923   __ andl(dividend, Immediate(mask));
924   __ bind(&done);
925 }
926 
927 
928 void LCodeGen::DoModByConstI(LModByConstI* instr) {
929   Register dividend = ToRegister(instr->dividend());
930   int32_t divisor = instr->divisor();
931   DCHECK(ToRegister(instr->result()).is(rax));
932 
933   if (divisor == 0) {
934     DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
935     return;
936   }
937 
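  // TruncatingDiv leaves the truncated quotient dividend / Abs(divisor) in
  // rdx, computed with a multiply by a precomputed magic number instead of
  // idiv; the remainder is then rebuilt in rax as dividend - quotient *
  // Abs(divisor).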
938   __ TruncatingDiv(dividend, Abs(divisor));
939   __ imull(rdx, rdx, Immediate(Abs(divisor)));
940   __ movl(rax, dividend);
941   __ subl(rax, rdx);
942 
943   // Check for negative zero.
944   HMod* hmod = instr->hydrogen();
945   if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
946     Label remainder_not_zero;
947     __ j(not_zero, &remainder_not_zero, Label::kNear);
948     __ cmpl(dividend, Immediate(0));
949     DeoptimizeIf(less, instr, DeoptimizeReason::kMinusZero);
950     __ bind(&remainder_not_zero);
951   }
952 }
953 
954 
955 void LCodeGen::DoModI(LModI* instr) {
956   HMod* hmod = instr->hydrogen();
957 
958   Register left_reg = ToRegister(instr->left());
959   DCHECK(left_reg.is(rax));
960   Register right_reg = ToRegister(instr->right());
961   DCHECK(!right_reg.is(rax));
962   DCHECK(!right_reg.is(rdx));
963   Register result_reg = ToRegister(instr->result());
964   DCHECK(result_reg.is(rdx));
965 
966   Label done;
967   // Check for x % 0, idiv would signal a divide error. We have to
968   // deopt in this case because we can't return a NaN.
969   if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
970     __ testl(right_reg, right_reg);
971     DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
972   }
973 
974   // Check for kMinInt % -1, idiv would signal a divide error. We
975   // have to deopt if we care about -0, because we can't return that.
976   if (hmod->CheckFlag(HValue::kCanOverflow)) {
977     Label no_overflow_possible;
978     __ cmpl(left_reg, Immediate(kMinInt));
979     __ j(not_zero, &no_overflow_possible, Label::kNear);
980     __ cmpl(right_reg, Immediate(-1));
981     if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
982       DeoptimizeIf(equal, instr, DeoptimizeReason::kMinusZero);
983     } else {
984       __ j(not_equal, &no_overflow_possible, Label::kNear);
985       __ Set(result_reg, 0);
986       __ jmp(&done, Label::kNear);
987     }
988     __ bind(&no_overflow_possible);
989   }
990 
991   // Sign extend dividend in eax into edx:eax, since we are using only the low
992   // 32 bits of the values.
993   __ cdq();
994 
995   // If we care about -0, test if the dividend is <0 and the result is 0.
996   if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
997     Label positive_left;
998     __ testl(left_reg, left_reg);
999     __ j(not_sign, &positive_left, Label::kNear);
1000     __ idivl(right_reg);
1001     __ testl(result_reg, result_reg);
1002     DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
1003     __ jmp(&done, Label::kNear);
1004     __ bind(&positive_left);
1005   }
1006   __ idivl(right_reg);
1007   __ bind(&done);
1008 }
1009 
1010 
1011 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
1012   Register dividend = ToRegister(instr->dividend());
1013   int32_t divisor = instr->divisor();
1014   DCHECK(dividend.is(ToRegister(instr->result())));
1015 
1016   // If the divisor is positive, things are easy: There can be no deopts and we
1017   // can simply do an arithmetic right shift.
1018   if (divisor == 1) return;
1019   int32_t shift = WhichPowerOf2Abs(divisor);
1020   if (divisor > 1) {
1021     __ sarl(dividend, Immediate(shift));
1022     return;
1023   }
1024 
1025   // If the divisor is negative, we have to negate and handle edge cases.
1026   __ negl(dividend);
1027   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1028     DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
1029   }
1030 
1031   // Dividing by -1 is basically negation, unless we overflow.
1032   if (divisor == -1) {
1033     if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1034       DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
1035     }
1036     return;
1037   }
1038 
1039   // If the negation could not overflow, simply shifting is OK.
1040   if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1041     __ sarl(dividend, Immediate(shift));
1042     return;
1043   }
1044 
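  // Otherwise the dividend may have been kMinInt, in which case the negl
  // above overflowed and left kMinInt in place; the correct quotient
  // kMinInt / divisor (an exact power of two) is loaded as a constant
  // instead of being computed by shifting.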
1045   Label not_kmin_int, done;
1046   __ j(no_overflow, &not_kmin_int, Label::kNear);
1047   __ movl(dividend, Immediate(kMinInt / divisor));
1048   __ jmp(&done, Label::kNear);
1049   __ bind(&not_kmin_int);
1050   __ sarl(dividend, Immediate(shift));
1051   __ bind(&done);
1052 }
1053 
1054 
1055 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
1056   Register dividend = ToRegister(instr->dividend());
1057   int32_t divisor = instr->divisor();
1058   DCHECK(ToRegister(instr->result()).is(rdx));
1059 
1060   if (divisor == 0) {
1061     DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
1062     return;
1063   }
1064 
1065   // Check for (0 / -x) that will produce negative zero.
1066   HMathFloorOfDiv* hdiv = instr->hydrogen();
1067   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1068     __ testl(dividend, dividend);
1069     DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
1070   }
1071 
1072   // Easy case: We need no dynamic check for the dividend and the flooring
1073   // division is the same as the truncating division.
1074   if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
1075       (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
1076     __ TruncatingDiv(dividend, Abs(divisor));
1077     if (divisor < 0) __ negl(rdx);
1078     return;
1079   }
1080 
1081   // In the general case we may need to adjust before and after the truncating
1082   // division to get a flooring division.
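  // The identity used below: when dividend and divisor have opposite signs,
  // floor(x / d) == trunc((x + (d > 0 ? 1 : -1)) / d) - 1, which is what the
  // needs_adjustment path computes.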
1083   Register temp = ToRegister(instr->temp3());
1084   DCHECK(!temp.is(dividend) && !temp.is(rax) && !temp.is(rdx));
1085   Label needs_adjustment, done;
1086   __ cmpl(dividend, Immediate(0));
1087   __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
1088   __ TruncatingDiv(dividend, Abs(divisor));
1089   if (divisor < 0) __ negl(rdx);
1090   __ jmp(&done, Label::kNear);
1091   __ bind(&needs_adjustment);
1092   __ leal(temp, Operand(dividend, divisor > 0 ? 1 : -1));
1093   __ TruncatingDiv(temp, Abs(divisor));
1094   if (divisor < 0) __ negl(rdx);
1095   __ decl(rdx);
1096   __ bind(&done);
1097 }
1098 
1099 
1100 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
1101 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
1102   HBinaryOperation* hdiv = instr->hydrogen();
1103   Register dividend = ToRegister(instr->dividend());
1104   Register divisor = ToRegister(instr->divisor());
1105   Register remainder = ToRegister(instr->temp());
1106   Register result = ToRegister(instr->result());
1107   DCHECK(dividend.is(rax));
1108   DCHECK(remainder.is(rdx));
1109   DCHECK(result.is(rax));
1110   DCHECK(!divisor.is(rax));
1111   DCHECK(!divisor.is(rdx));
1112 
1113   // Check for x / 0.
1114   if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1115     __ testl(divisor, divisor);
1116     DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
1117   }
1118 
1119   // Check for (0 / -x) that will produce negative zero.
1120   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1121     Label dividend_not_zero;
1122     __ testl(dividend, dividend);
1123     __ j(not_zero, &dividend_not_zero, Label::kNear);
1124     __ testl(divisor, divisor);
1125     DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
1126     __ bind(&dividend_not_zero);
1127   }
1128 
1129   // Check for (kMinInt / -1).
1130   if (hdiv->CheckFlag(HValue::kCanOverflow)) {
1131     Label dividend_not_min_int;
1132     __ cmpl(dividend, Immediate(kMinInt));
1133     __ j(not_zero, &dividend_not_min_int, Label::kNear);
1134     __ cmpl(divisor, Immediate(-1));
1135     DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
1136     __ bind(&dividend_not_min_int);
1137   }
1138 
1139   // Sign extend to rdx (= remainder).
1140   __ cdq();
1141   __ idivl(divisor);
1142 
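  // idivl truncates toward zero; for flooring semantics the quotient must be
  // decremented whenever a non-zero remainder has the opposite sign from the
  // divisor. The xorl puts that sign difference into the remainder's sign
  // bit, sarl by 31 turns it into -1 or 0, and the addl applies it.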
1143   Label done;
1144   __ testl(remainder, remainder);
1145   __ j(zero, &done, Label::kNear);
1146   __ xorl(remainder, divisor);
1147   __ sarl(remainder, Immediate(31));
1148   __ addl(result, remainder);
1149   __ bind(&done);
1150 }
1151 
1152 
1153 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
1154   Register dividend = ToRegister(instr->dividend());
1155   int32_t divisor = instr->divisor();
1156   Register result = ToRegister(instr->result());
1157   DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
1158   DCHECK(!result.is(dividend));
1159 
1160   // Check for (0 / -x) that will produce negative zero.
1161   HDiv* hdiv = instr->hydrogen();
1162   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1163     __ testl(dividend, dividend);
1164     DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
1165   }
1166   // Check for (kMinInt / -1).
1167   if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
1168     __ cmpl(dividend, Immediate(kMinInt));
1169     DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
1170   }
1171   // Deoptimize if remainder will not be 0.
1172   if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
1173       divisor != 1 && divisor != -1) {
1174     int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1175     __ testl(dividend, Immediate(mask));
1176     DeoptimizeIf(not_zero, instr, DeoptimizeReason::kLostPrecision);
1177   }
1178   __ Move(result, dividend);
1179   int32_t shift = WhichPowerOf2Abs(divisor);
1180   if (shift > 0) {
1181     // The arithmetic shift is always OK, the 'if' is an optimization only.
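    // The next three instructions add a bias of (2^shift - 1) to negative
    // dividends only: the sign is spread into a 0/-1 mask whose low 'shift'
    // bits are kept, so the final arithmetic shift rounds toward zero instead
    // of toward minus infinity.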
1182     if (shift > 1) __ sarl(result, Immediate(31));
1183     __ shrl(result, Immediate(32 - shift));
1184     __ addl(result, dividend);
1185     __ sarl(result, Immediate(shift));
1186   }
1187   if (divisor < 0) __ negl(result);
1188 }
1189 
1190 
1191 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
1192   Register dividend = ToRegister(instr->dividend());
1193   int32_t divisor = instr->divisor();
1194   DCHECK(ToRegister(instr->result()).is(rdx));
1195 
1196   if (divisor == 0) {
1197     DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
1198     return;
1199   }
1200 
1201   // Check for (0 / -x) that will produce negative zero.
1202   HDiv* hdiv = instr->hydrogen();
1203   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1204     __ testl(dividend, dividend);
1205     DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
1206   }
1207 
1208   __ TruncatingDiv(dividend, Abs(divisor));
1209   if (divisor < 0) __ negl(rdx);
1210 
1211   if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1212     __ movl(rax, rdx);
1213     __ imull(rax, rax, Immediate(divisor));
1214     __ subl(rax, dividend);
1215     DeoptimizeIf(not_equal, instr, DeoptimizeReason::kLostPrecision);
1216   }
1217 }
1218 
1219 
1220 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
1221 void LCodeGen::DoDivI(LDivI* instr) {
1222   HBinaryOperation* hdiv = instr->hydrogen();
1223   Register dividend = ToRegister(instr->dividend());
1224   Register divisor = ToRegister(instr->divisor());
1225   Register remainder = ToRegister(instr->temp());
1226   DCHECK(dividend.is(rax));
1227   DCHECK(remainder.is(rdx));
1228   DCHECK(ToRegister(instr->result()).is(rax));
1229   DCHECK(!divisor.is(rax));
1230   DCHECK(!divisor.is(rdx));
1231 
1232   // Check for x / 0.
1233   if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1234     __ testl(divisor, divisor);
1235     DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
1236   }
1237 
1238   // Check for (0 / -x) that will produce negative zero.
1239   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1240     Label dividend_not_zero;
1241     __ testl(dividend, dividend);
1242     __ j(not_zero, &dividend_not_zero, Label::kNear);
1243     __ testl(divisor, divisor);
1244     DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
1245     __ bind(&dividend_not_zero);
1246   }
1247 
1248   // Check for (kMinInt / -1).
1249   if (hdiv->CheckFlag(HValue::kCanOverflow)) {
1250     Label dividend_not_min_int;
1251     __ cmpl(dividend, Immediate(kMinInt));
1252     __ j(not_zero, &dividend_not_min_int, Label::kNear);
1253     __ cmpl(divisor, Immediate(-1));
1254     DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
1255     __ bind(&dividend_not_min_int);
1256   }
1257 
1258   // Sign extend to rdx (= remainder).
1259   __ cdq();
1260   __ idivl(divisor);
1261 
1262   if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1263     // Deoptimize if remainder is not 0.
1264     __ testl(remainder, remainder);
1265     DeoptimizeIf(not_zero, instr, DeoptimizeReason::kLostPrecision);
1266   }
1267 }
1268 
1269 
1270 void LCodeGen::DoMulI(LMulI* instr) {
1271   Register left = ToRegister(instr->left());
1272   LOperand* right = instr->right();
1273 
1274   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1275     if (instr->hydrogen_value()->representation().IsSmi()) {
1276       __ movp(kScratchRegister, left);
1277     } else {
1278       __ movl(kScratchRegister, left);
1279     }
1280   }
1281 
1282   bool can_overflow =
1283       instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1284   if (right->IsConstantOperand()) {
1285     int32_t right_value = ToInteger32(LConstantOperand::cast(right));
1286     if (right_value == -1) {
1287       __ negl(left);
1288     } else if (right_value == 0) {
1289       __ xorl(left, left);
1290     } else if (right_value == 2) {
1291       __ addl(left, left);
1292     } else if (!can_overflow) {
1293       // If the multiplication is known to not overflow, we
1294       // can use operations that don't set the overflow flag
1295       // correctly.
1296       switch (right_value) {
1297         case 1:
1298           // Do nothing.
1299           break;
1300         case 3:
1301           __ leal(left, Operand(left, left, times_2, 0));
1302           break;
1303         case 4:
1304           __ shll(left, Immediate(2));
1305           break;
1306         case 5:
1307           __ leal(left, Operand(left, left, times_4, 0));
1308           break;
1309         case 8:
1310           __ shll(left, Immediate(3));
1311           break;
1312         case 9:
1313           __ leal(left, Operand(left, left, times_8, 0));
1314           break;
1315         case 16:
1316           __ shll(left, Immediate(4));
1317           break;
1318         default:
1319           __ imull(left, left, Immediate(right_value));
1320           break;
1321       }
1322     } else {
1323       __ imull(left, left, Immediate(right_value));
1324     }
1325   } else if (right->IsStackSlot()) {
1326     if (instr->hydrogen_value()->representation().IsSmi()) {
1327       __ SmiToInteger64(left, left);
1328       __ imulp(left, ToOperand(right));
1329     } else {
1330       __ imull(left, ToOperand(right));
1331     }
1332   } else {
1333     if (instr->hydrogen_value()->representation().IsSmi()) {
1334       __ SmiToInteger64(left, left);
1335       __ imulp(left, ToRegister(right));
1336     } else {
1337       __ imull(left, ToRegister(right));
1338     }
1339   }
1340 
1341   if (can_overflow) {
1342     DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
1343   }
1344 
1345   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1346     // Bail out if the result is supposed to be negative zero.
1347     Label done;
1348     if (instr->hydrogen_value()->representation().IsSmi()) {
1349       __ testp(left, left);
1350     } else {
1351       __ testl(left, left);
1352     }
1353     __ j(not_zero, &done, Label::kNear);
1354     if (right->IsConstantOperand()) {
1355       // Constant can't be represented as 32-bit Smi due to immediate size
1356       // limit.
1357       DCHECK(SmiValuesAre32Bits()
1358           ? !instr->hydrogen_value()->representation().IsSmi()
1359           : SmiValuesAre31Bits());
1360       if (ToInteger32(LConstantOperand::cast(right)) < 0) {
1361         DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
1362       } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
1363         __ cmpl(kScratchRegister, Immediate(0));
1364         DeoptimizeIf(less, instr, DeoptimizeReason::kMinusZero);
1365       }
1366     } else if (right->IsStackSlot()) {
1367       if (instr->hydrogen_value()->representation().IsSmi()) {
1368         __ orp(kScratchRegister, ToOperand(right));
1369       } else {
1370         __ orl(kScratchRegister, ToOperand(right));
1371       }
1372       DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
1373     } else {
1374       // Test the non-zero operand for negative sign.
1375       if (instr->hydrogen_value()->representation().IsSmi()) {
1376         __ orp(kScratchRegister, ToRegister(right));
1377       } else {
1378         __ orl(kScratchRegister, ToRegister(right));
1379       }
1380       DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
1381     }
1382     __ bind(&done);
1383   }
1384 }
1385 
1386 
1387 void LCodeGen::DoBitI(LBitI* instr) {
1388   LOperand* left = instr->left();
1389   LOperand* right = instr->right();
1390   DCHECK(left->Equals(instr->result()));
1391   DCHECK(left->IsRegister());
1392 
1393   if (right->IsConstantOperand()) {
1394     int32_t right_operand =
1395         ToRepresentation(LConstantOperand::cast(right),
1396                          instr->hydrogen()->right()->representation());
1397     switch (instr->op()) {
1398       case Token::BIT_AND:
1399         __ andl(ToRegister(left), Immediate(right_operand));
1400         break;
1401       case Token::BIT_OR:
1402         __ orl(ToRegister(left), Immediate(right_operand));
1403         break;
1404       case Token::BIT_XOR:
1405         if (right_operand == int32_t(~0)) {
1406           __ notl(ToRegister(left));
1407         } else {
1408           __ xorl(ToRegister(left), Immediate(right_operand));
1409         }
1410         break;
1411       default:
1412         UNREACHABLE();
1413         break;
1414     }
1415   } else if (right->IsStackSlot()) {
1416     switch (instr->op()) {
1417       case Token::BIT_AND:
1418         if (instr->IsInteger32()) {
1419           __ andl(ToRegister(left), ToOperand(right));
1420         } else {
1421           __ andp(ToRegister(left), ToOperand(right));
1422         }
1423         break;
1424       case Token::BIT_OR:
1425         if (instr->IsInteger32()) {
1426           __ orl(ToRegister(left), ToOperand(right));
1427         } else {
1428           __ orp(ToRegister(left), ToOperand(right));
1429         }
1430         break;
1431       case Token::BIT_XOR:
1432         if (instr->IsInteger32()) {
1433           __ xorl(ToRegister(left), ToOperand(right));
1434         } else {
1435           __ xorp(ToRegister(left), ToOperand(right));
1436         }
1437         break;
1438       default:
1439         UNREACHABLE();
1440         break;
1441     }
1442   } else {
1443     DCHECK(right->IsRegister());
1444     switch (instr->op()) {
1445       case Token::BIT_AND:
1446         if (instr->IsInteger32()) {
1447           __ andl(ToRegister(left), ToRegister(right));
1448         } else {
1449           __ andp(ToRegister(left), ToRegister(right));
1450         }
1451         break;
1452       case Token::BIT_OR:
1453         if (instr->IsInteger32()) {
1454           __ orl(ToRegister(left), ToRegister(right));
1455         } else {
1456           __ orp(ToRegister(left), ToRegister(right));
1457         }
1458         break;
1459       case Token::BIT_XOR:
1460         if (instr->IsInteger32()) {
1461           __ xorl(ToRegister(left), ToRegister(right));
1462         } else {
1463           __ xorp(ToRegister(left), ToRegister(right));
1464         }
1465         break;
1466       default:
1467         UNREACHABLE();
1468         break;
1469     }
1470   }
1471 }
1472 
1473 
1474 void LCodeGen::DoShiftI(LShiftI* instr) {
1475   LOperand* left = instr->left();
1476   LOperand* right = instr->right();
1477   DCHECK(left->Equals(instr->result()));
1478   DCHECK(left->IsRegister());
1479   if (right->IsRegister()) {
1480     DCHECK(ToRegister(right).is(rcx));
1481 
1482     switch (instr->op()) {
1483       case Token::ROR:
1484         __ rorl_cl(ToRegister(left));
1485         break;
1486       case Token::SAR:
1487         __ sarl_cl(ToRegister(left));
1488         break;
1489       case Token::SHR:
1490         __ shrl_cl(ToRegister(left));
1491         if (instr->can_deopt()) {
1492           __ testl(ToRegister(left), ToRegister(left));
1493           DeoptimizeIf(negative, instr, DeoptimizeReason::kNegativeValue);
1494         }
1495         break;
1496       case Token::SHL:
1497         __ shll_cl(ToRegister(left));
1498         break;
1499       default:
1500         UNREACHABLE();
1501         break;
1502     }
1503   } else {
1504     int32_t value = ToInteger32(LConstantOperand::cast(right));
1505     uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1506     switch (instr->op()) {
1507       case Token::ROR:
1508         if (shift_count != 0) {
1509           __ rorl(ToRegister(left), Immediate(shift_count));
1510         }
1511         break;
1512       case Token::SAR:
1513         if (shift_count != 0) {
1514           __ sarl(ToRegister(left), Immediate(shift_count));
1515         }
1516         break;
1517       case Token::SHR:
1518         if (shift_count != 0) {
1519           __ shrl(ToRegister(left), Immediate(shift_count));
1520         } else if (instr->can_deopt()) {
1521           __ testl(ToRegister(left), ToRegister(left));
1522           DeoptimizeIf(negative, instr, DeoptimizeReason::kNegativeValue);
1523         }
1524         break;
1525       case Token::SHL:
1526         if (shift_count != 0) {
1527           if (instr->hydrogen_value()->representation().IsSmi()) {
1528             if (SmiValuesAre32Bits()) {
1529               __ shlp(ToRegister(left), Immediate(shift_count));
1530             } else {
1531               DCHECK(SmiValuesAre31Bits());
1532               if (instr->can_deopt()) {
1533                 if (shift_count != 1) {
1534                   __ shll(ToRegister(left), Immediate(shift_count - 1));
1535                 }
1536                 __ Integer32ToSmi(ToRegister(left), ToRegister(left));
1537                 DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
1538               } else {
1539                 __ shll(ToRegister(left), Immediate(shift_count));
1540               }
1541             }
1542           } else {
1543             __ shll(ToRegister(left), Immediate(shift_count));
1544           }
1545         }
1546         break;
1547       default:
1548         UNREACHABLE();
1549         break;
1550     }
1551   }
1552 }
1553 
1554 
1555 void LCodeGen::DoSubI(LSubI* instr) {
1556   LOperand* left = instr->left();
1557   LOperand* right = instr->right();
1558   DCHECK(left->Equals(instr->result()));
1559 
1560   if (right->IsConstantOperand()) {
1561     int32_t right_operand =
1562         ToRepresentation(LConstantOperand::cast(right),
1563                          instr->hydrogen()->right()->representation());
1564     __ subl(ToRegister(left), Immediate(right_operand));
1565   } else if (right->IsRegister()) {
1566     if (instr->hydrogen_value()->representation().IsSmi()) {
1567       __ subp(ToRegister(left), ToRegister(right));
1568     } else {
1569       __ subl(ToRegister(left), ToRegister(right));
1570     }
1571   } else {
1572     if (instr->hydrogen_value()->representation().IsSmi()) {
1573       __ subp(ToRegister(left), ToOperand(right));
1574     } else {
1575       __ subl(ToRegister(left), ToOperand(right));
1576     }
1577   }
1578 
1579   if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1580     DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
1581   }
1582 }
1583 
1584 
1585 void LCodeGen::DoConstantI(LConstantI* instr) {
1586   Register dst = ToRegister(instr->result());
1587   if (instr->value() == 0) {
1588     __ xorl(dst, dst);
1589   } else {
1590     __ movl(dst, Immediate(instr->value()));
1591   }
1592 }
1593 
1594 
1595 void LCodeGen::DoConstantS(LConstantS* instr) {
1596   __ Move(ToRegister(instr->result()), instr->value());
1597 }
1598 
1599 
1600 void LCodeGen::DoConstantD(LConstantD* instr) {
1601   __ Move(ToDoubleRegister(instr->result()), instr->bits());
1602 }
1603 
1604 
1605 void LCodeGen::DoConstantE(LConstantE* instr) {
1606   __ LoadAddress(ToRegister(instr->result()), instr->value());
1607 }
1608 
1609 
1610 void LCodeGen::DoConstantT(LConstantT* instr) {
1611   Handle<Object> object = instr->value(isolate());
1612   AllowDeferredHandleDereference smi_check;
1613   __ Move(ToRegister(instr->result()), object);
1614 }
1615 
1616 
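// Builds an operand addressing a character in a sequential string. Constant
// indices are folded into the displacement (doubled for two-byte strings);
// otherwise the index register is combined with a times_1 or times_2 scale
// factor and the sequential-string header offset.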
1617 Operand LCodeGen::BuildSeqStringOperand(Register string,
1618                                         LOperand* index,
1619                                         String::Encoding encoding) {
1620   if (index->IsConstantOperand()) {
1621     int offset = ToInteger32(LConstantOperand::cast(index));
1622     if (encoding == String::TWO_BYTE_ENCODING) {
1623       offset *= kUC16Size;
1624     }
1625     STATIC_ASSERT(kCharSize == 1);
1626     return FieldOperand(string, SeqString::kHeaderSize + offset);
1627   }
1628   return FieldOperand(
1629       string, ToRegister(index),
1630       encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
1631       SeqString::kHeaderSize);
1632 }
1633 
1634 
1635 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1636   String::Encoding encoding = instr->hydrogen()->encoding();
1637   Register result = ToRegister(instr->result());
1638   Register string = ToRegister(instr->string());
1639 
1640   if (FLAG_debug_code) {
1641     __ Push(string);
1642     __ movp(string, FieldOperand(string, HeapObject::kMapOffset));
1643     __ movzxbp(string, FieldOperand(string, Map::kInstanceTypeOffset));
1644 
1645     __ andb(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
1646     static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1647     static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1648     __ cmpp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
1649                               ? one_byte_seq_type : two_byte_seq_type));
1650     __ Check(equal, kUnexpectedStringType);
1651     __ Pop(string);
1652   }
1653 
1654   Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1655   if (encoding == String::ONE_BYTE_ENCODING) {
1656     __ movzxbl(result, operand);
1657   } else {
1658     __ movzxwl(result, operand);
1659   }
1660 }
1661 
1662 
1663 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1664   String::Encoding encoding = instr->hydrogen()->encoding();
1665   Register string = ToRegister(instr->string());
1666 
1667   if (FLAG_debug_code) {
1668     Register value = ToRegister(instr->value());
1669     Register index = ToRegister(instr->index());
1670     static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1671     static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1672     int encoding_mask =
1673         instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
1674         ? one_byte_seq_type : two_byte_seq_type;
1675     __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
1676   }
1677 
1678   Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1679   if (instr->value()->IsConstantOperand()) {
1680     int value = ToInteger32(LConstantOperand::cast(instr->value()));
1681     DCHECK_LE(0, value);
1682     if (encoding == String::ONE_BYTE_ENCODING) {
1683       DCHECK_LE(value, String::kMaxOneByteCharCode);
1684       __ movb(operand, Immediate(value));
1685     } else {
1686       DCHECK_LE(value, String::kMaxUtf16CodeUnit);
1687       __ movw(operand, Immediate(value));
1688     }
1689   } else {
1690     Register value = ToRegister(instr->value());
1691     if (encoding == String::ONE_BYTE_ENCODING) {
1692       __ movb(operand, value);
1693     } else {
1694       __ movw(operand, value);
1695     }
1696   }
1697 }
1698 
1699 
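// Integer, smi, or external-pointer addition. When the result register is
// distinct from the left operand the sum is emitted as lea, which writes a
// third register without clobbering its inputs; lea sets no flags, which is
// presumably why the overflow deopt check only appears on the add path
// below.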
1700 void LCodeGen::DoAddI(LAddI* instr) {
1701   LOperand* left = instr->left();
1702   LOperand* right = instr->right();
1703 
1704   Representation target_rep = instr->hydrogen()->representation();
1705   bool is_p = target_rep.IsSmi() || target_rep.IsExternal();
1706 
1707   if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
1708     if (right->IsConstantOperand()) {
1709       // Smi immediates are not supported when smis are 32 bits wide.
1710       DCHECK(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
1711       int32_t offset =
1712           ToRepresentation(LConstantOperand::cast(right),
1713                            instr->hydrogen()->right()->representation());
1714       if (is_p) {
1715         __ leap(ToRegister(instr->result()),
1716                 MemOperand(ToRegister(left), offset));
1717       } else {
1718         __ leal(ToRegister(instr->result()),
1719                 MemOperand(ToRegister(left), offset));
1720       }
1721     } else {
1722       Operand address(ToRegister(left), ToRegister(right), times_1, 0);
1723       if (is_p) {
1724         __ leap(ToRegister(instr->result()), address);
1725       } else {
1726         __ leal(ToRegister(instr->result()), address);
1727       }
1728     }
1729   } else {
1730     if (right->IsConstantOperand()) {
1731       // Smi immediates are not supported when smis are 32 bits wide.
1732       DCHECK(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
1733       int32_t right_operand =
1734           ToRepresentation(LConstantOperand::cast(right),
1735                            instr->hydrogen()->right()->representation());
1736       if (is_p) {
1737         __ addp(ToRegister(left), Immediate(right_operand));
1738       } else {
1739         __ addl(ToRegister(left), Immediate(right_operand));
1740       }
1741     } else if (right->IsRegister()) {
1742       if (is_p) {
1743         __ addp(ToRegister(left), ToRegister(right));
1744       } else {
1745         __ addl(ToRegister(left), ToRegister(right));
1746       }
1747     } else {
1748       if (is_p) {
1749         __ addp(ToRegister(left), ToOperand(right));
1750       } else {
1751         __ addl(ToRegister(left), ToOperand(right));
1752       }
1753     }
1754     if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1755       DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
1756     }
1757   }
1758 }
1759 
1760 
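// Math.min/Math.max. The integer path compares the operands and copies the
// right one into the result unless the left already wins. The double path
// handles the IEEE corner cases explicitly: a NaN input is returned as-is,
// and when both inputs are zero the sign bits are combined so that, for
// example, min(+0, -0) is -0 (orpd keeps a set sign bit) while max(+0, -0)
// is +0 (andpd keeps the sign only if both inputs are -0).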
1761 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1762   LOperand* left = instr->left();
1763   LOperand* right = instr->right();
1764   DCHECK(left->Equals(instr->result()));
1765   HMathMinMax::Operation operation = instr->hydrogen()->operation();
1766   if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
1767     Label return_left;
1768     Condition condition = (operation == HMathMinMax::kMathMin)
1769         ? less_equal
1770         : greater_equal;
1771     Register left_reg = ToRegister(left);
1772     if (right->IsConstantOperand()) {
1773       Immediate right_imm = Immediate(
1774           ToRepresentation(LConstantOperand::cast(right),
1775                            instr->hydrogen()->right()->representation()));
1776       DCHECK(SmiValuesAre32Bits()
1777           ? !instr->hydrogen()->representation().IsSmi()
1778           : SmiValuesAre31Bits());
1779       __ cmpl(left_reg, right_imm);
1780       __ j(condition, &return_left, Label::kNear);
1781       __ movl(left_reg, right_imm);
1782     } else if (right->IsRegister()) {
1783       Register right_reg = ToRegister(right);
1784       if (instr->hydrogen_value()->representation().IsSmi()) {
1785         __ cmpp(left_reg, right_reg);
1786       } else {
1787         __ cmpl(left_reg, right_reg);
1788       }
1789       __ j(condition, &return_left, Label::kNear);
1790       __ movp(left_reg, right_reg);
1791     } else {
1792       Operand right_op = ToOperand(right);
1793       if (instr->hydrogen_value()->representation().IsSmi()) {
1794         __ cmpp(left_reg, right_op);
1795       } else {
1796         __ cmpl(left_reg, right_op);
1797       }
1798       __ j(condition, &return_left, Label::kNear);
1799       __ movp(left_reg, right_op);
1800     }
1801     __ bind(&return_left);
1802   } else {
1803     DCHECK(instr->hydrogen()->representation().IsDouble());
1804     Label not_nan, distinct, return_left, return_right;
1805     Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
1806     XMMRegister left_reg = ToDoubleRegister(left);
1807     XMMRegister right_reg = ToDoubleRegister(right);
1808     __ Ucomisd(left_reg, right_reg);
1809     __ j(parity_odd, &not_nan, Label::kNear);  // Neither is NaN.
1810 
1811     // One of the numbers is NaN. Find which one and return it.
1812     __ Ucomisd(left_reg, left_reg);
1813     __ j(parity_even, &return_left, Label::kNear);  // left is NaN.
1814     __ jmp(&return_right, Label::kNear);            // right is NaN.
1815 
1816     __ bind(&not_nan);
1817     __ j(not_equal, &distinct, Label::kNear);  // left != right.
1818 
1819     // left == right
1820     XMMRegister xmm_scratch = double_scratch0();
1821     __ Xorpd(xmm_scratch, xmm_scratch);
1822     __ Ucomisd(left_reg, xmm_scratch);
1823     __ j(not_equal, &return_left, Label::kNear);  // left == right != 0.
1824 
1825     // At this point, both left and right are either +0 or -0.
1826     if (operation == HMathMinMax::kMathMin) {
1827       __ Orpd(left_reg, right_reg);
1828     } else {
1829       __ Andpd(left_reg, right_reg);
1830     }
1831     __ jmp(&return_left, Label::kNear);
1832 
1833     __ bind(&distinct);
1834     __ j(condition, &return_left, Label::kNear);
1835 
1836     __ bind(&return_right);
1837     __ Movapd(left_reg, right_reg);
1838 
1839     __ bind(&return_left);
1840   }
1841 }
1842 
1843 
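// Double-precision add/sub/mul/div/mod. With AVX available the three-operand
// VEX encodings (vaddsd etc.) write the result register directly; the SSE2
// fallback uses the two-operand forms, which require the result to alias the
// left input. Modulus is handed off to the C function behind
// mod_two_doubles_operation.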
1844 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1845   XMMRegister left = ToDoubleRegister(instr->left());
1846   XMMRegister right = ToDoubleRegister(instr->right());
1847   XMMRegister result = ToDoubleRegister(instr->result());
1848   switch (instr->op()) {
1849     case Token::ADD:
1850       if (CpuFeatures::IsSupported(AVX)) {
1851         CpuFeatureScope scope(masm(), AVX);
1852         __ vaddsd(result, left, right);
1853       } else {
1854         DCHECK(result.is(left));
1855         __ addsd(left, right);
1856       }
1857       break;
1858     case Token::SUB:
1859       if (CpuFeatures::IsSupported(AVX)) {
1860         CpuFeatureScope scope(masm(), AVX);
1861         __ vsubsd(result, left, right);
1862       } else {
1863         DCHECK(result.is(left));
1864         __ subsd(left, right);
1865       }
1866       break;
1867     case Token::MUL:
1868       if (CpuFeatures::IsSupported(AVX)) {
1869         CpuFeatureScope scope(masm(), AVX);
1870         __ vmulsd(result, left, right);
1871       } else {
1872         DCHECK(result.is(left));
1873         __ mulsd(left, right);
1874       }
1875       break;
1876     case Token::DIV:
1877       if (CpuFeatures::IsSupported(AVX)) {
1878         CpuFeatureScope scope(masm(), AVX);
1879         __ vdivsd(result, left, right);
1880       } else {
1881         DCHECK(result.is(left));
1882         __ divsd(left, right);
1883       }
1884       // Don't delete this mov. It may improve performance on some CPUs
1885       // when there is a (v)mulsd depending on the result.
1886       __ Movapd(result, result);
1887       break;
1888     case Token::MOD: {
1889       DCHECK(left.is(xmm0));
1890       DCHECK(right.is(xmm1));
1891       DCHECK(result.is(xmm0));
1892       __ PrepareCallCFunction(2);
1893       __ CallCFunction(
1894           ExternalReference::mod_two_doubles_operation(isolate()), 2);
1895       break;
1896     }
1897     default:
1898       UNREACHABLE();
1899       break;
1900   }
1901 }
1902 
1903 
1904 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1905   DCHECK(ToRegister(instr->context()).is(rsi));
1906   DCHECK(ToRegister(instr->left()).is(rdx));
1907   DCHECK(ToRegister(instr->right()).is(rax));
1908   DCHECK(ToRegister(instr->result()).is(rax));
1909 
1910   Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
1911   CallCode(code, RelocInfo::CODE_TARGET, instr);
1912 }
1913 
1914 
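// Emits the branch for a control instruction. The jump to whichever
// destination is the next block in emission order is omitted: if the true
// block falls through, the condition is negated and only the branch to the
// false block is emitted.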
1915 template<class InstrType>
1916 void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
1917   int left_block = instr->TrueDestination(chunk_);
1918   int right_block = instr->FalseDestination(chunk_);
1919 
1920   int next_block = GetNextEmittedBlock();
1921 
1922   if (right_block == left_block || cc == no_condition) {
1923     EmitGoto(left_block);
1924   } else if (left_block == next_block) {
1925     __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
1926   } else if (right_block == next_block) {
1927     __ j(cc, chunk_->GetAssemblyLabel(left_block));
1928   } else {
1929     __ j(cc, chunk_->GetAssemblyLabel(left_block));
1930     if (cc != always) {
1931       __ jmp(chunk_->GetAssemblyLabel(right_block));
1932     }
1933   }
1934 }
1935 
1936 
1937 template <class InstrType>
1938 void LCodeGen::EmitTrueBranch(InstrType instr, Condition cc) {
1939   int true_block = instr->TrueDestination(chunk_);
1940   __ j(cc, chunk_->GetAssemblyLabel(true_block));
1941 }
1942 
1943 
1944 template <class InstrType>
1945 void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
1946   int false_block = instr->FalseDestination(chunk_);
1947   __ j(cc, chunk_->GetAssemblyLabel(false_block));
1948 }
1949 
1950 
1951 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
1952   __ int3();
1953 }
1954 
1955 
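// Branch on a value's truthiness. Known representations (int32, smi, double,
// and tagged values of a known type) are tested directly. For other tagged
// values the ToBoolean type-feedback hints restrict the checks to types seen
// so far; anything outside that set deoptimizes, presumably so the hints can
// be widened on the next optimization.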
1956 void LCodeGen::DoBranch(LBranch* instr) {
1957   Representation r = instr->hydrogen()->value()->representation();
1958   if (r.IsInteger32()) {
1959     DCHECK(!info()->IsStub());
1960     Register reg = ToRegister(instr->value());
1961     __ testl(reg, reg);
1962     EmitBranch(instr, not_zero);
1963   } else if (r.IsSmi()) {
1964     DCHECK(!info()->IsStub());
1965     Register reg = ToRegister(instr->value());
1966     __ testp(reg, reg);
1967     EmitBranch(instr, not_zero);
1968   } else if (r.IsDouble()) {
1969     DCHECK(!info()->IsStub());
1970     XMMRegister reg = ToDoubleRegister(instr->value());
1971     XMMRegister xmm_scratch = double_scratch0();
1972     __ Xorpd(xmm_scratch, xmm_scratch);
1973     __ Ucomisd(reg, xmm_scratch);
1974     EmitBranch(instr, not_equal);
1975   } else {
1976     DCHECK(r.IsTagged());
1977     Register reg = ToRegister(instr->value());
1978     HType type = instr->hydrogen()->value()->type();
1979     if (type.IsBoolean()) {
1980       DCHECK(!info()->IsStub());
1981       __ CompareRoot(reg, Heap::kTrueValueRootIndex);
1982       EmitBranch(instr, equal);
1983     } else if (type.IsSmi()) {
1984       DCHECK(!info()->IsStub());
1985       __ SmiCompare(reg, Smi::kZero);
1986       EmitBranch(instr, not_equal);
1987     } else if (type.IsJSArray()) {
1988       DCHECK(!info()->IsStub());
1989       EmitBranch(instr, no_condition);
1990     } else if (type.IsHeapNumber()) {
1991       DCHECK(!info()->IsStub());
1992       XMMRegister xmm_scratch = double_scratch0();
1993       __ Xorpd(xmm_scratch, xmm_scratch);
1994       __ Ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
1995       EmitBranch(instr, not_equal);
1996     } else if (type.IsString()) {
1997       DCHECK(!info()->IsStub());
1998       __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
1999       EmitBranch(instr, not_equal);
2000     } else {
2001       ToBooleanHints expected = instr->hydrogen()->expected_input_types();
2002       // Avoid deopts in the case where we've never executed this path before.
2003       if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
2004 
2005       if (expected & ToBooleanHint::kUndefined) {
2006         // undefined -> false.
2007         __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
2008         __ j(equal, instr->FalseLabel(chunk_));
2009       }
2010       if (expected & ToBooleanHint::kBoolean) {
2011         // true -> true.
2012         __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2013         __ j(equal, instr->TrueLabel(chunk_));
2014         // false -> false.
2015         __ CompareRoot(reg, Heap::kFalseValueRootIndex);
2016         __ j(equal, instr->FalseLabel(chunk_));
2017       }
2018       if (expected & ToBooleanHint::kNull) {
2019         // 'null' -> false.
2020         __ CompareRoot(reg, Heap::kNullValueRootIndex);
2021         __ j(equal, instr->FalseLabel(chunk_));
2022       }
2023 
2024       if (expected & ToBooleanHint::kSmallInteger) {
2025         // Smis: 0 -> false, all other -> true.
2026         __ Cmp(reg, Smi::kZero);
2027         __ j(equal, instr->FalseLabel(chunk_));
2028         __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2029       } else if (expected & ToBooleanHint::kNeedsMap) {
2030         // If we need a map later and have a Smi -> deopt.
2031         __ testb(reg, Immediate(kSmiTagMask));
2032         DeoptimizeIf(zero, instr, DeoptimizeReason::kSmi);
2033       }
2034 
2035       const Register map = kScratchRegister;
2036       if (expected & ToBooleanHint::kNeedsMap) {
2037         __ movp(map, FieldOperand(reg, HeapObject::kMapOffset));
2038 
2039         if (expected & ToBooleanHint::kCanBeUndetectable) {
2040           // Undetectable -> false.
2041           __ testb(FieldOperand(map, Map::kBitFieldOffset),
2042                    Immediate(1 << Map::kIsUndetectable));
2043           __ j(not_zero, instr->FalseLabel(chunk_));
2044         }
2045       }
2046 
2047       if (expected & ToBooleanHint::kReceiver) {
2048         // spec object -> true.
2049         __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
2050         __ j(above_equal, instr->TrueLabel(chunk_));
2051       }
2052 
2053       if (expected & ToBooleanHint::kString) {
2054         // String value -> false iff empty.
2055         Label not_string;
2056         __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
2057         __ j(above_equal, &not_string, Label::kNear);
2058         __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
2059         __ j(not_zero, instr->TrueLabel(chunk_));
2060         __ jmp(instr->FalseLabel(chunk_));
2061         __ bind(&not_string);
2062       }
2063 
2064       if (expected & ToBooleanHint::kSymbol) {
2065         // Symbol value -> true.
2066         __ CmpInstanceType(map, SYMBOL_TYPE);
2067         __ j(equal, instr->TrueLabel(chunk_));
2068       }
2069 
2070       if (expected & ToBooleanHint::kHeapNumber) {
2071         // heap number -> false iff +0, -0, or NaN.
2072         Label not_heap_number;
2073         __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
2074         __ j(not_equal, &not_heap_number, Label::kNear);
2075         XMMRegister xmm_scratch = double_scratch0();
2076         __ Xorpd(xmm_scratch, xmm_scratch);
2077         __ Ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
2078         __ j(zero, instr->FalseLabel(chunk_));
2079         __ jmp(instr->TrueLabel(chunk_));
2080         __ bind(&not_heap_number);
2081       }
2082 
2083       if (expected != ToBooleanHint::kAny) {
2084         // We've seen something for the first time -> deopt.
2085         // This can only happen if we are not generic already.
2086         DeoptimizeIf(no_condition, instr, DeoptimizeReason::kUnexpectedObject);
2087       }
2088     }
2089   }
2090 }
2091 
2092 
2093 void LCodeGen::EmitGoto(int block) {
2094   if (!IsNextEmittedBlock(block)) {
2095     __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block)));
2096   }
2097 }
2098 
2099 
2100 void LCodeGen::DoGoto(LGoto* instr) {
2101   EmitGoto(instr->block_id());
2102 }
2103 
2104 
2105 inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2106   Condition cond = no_condition;
2107   switch (op) {
2108     case Token::EQ:
2109     case Token::EQ_STRICT:
2110       cond = equal;
2111       break;
2112     case Token::NE:
2113     case Token::NE_STRICT:
2114       cond = not_equal;
2115       break;
2116     case Token::LT:
2117       cond = is_unsigned ? below : less;
2118       break;
2119     case Token::GT:
2120       cond = is_unsigned ? above : greater;
2121       break;
2122     case Token::LTE:
2123       cond = is_unsigned ? below_equal : less_equal;
2124       break;
2125     case Token::GTE:
2126       cond = is_unsigned ? above_equal : greater_equal;
2127       break;
2128     case Token::IN:
2129     case Token::INSTANCEOF:
2130     default:
2131       UNREACHABLE();
2132   }
2133   return cond;
2134 }
2135 
2136 
2137 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2138   LOperand* left = instr->left();
2139   LOperand* right = instr->right();
2140   bool is_unsigned =
2141       instr->is_double() ||
2142       instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2143       instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2144   Condition cc = TokenToCondition(instr->op(), is_unsigned);
2145 
2146   if (left->IsConstantOperand() && right->IsConstantOperand()) {
2147     // We can statically evaluate the comparison.
2148     double left_val = ToDouble(LConstantOperand::cast(left));
2149     double right_val = ToDouble(LConstantOperand::cast(right));
2150     int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
2151                          ? instr->TrueDestination(chunk_)
2152                          : instr->FalseDestination(chunk_);
2153     EmitGoto(next_block);
2154   } else {
2155     if (instr->is_double()) {
2156       // Don't base result on EFLAGS when a NaN is involved. Instead
2157       // jump to the false block.
2158       __ Ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
2159       __ j(parity_even, instr->FalseLabel(chunk_));
2160     } else {
2161       int32_t value;
2162       if (right->IsConstantOperand()) {
2163         value = ToInteger32(LConstantOperand::cast(right));
2164         if (instr->hydrogen_value()->representation().IsSmi()) {
2165           __ Cmp(ToRegister(left), Smi::FromInt(value));
2166         } else {
2167           __ cmpl(ToRegister(left), Immediate(value));
2168         }
2169       } else if (left->IsConstantOperand()) {
2170         value = ToInteger32(LConstantOperand::cast(left));
2171         if (instr->hydrogen_value()->representation().IsSmi()) {
2172           if (right->IsRegister()) {
2173             __ Cmp(ToRegister(right), Smi::FromInt(value));
2174           } else {
2175             __ Cmp(ToOperand(right), Smi::FromInt(value));
2176           }
2177         } else if (right->IsRegister()) {
2178           __ cmpl(ToRegister(right), Immediate(value));
2179         } else {
2180           __ cmpl(ToOperand(right), Immediate(value));
2181         }
2182         // We commuted the operands, so commute the condition.
2183         cc = CommuteCondition(cc);
2184       } else if (instr->hydrogen_value()->representation().IsSmi()) {
2185         if (right->IsRegister()) {
2186           __ cmpp(ToRegister(left), ToRegister(right));
2187         } else {
2188           __ cmpp(ToRegister(left), ToOperand(right));
2189         }
2190       } else {
2191         if (right->IsRegister()) {
2192           __ cmpl(ToRegister(left), ToRegister(right));
2193         } else {
2194           __ cmpl(ToRegister(left), ToOperand(right));
2195         }
2196       }
2197     }
2198     EmitBranch(instr, cc);
2199   }
2200 }
2201 
2202 
2203 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2204   Register left = ToRegister(instr->left());
2205 
2206   if (instr->right()->IsConstantOperand()) {
2207     Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
2208     __ Cmp(left, right);
2209   } else {
2210     Register right = ToRegister(instr->right());
2211     __ cmpp(left, right);
2212   }
2213   EmitBranch(instr, equal);
2214 }
2215 
2216 
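// The hole in a holey double array is encoded as a NaN with a specific bit
// pattern. An input that is not NaN cannot be the hole and takes the false
// branch immediately; otherwise the double is spilled to the stack and its
// upper 32 bits are compared against kHoleNanUpper32.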
2217 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2218   if (instr->hydrogen()->representation().IsTagged()) {
2219     Register input_reg = ToRegister(instr->object());
2220     __ Cmp(input_reg, factory()->the_hole_value());
2221     EmitBranch(instr, equal);
2222     return;
2223   }
2224 
2225   XMMRegister input_reg = ToDoubleRegister(instr->object());
2226   __ Ucomisd(input_reg, input_reg);
2227   EmitFalseBranch(instr, parity_odd);
2228 
2229   __ subp(rsp, Immediate(kDoubleSize));
2230   __ Movsd(MemOperand(rsp, 0), input_reg);
2231   __ addp(rsp, Immediate(kDoubleSize));
2232 
2233   int offset = sizeof(kHoleNanUpper32);
2234   __ cmpl(MemOperand(rsp, -offset), Immediate(kHoleNanUpper32));
2235   EmitBranch(instr, equal);
2236 }
2237 
2238 
2239 Condition LCodeGen::EmitIsString(Register input,
2240                                  Register temp1,
2241                                  Label* is_not_string,
2242                                  SmiCheck check_needed = INLINE_SMI_CHECK) {
2243   if (check_needed == INLINE_SMI_CHECK) {
2244     __ JumpIfSmi(input, is_not_string);
2245   }
2246 
2247   Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
2248 
2249   return cond;
2250 }
2251 
2252 
2253 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2254   Register reg = ToRegister(instr->value());
2255   Register temp = ToRegister(instr->temp());
2256 
2257   SmiCheck check_needed =
2258       instr->hydrogen()->value()->type().IsHeapObject()
2259           ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2260 
2261   Condition true_cond = EmitIsString(
2262       reg, temp, instr->FalseLabel(chunk_), check_needed);
2263 
2264   EmitBranch(instr, true_cond);
2265 }
2266 
2267 
2268 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2269   Condition is_smi;
2270   if (instr->value()->IsRegister()) {
2271     Register input = ToRegister(instr->value());
2272     is_smi = masm()->CheckSmi(input);
2273   } else {
2274     Operand input = ToOperand(instr->value());
2275     is_smi = masm()->CheckSmi(input);
2276   }
2277   EmitBranch(instr, is_smi);
2278 }
2279 
2280 
2281 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2282   Register input = ToRegister(instr->value());
2283   Register temp = ToRegister(instr->temp());
2284 
2285   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2286     __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2287   }
2288   __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
2289   __ testb(FieldOperand(temp, Map::kBitFieldOffset),
2290            Immediate(1 << Map::kIsUndetectable));
2291   EmitBranch(instr, not_zero);
2292 }
2293 
2294 
2295 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2296   DCHECK(ToRegister(instr->context()).is(rsi));
2297   DCHECK(ToRegister(instr->left()).is(rdx));
2298   DCHECK(ToRegister(instr->right()).is(rax));
2299 
2300   Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
2301   CallCode(code, RelocInfo::CODE_TARGET, instr);
2302   __ CompareRoot(rax, Heap::kTrueValueRootIndex);
2303   EmitBranch(instr, equal);
2304 }
2305 
2306 
2307 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2308   InstanceType from = instr->from();
2309   InstanceType to = instr->to();
2310   if (from == FIRST_TYPE) return to;
2311   DCHECK(from == to || to == LAST_TYPE);
2312   return from;
2313 }
2314 
2315 
2316 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2317   InstanceType from = instr->from();
2318   InstanceType to = instr->to();
2319   if (from == to) return equal;
2320   if (to == LAST_TYPE) return above_equal;
2321   if (from == FIRST_TYPE) return below_equal;
2322   UNREACHABLE();
2323   return equal;
2324 }
2325 
2326 
2327 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2328   Register input = ToRegister(instr->value());
2329 
2330   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2331     __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2332   }
2333 
2334   __ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister);
2335   EmitBranch(instr, BranchCondition(instr->hydrogen()));
2336 }
2337 
2338 // Branches to a label or falls through with the answer in the z flag.
2339 // Trashes the temp register.
2340 void LCodeGen::EmitClassOfTest(Label* is_true,
2341                                Label* is_false,
2342                                Handle<String> class_name,
2343                                Register input,
2344                                Register temp,
2345                                Register temp2) {
2346   DCHECK(!input.is(temp));
2347   DCHECK(!input.is(temp2));
2348   DCHECK(!temp.is(temp2));
2349 
2350   __ JumpIfSmi(input, is_false);
2351 
2352   __ CmpObjectType(input, FIRST_FUNCTION_TYPE, temp);
2353   STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
2354   if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2355     __ j(above_equal, is_true);
2356   } else {
2357     __ j(above_equal, is_false);
2358   }
2359 
2360   // Check if the constructor in the map is a function.
2361   __ GetMapConstructor(temp, temp, kScratchRegister);
2362 
2363   // Objects with a non-function constructor have class 'Object'.
2364   __ CmpInstanceType(kScratchRegister, JS_FUNCTION_TYPE);
2365   if (String::Equals(class_name, isolate()->factory()->Object_string())) {
2366     __ j(not_equal, is_true);
2367   } else {
2368     __ j(not_equal, is_false);
2369   }
2370 
2371   // temp now contains the constructor function. Grab the
2372   // instance class name from there.
2373   __ movp(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2374   __ movp(temp, FieldOperand(temp,
2375                              SharedFunctionInfo::kInstanceClassNameOffset));
2376   // The class name we are testing against is internalized since it's a literal.
2377   // The name in the constructor is internalized because of the way the context
2378   // is booted.  This routine isn't expected to work for random API-created
2379   // classes and it doesn't have to because you can't access it with natives
2380   // syntax.  Since both sides are internalized it is sufficient to use an
2381   // identity comparison.
2382   DCHECK(class_name->IsInternalizedString());
2383   __ Cmp(temp, class_name);
2384   // End with the answer in the z flag.
2385 }
2386 
2387 
2388 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2389   Register input = ToRegister(instr->value());
2390   Register temp = ToRegister(instr->temp());
2391   Register temp2 = ToRegister(instr->temp2());
2392   Handle<String> class_name = instr->hydrogen()->class_name();
2393 
2394   EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2395       class_name, input, temp, temp2);
2396 
2397   EmitBranch(instr, equal);
2398 }
2399 
2400 
2401 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2402   Register reg = ToRegister(instr->value());
2403 
2404   __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
2405   EmitBranch(instr, equal);
2406 }
2407 
2408 
2409 void LCodeGen::DoHasInPrototypeChainAndBranch(
2410     LHasInPrototypeChainAndBranch* instr) {
2411   Register const object = ToRegister(instr->object());
2412   Register const object_map = kScratchRegister;
2413   Register const object_prototype = object_map;
2414   Register const prototype = ToRegister(instr->prototype());
2415 
2416   // The {object} must be a spec object.  It's sufficient to know that {object}
2417   // is not a smi, since all other non-spec objects have {null} prototypes and
2418   // will be ruled out below.
2419   if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
2420     Condition is_smi = __ CheckSmi(object);
2421     EmitFalseBranch(instr, is_smi);
2422   }
2423 
2424   // Loop through the {object}'s prototype chain looking for the {prototype}.
2425   __ movp(object_map, FieldOperand(object, HeapObject::kMapOffset));
2426   Label loop;
2427   __ bind(&loop);
2428 
2429   // Deoptimize if the object needs to be access checked.
2430   __ testb(FieldOperand(object_map, Map::kBitFieldOffset),
2431            Immediate(1 << Map::kIsAccessCheckNeeded));
2432   DeoptimizeIf(not_zero, instr, DeoptimizeReason::kAccessCheck);
2433   // Deoptimize for proxies.
2434   __ CmpInstanceType(object_map, JS_PROXY_TYPE);
2435   DeoptimizeIf(equal, instr, DeoptimizeReason::kProxy);
2436 
2437   __ movp(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
2438   __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
2439   EmitFalseBranch(instr, equal);
2440   __ cmpp(object_prototype, prototype);
2441   EmitTrueBranch(instr, equal);
2442   __ movp(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset));
2443   __ jmp(&loop);
2444 }
2445 
2446 
2447 void LCodeGen::DoCmpT(LCmpT* instr) {
2448   DCHECK(ToRegister(instr->context()).is(rsi));
2449   Token::Value op = instr->op();
2450 
2451   Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2452   CallCode(ic, RelocInfo::CODE_TARGET, instr);
2453 
2454   Condition condition = TokenToCondition(op, false);
2455   Label true_value, done;
2456   __ testp(rax, rax);
2457   __ j(condition, &true_value, Label::kNear);
2458   __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2459   __ jmp(&done, Label::kNear);
2460   __ bind(&true_value);
2461   __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2462   __ bind(&done);
2463 }
2464 
2465 
2466 void LCodeGen::DoReturn(LReturn* instr) {
2467   if (FLAG_trace && info()->IsOptimizing()) {
2468     // Preserve the return value on the stack and rely on the runtime call
2469     // to return the value in the same register.  We're leaving the code
2470     // managed by the register allocator and tearing down the frame, so it's
2471     // safe to write to the context register.
2472     __ Push(rax);
2473     __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
2474     __ CallRuntime(Runtime::kTraceExit);
2475   }
2476   if (info()->saves_caller_doubles()) {
2477     RestoreCallerDoubles();
2478   }
2479   if (NeedsEagerFrame()) {
2480     __ movp(rsp, rbp);
2481     __ popq(rbp);
2482   }
2483   if (instr->has_constant_parameter_count()) {
2484     __ Ret((ToInteger32(instr->constant_parameter_count()) + 1) * kPointerSize,
2485            rcx);
2486   } else {
2487     DCHECK(info()->IsStub());  // Functions would need to drop one more value.
2488     Register reg = ToRegister(instr->parameter_count());
2489     // The argument count parameter is a smi.
2490     __ SmiToInteger32(reg, reg);
2491     Register return_addr_reg = reg.is(rcx) ? rbx : rcx;
2492     __ PopReturnAddressTo(return_addr_reg);
2493     __ shlp(reg, Immediate(kPointerSizeLog2));
2494     __ addp(rsp, reg);
2495     __ jmp(return_addr_reg);
2496   }
2497 }
2498 
2499 
2500 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2501   Register context = ToRegister(instr->context());
2502   Register result = ToRegister(instr->result());
2503   __ movp(result, ContextOperand(context, instr->slot_index()));
2504   if (instr->hydrogen()->RequiresHoleCheck()) {
2505     __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2506     if (instr->hydrogen()->DeoptimizesOnHole()) {
2507       DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
2508     } else {
2509       Label is_not_hole;
2510       __ j(not_equal, &is_not_hole, Label::kNear);
2511       __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2512       __ bind(&is_not_hole);
2513     }
2514   }
2515 }
2516 
2517 
2518 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2519   Register context = ToRegister(instr->context());
2520   Register value = ToRegister(instr->value());
2521 
2522   Operand target = ContextOperand(context, instr->slot_index());
2523 
2524   Label skip_assignment;
2525   if (instr->hydrogen()->RequiresHoleCheck()) {
2526     __ CompareRoot(target, Heap::kTheHoleValueRootIndex);
2527     if (instr->hydrogen()->DeoptimizesOnHole()) {
2528       DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
2529     } else {
2530       __ j(not_equal, &skip_assignment);
2531     }
2532   }
2533   __ movp(target, value);
2534 
2535   if (instr->hydrogen()->NeedsWriteBarrier()) {
2536     SmiCheck check_needed =
2537       instr->hydrogen()->value()->type().IsHeapObject()
2538           ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2539     int offset = Context::SlotOffset(instr->slot_index());
2540     Register scratch = ToRegister(instr->temp());
2541     __ RecordWriteContextSlot(context,
2542                               offset,
2543                               value,
2544                               scratch,
2545                               kSaveFPRegs,
2546                               EMIT_REMEMBERED_SET,
2547                               check_needed);
2548   }
2549 
2550   __ bind(&skip_assignment);
2551 }
2552 
2553 
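// Loads a named field. External-memory accesses and in-object double fields
// are handled separately; out-of-object properties go through the properties
// array first. With 32-bit smi values, an int32-representation load of a smi
// field reads the integer directly from the upper half of the word instead
// of untagging.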
2554 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2555   HObjectAccess access = instr->hydrogen()->access();
2556   int offset = access.offset();
2557 
2558   if (access.IsExternalMemory()) {
2559     Register result = ToRegister(instr->result());
2560     if (instr->object()->IsConstantOperand()) {
2561       DCHECK(result.is(rax));
2562       __ load_rax(ToExternalReference(LConstantOperand::cast(instr->object())));
2563     } else {
2564       Register object = ToRegister(instr->object());
2565       __ Load(result, MemOperand(object, offset), access.representation());
2566     }
2567     return;
2568   }
2569 
2570   Register object = ToRegister(instr->object());
2571   if (instr->hydrogen()->representation().IsDouble()) {
2572     DCHECK(access.IsInobject());
2573     XMMRegister result = ToDoubleRegister(instr->result());
2574     __ Movsd(result, FieldOperand(object, offset));
2575     return;
2576   }
2577 
2578   Register result = ToRegister(instr->result());
2579   if (!access.IsInobject()) {
2580     __ movp(result, FieldOperand(object, JSObject::kPropertiesOffset));
2581     object = result;
2582   }
2583 
2584   Representation representation = access.representation();
2585   if (representation.IsSmi() && SmiValuesAre32Bits() &&
2586       instr->hydrogen()->representation().IsInteger32()) {
2587     if (FLAG_debug_code) {
2588       Register scratch = kScratchRegister;
2589       __ Load(scratch, FieldOperand(object, offset), representation);
2590       __ AssertSmi(scratch);
2591     }
2592 
2593     // Read int value directly from upper half of the smi.
2594     STATIC_ASSERT(kSmiTag == 0);
2595     DCHECK(kSmiTagSize + kSmiShiftSize == 32);
2596     offset += kPointerSize / 2;
2597     representation = Representation::Integer32();
2598   }
2599   __ Load(result, FieldOperand(object, offset), representation);
2600 }
2601 
2602 
2603 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2604   Register function = ToRegister(instr->function());
2605   Register result = ToRegister(instr->result());
2606 
2607   // Get the prototype or initial map from the function.
2608   __ movp(result,
2609          FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2610 
2611   // Check that the function has a prototype or an initial map.
2612   __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2613   DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
2614 
2615   // If the function does not have an initial map, we're done.
2616   Label done;
2617   __ CmpObjectType(result, MAP_TYPE, kScratchRegister);
2618   __ j(not_equal, &done, Label::kNear);
2619 
2620   // Get the prototype from the initial map.
2621   __ movp(result, FieldOperand(result, Map::kPrototypeOffset));
2622 
2623   // All done.
2624   __ bind(&done);
2625 }
2626 
2627 
2628 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
2629   Register result = ToRegister(instr->result());
2630   __ LoadRoot(result, instr->index());
2631 }
2632 
2633 
2634 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2635   Register arguments = ToRegister(instr->arguments());
2636   Register result = ToRegister(instr->result());
2637 
2638   if (instr->length()->IsConstantOperand() &&
2639       instr->index()->IsConstantOperand()) {
2640     int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
2641     int32_t const_length = ToInteger32(LConstantOperand::cast(instr->length()));
2642     if (const_index >= 0 && const_index < const_length) {
2643       StackArgumentsAccessor args(arguments, const_length,
2644                                   ARGUMENTS_DONT_CONTAIN_RECEIVER);
2645       __ movp(result, args.GetArgumentOperand(const_index));
2646     } else if (FLAG_debug_code) {
2647       __ int3();
2648     }
2649   } else {
2650     Register length = ToRegister(instr->length());
2651     // There are two words between the frame pointer and the last argument.
2652     // Subtracting from length accounts for one of them; add one more.
2653     if (instr->index()->IsRegister()) {
2654       __ subl(length, ToRegister(instr->index()));
2655     } else {
2656       __ subl(length, ToOperand(instr->index()));
2657     }
2658     StackArgumentsAccessor args(arguments, length,
2659                                 ARGUMENTS_DONT_CONTAIN_RECEIVER);
2660     __ movp(result, args.GetArgumentOperand(0));
2661   }
2662 }
2663 
2664 
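// Keyed load from a typed-array backing store. The elements kind selects the
// instruction: float32 loads convert to double via cvtss2sd, float64 loads
// use movsd, and the integer kinds use the movsx/movzx variant of matching
// width. A uint32 element that does not fit in an int32 deoptimizes unless
// the instruction is flagged as producing a uint32.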
2665 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
2666   ElementsKind elements_kind = instr->elements_kind();
2667   LOperand* key = instr->key();
2668   if (kPointerSize == kInt32Size && !key->IsConstantOperand()) {
2669     Register key_reg = ToRegister(key);
2670     Representation key_representation =
2671         instr->hydrogen()->key()->representation();
2672     if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
2673       __ SmiToInteger64(key_reg, key_reg);
2674     } else if (instr->hydrogen()->IsDehoisted()) {
2675       // Sign-extend the key because it could be a negative 32-bit value
2676       // and the dehoisted address computation happens in 64 bits.
2677       __ movsxlq(key_reg, key_reg);
2678     }
2679   }
2680   Operand operand(BuildFastArrayOperand(
2681       instr->elements(),
2682       key,
2683       instr->hydrogen()->key()->representation(),
2684       elements_kind,
2685       instr->base_offset()));
2686 
2687   if (elements_kind == FLOAT32_ELEMENTS) {
2688     XMMRegister result(ToDoubleRegister(instr->result()));
2689     __ Cvtss2sd(result, operand);
2690   } else if (elements_kind == FLOAT64_ELEMENTS) {
2691     __ Movsd(ToDoubleRegister(instr->result()), operand);
2692   } else {
2693     Register result(ToRegister(instr->result()));
2694     switch (elements_kind) {
2695       case INT8_ELEMENTS:
2696         __ movsxbl(result, operand);
2697         break;
2698       case UINT8_ELEMENTS:
2699       case UINT8_CLAMPED_ELEMENTS:
2700         __ movzxbl(result, operand);
2701         break;
2702       case INT16_ELEMENTS:
2703         __ movsxwl(result, operand);
2704         break;
2705       case UINT16_ELEMENTS:
2706         __ movzxwl(result, operand);
2707         break;
2708       case INT32_ELEMENTS:
2709         __ movl(result, operand);
2710         break;
2711       case UINT32_ELEMENTS:
2712         __ movl(result, operand);
2713         if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
2714           __ testl(result, result);
2715           DeoptimizeIf(negative, instr, DeoptimizeReason::kNegativeValue);
2716         }
2717         break;
2718       case FLOAT32_ELEMENTS:
2719       case FLOAT64_ELEMENTS:
2720       case FAST_ELEMENTS:
2721       case FAST_SMI_ELEMENTS:
2722       case FAST_DOUBLE_ELEMENTS:
2723       case FAST_HOLEY_ELEMENTS:
2724       case FAST_HOLEY_SMI_ELEMENTS:
2725       case FAST_HOLEY_DOUBLE_ELEMENTS:
2726       case DICTIONARY_ELEMENTS:
2727       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
2728       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
2729       case FAST_STRING_WRAPPER_ELEMENTS:
2730       case SLOW_STRING_WRAPPER_ELEMENTS:
2731       case NO_ELEMENTS:
2732         UNREACHABLE();
2733         break;
2734     }
2735   }
2736 }
2737 
2738 
2739 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
2740   XMMRegister result(ToDoubleRegister(instr->result()));
2741   LOperand* key = instr->key();
2742   if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
2743       instr->hydrogen()->IsDehoisted()) {
2744     // Sign-extend the key because it could be a negative 32-bit value
2745     // and the dehoisted address computation happens in 64 bits.
2746     __ movsxlq(ToRegister(key), ToRegister(key));
2747   }
2748   if (instr->hydrogen()->RequiresHoleCheck()) {
2749     Operand hole_check_operand = BuildFastArrayOperand(
2750         instr->elements(),
2751         key,
2752         instr->hydrogen()->key()->representation(),
2753         FAST_DOUBLE_ELEMENTS,
2754         instr->base_offset() + sizeof(kHoleNanLower32));
2755     __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
2756     DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
2757   }
2758 
2759   Operand double_load_operand = BuildFastArrayOperand(
2760       instr->elements(),
2761       key,
2762       instr->hydrogen()->key()->representation(),
2763       FAST_DOUBLE_ELEMENTS,
2764       instr->base_offset());
2765   __ Movsd(result, double_load_operand);
2766 }
2767 
2768 
2769 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
2770   HLoadKeyed* hinstr = instr->hydrogen();
2771   Register result = ToRegister(instr->result());
2772   LOperand* key = instr->key();
2773   bool requires_hole_check = hinstr->RequiresHoleCheck();
2774   Representation representation = hinstr->representation();
2775   int offset = instr->base_offset();
2776 
2777   if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
2778       instr->hydrogen()->IsDehoisted()) {
2779     // Sign-extend the key because it could be a negative 32-bit value
2780     // and the dehoisted address computation happens in 64 bits.
2781     __ movsxlq(ToRegister(key), ToRegister(key));
2782   }
2783   if (representation.IsInteger32() && SmiValuesAre32Bits() &&
2784       hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
2785     DCHECK(!requires_hole_check);
2786     if (FLAG_debug_code) {
2787       Register scratch = kScratchRegister;
2788       __ Load(scratch,
2789               BuildFastArrayOperand(instr->elements(),
2790                                     key,
2791                                     instr->hydrogen()->key()->representation(),
2792                                     FAST_ELEMENTS,
2793                                     offset),
2794               Representation::Smi());
2795       __ AssertSmi(scratch);
2796     }
2797     // Read int value directly from upper half of the smi.
2798     STATIC_ASSERT(kSmiTag == 0);
2799     DCHECK(kSmiTagSize + kSmiShiftSize == 32);
2800     offset += kPointerSize / 2;
2801   }
2802 
2803   __ Load(result,
2804           BuildFastArrayOperand(instr->elements(), key,
2805                                 instr->hydrogen()->key()->representation(),
2806                                 FAST_ELEMENTS, offset),
2807           representation);
2808 
2809   // Check for the hole value.
2810   if (requires_hole_check) {
2811     if (IsFastSmiElementsKind(hinstr->elements_kind())) {
2812       Condition smi = __ CheckSmi(result);
2813       DeoptimizeIf(NegateCondition(smi), instr, DeoptimizeReason::kNotASmi);
2814     } else {
2815       __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2816       DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
2817     }
2818   } else if (hinstr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
2819     DCHECK(hinstr->elements_kind() == FAST_HOLEY_ELEMENTS);
2820     Label done;
2821     __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2822     __ j(not_equal, &done);
2823     if (info()->IsStub()) {
2824       // A stub can safely convert the hole to undefined only if the array
2825       // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
2826       // it needs to bail out.
2827       __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
2828       __ Cmp(FieldOperand(result, PropertyCell::kValueOffset),
2829              Smi::FromInt(Isolate::kProtectorValid));
2830       DeoptimizeIf(not_equal, instr, DeoptimizeReason::kHole);
2831     }
2832     __ Move(result, isolate()->factory()->undefined_value());
2833     __ bind(&done);
2834   }
2835 }
2836 
2837 
2838 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
2839   if (instr->is_fixed_typed_array()) {
2840     DoLoadKeyedExternalArray(instr);
2841   } else if (instr->hydrogen()->representation().IsDouble()) {
2842     DoLoadKeyedFixedDoubleArray(instr);
2843   } else {
2844     DoLoadKeyedFixedArray(instr);
2845   }
2846 }
2847 
2848 
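// Builds the memory operand for a fast-elements access. A constant key is
// folded into the displacement, shifted by the element size, and the code
// aborts if the constant is large enough to make that computation overflow.
// A register key uses x64 scaled addressing, reusing the elements-kind shift
// size directly as the scale factor.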
2849 Operand LCodeGen::BuildFastArrayOperand(
2850     LOperand* elements_pointer,
2851     LOperand* key,
2852     Representation key_representation,
2853     ElementsKind elements_kind,
2854     uint32_t offset) {
2855   Register elements_pointer_reg = ToRegister(elements_pointer);
2856   int shift_size = ElementsKindToShiftSize(elements_kind);
2857   if (key->IsConstantOperand()) {
2858     int32_t constant_value = ToInteger32(LConstantOperand::cast(key));
2859     if (constant_value & 0xF0000000) {
2860       Abort(kArrayIndexConstantValueTooBig);
2861     }
2862     return Operand(elements_pointer_reg,
2863                    (constant_value << shift_size) + offset);
2864   } else {
2865     // Guaranteed by ArrayInstructionInterface::KeyedAccessIndexRequirement().
2866     DCHECK(key_representation.IsInteger32());
2867 
2868     ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
2869     return Operand(elements_pointer_reg,
2870                    ToRegister(key),
2871                    scale_factor,
2872                    offset);
2873   }
2874 }
2875 
2876 
2877 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
2878   Register result = ToRegister(instr->result());
2879 
2880   if (instr->hydrogen()->from_inlined()) {
2881     __ leap(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize));
2882   } else if (instr->hydrogen()->arguments_adaptor()) {
2883     // Check for an arguments adaptor frame.
2884     Label done, adapted;
2885     __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
2886     __ cmpp(Operand(result, CommonFrameConstants::kContextOrFrameTypeOffset),
2887             Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
2888     __ j(equal, &adapted, Label::kNear);
2889 
2890     // No arguments adaptor frame.
2891     __ movp(result, rbp);
2892     __ jmp(&done, Label::kNear);
2893 
2894     // Arguments adaptor frame present.
2895     __ bind(&adapted);
2896     __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
2897 
2898     // Result is the frame pointer for the frame if not adapted and for the real
2899     // frame below the adaptor frame if adapted.
2900     __ bind(&done);
2901   } else {
2902     __ movp(result, rbp);
2903   }
2904 }
2905 
2906 
2907 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
2908   Register result = ToRegister(instr->result());
2909 
2910   Label done;
2911 
2912   // If there is no arguments adaptor frame, the number of arguments is fixed.
2913   if (instr->elements()->IsRegister()) {
2914     __ cmpp(rbp, ToRegister(instr->elements()));
2915   } else {
2916     __ cmpp(rbp, ToOperand(instr->elements()));
2917   }
2918   __ movl(result, Immediate(scope()->num_parameters()));
2919   __ j(equal, &done, Label::kNear);
2920 
2921   // Arguments adaptor frame present. Get argument length from there.
2922   __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
2923   __ SmiToInteger32(result,
2924                     Operand(result,
2925                             ArgumentsAdaptorFrameConstants::kLengthOffset));
2926 
2927   // Argument length is in result register.
2928   __ bind(&done);
2929 }
2930 
2931 
2932 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
2933   Register receiver = ToRegister(instr->receiver());
2934   Register function = ToRegister(instr->function());
2935 
2936   // If the receiver is null or undefined, we have to pass the global
2937   // object as a receiver to normal functions. Values have to be
2938   // passed unchanged to builtins and strict-mode functions.
2939   Label global_object, receiver_ok;
2940   Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
2941 
2942   if (!instr->hydrogen()->known_function()) {
2943     // Do not transform the receiver to object for strict mode
2944     // functions.
2945     __ movp(kScratchRegister,
2946             FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
2947     __ testb(FieldOperand(kScratchRegister,
2948                           SharedFunctionInfo::kStrictModeByteOffset),
2949              Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
2950     __ j(not_equal, &receiver_ok, dist);
2951 
2952     // Do not transform the receiver to object for builtins.
2953     __ testb(FieldOperand(kScratchRegister,
2954                           SharedFunctionInfo::kNativeByteOffset),
2955              Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
2956     __ j(not_equal, &receiver_ok, dist);
2957   }
2958 
2959   // Normal function. Replace undefined or null with global receiver.
2960   __ CompareRoot(receiver, Heap::kNullValueRootIndex);
2961   __ j(equal, &global_object, dist);
2962   __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
2963   __ j(equal, &global_object, dist);
2964 
2965   // The receiver should be a JS object.
2966   Condition is_smi = __ CheckSmi(receiver);
2967   DeoptimizeIf(is_smi, instr, DeoptimizeReason::kSmi);
2968   __ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, kScratchRegister);
2969   DeoptimizeIf(below, instr, DeoptimizeReason::kNotAJavaScriptObject);
2970 
2971   __ jmp(&receiver_ok, dist);
2972   __ bind(&global_object);
2973   __ movp(receiver, FieldOperand(function, JSFunction::kContextOffset));
2974   __ movp(receiver, ContextOperand(receiver, Context::NATIVE_CONTEXT_INDEX));
2975   __ movp(receiver, ContextOperand(receiver, Context::GLOBAL_PROXY_INDEX));
2976 
2977   __ bind(&receiver_ok);
2978 }
2979 
2980 
2981 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
2982   Register receiver = ToRegister(instr->receiver());
2983   Register function = ToRegister(instr->function());
2984   Register length = ToRegister(instr->length());
2985   Register elements = ToRegister(instr->elements());
2986   DCHECK(receiver.is(rax));  // Used for parameter count.
2987   DCHECK(function.is(rdi));  // Required by InvokeFunction.
2988   DCHECK(ToRegister(instr->result()).is(rax));
2989 
2990   // Copy the arguments to this function possibly from the
2991   // adaptor frame below it.
2992   const uint32_t kArgumentsLimit = 1 * KB;
2993   __ cmpp(length, Immediate(kArgumentsLimit));
2994   DeoptimizeIf(above, instr, DeoptimizeReason::kTooManyArguments);
2995 
2996   __ Push(receiver);
2997   __ movp(receiver, length);
2998 
2999   // Loop through the arguments pushing them onto the execution
3000   // stack.
3001   Label invoke, loop;
3002   // length is a small non-negative integer, due to the test above.
3003   __ testl(length, length);
3004   __ j(zero, &invoke, Label::kNear);
3005   __ bind(&loop);
3006   StackArgumentsAccessor args(elements, length,
3007                               ARGUMENTS_DONT_CONTAIN_RECEIVER);
3008   __ Push(args.GetArgumentOperand(0));
3009   __ decl(length);
3010   __ j(not_zero, &loop);
3011 
3012   // Invoke the function.
3013   __ bind(&invoke);
3014 
3015   InvokeFlag flag = CALL_FUNCTION;
3016   if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
3017     DCHECK(!info()->saves_caller_doubles());
3018     // TODO(ishell): drop current frame before pushing arguments to the stack.
3019     flag = JUMP_FUNCTION;
3020     ParameterCount actual(rax);
3021     // It is safe to use rbx, rcx and r8 as scratch registers here given that
3022     // 1) we are not going to return to caller function anyway,
3023     // 2) rbx (expected number of arguments) will be initialized below.
3024     PrepareForTailCall(actual, rbx, rcx, r8);
3025   }
3026 
3027   DCHECK(instr->HasPointerMap());
3028   LPointerMap* pointers = instr->pointer_map();
3029   SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
3030   ParameterCount actual(rax);
3031   __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
3032 }
3033 
3034 
3035 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3036   LOperand* argument = instr->value();
3037   EmitPushTaggedOperand(argument);
3038 }
3039 
3040 
3041 void LCodeGen::DoDrop(LDrop* instr) {
3042   __ Drop(instr->count());
3043 }
3044 
3045 
3046 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3047   Register result = ToRegister(instr->result());
3048   __ movp(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
3049 }
3050 
3051 
3052 void LCodeGen::DoContext(LContext* instr) {
3053   Register result = ToRegister(instr->result());
3054   if (info()->IsOptimizing()) {
3055     __ movp(result, Operand(rbp, StandardFrameConstants::kContextOffset));
3056   } else {
3057     // If there is no frame, the context must be in rsi.
3058     DCHECK(result.is(rsi));
3059   }
3060 }
3061 
3062 
3063 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3064   DCHECK(ToRegister(instr->context()).is(rsi));
3065   __ Push(instr->hydrogen()->declarations());
3066   __ Push(Smi::FromInt(instr->hydrogen()->flags()));
3067   __ Push(instr->hydrogen()->feedback_vector());
3068   CallRuntime(Runtime::kDeclareGlobals, instr);
3069 }
3070 
3071 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3072                                  int formal_parameter_count, int arity,
3073                                  bool is_tail_call, LInstruction* instr) {
3074   bool dont_adapt_arguments =
3075       formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3076   bool can_invoke_directly =
3077       dont_adapt_arguments || formal_parameter_count == arity;
3078 
3079   Register function_reg = rdi;
3080   LPointerMap* pointers = instr->pointer_map();
3081 
3082   if (can_invoke_directly) {
3083     // Change context.
3084     __ movp(rsi, FieldOperand(function_reg, JSFunction::kContextOffset));
3085 
3086     // Always initialize new target and number of actual arguments.
3087     __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
3088     __ Set(rax, arity);
3089 
3090     bool is_self_call = function.is_identical_to(info()->closure());
3091 
3092     // Invoke function.
3093     if (is_self_call) {
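      // Recursive call to the function being compiled: target the code
      // object under construction instead of reading the code entry field.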
3094       Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
3095       if (is_tail_call) {
3096         __ Jump(self, RelocInfo::CODE_TARGET);
3097       } else {
3098         __ Call(self, RelocInfo::CODE_TARGET);
3099       }
3100     } else {
3101       Operand target = FieldOperand(function_reg, JSFunction::kCodeEntryOffset);
3102       if (is_tail_call) {
3103         __ Jump(target);
3104       } else {
3105         __ Call(target);
3106       }
3107     }
3108 
3109     if (!is_tail_call) {
3110       // Set up deoptimization.
3111       RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
3112     }
3113   } else {
3114     // We need to adapt arguments.
3115     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3116     ParameterCount actual(arity);
3117     ParameterCount expected(formal_parameter_count);
3118     InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
3119     __ InvokeFunction(function_reg, no_reg, expected, actual, flag, generator);
3120   }
3121 }
3122 
3123 
3124 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3125   DCHECK(ToRegister(instr->result()).is(rax));
3126 
3127   if (instr->hydrogen()->IsTailCall()) {
3128     if (NeedsEagerFrame()) __ leave();
3129 
3130     if (instr->target()->IsConstantOperand()) {
3131       LConstantOperand* target = LConstantOperand::cast(instr->target());
3132       Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3133       __ jmp(code, RelocInfo::CODE_TARGET);
3134     } else {
3135       DCHECK(instr->target()->IsRegister());
3136       Register target = ToRegister(instr->target());
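      // Convert the tagged Code object pointer into the address of its first
      // instruction before jumping to it.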
3137       __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
3138       __ jmp(target);
3139     }
3140   } else {
3141     LPointerMap* pointers = instr->pointer_map();
3142     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3143 
3144     if (instr->target()->IsConstantOperand()) {
3145       LConstantOperand* target = LConstantOperand::cast(instr->target());
3146       Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3147       generator.BeforeCall(__ CallSize(code));
3148       __ call(code, RelocInfo::CODE_TARGET);
3149     } else {
3150       DCHECK(instr->target()->IsRegister());
3151       Register target = ToRegister(instr->target());
3152       generator.BeforeCall(__ CallSize(target));
3153       __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
3154       __ call(target);
3155     }
3156     generator.AfterCall();
3157   }
3158 }
3159 
3160 
3161 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3162   Register input_reg = ToRegister(instr->value());
3163   __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
3164                  Heap::kHeapNumberMapRootIndex);
3165   DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
3166 
3167   Label slow, allocated, done;
3168   uint32_t available_regs = rax.bit() | rcx.bit() | rdx.bit() | rbx.bit();
3169   available_regs &= ~input_reg.bit();
3170   if (instr->context()->IsRegister()) {
3171     // Make sure that the context isn't overwritten in the AllocateHeapNumber
3172     // macro below.
3173     available_regs &= ~ToRegister(instr->context()).bit();
3174   }
3175 
3176   Register tmp =
3177       Register::from_code(base::bits::CountTrailingZeros32(available_regs));
3178   available_regs &= ~tmp.bit();
3179   Register tmp2 =
3180       Register::from_code(base::bits::CountTrailingZeros32(available_regs));
3181 
3182   // Preserve the value of all registers.
3183   PushSafepointRegistersScope scope(this);
3184 
3185   __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
3186   // Check the sign of the argument. If the argument is positive, just
3187   // return it. We do not need to patch the stack since |input| and
3188   // |result| are the same register and |input| will be restored
3189   // unchanged by popping safepoint registers.
3190   __ testl(tmp, Immediate(HeapNumber::kSignMask));
3191   __ j(zero, &done);
3192 
3193   __ AllocateHeapNumber(tmp, tmp2, &slow);
3194   __ jmp(&allocated, Label::kNear);
3195 
3196   // Slow case: Call the runtime system to do the number allocation.
3197   __ bind(&slow);
3198   CallRuntimeFromDeferred(
3199       Runtime::kAllocateHeapNumber, 0, instr, instr->context());
3200   // Set the pointer to the new heap number in tmp.
3201   if (!tmp.is(rax)) __ movp(tmp, rax);
3202   // Restore input_reg after call to runtime.
3203   __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
3204 
3205   __ bind(&allocated);
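  // Clear the sign bit of the input's IEEE-754 bit pattern (shift it out to
  // the left and back in as zero) and store the result, |input|, in the
  // freshly allocated heap number.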
3206   __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
3207   __ shlq(tmp2, Immediate(1));
3208   __ shrq(tmp2, Immediate(1));
3209   __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
3210   __ StoreToSafepointRegisterSlot(input_reg, tmp);
3211 
3212   __ bind(&done);
3213 }
3214 
3215 
3216 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3217   Register input_reg = ToRegister(instr->value());
3218   __ testl(input_reg, input_reg);
3219   Label is_positive;
3220   __ j(not_sign, &is_positive, Label::kNear);
3221   __ negl(input_reg);  // Sets flags.
3222   DeoptimizeIf(negative, instr, DeoptimizeReason::kOverflow);
3223   __ bind(&is_positive);
3224 }
3225 
3226 
3227 void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
3228   Register input_reg = ToRegister(instr->value());
3229   __ testp(input_reg, input_reg);
3230   Label is_positive;
3231   __ j(not_sign, &is_positive, Label::kNear);
3232   __ negp(input_reg);  // Sets flags.
3233   DeoptimizeIf(negative, instr, DeoptimizeReason::kOverflow);
3234   __ bind(&is_positive);
3235 }
3236 
3237 
3238 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3239   // Class for deferred case.
3240   class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
3241    public:
3242     DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3243         : LDeferredCode(codegen), instr_(instr) { }
3244     void Generate() override {
3245       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3246     }
3247     LInstruction* instr() override { return instr_; }
3248 
3249    private:
3250     LMathAbs* instr_;
3251   };
3252 
3253   DCHECK(instr->value()->Equals(instr->result()));
3254   Representation r = instr->hydrogen()->value()->representation();
3255 
3256   if (r.IsDouble()) {
3257     XMMRegister scratch = double_scratch0();
3258     XMMRegister input_reg = ToDoubleRegister(instr->value());
3259     __ Xorpd(scratch, scratch);
3260     __ Subsd(scratch, input_reg);
3261     __ Andpd(input_reg, scratch);
3262   } else if (r.IsInteger32()) {
3263     EmitIntegerMathAbs(instr);
3264   } else if (r.IsSmi()) {
3265     EmitSmiMathAbs(instr);
3266   } else {  // Tagged case.
3267     DeferredMathAbsTaggedHeapNumber* deferred =
3268         new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3269     Register input_reg = ToRegister(instr->value());
3270     // Smi check.
3271     __ JumpIfNotSmi(input_reg, deferred->entry());
3272     EmitSmiMathAbs(instr);
3273     __ bind(deferred->exit());
3274   }
3275 }
3276 
3277 void LCodeGen::DoMathFloorD(LMathFloorD* instr) {
3278   XMMRegister output_reg = ToDoubleRegister(instr->result());
3279   XMMRegister input_reg = ToDoubleRegister(instr->value());
3280   CpuFeatureScope scope(masm(), SSE4_1);
3281   __ Roundsd(output_reg, input_reg, kRoundDown);
3282 }
3283 
3284 void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
3285   XMMRegister xmm_scratch = double_scratch0();
3286   Register output_reg = ToRegister(instr->result());
3287   XMMRegister input_reg = ToDoubleRegister(instr->value());
3288 
3289   if (CpuFeatures::IsSupported(SSE4_1)) {
3290     CpuFeatureScope scope(masm(), SSE4_1);
3291     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3292       // Deoptimize if minus zero.
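      // -0.0 has the bit pattern kMinInt64, the only value for which the
      // subtraction below sets the overflow flag.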
3293       __ Movq(output_reg, input_reg);
3294       __ subq(output_reg, Immediate(1));
3295       DeoptimizeIf(overflow, instr, DeoptimizeReason::kMinusZero);
3296     }
3297     __ Roundsd(xmm_scratch, input_reg, kRoundDown);
3298     __ Cvttsd2si(output_reg, xmm_scratch);
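    // Cvttsd2si yields kMinInt on overflow, the only value for which the
    // comparison against 1 sets the overflow flag.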
3299     __ cmpl(output_reg, Immediate(0x1));
3300     DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
3301   } else {
3302     Label negative_sign, done;
3303     // Deoptimize on unordered.
3304     __ Xorpd(xmm_scratch, xmm_scratch);  // Zero the register.
3305     __ Ucomisd(input_reg, xmm_scratch);
3306     DeoptimizeIf(parity_even, instr, DeoptimizeReason::kNaN);
3307     __ j(below, &negative_sign, Label::kNear);
3308 
3309     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3310       // Check for negative zero.
3311       Label positive_sign;
3312       __ j(above, &positive_sign, Label::kNear);
3313       __ Movmskpd(output_reg, input_reg);
3314       __ testl(output_reg, Immediate(1));
3315       DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
3316       __ Set(output_reg, 0);
3317       __ jmp(&done);
3318       __ bind(&positive_sign);
3319     }
3320 
3321     // Use truncating instruction (OK because input is positive).
3322     __ Cvttsd2si(output_reg, input_reg);
3323     // Overflow is signalled with minint.
3324     __ cmpl(output_reg, Immediate(0x1));
3325     DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
3326     __ jmp(&done, Label::kNear);
3327 
3328     // Non-zero negative reaches here.
3329     __ bind(&negative_sign);
3330     // Truncate, then compare and compensate.
3331     __ Cvttsd2si(output_reg, input_reg);
3332     __ Cvtlsi2sd(xmm_scratch, output_reg);
3333     __ Ucomisd(input_reg, xmm_scratch);
3334     __ j(equal, &done, Label::kNear);
3335     __ subl(output_reg, Immediate(1));
3336     DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
3337 
3338     __ bind(&done);
3339   }
3340 }
3341 
3342 void LCodeGen::DoMathRoundD(LMathRoundD* instr) {
3343   XMMRegister xmm_scratch = double_scratch0();
3344   XMMRegister output_reg = ToDoubleRegister(instr->result());
3345   XMMRegister input_reg = ToDoubleRegister(instr->value());
3346   CpuFeatureScope scope(masm(), SSE4_1);
3347   Label done;
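  // Round towards +infinity first; if the rounded value minus 0.5 is still
  // above the input, the fractional part was below 0.5, so subtract one to
  // round down instead (round-half-up semantics).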
3348   __ Roundsd(output_reg, input_reg, kRoundUp);
3349   __ Move(xmm_scratch, -0.5);
3350   __ Addsd(xmm_scratch, output_reg);
3351   __ Ucomisd(xmm_scratch, input_reg);
3352   __ j(below_equal, &done, Label::kNear);
3353   __ Move(xmm_scratch, 1.0);
3354   __ Subsd(output_reg, xmm_scratch);
3355   __ bind(&done);
3356 }
3357 
3358 void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
3359   const XMMRegister xmm_scratch = double_scratch0();
3360   Register output_reg = ToRegister(instr->result());
3361   XMMRegister input_reg = ToDoubleRegister(instr->value());
3362   XMMRegister input_temp = ToDoubleRegister(instr->temp());
3363   static int64_t one_half = V8_INT64_C(0x3FE0000000000000);  // 0.5
3364   static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000);  // -0.5
3365 
3366   Label done, round_to_zero, below_one_half;
3367   Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
3368   __ movq(kScratchRegister, one_half);
3369   __ Movq(xmm_scratch, kScratchRegister);
3370   __ Ucomisd(xmm_scratch, input_reg);
3371   __ j(above, &below_one_half, Label::kNear);
3372 
3373   // CVTTSD2SI rounds towards zero; since 0.5 <= x, we use floor(0.5 + x).
3374   __ Addsd(xmm_scratch, input_reg);
3375   __ Cvttsd2si(output_reg, xmm_scratch);
3376   // Overflow is signalled with minint.
3377   __ cmpl(output_reg, Immediate(0x1));
3378   DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
3379   __ jmp(&done, dist);
3380 
3381   __ bind(&below_one_half);
3382   __ movq(kScratchRegister, minus_one_half);
3383   __ Movq(xmm_scratch, kScratchRegister);
3384   __ Ucomisd(xmm_scratch, input_reg);
3385   __ j(below_equal, &round_to_zero, Label::kNear);
3386 
3387   // CVTTSD2SI rounds towards zero, so we compute ceil(x - (-0.5)) and then
3388   // compare and compensate.
3389   __ Movapd(input_temp, input_reg);  // Do not alter input_reg.
3390   __ Subsd(input_temp, xmm_scratch);
3391   __ Cvttsd2si(output_reg, input_temp);
3392   // Catch minint due to overflow, and to prevent overflow when compensating.
3393   __ cmpl(output_reg, Immediate(0x1));
3394   DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
3395 
3396   __ Cvtlsi2sd(xmm_scratch, output_reg);
3397   __ Ucomisd(xmm_scratch, input_temp);
3398   __ j(equal, &done, dist);
3399   __ subl(output_reg, Immediate(1));
3400   // No overflow because we already ruled out minint.
3401   __ jmp(&done, dist);
3402 
3403   __ bind(&round_to_zero);
3404   // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
3405   // we can ignore the difference between a result of -0 and +0.
3406   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3407     __ Movq(output_reg, input_reg);
3408     __ testq(output_reg, output_reg);
3409     DeoptimizeIf(negative, instr, DeoptimizeReason::kMinusZero);
3410   }
3411   __ Set(output_reg, 0);
3412   __ bind(&done);
3413 }
3414 
3415 
3416 void LCodeGen::DoMathFround(LMathFround* instr) {
3417   XMMRegister input_reg = ToDoubleRegister(instr->value());
3418   XMMRegister output_reg = ToDoubleRegister(instr->result());
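  // Round to the nearest single-precision value and widen back; the result
  // is the double closest to the float32 value, as Math.fround requires.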
3419   __ Cvtsd2ss(output_reg, input_reg);
3420   __ Cvtss2sd(output_reg, output_reg);
3421 }
3422 
3423 
3424 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3425   XMMRegister output = ToDoubleRegister(instr->result());
3426   if (instr->value()->IsDoubleRegister()) {
3427     XMMRegister input = ToDoubleRegister(instr->value());
3428     __ Sqrtsd(output, input);
3429   } else {
3430     Operand input = ToOperand(instr->value());
3431     __ Sqrtsd(output, input);
3432   }
3433 }
3434 
3435 
3436 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3437   XMMRegister xmm_scratch = double_scratch0();
3438   XMMRegister input_reg = ToDoubleRegister(instr->value());
3439   DCHECK(ToDoubleRegister(instr->result()).is(input_reg));
3440 
3441   // Note that according to ECMA-262 15.8.2.13:
3442   // Math.pow(-Infinity, 0.5) == Infinity
3443   // Math.sqrt(-Infinity) == NaN
3444   Label done, sqrt;
3445   // Check base for -Infinity.  According to IEEE-754, double-precision
3446   // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
3447   __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000));
3448   __ Movq(xmm_scratch, kScratchRegister);
3449   __ Ucomisd(xmm_scratch, input_reg);
3450   // Comparing -Infinity with NaN results in "unordered", which sets the
3451   // zero flag as if both were equal.  However, it also sets the carry flag.
3452   __ j(not_equal, &sqrt, Label::kNear);
3453   __ j(carry, &sqrt, Label::kNear);
3454   // If input is -Infinity, return Infinity.
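  // Produce +Infinity as 0 - (-Infinity), reusing the constant already held
  // in xmm_scratch.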
3455   __ Xorpd(input_reg, input_reg);
3456   __ Subsd(input_reg, xmm_scratch);
3457   __ jmp(&done, Label::kNear);
3458 
3459   // Square root.
3460   __ bind(&sqrt);
3461   __ Xorpd(xmm_scratch, xmm_scratch);
3462   __ Addsd(input_reg, xmm_scratch);  // Convert -0 to +0.
3463   __ Sqrtsd(input_reg, input_reg);
3464   __ bind(&done);
3465 }
3466 
3467 
3468 void LCodeGen::DoPower(LPower* instr) {
3469   Representation exponent_type = instr->hydrogen()->right()->representation();
3470   // Having marked this as a call, we can use any registers.
3471   // Just make sure that the input/output registers are the expected ones.
3472 
3473   Register tagged_exponent = MathPowTaggedDescriptor::exponent();
3474   DCHECK(!instr->right()->IsRegister() ||
3475          ToRegister(instr->right()).is(tagged_exponent));
3476   DCHECK(!instr->right()->IsDoubleRegister() ||
3477          ToDoubleRegister(instr->right()).is(xmm1));
3478   DCHECK(ToDoubleRegister(instr->left()).is(xmm2));
3479   DCHECK(ToDoubleRegister(instr->result()).is(xmm3));
3480 
3481   if (exponent_type.IsSmi()) {
3482     MathPowStub stub(isolate(), MathPowStub::TAGGED);
3483     __ CallStub(&stub);
3484   } else if (exponent_type.IsTagged()) {
3485     Label no_deopt;
3486     __ JumpIfSmi(tagged_exponent, &no_deopt, Label::kNear);
3487     __ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, rcx);
3488     DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
3489     __ bind(&no_deopt);
3490     MathPowStub stub(isolate(), MathPowStub::TAGGED);
3491     __ CallStub(&stub);
3492   } else if (exponent_type.IsInteger32()) {
3493     MathPowStub stub(isolate(), MathPowStub::INTEGER);
3494     __ CallStub(&stub);
3495   } else {
3496     DCHECK(exponent_type.IsDouble());
3497     MathPowStub stub(isolate(), MathPowStub::DOUBLE);
3498     __ CallStub(&stub);
3499   }
3500 }
3501 
3502 void LCodeGen::DoMathCos(LMathCos* instr) {
3503   DCHECK(ToDoubleRegister(instr->value()).is(xmm0));
3504   DCHECK(ToDoubleRegister(instr->result()).is(xmm0));
3505   __ PrepareCallCFunction(1);
3506   __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 1);
3507 }
3508 
3509 void LCodeGen::DoMathExp(LMathExp* instr) {
3510   DCHECK(ToDoubleRegister(instr->value()).is(xmm0));
3511   DCHECK(ToDoubleRegister(instr->result()).is(xmm0));
3512   __ PrepareCallCFunction(1);
3513   __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 1);
3514 }
3515 
3516 void LCodeGen::DoMathSin(LMathSin* instr) {
3517   DCHECK(ToDoubleRegister(instr->value()).is(xmm0));
3518   DCHECK(ToDoubleRegister(instr->result()).is(xmm0));
3519   __ PrepareCallCFunction(1);
3520   __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 1);
3521 }
3522 
3523 void LCodeGen::DoMathLog(LMathLog* instr) {
3524   DCHECK(ToDoubleRegister(instr->value()).is(xmm0));
3525   DCHECK(ToDoubleRegister(instr->result()).is(xmm0));
3526   __ PrepareCallCFunction(1);
3527   __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 1);
3528 }
3529 
3530 
3531 void LCodeGen::DoMathClz32(LMathClz32* instr) {
3532   Register input = ToRegister(instr->value());
3533   Register result = ToRegister(instr->result());
3534 
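  // Lzcntl is a macro-assembler helper; it is expected to emit LZCNT where
  // supported and an equivalent BSR-based fallback sequence otherwise.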
3535   __ Lzcntl(result, input);
3536 }
3537 
3538 void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
3539                                   Register scratch1, Register scratch2,
3540                                   Register scratch3) {
3541 #if DEBUG
3542   if (actual.is_reg()) {
3543     DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
3544   } else {
3545     DCHECK(!AreAliased(scratch1, scratch2, scratch3));
3546   }
3547 #endif
3548   if (FLAG_code_comments) {
3549     if (actual.is_reg()) {
3550       Comment(";;; PrepareForTailCall, actual: %s {",
3551               RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
3552                   actual.reg().code()));
3553     } else {
3554       Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
3555     }
3556   }
3557 
3558   // Check if next frame is an arguments adaptor frame.
3559   Register caller_args_count_reg = scratch1;
3560   Label no_arguments_adaptor, formal_parameter_count_loaded;
3561   __ movp(scratch2, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
3562   __ cmpp(Operand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset),
3563           Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
3564   __ j(not_equal, &no_arguments_adaptor, Label::kNear);
3565 
3566   // Drop current frame and load arguments count from arguments adaptor frame.
3567   __ movp(rbp, scratch2);
3568   __ SmiToInteger32(
3569       caller_args_count_reg,
3570       Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
3571   __ jmp(&formal_parameter_count_loaded, Label::kNear);
3572 
3573   __ bind(&no_arguments_adaptor);
3574   // Load caller's formal parameter count.
3575   __ movp(caller_args_count_reg,
3576           Immediate(info()->literal()->parameter_count()));
3577 
3578   __ bind(&formal_parameter_count_loaded);
3579   __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3,
3580                         ReturnAddressState::kNotOnStack);
3581   Comment(";;; }");
3582 }
3583 
3584 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3585   HInvokeFunction* hinstr = instr->hydrogen();
3586   DCHECK(ToRegister(instr->context()).is(rsi));
3587   DCHECK(ToRegister(instr->function()).is(rdi));
3588   DCHECK(instr->HasPointerMap());
3589 
3590   bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
3591 
3592   if (is_tail_call) {
3593     DCHECK(!info()->saves_caller_doubles());
3594     ParameterCount actual(instr->arity());
3595     // It is safe to use rbx, rcx and r8 as scratch registers here given that
3596     // 1) we are not going to return to caller function anyway,
3597     // 2) rbx (expected number of arguments) will be initialized below.
3598     PrepareForTailCall(actual, rbx, rcx, r8);
3599   }
3600 
3601   Handle<JSFunction> known_function = hinstr->known_function();
3602   if (known_function.is_null()) {
3603     LPointerMap* pointers = instr->pointer_map();
3604     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3605     ParameterCount actual(instr->arity());
3606     InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
3607     __ InvokeFunction(rdi, no_reg, actual, flag, generator);
3608   } else {
3609     CallKnownFunction(known_function, hinstr->formal_parameter_count(),
3610                       instr->arity(), is_tail_call, instr);
3611   }
3612 }
3613 
3614 
3615 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
3616   DCHECK(ToRegister(instr->context()).is(rsi));
3617   DCHECK(ToRegister(instr->constructor()).is(rdi));
3618   DCHECK(ToRegister(instr->result()).is(rax));
3619 
3620   __ Set(rax, instr->arity());
3621   __ Move(rbx, instr->hydrogen()->site());
3622 
3623   ElementsKind kind = instr->hydrogen()->elements_kind();
3624   AllocationSiteOverrideMode override_mode =
3625       (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
3626           ? DISABLE_ALLOCATION_SITES
3627           : DONT_OVERRIDE;
3628 
3629   if (instr->arity() == 0) {
3630     ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
3631     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3632   } else if (instr->arity() == 1) {
3633     Label done;
3634     if (IsFastPackedElementsKind(kind)) {
3635       Label packed_case;
3636       // The packed kind only applies when the single length argument is
3637       // zero; otherwise the array is holey, so inspect the first argument.
3638       __ movp(rcx, Operand(rsp, 0));
3639       __ testp(rcx, rcx);
3640       __ j(zero, &packed_case, Label::kNear);
3641 
3642       ElementsKind holey_kind = GetHoleyElementsKind(kind);
3643       ArraySingleArgumentConstructorStub stub(isolate(),
3644                                               holey_kind,
3645                                               override_mode);
3646       CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3647       __ jmp(&done, Label::kNear);
3648       __ bind(&packed_case);
3649     }
3650 
3651     ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
3652     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3653     __ bind(&done);
3654   } else {
3655     ArrayNArgumentsConstructorStub stub(isolate());
3656     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3657   }
3658 }
3659 
3660 
3661 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
3662   DCHECK(ToRegister(instr->context()).is(rsi));
3663   CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
3664 }
3665 
3666 
3667 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
3668   Register function = ToRegister(instr->function());
3669   Register code_object = ToRegister(instr->code_object());
3670   __ leap(code_object, FieldOperand(code_object, Code::kHeaderSize));
3671   __ movp(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
3672 }
3673 
3674 
3675 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
3676   Register result = ToRegister(instr->result());
3677   Register base = ToRegister(instr->base_object());
3678   if (instr->offset()->IsConstantOperand()) {
3679     LConstantOperand* offset = LConstantOperand::cast(instr->offset());
3680     __ leap(result, Operand(base, ToInteger32(offset)));
3681   } else {
3682     Register offset = ToRegister(instr->offset());
3683     __ leap(result, Operand(base, offset, times_1, 0));
3684   }
3685 }
3686 
3687 
3688 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
3689   HStoreNamedField* hinstr = instr->hydrogen();
3690   Representation representation = instr->representation();
3691 
3692   HObjectAccess access = hinstr->access();
3693   int offset = access.offset();
3694 
3695   if (access.IsExternalMemory()) {
3696     DCHECK(!hinstr->NeedsWriteBarrier());
3697     Register value = ToRegister(instr->value());
3698     if (instr->object()->IsConstantOperand()) {
3699       DCHECK(value.is(rax));
3700       LConstantOperand* object = LConstantOperand::cast(instr->object());
3701       __ store_rax(ToExternalReference(object));
3702     } else {
3703       Register object = ToRegister(instr->object());
3704       __ Store(MemOperand(object, offset), value, representation);
3705     }
3706     return;
3707   }
3708 
3709   Register object = ToRegister(instr->object());
3710   __ AssertNotSmi(object);
3711 
3712   DCHECK(!representation.IsSmi() ||
3713          !instr->value()->IsConstantOperand() ||
3714          IsInteger32Constant(LConstantOperand::cast(instr->value())));
3715   if (!FLAG_unbox_double_fields && representation.IsDouble()) {
3716     DCHECK(access.IsInobject());
3717     DCHECK(!hinstr->has_transition());
3718     DCHECK(!hinstr->NeedsWriteBarrier());
3719     XMMRegister value = ToDoubleRegister(instr->value());
3720     __ Movsd(FieldOperand(object, offset), value);
3721     return;
3722   }
3723 
3724   if (hinstr->has_transition()) {
3725     Handle<Map> transition = hinstr->transition_map();
3726     AddDeprecationDependency(transition);
3727     if (!hinstr->NeedsWriteBarrierForMap()) {
3728       __ Move(FieldOperand(object, HeapObject::kMapOffset), transition);
3729     } else {
3730       Register temp = ToRegister(instr->temp());
3731       __ Move(kScratchRegister, transition);
3732       __ movp(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
3733       // Update the write barrier for the map field.
3734       __ RecordWriteForMap(object,
3735                            kScratchRegister,
3736                            temp,
3737                            kSaveFPRegs);
3738     }
3739   }
3740 
3741   // Do the store.
3742   Register write_register = object;
3743   if (!access.IsInobject()) {
3744     write_register = ToRegister(instr->temp());
3745     __ movp(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
3746   }
3747 
3748   if (representation.IsSmi() && SmiValuesAre32Bits() &&
3749       hinstr->value()->representation().IsInteger32()) {
3750     DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
3751     if (FLAG_debug_code) {
3752       Register scratch = kScratchRegister;
3753       __ Load(scratch, FieldOperand(write_register, offset), representation);
3754       __ AssertSmi(scratch);
3755     }
3756     // Store int value directly to upper half of the smi.
3757     STATIC_ASSERT(kSmiTag == 0);
3758     DCHECK(kSmiTagSize + kSmiShiftSize == 32);
3759     offset += kPointerSize / 2;
3760     representation = Representation::Integer32();
3761   }
3762 
3763   Operand operand = FieldOperand(write_register, offset);
3764 
3765   if (FLAG_unbox_double_fields && representation.IsDouble()) {
3766     DCHECK(access.IsInobject());
3767     XMMRegister value = ToDoubleRegister(instr->value());
3768     __ Movsd(operand, value);
3769 
3770   } else if (instr->value()->IsRegister()) {
3771     Register value = ToRegister(instr->value());
3772     __ Store(operand, value, representation);
3773   } else {
3774     LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
3775     if (IsInteger32Constant(operand_value)) {
3776       DCHECK(!hinstr->NeedsWriteBarrier());
3777       int32_t value = ToInteger32(operand_value);
3778       if (representation.IsSmi()) {
3779         __ Move(operand, Smi::FromInt(value));
3780 
3781       } else {
3782         __ movl(operand, Immediate(value));
3783       }
3784 
3785     } else if (IsExternalConstant(operand_value)) {
3786       DCHECK(!hinstr->NeedsWriteBarrier());
3787       ExternalReference ptr = ToExternalReference(operand_value);
3788       __ Move(kScratchRegister, ptr);
3789       __ movp(operand, kScratchRegister);
3790     } else {
3791       Handle<Object> handle_value = ToHandle(operand_value);
3792       DCHECK(!hinstr->NeedsWriteBarrier());
3793       __ Move(operand, handle_value);
3794     }
3795   }
3796 
3797   if (hinstr->NeedsWriteBarrier()) {
3798     Register value = ToRegister(instr->value());
3799     Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
3800     // Update the write barrier for the object for in-object properties.
3801     __ RecordWriteField(write_register,
3802                         offset,
3803                         value,
3804                         temp,
3805                         kSaveFPRegs,
3806                         EMIT_REMEMBERED_SET,
3807                         hinstr->SmiCheckForWriteBarrier(),
3808                         hinstr->PointersToHereCheckForValue());
3809   }
3810 }
3811 
3812 
3813 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
3814   Representation representation = instr->hydrogen()->length()->representation();
3815   DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
3816   DCHECK(representation.IsSmiOrInteger32());
3817 
3818   Condition cc = instr->hydrogen()->allow_equality() ? below : below_equal;
3819   if (instr->length()->IsConstantOperand()) {
3820     int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
3821     Register index = ToRegister(instr->index());
3822     if (representation.IsSmi()) {
3823       __ Cmp(index, Smi::FromInt(length));
3824     } else {
3825       __ cmpl(index, Immediate(length));
3826     }
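    // The operands above were compared in swapped order (index against
    // length), so the condition must be commuted before it is used below.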
3827     cc = CommuteCondition(cc);
3828   } else if (instr->index()->IsConstantOperand()) {
3829     int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
3830     if (instr->length()->IsRegister()) {
3831       Register length = ToRegister(instr->length());
3832       if (representation.IsSmi()) {
3833         __ Cmp(length, Smi::FromInt(index));
3834       } else {
3835         __ cmpl(length, Immediate(index));
3836       }
3837     } else {
3838       Operand length = ToOperand(instr->length());
3839       if (representation.IsSmi()) {
3840         __ Cmp(length, Smi::FromInt(index));
3841       } else {
3842         __ cmpl(length, Immediate(index));
3843       }
3844     }
3845   } else {
3846     Register index = ToRegister(instr->index());
3847     if (instr->length()->IsRegister()) {
3848       Register length = ToRegister(instr->length());
3849       if (representation.IsSmi()) {
3850         __ cmpp(length, index);
3851       } else {
3852         __ cmpl(length, index);
3853       }
3854     } else {
3855       Operand length = ToOperand(instr->length());
3856       if (representation.IsSmi()) {
3857         __ cmpp(length, index);
3858       } else {
3859         __ cmpl(length, index);
3860       }
3861     }
3862   }
3863   if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
3864     Label done;
3865     __ j(NegateCondition(cc), &done, Label::kNear);
3866     __ int3();
3867     __ bind(&done);
3868   } else {
3869     DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds);
3870   }
3871 }
3872 
3873 
3874 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
3875   ElementsKind elements_kind = instr->elements_kind();
3876   LOperand* key = instr->key();
3877   if (kPointerSize == kInt32Size && !key->IsConstantOperand()) {
3878     Register key_reg = ToRegister(key);
3879     Representation key_representation =
3880         instr->hydrogen()->key()->representation();
3881     if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
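      // The key is still a tagged smi here; untag it so it can be used
      // directly as the element index below.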
3882       __ SmiToInteger64(key_reg, key_reg);
3883     } else if (instr->hydrogen()->IsDehoisted()) {
3884       // Sign extend key because it could be a 32 bit negative value
3885       // and the dehoisted address computation happens in 64 bits
3886       __ movsxlq(key_reg, key_reg);
3887     }
3888   }
3889   Operand operand(BuildFastArrayOperand(
3890       instr->elements(),
3891       key,
3892       instr->hydrogen()->key()->representation(),
3893       elements_kind,
3894       instr->base_offset()));
3895 
3896   if (elements_kind == FLOAT32_ELEMENTS) {
3897     XMMRegister value(ToDoubleRegister(instr->value()));
3898     __ Cvtsd2ss(value, value);
3899     __ Movss(operand, value);
3900   } else if (elements_kind == FLOAT64_ELEMENTS) {
3901     __ Movsd(operand, ToDoubleRegister(instr->value()));
3902   } else {
3903     Register value(ToRegister(instr->value()));
3904     switch (elements_kind) {
3905       case INT8_ELEMENTS:
3906       case UINT8_ELEMENTS:
3907       case UINT8_CLAMPED_ELEMENTS:
3908         __ movb(operand, value);
3909         break;
3910       case INT16_ELEMENTS:
3911       case UINT16_ELEMENTS:
3912         __ movw(operand, value);
3913         break;
3914       case INT32_ELEMENTS:
3915       case UINT32_ELEMENTS:
3916         __ movl(operand, value);
3917         break;
3918       case FLOAT32_ELEMENTS:
3919       case FLOAT64_ELEMENTS:
3920       case FAST_ELEMENTS:
3921       case FAST_SMI_ELEMENTS:
3922       case FAST_DOUBLE_ELEMENTS:
3923       case FAST_HOLEY_ELEMENTS:
3924       case FAST_HOLEY_SMI_ELEMENTS:
3925       case FAST_HOLEY_DOUBLE_ELEMENTS:
3926       case DICTIONARY_ELEMENTS:
3927       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
3928       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
3929       case FAST_STRING_WRAPPER_ELEMENTS:
3930       case SLOW_STRING_WRAPPER_ELEMENTS:
3931       case NO_ELEMENTS:
3932         UNREACHABLE();
3933         break;
3934     }
3935   }
3936 }
3937 
3938 
3939 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
3940   XMMRegister value = ToDoubleRegister(instr->value());
3941   LOperand* key = instr->key();
3942   if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
3943       instr->hydrogen()->IsDehoisted()) {
3944     // Sign extend key because it could be a 32 bit negative value
3945     // and the dehoisted address computation happens in 64 bits
3946     __ movsxlq(ToRegister(key), ToRegister(key));
3947   }
3948   if (instr->NeedsCanonicalization()) {
3949     XMMRegister xmm_scratch = double_scratch0();
3950     // Turn potential sNaN value into qNaN.
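    // Subtracting zero quietens a signalling NaN and leaves every other
    // value, including -0.0, unchanged.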
3951     __ Xorpd(xmm_scratch, xmm_scratch);
3952     __ Subsd(value, xmm_scratch);
3953   }
3954 
3955   Operand double_store_operand = BuildFastArrayOperand(
3956       instr->elements(),
3957       key,
3958       instr->hydrogen()->key()->representation(),
3959       FAST_DOUBLE_ELEMENTS,
3960       instr->base_offset());
3961 
3962   __ Movsd(double_store_operand, value);
3963 }
3964 
3965 
3966 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
3967   HStoreKeyed* hinstr = instr->hydrogen();
3968   LOperand* key = instr->key();
3969   int offset = instr->base_offset();
3970   Representation representation = hinstr->value()->representation();
3971 
3972   if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
3973       instr->hydrogen()->IsDehoisted()) {
3974     // Sign extend key because it could be a 32 bit negative value
3975     // and the dehoisted address computation happens in 64 bits
3976     __ movsxlq(ToRegister(key), ToRegister(key));
3977   }
3978   if (representation.IsInteger32() && SmiValuesAre32Bits()) {
3979     DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
3980     DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
3981     if (FLAG_debug_code) {
3982       Register scratch = kScratchRegister;
3983       __ Load(scratch,
3984               BuildFastArrayOperand(instr->elements(),
3985                                     key,
3986                                     instr->hydrogen()->key()->representation(),
3987                                     FAST_ELEMENTS,
3988                                     offset),
3989               Representation::Smi());
3990       __ AssertSmi(scratch);
3991     }
3992     // Store int value directly to upper half of the smi.
3993     STATIC_ASSERT(kSmiTag == 0);
3994     DCHECK(kSmiTagSize + kSmiShiftSize == 32);
3995     offset += kPointerSize / 2;
3996   }
3997 
3998   Operand operand =
3999       BuildFastArrayOperand(instr->elements(),
4000                             key,
4001                             instr->hydrogen()->key()->representation(),
4002                             FAST_ELEMENTS,
4003                             offset);
4004   if (instr->value()->IsRegister()) {
4005     __ Store(operand, ToRegister(instr->value()), representation);
4006   } else {
4007     LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4008     if (IsInteger32Constant(operand_value)) {
4009       int32_t value = ToInteger32(operand_value);
4010       if (representation.IsSmi()) {
4011         __ Move(operand, Smi::FromInt(value));
4012 
4013       } else {
4014         __ movl(operand, Immediate(value));
4015       }
4016     } else {
4017       Handle<Object> handle_value = ToHandle(operand_value);
4018       __ Move(operand, handle_value);
4019     }
4020   }
4021 
4022   if (hinstr->NeedsWriteBarrier()) {
4023     Register elements = ToRegister(instr->elements());
4024     DCHECK(instr->value()->IsRegister());
4025     Register value = ToRegister(instr->value());
4026     DCHECK(!key->IsConstantOperand());
4027     SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
4028             ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4029     // Compute address of modified element and store it into key register.
4030     Register key_reg(ToRegister(key));
4031     __ leap(key_reg, operand);
4032     __ RecordWrite(elements,
4033                    key_reg,
4034                    value,
4035                    kSaveFPRegs,
4036                    EMIT_REMEMBERED_SET,
4037                    check_needed,
4038                    hinstr->PointersToHereCheckForValue());
4039   }
4040 }
4041 
4042 
4043 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4044   if (instr->is_fixed_typed_array()) {
4045     DoStoreKeyedExternalArray(instr);
4046   } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4047     DoStoreKeyedFixedDoubleArray(instr);
4048   } else {
4049     DoStoreKeyedFixedArray(instr);
4050   }
4051 }
4052 
4053 
4054 void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
4055   class DeferredMaybeGrowElements final : public LDeferredCode {
4056    public:
4057     DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
4058         : LDeferredCode(codegen), instr_(instr) {}
4059     void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
4060     LInstruction* instr() override { return instr_; }
4061 
4062    private:
4063     LMaybeGrowElements* instr_;
4064   };
4065 
4066   Register result = rax;
4067   DeferredMaybeGrowElements* deferred =
4068       new (zone()) DeferredMaybeGrowElements(this, instr);
4069   LOperand* key = instr->key();
4070   LOperand* current_capacity = instr->current_capacity();
4071 
4072   DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
4073   DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
4074   DCHECK(key->IsConstantOperand() || key->IsRegister());
4075   DCHECK(current_capacity->IsConstantOperand() ||
4076          current_capacity->IsRegister());
4077 
4078   if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
4079     int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4080     int32_t constant_capacity =
4081         ToInteger32(LConstantOperand::cast(current_capacity));
4082     if (constant_key >= constant_capacity) {
4083       // Deferred case.
4084       __ jmp(deferred->entry());
4085     }
4086   } else if (key->IsConstantOperand()) {
4087     int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
4088     __ cmpl(ToRegister(current_capacity), Immediate(constant_key));
4089     __ j(less_equal, deferred->entry());
4090   } else if (current_capacity->IsConstantOperand()) {
4091     int32_t constant_capacity =
4092         ToInteger32(LConstantOperand::cast(current_capacity));
4093     __ cmpl(ToRegister(key), Immediate(constant_capacity));
4094     __ j(greater_equal, deferred->entry());
4095   } else {
4096     __ cmpl(ToRegister(key), ToRegister(current_capacity));
4097     __ j(greater_equal, deferred->entry());
4098   }
4099 
4100   if (instr->elements()->IsRegister()) {
4101     __ movp(result, ToRegister(instr->elements()));
4102   } else {
4103     __ movp(result, ToOperand(instr->elements()));
4104   }
4105 
4106   __ bind(deferred->exit());
4107 }
4108 
4109 
4110 void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
4111   // TODO(3095996): Get rid of this. For now, we need to make the
4112   // result register contain a valid pointer because it is already
4113   // contained in the register pointer map.
4114   Register result = rax;
4115   __ Move(result, Smi::kZero);
4116 
4117   // We have to call a stub.
4118   {
4119     PushSafepointRegistersScope scope(this);
4120     if (instr->object()->IsConstantOperand()) {
4121       LConstantOperand* constant_object =
4122           LConstantOperand::cast(instr->object());
4123       if (IsSmiConstant(constant_object)) {
4124         Smi* immediate = ToSmi(constant_object);
4125         __ Move(result, immediate);
4126       } else {
4127         Handle<Object> handle_value = ToHandle(constant_object);
4128         __ Move(result, handle_value);
4129       }
4130     } else if (instr->object()->IsRegister()) {
4131       __ Move(result, ToRegister(instr->object()));
4132     } else {
4133       __ movp(result, ToOperand(instr->object()));
4134     }
4135 
4136     LOperand* key = instr->key();
4137     if (key->IsConstantOperand()) {
4138       __ Move(rbx, ToSmi(LConstantOperand::cast(key)));
4139     } else {
4140       __ Move(rbx, ToRegister(key));
4141       __ Integer32ToSmi(rbx, rbx);
4142     }
4143 
4144     GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
4145     __ CallStub(&stub);
4146     RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
4147     __ StoreToSafepointRegisterSlot(result, result);
4148   }
4149 
4150   // Deopt on smi, which means the elements array changed to dictionary mode.
4151   Condition is_smi = __ CheckSmi(result);
4152   DeoptimizeIf(is_smi, instr, DeoptimizeReason::kSmi);
4153 }
4154 
4155 
4156 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4157   Register object_reg = ToRegister(instr->object());
4158 
4159   Handle<Map> from_map = instr->original_map();
4160   Handle<Map> to_map = instr->transitioned_map();
4161   ElementsKind from_kind = instr->from_kind();
4162   ElementsKind to_kind = instr->to_kind();
4163 
4164   Label not_applicable;
4165   __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
4166   __ j(not_equal, &not_applicable);
4167   if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
4168     Register new_map_reg = ToRegister(instr->new_map_temp());
4169     __ Move(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
4170     __ movp(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
4171     // Write barrier.
4172     __ RecordWriteForMap(object_reg, new_map_reg, ToRegister(instr->temp()),
4173                          kDontSaveFPRegs);
4174   } else {
4175     DCHECK(object_reg.is(rax));
4176     DCHECK(ToRegister(instr->context()).is(rsi));
4177     PushSafepointRegistersScope scope(this);
4178     __ Move(rbx, to_map);
4179     TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
4180     __ CallStub(&stub);
4181     RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
4182   }
4183   __ bind(&not_applicable);
4184 }
4185 
4186 
4187 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4188   Register object = ToRegister(instr->object());
4189   Register temp = ToRegister(instr->temp());
4190   Label no_memento_found;
4191   __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4192   DeoptimizeIf(equal, instr, DeoptimizeReason::kMementoFound);
4193   __ bind(&no_memento_found);
4194 }
4195 
4196 
4197 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4198   DCHECK(ToRegister(instr->context()).is(rsi));
4199   DCHECK(ToRegister(instr->left()).is(rdx));
4200   DCHECK(ToRegister(instr->right()).is(rax));
4201   StringAddStub stub(isolate(),
4202                      instr->hydrogen()->flags(),
4203                      instr->hydrogen()->pretenure_flag());
4204   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4205 }
4206 
4207 
4208 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4209   class DeferredStringCharCodeAt final : public LDeferredCode {
4210    public:
4211     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4212         : LDeferredCode(codegen), instr_(instr) { }
4213     void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
4214     LInstruction* instr() override { return instr_; }
4215 
4216    private:
4217     LStringCharCodeAt* instr_;
4218   };
4219 
4220   DeferredStringCharCodeAt* deferred =
4221       new(zone()) DeferredStringCharCodeAt(this, instr);
4222 
4223   StringCharLoadGenerator::Generate(masm(),
4224                                     ToRegister(instr->string()),
4225                                     ToRegister(instr->index()),
4226                                     ToRegister(instr->result()),
4227                                     deferred->entry());
4228   __ bind(deferred->exit());
4229 }
4230 
4231 
4232 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4233   Register string = ToRegister(instr->string());
4234   Register result = ToRegister(instr->result());
4235 
4236   // TODO(3095996): Get rid of this. For now, we need to make the
4237   // result register contain a valid pointer because it is already
4238   // contained in the register pointer map.
4239   __ Set(result, 0);
4240 
4241   PushSafepointRegistersScope scope(this);
4242   __ Push(string);
4243   // Push the index as a smi. This is safe because of the checks in
4244   // DoStringCharCodeAt above.
4245   STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
4246   if (instr->index()->IsConstantOperand()) {
4247     int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
4248     __ Push(Smi::FromInt(const_index));
4249   } else {
4250     Register index = ToRegister(instr->index());
4251     __ Integer32ToSmi(index, index);
4252     __ Push(index);
4253   }
4254   CallRuntimeFromDeferred(
4255       Runtime::kStringCharCodeAtRT, 2, instr, instr->context());
4256   __ AssertSmi(rax);
4257   __ SmiToInteger32(rax, rax);
4258   __ StoreToSafepointRegisterSlot(result, rax);
4259 }
4260 
4261 
4262 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4263   class DeferredStringCharFromCode final : public LDeferredCode {
4264    public:
4265     DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4266         : LDeferredCode(codegen), instr_(instr) { }
4267     void Generate() override {
4268       codegen()->DoDeferredStringCharFromCode(instr_);
4269     }
4270     LInstruction* instr() override { return instr_; }
4271 
4272    private:
4273     LStringCharFromCode* instr_;
4274   };
4275 
4276   DeferredStringCharFromCode* deferred =
4277       new(zone()) DeferredStringCharFromCode(this, instr);
4278 
4279   DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
4280   Register char_code = ToRegister(instr->char_code());
4281   Register result = ToRegister(instr->result());
4282   DCHECK(!char_code.is(result));
4283 
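  // Char codes above String::kMaxOneByteCharCode, and cache misses (undefined
  // entries in the single-character string cache), are handled in deferred code.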
4284   __ cmpl(char_code, Immediate(String::kMaxOneByteCharCode));
4285   __ j(above, deferred->entry());
4286   __ movsxlq(char_code, char_code);
4287   __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4288   __ movp(result, FieldOperand(result,
4289                                char_code, times_pointer_size,
4290                                FixedArray::kHeaderSize));
4291   __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
4292   __ j(equal, deferred->entry());
4293   __ bind(deferred->exit());
4294 }
4295 
4296 
4297 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4298   Register char_code = ToRegister(instr->char_code());
4299   Register result = ToRegister(instr->result());
4300 
4301   // TODO(3095996): Get rid of this. For now, we need to make the
4302   // result register contain a valid pointer because it is already
4303   // contained in the register pointer map.
4304   __ Set(result, 0);
4305 
4306   PushSafepointRegistersScope scope(this);
4307   __ Integer32ToSmi(char_code, char_code);
4308   __ Push(char_code);
4309   CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
4310                           instr->context());
4311   __ StoreToSafepointRegisterSlot(result, rax);
4312 }
4313 
4314 
4315 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4316   LOperand* input = instr->value();
4317   DCHECK(input->IsRegister() || input->IsStackSlot());
4318   LOperand* output = instr->result();
4319   DCHECK(output->IsDoubleRegister());
4320   if (input->IsRegister()) {
4321     __ Cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
4322   } else {
4323     __ Cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
4324   }
4325 }
4326 
4327 
4328 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4329   LOperand* input = instr->value();
4330   LOperand* output = instr->result();
4331 
4332   __ LoadUint32(ToDoubleRegister(output), ToRegister(input));
4333 }
4334 
4335 
4336 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4337   class DeferredNumberTagI final : public LDeferredCode {
4338    public:
4339     DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
4340         : LDeferredCode(codegen), instr_(instr) { }
4341     void Generate() override {
4342       codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
4343                                        instr_->temp2(), SIGNED_INT32);
4344     }
4345     LInstruction* instr() override { return instr_; }
4346 
4347    private:
4348     LNumberTagI* instr_;
4349   };
4350 
4351   LOperand* input = instr->value();
4352   DCHECK(input->IsRegister() && input->Equals(instr->result()));
4353   Register reg = ToRegister(input);
4354 
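  // With 32-bit smis the tag is a shift into the upper word, so any int32 fits;
  // with 31-bit smis the shift by one can overflow, hence the deferred path.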
4355   if (SmiValuesAre32Bits()) {
4356     __ Integer32ToSmi(reg, reg);
4357   } else {
4358     DCHECK(SmiValuesAre31Bits());
4359     DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
4360     __ Integer32ToSmi(reg, reg);
4361     __ j(overflow, deferred->entry());
4362     __ bind(deferred->exit());
4363   }
4364 }
4365 
4366 
4367 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4368   class DeferredNumberTagU final : public LDeferredCode {
4369    public:
4370     DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4371         : LDeferredCode(codegen), instr_(instr) { }
4372     void Generate() override {
4373       codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
4374                                        instr_->temp2(), UNSIGNED_INT32);
4375     }
4376     LInstruction* instr() override { return instr_; }
4377 
4378    private:
4379     LNumberTagU* instr_;
4380   };
4381 
4382   LOperand* input = instr->value();
4383   DCHECK(input->IsRegister() && input->Equals(instr->result()));
4384   Register reg = ToRegister(input);
4385 
4386   DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4387   __ cmpl(reg, Immediate(Smi::kMaxValue));
4388   __ j(above, deferred->entry());
4389   __ Integer32ToSmi(reg, reg);
4390   __ bind(deferred->exit());
4391 }
4392 
4393 
4394 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
4395                                      LOperand* value,
4396                                      LOperand* temp1,
4397                                      LOperand* temp2,
4398                                      IntegerSignedness signedness) {
4399   Label done, slow;
4400   Register reg = ToRegister(value);
4401   Register tmp = ToRegister(temp1);
4402   XMMRegister temp_xmm = ToDoubleRegister(temp2);
4403 
4404   // Load value into temp_xmm which will be preserved across potential call to
4405   // runtime (MacroAssembler::EnterExitFrameEpilogue preserves only allocatable
4406   // XMM registers on x64).
4407   if (signedness == SIGNED_INT32) {
4408     DCHECK(SmiValuesAre31Bits());
4409     // There was overflow, so bits 30 and 31 of the original integer
4410     // disagree. Try to allocate a heap number in new space and store
4411     // the value in there. If that fails, call the runtime system.
4412     __ SmiToInteger32(reg, reg);
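    // The arithmetic shift back leaves bit 31 as a copy of bit 30, which is the
    // wrong value after an overflow; xor-ing with 0x80000000 flips it back to
    // recover the original int32.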
4413     __ xorl(reg, Immediate(0x80000000));
4414     __ Cvtlsi2sd(temp_xmm, reg);
4415   } else {
4416     DCHECK(signedness == UNSIGNED_INT32);
4417     __ LoadUint32(temp_xmm, reg);
4418   }
4419 
4420   if (FLAG_inline_new) {
4421     __ AllocateHeapNumber(reg, tmp, &slow);
4422     __ jmp(&done, kPointerSize == kInt64Size ? Label::kNear : Label::kFar);
4423   }
4424 
4425   // Slow case: Call the runtime system to do the number allocation.
4426   __ bind(&slow);
4427   {
4428     // Put a valid pointer value in the stack slot where the result
4429     // register is stored, as this register is in the pointer map, but contains
4430     // an integer value.
4431     __ Set(reg, 0);
4432 
4433     // Preserve the value of all registers.
4434     PushSafepointRegistersScope scope(this);
4435     // Reset the context register.
4436     if (!reg.is(rsi)) {
4437       __ Set(rsi, 0);
4438     }
4439     __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4440     RecordSafepointWithRegisters(
4441         instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4442     __ StoreToSafepointRegisterSlot(reg, rax);
4443   }
4444 
4445   // Done. Put the value in temp_xmm into the value of the allocated heap
4446   // number.
4447   __ bind(&done);
4448   __ Movsd(FieldOperand(reg, HeapNumber::kValueOffset), temp_xmm);
4449 }
4450 
4451 
4452 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4453   class DeferredNumberTagD final : public LDeferredCode {
4454    public:
4455     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4456         : LDeferredCode(codegen), instr_(instr) { }
4457     void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
4458     LInstruction* instr() override { return instr_; }
4459 
4460    private:
4461     LNumberTagD* instr_;
4462   };
4463 
4464   XMMRegister input_reg = ToDoubleRegister(instr->value());
4465   Register reg = ToRegister(instr->result());
4466   Register tmp = ToRegister(instr->temp());
4467 
4468   DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4469   if (FLAG_inline_new) {
4470     __ AllocateHeapNumber(reg, tmp, deferred->entry());
4471   } else {
4472     __ jmp(deferred->entry());
4473   }
4474   __ bind(deferred->exit());
4475   __ Movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
4476 }
4477 
4478 
4479 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4480   // TODO(3095996): Get rid of this. For now, we need to make the
4481   // result register contain a valid pointer because it is already
4482   // contained in the register pointer map.
4483   Register reg = ToRegister(instr->result());
4484   __ Move(reg, Smi::kZero);
4485 
4486   {
4487     PushSafepointRegistersScope scope(this);
4488     // Reset the context register.
4489     if (!reg.is(rsi)) {
4490       __ Move(rsi, 0);
4491     }
4492     __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4493     RecordSafepointWithRegisters(
4494         instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4495     __ movp(kScratchRegister, rax);
4496   }
4497   __ movp(reg, kScratchRegister);
4498 }
4499 
4500 
4501 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4502   HChange* hchange = instr->hydrogen();
4503   Register input = ToRegister(instr->value());
4504   Register output = ToRegister(instr->result());
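  // Unsigned inputs may exceed Smi::kMaxValue and are range-checked before
  // tagging; signed inputs rely on the overflow flag set by the tagging shift.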
4505   if (hchange->CheckFlag(HValue::kCanOverflow) &&
4506       hchange->value()->CheckFlag(HValue::kUint32)) {
4507     Condition is_smi = __ CheckUInteger32ValidSmiValue(input);
4508     DeoptimizeIf(NegateCondition(is_smi), instr, DeoptimizeReason::kOverflow);
4509   }
4510   __ Integer32ToSmi(output, input);
4511   if (hchange->CheckFlag(HValue::kCanOverflow) &&
4512       !hchange->value()->CheckFlag(HValue::kUint32)) {
4513     DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
4514   }
4515 }
4516 
4517 
4518 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4519   DCHECK(instr->value()->Equals(instr->result()));
4520   Register input = ToRegister(instr->value());
4521   if (instr->needs_check()) {
4522     Condition is_smi = __ CheckSmi(input);
4523     DeoptimizeIf(NegateCondition(is_smi), instr, DeoptimizeReason::kNotASmi);
4524   } else {
4525     __ AssertSmi(input);
4526   }
4527   __ SmiToInteger32(input, input);
4528 }
4529 
4530 
4531 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
4532                                 XMMRegister result_reg, NumberUntagDMode mode) {
4533   bool can_convert_undefined_to_nan = instr->truncating();
4534   bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
4535 
4536   Label convert, load_smi, done;
4537 
4538   if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4539     // Smi check.
4540     __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
4541 
4542     // Heap number map check.
4543     __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
4544                    Heap::kHeapNumberMapRootIndex);
4545 
4546     // On x64 it is safe to load at heap number offset before evaluating the map
4547     // check, since all heap objects are at least two words long.
4548     __ Movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
4549 
4550     if (can_convert_undefined_to_nan) {
4551       __ j(not_equal, &convert, Label::kNear);
4552     } else {
4553       DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
4554     }
4555 
4556     if (deoptimize_on_minus_zero) {
4557       XMMRegister xmm_scratch = double_scratch0();
4558       __ Xorpd(xmm_scratch, xmm_scratch);
4559       __ Ucomisd(xmm_scratch, result_reg);
4560       __ j(not_equal, &done, Label::kNear);
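      // The value compared equal to +0.0; distinguish -0.0 by inspecting the
      // sign bit of the low double via Movmskpd.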
4561       __ Movmskpd(kScratchRegister, result_reg);
4562       __ testl(kScratchRegister, Immediate(1));
4563       DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
4564     }
4565     __ jmp(&done, Label::kNear);
4566 
4567     if (can_convert_undefined_to_nan) {
4568       __ bind(&convert);
4569 
4570       // Convert undefined (and hole) to NaN. Compute NaN as 0/0.
4571       __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
4572       DeoptimizeIf(not_equal, instr,
4573                    DeoptimizeReason::kNotAHeapNumberUndefined);
4574 
4575       __ Xorpd(result_reg, result_reg);
4576       __ Divsd(result_reg, result_reg);
4577       __ jmp(&done, Label::kNear);
4578     }
4579   } else {
4580     DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
4581   }
4582 
4583   // Smi to XMM conversion
4584   __ bind(&load_smi);
4585   __ SmiToInteger32(kScratchRegister, input_reg);
4586   __ Cvtlsi2sd(result_reg, kScratchRegister);
4587   __ bind(&done);
4588 }
4589 
4590 
4591 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
4592   Register input_reg = ToRegister(instr->value());
4593 
4594   if (instr->truncating()) {
4595     Register input_map_reg = kScratchRegister;
4596     Label truncate;
4597     Label::Distance truncate_distance =
4598         DeoptEveryNTimes() ? Label::kFar : Label::kNear;
4599     __ movp(input_map_reg, FieldOperand(input_reg, HeapObject::kMapOffset));
4600     __ JumpIfRoot(input_map_reg, Heap::kHeapNumberMapRootIndex, &truncate,
4601                   truncate_distance);
4602     __ CmpInstanceType(input_map_reg, ODDBALL_TYPE);
4603     DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotANumberOrOddball);
4604     __ bind(&truncate);
4605     __ TruncateHeapNumberToI(input_reg, input_reg);
4606   } else {
4607     XMMRegister scratch = ToDoubleRegister(instr->temp());
4608     DCHECK(!scratch.is(double_scratch0()));
4609     __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
4610                    Heap::kHeapNumberMapRootIndex);
4611     DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
4612     __ Movsd(double_scratch0(),
4613              FieldOperand(input_reg, HeapNumber::kValueOffset));
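    // Truncate to int32 and convert back: if the round trip does not compare
    // equal, precision was lost; an unordered result (parity flag) means NaN.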
4614     __ Cvttsd2si(input_reg, double_scratch0());
4615     __ Cvtlsi2sd(scratch, input_reg);
4616     __ Ucomisd(double_scratch0(), scratch);
4617     DeoptimizeIf(not_equal, instr, DeoptimizeReason::kLostPrecision);
4618     DeoptimizeIf(parity_even, instr, DeoptimizeReason::kNaN);
4619     if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
4620       __ testl(input_reg, input_reg);
4621       __ j(not_zero, done);
4622       __ Movmskpd(input_reg, double_scratch0());
4623       __ andl(input_reg, Immediate(1));
4624       DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
4625     }
4626   }
4627 }
4628 
4629 
4630 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4631   class DeferredTaggedToI final : public LDeferredCode {
4632    public:
4633     DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4634         : LDeferredCode(codegen), instr_(instr) { }
4635     void Generate() override { codegen()->DoDeferredTaggedToI(instr_, done()); }
4636     LInstruction* instr() override { return instr_; }
4637 
4638    private:
4639     LTaggedToI* instr_;
4640   };
4641 
4642   LOperand* input = instr->value();
4643   DCHECK(input->IsRegister());
4644   DCHECK(input->Equals(instr->result()));
4645   Register input_reg = ToRegister(input);
4646 
4647   if (instr->hydrogen()->value()->representation().IsSmi()) {
4648     __ SmiToInteger32(input_reg, input_reg);
4649   } else {
4650     DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
4651     __ JumpIfNotSmi(input_reg, deferred->entry());
4652     __ SmiToInteger32(input_reg, input_reg);
4653     __ bind(deferred->exit());
4654   }
4655 }
4656 
4657 
4658 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4659   LOperand* input = instr->value();
4660   DCHECK(input->IsRegister());
4661   LOperand* result = instr->result();
4662   DCHECK(result->IsDoubleRegister());
4663 
4664   Register input_reg = ToRegister(input);
4665   XMMRegister result_reg = ToDoubleRegister(result);
4666 
4667   HValue* value = instr->hydrogen()->value();
4668   NumberUntagDMode mode = value->representation().IsSmi()
4669       ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
4670 
4671   EmitNumberUntagD(instr, input_reg, result_reg, mode);
4672 }
4673 
4674 
4675 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4676   LOperand* input = instr->value();
4677   DCHECK(input->IsDoubleRegister());
4678   LOperand* result = instr->result();
4679   DCHECK(result->IsRegister());
4680 
4681   XMMRegister input_reg = ToDoubleRegister(input);
4682   Register result_reg = ToRegister(result);
4683 
4684   if (instr->truncating()) {
4685     __ TruncateDoubleToI(result_reg, input_reg);
4686   } else {
4687     Label lost_precision, is_nan, minus_zero, done;
4688     XMMRegister xmm_scratch = double_scratch0();
4689     Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
4690     __ DoubleToI(result_reg, input_reg, xmm_scratch,
4691                  instr->hydrogen()->GetMinusZeroMode(), &lost_precision,
4692                  &is_nan, &minus_zero, dist);
4693     __ jmp(&done, dist);
4694     __ bind(&lost_precision);
4695     DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision);
4696     __ bind(&is_nan);
4697     DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN);
4698     __ bind(&minus_zero);
4699     DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
4700     __ bind(&done);
4701   }
4702 }
4703 
4704 
4705 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
4706   LOperand* input = instr->value();
4707   DCHECK(input->IsDoubleRegister());
4708   LOperand* result = instr->result();
4709   DCHECK(result->IsRegister());
4710 
4711   XMMRegister input_reg = ToDoubleRegister(input);
4712   Register result_reg = ToRegister(result);
4713 
4714   Label lost_precision, is_nan, minus_zero, done;
4715   XMMRegister xmm_scratch = double_scratch0();
4716   Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
4717   __ DoubleToI(result_reg, input_reg, xmm_scratch,
4718                instr->hydrogen()->GetMinusZeroMode(), &lost_precision, &is_nan,
4719                &minus_zero, dist);
4720   __ jmp(&done, dist);
4721   __ bind(&lost_precision);
4722   DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision);
4723   __ bind(&is_nan);
4724   DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN);
4725   __ bind(&minus_zero);
4726   DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
4727   __ bind(&done);
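  // Unlike DoDoubleToI above, the result must also fit in a smi, so the tagging
  // step's overflow flag is checked as well.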
4728   __ Integer32ToSmi(result_reg, result_reg);
4729   DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
4730 }
4731 
4732 
4733 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
4734   LOperand* input = instr->value();
4735   Condition cc = masm()->CheckSmi(ToRegister(input));
4736   DeoptimizeIf(NegateCondition(cc), instr, DeoptimizeReason::kNotASmi);
4737 }
4738 
4739 
4740 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
4741   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
4742     LOperand* input = instr->value();
4743     Condition cc = masm()->CheckSmi(ToRegister(input));
4744     DeoptimizeIf(cc, instr, DeoptimizeReason::kSmi);
4745   }
4746 }
4747 
4748 
4749 void LCodeGen::DoCheckArrayBufferNotNeutered(
4750     LCheckArrayBufferNotNeutered* instr) {
4751   Register view = ToRegister(instr->view());
4752 
4753   __ movp(kScratchRegister,
4754           FieldOperand(view, JSArrayBufferView::kBufferOffset));
4755   __ testb(FieldOperand(kScratchRegister, JSArrayBuffer::kBitFieldOffset),
4756            Immediate(1 << JSArrayBuffer::WasNeutered::kShift));
4757   DeoptimizeIf(not_zero, instr, DeoptimizeReason::kOutOfBounds);
4758 }
4759 
4760 
4761 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
4762   Register input = ToRegister(instr->value());
4763 
4764   __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
4765 
4766   if (instr->hydrogen()->is_interval_check()) {
4767     InstanceType first;
4768     InstanceType last;
4769     instr->hydrogen()->GetCheckInterval(&first, &last);
4770 
4771     __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
4772             Immediate(static_cast<int8_t>(first)));
4773 
4774     // If there is only one type in the interval, check for equality.
4775     if (first == last) {
4776       DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongInstanceType);
4777     } else {
4778       DeoptimizeIf(below, instr, DeoptimizeReason::kWrongInstanceType);
4779       // Omit check for the last type.
4780       if (last != LAST_TYPE) {
4781         __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
4782                 Immediate(static_cast<int8_t>(last)));
4783         DeoptimizeIf(above, instr, DeoptimizeReason::kWrongInstanceType);
4784       }
4785     }
4786   } else {
4787     uint8_t mask;
4788     uint8_t tag;
4789     instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
4790 
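    // With a single-bit mask the expected tag is either zero or the mask bit
    // itself, so a testb and the zero flag are enough to decide.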
4791     if (base::bits::IsPowerOfTwo32(mask)) {
4792       DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
4793       __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
4794                Immediate(mask));
4795       DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
4796                    DeoptimizeReason::kWrongInstanceType);
4797     } else {
4798       __ movzxbl(kScratchRegister,
4799                  FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
4800       __ andb(kScratchRegister, Immediate(mask));
4801       __ cmpb(kScratchRegister, Immediate(tag));
4802       DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongInstanceType);
4803     }
4804   }
4805 }
4806 
4807 
4808 void LCodeGen::DoCheckValue(LCheckValue* instr) {
4809   Register reg = ToRegister(instr->value());
4810   __ Cmp(reg, instr->hydrogen()->object().handle());
4811   DeoptimizeIf(not_equal, instr, DeoptimizeReason::kValueMismatch);
4812 }
4813 
4814 
4815 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
4816   Label deopt, done;
4817   // If the map is not deprecated, the migration attempt does not make sense.
4818   __ Push(object);
4819   __ movp(object, FieldOperand(object, HeapObject::kMapOffset));
4820   __ testl(FieldOperand(object, Map::kBitField3Offset),
4821            Immediate(Map::Deprecated::kMask));
4822   __ Pop(object);
4823   __ j(zero, &deopt);
4824 
4825   {
4826     PushSafepointRegistersScope scope(this);
4827     __ Push(object);
4828 
4829     __ Set(rsi, 0);
4830     __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
4831     RecordSafepointWithRegisters(
4832         instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
4833 
4834     __ testp(rax, Immediate(kSmiTagMask));
4835   }
4836   __ j(not_zero, &done);
4837 
4838   __ bind(&deopt);
4839   DeoptimizeIf(always, instr, DeoptimizeReason::kInstanceMigrationFailed);
4840 
4841   __ bind(&done);
4842 }
4843 
4844 
4845 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
4846   class DeferredCheckMaps final : public LDeferredCode {
4847    public:
4848     DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
4849         : LDeferredCode(codegen), instr_(instr), object_(object) {
4850       SetExit(check_maps());
4851     }
4852     void Generate() override {
4853       codegen()->DoDeferredInstanceMigration(instr_, object_);
4854     }
4855     Label* check_maps() { return &check_maps_; }
4856     LInstruction* instr() override { return instr_; }
4857 
4858    private:
4859     LCheckMaps* instr_;
4860     Label check_maps_;
4861     Register object_;
4862   };
4863 
4864   if (instr->hydrogen()->IsStabilityCheck()) {
4865     const UniqueSet<Map>* maps = instr->hydrogen()->maps();
4866     for (int i = 0; i < maps->size(); ++i) {
4867       AddStabilityDependency(maps->at(i).handle());
4868     }
4869     return;
4870   }
4871 
4872   LOperand* input = instr->value();
4873   DCHECK(input->IsRegister());
4874   Register reg = ToRegister(input);
4875 
4876   DeferredCheckMaps* deferred = NULL;
4877   if (instr->hydrogen()->HasMigrationTarget()) {
4878     deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
4879     __ bind(deferred->check_maps());
4880   }
4881 
4882   const UniqueSet<Map>* maps = instr->hydrogen()->maps();
4883   Label success;
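  // Compare against all but the last map with an early-out on match; the final
  // comparison either falls through to success, deoptimizes, or (with a
  // migration target) tries to migrate the instance in deferred code.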
4884   for (int i = 0; i < maps->size() - 1; i++) {
4885     Handle<Map> map = maps->at(i).handle();
4886     __ CompareMap(reg, map);
4887     __ j(equal, &success, Label::kNear);
4888   }
4889 
4890   Handle<Map> map = maps->at(maps->size() - 1).handle();
4891   __ CompareMap(reg, map);
4892   if (instr->hydrogen()->HasMigrationTarget()) {
4893     __ j(not_equal, deferred->entry());
4894   } else {
4895     DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongMap);
4896   }
4897 
4898   __ bind(&success);
4899 }
4900 
4901 
4902 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
4903   XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
4904   XMMRegister xmm_scratch = double_scratch0();
4905   Register result_reg = ToRegister(instr->result());
4906   __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
4907 }
4908 
4909 
4910 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
4911   DCHECK(instr->unclamped()->Equals(instr->result()));
4912   Register value_reg = ToRegister(instr->result());
4913   __ ClampUint8(value_reg);
4914 }
4915 
4916 
4917 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
4918   DCHECK(instr->unclamped()->Equals(instr->result()));
4919   Register input_reg = ToRegister(instr->unclamped());
4920   XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
4921   XMMRegister xmm_scratch = double_scratch0();
4922   Label is_smi, done, heap_number;
4923   Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
4924   __ JumpIfSmi(input_reg, &is_smi, dist);
4925 
4926   // Check for heap number
4927   __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
4928          factory()->heap_number_map());
4929   __ j(equal, &heap_number, Label::kNear);
4930 
4931   // Check for undefined. Undefined is converted to zero for clamping
4932   // conversions.
4933   __ Cmp(input_reg, factory()->undefined_value());
4934   DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
4935   __ xorl(input_reg, input_reg);
4936   __ jmp(&done, Label::kNear);
4937 
4938   // Heap number
4939   __ bind(&heap_number);
4940   __ Movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
4941   __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
4942   __ jmp(&done, Label::kNear);
4943 
4944   // smi
4945   __ bind(&is_smi);
4946   __ SmiToInteger32(input_reg, input_reg);
4947   __ ClampUint8(input_reg);
4948 
4949   __ bind(&done);
4950 }
4951 
4952 
4953 void LCodeGen::DoAllocate(LAllocate* instr) {
4954   class DeferredAllocate final : public LDeferredCode {
4955    public:
4956     DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
4957         : LDeferredCode(codegen), instr_(instr) { }
4958     void Generate() override { codegen()->DoDeferredAllocate(instr_); }
4959     LInstruction* instr() override { return instr_; }
4960 
4961    private:
4962     LAllocate* instr_;
4963   };
4964 
4965   DeferredAllocate* deferred =
4966       new(zone()) DeferredAllocate(this, instr);
4967 
4968   Register result = ToRegister(instr->result());
4969   Register temp = ToRegister(instr->temp());
4970 
4971   // Allocate memory for the object.
4972   AllocationFlags flags = NO_ALLOCATION_FLAGS;
4973   if (instr->hydrogen()->MustAllocateDoubleAligned()) {
4974     flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
4975   }
4976   if (instr->hydrogen()->IsOldSpaceAllocation()) {
4977     DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
4978     flags = static_cast<AllocationFlags>(flags | PRETENURE);
4979   }
4980 
4981   if (instr->hydrogen()->IsAllocationFoldingDominator()) {
4982     flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
4983   }
4984   DCHECK(!instr->hydrogen()->IsAllocationFolded());
4985 
4986   if (instr->size()->IsConstantOperand()) {
4987     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
4988     CHECK(size <= kMaxRegularHeapObjectSize);
4989     __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
4990   } else {
4991     Register size = ToRegister(instr->size());
4992     __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
4993   }
4994 
4995   __ bind(deferred->exit());
4996 
4997   if (instr->hydrogen()->MustPrefillWithFiller()) {
4998     if (instr->size()->IsConstantOperand()) {
4999       int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5000       __ movl(temp, Immediate((size / kPointerSize) - 1));
5001     } else {
5002       temp = ToRegister(instr->size());
5003       __ sarp(temp, Immediate(kPointerSizeLog2));
5004       __ decl(temp);
5005     }
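    // Fill the body of the new object (every word except the first) with
    // one-pointer filler maps so the heap stays iterable until the real fields
    // are written.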
5006     Label loop;
5007     __ bind(&loop);
5008     __ Move(FieldOperand(result, temp, times_pointer_size, 0),
5009         isolate()->factory()->one_pointer_filler_map());
5010     __ decl(temp);
5011     __ j(not_zero, &loop);
5012   }
5013 }
5014 
5015 void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
5016   DCHECK(instr->hydrogen()->IsAllocationFolded());
5017   DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
5018   Register result = ToRegister(instr->result());
5019   Register temp = ToRegister(instr->temp());
5020 
5021   AllocationFlags flags = ALLOCATION_FOLDED;
5022   if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5023     flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5024   }
5025   if (instr->hydrogen()->IsOldSpaceAllocation()) {
5026     DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5027     flags = static_cast<AllocationFlags>(flags | PRETENURE);
5028   }
5029   if (instr->size()->IsConstantOperand()) {
5030     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5031     CHECK(size <= kMaxRegularHeapObjectSize);
5032     __ FastAllocate(size, result, temp, flags);
5033   } else {
5034     Register size = ToRegister(instr->size());
5035     __ FastAllocate(size, result, temp, flags);
5036   }
5037 }
5038 
5039 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
5040   Register result = ToRegister(instr->result());
5041 
5042   // TODO(3095996): Get rid of this. For now, we need to make the
5043   // result register contain a valid pointer because it is already
5044   // contained in the register pointer map.
5045   __ Move(result, Smi::kZero);
5046 
5047   PushSafepointRegistersScope scope(this);
5048   if (instr->size()->IsRegister()) {
5049     Register size = ToRegister(instr->size());
5050     DCHECK(!size.is(result));
5051     __ Integer32ToSmi(size, size);
5052     __ Push(size);
5053   } else {
5054     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5055     __ Push(Smi::FromInt(size));
5056   }
5057 
5058   int flags = 0;
5059   if (instr->hydrogen()->IsOldSpaceAllocation()) {
5060     DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5061     flags = AllocateTargetSpace::update(flags, OLD_SPACE);
5062   } else {
5063     flags = AllocateTargetSpace::update(flags, NEW_SPACE);
5064   }
5065   __ Push(Smi::FromInt(flags));
5066 
5067   CallRuntimeFromDeferred(
5068       Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
5069   __ StoreToSafepointRegisterSlot(result, rax);
5070 
5071   if (instr->hydrogen()->IsAllocationFoldingDominator()) {
5072     AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
5073     if (instr->hydrogen()->IsOldSpaceAllocation()) {
5074       DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
5075       allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
5076     }
5077     // If the allocation folding dominator's allocation triggered a GC, the
5078     // allocation happened in the runtime. We have to reset the top pointer to
5079     // virtually undo the allocation.
5080     ExternalReference allocation_top =
5081         AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
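    // rax holds the tagged object returned by the runtime; untag it to get the
    // raw start address, store that as the new allocation top (undoing the
    // bump), then re-tag.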
5082     __ subp(rax, Immediate(kHeapObjectTag));
5083     __ Store(allocation_top, rax);
5084     __ addp(rax, Immediate(kHeapObjectTag));
5085   }
5086 }
5087 
5088 
5089 void LCodeGen::DoTypeof(LTypeof* instr) {
5090   DCHECK(ToRegister(instr->context()).is(rsi));
5091   DCHECK(ToRegister(instr->value()).is(rbx));
5092   Label end, do_call;
5093   Register value_register = ToRegister(instr->value());
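  // Smis are always numbers, so answer "number" without calling the Typeof
  // builtin.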
5094   __ JumpIfNotSmi(value_register, &do_call);
5095   __ Move(rax, isolate()->factory()->number_string());
5096   __ jmp(&end);
5097   __ bind(&do_call);
5098   Callable callable = CodeFactory::Typeof(isolate());
5099   CallCode(callable.code(), RelocInfo::CODE_TARGET, instr);
5100   __ bind(&end);
5101 }
5102 
5103 
5104 void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
5105   DCHECK(!operand->IsDoubleRegister());
5106   if (operand->IsConstantOperand()) {
5107     __ Push(ToHandle(LConstantOperand::cast(operand)));
5108   } else if (operand->IsRegister()) {
5109     __ Push(ToRegister(operand));
5110   } else {
5111     __ Push(ToOperand(operand));
5112   }
5113 }
5114 
5115 
5116 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5117   Register input = ToRegister(instr->value());
5118   Condition final_branch_condition = EmitTypeofIs(instr, input);
5119   if (final_branch_condition != no_condition) {
5120     EmitBranch(instr, final_branch_condition);
5121   }
5122 }
5123 
5124 
5125 Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
5126   Label* true_label = instr->TrueLabel(chunk_);
5127   Label* false_label = instr->FalseLabel(chunk_);
5128   Handle<String> type_name = instr->type_literal();
5129   int left_block = instr->TrueDestination(chunk_);
5130   int right_block = instr->FalseDestination(chunk_);
5131   int next_block = GetNextEmittedBlock();
5132 
5133   Label::Distance true_distance = left_block == next_block ? Label::kNear
5134                                                            : Label::kFar;
5135   Label::Distance false_distance = right_block == next_block ? Label::kNear
5136                                                              : Label::kFar;
5137   Condition final_branch_condition = no_condition;
5138   Factory* factory = isolate()->factory();
5139   if (String::Equals(type_name, factory->number_string())) {
5140     __ JumpIfSmi(input, true_label, true_distance);
5141     __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
5142                    Heap::kHeapNumberMapRootIndex);
5143 
5144     final_branch_condition = equal;
5145 
5146   } else if (String::Equals(type_name, factory->string_string())) {
5147     __ JumpIfSmi(input, false_label, false_distance);
5148     __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
5149     final_branch_condition = below;
5150 
5151   } else if (String::Equals(type_name, factory->symbol_string())) {
5152     __ JumpIfSmi(input, false_label, false_distance);
5153     __ CmpObjectType(input, SYMBOL_TYPE, input);
5154     final_branch_condition = equal;
5155 
5156   } else if (String::Equals(type_name, factory->boolean_string())) {
5157     __ CompareRoot(input, Heap::kTrueValueRootIndex);
5158     __ j(equal, true_label, true_distance);
5159     __ CompareRoot(input, Heap::kFalseValueRootIndex);
5160     final_branch_condition = equal;
5161 
5162   } else if (String::Equals(type_name, factory->undefined_string())) {
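    // null must report "object", so send it to the false label before the
    // undetectable check; undetectable objects do report "undefined".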
5163     __ CompareRoot(input, Heap::kNullValueRootIndex);
5164     __ j(equal, false_label, false_distance);
5165     __ JumpIfSmi(input, false_label, false_distance);
5166     // Check for undetectable objects => true.
5167     __ movp(input, FieldOperand(input, HeapObject::kMapOffset));
5168     __ testb(FieldOperand(input, Map::kBitFieldOffset),
5169              Immediate(1 << Map::kIsUndetectable));
5170     final_branch_condition = not_zero;
5171 
5172   } else if (String::Equals(type_name, factory->function_string())) {
5173     __ JumpIfSmi(input, false_label, false_distance);
5174     // Check for callable and not undetectable objects => true.
5175     __ movp(input, FieldOperand(input, HeapObject::kMapOffset));
5176     __ movzxbl(input, FieldOperand(input, Map::kBitFieldOffset));
5177     __ andb(input,
5178             Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
5179     __ cmpb(input, Immediate(1 << Map::kIsCallable));
5180     final_branch_condition = equal;
5181 
5182   } else if (String::Equals(type_name, factory->object_string())) {
5183     __ JumpIfSmi(input, false_label, false_distance);
5184     __ CompareRoot(input, Heap::kNullValueRootIndex);
5185     __ j(equal, true_label, true_distance);
5186     STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
5187     __ CmpObjectType(input, FIRST_JS_RECEIVER_TYPE, input);
5188     __ j(below, false_label, false_distance);
5189     // Check for callable or undetectable objects => false.
5190     __ testb(FieldOperand(input, Map::kBitFieldOffset),
5191              Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
5192     final_branch_condition = zero;
5193 
5194   } else {
5195     __ jmp(false_label, false_distance);
5196   }
5197 
5198   return final_branch_condition;
5199 }
5200 
5201 
5202 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
5203   if (info()->ShouldEnsureSpaceForLazyDeopt()) {
5204     // Ensure that we have enough space after the previous lazy-bailout
5205     // instruction for patching the code here.
5206     int current_pc = masm()->pc_offset();
5207     if (current_pc < last_lazy_deopt_pc_ + space_needed) {
5208       int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
5209       __ Nop(padding_size);
5210     }
5211   }
5212   last_lazy_deopt_pc_ = masm()->pc_offset();
5213 }
5214 
5215 
5216 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5217   last_lazy_deopt_pc_ = masm()->pc_offset();
5218   DCHECK(instr->HasEnvironment());
5219   LEnvironment* env = instr->environment();
5220   RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5221   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5222 }
5223 
5224 
5225 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5226   Deoptimizer::BailoutType type = instr->hydrogen()->type();
5227   // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (they
5228   // need the return address), even though the implementation of LAZY and EAGER is
5229   // now identical. When LAZY is eventually completely folded into EAGER, remove
5230   // the special case below.
5231   if (info()->IsStub() && type == Deoptimizer::EAGER) {
5232     type = Deoptimizer::LAZY;
5233   }
5234   DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type);
5235 }
5236 
5237 
5238 void LCodeGen::DoDummy(LDummy* instr) {
5239   // Nothing to see here, move on!
5240 }
5241 
5242 
5243 void LCodeGen::DoDummyUse(LDummyUse* instr) {
5244   // Nothing to see here, move on!
5245 }
5246 
5247 
5248 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5249   PushSafepointRegistersScope scope(this);
5250   __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
5251   __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5252   RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
5253   DCHECK(instr->HasEnvironment());
5254   LEnvironment* env = instr->environment();
5255   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5256 }
5257 
5258 
5259 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5260   class DeferredStackCheck final : public LDeferredCode {
5261    public:
5262     DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5263         : LDeferredCode(codegen), instr_(instr) { }
5264     void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
5265     LInstruction* instr() override { return instr_; }
5266 
5267    private:
5268     LStackCheck* instr_;
5269   };
5270 
5271   DCHECK(instr->HasEnvironment());
5272   LEnvironment* env = instr->environment();
5273   // There is no LLazyBailout instruction for stack-checks. We have to
5274   // prepare for lazy deoptimization explicitly here.
5275   if (instr->hydrogen()->is_function_entry()) {
5276     // Perform stack overflow check.
5277     Label done;
5278     __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
5279     __ j(above_equal, &done, Label::kNear);
5280 
5281     DCHECK(instr->context()->IsRegister());
5282     DCHECK(ToRegister(instr->context()).is(rsi));
5283     CallCode(isolate()->builtins()->StackCheck(),
5284              RelocInfo::CODE_TARGET,
5285              instr);
5286     __ bind(&done);
5287   } else {
5288     DCHECK(instr->hydrogen()->is_backwards_branch());
5289     // Perform stack overflow check if this goto needs it before jumping.
5290     DeferredStackCheck* deferred_stack_check =
5291         new(zone()) DeferredStackCheck(this, instr);
5292     __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
5293     __ j(below, deferred_stack_check->entry());
5294     EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5295     __ bind(instr->done_label());
5296     deferred_stack_check->SetExit(instr->done_label());
5297     RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5298     // Don't record a deoptimization index for the safepoint here.
5299     // This will be done explicitly when emitting call and the safepoint in
5300     // the deferred code.
5301   }
5302 }
5303 
5304 
5305 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5306   // This is a pseudo-instruction that ensures that the environment here is
5307   // properly registered for deoptimization and records the assembler's PC
5308   // offset.
5309   LEnvironment* environment = instr->environment();
5310 
5311   // If the environment were already registered, we would have no way of
5312   // backpatching it with the spill slot operands.
5313   DCHECK(!environment->HasBeenRegistered());
5314   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5315 
5316   GenerateOsrPrologue();
5317 }
5318 
5319 
5320 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5321   DCHECK(ToRegister(instr->context()).is(rsi));
5322 
5323   Label use_cache, call_runtime;
5324   __ CheckEnumCache(&call_runtime);
5325 
5326   __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));
5327   __ jmp(&use_cache, Label::kNear);
5328 
5329   // Get the set of properties to enumerate.
5330   __ bind(&call_runtime);
5331   __ Push(rax);
5332   CallRuntime(Runtime::kForInEnumerate, instr);
5333   __ bind(&use_cache);
5334 }
5335 
5336 
5337 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5338   Register map = ToRegister(instr->map());
5339   Register result = ToRegister(instr->result());
5340   Label load_cache, done;
5341   __ EnumLength(result, map);
5342   __ Cmp(result, Smi::kZero);
5343   __ j(not_equal, &load_cache, Label::kNear);
5344   __ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex);
5345   __ jmp(&done, Label::kNear);
5346   __ bind(&load_cache);
5347   __ LoadInstanceDescriptors(map, result);
5348   __ movp(result,
5349           FieldOperand(result, DescriptorArray::kEnumCacheOffset));
5350   __ movp(result,
5351           FieldOperand(result, FixedArray::SizeFor(instr->idx())));
5352   __ bind(&done);
5353   Condition cc = masm()->CheckSmi(result);
5354   DeoptimizeIf(cc, instr, DeoptimizeReason::kNoCache);
5355 }
5356 
5357 
5358 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5359   Register object = ToRegister(instr->value());
5360   __ cmpp(ToRegister(instr->map()),
5361           FieldOperand(object, HeapObject::kMapOffset));
5362   DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongMap);
5363 }
5364 
5365 
5366 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
5367                                            Register object,
5368                                            Register index) {
5369   PushSafepointRegistersScope scope(this);
5370   __ Push(object);
5371   __ Push(index);
5372   __ xorp(rsi, rsi);
5373   __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
5374   RecordSafepointWithRegisters(
5375       instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
5376   __ StoreToSafepointRegisterSlot(object, rax);
5377 }
5378 
5379 
5380 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5381   class DeferredLoadMutableDouble final : public LDeferredCode {
5382    public:
5383     DeferredLoadMutableDouble(LCodeGen* codegen,
5384                               LLoadFieldByIndex* instr,
5385                               Register object,
5386                               Register index)
5387         : LDeferredCode(codegen),
5388           instr_(instr),
5389           object_(object),
5390           index_(index) {
5391     }
5392     void Generate() override {
5393       codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
5394     }
5395     LInstruction* instr() override { return instr_; }
5396 
5397    private:
5398     LLoadFieldByIndex* instr_;
5399     Register object_;
5400     Register index_;
5401   };
5402 
5403   Register object = ToRegister(instr->object());
5404   Register index = ToRegister(instr->index());
5405 
5406   DeferredLoadMutableDouble* deferred;
5407   deferred = new(zone()) DeferredLoadMutableDouble(this, instr, object, index);
5408 
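  // Bit 0 of the index payload flags a mutable-double field that must be
  // materialized by the runtime; after shifting it out, a negative index selects
  // an out-of-object property.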
5409   Label out_of_object, done;
5410   __ Move(kScratchRegister, Smi::FromInt(1));
5411   __ testp(index, kScratchRegister);
5412   __ j(not_zero, deferred->entry());
5413 
5414   __ sarp(index, Immediate(1));
5415 
5416   __ SmiToInteger32(index, index);
5417   __ cmpl(index, Immediate(0));
5418   __ j(less, &out_of_object, Label::kNear);
5419   __ movp(object, FieldOperand(object,
5420                                index,
5421                                times_pointer_size,
5422                                JSObject::kHeaderSize));
5423   __ jmp(&done, Label::kNear);
5424 
5425   __ bind(&out_of_object);
5426   __ movp(object, FieldOperand(object, JSObject::kPropertiesOffset));
5427   __ negl(index);
5428   // Index is now equal to the out-of-object property index plus 1.
5429   __ movp(object, FieldOperand(object,
5430                                index,
5431                                times_pointer_size,
5432                                FixedArray::kHeaderSize - kPointerSize));
5433   __ bind(deferred->exit());
5434   __ bind(&done);
5435 }
5436 
5437 #undef __
5438 
5439 }  // namespace internal
5440 }  // namespace v8
5441 
5442 #endif  // V8_TARGET_ARCH_X64
5443