// Copyright 2014 the V8 project authors. All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/crankshaft/s390/lithium-codegen-s390.h"

#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/crankshaft/hydrogen-osr.h"
#include "src/crankshaft/s390/lithium-gap-resolver-s390.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {

class SafepointGenerator final : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen, LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen), pointers_(pointers), deopt_mode_(mode) {}
  virtual ~SafepointGenerator() {}

  void BeforeCall(int call_size) const override {}

  void AfterCall() const override {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};

#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
         GenerateJumpTable() && GenerateSafepointTable();
}

void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetTotalFrameSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}

void LCodeGen::SaveCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ std(DoubleRegister::from_code(save_iterator.Current()),
           MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}

void LCodeGen::RestoreCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  int count = 0;
  while (!save_iterator.Done()) {
    __ ld(DoubleRegister::from_code(save_iterator.Current()),
          MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}
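
// A sketch of the layout these two helpers agree on (assuming, for example,
// that only d1 and d4 are in the allocated-doubles BitVector): set bits are
// visited in ascending register-code order, so d1 is saved at
// MemOperand(sp, 0) and d4 at MemOperand(sp, kDoubleSize). The restore walks
// the same order, so the slots pair up.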

bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

    // r3: Callee's JS function.
    // cp: Callee's context.
    // fp: Caller's frame pointer.
    // lr: Caller's pc.
    // ip: Our own function entry (required by the prologue)
  }

  int prologue_offset = masm_->pc_offset();

  if (prologue_offset) {
    // Prologue logic requires its starting address in ip and the
    // corresponding offset from the function entry.  Need to add
    // 4 bytes for the size of AHI/AGHI that AddP expands into.
    prologue_offset += sizeof(FourByteInstr);
    __ AddP(ip, ip, Operand(prologue_offset));
  }
  info()->set_prologue_offset(prologue_offset);
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      __ StubPrologue(StackFrame::STUB, ip, prologue_offset);
    } else {
      __ Prologue(info()->GeneratePreagedPrologue(), ip, prologue_offset);
    }
    frame_is_built_ = true;
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    __ lay(sp, MemOperand(sp, -(slots * kPointerSize)));
    if (FLAG_debug_code) {
      __ Push(r2, r3);
      __ mov(r2, Operand(slots * kPointerSize));
      __ mov(r3, Operand(kSlotsZapValue));
      Label loop;
      __ bind(&loop);
      __ StoreP(r3, MemOperand(sp, r2, kPointerSize));
      __ lay(r2, MemOperand(r2, -kPointerSize));
      __ CmpP(r2, Operand::Zero());
      __ bne(&loop);
      __ Pop(r2, r3);
    }
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }
  return !is_aborted();
}

void LCodeGen::DoPrologue(LPrologue* instr) {
  Comment(";;; Prologue begin");

  // Possibly allocate a local context.
  if (info()->scope()->num_heap_slots() > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is in r3.
    int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
    Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
    if (info()->scope()->is_script_scope()) {
      __ push(r3);
      __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
      __ CallRuntime(Runtime::kNewScriptContext);
      deopt_mode = Safepoint::kLazyDeopt;
    } else if (slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ push(r3);
      __ CallRuntime(Runtime::kNewFunctionContext);
    }
    RecordSafepoint(deopt_mode);

    // Context is returned in both r2 and cp.  It replaces the context
    // passed to us.  It's saved in the stack and kept live in cp.
    __ LoadRR(cp, r2);
    __ StoreP(r2, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    int first_parameter = scope()->has_this_declaration() ? -1 : 0;
    for (int i = first_parameter; i < num_parameters; i++) {
      Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
                               (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ LoadP(r2, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextMemOperand(cp, var->index());
        __ StoreP(r2, target);
        // Update the write barrier. This clobbers r5 and r2.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(cp, target.offset(), r2, r5,
                                    GetLinkRegisterState(), kSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(cp, r2, &done);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  Comment(";;; Prologue end");
}

void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if
  // there are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 0);
  __ lay(sp, MemOperand(sp, -slots * kPointerSize));
}

void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}

bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(
          ";;; <@%d,#%d> "
          "-------------------- Deferred %s --------------------",
          code->instruction_index(), code->instr()->hydrogen_value()->id(),
          code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        __ LoadSmiLiteral(scratch0(), Smi::FromInt(StackFrame::STUB));
        __ PushCommonFrame(scratch0());
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        __ PopCommonFrame(scratch0());
        frame_is_built_ = false;
      }
      __ b(code->exit());
    }
  }

  return !is_aborted();
}

bool LCodeGen::GenerateJumpTable() {
  // Check that the jump table is accessible from everywhere in the function
  // code, i.e. that offsets in halfwords to the table can be encoded in the
  // 32-bit signed immediate of a branch instruction.
  // To simplify we consider the code size from the first instruction to the
  // end of the jump table. We also don't consider the pc load delta.
  // Each entry in the jump table generates one instruction and inlines one
  // 32-bit data word after it.
  // TODO(joransiu): The Int24 condition can likely be relaxed for S390
  if (!is_int24(masm()->pc_offset() + jump_table_.length() * 7)) {
    Abort(kGeneratedCodeIsTooLarge);
  }

  if (jump_table_.length() > 0) {
    Label needs_frame, call_deopt_entry;

    Comment(";;; -------------------- Jump table --------------------");
    Address base = jump_table_[0].address;

    Register entry_offset = scratch0();

    int length = jump_table_.length();
    for (int i = 0; i < length; i++) {
      Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
      __ bind(&table_entry->label);

      DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
      Address entry = table_entry->address;
      DeoptComment(table_entry->deopt_info);

      // Second-level deopt table entries are contiguous and small, so instead
      // of loading the full, absolute address of each one, load an immediate
      // offset which will be added to the base address later.
      __ mov(entry_offset, Operand(entry - base));

      if (table_entry->needs_frame) {
        DCHECK(!info()->saves_caller_doubles());
        Comment(";;; call deopt with frame");
        __ PushCommonFrame();
        __ b(r14, &needs_frame);
      } else {
        __ b(r14, &call_deopt_entry);
      }
    }

    if (needs_frame.is_linked()) {
      __ bind(&needs_frame);
      // This variant of deopt can only be used with stubs. Since we don't
      // have a function pointer to install in the stack frame that we're
      // building, install a special marker there instead.
      DCHECK(info()->IsStub());
      __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::STUB));
      __ push(ip);
      DCHECK(info()->IsStub());
    }

    Comment(";;; call deopt");
    __ bind(&call_deopt_entry);

    if (info()->saves_caller_doubles()) {
      DCHECK(info()->IsStub());
      RestoreCallerDoubles();
    }

    // Add the base address to the offset previously loaded in entry_offset.
    __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base)));
    __ AddP(ip, entry_offset, ip);
    __ Jump(ip);
  }

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}

bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  safepoints_.Emit(masm(), GetTotalFrameSlotCount());
  return !is_aborted();
}

Register LCodeGen::ToRegister(int code) const {
  return Register::from_code(code);
}

DoubleRegister LCodeGen::ToDoubleRegister(int code) const {
  return DoubleRegister::from_code(code);
}

Register LCodeGen::ToRegister(LOperand* op) const {
  DCHECK(op->IsRegister());
  return ToRegister(op->index());
}

Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      AllowDeferredHandleDereference get_number;
      DCHECK(literal->IsNumber());
      __ LoadIntLiteral(scratch, static_cast<int32_t>(literal->Number()));
    } else if (r.IsDouble()) {
      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
    } else {
      DCHECK(r.IsSmiOrTagged());
      __ Move(scratch, literal);
    }
    return scratch;
  } else if (op->IsStackSlot()) {
    __ LoadP(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}

void LCodeGen::EmitLoadIntegerConstant(LConstantOperand* const_op,
                                       Register dst) {
  DCHECK(IsInteger32(const_op));
  HConstant* constant = chunk_->LookupConstant(const_op);
  int32_t value = constant->Integer32Value();
  if (IsSmi(const_op)) {
    __ LoadSmiLiteral(dst, Smi::FromInt(value));
  } else {
    __ LoadIntLiteral(dst, value);
  }
}

DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}

Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}

bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}

bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}

int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}

intptr_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                    const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  DCHECK(r.IsSmiOrTagged());
  return reinterpret_cast<intptr_t>(Smi::FromInt(value));
}
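
// Note on the Smi case above: Smi::FromInt() builds the tagged bit pattern
// (on 64-bit targets the payload sits in the upper 32 bits; on 32-bit
// targets it is the value shifted left by one, with a zero tag bit), so the
// reinterpret_cast simply exposes those raw bits as an integer.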

Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}

double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}

Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      DCHECK(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    DCHECK(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand::Zero();
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand::Zero();
}

static int ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
  return -(index + 1) * kPointerSize;
}
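
// Worked example: without an eager frame, argument indices are negative, so
// index -1 (the slot nearest the stack pointer) maps to offset 0, index -2 to
// kPointerSize, and so on up the stack.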

MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  DCHECK(!op->IsRegister());
  DCHECK(!op->IsDoubleRegister());
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, FrameSlotToFPOffset(op->index()));
  } else {
    // Without an eager frame, retrieve the parameter relative to the
    // stack pointer.
    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}

MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  DCHECK(op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kPointerSize);
  } else {
    // Without an eager frame, retrieve the parameter relative to the
    // stack pointer.
    return MemOperand(sp,
                      ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
  }
}

void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();

  WriteTranslation(environment->outer(), translation);
  WriteTranslationFrame(environment, translation);

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(
        environment, translation, value, environment->HasTaggedValueAt(i),
        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
  }
}

void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation, LOperand* op,
                                bool is_tagged, bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment, translation, value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer, dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    int index = op->index();
    if (is_tagged) {
      translation->StoreStackSlot(index);
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(index);
    } else {
      translation->StoreInt32StackSlot(index);
    }
  } else if (op->IsDoubleStackSlot()) {
    int index = op->index();
    translation->StoreDoubleStackSlot(index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}

void LCodeGen::CallCode(Handle<Code> code, RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}

void LCodeGen::CallCodeGeneric(Handle<Code> code, RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  DCHECK(instr != NULL);
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC || code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}

void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments, LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}

void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Move(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ LoadP(cp, ToMemOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}

void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
                                       LInstruction* instr, LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(instr->pointer_map(), argc,
                               Safepoint::kNoLazyDeopt);
}

void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index, translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}

void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason,
                            Deoptimizer::BailoutType bailout_type,
                            CRegister cr) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    Register scratch = scratch0();
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;

    // Store the condition on the stack if necessary
    if (cond != al) {
      Label done;
      __ LoadImmP(scratch, Operand::Zero());
      __ b(NegateCondition(cond), &done, Label::kNear);
      __ LoadImmP(scratch, Operand(1));
      __ bind(&done);
      __ push(scratch);
    }

    Label done;
    __ Push(r3);
    __ mov(scratch, Operand(count));
    __ LoadW(r3, MemOperand(scratch));
    __ Sub32(r3, r3, Operand(1));
    __ Cmp32(r3, Operand::Zero());
    __ bne(&no_deopt, Label::kNear);

    __ LoadImmP(r3, Operand(FLAG_deopt_every_n_times));
    __ StoreW(r3, MemOperand(scratch));
    __ Pop(r3);

    if (cond != al) {
      // Clean up the stack before the deoptimizer call
      __ pop(scratch);
    }

    __ Call(entry, RelocInfo::RUNTIME_ENTRY);

    __ b(&done);

    __ bind(&no_deopt);
    __ StoreW(r3, MemOperand(scratch));
    __ Pop(r3);

    if (cond != al) {
      // Clean up the stack before the deoptimizer call
      __ pop(scratch);
    }

    __ bind(&done);

    if (cond != al) {
      cond = ne;
      __ CmpP(scratch, Operand::Zero());
    }
  }

  if (info()->ShouldTrapOnDeopt()) {
    __ stop("trap_on_deopt", cond, kDefaultStopCode, cr);
  }

  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);

  DCHECK(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to handle condition, build frame, or
  // restore caller doubles.
  if (cond == al && frame_is_built_ && !info()->saves_caller_doubles()) {
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
                                            !frame_is_built_);
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (FLAG_trace_deopt || isolate()->is_profiling() ||
        jump_table_.is_empty() ||
        !table_entry.IsEquivalentTo(jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    __ b(cond, &jump_table_.last().label /*, cr*/);
  }
}

void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason,
                            CRegister cr) {
  Deoptimizer::BailoutType bailout_type =
      info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
  DeoptimizeIf(cond, instr, deopt_reason, bailout_type, cr);
}

void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
                                            SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(instr->pointer_map(), 0,
                                 Safepoint::kLazyDeopt);
  }
}

void LCodeGen::RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind,
                               int arguments, Safepoint::DeoptMode deopt_mode) {
  DCHECK(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint =
      safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}

void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}

void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}

void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}

void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
}

static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}

void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_, label->hydrogen_value()->id(),
          label->block_id(), LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}

void LCodeGen::DoParallelMove(LParallelMove* move) { resolver_.Resolve(move); }

void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION; i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}

void LCodeGen::DoInstructionGap(LInstructionGap* instr) { DoGap(instr); }

void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}

void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}

void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
  int32_t shift = WhichPowerOf2Abs(divisor);
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ CmpP(dividend, Operand::Zero());
    __ bge(&dividend_is_not_negative, Label::kNear);
    if (shift) {
      // Note that this is correct even for kMinInt operands.
      __ LoadComplementRR(dividend, dividend);
      __ ExtractBitRange(dividend, dividend, shift - 1, 0);
      __ LoadComplementRR(dividend, dividend);
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
      }
    } else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ mov(dividend, Operand::Zero());
    } else {
      DeoptimizeIf(al, instr, Deoptimizer::kMinusZero);
    }
    __ b(&done, Label::kNear);
  }

  __ bind(&dividend_is_not_negative);
  if (shift) {
    __ ExtractBitRange(dividend, dividend, shift - 1, 0);
  } else {
    __ mov(dividend, Operand::Zero());
  }
  __ bind(&done);
}
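
// Worked example of the negative-dividend path above: for dividend == -5 and
// divisor == +/-4 (shift == 2), the negate/mask/negate sequence computes
// -((-(-5)) & (4 - 1)) == -(5 & 3) == -1, matching the truncating semantics
// in which -5 % 4 == -1.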

void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  __ mov(ip, Operand(Abs(divisor)));
  __ Mul(result, result, ip);
  __ SubP(result, dividend, result /*, LeaveOE, SetRC*/);

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ bne(&remainder_not_zero, Label::kNear /*, cr0*/);
    __ Cmp32(dividend, Operand::Zero());
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
    __ bind(&remainder_not_zero);
  }
}
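
// The sequence above relies on the identity n % d == n - trunc(n / |d|) * |d|
// (a truncating remainder takes the sign of the dividend, so |d| suffices).
// For example, dividend == 7 and divisor == -3 gives 7 - 2 * 3 == 1, which is
// indeed 7 % -3.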

void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  Register left_reg = ToRegister(instr->left());
  Register right_reg = ToRegister(instr->right());
  Register result_reg = ToRegister(instr->result());
  Label done;

  // Check for x % 0.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    __ Cmp32(right_reg, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for kMinInt % -1; dr returns an undefined result in that case,
  // which is not what we want. If we care about -0 we have to deopt,
  // because we can't return that.
  if (hmod->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ Cmp32(left_reg, Operand(kMinInt));
    __ bne(&no_overflow_possible, Label::kNear);
    __ Cmp32(right_reg, Operand(-1));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
    } else {
      __ b(ne, &no_overflow_possible, Label::kNear);
      __ mov(result_reg, Operand::Zero());
      __ b(&done, Label::kNear);
    }
    __ bind(&no_overflow_possible);
  }

  // The divide instruction dr will implicitly use the register pair
  // r0 & r1 below.
  DCHECK(!left_reg.is(r1));
  DCHECK(!right_reg.is(r1));
  DCHECK(!result_reg.is(r1));
  __ LoadRR(r0, left_reg);
  __ srda(r0, Operand(32));
  __ dr(r0, right_reg);  // R0:R1 = R1 / divisor - R0 remainder

  __ LoadAndTestP_ExtendSrc(result_reg, r0);  // Copy remainder to result reg

  // If we care about -0, test if the dividend is <0 and the result is 0.
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ bne(&done, Label::kNear);
    __ Cmp32(left_reg, Operand::Zero());
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
  }

  __ bind(&done);
}
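
// A note on the z/Architecture idiom above: srda shifts the even/odd pair
// r0:r1 right arithmetically, so LoadRR plus srda sign-extends the 32-bit
// dividend into the 64-bit r0:r1 pair; dr then leaves the remainder in r0 and
// the quotient in r1. E.g. left_reg == -7, right_reg == 2 ends with r0 == -1
// and r1 == -3.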

void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ Cmp32(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    __ Cmp32(dividend, Operand(0x80000000));
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
  }

  int32_t shift = WhichPowerOf2Abs(divisor);

  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) {
    __ TestBitRange(dividend, shift - 1, 0, r0);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, cr0);
  }

  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
    __ LoadComplementRR(result, dividend);
    return;
  }
  if (shift == 0) {
    __ LoadRR(result, dividend);
  } else {
    if (shift == 1) {
      __ ShiftRight(result, dividend, Operand(31));
    } else {
      __ ShiftRightArith(result, dividend, Operand(31));
      __ ShiftRight(result, result, Operand(32 - shift));
    }
    __ AddP(result, dividend, result);
    __ ShiftRightArith(result, result, Operand(shift));
#if V8_TARGET_ARCH_S390X
    __ lgfr(result, result);
#endif
  }
  if (divisor < 0) __ LoadComplementRR(result, result);
}
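
// Worked example of the bias trick above: for dividend == -7 and shift == 2,
// the arithmetic shift by 31 followed by the logical shift by (32 - shift)
// yields the bias 2^shift - 1 == 3 for negative dividends (0 for non-negative
// ones), so (-7 + 3) >> 2 == -1, the truncating quotient of -7 / 4.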

void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ Cmp32(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ LoadComplementRR(result, result);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    Register scratch = scratch0();
    __ mov(ip, Operand(divisor));
    __ Mul(scratch, result, ip);
    __ Cmp32(scratch, dividend);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
  }
}

// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  const Register dividend = ToRegister(instr->dividend());
  const Register divisor = ToRegister(instr->divisor());
  Register result = ToRegister(instr->result());

  DCHECK(!dividend.is(result));
  DCHECK(!divisor.is(result));

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ Cmp32(divisor, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ Cmp32(dividend, Operand::Zero());
    __ bne(&dividend_not_zero, Label::kNear);
    __ Cmp32(divisor, Operand::Zero());
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    Label dividend_not_min_int;
    __ Cmp32(dividend, Operand(kMinInt));
    __ bne(&dividend_not_min_int, Label::kNear);
    __ Cmp32(divisor, Operand(-1));
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
    __ bind(&dividend_not_min_int);
  }

  __ LoadRR(r0, dividend);
  __ srda(r0, Operand(32));
  __ dr(r0, divisor);  // R0:R1 = R1 / divisor - R0 remainder - R1 quotient

  __ LoadAndTestP_ExtendSrc(result, r1);  // Move quotient to result register

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    // Deoptimize if remainder is not 0.
    __ Cmp32(r0, Operand::Zero());
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
  }
}

void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register result = ToRegister(instr->result());
  int32_t divisor = instr->divisor();
  bool can_overflow = hdiv->CheckFlag(HValue::kLeftCanBeMinInt);

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 0) {
    if (shift || !result.is(dividend)) {
      __ ShiftRightArith(result, dividend, Operand(shift));
#if V8_TARGET_ARCH_S390X
      __ lgfr(result, result);
#endif
    }
    return;
  }

// If the divisor is negative, we have to negate and handle edge cases.
#if V8_TARGET_ARCH_S390X
  if (divisor == -1 && can_overflow) {
    __ Cmp32(dividend, Operand(0x80000000));
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
  }
#endif

  __ LoadComplementRR(result, dividend);
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0);
  }

// If the negation could not overflow, simply shifting is OK.
#if !V8_TARGET_ARCH_S390X
  if (!can_overflow) {
#endif
    if (shift) {
      __ ShiftRightArithP(result, result, Operand(shift));
    }
    return;
#if !V8_TARGET_ARCH_S390X
  }

  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
    return;
  }

  Label overflow_label, done;
  __ b(overflow, &overflow_label, Label::kNear);
  __ ShiftRightArith(result, result, Operand(shift));
#if V8_TARGET_ARCH_S390X
  __ lgfr(result, result);
#endif
  __ b(&done, Label::kNear);
  __ bind(&overflow_label);
  __ mov(result, Operand(kMinInt / divisor));
  __ bind(&done);
#endif
}

void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ Cmp32(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(result, dividend, Abs(divisor));
    if (divisor < 0) __ LoadComplementRR(result, result);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister(instr->temp());
  DCHECK(!temp.is(dividend) && !temp.is(result));
  Label needs_adjustment, done;
  __ Cmp32(dividend, Operand::Zero());
  __ b(divisor > 0 ? lt : gt, &needs_adjustment);
  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ LoadComplementRR(result, result);
  __ b(&done, Label::kNear);
  __ bind(&needs_adjustment);
  __ AddP(temp, dividend, Operand(divisor > 0 ? 1 : -1));
  __ TruncatingDiv(result, temp, Abs(divisor));
  if (divisor < 0) __ LoadComplementRR(result, result);
  __ SubP(result, result, Operand(1));
  __ bind(&done);
}
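
// The adjustment above implements floor(n / d) == trunc((n + s) / d) - 1 for
// the mixed-sign case, where s is +1 when d > 0 and -1 when d < 0. For
// example, n == -7, d == 4 gives trunc(-6 / 4) - 1 == -1 - 1 == -2, which is
// floor(-7 / 4); the exact case n == -8 still yields trunc(-7 / 4) - 1 == -2.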

// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  const Register dividend = ToRegister(instr->dividend());
  const Register divisor = ToRegister(instr->divisor());
  Register result = ToRegister(instr->result());

  DCHECK(!dividend.is(result));
  DCHECK(!divisor.is(result));

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ Cmp32(divisor, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ Cmp32(dividend, Operand::Zero());
    __ bne(&dividend_not_zero, Label::kNear);
    __ Cmp32(divisor, Operand::Zero());
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ Cmp32(dividend, Operand(kMinInt));
    __ bne(&no_overflow_possible, Label::kNear);
    __ Cmp32(divisor, Operand(-1));
    if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
      DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
    } else {
      __ bne(&no_overflow_possible, Label::kNear);
      __ LoadRR(result, dividend);
    }
    __ bind(&no_overflow_possible);
  }

  __ LoadRR(r0, dividend);
  __ srda(r0, Operand(32));
  __ dr(r0, divisor);  // R0:R1 = R1 / divisor - R0 remainder - R1 quotient

  __ lr(result, r1);  // Move quotient to result register

  Label done;
  Register scratch = scratch0();
  // If both operands have the same sign then we are done.
  __ Xor(scratch, dividend, divisor);
  __ ltr(scratch, scratch);  // Use the 32-bit LoadAndTestRR even on 64-bit.
  __ bge(&done, Label::kNear);

  // If there is no remainder then we are done.
  __ lr(scratch, result);
  __ msr(scratch, divisor);
  __ Cmp32(dividend, scratch);
  __ beq(&done, Label::kNear);

  // We performed a truncating division. Correct the result.
  __ Sub32(result, result, Operand(1));
  __ bind(&done);
}
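
// The fix-up above turns the truncating quotient into a flooring one: when
// the operand signs differ (the XOR is negative) and the remainder is nonzero
// (dividend != quotient * divisor), the quotient is decremented by one. For
// example, -7 / 2 truncates to -3, and the correction yields
// floor(-7 / 2) == -4.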

void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
  DoubleRegister addend = ToDoubleRegister(instr->addend());
  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
  DoubleRegister result = ToDoubleRegister(instr->result());

  // Unable to use madbr as the intermediate value is not rounded
  // to proper precision
  __ ldr(result, multiplier);
  __ mdbr(result, multiplicand);
  __ adbr(result, addend);
}

void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
  DoubleRegister minuend = ToDoubleRegister(instr->minuend());
  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
  DoubleRegister result = ToDoubleRegister(instr->result());

  // Unable to use msdbr as the intermediate value is not rounded
  // to proper precision
  __ ldr(result, multiplier);
  __ mdbr(result, multiplicand);
  __ sdbr(result, minuend);
}

void LCodeGen::DoMulI(LMulI* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (right_op->IsConstantOperand()) {
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
      // The case of a zero constant is handled separately below.
      // If the constant is negative and left is zero, the result should be -0.
      __ CmpP(left, Operand::Zero());
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
    }

    switch (constant) {
      case -1:
        if (can_overflow) {
#if V8_TARGET_ARCH_S390X
          if (instr->hydrogen()->representation().IsSmi()) {
#endif
            __ LoadComplementRR(result, left);
            DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
#if V8_TARGET_ARCH_S390X
          } else {
            __ LoadComplementRR(result, left);
            __ TestIfInt32(result, r0);
            DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
          }
#endif
        } else {
          __ LoadComplementRR(result, left);
        }
        break;
      case 0:
        if (bailout_on_minus_zero) {
// If left is strictly negative and the constant is zero, the
// result is -0. Deoptimize if required, otherwise return 0.
#if V8_TARGET_ARCH_S390X
          if (instr->hydrogen()->representation().IsSmi()) {
#endif
            __ Cmp32(left, Operand::Zero());
#if V8_TARGET_ARCH_S390X
          } else {
            __ Cmp32(left, Operand::Zero());
          }
#endif
          DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
        }
        __ LoadImmP(result, Operand::Zero());
        break;
      case 1:
        __ Move(result, left);
        break;
      default:
        // Multiplying by powers of two and powers of two plus or minus
        // one can be done faster with shifted operands.
        // For other constants we emit standard code.
        int32_t mask = constant >> 31;
        uint32_t constant_abs = (constant + mask) ^ mask;

        if (base::bits::IsPowerOfTwo32(constant_abs)) {
          int32_t shift = WhichPowerOf2(constant_abs);
          __ ShiftLeftP(result, left, Operand(shift));
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ LoadComplementRR(result, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
          int32_t shift = WhichPowerOf2(constant_abs - 1);
          __ ShiftLeftP(scratch, left, Operand(shift));
          __ AddP(result, scratch, left);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ LoadComplementRR(result, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
          int32_t shift = WhichPowerOf2(constant_abs + 1);
          __ ShiftLeftP(scratch, left, Operand(shift));
          __ SubP(result, scratch, left);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ LoadComplementRR(result, result);
        } else {
          // Generate standard code.
          __ Move(result, left);
          __ MulP(result, Operand(constant));
        }
    }

  } else {
    DCHECK(right_op->IsRegister());
    Register right = ToRegister(right_op);

    if (can_overflow) {
#if V8_TARGET_ARCH_S390X
      // result = left * right.
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ SmiUntag(scratch, right);
        __ msgr(result, scratch);
      } else {
        __ LoadRR(result, left);
        __ msgr(result, right);
      }
      __ TestIfInt32(result, r0);
      DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiTag(result);
      }
#else
      // r0:scratch = scratch * right
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(scratch, left);
        __ mr_z(r0, right);
        __ LoadRR(result, scratch);
      } else {
        // r0:scratch = scratch * right
        __ LoadRR(scratch, left);
        __ mr_z(r0, right);
        __ LoadRR(result, scratch);
      }
      __ TestIfInt32(r0, result, scratch);
      DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
#endif
    } else {
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ Mul(result, result, right);
      } else {
        __ Mul(result, left, right);
      }
    }

    if (bailout_on_minus_zero) {
      Label done;
#if V8_TARGET_ARCH_S390X
      if (instr->hydrogen()->representation().IsSmi()) {
#endif
        __ XorP(r0, left, right);
        __ LoadAndTestRR(r0, r0);
        __ bge(&done, Label::kNear);
#if V8_TARGET_ARCH_S390X
      } else {
        __ XorP(r0, left, right);
        __ Cmp32(r0, Operand::Zero());
        __ bge(&done, Label::kNear);
      }
#endif
      // Bail out if the result is minus zero.
      __ CmpP(result, Operand::Zero());
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
      __ bind(&done);
    }
  }
}
1464 
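// A minimal standalone sketch of the constant-multiply strength reduction
// in DoMulI above (helper names are illustrative, not V8 API). It relies
// on the branch-free two's-complement absolute value used in the default
// case, assuming arithmetic right shift of a 32-bit int:
//
//   static inline uint32_t Abs32(int32_t c) {
//     int32_t mask = c >> 31;    // 0 if c >= 0, -1 if c < 0
//     return (c + mask) ^ mask;  // two's-complement |c|
//   }
//
// left * c then lowers to shifts and adds when |c| is 2^k, 2^k + 1, or
// 2^k - 1:
//   |c| == 2^k      ->  result = left << k
//   |c| == 2^k + 1  ->  result = (left << k) + left
//   |c| == 2^k - 1  ->  result = (left << k) - left
// with a final negation when c < 0; any other constant gets a real multiply.
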
1465 void LCodeGen::DoBitI(LBitI* instr) {
1466   LOperand* left_op = instr->left();
1467   LOperand* right_op = instr->right();
1468   DCHECK(left_op->IsRegister());
1469   Register left = ToRegister(left_op);
1470   Register result = ToRegister(instr->result());
1471 
1472   if (right_op->IsConstantOperand()) {
1473     switch (instr->op()) {
1474       case Token::BIT_AND:
1475         __ AndP(result, left, Operand(ToOperand(right_op)));
1476         break;
1477       case Token::BIT_OR:
1478         __ OrP(result, left, Operand(ToOperand(right_op)));
1479         break;
1480       case Token::BIT_XOR:
1481         __ XorP(result, left, Operand(ToOperand(right_op)));
1482         break;
1483       default:
1484         UNREACHABLE();
1485         break;
1486     }
1487   } else if (right_op->IsStackSlot()) {
1488     // The Reg-Mem instruction clobbers its register operand, so copy src to dst first.
1489     if (!left.is(result)) __ LoadRR(result, left);
1490     switch (instr->op()) {
1491       case Token::BIT_AND:
1492         __ AndP(result, ToMemOperand(right_op));
1493         break;
1494       case Token::BIT_OR:
1495         __ OrP(result, ToMemOperand(right_op));
1496         break;
1497       case Token::BIT_XOR:
1498         __ XorP(result, ToMemOperand(right_op));
1499         break;
1500       default:
1501         UNREACHABLE();
1502         break;
1503     }
1504   } else {
1505     DCHECK(right_op->IsRegister());
1506 
1507     switch (instr->op()) {
1508       case Token::BIT_AND:
1509         __ AndP(result, left, ToRegister(right_op));
1510         break;
1511       case Token::BIT_OR:
1512         __ OrP(result, left, ToRegister(right_op));
1513         break;
1514       case Token::BIT_XOR:
1515         __ XorP(result, left, ToRegister(right_op));
1516         break;
1517       default:
1518         UNREACHABLE();
1519         break;
1520     }
1521   }
1522 }
1523 
1524 void LCodeGen::DoShiftI(LShiftI* instr) {
1525   // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
1526   // result may alias either of them.
1527   LOperand* right_op = instr->right();
1528   Register left = ToRegister(instr->left());
1529   Register result = ToRegister(instr->result());
1530   Register scratch = scratch0();
1531   if (right_op->IsRegister()) {
1532     // Mask the right_op operand.
1533     __ AndP(scratch, ToRegister(right_op), Operand(0x1F));
1534     switch (instr->op()) {
1535       case Token::ROR:
1536         // rotate_right(a, b) == rotate_left(a, 32 - b)
1537         __ LoadComplementRR(scratch, scratch);
1538         __ rll(result, left, scratch, Operand(32));
1539 #if V8_TARGET_ARCH_S390X
1540         __ lgfr(result, result);
1541 #endif
1542         break;
1543       case Token::SAR:
1544         __ ShiftRightArith(result, left, scratch);
1545 #if V8_TARGET_ARCH_S390X
1546         __ lgfr(result, result);
1547 #endif
1548         break;
1549       case Token::SHR:
1550         __ ShiftRight(result, left, scratch);
1551 #if V8_TARGET_ARCH_S390X
1552         __ lgfr(result, result);
1553 #endif
1554         if (instr->can_deopt()) {
1555 #if V8_TARGET_ARCH_S390X
1556           __ ltgfr(result, result /*, SetRC*/);
1557 #else
1558           __ ltr(result, result);  // Set the <,==,> condition
1559 #endif
1560           DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, cr0);
1561         }
1562         break;
1563       case Token::SHL:
1564         __ ShiftLeft(result, left, scratch);
1565 #if V8_TARGET_ARCH_S390X
1566         __ lgfr(result, result);
1567 #endif
1568         break;
1569       default:
1570         UNREACHABLE();
1571         break;
1572     }
1573   } else {
1574     // Mask the right_op operand.
1575     int value = ToInteger32(LConstantOperand::cast(right_op));
1576     uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1577     switch (instr->op()) {
1578       case Token::ROR:
1579         if (shift_count != 0) {
1580           __ rll(result, left, Operand(32 - shift_count));
1581 #if V8_TARGET_ARCH_S390X
1582           __ lgfr(result, result);
1583 #endif
1584         } else {
1585           __ Move(result, left);
1586         }
1587         break;
1588       case Token::SAR:
1589         if (shift_count != 0) {
1590           __ ShiftRightArith(result, left, Operand(shift_count));
1591 #if V8_TARGET_ARCH_S390X
1592           __ lgfr(result, result);
1593 #endif
1594         } else {
1595           __ Move(result, left);
1596         }
1597         break;
1598       case Token::SHR:
1599         if (shift_count != 0) {
1600           __ ShiftRight(result, left, Operand(shift_count));
1601 #if V8_TARGET_ARCH_S390X
1602           __ lgfr(result, result);
1603 #endif
1604         } else {
1605           if (instr->can_deopt()) {
1606             __ Cmp32(left, Operand::Zero());
1607             DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue);
1608           }
1609           __ Move(result, left);
1610         }
1611         break;
1612       case Token::SHL:
1613         if (shift_count != 0) {
1614 #if V8_TARGET_ARCH_S390X
1615           if (instr->hydrogen_value()->representation().IsSmi()) {
1616             __ ShiftLeftP(result, left, Operand(shift_count));
1617 #else
1618           if (instr->hydrogen_value()->representation().IsSmi() &&
1619               instr->can_deopt()) {
1620             if (shift_count != 1) {
1621               __ ShiftLeft(result, left, Operand(shift_count - 1));
1622 #if V8_TARGET_ARCH_S390X
1623               __ lgfr(result, result);
1624 #endif
1625               __ SmiTagCheckOverflow(result, result, scratch);
1626             } else {
1627               __ SmiTagCheckOverflow(result, left, scratch);
1628             }
1629             DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
1630 #endif
1631           } else {
1632             __ ShiftLeft(result, left, Operand(shift_count));
1633 #if V8_TARGET_ARCH_S390X
1634             __ lgfr(result, result);
1635 #endif
1636           }
1637         } else {
1638           __ Move(result, left);
1639         }
1640         break;
1641       default:
1642         UNREACHABLE();
1643         break;
1644     }
1645   }
1646 }
1647 
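// A small sketch of the two shift paths above, assuming 32-bit operands
// (names illustrative). Counts are masked to 0..31 per JS semantics, and
// rotate-right is synthesized from the native rotate-left:
//
//   int count = right & 0x1F;
//   // ROR, for count != 0: rotate_right(a, n) == rotate_left(a, 32 - n)
//   uint32_t ror = (a << (32 - count)) | (a >> count);
//
// The SHR deopt check exists because a logical right shift by 0 leaves the
// sign bit set, producing an uint32 above INT32_MAX that an int32 result
// cannot represent.
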
1648 void LCodeGen::DoSubI(LSubI* instr) {
1649   LOperand* left = instr->left();
1650   LOperand* right = instr->right();
1651   LOperand* result = instr->result();
1652 
1653   bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
1654                      instr->hydrogen()->representation().IsExternal());
1655 
1656 #if V8_TARGET_ARCH_S390X
1657   // The overflow detection needs to be tested on the lower 32-bits.
1658   // As a result, on 64-bit, we need to force 32-bit arithmetic operations
1659   // to set the CC overflow bit properly.  The result is then sign-extended.
1660   bool checkOverflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1661 #else
1662   bool checkOverflow = true;
1663 #endif
1664 
1665   if (right->IsConstantOperand()) {
1666     if (!isInteger || !checkOverflow)
1667       __ SubP(ToRegister(result), ToRegister(left), ToOperand(right));
1668     else
1669       __ Sub32(ToRegister(result), ToRegister(left), ToOperand(right));
1670   } else if (right->IsRegister()) {
1671     if (!isInteger)
1672       __ SubP(ToRegister(result), ToRegister(left), ToRegister(right));
1673     else if (!checkOverflow)
1674       __ SubP_ExtendSrc(ToRegister(result), ToRegister(left),
1675                         ToRegister(right));
1676     else
1677       __ Sub32(ToRegister(result), ToRegister(left), ToRegister(right));
1678   } else {
1679     if (!left->Equals(instr->result()))
1680       __ LoadRR(ToRegister(result), ToRegister(left));
1681 
1682     MemOperand mem = ToMemOperand(right);
1683     if (!isInteger) {
1684       __ SubP(ToRegister(result), mem);
1685     } else {
1686 #if V8_TARGET_ARCH_S390X && !V8_TARGET_LITTLE_ENDIAN
1687       // We want to read the 32-bit value directly from memory.
1688       MemOperand Upper32Mem = MemOperand(mem.rb(), mem.rx(), mem.offset() + 4);
1689 #else
1690       MemOperand Upper32Mem = ToMemOperand(right);
1691 #endif
1692       if (checkOverflow) {
1693         __ Sub32(ToRegister(result), Upper32Mem);
1694       } else {
1695         __ SubP_ExtendSrc(ToRegister(result), Upper32Mem);
1696       }
1697     }
1698   }
1699 
1700 #if V8_TARGET_ARCH_S390X
1701   if (isInteger && checkOverflow)
1702     __ lgfr(ToRegister(result), ToRegister(result));
1703 #endif
1704   if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1705     DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
1706   }
1707 }
1708 
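// A sketch of the overflow rule the 64-bit path relies on: the deopt
// condition must come from the low 32 bits, so either a 32-bit instruction
// (Sub32) sets the overflow CC directly, or the wide result is checked to
// fit in an int32. Illustrative C++ under the same assumption:
//
//   static bool Sub32Overflows(int32_t a, int32_t b, int32_t* out) {
//     int64_t wide = static_cast<int64_t>(a) - static_cast<int64_t>(b);
//     *out = static_cast<int32_t>(wide);
//     return wide != *out;  // true iff the result does not fit in 32 bits
//   }
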
1709 void LCodeGen::DoRSubI(LRSubI* instr) {
1710   LOperand* left = instr->left();
1711   LOperand* right = instr->right();
1712   LOperand* result = instr->result();
1713 
1714   DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow) &&
1715          right->IsConstantOperand());
1716 
1717 #if V8_TARGET_ARCH_S390X
1718   // The overflow detection needs to be tested on the lower 32-bits.
1719   // As a result, on 64-bit, we need to force 32-bit arithmetic operations
1720   // to set the CC overflow bit properly.  The result is then sign-extended.
1721   bool checkOverflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1722 #else
1723   bool checkOverflow = true;
1724 #endif
1725 
1726   Operand right_operand = ToOperand(right);
1727   __ mov(r0, right_operand);
1728 
1729   if (!checkOverflow) {
1730     __ SubP_ExtendSrc(ToRegister(result), r0, ToRegister(left));
1731   } else {
1732     __ Sub32(ToRegister(result), r0, ToRegister(left));
1733   }
1734 }
1735 
1736 void LCodeGen::DoConstantI(LConstantI* instr) {
1737   __ mov(ToRegister(instr->result()), Operand(instr->value()));
1738 }
1739 
1740 void LCodeGen::DoConstantS(LConstantS* instr) {
1741   __ LoadSmiLiteral(ToRegister(instr->result()), instr->value());
1742 }
1743 
1744 void LCodeGen::DoConstantD(LConstantD* instr) {
1745   DCHECK(instr->result()->IsDoubleRegister());
1746   DoubleRegister result = ToDoubleRegister(instr->result());
1747   uint64_t bits = instr->bits();
1748   __ LoadDoubleLiteral(result, bits, scratch0());
1749 }
1750 
1751 void LCodeGen::DoConstantE(LConstantE* instr) {
1752   __ mov(ToRegister(instr->result()), Operand(instr->value()));
1753 }
1754 
1755 void LCodeGen::DoConstantT(LConstantT* instr) {
1756   Handle<Object> object = instr->value(isolate());
1757   AllowDeferredHandleDereference smi_check;
1758   __ Move(ToRegister(instr->result()), object);
1759 }
1760 
1761 MemOperand LCodeGen::BuildSeqStringOperand(Register string, LOperand* index,
1762                                            String::Encoding encoding) {
1763   if (index->IsConstantOperand()) {
1764     int offset = ToInteger32(LConstantOperand::cast(index));
1765     if (encoding == String::TWO_BYTE_ENCODING) {
1766       offset *= kUC16Size;
1767     }
1768     STATIC_ASSERT(kCharSize == 1);
1769     return FieldMemOperand(string, SeqString::kHeaderSize + offset);
1770   }
1771   Register scratch = scratch0();
1772   DCHECK(!scratch.is(string));
1773   DCHECK(!scratch.is(ToRegister(index)));
1774   // TODO(joransiu): Fold Add into FieldMemOperand
1775   if (encoding == String::ONE_BYTE_ENCODING) {
1776     __ AddP(scratch, string, ToRegister(index));
1777   } else {
1778     STATIC_ASSERT(kUC16Size == 2);
1779     __ ShiftLeftP(scratch, ToRegister(index), Operand(1));
1780     __ AddP(scratch, string, scratch);
1781   }
1782   return FieldMemOperand(scratch, SeqString::kHeaderSize);
1783 }
1784 
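// The operand built above addresses the string payload directly; the
// effective byte offset is (sketch, assuming FieldMemOperand's usual
// subtraction of the heap-object tag):
//
//   offset = SeqString::kHeaderSize - kHeapObjectTag
//            + index * (two_byte ? kUC16Size : kCharSize);
//
// A constant index folds entirely into the displacement; otherwise the
// scaled index is added to the string pointer in a scratch register.
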
1785 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1786   String::Encoding encoding = instr->hydrogen()->encoding();
1787   Register string = ToRegister(instr->string());
1788   Register result = ToRegister(instr->result());
1789 
1790   if (FLAG_debug_code) {
1791     Register scratch = scratch0();
1792     __ LoadP(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
1793     __ llc(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1794 
1795     __ AndP(scratch, scratch,
1796             Operand(kStringRepresentationMask | kStringEncodingMask));
1797     static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1798     static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1799     __ CmpP(scratch,
1800             Operand(encoding == String::ONE_BYTE_ENCODING ? one_byte_seq_type
1801                                                           : two_byte_seq_type));
1802     __ Check(eq, kUnexpectedStringType);
1803   }
1804 
1805   MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1806   if (encoding == String::ONE_BYTE_ENCODING) {
1807     __ llc(result, operand);
1808   } else {
1809     __ llh(result, operand);
1810   }
1811 }
1812 
1813 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1814   String::Encoding encoding = instr->hydrogen()->encoding();
1815   Register string = ToRegister(instr->string());
1816   Register value = ToRegister(instr->value());
1817 
1818   if (FLAG_debug_code) {
1819     Register index = ToRegister(instr->index());
1820     static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1821     static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1822     int encoding_mask =
1823         instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
1824             ? one_byte_seq_type
1825             : two_byte_seq_type;
1826     __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
1827   }
1828 
1829   MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1830   if (encoding == String::ONE_BYTE_ENCODING) {
1831     __ stc(value, operand);
1832   } else {
1833     __ sth(value, operand);
1834   }
1835 }
1836 
1837 void LCodeGen::DoAddI(LAddI* instr) {
1838   LOperand* left = instr->left();
1839   LOperand* right = instr->right();
1840   LOperand* result = instr->result();
1841   bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
1842                      instr->hydrogen()->representation().IsExternal());
1843 #if V8_TARGET_ARCH_S390X
1844   // The overflow detection needs to be tested on the lower 32-bits.
1845   // As a result, on 64-bit, we need to force 32-bit arithmetic operations
1846   // to set the CC overflow bit properly.  The result is then sign-extended.
1847   bool checkOverflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1848 #else
1849   bool checkOverflow = true;
1850 #endif
1851 
1852   if (right->IsConstantOperand()) {
1853     if (!isInteger || !checkOverflow)
1854       __ AddP(ToRegister(result), ToRegister(left), ToOperand(right));
1855     else
1856       __ Add32(ToRegister(result), ToRegister(left), ToOperand(right));
1857   } else if (right->IsRegister()) {
1858     if (!isInteger)
1859       __ AddP(ToRegister(result), ToRegister(left), ToRegister(right));
1860     else if (!checkOverflow)
1861       __ AddP_ExtendSrc(ToRegister(result), ToRegister(left),
1862                         ToRegister(right));
1863     else
1864       __ Add32(ToRegister(result), ToRegister(left), ToRegister(right));
1865   } else {
1866     if (!left->Equals(instr->result()))
1867       __ LoadRR(ToRegister(result), ToRegister(left));
1868 
1869     MemOperand mem = ToMemOperand(right);
1870     if (!isInteger) {
1871       __ AddP(ToRegister(result), mem);
1872     } else {
1873 #if V8_TARGET_ARCH_S390X && !V8_TARGET_LITTLE_ENDIAN
1874       // We want to read the 32-bit value directly from memory.
1875       MemOperand Upper32Mem = MemOperand(mem.rb(), mem.rx(), mem.offset() + 4);
1876 #else
1877       MemOperand Upper32Mem = ToMemOperand(right);
1878 #endif
1879       if (checkOverflow) {
1880         __ Add32(ToRegister(result), Upper32Mem);
1881       } else {
1882         __ AddP_ExtendSrc(ToRegister(result), Upper32Mem);
1883       }
1884     }
1885   }
1886 
1887 #if V8_TARGET_ARCH_S390X
1888   if (isInteger && checkOverflow)
1889     __ lgfr(ToRegister(result), ToRegister(result));
1890 #endif
1891   // Deoptimize on overflow.
1892   if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1893     DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
1894   }
1895 }
1896 
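// Why mem.offset() + 4 above: on big-endian S390X the most significant
// byte of a 64-bit stack slot comes first, so the 32 bits that 32-bit
// arithmetic needs sit 4 bytes into the slot. Sketch of the address
// selection (on little-endian the same 32 bits are at offset 0):
//
//   int off32 = big_endian ? slot_offset + 4 : slot_offset;
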
1897 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1898   LOperand* left = instr->left();
1899   LOperand* right = instr->right();
1900   HMathMinMax::Operation operation = instr->hydrogen()->operation();
1901   Condition cond = (operation == HMathMinMax::kMathMin) ? le : ge;
1902   if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
1903     Register left_reg = ToRegister(left);
1904     Register right_reg = EmitLoadRegister(right, ip);
1905     Register result_reg = ToRegister(instr->result());
1906     Label return_left, done;
1907 #if V8_TARGET_ARCH_S390X
1908     if (instr->hydrogen_value()->representation().IsSmi()) {
1909 #endif
1910       __ CmpP(left_reg, right_reg);
1911 #if V8_TARGET_ARCH_S390X
1912     } else {
1913       __ Cmp32(left_reg, right_reg);
1914     }
1915 #endif
1916     __ b(cond, &return_left, Label::kNear);
1917     __ Move(result_reg, right_reg);
1918     __ b(&done, Label::kNear);
1919     __ bind(&return_left);
1920     __ Move(result_reg, left_reg);
1921     __ bind(&done);
1922   } else {
1923     DCHECK(instr->hydrogen()->representation().IsDouble());
1924     DoubleRegister left_reg = ToDoubleRegister(left);
1925     DoubleRegister right_reg = ToDoubleRegister(right);
1926     DoubleRegister result_reg = ToDoubleRegister(instr->result());
1927     Label check_nan_left, check_zero, return_left, return_right, done;
1928     __ cdbr(left_reg, right_reg);
1929     __ bunordered(&check_nan_left, Label::kNear);
1930     __ beq(&check_zero);
1931     __ b(cond, &return_left, Label::kNear);
1932     __ b(&return_right, Label::kNear);
1933 
1934     __ bind(&check_zero);
1935     __ lzdr(kDoubleRegZero);
1936     __ cdbr(left_reg, kDoubleRegZero);
1937     __ bne(&return_left, Label::kNear);  // left == right != 0.
1938 
1939     // At this point, both left and right are either 0 or -0.
1940     // N.B. The following works because +0 + -0 == +0
1941     if (operation == HMathMinMax::kMathMin) {
1942       // For min we want logical-or of sign bit: -(-L + -R)
1943       __ lcdbr(left_reg, left_reg);
1944       __ ldr(result_reg, left_reg);
1945       if (left_reg.is(right_reg)) {
1946         __ adbr(result_reg, right_reg);
1947       } else {
1948         __ sdbr(result_reg, right_reg);
1949       }
1950       __ lcdbr(result_reg, result_reg);
1951     } else {
1952       // For max we want logical-and of sign bit: (L + R)
1953       __ ldr(result_reg, left_reg);
1954       __ adbr(result_reg, right_reg);
1955     }
1956     __ b(&done, Label::kNear);
1957 
1958     __ bind(&check_nan_left);
1959     __ cdbr(left_reg, left_reg);
1960     __ bunordered(&return_left, Label::kNear);  // left == NaN.
1961 
1962     __ bind(&return_right);
1963     if (!right_reg.is(result_reg)) {
1964       __ ldr(result_reg, right_reg);
1965     }
1966     __ b(&done, Label::kNear);
1967 
1968     __ bind(&return_left);
1969     if (!left_reg.is(result_reg)) {
1970       __ ldr(result_reg, left_reg);
1971     }
1972     __ bind(&done);
1973   }
1974 }
1975 
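// The +/-0 handling above leans on IEEE 754 addition; sketch, where l and
// r are each +0.0 or -0.0 in round-to-nearest mode:
//
//   double MaxZero(double l, double r) { return l + r; }      // -0 iff both -0
//   double MinZero(double l, double r) { return -(-l + -r); } // -0 iff either -0
//
// That is, the sign bit of l + r is the AND of the two sign bits, and the
// negated sum of the negations yields their OR.
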
1976 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1977   DoubleRegister left = ToDoubleRegister(instr->left());
1978   DoubleRegister right = ToDoubleRegister(instr->right());
1979   DoubleRegister result = ToDoubleRegister(instr->result());
1980   // All operations except MOD are computed in-place.
1981   DCHECK(instr->op() == Token::MOD || left.is(result));
1982   switch (instr->op()) {
1983     case Token::ADD:
1984       __ adbr(result, right);
1985       break;
1986     case Token::SUB:
1987       __ sdbr(result, right);
1988       break;
1989     case Token::MUL:
1990       __ mdbr(result, right);
1991       break;
1992     case Token::DIV:
1993       __ ddbr(result, right);
1994       break;
1995     case Token::MOD: {
1996       __ PrepareCallCFunction(0, 2, scratch0());
1997       __ MovToFloatParameters(left, right);
1998       __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
1999                        0, 2);
2000       // Move the result into the double result register.
2001       __ MovFromFloatResult(result);
2002       break;
2003     }
2004     default:
2005       UNREACHABLE();
2006       break;
2007   }
2008 }
2009 
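// z/Architecture has no double-precision modulo instruction, so the MOD
// case above calls into the runtime, which behaves like std::fmod on the
// two operands (a sketch of the semantics, not the exact runtime code):
//
//   double result = std::fmod(left, right);
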
2010 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2011   DCHECK(ToRegister(instr->context()).is(cp));
2012   DCHECK(ToRegister(instr->left()).is(r3));
2013   DCHECK(ToRegister(instr->right()).is(r2));
2014   DCHECK(ToRegister(instr->result()).is(r2));
2015 
2016   Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
2017   CallCode(code, RelocInfo::CODE_TARGET, instr);
2018 }
2019 
2020 template <class InstrType>
2021 void LCodeGen::EmitBranch(InstrType instr, Condition cond) {
2022   int left_block = instr->TrueDestination(chunk_);
2023   int right_block = instr->FalseDestination(chunk_);
2024 
2025   int next_block = GetNextEmittedBlock();
2026 
2027   if (right_block == left_block || cond == al) {
2028     EmitGoto(left_block);
2029   } else if (left_block == next_block) {
2030     __ b(NegateCondition(cond), chunk_->GetAssemblyLabel(right_block));
2031   } else if (right_block == next_block) {
2032     __ b(cond, chunk_->GetAssemblyLabel(left_block));
2033   } else {
2034     __ b(cond, chunk_->GetAssemblyLabel(left_block));
2035     __ b(chunk_->GetAssemblyLabel(right_block));
2036   }
2037 }
2038 
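// Branch layout in EmitBranch, sketched: blocks are emitted in order, so a
// jump to the block that comes next is free and at most two branches are
// needed:
//
//   if (true_block == false_block || cond == al)  goto true_block;
//   else if (true_block is next)   branch(!cond, false_block);
//   else if (false_block is next)  branch(cond, true_block);
//   else                         { branch(cond, true_block); goto false_block; }
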
2039 template <class InstrType>
2040 void LCodeGen::EmitTrueBranch(InstrType instr, Condition cond) {
2041   int true_block = instr->TrueDestination(chunk_);
2042   __ b(cond, chunk_->GetAssemblyLabel(true_block));
2043 }
2044 
2045 template <class InstrType>
2046 void LCodeGen::EmitFalseBranch(InstrType instr, Condition cond) {
2047   int false_block = instr->FalseDestination(chunk_);
2048   __ b(cond, chunk_->GetAssemblyLabel(false_block));
2049 }
2050 
2051 void LCodeGen::DoDebugBreak(LDebugBreak* instr) { __ stop("LBreak"); }
2052 
2053 void LCodeGen::DoBranch(LBranch* instr) {
2054   Representation r = instr->hydrogen()->value()->representation();
2055   DoubleRegister dbl_scratch = double_scratch0();
2056 
2057   if (r.IsInteger32()) {
2058     DCHECK(!info()->IsStub());
2059     Register reg = ToRegister(instr->value());
2060     __ Cmp32(reg, Operand::Zero());
2061     EmitBranch(instr, ne);
2062   } else if (r.IsSmi()) {
2063     DCHECK(!info()->IsStub());
2064     Register reg = ToRegister(instr->value());
2065     __ CmpP(reg, Operand::Zero());
2066     EmitBranch(instr, ne);
2067   } else if (r.IsDouble()) {
2068     DCHECK(!info()->IsStub());
2069     DoubleRegister reg = ToDoubleRegister(instr->value());
2070     __ lzdr(kDoubleRegZero);
2071     __ cdbr(reg, kDoubleRegZero);
2072     // Test the double value. Zero and NaN are false.
2073     Condition lt_gt = static_cast<Condition>(lt | gt);
2074 
2075     EmitBranch(instr, lt_gt);
2076   } else {
2077     DCHECK(r.IsTagged());
2078     Register reg = ToRegister(instr->value());
2079     HType type = instr->hydrogen()->value()->type();
2080     if (type.IsBoolean()) {
2081       DCHECK(!info()->IsStub());
2082       __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2083       EmitBranch(instr, eq);
2084     } else if (type.IsSmi()) {
2085       DCHECK(!info()->IsStub());
2086       __ CmpP(reg, Operand::Zero());
2087       EmitBranch(instr, ne);
2088     } else if (type.IsJSArray()) {
2089       DCHECK(!info()->IsStub());
2090       EmitBranch(instr, al);
2091     } else if (type.IsHeapNumber()) {
2092       DCHECK(!info()->IsStub());
2093       __ ld(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2094       // Test the double value. Zero and NaN are false.
2095       __ lzdr(kDoubleRegZero);
2096       __ cdbr(dbl_scratch, kDoubleRegZero);
2097       Condition lt_gt = static_cast<Condition>(lt | gt);
2098       EmitBranch(instr, lt_gt);
2099     } else if (type.IsString()) {
2100       DCHECK(!info()->IsStub());
2101       __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
2102       __ CmpP(ip, Operand::Zero());
2103       EmitBranch(instr, ne);
2104     } else {
2105       ToBooleanICStub::Types expected =
2106           instr->hydrogen()->expected_input_types();
2107       // Avoid deopts in the case where we've never executed this path before.
2108       if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
2109 
2110       if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
2111         // undefined -> false.
2112         __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
2113         __ beq(instr->FalseLabel(chunk_));
2114       }
2115       if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
2116         // Boolean -> its value.
2117         __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2118         __ beq(instr->TrueLabel(chunk_));
2119         __ CompareRoot(reg, Heap::kFalseValueRootIndex);
2120         __ beq(instr->FalseLabel(chunk_));
2121       }
2122       if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
2123         // 'null' -> false.
2124         __ CompareRoot(reg, Heap::kNullValueRootIndex);
2125         __ beq(instr->FalseLabel(chunk_));
2126       }
2127 
2128       if (expected.Contains(ToBooleanICStub::SMI)) {
2129         // Smis: 0 -> false, all other -> true.
2130         __ CmpP(reg, Operand::Zero());
2131         __ beq(instr->FalseLabel(chunk_));
2132         __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2133       } else if (expected.NeedsMap()) {
2134         // If we need a map later and have a Smi -> deopt.
2135         __ TestIfSmi(reg);
2136         DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
2137       }
2138 
2139       const Register map = scratch0();
2140       if (expected.NeedsMap()) {
2141         __ LoadP(map, FieldMemOperand(reg, HeapObject::kMapOffset));
2142 
2143         if (expected.CanBeUndetectable()) {
2144           // Undetectable -> false.
2145           __ tm(FieldMemOperand(map, Map::kBitFieldOffset),
2146                 Operand(1 << Map::kIsUndetectable));
2147           __ bne(instr->FalseLabel(chunk_));
2148         }
2149       }
2150 
2151       if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
2152         // spec object -> true.
2153         __ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE);
2154         __ bge(instr->TrueLabel(chunk_));
2155       }
2156 
2157       if (expected.Contains(ToBooleanICStub::STRING)) {
2158         // String value -> false iff empty.
2159         Label not_string;
2160         __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
2161         __ bge(&not_string, Label::kNear);
2162         __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
2163         __ CmpP(ip, Operand::Zero());
2164         __ bne(instr->TrueLabel(chunk_));
2165         __ b(instr->FalseLabel(chunk_));
2166         __ bind(&not_string);
2167       }
2168 
2169       if (expected.Contains(ToBooleanICStub::SYMBOL)) {
2170         // Symbol value -> true.
2171         __ CompareInstanceType(map, ip, SYMBOL_TYPE);
2172         __ beq(instr->TrueLabel(chunk_));
2173       }
2174 
2175       if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
2176         // SIMD value -> true.
2178         __ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE);
2179         __ beq(instr->TrueLabel(chunk_));
2180       }
2181 
2182       if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
2183         // heap number -> false iff +0, -0, or NaN.
2184         Label not_heap_number;
2185         __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
2186         __ bne(&not_heap_number, Label::kNear);
2187         __ LoadDouble(dbl_scratch,
2188                       FieldMemOperand(reg, HeapNumber::kValueOffset));
2189         __ lzdr(kDoubleRegZero);
2190         __ cdbr(dbl_scratch, kDoubleRegZero);
2191         __ bunordered(instr->FalseLabel(chunk_));  // NaN -> false.
2192         __ beq(instr->FalseLabel(chunk_));         // +0, -0 -> false.
2193         __ b(instr->TrueLabel(chunk_));
2194         __ bind(&not_heap_number);
2195       }
2196 
2197       if (!expected.IsGeneric()) {
2198         // We've seen something for the first time -> deopt.
2199         // This can only happen if we are not generic already.
2200         DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject);
2201       }
2202     }
2203   }
2204 }
2205 
2206 void LCodeGen::EmitGoto(int block) {
2207   if (!IsNextEmittedBlock(block)) {
2208     __ b(chunk_->GetAssemblyLabel(LookupDestination(block)));
2209   }
2210 }
2211 
2212 void LCodeGen::DoGoto(LGoto* instr) { EmitGoto(instr->block_id()); }
2213 
2214 Condition LCodeGen::TokenToCondition(Token::Value op) {
2215   Condition cond = kNoCondition;
2216   switch (op) {
2217     case Token::EQ:
2218     case Token::EQ_STRICT:
2219       cond = eq;
2220       break;
2221     case Token::NE:
2222     case Token::NE_STRICT:
2223       cond = ne;
2224       break;
2225     case Token::LT:
2226       cond = lt;
2227       break;
2228     case Token::GT:
2229       cond = gt;
2230       break;
2231     case Token::LTE:
2232       cond = le;
2233       break;
2234     case Token::GTE:
2235       cond = ge;
2236       break;
2237     case Token::IN:
2238     case Token::INSTANCEOF:
2239     default:
2240       UNREACHABLE();
2241   }
2242   return cond;
2243 }
2244 
2245 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2246   LOperand* left = instr->left();
2247   LOperand* right = instr->right();
2248   bool is_unsigned =
2249       instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2250       instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2251   Condition cond = TokenToCondition(instr->op());
2252 
2253   if (left->IsConstantOperand() && right->IsConstantOperand()) {
2254     // We can statically evaluate the comparison.
2255     double left_val = ToDouble(LConstantOperand::cast(left));
2256     double right_val = ToDouble(LConstantOperand::cast(right));
2257     int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
2258                          ? instr->TrueDestination(chunk_)
2259                          : instr->FalseDestination(chunk_);
2260     EmitGoto(next_block);
2261   } else {
2262     if (instr->is_double()) {
2263       // Compare left and right operands as doubles and load the
2264       // resulting flags into the normal status register.
2265       __ cdbr(ToDoubleRegister(left), ToDoubleRegister(right));
2266       // If a NaN is involved, i.e. the result is unordered,
2267       // jump to false block label.
2268       __ bunordered(instr->FalseLabel(chunk_));
2269     } else {
2270       if (right->IsConstantOperand()) {
2271         int32_t value = ToInteger32(LConstantOperand::cast(right));
2272         if (instr->hydrogen_value()->representation().IsSmi()) {
2273           if (is_unsigned) {
2274             __ CmpLogicalSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
2275           } else {
2276             __ CmpSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
2277           }
2278         } else {
2279           if (is_unsigned) {
2280             __ CmpLogical32(ToRegister(left), ToOperand(right));
2281           } else {
2282             __ Cmp32(ToRegister(left), ToOperand(right));
2283           }
2284         }
2285       } else if (left->IsConstantOperand()) {
2286         int32_t value = ToInteger32(LConstantOperand::cast(left));
2287         if (instr->hydrogen_value()->representation().IsSmi()) {
2288           if (is_unsigned) {
2289             __ CmpLogicalSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
2290           } else {
2291             __ CmpSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
2292           }
2293         } else {
2294           if (is_unsigned) {
2295             __ CmpLogical32(ToRegister(right), ToOperand(left));
2296           } else {
2297             __ Cmp32(ToRegister(right), ToOperand(left));
2298           }
2299         }
2300         // We commuted the operands, so commute the condition.
2301         cond = CommuteCondition(cond);
2302       } else if (instr->hydrogen_value()->representation().IsSmi()) {
2303         if (is_unsigned) {
2304           __ CmpLogicalP(ToRegister(left), ToRegister(right));
2305         } else {
2306           __ CmpP(ToRegister(left), ToRegister(right));
2307         }
2308       } else {
2309         if (is_unsigned) {
2310           __ CmpLogical32(ToRegister(left), ToRegister(right));
2311         } else {
2312           __ Cmp32(ToRegister(left), ToRegister(right));
2313         }
2314       }
2315     }
2316     EmitBranch(instr, cond);
2317   }
2318 }
2319 
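// When the constant sits on the left above, the compare is emitted with
// swapped operands, so the condition is commuted (mirrored, not negated).
// Sketch of the mapping CommuteCondition applies:
//
//   eq <-> eq,  ne <-> ne,  lt <-> gt,  le <-> ge
//
// e.g. "5 < x" is emitted as "cmp x, 5" branching on gt.
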
2320 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2321   Register left = ToRegister(instr->left());
2322   Register right = ToRegister(instr->right());
2323 
2324   __ CmpP(left, right);
2325   EmitBranch(instr, eq);
2326 }
2327 
2328 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2329   if (instr->hydrogen()->representation().IsTagged()) {
2330     Register input_reg = ToRegister(instr->object());
2331     __ CmpP(input_reg, Operand(factory()->the_hole_value()));
2332     EmitBranch(instr, eq);
2333     return;
2334   }
2335 
2336   DoubleRegister input_reg = ToDoubleRegister(instr->object());
2337   __ cdbr(input_reg, input_reg);
2338   EmitFalseBranch(instr, ordered);
2339 
2340   Register scratch = scratch0();
2341   // Convert to GPR and examine the upper 32 bits
2342   __ lgdr(scratch, input_reg);
2343   __ srlg(scratch, scratch, Operand(32));
2344   __ Cmp32(scratch, Operand(kHoleNanUpper32));
2345   EmitBranch(instr, eq);
2346 }
2347 
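// The hole is a NaN with a distinguished payload, so the ordered compare
// above first rules out non-NaN values (x == x), and only the upper 32
// bits are then matched. Illustrative sketch using V8's kHoleNanUpper32:
//
//   uint64_t bits = bit_cast<uint64_t>(value);
//   bool is_hole = (value != value) &&
//                  static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32;
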
2348 Condition LCodeGen::EmitIsString(Register input, Register temp1,
2349                                  Label* is_not_string,
2350                                  SmiCheck check_needed = INLINE_SMI_CHECK) {
2351   if (check_needed == INLINE_SMI_CHECK) {
2352     __ JumpIfSmi(input, is_not_string);
2353   }
2354   __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
2355 
2356   return lt;
2357 }
2358 
2359 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2360   Register reg = ToRegister(instr->value());
2361   Register temp1 = ToRegister(instr->temp());
2362 
2363   SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
2364                               ? OMIT_SMI_CHECK
2365                               : INLINE_SMI_CHECK;
2366   Condition true_cond =
2367       EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
2368 
2369   EmitBranch(instr, true_cond);
2370 }
2371 
2372 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2373   Register input_reg = EmitLoadRegister(instr->value(), ip);
2374   __ TestIfSmi(input_reg);
2375   EmitBranch(instr, eq);
2376 }
2377 
2378 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2379   Register input = ToRegister(instr->value());
2380   Register temp = ToRegister(instr->temp());
2381 
2382   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2383     __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2384   }
2385   __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2386   __ tm(FieldMemOperand(temp, Map::kBitFieldOffset),
2387         Operand(1 << Map::kIsUndetectable));
2388   EmitBranch(instr, ne);
2389 }
2390 
2391 static Condition ComputeCompareCondition(Token::Value op) {
2392   switch (op) {
2393     case Token::EQ_STRICT:
2394     case Token::EQ:
2395       return eq;
2396     case Token::LT:
2397       return lt;
2398     case Token::GT:
2399       return gt;
2400     case Token::LTE:
2401       return le;
2402     case Token::GTE:
2403       return ge;
2404     default:
2405       UNREACHABLE();
2406       return kNoCondition;
2407   }
2408 }
2409 
2410 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2411   DCHECK(ToRegister(instr->context()).is(cp));
2412   DCHECK(ToRegister(instr->left()).is(r3));
2413   DCHECK(ToRegister(instr->right()).is(r2));
2414 
2415   Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
2416   CallCode(code, RelocInfo::CODE_TARGET, instr);
2417   __ CompareRoot(r2, Heap::kTrueValueRootIndex);
2418   EmitBranch(instr, eq);
2419 }
2420 
2421 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2422   InstanceType from = instr->from();
2423   InstanceType to = instr->to();
2424   if (from == FIRST_TYPE) return to;
2425   DCHECK(from == to || to == LAST_TYPE);
2426   return from;
2427 }
2428 
2429 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2430   InstanceType from = instr->from();
2431   InstanceType to = instr->to();
2432   if (from == to) return eq;
2433   if (to == LAST_TYPE) return ge;
2434   if (from == FIRST_TYPE) return le;
2435   UNREACHABLE();
2436   return eq;
2437 }
2438 
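// TestType/BranchCondition above collapse an instance-type range test into
// one compare by requiring the range to be degenerate or open at one end:
//
//   [T, T]            cmp type, T   -> branch on eq
//   [FIRST_TYPE, T]   cmp type, T   -> branch on le
//   [T, LAST_TYPE]    cmp type, T   -> branch on ge
//
// Arbitrary closed ranges would need two compares and are not generated
// for this instruction.
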
2439 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2440   Register scratch = scratch0();
2441   Register input = ToRegister(instr->value());
2442 
2443   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2444     __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2445   }
2446 
2447   __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
2448   EmitBranch(instr, BranchCondition(instr->hydrogen()));
2449 }
2450 
2451 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2452   Register input = ToRegister(instr->value());
2453   Register result = ToRegister(instr->result());
2454 
2455   __ AssertString(input);
2456 
2457   __ LoadlW(result, FieldMemOperand(input, String::kHashFieldOffset));
2458   __ IndexFromHash(result, result);
2459 }
2460 
2461 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2462     LHasCachedArrayIndexAndBranch* instr) {
2463   Register input = ToRegister(instr->value());
2464   Register scratch = scratch0();
2465 
2466   __ LoadlW(scratch, FieldMemOperand(input, String::kHashFieldOffset));
2467   __ mov(r0, Operand(String::kContainsCachedArrayIndexMask));
2468   __ AndP(r0, scratch);
2469   EmitBranch(instr, eq);
2470 }
2471 
2472 // Branches to a label or falls through with the answer in flags.  Trashes
2473 // the temp registers, but not the input.
2474 void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
2475                                Handle<String> class_name, Register input,
2476                                Register temp, Register temp2) {
2477   DCHECK(!input.is(temp));
2478   DCHECK(!input.is(temp2));
2479   DCHECK(!temp.is(temp2));
2480 
2481   __ JumpIfSmi(input, is_false);
2482 
2483   __ CompareObjectType(input, temp, temp2, FIRST_FUNCTION_TYPE);
2484   STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
2485   if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2486     __ bge(is_true);
2487   } else {
2488     __ bge(is_false);
2489   }
2490 
2491   // Check if the constructor in the map is a function.
2492   Register instance_type = ip;
2493   __ GetMapConstructor(temp, temp, temp2, instance_type);
2494 
2495   // Objects with a non-function constructor have class 'Object'.
2496   __ CmpP(instance_type, Operand(JS_FUNCTION_TYPE));
2497   if (String::Equals(isolate()->factory()->Object_string(), class_name)) {
2498     __ bne(is_true);
2499   } else {
2500     __ bne(is_false);
2501   }
2502 
2503   // temp now contains the constructor function. Grab the
2504   // instance class name from there.
2505   __ LoadP(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2506   __ LoadP(temp,
2507            FieldMemOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset));
2508   // The class name we are testing against is internalized since it's a literal.
2509   // The name in the constructor is internalized because of the way the context
2510   // is booted.  This routine isn't expected to work for random API-created
2511   // classes and it doesn't have to because you can't access it with natives
2512   // syntax.  Since both sides are internalized it is sufficient to use an
2513   // identity comparison.
2514   __ CmpP(temp, Operand(class_name));
2515   // End with the answer in flags.
2516 }
2517 
2518 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2519   Register input = ToRegister(instr->value());
2520   Register temp = scratch0();
2521   Register temp2 = ToRegister(instr->temp());
2522   Handle<String> class_name = instr->hydrogen()->class_name();
2523 
2524   EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2525                   class_name, input, temp, temp2);
2526 
2527   EmitBranch(instr, eq);
2528 }
2529 
2530 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2531   Register reg = ToRegister(instr->value());
2532   Register temp = ToRegister(instr->temp());
2533 
2534   __ mov(temp, Operand(instr->map()));
2535   __ CmpP(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2536   EmitBranch(instr, eq);
2537 }
2538 
2539 void LCodeGen::DoHasInPrototypeChainAndBranch(
2540     LHasInPrototypeChainAndBranch* instr) {
2541   Register const object = ToRegister(instr->object());
2542   Register const object_map = scratch0();
2543   Register const object_instance_type = ip;
2544   Register const object_prototype = object_map;
2545   Register const prototype = ToRegister(instr->prototype());
2546 
2547   // The {object} must be a spec object.  It's sufficient to know that {object}
2548   // is not a smi, since all other non-spec objects have {null} prototypes and
2549   // will be ruled out below.
2550   if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
2551     __ TestIfSmi(object);
2552     EmitFalseBranch(instr, eq);
2553   }
2554   // Loop through the {object}s prototype chain looking for the {prototype}.
2555   __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
2556   Label loop;
2557   __ bind(&loop);
2558 
2559   // Deoptimize if the object needs to be access checked.
2560   __ LoadlB(object_instance_type,
2561             FieldMemOperand(object_map, Map::kBitFieldOffset));
2562   __ TestBit(object_instance_type, Map::kIsAccessCheckNeeded, r0);
2563   DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck, cr0);
2564   // Deoptimize for proxies.
2565   __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
2566   DeoptimizeIf(eq, instr, Deoptimizer::kProxy);
2567   __ LoadP(object_prototype,
2568            FieldMemOperand(object_map, Map::kPrototypeOffset));
2569   __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
2570   EmitFalseBranch(instr, eq);
2571   __ CmpP(object_prototype, prototype);
2572   EmitTrueBranch(instr, eq);
2573   __ LoadP(object_map,
2574            FieldMemOperand(object_prototype, HeapObject::kMapOffset));
2575   __ b(&loop);
2576 }
2577 
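// The loop above is the assembly form of this conceptual walk (sketch;
// helper names illustrative, and the access-check and proxy deopts handled
// inline are omitted):
//
//   for (Map* m = map_of(object);;) {
//     Object* p = prototype_of(m);
//     if (p == null)      return false;  // end of the chain
//     if (p == prototype) return true;
//     m = map_of(p);
//   }
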
2578 void LCodeGen::DoCmpT(LCmpT* instr) {
2579   DCHECK(ToRegister(instr->context()).is(cp));
2580   Token::Value op = instr->op();
2581 
2582   Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2583   CallCode(ic, RelocInfo::CODE_TARGET, instr);
2584   // This instruction also signals that no smi code was inlined.
2585   __ CmpP(r2, Operand::Zero());
2586 
2587   Condition condition = ComputeCompareCondition(op);
2588   Label true_value, done;
2589 
2590   __ b(condition, &true_value, Label::kNear);
2591 
2592   __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2593   __ b(&done, Label::kNear);
2594 
2595   __ bind(&true_value);
2596   __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2597 
2598   __ bind(&done);
2599 }
2600 
2601 void LCodeGen::DoReturn(LReturn* instr) {
2602   if (FLAG_trace && info()->IsOptimizing()) {
2603     // Push the return value on the stack as the parameter.
2604     // Runtime::TraceExit returns its parameter in r2.  Since we're leaving
2605     // the code managed by the register allocator and tearing down the frame,
2606     // it's safe to write to the context register.
2607     __ push(r2);
2608     __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2609     __ CallRuntime(Runtime::kTraceExit);
2610   }
2611   if (info()->saves_caller_doubles()) {
2612     RestoreCallerDoubles();
2613   }
2614   if (instr->has_constant_parameter_count()) {
2615     int parameter_count = ToInteger32(instr->constant_parameter_count());
2616     int32_t sp_delta = (parameter_count + 1) * kPointerSize;
2617     if (NeedsEagerFrame()) {
2618       masm_->LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
2619     } else if (sp_delta != 0) {
2620       // TODO(joransiu): Clean this up into Macro Assembler
2621       if (sp_delta >= 0 && sp_delta < 4096)
2622         __ la(sp, MemOperand(sp, sp_delta));
2623       else
2624         __ lay(sp, MemOperand(sp, sp_delta));
2625     }
2626   } else {
2627     DCHECK(info()->IsStub());  // Functions would need to drop one more value.
2628     Register reg = ToRegister(instr->parameter_count());
2629     // The argument count parameter is a smi
2630     if (NeedsEagerFrame()) {
2631       masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
2632     }
2633     __ SmiToPtrArrayOffset(r0, reg);
2634     __ AddP(sp, sp, r0);
2635   }
2636 
2637   __ Ret();
2638 }
2639 
2640 template <class T>
2641 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
2642   Register vector_register = ToRegister(instr->temp_vector());
2643   Register slot_register = LoadDescriptor::SlotRegister();
2644   DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
2645   DCHECK(slot_register.is(r2));
2646 
2647   AllowDeferredHandleDereference vector_structure_check;
2648   Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2649   __ Move(vector_register, vector);
2650   // No need to allocate this register.
2651   FeedbackVectorSlot slot = instr->hydrogen()->slot();
2652   int index = vector->GetIndex(slot);
2653   __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
2654 }
2655 
2656 template <class T>
2657 void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
2658   Register vector_register = ToRegister(instr->temp_vector());
2659   Register slot_register = ToRegister(instr->temp_slot());
2660 
2661   AllowDeferredHandleDereference vector_structure_check;
2662   Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2663   __ Move(vector_register, vector);
2664   FeedbackVectorSlot slot = instr->hydrogen()->slot();
2665   int index = vector->GetIndex(slot);
2666   __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
2667 }
2668 
2669 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2670   DCHECK(ToRegister(instr->context()).is(cp));
2671   DCHECK(ToRegister(instr->result()).is(r2));
2672 
2673   EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
2674   Handle<Code> ic =
2675       CodeFactory::LoadGlobalICInOptimizedCode(isolate(), instr->typeof_mode())
2676           .code();
2677   CallCode(ic, RelocInfo::CODE_TARGET, instr);
2678 }
2679 
2680 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2681   Register context = ToRegister(instr->context());
2682   Register result = ToRegister(instr->result());
2683   __ LoadP(result, ContextMemOperand(context, instr->slot_index()));
2684   if (instr->hydrogen()->RequiresHoleCheck()) {
2685     __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2686     if (instr->hydrogen()->DeoptimizesOnHole()) {
2687       DeoptimizeIf(eq, instr, Deoptimizer::kHole);
2688     } else {
2689       Label skip;
2690       __ bne(&skip, Label::kNear);
2691       __ mov(result, Operand(factory()->undefined_value()));
2692       __ bind(&skip);
2693     }
2694   }
2695 }
2696 
2697 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2698   Register context = ToRegister(instr->context());
2699   Register value = ToRegister(instr->value());
2700   Register scratch = scratch0();
2701   MemOperand target = ContextMemOperand(context, instr->slot_index());
2702 
2703   Label skip_assignment;
2704 
2705   if (instr->hydrogen()->RequiresHoleCheck()) {
2706     __ LoadP(scratch, target);
2707     __ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
2708     if (instr->hydrogen()->DeoptimizesOnHole()) {
2709       DeoptimizeIf(eq, instr, Deoptimizer::kHole);
2710     } else {
2711       __ bne(&skip_assignment);
2712     }
2713   }
2714 
2715   __ StoreP(value, target);
2716   if (instr->hydrogen()->NeedsWriteBarrier()) {
2717     SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
2718                                 ? OMIT_SMI_CHECK
2719                                 : INLINE_SMI_CHECK;
2720     __ RecordWriteContextSlot(context, target.offset(), value, scratch,
2721                               GetLinkRegisterState(), kSaveFPRegs,
2722                               EMIT_REMEMBERED_SET, check_needed);
2723   }
2724 
2725   __ bind(&skip_assignment);
2726 }
2727 
2728 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2729   HObjectAccess access = instr->hydrogen()->access();
2730   int offset = access.offset();
2731   Register object = ToRegister(instr->object());
2732 
2733   if (access.IsExternalMemory()) {
2734     Register result = ToRegister(instr->result());
2735     MemOperand operand = MemOperand(object, offset);
2736     __ LoadRepresentation(result, operand, access.representation(), r0);
2737     return;
2738   }
2739 
2740   if (instr->hydrogen()->representation().IsDouble()) {
2741     DCHECK(access.IsInobject());
2742     DoubleRegister result = ToDoubleRegister(instr->result());
2743     __ ld(result, FieldMemOperand(object, offset));
2744     return;
2745   }
2746 
2747   Register result = ToRegister(instr->result());
2748   if (!access.IsInobject()) {
2749     __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
2750     object = result;
2751   }
2752 
2753   Representation representation = access.representation();
2754 
2755 #if V8_TARGET_ARCH_S390X
2756   // 64-bit Smi optimization
2757   if (representation.IsSmi() &&
2758       instr->hydrogen()->representation().IsInteger32()) {
2759     // Read int value directly from upper half of the smi.
2760     offset = SmiWordOffset(offset);
2761     representation = Representation::Integer32();
2762   }
2763 #endif
2764 
2765   __ LoadRepresentation(result, FieldMemOperand(object, offset), representation,
2766                         r0);
2767 }
2768 
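// The 64-bit smi optimization above works because a smi on S390X keeps its
// 32-bit payload in the upper word of the tagged 64-bit value, so the
// integer can be read with a plain 4-byte load. Sketch of the offset
// adjustment SmiWordOffset performs, assuming that smi layout:
//
//   int field_off = little_endian ? offset + kPointerSize / 2  // high word
//                                 : offset;                    // big-endian
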
2769 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2770   DCHECK(ToRegister(instr->context()).is(cp));
2771   DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
2772   DCHECK(ToRegister(instr->result()).is(r2));
2773 
2774   // Name is always in r4.
2775   __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
2776   EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
2777   Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate()).code();
2778   CallCode(ic, RelocInfo::CODE_TARGET, instr);
2779 }
2780 
2781 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2782   Register scratch = scratch0();
2783   Register function = ToRegister(instr->function());
2784   Register result = ToRegister(instr->result());
2785 
2786   // Get the prototype or initial map from the function.
2787   __ LoadP(result,
2788            FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2789 
2790   // Check that the function has a prototype or an initial map.
2791   __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
2792   DeoptimizeIf(eq, instr, Deoptimizer::kHole);
2793 
2794   // If the function does not have an initial map, we're done.
2795   Label done;
2796   __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
2797   __ bne(&done, Label::kNear);
2798 
2799   // Get the prototype from the initial map.
2800   __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
2801 
2802   // All done.
2803   __ bind(&done);
2804 }
2805 
2806 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
2807   Register result = ToRegister(instr->result());
2808   __ LoadRoot(result, instr->index());
2809 }
2810 
2811 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2812   Register arguments = ToRegister(instr->arguments());
2813   Register result = ToRegister(instr->result());
2814   // There are two words between the frame pointer and the last argument.
2815   // Subtracting from length accounts for one of them; add one more.
  if (instr->length()->IsConstantOperand()) {
    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
    if (instr->index()->IsConstantOperand()) {
      int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
      int index = (const_length - const_index) + 1;
      __ LoadP(result, MemOperand(arguments, index * kPointerSize));
    } else {
      Register index = ToRegister(instr->index());
      __ SubP(result, index, Operand(const_length + 1));
      __ LoadComplementRR(result, result);
      __ ShiftLeftP(result, result, Operand(kPointerSizeLog2));
      __ LoadP(result, MemOperand(arguments, result));
    }
  } else if (instr->index()->IsConstantOperand()) {
    Register length = ToRegister(instr->length());
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    int loc = const_index - 1;
    if (loc != 0) {
      __ SubP(result, length, Operand(loc));
      __ ShiftLeftP(result, result, Operand(kPointerSizeLog2));
      __ LoadP(result, MemOperand(arguments, result));
    } else {
      __ ShiftLeftP(result, length, Operand(kPointerSizeLog2));
      __ LoadP(result, MemOperand(arguments, result));
    }
  } else {
    Register length = ToRegister(instr->length());
    Register index = ToRegister(instr->index());
    __ SubP(result, length, index);
    __ AddP(result, result, Operand(1));
    __ ShiftLeftP(result, result, Operand(kPointerSizeLog2));
    __ LoadP(result, MemOperand(arguments, result));
  }
}

void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
  Register external_pointer = ToRegister(instr->elements());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
  bool keyMaybeNegative = instr->hydrogen()->IsDehoisted();
  int base_offset = instr->base_offset();
  bool use_scratch = false;

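  // z/Architecture RXY-form memory operands carry a 20-bit signed
  // displacement, so computed offsets outside that range must be materialized
  // in a scratch register and folded in as the index part of the operand.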
  if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
    DoubleRegister result = ToDoubleRegister(instr->result());
    if (key_is_constant) {
      base_offset += constant_key << element_size_shift;
      if (!is_int20(base_offset)) {
        __ mov(scratch0(), Operand(base_offset));
        base_offset = 0;
        use_scratch = true;
      }
    } else {
      __ IndexToArrayOffset(scratch0(), key, element_size_shift, key_is_smi,
                            keyMaybeNegative);
      use_scratch = true;
    }
    if (elements_kind == FLOAT32_ELEMENTS) {
      if (!use_scratch) {
        __ ldeb(result, MemOperand(external_pointer, base_offset));
      } else {
        __ ldeb(result, MemOperand(scratch0(), external_pointer, base_offset));
      }
    } else {  // i.e. elements_kind == FLOAT64_ELEMENTS
      if (!use_scratch) {
        __ ld(result, MemOperand(external_pointer, base_offset));
      } else {
        __ ld(result, MemOperand(scratch0(), external_pointer, base_offset));
      }
    }
  } else {
    Register result = ToRegister(instr->result());
    MemOperand mem_operand =
        PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
                            constant_key, element_size_shift, base_offset,
                            keyMaybeNegative);
    switch (elements_kind) {
      case INT8_ELEMENTS:
        __ LoadB(result, mem_operand);
        break;
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
        __ LoadlB(result, mem_operand);
        break;
      case INT16_ELEMENTS:
        __ LoadHalfWordP(result, mem_operand);
        break;
      case UINT16_ELEMENTS:
        __ LoadLogicalHalfWordP(result, mem_operand);
        break;
      case INT32_ELEMENTS:
        __ LoadW(result, mem_operand, r0);
        break;
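      // An unsigned 32-bit load can yield values >= 2^31 that have no int32
      // representation; unless the result is known to be consumed as uint32
      // (the kUint32 flag), such values must deoptimize.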
      case UINT32_ELEMENTS:
        __ LoadlW(result, mem_operand, r0);
        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
          __ CmpLogical32(result, Operand(0x80000000));
          DeoptimizeIf(ge, instr, Deoptimizer::kNegativeValue);
        }
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
      case FAST_STRING_WRAPPER_ELEMENTS:
      case SLOW_STRING_WRAPPER_ELEMENTS:
      case NO_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}

void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
  Register elements = ToRegister(instr->elements());
  bool key_is_constant = instr->key()->IsConstantOperand();
  Register key = no_reg;
  DoubleRegister result = ToDoubleRegister(instr->result());
  Register scratch = scratch0();

  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
  bool keyMaybeNegative = instr->hydrogen()->IsDehoisted();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }

  bool use_scratch = false;
  intptr_t base_offset = instr->base_offset() + constant_key * kDoubleSize;
  if (!key_is_constant) {
    use_scratch = true;
    __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi,
                          keyMaybeNegative);
  }

  // Memory references support up to 20-bit signed displacement in RXY form.
  // Include Register::kExponentOffset in the check so we are guaranteed not
  // to overflow the displacement later.
  if (!is_int20(base_offset + Register::kExponentOffset)) {
    use_scratch = true;
    if (key_is_constant) {
      __ mov(scratch, Operand(base_offset));
    } else {
      __ AddP(scratch, Operand(base_offset));
    }
    base_offset = 0;
  }

  if (!use_scratch) {
    __ ld(result, MemOperand(elements, base_offset));
  } else {
    __ ld(result, MemOperand(scratch, elements, base_offset));
  }

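  // Holes in FAST_HOLEY_DOUBLE arrays are stored as a dedicated NaN bit
  // pattern, so comparing the upper (exponent) word of the loaded value
  // against kHoleNanUpper32 suffices: NaN canonicalization on the store path
  // keeps ordinary NaNs distinct from the hole pattern.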
  if (instr->hydrogen()->RequiresHoleCheck()) {
    if (!use_scratch) {
      __ LoadlW(r0,
                MemOperand(elements, base_offset + Register::kExponentOffset));
    } else {
      __ LoadlW(r0, MemOperand(scratch, elements,
                               base_offset + Register::kExponentOffset));
    }
    __ Cmp32(r0, Operand(kHoleNanUpper32));
    DeoptimizeIf(eq, instr, Deoptimizer::kHole);
  }
}

void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
  HLoadKeyed* hinstr = instr->hydrogen();
  Register elements = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  int offset = instr->base_offset();

  if (instr->key()->IsConstantOperand()) {
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset += ToInteger32(const_operand) * kPointerSize;
  } else {
    Register key = ToRegister(instr->key());
    // Even though the HLoadKeyed instruction forces the input
    // representation for the key to be an integer, the input gets replaced
    // during bound check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
    if (hinstr->key()->representation().IsSmi()) {
      __ SmiToPtrArrayOffset(scratch, key);
    } else {
      __ ShiftLeftP(scratch, key, Operand(kPointerSizeLog2));
    }
  }

  bool requires_hole_check = hinstr->RequiresHoleCheck();
  Representation representation = hinstr->representation();

#if V8_TARGET_ARCH_S390X
  // 64-bit Smi optimization
  if (representation.IsInteger32() &&
      hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
    DCHECK(!requires_hole_check);
    // Read int value directly from upper half of the smi.
    offset = SmiWordOffset(offset);
  }
#endif
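  // A 64-bit Smi keeps its 32-bit payload in the upper half of the word, so
  // an Integer32 use can be satisfied by a 4-byte load of just that half;
  // SmiWordOffset adjusts the offset to address it (on big-endian s390 the
  // high half is the word at the lower address).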

  if (instr->key()->IsConstantOperand()) {
    __ LoadRepresentation(result, MemOperand(elements, offset), representation,
                          r1);
  } else {
    __ LoadRepresentation(result, MemOperand(scratch, elements, offset),
                          representation, r1);
  }

  // Check for the hole value.
  if (requires_hole_check) {
    if (IsFastSmiElementsKind(hinstr->elements_kind())) {
      __ TestIfSmi(result);
      DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
    } else {
      __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
      DeoptimizeIf(eq, instr, Deoptimizer::kHole);
    }
  } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
    DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
    Label done;
    __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
    __ CmpP(result, scratch);
    __ bne(&done);
    if (info()->IsStub()) {
      // A stub can safely convert the hole to undefined only if the array
      // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
      // it needs to bail out.
      __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
      __ LoadP(result, FieldMemOperand(result, Cell::kValueOffset));
      __ CmpSmiLiteral(result, Smi::FromInt(Isolate::kArrayProtectorValid), r0);
      DeoptimizeIf(ne, instr, Deoptimizer::kHole);
    }
    __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
    __ bind(&done);
  }
}

void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
  if (instr->is_fixed_typed_array()) {
    DoLoadKeyedExternalArray(instr);
  } else if (instr->hydrogen()->representation().IsDouble()) {
    DoLoadKeyedFixedDoubleArray(instr);
  } else {
    DoLoadKeyedFixedArray(instr);
  }
}

MemOperand LCodeGen::PrepareKeyedOperand(Register key, Register base,
                                         bool key_is_constant, bool key_is_smi,
                                         int constant_key,
                                         int element_size_shift,
                                         int base_offset,
                                         bool keyMaybeNegative) {
  Register scratch = scratch0();

  if (key_is_constant) {
    int offset = (base_offset + (constant_key << element_size_shift));
    if (!is_int20(offset)) {
      __ mov(scratch, Operand(offset));
      return MemOperand(base, scratch);
    } else {
      return MemOperand(base,
                        (constant_key << element_size_shift) + base_offset);
    }
  }

  bool needs_shift =
      (element_size_shift != (key_is_smi ? kSmiTagSize + kSmiShiftSize : 0));
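  // No shift is needed when the key's existing scaling already matches the
  // element size: an untagged int32 key indexing byte-sized elements, or (on
  // 32-bit targets) a Smi key whose tag shift of 1 equals the element size
  // shift.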

  if (needs_shift) {
    __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi,
                          keyMaybeNegative);
  } else {
    scratch = key;
  }

  if (!is_int20(base_offset)) {
    __ AddP(scratch, Operand(base_offset));
    base_offset = 0;
  }
  return MemOperand(scratch, base, base_offset);
}

void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
  EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);

  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    __ lay(result, MemOperand(sp, -2 * kPointerSize));
  } else if (instr->hydrogen()->arguments_adaptor()) {
    // Check if the calling frame is an arguments adaptor frame.
    Label done, adapted;
    __ LoadP(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
    __ LoadP(
        result,
        MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
    __ CmpSmiLiteral(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);

    // The result is this frame's frame pointer if the frame is not adapted,
    // or the real frame below the adaptor frame if it is adapted.
    __ beq(&adapted, Label::kNear);
    __ LoadRR(result, fp);
    __ b(&done, Label::kNear);

    __ bind(&adapted);
    __ LoadRR(result, scratch);
    __ bind(&done);
  } else {
    __ LoadRR(result, fp);
  }
}

void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register elem = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());

  Label done;

  // If there is no arguments adaptor frame, the number of arguments is fixed.
  __ CmpP(fp, elem);
  __ mov(result, Operand(scope()->num_parameters()));
  __ beq(&done, Label::kNear);

  // Arguments adaptor frame present. Get argument length from there.
  __ LoadP(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ LoadP(result,
           MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(result);

  // Argument length is in result register.
  __ bind(&done);
}

void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // If the receiver is null or undefined, we have to pass the global
  // object as a receiver to normal functions. Values have to be
  // passed unchanged to builtins and strict-mode functions.
  Label global_object, result_in_receiver;

  if (!instr->hydrogen()->known_function()) {
    // Do not transform the receiver to object for strict mode
    // functions or builtins.
    __ LoadP(scratch,
             FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
    __ LoadlW(scratch, FieldMemOperand(
                           scratch, SharedFunctionInfo::kCompilerHintsOffset));
    __ AndP(r0, scratch, Operand((1 << SharedFunctionInfo::kStrictModeBit) |
                                 (1 << SharedFunctionInfo::kNativeBit)));
    __ bne(&result_in_receiver, Label::kNear);
  }

  // Normal function. Replace undefined or null with global receiver.
  __ CompareRoot(receiver, Heap::kNullValueRootIndex);
  __ beq(&global_object, Label::kNear);
  __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
  __ beq(&global_object, Label::kNear);

  // Deoptimize if the receiver is not a JS object.
  __ TestIfSmi(receiver);
  DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
  __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE);
  DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);

  __ b(&result_in_receiver, Label::kNear);
  __ bind(&global_object);
  __ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset));
  __ LoadP(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
  __ LoadP(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));

  if (result.is(receiver)) {
    __ bind(&result_in_receiver);
  } else {
    Label result_ok;
    __ b(&result_ok, Label::kNear);
    __ bind(&result_in_receiver);
    __ LoadRR(result, receiver);
    __ bind(&result_ok);
  }
}

void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  Register scratch = scratch0();
  DCHECK(receiver.is(r2));  // Used for parameter count.
  DCHECK(function.is(r3));  // Required by InvokeFunction.
  DCHECK(ToRegister(instr->result()).is(r2));

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  __ CmpLogicalP(length, Operand(kArgumentsLimit));
  DeoptimizeIf(gt, instr, Deoptimizer::kTooManyArguments);

  // Push the receiver and use the register to keep the original
  // number of arguments.
  __ push(receiver);
  __ LoadRR(receiver, length);
  // The arguments are at a one pointer size offset from elements.
  __ AddP(elements, Operand(1 * kPointerSize));

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ CmpP(length, Operand::Zero());
  __ beq(&invoke, Label::kNear);
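  // BranchOnCount below decrements 'length' and loops while it is still
  // non-zero (a BRCT-style count loop), so the iterations push the elements
  // at byte offsets length * kPointerSize down to 1 * kPointerSize.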
  __ bind(&loop);
  __ ShiftLeftP(r1, length, Operand(kPointerSizeLog2));
  __ LoadP(scratch, MemOperand(elements, r1));
  __ push(scratch);
  __ BranchOnCount(length, &loop);

  __ bind(&invoke);

  InvokeFlag flag = CALL_FUNCTION;
  if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
    DCHECK(!info()->saves_caller_doubles());
    // TODO(ishell): drop current frame before pushing arguments to the stack.
    flag = JUMP_FUNCTION;
    ParameterCount actual(r2);
    // It is safe to use r5, r6 and r7 as scratch registers here given that
    // 1) we are not going to return to caller function anyway,
    // 2) r5 (new.target) will be initialized below.
    PrepareForTailCall(actual, r5, r6, r7);
  }

  DCHECK(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
  // The number of arguments is stored in receiver which is r2, as expected
  // by InvokeFunction.
  ParameterCount actual(receiver);
  __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
}

void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->value();
  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
    Abort(kDoPushArgumentNotImplementedForDoubleType);
  } else {
    Register argument_reg = EmitLoadRegister(argument, ip);
    __ push(argument_reg);
  }
}

void LCodeGen::DoDrop(LDrop* instr) { __ Drop(instr->count()); }

void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ LoadP(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}

void LCodeGen::DoContext(LContext* instr) {
  // If there is a non-return use, the context must be moved to a register.
  Register result = ToRegister(instr->result());
  if (info()->IsOptimizing()) {
    __ LoadP(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
  } else {
    // If there is no frame, the context must be in cp.
    DCHECK(result.is(cp));
  }
}

void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  __ Move(scratch0(), instr->hydrogen()->pairs());
  __ push(scratch0());
  __ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags()));
  __ push(scratch0());
  CallRuntime(Runtime::kDeclareGlobals, instr);
}

void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int formal_parameter_count, int arity,
                                 bool is_tail_call, LInstruction* instr) {
  bool dont_adapt_arguments =
      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
  bool can_invoke_directly =
      dont_adapt_arguments || formal_parameter_count == arity;

  Register function_reg = r3;

  LPointerMap* pointers = instr->pointer_map();

  if (can_invoke_directly) {
    // Change context.
    __ LoadP(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));

    // Always initialize new target and number of actual arguments.
    __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
    __ mov(r2, Operand(arity));

    bool is_self_call = function.is_identical_to(info()->closure());
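    // A self-call cannot embed the optimized code object directly, since it
    // is still being generated; __ CodeObject() appears to provide a handle
    // cell that is filled in with the final code object, so calling through
    // it targets this very code once it exists.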

    // Invoke function.
    if (is_self_call) {
      Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
      if (is_tail_call) {
        __ Jump(self, RelocInfo::CODE_TARGET);
      } else {
        __ Call(self, RelocInfo::CODE_TARGET);
      }
    } else {
      __ LoadP(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
      if (is_tail_call) {
        __ JumpToJSEntry(ip);
      } else {
        __ CallJSEntry(ip);
      }
    }

    if (!is_tail_call) {
      // Set up deoptimization.
      RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
    }
  } else {
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount actual(arity);
    ParameterCount expected(formal_parameter_count);
    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
    __ InvokeFunction(function_reg, expected, actual, flag, generator);
  }
}

void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
  DCHECK(instr->context() != NULL);
  DCHECK(ToRegister(instr->context()).is(cp));
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // Deoptimize if not a heap number.
  __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
  DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);

  Label done;
  Register exponent = scratch0();
  scratch = no_reg;
  __ LoadlW(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it.
  __ Cmp32(exponent, Operand::Zero());
  // Move the input to the result if necessary.
  __ Move(result, input);
  __ bge(&done);

  // Input is negative. Reverse its sign.
  // Preserve the value of all registers.
  {
    PushSafepointRegistersScope scope(this);

    // Registers were saved at the safepoint, so we can use
    // many scratch registers.
    Register tmp1 = input.is(r3) ? r2 : r3;
    Register tmp2 = input.is(r4) ? r2 : r4;
    Register tmp3 = input.is(r5) ? r2 : r5;
    Register tmp4 = input.is(r6) ? r2 : r6;

    // exponent: floating point exponent value.

    Label allocated, slow;
    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
    __ b(&allocated);

    // Slow case: Call the runtime system to do the number allocation.
    __ bind(&slow);

    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
                            instr->context());
    // Set the pointer to the new heap number in tmp.
    if (!tmp1.is(r2)) __ LoadRR(tmp1, r2);
    // Restore input_reg after call to runtime.
    __ LoadFromSafepointRegisterSlot(input, input);
    __ LoadlW(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));

    __ bind(&allocated);
    // exponent: floating point exponent value.
    // tmp1: allocated heap number.

    // Clear the sign bit.
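    // The AND below clears HeapNumber::kSignMask (bit 31 of the upper word
    // of the double), negating a negative value without touching exponent or
    // mantissa bits. For example, -2.5 has upper word 0xC0040000; clearing
    // the sign bit yields 0x40040000, the upper word of +2.5.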
    __ nilf(exponent, Operand(~HeapNumber::kSignMask));
    __ StoreW(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
    __ LoadlW(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
    __ StoreW(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));

    __ StoreToSafepointRegisterSlot(tmp1, result);
  }

  __ bind(&done);
}

void LCodeGen::EmitMathAbs(LMathAbs* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Label done;
  __ CmpP(input, Operand::Zero());
  __ Move(result, input);
  __ bge(&done, Label::kNear);
  __ LoadComplementRR(result, result);
  // Deoptimize on overflow.
  DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
  __ bind(&done);
}

#if V8_TARGET_ARCH_S390X
void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Label done;
  __ Cmp32(input, Operand::Zero());
  __ Move(result, input);
  __ bge(&done, Label::kNear);

  // Deoptimize on overflow.
  __ Cmp32(input, Operand(0x80000000));
  DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);

  __ LoadComplementRR(result, result);
  __ bind(&done);
}
#endif

void LCodeGen::DoMathAbs(LMathAbs* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LMathAbs* instr_;
  };

  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsDouble()) {
    DoubleRegister input = ToDoubleRegister(instr->value());
    DoubleRegister result = ToDoubleRegister(instr->result());
    __ lpdbr(result, input);
#if V8_TARGET_ARCH_S390X
  } else if (r.IsInteger32()) {
    EmitInteger32MathAbs(instr);
  } else if (r.IsSmi()) {
#else
  } else if (r.IsSmiOrInteger32()) {
#endif
    EmitMathAbs(instr);
  } else {
    // Representation is tagged.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new (zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
    Register input = ToRegister(instr->value());
    // Smi check.
    __ JumpIfNotSmi(input, deferred->entry());
    // If smi, handle it directly.
    EmitMathAbs(instr);
    __ bind(deferred->exit());
  }
}

void LCodeGen::DoMathFloor(LMathFloor* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register input_high = scratch0();
  Register scratch = ip;
  Label done, exact;

  __ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done,
                   &exact);
  DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);

  __ bind(&exact);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0.
    __ CmpP(result, Operand::Zero());
    __ bne(&done, Label::kNear);
    __ Cmp32(input_high, Operand::Zero());
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
  }
  __ bind(&done);
}

void LCodeGen::DoMathRound(LMathRound* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
  DoubleRegister input_plus_dot_five = double_scratch1;
  Register scratch1 = scratch0();
  Register scratch2 = ip;
  DoubleRegister dot_five = double_scratch0();
  Label convert, done;
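  // Rounding strategy: |input| <= 0.5 never needs a conversion -- exactly
  // +0.5 rounds to 1 and everything else in that range rounds to 0 (values
  // in [-0.5, -0] would produce -0, so they deoptimize when
  // kBailoutOnMinusZero is set); |input| > 0.5 is rounded as
  // floor(input + 0.5) via TryInt32Floor.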

  __ LoadDoubleLiteral(dot_five, 0.5, r0);
  __ lpdbr(double_scratch1, input);
  __ cdbr(double_scratch1, dot_five);
  DeoptimizeIf(unordered, instr, Deoptimizer::kLostPrecisionOrNaN);
  // If input is in [-0.5, -0], the result is -0.
  // If input is in [+0, +0.5[, the result is +0.
  // If the input is +0.5, the result is 1.
  __ bgt(&convert, Label::kNear);  // Out of [-0.5, +0.5].
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // [-0.5, -0] (negative) yields minus zero.
    __ TestDoubleSign(input, scratch1);
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
  }
  Label return_zero;
  __ cdbr(input, dot_five);
  __ bne(&return_zero, Label::kNear);
  __ LoadImmP(result, Operand(1));  // +0.5.
  __ b(&done, Label::kNear);
  // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
  // flag kBailoutOnMinusZero.
  __ bind(&return_zero);
  __ LoadImmP(result, Operand::Zero());
  __ b(&done, Label::kNear);

  __ bind(&convert);
  __ ldr(input_plus_dot_five, input);
  __ adbr(input_plus_dot_five, dot_five);
  // Reuse dot_five (double_scratch0) as we no longer need this value.
  __ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2,
                   double_scratch0(), &done, &done);
  DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
  __ bind(&done);
}

void LCodeGen::DoMathFround(LMathFround* instr) {
  DoubleRegister input_reg = ToDoubleRegister(instr->value());
  DoubleRegister output_reg = ToDoubleRegister(instr->result());

  // Round double to float
  __ ledbr(output_reg, input_reg);
  // Extend from float to double
  __ ldebr(output_reg, output_reg);
}

void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  __ sqdbr(result, input);
}

void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  DoubleRegister temp = double_scratch0();

  // Note that according to ECMA-262 15.8.2.13:
  // Math.pow(-Infinity, 0.5) == Infinity
  // Math.sqrt(-Infinity) == NaN
  Label skip, done;

  __ LoadDoubleLiteral(temp, -V8_INFINITY, scratch0());
  __ cdbr(input, temp);
  __ bne(&skip, Label::kNear);
  __ lcdbr(result, temp);
  __ b(&done, Label::kNear);

  // Add +0 to convert -0 to +0.
  __ bind(&skip);
  __ ldr(result, input);
  __ lzdr(kDoubleRegZero);
  __ adbr(result, kDoubleRegZero);
  __ sqdbr(result, result);
  __ bind(&done);
}

void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.
  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
  DCHECK(!instr->right()->IsDoubleRegister() ||
         ToDoubleRegister(instr->right()).is(d2));
  DCHECK(!instr->right()->IsRegister() ||
         ToRegister(instr->right()).is(tagged_exponent));
  DCHECK(ToDoubleRegister(instr->left()).is(d1));
  DCHECK(ToDoubleRegister(instr->result()).is(d3));

  if (exponent_type.IsSmi()) {
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(tagged_exponent, &no_deopt);
    __ LoadP(r9, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
    __ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
    __ bind(&no_deopt);
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    MathPowStub stub(isolate(), MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    DCHECK(exponent_type.IsDouble());
    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}

void LCodeGen::DoMathCos(LMathCos* instr) {
  __ PrepareCallCFunction(0, 1, scratch0());
  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
  __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 0, 1);
  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
}

void LCodeGen::DoMathSin(LMathSin* instr) {
  __ PrepareCallCFunction(0, 1, scratch0());
  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
  __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1);
  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
}

void LCodeGen::DoMathExp(LMathExp* instr) {
  __ PrepareCallCFunction(0, 1, scratch0());
  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
  __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 0, 1);
  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
}

void LCodeGen::DoMathLog(LMathLog* instr) {
  __ PrepareCallCFunction(0, 1, scratch0());
  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
  __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1);
  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
}

void LCodeGen::DoMathClz32(LMathClz32* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Label done;
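  // flogr counts the leading zero bits of a 64-bit value (yielding 64 for
  // zero). Since llgfr zero-extends the 32-bit input, the count is 32 too
  // high and is corrected below; the early exit guards a zero count, which
  // appears unreachable after zero-extension.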
  __ llgfr(result, input);
  __ flogr(r0, result);
  __ LoadRR(result, r0);
  __ CmpP(r0, Operand::Zero());
  __ beq(&done, Label::kNear);
  __ SubP(result, Operand(32));
  __ bind(&done);
}

void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
                                  Register scratch1, Register scratch2,
                                  Register scratch3) {
#if DEBUG
  if (actual.is_reg()) {
    DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
  } else {
    DCHECK(!AreAliased(scratch1, scratch2, scratch3));
  }
#endif
  if (FLAG_code_comments) {
    if (actual.is_reg()) {
      Comment(";;; PrepareForTailCall, actual: %s {",
              RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
                  actual.reg().code()));
    } else {
      Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
    }
  }

  // Check if next frame is an arguments adaptor frame.
  Register caller_args_count_reg = scratch1;
  Label no_arguments_adaptor, formal_parameter_count_loaded;
  __ LoadP(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ LoadP(scratch3,
           MemOperand(scratch2, StandardFrameConstants::kContextOffset));
  __ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
  __ bne(&no_arguments_adaptor);

  // Drop current frame and load arguments count from arguments adaptor frame.
  __ LoadRR(fp, scratch2);
  __ LoadP(caller_args_count_reg,
           MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(caller_args_count_reg);
  __ b(&formal_parameter_count_loaded);

  __ bind(&no_arguments_adaptor);
  // Load caller's formal parameter count
  __ mov(caller_args_count_reg, Operand(info()->literal()->parameter_count()));

  __ bind(&formal_parameter_count_loaded);
  __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);

  Comment(";;; }");
}

void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  HInvokeFunction* hinstr = instr->hydrogen();
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->function()).is(r3));
  DCHECK(instr->HasPointerMap());

  bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;

  if (is_tail_call) {
    DCHECK(!info()->saves_caller_doubles());
    ParameterCount actual(instr->arity());
    // It is safe to use r5, r6 and r7 as scratch registers here given that
    // 1) we are not going to return to caller function anyway,
    // 2) r5 (new.target) will be initialized below.
    PrepareForTailCall(actual, r5, r6, r7);
  }

  Handle<JSFunction> known_function = hinstr->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount actual(instr->arity());
    InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
    __ InvokeFunction(r3, no_reg, actual, flag, generator);
  } else {
    CallKnownFunction(known_function, hinstr->formal_parameter_count(),
                      instr->arity(), is_tail_call, instr);
  }
}

void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
  DCHECK(ToRegister(instr->result()).is(r2));

  if (instr->hydrogen()->IsTailCall()) {
    if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);

    if (instr->target()->IsConstantOperand()) {
      LConstantOperand* target = LConstantOperand::cast(instr->target());
      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      __ Jump(code, RelocInfo::CODE_TARGET);
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      __ AddP(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
      __ JumpToJSEntry(ip);
    }
  } else {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

    if (instr->target()->IsConstantOperand()) {
      LConstantOperand* target = LConstantOperand::cast(instr->target());
      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
      __ Call(code, RelocInfo::CODE_TARGET);
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      generator.BeforeCall(__ CallSize(target));
      __ AddP(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
      __ CallJSEntry(ip);
    }
    generator.AfterCall();
  }
}

void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->constructor()).is(r3));
  DCHECK(ToRegister(instr->result()).is(r2));

  __ mov(r2, Operand(instr->arity()));
  __ Move(r4, instr->hydrogen()->site());

  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;
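  // The elements kind is already fixed in this optimized code, so if the
  // kind would normally keep collecting allocation-site feedback, the stubs
  // below are told to skip it.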

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;
      // A single argument is the requested length; a non-zero length
      // produces holes, so look at the first argument to decide whether the
      // holey kind is needed.
      __ LoadP(r7, MemOperand(sp, 0));
      __ CmpP(r7, Operand::Zero());
      __ beq(&packed_case, Label::kNear);

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(isolate(), holey_kind,
                                              override_mode);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      __ b(&done, Label::kNear);
      __ bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
    __ bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(isolate());
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}

void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  CallRuntime(instr->function(), instr->arity(), instr);
}

void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register function = ToRegister(instr->function());
  Register code_object = ToRegister(instr->code_object());
  __ lay(code_object,
         MemOperand(code_object, Code::kHeaderSize - kHeapObjectTag));
  __ StoreP(code_object,
            FieldMemOperand(function, JSFunction::kCodeEntryOffset), r0);
}

void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register result = ToRegister(instr->result());
  Register base = ToRegister(instr->base_object());
  if (instr->offset()->IsConstantOperand()) {
    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
    __ lay(result, MemOperand(base, ToInteger32(offset)));
  } else {
    Register offset = ToRegister(instr->offset());
    __ lay(result, MemOperand(base, offset));
  }
}

void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  HStoreNamedField* hinstr = instr->hydrogen();
  Representation representation = instr->representation();

  Register object = ToRegister(instr->object());
  Register scratch = scratch0();
  HObjectAccess access = hinstr->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    Register value = ToRegister(instr->value());
    MemOperand operand = MemOperand(object, offset);
    __ StoreRepresentation(value, operand, representation, r0);
    return;
  }

  __ AssertNotSmi(object);

#if V8_TARGET_ARCH_S390X
  DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
         IsInteger32(LConstantOperand::cast(instr->value())));
#else
  DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
         IsSmi(LConstantOperand::cast(instr->value())));
#endif
  if (!FLAG_unbox_double_fields && representation.IsDouble()) {
    DCHECK(access.IsInobject());
    DCHECK(!hinstr->has_transition());
    DCHECK(!hinstr->NeedsWriteBarrier());
    DoubleRegister value = ToDoubleRegister(instr->value());
    DCHECK(offset >= 0);
    __ std(value, FieldMemOperand(object, offset));
    return;
  }

  if (hinstr->has_transition()) {
    Handle<Map> transition = hinstr->transition_map();
    AddDeprecationDependency(transition);
    __ mov(scratch, Operand(transition));
    __ StoreP(scratch, FieldMemOperand(object, HeapObject::kMapOffset), r0);
    if (hinstr->NeedsWriteBarrierForMap()) {
      Register temp = ToRegister(instr->temp());
      // Update the write barrier for the map field.
      __ RecordWriteForMap(object, scratch, temp, GetLinkRegisterState(),
                           kSaveFPRegs);
    }
  }

  // Do the store.
  Register record_dest = object;
  Register record_value = no_reg;
  Register record_scratch = scratch;
#if V8_TARGET_ARCH_S390X
  if (FLAG_unbox_double_fields && representation.IsDouble()) {
    DCHECK(access.IsInobject());
    DoubleRegister value = ToDoubleRegister(instr->value());
    __ std(value, FieldMemOperand(object, offset));
    if (hinstr->NeedsWriteBarrier()) {
      record_value = ToRegister(instr->value());
    }
  } else {
    if (representation.IsSmi() &&
        hinstr->value()->representation().IsInteger32()) {
      DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
      // 64-bit Smi optimization
      // Store int value directly to upper half of the smi.
      offset = SmiWordOffset(offset);
      representation = Representation::Integer32();
    }
#endif
    if (access.IsInobject()) {
      Register value = ToRegister(instr->value());
      MemOperand operand = FieldMemOperand(object, offset);
      __ StoreRepresentation(value, operand, representation, r0);
      record_value = value;
    } else {
      Register value = ToRegister(instr->value());
      __ LoadP(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
      MemOperand operand = FieldMemOperand(scratch, offset);
      __ StoreRepresentation(value, operand, representation, r0);
      record_dest = scratch;
      record_value = value;
      record_scratch = object;
    }
#if V8_TARGET_ARCH_S390X
  }
#endif

  if (hinstr->NeedsWriteBarrier()) {
    __ RecordWriteField(record_dest, offset, record_value, record_scratch,
                        GetLinkRegisterState(), kSaveFPRegs,
                        EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(),
                        hinstr->PointersToHereCheckForValue());
  }
}

void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);

  __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
  Handle<Code> ic =
      CodeFactory::StoreICInOptimizedCode(isolate(), instr->language_mode())
          .code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  Representation representation = instr->hydrogen()->length()->representation();
  DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
  DCHECK(representation.IsSmiOrInteger32());

  Condition cc = instr->hydrogen()->allow_equality() ? lt : le;
  if (instr->length()->IsConstantOperand()) {
    int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
    Register index = ToRegister(instr->index());
    if (representation.IsSmi()) {
      __ CmpLogicalP(index, Operand(Smi::FromInt(length)));
    } else {
      __ CmpLogical32(index, Operand(length));
    }
    cc = CommuteCondition(cc);
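    // The deopt condition is phrased for the operand order (length, index);
    // since the comparison above is emitted as (index, length), the
    // condition must be commuted to preserve its meaning.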
  } else if (instr->index()->IsConstantOperand()) {
    int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
    Register length = ToRegister(instr->length());
    if (representation.IsSmi()) {
      __ CmpLogicalP(length, Operand(Smi::FromInt(index)));
    } else {
      __ CmpLogical32(length, Operand(index));
    }
  } else {
    Register index = ToRegister(instr->index());
    Register length = ToRegister(instr->length());
    if (representation.IsSmi()) {
      __ CmpLogicalP(length, index);
    } else {
      __ CmpLogical32(length, index);
    }
  }
  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
    Label done;
    __ b(NegateCondition(cc), &done, Label::kNear);
    __ stop("eliminated bounds check failed");
    __ bind(&done);
  } else {
    DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
  }
}

void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
  Register external_pointer = ToRegister(instr->elements());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
  bool keyMaybeNegative = instr->hydrogen()->IsDehoisted();
  int base_offset = instr->base_offset();

  if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
    Register address = scratch0();
    DoubleRegister value(ToDoubleRegister(instr->value()));
    if (key_is_constant) {
      if (constant_key != 0) {
        base_offset += constant_key << element_size_shift;
        if (!is_int20(base_offset)) {
          __ mov(address, Operand(base_offset));
          __ AddP(address, external_pointer);
        } else {
          __ AddP(address, external_pointer, Operand(base_offset));
        }
        base_offset = 0;
      } else {
        address = external_pointer;
      }
    } else {
      __ IndexToArrayOffset(address, key, element_size_shift, key_is_smi,
                            keyMaybeNegative);
      __ AddP(address, external_pointer);
    }
    if (elements_kind == FLOAT32_ELEMENTS) {
      __ ledbr(double_scratch0(), value);
      __ StoreFloat32(double_scratch0(), MemOperand(address, base_offset));
    } else {  // Storing doubles, not floats.
      __ StoreDouble(value, MemOperand(address, base_offset));
    }
  } else {
    Register value(ToRegister(instr->value()));
    MemOperand mem_operand =
        PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
                            constant_key, element_size_shift, base_offset,
                            keyMaybeNegative);
    switch (elements_kind) {
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
      case INT8_ELEMENTS:
        if (key_is_constant) {
          __ StoreByte(value, mem_operand, r0);
        } else {
          __ StoreByte(value, mem_operand);
        }
        break;
      case INT16_ELEMENTS:
      case UINT16_ELEMENTS:
        if (key_is_constant) {
          __ StoreHalfWord(value, mem_operand, r0);
        } else {
          __ StoreHalfWord(value, mem_operand);
        }
        break;
      case INT32_ELEMENTS:
      case UINT32_ELEMENTS:
        if (key_is_constant) {
          __ StoreW(value, mem_operand, r0);
        } else {
          __ StoreW(value, mem_operand);
        }
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
      case FAST_STRING_WRAPPER_ELEMENTS:
      case SLOW_STRING_WRAPPER_ELEMENTS:
      case NO_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}

void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
  DoubleRegister value = ToDoubleRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register key = no_reg;
  Register scratch = scratch0();
  DoubleRegister double_scratch = double_scratch0();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;

  // Calculate the effective address of the slot in the array to store the
  // double value.
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
  bool keyMaybeNegative = instr->hydrogen()->IsDehoisted();
  int base_offset = instr->base_offset() + constant_key * kDoubleSize;
  bool use_scratch = false;
  intptr_t address_offset = base_offset;

  if (key_is_constant) {
    // Memory references support up to 20-bit signed displacement in RXY form.
    if (!is_int20((address_offset))) {
      __ mov(scratch, Operand(address_offset));
      address_offset = 0;
      use_scratch = true;
    }
  } else {
    use_scratch = true;
    __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi,
                          keyMaybeNegative);
    // Memory references support up to 20-bit signed displacement in RXY form.
    if (!is_int20((address_offset))) {
      __ AddP(scratch, Operand(address_offset));
      address_offset = 0;
    }
  }

  if (instr->NeedsCanonicalization()) {
    // Turn potential sNaN value into qNaN.
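    // Canonicalizing also guarantees that no stored NaN can alias the hole
    // NaN bit pattern that holey double arrays reserve for missing elements.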
    __ CanonicalizeNaN(double_scratch, value);
    DCHECK(address_offset >= 0);
    if (use_scratch)
      __ std(double_scratch, MemOperand(scratch, elements, address_offset));
    else
      __ std(double_scratch, MemOperand(elements, address_offset));
  } else {
    if (use_scratch)
      __ std(value, MemOperand(scratch, elements, address_offset));
    else
      __ std(value, MemOperand(elements, address_offset));
  }
}

void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
  HStoreKeyed* hinstr = instr->hydrogen();
  Register value = ToRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
  Register scratch = scratch0();
  int offset = instr->base_offset();

  // Do the store.
  if (instr->key()->IsConstantOperand()) {
    DCHECK(!hinstr->NeedsWriteBarrier());
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset += ToInteger32(const_operand) * kPointerSize;
  } else {
    // Even though the HLoadKeyed instruction forces the input
    // representation for the key to be an integer, the input gets replaced
    // during bound check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
    if (hinstr->key()->representation().IsSmi()) {
      __ SmiToPtrArrayOffset(scratch, key);
    } else {
      if (instr->hydrogen()->IsDehoisted() ||
          !CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
#if V8_TARGET_ARCH_S390X
4224         // If array access is dehoisted, the key, being an int32, can contain
4225         // a negative value, as needs to be sign-extended to 64-bit for
4226         // memory access.
4227         __ lgfr(key, key);
4228 #endif
4229         __ ShiftLeftP(scratch, key, Operand(kPointerSizeLog2));
4230       } else {
4231         // Small optimization to reduce pathlength.  After Bounds Check,
4232         // the key is guaranteed to be non-negative.  Leverage RISBG,
4233         // which also performs zero-extension.
4234         __ risbg(scratch, key, Operand(32 - kPointerSizeLog2),
4235                  Operand(63 - kPointerSizeLog2), Operand(kPointerSizeLog2),
4236                  true);
4237       }
4238     }
4239   }
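
  // RISBG sketch: the key is rotated left by kPointerSizeLog2 and only the
  // selected bits (29..60 in IBM numbering, bit 0 = MSB, for
  // kPointerSizeLog2 == 3) are kept, the rest zeroed. That is exactly the
  // zero-extended 32-bit key shifted left by 3, so a key of 5 becomes a
  // byte offset of 40 in a single instruction.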

  Representation representation = hinstr->value()->representation();

#if V8_TARGET_ARCH_S390X
  // 64-bit Smi optimization
  if (representation.IsInteger32()) {
    DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
    DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
    // Store int value directly to upper half of the smi.
    offset = SmiWordOffset(offset);
  }
#endif
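
  // Sketch of why this works, assuming V8's 64-bit smi layout: a smi is the
  // int32 payload shifted left by 32 (Smi::FromInt(5) == 0x0000000500000000),
  // and on big-endian s390x that payload word sits at the lower address. A
  // 32-bit store of the raw integer therefore updates an already initialized
  // smi slot in place, the low word being zero already.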

  if (instr->key()->IsConstantOperand()) {
    __ StoreRepresentation(value, MemOperand(elements, offset), representation,
                           scratch);
  } else {
    __ StoreRepresentation(value, MemOperand(scratch, elements, offset),
                           representation, r0);
  }

  if (hinstr->NeedsWriteBarrier()) {
    SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
                                ? OMIT_SMI_CHECK
                                : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    if (instr->key()->IsConstantOperand()) {
      __ lay(key, MemOperand(elements, offset));
    } else {
      __ lay(key, MemOperand(scratch, elements, offset));
    }
    __ RecordWrite(elements, key, value, GetLinkRegisterState(), kSaveFPRegs,
                   EMIT_REMEMBERED_SET, check_needed,
                   hinstr->PointersToHereCheckForValue());
  }
}

void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
  // By cases: external array, fast double, or fast (tagged) elements.
  if (instr->is_fixed_typed_array()) {
    DoStoreKeyedExternalArray(instr);
  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
    DoStoreKeyedFixedDoubleArray(instr);
  } else {
    DoStoreKeyedFixedArray(instr);
  }
}

void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);

  Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
                        isolate(), instr->language_mode())
                        .code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
  class DeferredMaybeGrowElements final : public LDeferredCode {
   public:
    DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LMaybeGrowElements* instr_;
  };

  Register result = r2;
  DeferredMaybeGrowElements* deferred =
      new (zone()) DeferredMaybeGrowElements(this, instr);
  LOperand* key = instr->key();
  LOperand* current_capacity = instr->current_capacity();

  DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
  DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
  DCHECK(key->IsConstantOperand() || key->IsRegister());
  DCHECK(current_capacity->IsConstantOperand() ||
         current_capacity->IsRegister());

  if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
    int32_t constant_capacity =
        ToInteger32(LConstantOperand::cast(current_capacity));
    if (constant_key >= constant_capacity) {
      // Deferred case.
      __ b(deferred->entry());
    }
  } else if (key->IsConstantOperand()) {
    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
    __ Cmp32(ToRegister(current_capacity), Operand(constant_key));
    __ ble(deferred->entry());
  } else if (current_capacity->IsConstantOperand()) {
    int32_t constant_capacity =
        ToInteger32(LConstantOperand::cast(current_capacity));
    __ Cmp32(ToRegister(key), Operand(constant_capacity));
    __ bge(deferred->entry());
  } else {
    __ Cmp32(ToRegister(key), ToRegister(current_capacity));
    __ bge(deferred->entry());
  }
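
  // All four cases branch to the deferred path exactly when key >= capacity,
  // i.e. when the store would land past the end of the current backing
  // store: with capacity 8, a key of 8 grows, while a key of 7 falls through
  // to the fast path below.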

  if (instr->elements()->IsRegister()) {
    __ Move(result, ToRegister(instr->elements()));
  } else {
    __ LoadP(result, ToMemOperand(instr->elements()));
  }

  __ bind(deferred->exit());
}

void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register result = r2;
  __ LoadImmP(result, Operand::Zero());

  // We have to call a stub.
  {
    PushSafepointRegistersScope scope(this);
    if (instr->object()->IsRegister()) {
      __ Move(result, ToRegister(instr->object()));
    } else {
      __ LoadP(result, ToMemOperand(instr->object()));
    }

    LOperand* key = instr->key();
    if (key->IsConstantOperand()) {
      LConstantOperand* constant_key = LConstantOperand::cast(key);
      int32_t int_key = ToInteger32(constant_key);
      if (Smi::IsValid(int_key)) {
        __ LoadSmiLiteral(r5, Smi::FromInt(int_key));
      } else {
        // We should never get here at runtime because there is a smi check on
        // the key before this point.
        __ stop("expected smi");
      }
    } else {
      __ SmiTag(r5, ToRegister(key));
    }

    GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
                               instr->hydrogen()->kind());
    __ CallStub(&stub);
    RecordSafepointWithLazyDeopt(
        instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    __ StoreToSafepointRegisterSlot(result, result);
  }

  // Deopt on smi, which means the elements array changed to dictionary mode.
  __ TestIfSmi(result);
  DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
}

void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object_reg = ToRegister(instr->object());
  Register scratch = scratch0();

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;
  __ LoadP(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
  __ CmpP(scratch, Operand(from_map));
  __ bne(&not_applicable);

  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    Register new_map_reg = ToRegister(instr->new_map_temp());
    __ mov(new_map_reg, Operand(to_map));
    __ StoreP(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
    // Write barrier.
    __ RecordWriteForMap(object_reg, new_map_reg, scratch,
                         GetLinkRegisterState(), kDontSaveFPRegs);
  } else {
    DCHECK(ToRegister(instr->context()).is(cp));
    DCHECK(object_reg.is(r2));
    PushSafepointRegistersScope scope(this);
    __ Move(r3, to_map);
    TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
    __ CallStub(&stub);
    RecordSafepointWithRegisters(instr->pointer_map(), 0,
                                 Safepoint::kLazyDeopt);
  }
  __ bind(&not_applicable);
}

void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());
  Label no_memento_found;
  __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
  DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
  __ bind(&no_memento_found);
}

void LCodeGen::DoStringAdd(LStringAdd* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).is(r3));
  DCHECK(ToRegister(instr->right()).is(r2));
  StringAddStub stub(isolate(), instr->hydrogen()->flags(),
                     instr->hydrogen()->pretenure_flag());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt final : public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new (zone()) DeferredStringCharCodeAt(this, instr);

  StringCharLoadGenerator::Generate(
      masm(), ToRegister(instr->string()), ToRegister(instr->index()),
      ToRegister(instr->result()), deferred->entry());
  __ bind(deferred->exit());
}

void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ LoadImmP(result, Operand::Zero());

  PushSafepointRegistersScope scope(this);
  __ push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  if (instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    __ LoadSmiLiteral(scratch, Smi::FromInt(const_index));
    __ push(scratch);
  } else {
    Register index = ToRegister(instr->index());
    __ SmiTag(index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
                          instr->context());
  __ AssertSmi(r2);
  __ SmiUntag(r2);
  __ StoreToSafepointRegisterSlot(r2, result);
}

void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode final : public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override {
      codegen()->DoDeferredStringCharFromCode(instr_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new (zone()) DeferredStringCharFromCode(this, instr);

  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  DCHECK(!char_code.is(result));

  __ CmpLogicalP(char_code, Operand(String::kMaxOneByteCharCode));
  __ bgt(deferred->entry());
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ ShiftLeftP(r0, char_code, Operand(kPointerSizeLog2));
  __ AddP(result, r0);
  __ LoadP(result, FieldMemOperand(result, FixedArray::kHeaderSize));
  __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
  __ beq(deferred->entry());
  __ bind(deferred->exit());
}

void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ LoadImmP(result, Operand::Zero());

  PushSafepointRegistersScope scope(this);
  __ SmiTag(char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
                          instr->context());
  __ StoreToSafepointRegisterSlot(r2, result);
}

void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsRegister() || input->IsStackSlot());
  LOperand* output = instr->result();
  DCHECK(output->IsDoubleRegister());
  if (input->IsStackSlot()) {
    Register scratch = scratch0();
    __ LoadP(scratch, ToMemOperand(input));
    __ ConvertIntToDouble(scratch, ToDoubleRegister(output));
  } else {
    __ ConvertIntToDouble(ToRegister(input), ToDoubleRegister(output));
  }
}

void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();
  __ ConvertUnsignedIntToDouble(ToRegister(input), ToDoubleRegister(output));
}

void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  class DeferredNumberTagI final : public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override {
      codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
                                       instr_->temp2(), SIGNED_INT32);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagI* instr_;
  };

  Register src = ToRegister(instr->value());
  Register dst = ToRegister(instr->result());

  DeferredNumberTagI* deferred = new (zone()) DeferredNumberTagI(this, instr);
#if V8_TARGET_ARCH_S390X
  __ SmiTag(dst, src);
#else
  // Add src to itself to detect smi overflow.
  __ Add32(dst, src, src);
  __ b(overflow, deferred->entry());
#endif
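  // Worked example (31-bit smis): tagging computes value << 1 == value +
  // value. For src == 0x40000000 (2^30, one past Smi::kMaxValue) the
  // addition wraps to 0x80000000 and sets the overflow flag, so the
  // deferred heap-number path is taken instead of producing a bogus smi.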
  __ bind(deferred->exit());
}

void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU final : public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override {
      codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
                                       instr_->temp2(), UNSIGNED_INT32);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagU* instr_;
  };

  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  DeferredNumberTagU* deferred = new (zone()) DeferredNumberTagU(this, instr);
  __ CmpLogicalP(input, Operand(Smi::kMaxValue));
  __ bgt(deferred->entry());
  __ SmiTag(result, input);
  __ bind(deferred->exit());
}

void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, LOperand* value,
                                     LOperand* temp1, LOperand* temp2,
                                     IntegerSignedness signedness) {
  Label done, slow;
  Register src = ToRegister(value);
  Register dst = ToRegister(instr->result());
  Register tmp1 = scratch0();
  Register tmp2 = ToRegister(temp1);
  Register tmp3 = ToRegister(temp2);
  DoubleRegister dbl_scratch = double_scratch0();

  if (signedness == SIGNED_INT32) {
    // There was overflow, so bits 30 and 31 of the original integer
    // disagree. Try to allocate a heap number in new space and store
    // the value in there. If that fails, call the runtime system.
    if (dst.is(src)) {
      __ SmiUntag(src, dst);
      __ xilf(src, Operand(HeapNumber::kSignMask));
    }
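    // Recovery sketch: dst holds the wrapped value 2*v from the overflowed
    // tag. The arithmetic shift right by one halves it, and flipping the
    // sign bit undoes the wrap. E.g. v == 2^30: 2*v wraps to 0x80000000,
    // SmiUntag gives 0xC0000000, and the XOR with HeapNumber::kSignMask
    // (0x80000000) restores 0x40000000 == v.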
    __ ConvertIntToDouble(src, dbl_scratch);
  } else {
    __ ConvertUnsignedIntToDouble(src, dbl_scratch);
  }

  if (FLAG_inline_new) {
    __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
    __ b(&done);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  {
    // TODO(3095996): Put a valid pointer value in the stack slot where the
    // result register is stored, as this register is in the pointer map, but
    // contains an integer value.
    __ LoadImmP(dst, Operand::Zero());

    // Preserve the value of all registers.
    PushSafepointRegistersScope scope(this);

    // NumberTagI and NumberTagD use the context from the frame, rather than
    // the environment's HContext or HInlinedContext value.
    // They only call Runtime::kAllocateHeapNumber.
    // The corresponding HChange instructions are added in a phase that does
    // not have easy access to the local context.
    __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
    RecordSafepointWithRegisters(instr->pointer_map(), 0,
                                 Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(r2, dst);
  }

  // Done. Put the value in dbl_scratch into the value of the allocated heap
  // number.
  __ bind(&done);
  __ StoreDouble(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
}

void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD final : public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagD* instr_;
  };

  DoubleRegister input_reg = ToDoubleRegister(instr->value());
  Register scratch = scratch0();
  Register reg = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());

  DeferredNumberTagD* deferred = new (zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
  } else {
    __ b(deferred->entry());
  }
  __ bind(deferred->exit());
  __ StoreDouble(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
}

void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ LoadImmP(reg, Operand::Zero());

  PushSafepointRegistersScope scope(this);
  // NumberTagI and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(instr->pointer_map(), 0,
                               Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(r2, reg);
}

void LCodeGen::DoSmiTag(LSmiTag* instr) {
  HChange* hchange = instr->hydrogen();
  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      hchange->value()->CheckFlag(HValue::kUint32)) {
    __ TestUnsignedSmiCandidate(input, r0);
    DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, cr0);
  }
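  // E.g. a uint32 input of 0x80000000 exceeds Smi::kMaxValue on every
  // target, so the candidate test above fails and we deoptimize rather than
  // silently produce a negative smi.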
#if !V8_TARGET_ARCH_S390X
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      !hchange->value()->CheckFlag(HValue::kUint32)) {
    __ SmiTagCheckOverflow(output, input, r0);
    DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
  } else {
#endif
    __ SmiTag(output, input);
#if !V8_TARGET_ARCH_S390X
  }
#endif
}

void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  if (instr->needs_check()) {
    __ tmll(input, Operand(kHeapObjectTag));
    DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
    __ SmiUntag(result, input);
  } else {
    __ SmiUntag(result, input);
  }
}

void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
                                DoubleRegister result_reg,
                                NumberUntagDMode mode) {
  bool can_convert_undefined_to_nan =
      instr->hydrogen()->can_convert_undefined_to_nan();
  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();

  Register scratch = scratch0();
  DCHECK(!result_reg.is(double_scratch0()));

  Label convert, load_smi, done;

  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);

    // Heap number map check.
    __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
    __ CmpP(scratch, RootMemOperand(Heap::kHeapNumberMapRootIndex));

    if (can_convert_undefined_to_nan) {
      __ bne(&convert, Label::kNear);
    } else {
      DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
    }
    // Load the heap number.
    __ ld(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
    if (deoptimize_on_minus_zero) {
      __ TestDoubleIsMinusZero(result_reg, scratch, ip);
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
    }
    __ b(&done, Label::kNear);
    if (can_convert_undefined_to_nan) {
      __ bind(&convert);
      // Convert undefined (and hole) to NaN.
      __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
      DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
      __ ld(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
      __ b(&done, Label::kNear);
    }
  } else {
    __ SmiUntag(scratch, input_reg);
    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
  }
  // Smi to double register conversion.
  __ bind(&load_smi);
  // scratch: untagged value of input_reg
  __ ConvertIntToDouble(scratch, result_reg);
  __ bind(&done);
}

void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
  Register input_reg = ToRegister(instr->value());
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->temp());
  DoubleRegister double_scratch = double_scratch0();
  DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());

  DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
  DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));

  Label done;

  // Heap number map check.
  __ LoadP(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ CompareRoot(scratch1, Heap::kHeapNumberMapRootIndex);

  if (instr->truncating()) {
    // Performs a truncating conversion of a floating point number as used by
    // the JS bitwise operations.
    Label no_heap_number, check_bools, check_false;
    __ bne(&no_heap_number, Label::kNear);
    __ LoadRR(scratch2, input_reg);
    __ TruncateHeapNumberToI(input_reg, scratch2);
    __ b(&done, Label::kNear);

    // Check for oddballs. Undefined/False is converted to zero and True to
    // one for truncating conversions.
    __ bind(&no_heap_number);
    __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
    __ bne(&check_bools);
    __ LoadImmP(input_reg, Operand::Zero());
    __ b(&done, Label::kNear);

    __ bind(&check_bools);
    __ CompareRoot(input_reg, Heap::kTrueValueRootIndex);
    __ bne(&check_false, Label::kNear);
    __ LoadImmP(input_reg, Operand(1));
    __ b(&done, Label::kNear);

    __ bind(&check_false);
    __ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean);
    __ LoadImmP(input_reg, Operand::Zero());
  } else {
    // Deoptimize if we don't have a heap number.
    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);

    __ ld(double_scratch2,
          FieldMemOperand(input_reg, HeapNumber::kValueOffset));
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // Preserve the heap number pointer in scratch2 for the minus-zero
      // check below.
      __ LoadRR(scratch2, input_reg);
    }
    __ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1,
                             double_scratch);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ CmpP(input_reg, Operand::Zero());
      __ bne(&done, Label::kNear);
      __ TestHeapNumberSign(scratch2, scratch1);
      DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
    }
  }
  __ bind(&done);
}

void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI final : public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  DCHECK(input->Equals(instr->result()));

  Register input_reg = ToRegister(input);

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(input_reg);
  } else {
    DeferredTaggedToI* deferred = new (zone()) DeferredTaggedToI(this, instr);

    // Branch to deferred code if the input is a HeapObject.
    __ JumpIfNotSmi(input_reg, deferred->entry());

    __ SmiUntag(input_reg);
    __ bind(deferred->exit());
  }
}

void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  LOperand* result = instr->result();
  DCHECK(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  DoubleRegister result_reg = ToDoubleRegister(result);

  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
                              ? NUMBER_CANDIDATE_IS_SMI
                              : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  EmitNumberUntagD(instr, input_reg, result_reg, mode);
}

void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  DoubleRegister double_input = ToDoubleRegister(instr->value());
  DoubleRegister double_scratch = double_scratch0();

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
                             double_scratch);
    // Deoptimize if the input wasn't an int32 (inside a double).
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ CmpP(result_reg, Operand::Zero());
      __ bne(&done, Label::kNear);
      __ TestDoubleSign(double_input, scratch1);
      DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
      __ bind(&done);
    }
  }
}

void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  DoubleRegister double_input = ToDoubleRegister(instr->value());
  DoubleRegister double_scratch = double_scratch0();

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
                             double_scratch);
    // Deoptimize if the input wasn't an int32 (inside a double).
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ CmpP(result_reg, Operand::Zero());
      __ bne(&done, Label::kNear);
      __ TestDoubleSign(double_input, scratch1);
      DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
      __ bind(&done);
    }
  }
#if V8_TARGET_ARCH_S390X
  __ SmiTag(result_reg);
#else
  __ SmiTagCheckOverflow(result_reg, r0);
  DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
#endif
}

void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  __ TestIfSmi(ToRegister(input));
  DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
}

void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    LOperand* input = instr->value();
    __ TestIfSmi(ToRegister(input));
    DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
  }
}

void LCodeGen::DoCheckArrayBufferNotNeutered(
    LCheckArrayBufferNotNeutered* instr) {
  Register view = ToRegister(instr->view());
  Register scratch = scratch0();

  __ LoadP(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
  __ LoadlW(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
  __ And(r0, scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
  DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds, cr0);
}

void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ CmpLogicalByte(FieldMemOperand(scratch, Map::kInstanceTypeOffset),
                      Operand(first));

    // If there is only one type in the interval, check for equality.
    if (first == last) {
      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
    } else {
      DeoptimizeIf(lt, instr, Deoptimizer::kWrongInstanceType);
      // Omit the check for the last type.
      if (last != LAST_TYPE) {
        __ CmpLogicalByte(FieldMemOperand(scratch, Map::kInstanceTypeOffset),
                          Operand(last));
        DeoptimizeIf(gt, instr, Deoptimizer::kWrongInstanceType);
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    __ LoadlB(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));

    if (base::bits::IsPowerOfTwo32(mask)) {
      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
      __ AndP(scratch, Operand(mask));
      DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType);
    } else {
      __ AndP(scratch, Operand(mask));
      __ CmpP(scratch, Operand(tag));
      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
    }
  }
}

void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Register reg = ToRegister(instr->value());
  Handle<HeapObject> object = instr->hydrogen()->object().handle();
  AllowDeferredHandleDereference smi_check;
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    __ mov(ip, Operand(cell));
    __ CmpP(reg, FieldMemOperand(ip, Cell::kValueOffset));
  } else {
    __ CmpP(reg, Operand(object));
  }
  DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
}

void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  Register temp = ToRegister(instr->temp());
  {
    PushSafepointRegistersScope scope(this);
    __ push(object);
    __ LoadImmP(cp, Operand::Zero());
    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
    RecordSafepointWithRegisters(instr->pointer_map(), 1,
                                 Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(r2, temp);
  }
  __ TestIfSmi(temp);
  DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, cr0);
}

void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps final : public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
    void Generate() override {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    LInstruction* instr() override { return instr_; }

   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->IsStabilityCheck()) {
    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
    for (int i = 0; i < maps->size(); ++i) {
      AddStabilityDependency(maps->at(i).handle());
    }
    return;
  }

  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  Register reg = ToRegister(input);

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->HasMigrationTarget()) {
    deferred = new (zone()) DeferredCheckMaps(this, instr, reg);
    __ bind(deferred->check_maps());
  }

  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
  Label success;
  for (int i = 0; i < maps->size() - 1; i++) {
    Handle<Map> map = maps->at(i).handle();
    __ CompareMap(reg, map, &success);
    __ beq(&success);
  }

  Handle<Map> map = maps->at(maps->size() - 1).handle();
  __ CompareMap(reg, map, &success);
  if (instr->hydrogen()->HasMigrationTarget()) {
    __ bne(deferred->entry());
  } else {
    DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
  }

  __ bind(&success);
}

void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
}

void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  Register unclamped_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampUint8(result_reg, unclamped_reg);
}

void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  Register scratch = scratch0();
  Register input_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
  Label is_smi, done, heap_number;

  // Both smi and heap number cases are handled.
  __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);

  // Check for heap number
  __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ CmpP(scratch, Operand(factory()->heap_number_map()));
  __ beq(&heap_number, Label::kNear);

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  __ CmpP(input_reg, Operand(factory()->undefined_value()));
  DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
  __ LoadImmP(result_reg, Operand::Zero());
  __ b(&done, Label::kNear);

  // Heap number
  __ bind(&heap_number);
  __ ld(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
  __ b(&done, Label::kNear);

  // smi
  __ bind(&is_smi);
  __ ClampUint8(result_reg, result_reg);

  __ bind(&done);
}

void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
  DoubleRegister value_reg = ToDoubleRegister(instr->value());
  Register result_reg = ToRegister(instr->result());
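  // Sketch: lgdr copies the raw 64-bit bit image of the double into the
  // GPR. For 1.0 (bits 0x3FF0000000000000), HIGH yields 0x3FF00000 after
  // the logical shift right by 32, and LOW yields 0x00000000 after llgfr
  // zero-extends the low word.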
  __ lgdr(result_reg, value_reg);
  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
    __ srlg(result_reg, result_reg, Operand(32));
  } else {
    __ llgfr(result_reg, result_reg);
  }
}

void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate final : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredAllocate(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred = new (zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp1());
  Register scratch2 = ToRegister(instr->temp2());

  // Allocate memory for the object.
  AllocationFlags flags = NO_ALLOCATION_FLAGS;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE);
  }

  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
    flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
  }

  DCHECK(!instr->hydrogen()->IsAllocationFolded());

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    CHECK(size <= Page::kMaxRegularHeapObjectSize);
    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
  }

  __ bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ LoadIntLiteral(scratch, size);
    } else {
      scratch = ToRegister(instr->size());
    }
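    // The loop below walks from offset size - kPointerSize down to 0,
    // storing the one-pointer filler map into every slot of the untagged
    // object (-kHeapObjectTag strips the tag from result). E.g. with
    // size == 16 on s390x it fills the words at offsets 8 and 0.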
    __ lay(scratch, MemOperand(scratch, -kPointerSize));
    Label loop;
    __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
    __ bind(&loop);
    __ StoreP(scratch2, MemOperand(scratch, result, -kHeapObjectTag));
#if V8_TARGET_ARCH_S390X
    __ lay(scratch, MemOperand(scratch, -kPointerSize));
#else
    // TODO(joransiu): Improve the following sequence.
    // Need to use AHI instead of LAY as the top nibble is not set with LAY,
    // causing an incorrect result with the signed compare.
    __ AddP(scratch, Operand(-kPointerSize));
#endif
    __ CmpP(scratch, Operand::Zero());
    __ bge(&loop);
  }
}

void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ LoadSmiLiteral(result, Smi::FromInt(0));

  PushSafepointRegistersScope scope(this);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    DCHECK(!size.is(result));
    __ SmiTag(size);
    __ push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
#if !V8_TARGET_ARCH_S390X
    if (size >= 0 && size <= Smi::kMaxValue) {
#endif
      __ Push(Smi::FromInt(size));
#if !V8_TARGET_ARCH_S390X
    } else {
      // We should never get here at runtime => abort.
      __ stop("invalid allocation size");
      return;
    }
#endif
  }

  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ Push(Smi::FromInt(flags));

  CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr,
                          instr->context());
  __ StoreToSafepointRegisterSlot(r2, result);

  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
    AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
    if (instr->hydrogen()->IsOldSpaceAllocation()) {
      DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
      allocation_flags =
          static_cast<AllocationFlags>(allocation_flags | PRETENURE);
    }
    // If the allocation folding dominator's allocate triggered a GC, the
    // allocation happened in the runtime. We have to reset the top pointer
    // to virtually undo the allocation.
    ExternalReference allocation_top =
        AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
    Register top_address = scratch0();
    __ SubP(r2, r2, Operand(kHeapObjectTag));
    __ mov(top_address, Operand(allocation_top));
    __ StoreP(r2, MemOperand(top_address));
    __ AddP(r2, r2, Operand(kHeapObjectTag));
  }
}

void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
  DCHECK(instr->hydrogen()->IsAllocationFolded());
  DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
  Register result = ToRegister(instr->result());
  Register scratch1 = ToRegister(instr->temp1());
  Register scratch2 = ToRegister(instr->temp2());

  AllocationFlags flags = ALLOCATION_FOLDED;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE);
  }
  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    CHECK(size <= Page::kMaxRegularHeapObjectSize);
    __ FastAllocate(size, result, scratch1, scratch2, flags);
  } else {
    Register size = ToRegister(instr->size());
    __ FastAllocate(size, result, scratch1, scratch2, flags);
  }
}

void LCodeGen::DoTypeof(LTypeof* instr) {
  DCHECK(ToRegister(instr->value()).is(r5));
  DCHECK(ToRegister(instr->result()).is(r2));
  Label end, do_call;
  Register value_register = ToRegister(instr->value());
  __ JumpIfNotSmi(value_register, &do_call);
  __ mov(r2, Operand(isolate()->factory()->number_string()));
  __ b(&end);
  __ bind(&do_call);
  TypeofStub stub(isolate());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ bind(&end);
}

void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());

  Condition final_branch_condition =
      EmitTypeofIs(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), input,
                   instr->type_literal());
  if (final_branch_condition != kNoCondition) {
    EmitBranch(instr, final_branch_condition);
  }
}

Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
                                 Register input, Handle<String> type_name) {
  Condition final_branch_condition = kNoCondition;
  Register scratch = scratch0();
  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    __ JumpIfSmi(input, true_label);
    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->string_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
    final_branch_condition = lt;

  } else if (String::Equals(type_name, factory->symbol_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ CompareRoot(input, Heap::kTrueValueRootIndex);
    __ beq(true_label);
    __ CompareRoot(input, Heap::kFalseValueRootIndex);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->undefined_string())) {
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    __ beq(false_label);
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ ExtractBit(r0, scratch, Map::kIsUndetectable);
    __ CmpP(r0, Operand::Zero());
    final_branch_condition = ne;

  } else if (String::Equals(type_name, factory->function_string())) {
    __ JumpIfSmi(input, false_label);
    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ AndP(scratch, scratch,
            Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
    __ CmpP(scratch, Operand(1 << Map::kIsCallable));
    final_branch_condition = eq;
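    // (A map that is callable but also undetectable fails the equality
    // above, so such objects do not report "function"; the undefined_string
    // branch reports them as "undefined" instead.)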

  } else if (String::Equals(type_name, factory->object_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    __ beq(true_label);
    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
    __ CompareObjectType(input, scratch, ip, FIRST_JS_RECEIVER_TYPE);
    __ blt(false_label);
    // Check for callable or undetectable objects => false.
    __ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ AndP(r0, scratch,
            Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
    __ CmpP(r0, Operand::Zero());
    final_branch_condition = eq;

// clang-format off
#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)        \
  } else if (String::Equals(type_name, factory->type##_string())) {  \
    __ JumpIfSmi(input, false_label);                                \
    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); \
    __ CompareRoot(scratch, Heap::k##Type##MapRootIndex);            \
    final_branch_condition = eq;
  SIMD128_TYPES(SIMD128_TYPE)
#undef SIMD128_TYPE
    // clang-format on

  } else {
    __ b(false_label);
  }

  return final_branch_condition;
}

void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK_EQ(0, padding_size % 2);
      while (padding_size > 0) {
        __ nop();
        padding_size -= 2;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}
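
// Padding sketch: with space_needed == 8 and only 4 bytes of code emitted
// since the last lazy-deopt point, two 2-byte nops are inserted so the
// deoptimizer always has room to patch in its call sequence.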
5483 
5484 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5485   last_lazy_deopt_pc_ = masm()->pc_offset();
5486   DCHECK(instr->HasEnvironment());
5487   LEnvironment* env = instr->environment();
5488   RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5489   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5490 }
5491 
5492 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5493   Deoptimizer::BailoutType type = instr->hydrogen()->type();
5494   // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
5495   // needed return address), even though the implementation of LAZY and EAGER is
5496   // now identical. When LAZY is eventually completely folded into EAGER, remove
5497   // the special case below.
5498   if (info()->IsStub() && type == Deoptimizer::EAGER) {
5499     type = Deoptimizer::LAZY;
5500   }
5501 
5502   DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type);
5503 }
5504 
5505 void LCodeGen::DoDummy(LDummy* instr) {
5506   // Nothing to see here, move on!
5507 }
5508 
5509 void LCodeGen::DoDummyUse(LDummyUse* instr) {
5510   // Nothing to see here, move on!
5511 }
5512 
5513 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5514   PushSafepointRegistersScope scope(this);
5515   LoadContextFromDeferred(instr->context());
5516   __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5517   RecordSafepointWithLazyDeopt(
5518       instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5519   DCHECK(instr->HasEnvironment());
5520   LEnvironment* env = instr->environment();
5521   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5522 }
5523 
5524 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5525   class DeferredStackCheck final : public LDeferredCode {
5526    public:
5527     DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5528         : LDeferredCode(codegen), instr_(instr) {}
5529     void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
5530     LInstruction* instr() override { return instr_; }
5531 
5532    private:
5533     LStackCheck* instr_;
5534   };
5535 
5536   DCHECK(instr->HasEnvironment());
5537   LEnvironment* env = instr->environment();
5538   // There is no LLazyBailout instruction for stack-checks. We have to
5539   // prepare for lazy deoptimization explicitly here.
5540   if (instr->hydrogen()->is_function_entry()) {
5541     // Perform stack overflow check.
5542     Label done;
5543     __ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex));
5544     __ bge(&done, Label::kNear);
5545     DCHECK(instr->context()->IsRegister());
5546     DCHECK(ToRegister(instr->context()).is(cp));
5547     CallCode(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET,
5548              instr);
5549     __ bind(&done);
5550   } else {
5551     DCHECK(instr->hydrogen()->is_backwards_branch());
5552     // Perform stack overflow check if this goto needs it before jumping.
5553     DeferredStackCheck* deferred_stack_check =
5554         new (zone()) DeferredStackCheck(this, instr);
5555     __ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex));
5556     __ blt(deferred_stack_check->entry());
5557     EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5558     __ bind(instr->done_label());
5559     deferred_stack_check->SetExit(instr->done_label());
5560     RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5561     // Don't record a deoptimization index for the safepoint here.
5562     // This will be done explicitly when emitting call and the safepoint in
5563     // the deferred code.
5564   }
5565 }
5566 
5567 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5568   // This is a pseudo-instruction that ensures that the environment here is
5569   // properly registered for deoptimization and records the assembler's PC
5570   // offset.
5571   LEnvironment* environment = instr->environment();
5572 
5573   // If the environment were already registered, we would have no way of
5574   // backpatching it with the spill slot operands.
5575   DCHECK(!environment->HasBeenRegistered());
5576   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5577 
5578   GenerateOsrPrologue();
5579 }
5580 
5581 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5582   Label use_cache, call_runtime;
5583   __ CheckEnumCache(&call_runtime);
5584 
5585   __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
5586   __ b(&use_cache);
5587 
5588   // Get the set of properties to enumerate.
5589   __ bind(&call_runtime);
5590   __ push(r2);
5591   CallRuntime(Runtime::kForInEnumerate, instr);
5592   __ bind(&use_cache);
5593 }
5594 
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ CmpSmiLiteral(result, Smi::FromInt(0), r0);
  __ bne(&load_cache, Label::kNear);
  __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ b(&done, Label::kNear);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  __ CmpP(result, Operand::Zero());
  DeoptimizeIf(eq, instr, Deoptimizer::kNoCache);

  __ bind(&done);
}

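// Deoptimizes with kWrongMap unless the object's map equals the expected
// map value.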
void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  __ CmpP(map, scratch0());
  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
}

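// Deferred slow path for LLoadFieldByIndex: calls Runtime::kLoadMutableDouble
// with the object and the still-encoded Smi index, then stores the heap
// number it returns (in r2) into the safepoint slot of |result|. cp is
// zeroed here, apparently because the runtime function needs no context.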
void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register result, Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object, index);
  __ LoadImmP(cp, Operand::Zero());
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(instr->pointer_map(), 2,
                               Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(r2, result);
}

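// Loads a field given an encoded Smi index (see the bit test below).
// In-object fields live at positive indices off the object itself,
// out-of-object properties at negated indices into the properties array,
// and mutable-double fields take the deferred runtime path.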
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble final : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen, LLoadFieldByIndex* instr,
                              Register result, Register object, Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          result_(result),
          object_(object),
          index_(index) {}
    void Generate() override {
      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LLoadFieldByIndex* instr_;
    Register result_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  DeferredLoadMutableDouble* deferred = new (zone())
      DeferredLoadMutableDouble(this, instr, result, object, index);

  Label out_of_object, done;

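  // Bit 0 of the Smi-encoded index flags a mutable-double field, which must
  // go through the deferred runtime path. The arithmetic shift drops that
  // flag bit, leaving the Smi-tagged field index; negative indices denote
  // out-of-object properties.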
  __ TestBitMask(index, reinterpret_cast<uintptr_t>(Smi::FromInt(1)), r0);
  __ bne(deferred->entry());
  __ ShiftRightArithP(index, index, Operand(1));

  __ CmpP(index, Operand::Zero());
  __ blt(&out_of_object, Label::kNear);

  __ SmiToPtrArrayOffset(r0, index);
  __ AddP(scratch, object, r0);
  __ LoadP(result, FieldMemOperand(scratch, JSObject::kHeaderSize));

  __ b(&done, Label::kNear);

  __ bind(&out_of_object);
  __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // The index equals the negated out-of-object property index plus 1.
  __ SmiToPtrArrayOffset(r0, index);
  __ SubP(scratch, result, r0);
  __ LoadP(result,
           FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}

#undef __

}  // namespace internal
}  // namespace v8