// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/arm64/frames-arm64.h"
#include "src/arm64/macro-assembler-arm64.h"
#include "src/ast/scopes.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"

namespace v8 {
namespace internal {
namespace compiler {

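// Shorthand used throughout this file: "__ Foo(...)" expands to
// "masm()->Foo(...)".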
#define __ masm()->


// Adds Arm64-specific methods to convert InstructionOperands.
class Arm64OperandConverter final : public InstructionOperandConverter {
 public:
  Arm64OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  DoubleRegister InputFloat32Register(size_t index) {
    return InputDoubleRegister(index).S();
  }

  DoubleRegister InputFloat64Register(size_t index) {
    return InputDoubleRegister(index);
  }

  size_t OutputCount() { return instr_->OutputCount(); }

  DoubleRegister OutputFloat32Register() { return OutputDoubleRegister().S(); }

  DoubleRegister OutputFloat64Register() { return OutputDoubleRegister(); }

  Register InputRegister32(size_t index) {
    return ToRegister(instr_->InputAt(index)).W();
  }

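  // Where the instruction selector has encoded a known-zero operand as an
  // immediate, substitute the zero register (wzr/xzr) so no materializing
  // move is needed.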
  Register InputOrZeroRegister32(size_t index) {
    DCHECK(instr_->InputAt(index)->IsRegister() ||
           (instr_->InputAt(index)->IsImmediate() && (InputInt32(index) == 0)));
    if (instr_->InputAt(index)->IsImmediate()) {
      return wzr;
    }
    return InputRegister32(index);
  }

  Register InputRegister64(size_t index) { return InputRegister(index); }

  Register InputOrZeroRegister64(size_t index) {
    DCHECK(instr_->InputAt(index)->IsRegister() ||
           (instr_->InputAt(index)->IsImmediate() && (InputInt64(index) == 0)));
    if (instr_->InputAt(index)->IsImmediate()) {
      return xzr;
    }
    return InputRegister64(index);
  }

  Operand InputImmediate(size_t index) {
    return ToImmediate(instr_->InputAt(index));
  }

  Operand InputOperand(size_t index) {
    return ToOperand(instr_->InputAt(index));
  }

  Operand InputOperand64(size_t index) { return InputOperand(index); }

  Operand InputOperand32(size_t index) {
    return ToOperand32(instr_->InputAt(index));
  }

  Register OutputRegister64() { return OutputRegister(); }

  Register OutputRegister32() { return ToRegister(instr_->Output()).W(); }

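  // "Operand2" inputs are operands with a shift or extend folded into the
  // instruction; the addressing mode encoded in the opcode determines how
  // the immediate input following the register is decoded.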
  Operand InputOperand2_32(size_t index) {
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
        return InputOperand32(index);
      case kMode_Operand2_R_LSL_I:
        return Operand(InputRegister32(index), LSL, InputInt5(index + 1));
      case kMode_Operand2_R_LSR_I:
        return Operand(InputRegister32(index), LSR, InputInt5(index + 1));
      case kMode_Operand2_R_ASR_I:
        return Operand(InputRegister32(index), ASR, InputInt5(index + 1));
      case kMode_Operand2_R_ROR_I:
        return Operand(InputRegister32(index), ROR, InputInt5(index + 1));
      case kMode_Operand2_R_UXTB:
        return Operand(InputRegister32(index), UXTB);
      case kMode_Operand2_R_UXTH:
        return Operand(InputRegister32(index), UXTH);
      case kMode_Operand2_R_SXTB:
        return Operand(InputRegister32(index), SXTB);
      case kMode_Operand2_R_SXTH:
        return Operand(InputRegister32(index), SXTH);
      case kMode_MRI:
      case kMode_MRR:
        break;
    }
    UNREACHABLE();
    return Operand(-1);
  }

  Operand InputOperand2_64(size_t index) {
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
        return InputOperand64(index);
      case kMode_Operand2_R_LSL_I:
        return Operand(InputRegister64(index), LSL, InputInt6(index + 1));
      case kMode_Operand2_R_LSR_I:
        return Operand(InputRegister64(index), LSR, InputInt6(index + 1));
      case kMode_Operand2_R_ASR_I:
        return Operand(InputRegister64(index), ASR, InputInt6(index + 1));
      case kMode_Operand2_R_ROR_I:
        return Operand(InputRegister64(index), ROR, InputInt6(index + 1));
      case kMode_Operand2_R_UXTB:
        return Operand(InputRegister64(index), UXTB);
      case kMode_Operand2_R_UXTH:
        return Operand(InputRegister64(index), UXTH);
      case kMode_Operand2_R_SXTB:
        return Operand(InputRegister64(index), SXTB);
      case kMode_Operand2_R_SXTH:
        return Operand(InputRegister64(index), SXTH);
      case kMode_MRI:
      case kMode_MRR:
        break;
    }
    UNREACHABLE();
    return Operand(-1);
  }

  MemOperand MemoryOperand(size_t* first_index) {
    const size_t index = *first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
      case kMode_Operand2_R_LSL_I:
      case kMode_Operand2_R_LSR_I:
      case kMode_Operand2_R_ASR_I:
      case kMode_Operand2_R_ROR_I:
      case kMode_Operand2_R_UXTB:
      case kMode_Operand2_R_UXTH:
      case kMode_Operand2_R_SXTB:
      case kMode_Operand2_R_SXTH:
        break;
      case kMode_MRI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_MRR:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
    }
    UNREACHABLE();
    return MemOperand(no_reg);
  }

  MemOperand MemoryOperand(size_t first_index = 0) {
    return MemoryOperand(&first_index);
  }

  Operand ToOperand(InstructionOperand* op) {
    if (op->IsRegister()) {
      return Operand(ToRegister(op));
    }
    return ToImmediate(op);
  }

  Operand ToOperand32(InstructionOperand* op) {
    if (op->IsRegister()) {
      return Operand(ToRegister(op).W());
    }
    return ToImmediate(op);
  }

  Operand ToImmediate(InstructionOperand* operand) {
    Constant constant = ToConstant(operand);
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kInt64:
        return Operand(constant.ToInt64());
      case Constant::kFloat32:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
      case Constant::kFloat64:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kExternalReference:
        return Operand(constant.ToExternalReference());
      case Constant::kHeapObject:
        return Operand(constant.ToHeapObject());
      case Constant::kRpoNumber:
        UNREACHABLE();  // TODO(dcarney): RPO immediates on arm64.
        break;
    }
    UNREACHABLE();
    return Operand(-1);
  }

  MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const {
    DCHECK_NOT_NULL(op);
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    FrameOffset offset = frame_access_state()->GetFrameOffset(
        AllocatedOperand::cast(op)->index());
    if (offset.from_frame_pointer()) {
      int from_sp =
          offset.offset() +
          ((frame()->GetSpToFpSlotCount() + frame_access_state()->sp_delta()) *
           kPointerSize);
      // Convert FP-offsets to SP-offsets if it results in better code.
      if (Assembler::IsImmLSUnscaled(from_sp) ||
          Assembler::IsImmLSScaled(from_sp, LSDoubleWord)) {
        offset = FrameOffset::FromStackPointer(from_sp);
      }
    }
    return MemOperand(offset.from_stack_pointer() ? masm->StackPointer() : fp,
                      offset.offset());
  }
};
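
// Typical use inside AssembleArchInstruction below (sketch):
//   Arm64OperandConverter i(this, instr);
//   __ Add(i.OutputRegister(), i.InputOrZeroRegister64(0),
//          i.InputOperand2_64(1));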


namespace {

class OutOfLineLoadNaN32 final : public OutOfLineCode {
 public:
  OutOfLineLoadNaN32(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    __ Fmov(result_, std::numeric_limits<float>::quiet_NaN());
  }

 private:
  DoubleRegister const result_;
};


class OutOfLineLoadNaN64 final : public OutOfLineCode {
 public:
  OutOfLineLoadNaN64(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    __ Fmov(result_, std::numeric_limits<double>::quiet_NaN());
  }

 private:
  DoubleRegister const result_;
};


class OutOfLineLoadZero final : public OutOfLineCode {
 public:
  OutOfLineLoadZero(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final { __ Mov(result_, 0); }

 private:
  Register const result_;
};


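// Out-of-line portion of the write barrier: jumped to when the stored-into
// object's page has the "pointers from here are interesting" flag set. It
// still exits early if the value is a Smi or does not point anywhere
// interesting; otherwise it calls the RecordWriteStub.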
class OutOfLineRecordWrite final : public OutOfLineCode {
 public:
  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode)
      : OutOfLineCode(gen),
        object_(object),
        index_(index),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode) {}

  void Generate() final {
    if (mode_ > RecordWriteMode::kValueIsPointer) {
      __ JumpIfSmi(value_, exit());
    }
    if (mode_ > RecordWriteMode::kValueIsMap) {
      __ CheckPageFlagClear(value_, scratch0_,
                            MemoryChunk::kPointersToHereAreInterestingMask,
                            exit());
    }
    SaveFPRegsMode const save_fp_mode =
        frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
    // TODO(turbofan): Once we get frame elision working, we need to save
    // and restore lr properly here if the frame was elided.
    RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
                         EMIT_REMEMBERED_SET, save_fp_mode);
    __ Add(scratch1_, object_, index_);
    __ CallStub(&stub);
  }

 private:
  Register const object_;
  Register const index_;
  Register const value_;
  Register const scratch0_;
  Register const scratch1_;
  RecordWriteMode const mode_;
};


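// The floating-point conditions deliberately map onto unsigned condition
// codes (lo, ls, hi, hs): after Fcmp an unordered result sets C and V, so
// e.g. kFloatLessThan uses lo (C clear) and is false for NaN operands,
// while kFloatGreaterThanOrUnordered uses hi and is true for them.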
Condition FlagsConditionToCondition(FlagsCondition condition) {
  switch (condition) {
    case kEqual:
      return eq;
    case kNotEqual:
      return ne;
    case kSignedLessThan:
      return lt;
    case kSignedGreaterThanOrEqual:
      return ge;
    case kSignedLessThanOrEqual:
      return le;
    case kSignedGreaterThan:
      return gt;
    case kUnsignedLessThan:
      return lo;
    case kUnsignedGreaterThanOrEqual:
      return hs;
    case kUnsignedLessThanOrEqual:
      return ls;
    case kUnsignedGreaterThan:
      return hi;
    case kFloatLessThanOrUnordered:
      return lt;
    case kFloatGreaterThanOrEqual:
      return ge;
    case kFloatLessThanOrEqual:
      return ls;
    case kFloatGreaterThanOrUnordered:
      return hi;
    case kFloatLessThan:
      return lo;
    case kFloatGreaterThanOrEqualOrUnordered:
      return hs;
    case kFloatLessThanOrEqualOrUnordered:
      return le;
    case kFloatGreaterThan:
      return gt;
    case kOverflow:
      return vs;
    case kNotOverflow:
      return vc;
    case kUnorderedEqual:
    case kUnorderedNotEqual:
      break;
  }
  UNREACHABLE();
  return nv;
}

}  // namespace


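// The checked memory access macros below share one pattern: an unsigned
// compare of the offset against the length guards the access. Out-of-bounds
// loads branch to an out-of-line stub that materializes zero or a quiet NaN
// instead, and out-of-bounds stores are simply skipped.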
#define ASSEMBLE_CHECKED_LOAD_FLOAT(width)                         \
  do {                                                             \
    auto result = i.OutputFloat##width##Register();                \
    auto buffer = i.InputRegister(0);                              \
    auto offset = i.InputRegister32(1);                            \
    auto length = i.InputOperand32(2);                             \
    __ Cmp(offset, length);                                        \
    auto ool = new (zone()) OutOfLineLoadNaN##width(this, result); \
    __ B(hs, ool->entry());                                        \
    __ Ldr(result, MemOperand(buffer, offset, UXTW));              \
    __ Bind(ool->exit());                                          \
  } while (0)


#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)             \
  do {                                                       \
    auto result = i.OutputRegister32();                      \
    auto buffer = i.InputRegister(0);                        \
    auto offset = i.InputRegister32(1);                      \
    auto length = i.InputOperand32(2);                       \
    __ Cmp(offset, length);                                  \
    auto ool = new (zone()) OutOfLineLoadZero(this, result); \
    __ B(hs, ool->entry());                                  \
    __ asm_instr(result, MemOperand(buffer, offset, UXTW));  \
    __ Bind(ool->exit());                                    \
  } while (0)


#define ASSEMBLE_CHECKED_LOAD_INTEGER_64(asm_instr)          \
  do {                                                       \
    auto result = i.OutputRegister();                        \
    auto buffer = i.InputRegister(0);                        \
    auto offset = i.InputRegister32(1);                      \
    auto length = i.InputOperand32(2);                       \
    __ Cmp(offset, length);                                  \
    auto ool = new (zone()) OutOfLineLoadZero(this, result); \
    __ B(hs, ool->entry());                                  \
    __ asm_instr(result, MemOperand(buffer, offset, UXTW));  \
    __ Bind(ool->exit());                                    \
  } while (0)


#define ASSEMBLE_CHECKED_STORE_FLOAT(width)          \
  do {                                               \
    auto buffer = i.InputRegister(0);                \
    auto offset = i.InputRegister32(1);              \
    auto length = i.InputOperand32(2);               \
    auto value = i.InputFloat##width##Register(3);   \
    __ Cmp(offset, length);                          \
    Label done;                                      \
    __ B(hs, &done);                                 \
    __ Str(value, MemOperand(buffer, offset, UXTW)); \
    __ Bind(&done);                                  \
  } while (0)


#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)          \
  do {                                                     \
    auto buffer = i.InputRegister(0);                      \
    auto offset = i.InputRegister32(1);                    \
    auto length = i.InputOperand32(2);                     \
    auto value = i.InputRegister32(3);                     \
    __ Cmp(offset, length);                                \
    Label done;                                            \
    __ B(hs, &done);                                       \
    __ asm_instr(value, MemOperand(buffer, offset, UXTW)); \
    __ Bind(&done);                                        \
  } while (0)


#define ASSEMBLE_CHECKED_STORE_INTEGER_64(asm_instr)       \
  do {                                                     \
    auto buffer = i.InputRegister(0);                      \
    auto offset = i.InputRegister32(1);                    \
    auto length = i.InputOperand32(2);                     \
    auto value = i.InputRegister(3);                       \
    __ Cmp(offset, length);                                \
    Label done;                                            \
    __ B(hs, &done);                                       \
    __ asm_instr(value, MemOperand(buffer, offset, UXTW)); \
    __ Bind(&done);                                        \
  } while (0)


#define ASSEMBLE_SHIFT(asm_instr, width)                                    \
  do {                                                                      \
    if (instr->InputAt(1)->IsRegister()) {                                  \
      __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0),    \
                   i.InputRegister##width(1));                              \
    } else {                                                                \
      uint32_t imm =                                                        \
          static_cast<uint32_t>(i.InputOperand##width(1).ImmediateValue()); \
      __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0),    \
                   imm % (width));                                          \
    }                                                                       \
  } while (0)


void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
  if (sp_slot_delta > 0) {
    __ Drop(sp_slot_delta);
  }
  frame_access_state()->SetFrameAccessToDefault();
}


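// Restores the caller's frame pointer and link register before the stack is
// rewound, so the tail-called code observes the frame its caller set up.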
void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
  if (sp_slot_delta < 0) {
    __ Claim(-sp_slot_delta);
    frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
  }
  if (frame()->needs_frame()) {
    __ Ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
    __ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  }
  frame_access_state()->SetFrameAccessToSP();
}


// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  Arm64OperandConverter i(this, instr);
  InstructionCode opcode = instr->opcode();
  switch (ArchOpcodeField::decode(opcode)) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (instr->InputAt(0)->IsImmediate()) {
        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      } else {
        Register target = i.InputRegister(0);
        __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
        __ Call(target);
      }
      frame_access_state()->ClearSPDelta();
      RecordCallPosition(instr);
      break;
    }
    case kArchTailCallCodeObject: {
      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
      AssembleDeconstructActivationRecord(stack_param_delta);
      if (instr->InputAt(0)->IsImmediate()) {
        __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      } else {
        Register target = i.InputRegister(0);
        __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
        __ Jump(target);
      }
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check that the function's context matches the context argument.
        UseScratchRegisterScope scope(masm());
        Register temp = scope.AcquireX();
        __ Ldr(temp, FieldMemOperand(func, JSFunction::kContextOffset));
        __ cmp(cp, temp);
        __ Assert(eq, kWrongFunctionContext);
      }
      __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Call(x10);
      frame_access_state()->ClearSPDelta();
      RecordCallPosition(instr);
      break;
    }
    case kArchTailCallJSFunction: {
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check that the function's context matches the context argument.
        UseScratchRegisterScope scope(masm());
        Register temp = scope.AcquireX();
        __ Ldr(temp, FieldMemOperand(func, JSFunction::kContextOffset));
        __ cmp(cp, temp);
        __ Assert(eq, kWrongFunctionContext);
      }
      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
      AssembleDeconstructActivationRecord(stack_param_delta);
      __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Jump(x10);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchLazyBailout: {
      EnsureSpaceForLazyDeopt();
      RecordCallPosition(instr);
      break;
    }
    case kArchPrepareCallCFunction:
      // We don't need kArchPrepareCallCFunction on arm64, as the instruction
      // selector already performs a Claim to reserve space on the stack and
      // guarantees correct alignment of the stack pointer.
      UNREACHABLE();
      break;
    case kArchPrepareTailCall:
      AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
      break;
    case kArchCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      if (instr->InputAt(0)->IsImmediate()) {
        ExternalReference ref = i.InputExternalReference(0);
        __ CallCFunction(ref, num_parameters, 0);
      } else {
        Register func = i.InputRegister(0);
        __ CallCFunction(func, num_parameters, 0);
      }
      // CallCFunction only supports register arguments, so we never need to
      // call frame()->ClearOutgoingParameterSlots() here.
      DCHECK(frame_access_state()->sp_delta() == 0);
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      break;
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      break;
    case kArchLookupSwitch:
      AssembleArchLookupSwitch(instr);
      break;
    case kArchNop:
    case kArchThrowTerminator:
      // Don't emit code for nops.
      break;
    case kArchDeoptimize: {
      int deopt_state_id =
          BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
      Deoptimizer::BailoutType bailout_type =
          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
      AssembleDeoptimizerCall(deopt_state_id, bailout_type);
      break;
    }
    case kArchRet:
      AssembleReturn();
      break;
    case kArchStackPointer:
      __ mov(i.OutputRegister(), masm()->StackPointer());
      break;
    case kArchFramePointer:
      __ mov(i.OutputRegister(), fp);
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kArchStoreWithWriteBarrier: {
      RecordWriteMode mode =
          static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      Register scratch0 = i.TempRegister(0);
      Register scratch1 = i.TempRegister(1);
      auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
                                                   scratch0, scratch1, mode);
      __ Str(value, MemOperand(object, index));
      __ CheckPageFlagSet(object, scratch0,
                          MemoryChunk::kPointersFromHereAreInterestingMask,
                          ool->entry());
      __ Bind(ool->exit());
      break;
    }
    case kArm64Float32RoundDown:
      __ Frintm(i.OutputFloat32Register(), i.InputFloat32Register(0));
      break;
    case kArm64Float64RoundDown:
      __ Frintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Float32RoundUp:
      __ Frintp(i.OutputFloat32Register(), i.InputFloat32Register(0));
      break;
    case kArm64Float64RoundUp:
      __ Frintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Float64RoundTiesAway:
      __ Frinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Float32RoundTruncate:
      __ Frintz(i.OutputFloat32Register(), i.InputFloat32Register(0));
      break;
    case kArm64Float64RoundTruncate:
      __ Frintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Float32RoundTiesEven:
      __ Frintn(i.OutputFloat32Register(), i.InputFloat32Register(0));
      break;
    case kArm64Float64RoundTiesEven:
      __ Frintn(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Add:
      if (FlagsModeField::decode(opcode) != kFlags_none) {
        __ Adds(i.OutputRegister(), i.InputOrZeroRegister64(0),
                i.InputOperand2_64(1));
      } else {
        __ Add(i.OutputRegister(), i.InputOrZeroRegister64(0),
               i.InputOperand2_64(1));
      }
      break;
    case kArm64Add32:
      if (FlagsModeField::decode(opcode) != kFlags_none) {
        __ Adds(i.OutputRegister32(), i.InputOrZeroRegister32(0),
                i.InputOperand2_32(1));
      } else {
        __ Add(i.OutputRegister32(), i.InputOrZeroRegister32(0),
               i.InputOperand2_32(1));
      }
      break;
    case kArm64And:
      __ And(i.OutputRegister(), i.InputOrZeroRegister64(0),
             i.InputOperand2_64(1));
      break;
    case kArm64And32:
      __ And(i.OutputRegister32(), i.InputOrZeroRegister32(0),
             i.InputOperand2_32(1));
      break;
    case kArm64Bic:
      __ Bic(i.OutputRegister(), i.InputOrZeroRegister64(0),
             i.InputOperand2_64(1));
      break;
    case kArm64Bic32:
      __ Bic(i.OutputRegister32(), i.InputOrZeroRegister32(0),
             i.InputOperand2_32(1));
      break;
    case kArm64Mul:
      __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Mul32:
      __ Mul(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Smull:
      __ Smull(i.OutputRegister(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Umull:
      __ Umull(i.OutputRegister(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Madd:
      __ Madd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
              i.InputRegister(2));
      break;
    case kArm64Madd32:
      __ Madd(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1),
              i.InputRegister32(2));
      break;
    case kArm64Msub:
      __ Msub(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
              i.InputRegister(2));
      break;
    case kArm64Msub32:
      __ Msub(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1),
              i.InputRegister32(2));
      break;
    case kArm64Mneg:
      __ Mneg(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Mneg32:
      __ Mneg(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Idiv:
      __ Sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Idiv32:
      __ Sdiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Udiv:
      __ Udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Udiv32:
      __ Udiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Imod: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireX();
      __ Sdiv(temp, i.InputRegister(0), i.InputRegister(1));
      __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
      break;
    }
    case kArm64Imod32: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireW();
      __ Sdiv(temp, i.InputRegister32(0), i.InputRegister32(1));
      __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
              i.InputRegister32(0));
      break;
    }
    case kArm64Umod: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireX();
      __ Udiv(temp, i.InputRegister(0), i.InputRegister(1));
      __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
      break;
    }
    case kArm64Umod32: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireW();
      __ Udiv(temp, i.InputRegister32(0), i.InputRegister32(1));
      __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
              i.InputRegister32(0));
      break;
    }
    case kArm64Not:
      __ Mvn(i.OutputRegister(), i.InputOperand(0));
      break;
    case kArm64Not32:
      __ Mvn(i.OutputRegister32(), i.InputOperand32(0));
      break;
    case kArm64Or:
      __ Orr(i.OutputRegister(), i.InputOrZeroRegister64(0),
             i.InputOperand2_64(1));
      break;
    case kArm64Or32:
      __ Orr(i.OutputRegister32(), i.InputOrZeroRegister32(0),
             i.InputOperand2_32(1));
      break;
    case kArm64Orn:
      __ Orn(i.OutputRegister(), i.InputOrZeroRegister64(0),
             i.InputOperand2_64(1));
      break;
    case kArm64Orn32:
      __ Orn(i.OutputRegister32(), i.InputOrZeroRegister32(0),
             i.InputOperand2_32(1));
      break;
    case kArm64Eor:
      __ Eor(i.OutputRegister(), i.InputOrZeroRegister64(0),
             i.InputOperand2_64(1));
      break;
    case kArm64Eor32:
      __ Eor(i.OutputRegister32(), i.InputOrZeroRegister32(0),
             i.InputOperand2_32(1));
      break;
    case kArm64Eon:
      __ Eon(i.OutputRegister(), i.InputOrZeroRegister64(0),
             i.InputOperand2_64(1));
      break;
    case kArm64Eon32:
      __ Eon(i.OutputRegister32(), i.InputOrZeroRegister32(0),
             i.InputOperand2_32(1));
      break;
    case kArm64Sub:
      if (FlagsModeField::decode(opcode) != kFlags_none) {
        __ Subs(i.OutputRegister(), i.InputOrZeroRegister64(0),
                i.InputOperand2_64(1));
      } else {
        __ Sub(i.OutputRegister(), i.InputOrZeroRegister64(0),
               i.InputOperand2_64(1));
      }
      break;
    case kArm64Sub32:
      if (FlagsModeField::decode(opcode) != kFlags_none) {
        __ Subs(i.OutputRegister32(), i.InputOrZeroRegister32(0),
                i.InputOperand2_32(1));
      } else {
        __ Sub(i.OutputRegister32(), i.InputOrZeroRegister32(0),
               i.InputOperand2_32(1));
      }
      break;
    case kArm64Lsl:
      ASSEMBLE_SHIFT(Lsl, 64);
      break;
    case kArm64Lsl32:
      ASSEMBLE_SHIFT(Lsl, 32);
      break;
    case kArm64Lsr:
      ASSEMBLE_SHIFT(Lsr, 64);
      break;
    case kArm64Lsr32:
      ASSEMBLE_SHIFT(Lsr, 32);
      break;
    case kArm64Asr:
      ASSEMBLE_SHIFT(Asr, 64);
      break;
    case kArm64Asr32:
      ASSEMBLE_SHIFT(Asr, 32);
      break;
    case kArm64Ror:
      ASSEMBLE_SHIFT(Ror, 64);
      break;
    case kArm64Ror32:
      ASSEMBLE_SHIFT(Ror, 32);
      break;
    case kArm64Mov32:
      __ Mov(i.OutputRegister32(), i.InputRegister32(0));
      break;
    case kArm64Sxtb32:
      __ Sxtb(i.OutputRegister32(), i.InputRegister32(0));
      break;
    case kArm64Sxth32:
      __ Sxth(i.OutputRegister32(), i.InputRegister32(0));
      break;
    case kArm64Sxtw:
      __ Sxtw(i.OutputRegister(), i.InputRegister32(0));
      break;
    case kArm64Sbfx32:
      __ Sbfx(i.OutputRegister32(), i.InputRegister32(0), i.InputInt5(1),
              i.InputInt5(2));
      break;
    case kArm64Ubfx:
      __ Ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt6(1),
              i.InputInt6(2));
      break;
    case kArm64Ubfx32:
      __ Ubfx(i.OutputRegister32(), i.InputRegister32(0), i.InputInt5(1),
              i.InputInt5(2));
      break;
    case kArm64Ubfiz32:
      __ Ubfiz(i.OutputRegister32(), i.InputRegister32(0), i.InputInt5(1),
               i.InputInt5(2));
      break;
    case kArm64Bfi:
      __ Bfi(i.OutputRegister(), i.InputRegister(1), i.InputInt6(2),
             i.InputInt6(3));
      break;
    case kArm64TestAndBranch32:
    case kArm64TestAndBranch:
      // Pseudo instructions turned into tbz/tbnz in AssembleArchBranch.
      break;
    case kArm64CompareAndBranch32:
      // Pseudo instruction turned into cbz/cbnz in AssembleArchBranch.
      break;
    case kArm64ClaimForCallArguments: {
      __ Claim(i.InputInt32(0));
      frame_access_state()->IncreaseSPDelta(i.InputInt32(0));
      break;
    }
    case kArm64Poke: {
      Operand operand(i.InputInt32(1) * kPointerSize);
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ Poke(i.InputFloat64Register(0), operand);
      } else {
        __ Poke(i.InputRegister(0), operand);
      }
      break;
    }
    case kArm64PokePair: {
      int slot = i.InputInt32(2) - 1;
      if (instr->InputAt(0)->IsDoubleRegister()) {
        __ PokePair(i.InputFloat64Register(1), i.InputFloat64Register(0),
                    slot * kPointerSize);
      } else {
        __ PokePair(i.InputRegister(1), i.InputRegister(0),
                    slot * kPointerSize);
      }
      break;
    }
    case kArm64Clz:
      __ Clz(i.OutputRegister64(), i.InputRegister64(0));
      break;
    case kArm64Clz32:
      __ Clz(i.OutputRegister32(), i.InputRegister32(0));
      break;
    case kArm64Cmp:
      __ Cmp(i.InputOrZeroRegister64(0), i.InputOperand(1));
      break;
    case kArm64Cmp32:
      __ Cmp(i.InputOrZeroRegister32(0), i.InputOperand2_32(1));
      break;
    case kArm64Cmn:
      __ Cmn(i.InputOrZeroRegister64(0), i.InputOperand(1));
      break;
    case kArm64Cmn32:
      __ Cmn(i.InputOrZeroRegister32(0), i.InputOperand2_32(1));
      break;
    case kArm64Tst:
      __ Tst(i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Tst32:
      __ Tst(i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Float32Cmp:
      if (instr->InputAt(1)->IsDoubleRegister()) {
        __ Fcmp(i.InputFloat32Register(0), i.InputFloat32Register(1));
      } else {
        DCHECK(instr->InputAt(1)->IsImmediate());
        // 0.0 is the only immediate supported by fcmp instructions.
        DCHECK(i.InputFloat32(1) == 0.0f);
        __ Fcmp(i.InputFloat32Register(0), i.InputFloat32(1));
      }
      break;
    case kArm64Float32Add:
      __ Fadd(i.OutputFloat32Register(), i.InputFloat32Register(0),
              i.InputFloat32Register(1));
      break;
    case kArm64Float32Sub:
      __ Fsub(i.OutputFloat32Register(), i.InputFloat32Register(0),
              i.InputFloat32Register(1));
      break;
    case kArm64Float32Mul:
      __ Fmul(i.OutputFloat32Register(), i.InputFloat32Register(0),
              i.InputFloat32Register(1));
      break;
    case kArm64Float32Div:
      __ Fdiv(i.OutputFloat32Register(), i.InputFloat32Register(0),
              i.InputFloat32Register(1));
      break;
    case kArm64Float32Max:
      // (b < a) ? a : b
      __ Fcmp(i.InputFloat32Register(1), i.InputFloat32Register(0));
      __ Fcsel(i.OutputFloat32Register(), i.InputFloat32Register(0),
               i.InputFloat32Register(1), lo);
      break;
    case kArm64Float32Min:
      // (a < b) ? a : b
      __ Fcmp(i.InputFloat32Register(0), i.InputFloat32Register(1));
      __ Fcsel(i.OutputFloat32Register(), i.InputFloat32Register(0),
               i.InputFloat32Register(1), lo);
      break;
    case kArm64Float32Abs:
      __ Fabs(i.OutputFloat32Register(), i.InputFloat32Register(0));
      break;
    case kArm64Float32Sqrt:
      __ Fsqrt(i.OutputFloat32Register(), i.InputFloat32Register(0));
      break;
    case kArm64Float64Cmp:
      if (instr->InputAt(1)->IsDoubleRegister()) {
        __ Fcmp(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      } else {
        DCHECK(instr->InputAt(1)->IsImmediate());
        // 0.0 is the only immediate supported by fcmp instructions.
        DCHECK(i.InputDouble(1) == 0.0);
        __ Fcmp(i.InputDoubleRegister(0), i.InputDouble(1));
      }
      break;
    case kArm64Float64Add:
      __ Fadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Sub:
      __ Fsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Mul:
      __ Fmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Div:
      __ Fdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
    case kArm64Float64Mod: {
      // TODO(dcarney): implement directly. See note in lithium-codegen-arm64.cc
      FrameScope scope(masm(), StackFrame::MANUAL);
      DCHECK(d0.is(i.InputDoubleRegister(0)));
      DCHECK(d1.is(i.InputDoubleRegister(1)));
      DCHECK(d0.is(i.OutputDoubleRegister()));
      // TODO(dcarney): make sure this saves all relevant registers.
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                       0, 2);
      break;
    }
    case kArm64Float64Max:
      // (b < a) ? a : b
      __ Fcmp(i.InputDoubleRegister(1), i.InputDoubleRegister(0));
      __ Fcsel(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1), lo);
      break;
    case kArm64Float64Min:
      // (a < b) ? a : b
      __ Fcmp(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      __ Fcsel(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1), lo);
      break;
    case kArm64Float64Abs:
      __ Fabs(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Float64Neg:
      __ Fneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Float64Sqrt:
      __ Fsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Float32ToFloat64:
      __ Fcvt(i.OutputDoubleRegister(), i.InputDoubleRegister(0).S());
      break;
    case kArm64Float64ToFloat32:
      __ Fcvt(i.OutputDoubleRegister().S(), i.InputDoubleRegister(0));
      break;
    case kArm64Float64ToInt32:
      __ Fcvtzs(i.OutputRegister32(), i.InputDoubleRegister(0));
      break;
    case kArm64Float64ToUint32:
      __ Fcvtzu(i.OutputRegister32(), i.InputDoubleRegister(0));
      break;
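    // For the 64-bit conversions below, the optional second output is a
    // success flag. Fcvtzs saturates to INT64_MIN/INT64_MAX on overflow and
    // produces 0 for NaN, so V is accumulated via Ccmp/Fccmp until it is set
    // exactly when the result saturated or the input was NaN; a saturated
    // result is still accepted when the input was exactly INT64_MIN.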
    case kArm64Float32ToInt64:
      __ Fcvtzs(i.OutputRegister64(), i.InputFloat32Register(0));
      if (i.OutputCount() > 1) {
        __ Mov(i.OutputRegister(1), 1);
        Label done;
        __ Cmp(i.OutputRegister(0), 1);
        __ Ccmp(i.OutputRegister(0), -1, VFlag, vc);
        __ Fccmp(i.InputFloat32Register(0), i.InputFloat32Register(0), VFlag,
                 vc);
        __ B(vc, &done);
        __ Fcmp(i.InputFloat32Register(0), static_cast<float>(INT64_MIN));
        __ Cset(i.OutputRegister(1), eq);
        __ Bind(&done);
      }
      break;
    case kArm64Float64ToInt64:
      __ Fcvtzs(i.OutputRegister(0), i.InputDoubleRegister(0));
      if (i.OutputCount() > 1) {
        __ Mov(i.OutputRegister(1), 1);
        Label done;
        __ Cmp(i.OutputRegister(0), 1);
        __ Ccmp(i.OutputRegister(0), -1, VFlag, vc);
        __ Fccmp(i.InputDoubleRegister(0), i.InputDoubleRegister(0), VFlag, vc);
        __ B(vc, &done);
        __ Fcmp(i.InputDoubleRegister(0), static_cast<double>(INT64_MIN));
        __ Cset(i.OutputRegister(1), eq);
        __ Bind(&done);
      }
      break;
    case kArm64Float32ToUint64:
      __ Fcvtzu(i.OutputRegister64(), i.InputFloat32Register(0));
      if (i.OutputCount() > 1) {
        __ Fcmp(i.InputFloat32Register(0), -1.0);
        __ Ccmp(i.OutputRegister(0), -1, ZFlag, gt);
        __ Cset(i.OutputRegister(1), ne);
      }
      break;
    case kArm64Float64ToUint64:
      __ Fcvtzu(i.OutputRegister64(), i.InputDoubleRegister(0));
      if (i.OutputCount() > 1) {
        __ Fcmp(i.InputDoubleRegister(0), -1.0);
        __ Ccmp(i.OutputRegister(0), -1, ZFlag, gt);
        __ Cset(i.OutputRegister(1), ne);
      }
      break;
    case kArm64Int32ToFloat64:
      __ Scvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
      break;
    case kArm64Int64ToFloat32:
      __ Scvtf(i.OutputDoubleRegister().S(), i.InputRegister64(0));
      break;
    case kArm64Int64ToFloat64:
      __ Scvtf(i.OutputDoubleRegister(), i.InputRegister64(0));
      break;
    case kArm64Uint32ToFloat64:
      __ Ucvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
      break;
    case kArm64Uint64ToFloat32:
      __ Ucvtf(i.OutputDoubleRegister().S(), i.InputRegister64(0));
      break;
    case kArm64Uint64ToFloat64:
      __ Ucvtf(i.OutputDoubleRegister(), i.InputRegister64(0));
      break;
    case kArm64Float64ExtractLowWord32:
      __ Fmov(i.OutputRegister32(), i.InputFloat32Register(0));
      break;
    case kArm64Float64ExtractHighWord32:
      // TODO(arm64): This should use MOV (to general) when NEON is supported.
      __ Fmov(i.OutputRegister(), i.InputFloat64Register(0));
      __ Lsr(i.OutputRegister(), i.OutputRegister(), 32);
      break;
    case kArm64Float64InsertLowWord32: {
      // TODO(arm64): This should use MOV (from general) when NEON is supported.
      UseScratchRegisterScope scope(masm());
      Register tmp = scope.AcquireX();
      __ Fmov(tmp, i.InputFloat64Register(0));
      __ Bfi(tmp, i.InputRegister(1), 0, 32);
      __ Fmov(i.OutputFloat64Register(), tmp);
      break;
    }
    case kArm64Float64InsertHighWord32: {
      // TODO(arm64): This should use MOV (from general) when NEON is supported.
      UseScratchRegisterScope scope(masm());
      Register tmp = scope.AcquireX();
      __ Fmov(tmp.W(), i.InputFloat32Register(0));
      __ Bfi(tmp, i.InputRegister(1), 32, 32);
      __ Fmov(i.OutputFloat64Register(), tmp);
      break;
    }
    case kArm64Float64MoveU64:
      __ Fmov(i.OutputFloat64Register(), i.InputRegister(0));
      break;
    case kArm64U64MoveFloat64:
      __ Fmov(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Ldrb:
      __ Ldrb(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64Ldrsb:
      __ Ldrsb(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64Strb:
      __ Strb(i.InputRegister(2), i.MemoryOperand());
      break;
    case kArm64Ldrh:
      __ Ldrh(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64Ldrsh:
      __ Ldrsh(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64Strh:
      __ Strh(i.InputRegister(2), i.MemoryOperand());
      break;
    case kArm64LdrW:
      __ Ldr(i.OutputRegister32(), i.MemoryOperand());
      break;
    case kArm64StrW:
      __ Str(i.InputRegister32(2), i.MemoryOperand());
      break;
    case kArm64Ldr:
      __ Ldr(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64Str:
      __ Str(i.InputRegister(2), i.MemoryOperand());
      break;
    case kArm64LdrS:
      __ Ldr(i.OutputDoubleRegister().S(), i.MemoryOperand());
      break;
    case kArm64StrS:
      __ Str(i.InputDoubleRegister(2).S(), i.MemoryOperand());
      break;
    case kArm64LdrD:
      __ Ldr(i.OutputDoubleRegister(), i.MemoryOperand());
      break;
    case kArm64StrD:
      __ Str(i.InputDoubleRegister(2), i.MemoryOperand());
      break;
    case kCheckedLoadInt8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrsb);
      break;
    case kCheckedLoadUint8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrb);
      break;
    case kCheckedLoadInt16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrsh);
      break;
    case kCheckedLoadUint16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrh);
      break;
    case kCheckedLoadWord32:
      ASSEMBLE_CHECKED_LOAD_INTEGER(Ldr);
      break;
    case kCheckedLoadWord64:
      ASSEMBLE_CHECKED_LOAD_INTEGER_64(Ldr);
      break;
    case kCheckedLoadFloat32:
      ASSEMBLE_CHECKED_LOAD_FLOAT(32);
      break;
    case kCheckedLoadFloat64:
      ASSEMBLE_CHECKED_LOAD_FLOAT(64);
      break;
    case kCheckedStoreWord8:
      ASSEMBLE_CHECKED_STORE_INTEGER(Strb);
      break;
    case kCheckedStoreWord16:
      ASSEMBLE_CHECKED_STORE_INTEGER(Strh);
      break;
    case kCheckedStoreWord32:
      ASSEMBLE_CHECKED_STORE_INTEGER(Str);
      break;
    case kCheckedStoreWord64:
      ASSEMBLE_CHECKED_STORE_INTEGER_64(Str);
      break;
    case kCheckedStoreFloat32:
      ASSEMBLE_CHECKED_STORE_FLOAT(32);
      break;
    case kCheckedStoreFloat64:
      ASSEMBLE_CHECKED_STORE_FLOAT(64);
      break;
  }
}  // NOLINT(readability/fn_size)


// Assemble branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  Arm64OperandConverter i(this, instr);
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  FlagsCondition condition = branch->condition;
  ArchOpcode opcode = instr->arch_opcode();

  if (opcode == kArm64CompareAndBranch32) {
    switch (condition) {
      case kEqual:
        __ Cbz(i.InputRegister32(0), tlabel);
        break;
      case kNotEqual:
        __ Cbnz(i.InputRegister32(0), tlabel);
        break;
      default:
        UNREACHABLE();
    }
  } else if (opcode == kArm64TestAndBranch32) {
    switch (condition) {
      case kEqual:
        __ Tbz(i.InputRegister32(0), i.InputInt5(1), tlabel);
        break;
      case kNotEqual:
        __ Tbnz(i.InputRegister32(0), i.InputInt5(1), tlabel);
        break;
      default:
        UNREACHABLE();
    }
  } else if (opcode == kArm64TestAndBranch) {
    switch (condition) {
      case kEqual:
        __ Tbz(i.InputRegister64(0), i.InputInt6(1), tlabel);
        break;
      case kNotEqual:
        __ Tbnz(i.InputRegister64(0), i.InputInt6(1), tlabel);
        break;
      default:
        UNREACHABLE();
    }
  } else {
    Condition cc = FlagsConditionToCondition(condition);
    __ B(cc, tlabel);
  }
  if (!branch->fallthru) __ B(flabel);  // no fallthru to flabel.
}


void CodeGenerator::AssembleArchJump(RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ B(GetLabel(target));
}


// Assemble boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  Arm64OperandConverter i(this, instr);

  // Materialize a full 64-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  DCHECK_NE(0u, instr->OutputCount());
  Register reg = i.OutputRegister(instr->OutputCount() - 1);
  Condition cc = FlagsConditionToCondition(condition);
  __ Cset(reg, cc);
}


void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
  Arm64OperandConverter i(this, instr);
  Register input = i.InputRegister32(0);
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    __ Cmp(input, i.InputInt32(index + 0));
    __ B(eq, GetLabel(i.InputRpo(index + 1)));
  }
  AssembleArchJump(i.InputRpo(1));
}


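// The table switch is emitted as an inline table of unconditional branches:
// every entry is a single 4-byte B instruction, which is why the index is
// scaled by 4 via Operand(input, UXTW, 2). Pool emission is blocked so that
// no constant or veneer pool can be placed in the middle of the table.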
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  Arm64OperandConverter i(this, instr);
  UseScratchRegisterScope scope(masm());
  Register input = i.InputRegister32(0);
  Register temp = scope.AcquireX();
  size_t const case_count = instr->InputCount() - 2;
  Label table;
  __ Cmp(input, case_count);
  __ B(hs, GetLabel(i.InputRpo(1)));
  __ Adr(temp, &table);
  __ Add(temp, temp, Operand(input, UXTW, 2));
  __ Br(temp);
  __ StartBlockPools();
  __ Bind(&table);
  for (size_t index = 0; index < case_count; ++index) {
    __ B(GetLabel(i.InputRpo(index + 2)));
  }
  __ EndBlockPools();
}


void CodeGenerator::AssembleDeoptimizerCall(
    int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, bailout_type);
  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}


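// Arm64 code runs on one of two stack pointers: csp for frames that use the
// native C stack and jssp for JavaScript frames. The prologue selects the
// stack pointer first, since every Claim/Push below goes through it.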
void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->IsCFunctionCall()) {
    __ SetStackPointer(csp);
    __ Push(lr, fp);
    __ Mov(fp, csp);
  } else if (descriptor->IsJSFunctionCall()) {
    __ SetStackPointer(jssp);
    __ Prologue(this->info()->GeneratePreagedPrologue());
  } else if (frame()->needs_frame()) {
    if (descriptor->UseNativeStack()) {
      __ SetStackPointer(csp);
    } else {
      __ SetStackPointer(jssp);
    }
    __ StubPrologue();
  } else {
    if (descriptor->UseNativeStack()) {
      __ SetStackPointer(csp);
    } else {
      __ SetStackPointer(jssp);
    }
    frame()->SetElidedFrameSizeInSlots(0);
  }
  frame_access_state()->SetFrameAccessToDefault();

  int stack_shrink_slots = frame()->GetSpillSlotCount();
  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the unoptimized
    // frame is still on the stack. Optimized code uses OSR values directly from
    // the unoptimized frame. Thus, all that needs to be done is to allocate the
    // remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    // TODO(titzer): cannot address target function == local #-1
    __ ldr(x1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
    stack_shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
  }

  // If frame()->needs_frame() is false, then
  // frame()->AlignSavedCalleeRegisterSlots() is guaranteed to return 0.
  if (csp.Is(masm()->StackPointer()) && frame()->needs_frame()) {
    // The system stack pointer requires 16-byte alignment at function call
    // boundaries.
    stack_shrink_slots += frame()->AlignSavedCalleeRegisterSlots();
  }
  __ Claim(stack_shrink_slots);

  // Save FP registers.
  CPURegList saves_fp = CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
                                   descriptor->CalleeSavedFPRegisters());
  int saved_count = saves_fp.Count();
  if (saved_count != 0) {
    DCHECK(saves_fp.list() == CPURegList::GetCalleeSavedFP().list());
    __ PushCPURegList(saves_fp);
    frame()->AllocateSavedCalleeRegisterSlots(saved_count *
                                              (kDoubleSize / kPointerSize));
  }
  // Save registers.
  // TODO(palfia): TF save list is not in sync with
  // CPURegList::GetCalleeSaved(): x30 is missing.
  // DCHECK(saves.list() == CPURegList::GetCalleeSaved().list());
  CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
                                descriptor->CalleeSavedRegisters());
  saved_count = saves.Count();
  if (saved_count != 0) {
    __ PushCPURegList(saves);
    frame()->AllocateSavedCalleeRegisterSlots(saved_count);
  }
}




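// Undoes AssemblePrologue: callee-saved general and FP registers are
// restored, the frame is dropped according to the frame kind in use (JS
// frames share one canonical return site via return_label_), and the stack
// parameters are popped before returning. On the native stack an odd pop
// count is rounded up to keep csp 16-byte aligned.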
void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();

  // Restore registers.
  CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
                                descriptor->CalleeSavedRegisters());
  if (saves.Count() != 0) {
    __ PopCPURegList(saves);
  }

  // Restore FP registers.
  CPURegList saves_fp = CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
                                   descriptor->CalleeSavedFPRegisters());
  if (saves_fp.Count() != 0) {
    __ PopCPURegList(saves_fp);
  }

  int pop_count = static_cast<int>(descriptor->StackParameterCount());
  if (descriptor->IsCFunctionCall()) {
    __ Mov(csp, fp);
    __ Pop(fp, lr);
  } else if (frame()->needs_frame()) {
    // Canonicalize JSFunction return sites for now.
    if (return_label_.is_bound()) {
      __ B(&return_label_);
      return;
    } else {
      __ Bind(&return_label_);
      if (descriptor->UseNativeStack()) {
        __ Mov(csp, fp);
      } else {
        __ Mov(jssp, fp);
      }
      __ Pop(fp, lr);
    }
  } else if (descriptor->UseNativeStack()) {
    pop_count += (pop_count & 1);
  }
  __ Drop(pop_count);
  __ Ret();
}


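// Emits a single move for the gap resolver, dispatching on the operand
// kinds: register, stack slot, constant, double register and double stack
// slot. Memory-to-memory moves go through a scratch register, since arm64
// has no direct memory-to-memory transfer.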
void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  Arm64OperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ Mov(g.ToRegister(destination), src);
    } else {
      __ Str(src, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsStackSlot()) {
    MemOperand src = g.ToMemOperand(source, masm());
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    if (destination->IsRegister()) {
      __ Ldr(g.ToRegister(destination), src);
    } else {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireX();
      __ Ldr(temp, src);
      __ Str(temp, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsConstant()) {
    Constant src = g.ToConstant(ConstantOperand::cast(source));
    if (destination->IsRegister() || destination->IsStackSlot()) {
      UseScratchRegisterScope scope(masm());
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                               : scope.AcquireX();
      if (src.type() == Constant::kHeapObject) {
        Handle<HeapObject> src_object = src.ToHeapObject();
        Heap::RootListIndex index;
        int offset;
        if (IsMaterializableFromFrame(src_object, &offset)) {
          __ Ldr(dst, MemOperand(fp, offset));
        } else if (IsMaterializableFromRoot(src_object, &index)) {
          __ LoadRoot(dst, index);
        } else {
          __ LoadObject(dst, src_object);
        }
      } else {
        __ Mov(dst, g.ToImmediate(source));
      }
      if (destination->IsStackSlot()) {
        __ Str(dst, g.ToMemOperand(destination, masm()));
      }
    } else if (src.type() == Constant::kFloat32) {
      if (destination->IsDoubleRegister()) {
        FPRegister dst = g.ToDoubleRegister(destination).S();
        __ Fmov(dst, src.ToFloat32());
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        UseScratchRegisterScope scope(masm());
        FPRegister temp = scope.AcquireS();
        __ Fmov(temp, src.ToFloat32());
        __ Str(temp, g.ToMemOperand(destination, masm()));
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      if (destination->IsDoubleRegister()) {
        FPRegister dst = g.ToDoubleRegister(destination);
        __ Fmov(dst, src.ToFloat64());
      } else {
        DCHECK(destination->IsDoubleStackSlot());
        UseScratchRegisterScope scope(masm());
        FPRegister temp = scope.AcquireD();
        __ Fmov(temp, src.ToFloat64());
        __ Str(temp, g.ToMemOperand(destination, masm()));
      }
    }
  } else if (source->IsDoubleRegister()) {
    FPRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      FPRegister dst = g.ToDoubleRegister(destination);
      __ Fmov(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      __ Str(src, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    MemOperand src = g.ToMemOperand(source, masm());
    if (destination->IsDoubleRegister()) {
      __ Ldr(g.ToDoubleRegister(destination), src);
    } else {
      UseScratchRegisterScope scope(masm());
      FPRegister temp = scope.AcquireD();
      __ Ldr(temp, src);
      __ Str(temp, g.ToMemOperand(destination, masm()));
    }
  } else {
    UNREACHABLE();
  }
}


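// Exchanges the contents of two operands in place. Register/register and
// register/slot swaps use one scratch register; slot/slot swaps load both
// values into two scratch D registers and store them back crosswise (a D
// register is wide enough for either a tagged word or a double).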
void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  Arm64OperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    UseScratchRegisterScope scope(masm());
    Register temp = scope.AcquireX();
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ Mov(temp, src);
      __ Mov(src, dst);
      __ Mov(dst, temp);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination, masm());
      __ Mov(temp, src);
      __ Ldr(src, dst);
      __ Str(temp, dst);
    }
  } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
    UseScratchRegisterScope scope(masm());
    DoubleRegister temp_0 = scope.AcquireD();
    DoubleRegister temp_1 = scope.AcquireD();
    MemOperand src = g.ToMemOperand(source, masm());
    MemOperand dst = g.ToMemOperand(destination, masm());
    __ Ldr(temp_0, src);
    __ Ldr(temp_1, dst);
    __ Str(temp_0, dst);
    __ Str(temp_1, src);
  } else if (source->IsDoubleRegister()) {
    UseScratchRegisterScope scope(masm());
    FPRegister temp = scope.AcquireD();
    FPRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      FPRegister dst = g.ToDoubleRegister(destination);
      __ Fmov(temp, src);
      __ Fmov(src, dst);
      __ Fmov(dst, temp);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      MemOperand dst = g.ToMemOperand(destination, masm());
      __ Fmov(temp, src);
      __ Ldr(src, dst);
      __ Str(temp, dst);
    }
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}


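// The jump table is emitted inline by the table-switch code above, bracketed
// by StartBlockPools()/EndBlockPools() so that no constant pool can be
// emitted in the middle of the table. This out-of-line hook must therefore
// never be reached on arm64.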
void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
  // On 64-bit ARM we emit the jump tables inline.
  UNREACHABLE();
}


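// Writing to xzr makes movz xzr, #0 an architectural no-op, yet its encoding
// differs from the canonical nop, which presumably lets the Smi-inlining
// patcher recognize the marker later.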
void CodeGenerator::AddNopForSmiCodeInlining() { __ movz(xzr, 0); }


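// Pads the instruction stream with nops until the current pc is at least
// Deoptimizer::patch_size() bytes past the previous lazy-deopt point, so
// that the previous lazy-bailout site leaves enough room for the
// deoptimizer's patch.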
void CodeGenerator::EnsureSpaceForLazyDeopt() {
  if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
    return;
  }

  int space_needed = Deoptimizer::patch_size();
  // Ensure that we have enough space after the previous lazy-bailout
  // instruction for patching the code here.
  intptr_t current_pc = masm()->pc_offset();

  if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
    intptr_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
    DCHECK((padding_size % kInstructionSize) == 0);
    InstructionAccurateScope instruction_accurate(
        masm(), padding_size / kInstructionSize);

    while (padding_size > 0) {
      __ nop();
      padding_size -= kInstructionSize;
    }
  }
}
#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8