1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/compiler/code-generator.h"
6
7 #include "src/arm64/frames-arm64.h"
8 #include "src/arm64/macro-assembler-arm64.h"
9 #include "src/compilation-info.h"
10 #include "src/compiler/code-generator-impl.h"
11 #include "src/compiler/gap-resolver.h"
12 #include "src/compiler/node-matchers.h"
13 #include "src/compiler/osr.h"
14
15 namespace v8 {
16 namespace internal {
17 namespace compiler {
18
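// The "__" macro below routes all pseudo-assembler calls through the code
// generator's MacroAssembler.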
19 #define __ masm()->
20
21
22 // Adds Arm64-specific methods to convert InstructionOperands.
23 class Arm64OperandConverter final : public InstructionOperandConverter {
24 public:
25 Arm64OperandConverter(CodeGenerator* gen, Instruction* instr)
26 : InstructionOperandConverter(gen, instr) {}
27
28 DoubleRegister InputFloat32Register(size_t index) {
29 return InputDoubleRegister(index).S();
30 }
31
32 DoubleRegister InputFloat64Register(size_t index) {
33 return InputDoubleRegister(index);
34 }
35
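// The *OrZero accessors below return the zero register (wzr/xzr) when the
// input operand is the immediate zero, so stores of zero never need to
// materialize the constant in a register.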
36 CPURegister InputFloat32OrZeroRegister(size_t index) {
37 if (instr_->InputAt(index)->IsImmediate()) {
38 DCHECK(bit_cast<int32_t>(InputFloat32(index)) == 0);
39 return wzr;
40 }
41 DCHECK(instr_->InputAt(index)->IsFPRegister());
42 return InputDoubleRegister(index).S();
43 }
44
45 CPURegister InputFloat64OrZeroRegister(size_t index) {
46 if (instr_->InputAt(index)->IsImmediate()) {
47 DCHECK(bit_cast<int64_t>(InputDouble(index)) == 0);
48 return xzr;
49 }
50 DCHECK(instr_->InputAt(index)->IsDoubleRegister());
51 return InputDoubleRegister(index);
52 }
53
54 size_t OutputCount() { return instr_->OutputCount(); }
55
56 DoubleRegister OutputFloat32Register() { return OutputDoubleRegister().S(); }
57
58 DoubleRegister OutputFloat64Register() { return OutputDoubleRegister(); }
59
60 Register InputRegister32(size_t index) {
61 return ToRegister(instr_->InputAt(index)).W();
62 }
63
64 Register InputOrZeroRegister32(size_t index) {
65 DCHECK(instr_->InputAt(index)->IsRegister() ||
66 (instr_->InputAt(index)->IsImmediate() && (InputInt32(index) == 0)));
67 if (instr_->InputAt(index)->IsImmediate()) {
68 return wzr;
69 }
70 return InputRegister32(index);
71 }
72
73 Register InputRegister64(size_t index) { return InputRegister(index); }
74
75 Register InputOrZeroRegister64(size_t index) {
76 DCHECK(instr_->InputAt(index)->IsRegister() ||
77 (instr_->InputAt(index)->IsImmediate() && (InputInt64(index) == 0)));
78 if (instr_->InputAt(index)->IsImmediate()) {
79 return xzr;
80 }
81 return InputRegister64(index);
82 }
83
84 Operand InputImmediate(size_t index) {
85 return ToImmediate(instr_->InputAt(index));
86 }
87
88 Operand InputOperand(size_t index) {
89 return ToOperand(instr_->InputAt(index));
90 }
91
92 Operand InputOperand64(size_t index) { return InputOperand(index); }
93
94 Operand InputOperand32(size_t index) {
95 return ToOperand32(instr_->InputAt(index));
96 }
97
98 Register OutputRegister64() { return OutputRegister(); }
99
100 Register OutputRegister32() { return ToRegister(instr_->Output()).W(); }
101
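// Decodes the addressing mode encoded in the opcode into a second operand
// (shifted or extended register) for 32-bit instructions.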
102 Operand InputOperand2_32(size_t index) {
103 switch (AddressingModeField::decode(instr_->opcode())) {
104 case kMode_None:
105 return InputOperand32(index);
106 case kMode_Operand2_R_LSL_I:
107 return Operand(InputRegister32(index), LSL, InputInt5(index + 1));
108 case kMode_Operand2_R_LSR_I:
109 return Operand(InputRegister32(index), LSR, InputInt5(index + 1));
110 case kMode_Operand2_R_ASR_I:
111 return Operand(InputRegister32(index), ASR, InputInt5(index + 1));
112 case kMode_Operand2_R_ROR_I:
113 return Operand(InputRegister32(index), ROR, InputInt5(index + 1));
114 case kMode_Operand2_R_UXTB:
115 return Operand(InputRegister32(index), UXTB);
116 case kMode_Operand2_R_UXTH:
117 return Operand(InputRegister32(index), UXTH);
118 case kMode_Operand2_R_SXTB:
119 return Operand(InputRegister32(index), SXTB);
120 case kMode_Operand2_R_SXTH:
121 return Operand(InputRegister32(index), SXTH);
122 case kMode_Operand2_R_SXTW:
123 return Operand(InputRegister32(index), SXTW);
124 case kMode_MRI:
125 case kMode_MRR:
126 break;
127 }
128 UNREACHABLE();
129 return Operand(-1);
130 }
131
132 Operand InputOperand2_64(size_t index) {
133 switch (AddressingModeField::decode(instr_->opcode())) {
134 case kMode_None:
135 return InputOperand64(index);
136 case kMode_Operand2_R_LSL_I:
137 return Operand(InputRegister64(index), LSL, InputInt6(index + 1));
138 case kMode_Operand2_R_LSR_I:
139 return Operand(InputRegister64(index), LSR, InputInt6(index + 1));
140 case kMode_Operand2_R_ASR_I:
141 return Operand(InputRegister64(index), ASR, InputInt6(index + 1));
142 case kMode_Operand2_R_ROR_I:
143 return Operand(InputRegister64(index), ROR, InputInt6(index + 1));
144 case kMode_Operand2_R_UXTB:
145 return Operand(InputRegister64(index), UXTB);
146 case kMode_Operand2_R_UXTH:
147 return Operand(InputRegister64(index), UXTH);
148 case kMode_Operand2_R_SXTB:
149 return Operand(InputRegister64(index), SXTB);
150 case kMode_Operand2_R_SXTH:
151 return Operand(InputRegister64(index), SXTH);
152 case kMode_Operand2_R_SXTW:
153 return Operand(InputRegister64(index), SXTW);
154 case kMode_MRI:
155 case kMode_MRR:
156 break;
157 }
158 UNREACHABLE();
159 return Operand(-1);
160 }
161
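// Builds a MemOperand from the instruction inputs starting at *first_index
// and advances *first_index past the inputs that were consumed.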
162 MemOperand MemoryOperand(size_t* first_index) {
163 const size_t index = *first_index;
164 switch (AddressingModeField::decode(instr_->opcode())) {
165 case kMode_None:
166 case kMode_Operand2_R_LSR_I:
167 case kMode_Operand2_R_ASR_I:
168 case kMode_Operand2_R_ROR_I:
169 case kMode_Operand2_R_UXTB:
170 case kMode_Operand2_R_UXTH:
171 case kMode_Operand2_R_SXTB:
172 case kMode_Operand2_R_SXTH:
173 case kMode_Operand2_R_SXTW:
174 break;
175 case kMode_Operand2_R_LSL_I:
176 *first_index += 3;
177 return MemOperand(InputRegister(index + 0), InputRegister(index + 1),
178 LSL, InputInt32(index + 2));
179 case kMode_MRI:
180 *first_index += 2;
181 return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
182 case kMode_MRR:
183 *first_index += 2;
184 return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
185 }
186 UNREACHABLE();
187 return MemOperand(no_reg);
188 }
189
190 MemOperand MemoryOperand(size_t first_index = 0) {
191 return MemoryOperand(&first_index);
192 }
193
194 Operand ToOperand(InstructionOperand* op) {
195 if (op->IsRegister()) {
196 return Operand(ToRegister(op));
197 }
198 return ToImmediate(op);
199 }
200
201 Operand ToOperand32(InstructionOperand* op) {
202 if (op->IsRegister()) {
203 return Operand(ToRegister(op).W());
204 }
205 return ToImmediate(op);
206 }
207
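// Converts a constant operand into an assembler Operand. WebAssembly memory,
// global and memory-size references keep their relocation mode so they can
// be patched later.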
208 Operand ToImmediate(InstructionOperand* operand) {
209 Constant constant = ToConstant(operand);
210 switch (constant.type()) {
211 case Constant::kInt32:
212 if (constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
213 return Operand(constant.ToInt32(), constant.rmode());
214 } else {
215 return Operand(constant.ToInt32());
216 }
217 case Constant::kInt64:
218 if (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
219 constant.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
220 return Operand(constant.ToInt64(), constant.rmode());
221 } else {
222 DCHECK(constant.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
223 return Operand(constant.ToInt64());
224 }
225 case Constant::kFloat32:
226 return Operand(
227 isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
228 case Constant::kFloat64:
229 return Operand(
230 isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
231 case Constant::kExternalReference:
232 return Operand(constant.ToExternalReference());
233 case Constant::kHeapObject:
234 return Operand(constant.ToHeapObject());
235 case Constant::kRpoNumber:
236 UNREACHABLE(); // TODO(dcarney): RPO immediates on arm64.
237 break;
238 }
239 UNREACHABLE();
240 return Operand(-1);
241 }
242
243 MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const {
244 DCHECK_NOT_NULL(op);
245 DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
246 return SlotToMemOperand(AllocatedOperand::cast(op)->index(), masm);
247 }
248
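// Maps an allocated spill slot index to an FP- or SP-relative MemOperand.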
249 MemOperand SlotToMemOperand(int slot, MacroAssembler* masm) const {
250 FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
251 if (offset.from_frame_pointer()) {
252 int from_sp = offset.offset() + frame_access_state()->GetSPToFPOffset();
253 // Convert FP-offsets to SP-offsets if it results in better code.
254 if (Assembler::IsImmLSUnscaled(from_sp) ||
255 Assembler::IsImmLSScaled(from_sp, LSDoubleWord)) {
256 offset = FrameOffset::FromStackPointer(from_sp);
257 }
258 }
259 return MemOperand(offset.from_stack_pointer() ? masm->StackPointer() : fp,
260 offset.offset());
261 }
262 };
263
264
265 namespace {
266
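// Out-of-line targets for the checked load macros below: they materialize a
// quiet NaN (float loads) or zero (integer loads) when the access is out of
// bounds.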
267 class OutOfLineLoadNaN32 final : public OutOfLineCode {
268 public:
269 OutOfLineLoadNaN32(CodeGenerator* gen, DoubleRegister result)
270 : OutOfLineCode(gen), result_(result) {}
271
272 void Generate() final {
273 __ Fmov(result_, std::numeric_limits<float>::quiet_NaN());
274 }
275
276 private:
277 DoubleRegister const result_;
278 };
279
280
281 class OutOfLineLoadNaN64 final : public OutOfLineCode {
282 public:
283 OutOfLineLoadNaN64(CodeGenerator* gen, DoubleRegister result)
284 : OutOfLineCode(gen), result_(result) {}
285
286 void Generate() final {
287 __ Fmov(result_, std::numeric_limits<double>::quiet_NaN());
288 }
289
290 private:
291 DoubleRegister const result_;
292 };
293
294
295 class OutOfLineLoadZero final : public OutOfLineCode {
296 public:
297 OutOfLineLoadZero(CodeGenerator* gen, Register result)
298 : OutOfLineCode(gen), result_(result) {}
299
300 void Generate() final { __ Mov(result_, 0); }
301
302 private:
303 Register const result_;
304 };
305
306
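// Out-of-line part of the write barrier: skips Smi values, filters on the
// page flags of the stored value and finally calls the RecordWriteStub. The
// link register is saved and restored manually when no frame has been set up.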
307 class OutOfLineRecordWrite final : public OutOfLineCode {
308 public:
309 OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand index,
310 Register value, Register scratch0, Register scratch1,
311 RecordWriteMode mode,
312 UnwindingInfoWriter* unwinding_info_writer)
313 : OutOfLineCode(gen),
314 object_(object),
315 index_(index),
316 value_(value),
317 scratch0_(scratch0),
318 scratch1_(scratch1),
319 mode_(mode),
320 must_save_lr_(!gen->frame_access_state()->has_frame()),
321 unwinding_info_writer_(unwinding_info_writer) {}
322
323 void Generate() final {
324 if (mode_ > RecordWriteMode::kValueIsPointer) {
325 __ JumpIfSmi(value_, exit());
326 }
327 __ CheckPageFlagClear(value_, scratch0_,
328 MemoryChunk::kPointersToHereAreInterestingMask,
329 exit());
330 RememberedSetAction const remembered_set_action =
331 mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
332 : OMIT_REMEMBERED_SET;
333 SaveFPRegsMode const save_fp_mode =
334 frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
335 if (must_save_lr_) {
336 // We need to save and restore lr if the frame was elided.
337 __ Push(lr);
338 unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset(),
339 __ StackPointer());
340 }
341 RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
342 remembered_set_action, save_fp_mode);
343 __ Add(scratch1_, object_, index_);
344 __ CallStub(&stub);
345 if (must_save_lr_) {
346 __ Pop(lr);
347 unwinding_info_writer_->MarkPopLinkRegisterFromTopOfStack(__ pc_offset());
348 }
349 }
350
351 private:
352 Register const object_;
353 Operand const index_;
354 Register const value_;
355 Register const scratch0_;
356 Register const scratch1_;
357 RecordWriteMode const mode_;
358 bool must_save_lr_;
359 UnwindingInfoWriter* const unwinding_info_writer_;
360 };
361
362
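// Maps machine-independent FlagsCondition values onto ARM64 condition codes.
// The floating-point conditions rely on how Fcmp sets the flags for
// unordered (NaN) operands.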
363 Condition FlagsConditionToCondition(FlagsCondition condition) {
364 switch (condition) {
365 case kEqual:
366 return eq;
367 case kNotEqual:
368 return ne;
369 case kSignedLessThan:
370 return lt;
371 case kSignedGreaterThanOrEqual:
372 return ge;
373 case kSignedLessThanOrEqual:
374 return le;
375 case kSignedGreaterThan:
376 return gt;
377 case kUnsignedLessThan:
378 return lo;
379 case kUnsignedGreaterThanOrEqual:
380 return hs;
381 case kUnsignedLessThanOrEqual:
382 return ls;
383 case kUnsignedGreaterThan:
384 return hi;
385 case kFloatLessThanOrUnordered:
386 return lt;
387 case kFloatGreaterThanOrEqual:
388 return ge;
389 case kFloatLessThanOrEqual:
390 return ls;
391 case kFloatGreaterThanOrUnordered:
392 return hi;
393 case kFloatLessThan:
394 return lo;
395 case kFloatGreaterThanOrEqualOrUnordered:
396 return hs;
397 case kFloatLessThanOrEqualOrUnordered:
398 return le;
399 case kFloatGreaterThan:
400 return gt;
401 case kOverflow:
402 return vs;
403 case kNotOverflow:
404 return vc;
405 case kUnorderedEqual:
406 case kUnorderedNotEqual:
407 break;
408 case kPositiveOrZero:
409 return pl;
410 case kNegative:
411 return mi;
412 }
413 UNREACHABLE();
414 return nv;
415 }
416
417 } // namespace
418
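// Branches to out_of_bounds unless offset < length. When length is a power
// of two the check collapses into a single Tst against the inverted mask.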
419 #define ASSEMBLE_BOUNDS_CHECK(offset, length, out_of_bounds) \
420 do { \
421 if (length.IsImmediate() && \
422 base::bits::IsPowerOfTwo64(length.ImmediateValue())) { \
423 __ Tst(offset, ~(length.ImmediateValue() - 1)); \
424 __ B(ne, out_of_bounds); \
425 } else { \
426 __ Cmp(offset, length); \
427 __ B(hs, out_of_bounds); \
428 } \
429 } while (0)
430
431 #define ASSEMBLE_CHECKED_LOAD_FLOAT(width) \
432 do { \
433 auto result = i.OutputFloat##width##Register(); \
434 auto buffer = i.InputRegister(0); \
435 auto offset = i.InputRegister32(1); \
436 auto length = i.InputOperand32(2); \
437 auto ool = new (zone()) OutOfLineLoadNaN##width(this, result); \
438 ASSEMBLE_BOUNDS_CHECK(offset, length, ool->entry()); \
439 __ Ldr(result, MemOperand(buffer, offset, UXTW)); \
440 __ Bind(ool->exit()); \
441 } while (0)
442
443 #define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
444 do { \
445 auto result = i.OutputRegister32(); \
446 auto buffer = i.InputRegister(0); \
447 auto offset = i.InputRegister32(1); \
448 auto length = i.InputOperand32(2); \
449 auto ool = new (zone()) OutOfLineLoadZero(this, result); \
450 ASSEMBLE_BOUNDS_CHECK(offset, length, ool->entry()); \
451 __ asm_instr(result, MemOperand(buffer, offset, UXTW)); \
452 __ Bind(ool->exit()); \
453 } while (0)
454
455 #define ASSEMBLE_CHECKED_LOAD_INTEGER_64(asm_instr) \
456 do { \
457 auto result = i.OutputRegister(); \
458 auto buffer = i.InputRegister(0); \
459 auto offset = i.InputRegister32(1); \
460 auto length = i.InputOperand32(2); \
461 auto ool = new (zone()) OutOfLineLoadZero(this, result); \
462 ASSEMBLE_BOUNDS_CHECK(offset, length, ool->entry()); \
463 __ asm_instr(result, MemOperand(buffer, offset, UXTW)); \
464 __ Bind(ool->exit()); \
465 } while (0)
466
467 #define ASSEMBLE_CHECKED_STORE_FLOAT(width) \
468 do { \
469 auto buffer = i.InputRegister(0); \
470 auto offset = i.InputRegister32(1); \
471 auto length = i.InputOperand32(2); \
472 auto value = i.InputFloat##width##OrZeroRegister(3); \
473 Label done; \
474 ASSEMBLE_BOUNDS_CHECK(offset, length, &done); \
475 __ Str(value, MemOperand(buffer, offset, UXTW)); \
476 __ Bind(&done); \
477 } while (0)
478
479 #define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
480 do { \
481 auto buffer = i.InputRegister(0); \
482 auto offset = i.InputRegister32(1); \
483 auto length = i.InputOperand32(2); \
484 auto value = i.InputOrZeroRegister32(3); \
485 Label done; \
486 ASSEMBLE_BOUNDS_CHECK(offset, length, &done); \
487 __ asm_instr(value, MemOperand(buffer, offset, UXTW)); \
488 __ Bind(&done); \
489 } while (0)
490
491 #define ASSEMBLE_CHECKED_STORE_INTEGER_64(asm_instr) \
492 do { \
493 auto buffer = i.InputRegister(0); \
494 auto offset = i.InputRegister32(1); \
495 auto length = i.InputOperand32(2); \
496 auto value = i.InputOrZeroRegister64(3); \
497 Label done; \
498 ASSEMBLE_BOUNDS_CHECK(offset, length, &done); \
499 __ asm_instr(value, MemOperand(buffer, offset, UXTW)); \
500 __ Bind(&done); \
501 } while (0)
502
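// Register-specified shift amounts are passed through unchanged; immediate
// shift amounts are reduced modulo the operand width, matching the hardware
// behaviour.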
503 #define ASSEMBLE_SHIFT(asm_instr, width) \
504 do { \
505 if (instr->InputAt(1)->IsRegister()) { \
506 __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), \
507 i.InputRegister##width(1)); \
508 } else { \
509 uint32_t imm = \
510 static_cast<uint32_t>(i.InputOperand##width(1).ImmediateValue()); \
511 __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), \
512 imm % (width)); \
513 } \
514 } while (0)
515
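// Sequentially consistent atomics: a full data memory barrier (DMB ISH)
// follows every atomic load and surrounds every atomic store.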
516 #define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
517 do { \
518 __ asm_instr(i.OutputRegister(), \
519 MemOperand(i.InputRegister(0), i.InputRegister(1))); \
520 __ Dmb(InnerShareable, BarrierAll); \
521 } while (0)
522
523 #define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \
524 do { \
525 __ Dmb(InnerShareable, BarrierAll); \
526 __ asm_instr(i.InputRegister(2), \
527 MemOperand(i.InputRegister(0), i.InputRegister(1))); \
528 __ Dmb(InnerShareable, BarrierAll); \
529 } while (0)
530
531 #define ASSEMBLE_IEEE754_BINOP(name) \
532 do { \
533 FrameScope scope(masm(), StackFrame::MANUAL); \
534 __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
535 0, 2); \
536 } while (0)
537
538 #define ASSEMBLE_IEEE754_UNOP(name) \
539 do { \
540 FrameScope scope(masm(), StackFrame::MANUAL); \
541 __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
542 0, 1); \
543 } while (0)
544
545 void CodeGenerator::AssembleDeconstructFrame() {
546 const CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
547 if (descriptor->IsCFunctionCall() || descriptor->UseNativeStack()) {
548 __ Mov(csp, fp);
549 } else {
550 __ Mov(jssp, fp);
551 }
552 __ Pop(fp, lr);
553
554 unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
555 }
556
557 void CodeGenerator::AssemblePrepareTailCall() {
558 if (frame_access_state()->has_frame()) {
559 __ Ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
560 __ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
561 }
562 frame_access_state()->SetFrameAccessToSP();
563 }
564
565 void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
566 Register scratch1,
567 Register scratch2,
568 Register scratch3) {
569 DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
570 Label done;
571
572 // Check if current frame is an arguments adaptor frame.
573 __ Ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
574 __ Cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
575 __ B(ne, &done);
576
577 // Load arguments count from current arguments adaptor frame (note that
578 // it does not include the receiver).
579 Register caller_args_count_reg = scratch1;
580 __ Ldr(caller_args_count_reg,
581 MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
582 __ SmiUntag(caller_args_count_reg);
583
584 ParameterCount callee_args_count(args_reg);
585 __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
586 scratch3);
587 __ bind(&done);
588 }
589
590 namespace {
591
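// Adjusts the stack pointer so that the slot layout expected by the callee
// lines up with the current stack pointer; the stack may always grow, but it
// only shrinks when allow_shrinkage is set.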
592 void AdjustStackPointerForTailCall(MacroAssembler* masm,
593 FrameAccessState* state,
594 int new_slot_above_sp,
595 bool allow_shrinkage = true) {
596 int current_sp_offset = state->GetSPToFPSlotCount() +
597 StandardFrameConstants::kFixedSlotCountAboveFp;
598 int stack_slot_delta = new_slot_above_sp - current_sp_offset;
599 if (stack_slot_delta > 0) {
600 masm->Claim(stack_slot_delta);
601 state->IncreaseSPDelta(stack_slot_delta);
602 } else if (allow_shrinkage && stack_slot_delta < 0) {
603 masm->Drop(-stack_slot_delta);
604 state->IncreaseSPDelta(stack_slot_delta);
605 }
606 }
607
608 } // namespace
609
610 void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
611 int first_unused_stack_slot) {
612 AdjustStackPointerForTailCall(masm(), frame_access_state(),
613 first_unused_stack_slot, false);
614 }
615
616 void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
617 int first_unused_stack_slot) {
618 AdjustStackPointerForTailCall(masm(), frame_access_state(),
619 first_unused_stack_slot);
620 }
621
622 // Assembles an instruction after register allocation, producing machine code.
623 CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
624 Instruction* instr) {
625 Arm64OperandConverter i(this, instr);
626 InstructionCode opcode = instr->opcode();
627 ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
628 switch (arch_opcode) {
629 case kArchCallCodeObject: {
630 EnsureSpaceForLazyDeopt();
631 if (instr->InputAt(0)->IsImmediate()) {
632 __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
633 RelocInfo::CODE_TARGET);
634 } else {
635 Register target = i.InputRegister(0);
636 __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
637 __ Call(target);
638 }
639 RecordCallPosition(instr);
640 // TODO(titzer): this is ugly. JSSP should be a caller-save register
641 // in this case, but it is not possible to express in the register
642 // allocator.
643 CallDescriptor::Flags flags(MiscField::decode(opcode));
644 if (flags & CallDescriptor::kRestoreJSSP) {
645 __ Ldr(jssp, MemOperand(csp));
646 __ Mov(csp, jssp);
647 }
648 if (flags & CallDescriptor::kRestoreCSP) {
649 __ Mov(csp, jssp);
650 __ AssertCspAligned();
651 }
652 frame_access_state()->ClearSPDelta();
653 break;
654 }
655 case kArchTailCallCodeObjectFromJSFunction:
656 case kArchTailCallCodeObject: {
657 if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
658 AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
659 i.TempRegister(0), i.TempRegister(1),
660 i.TempRegister(2));
661 }
662 if (instr->InputAt(0)->IsImmediate()) {
663 __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
664 RelocInfo::CODE_TARGET);
665 } else {
666 Register target = i.InputRegister(0);
667 __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
668 __ Jump(target);
669 }
670 unwinding_info_writer_.MarkBlockWillExit();
671 frame_access_state()->ClearSPDelta();
672 frame_access_state()->SetFrameAccessToDefault();
673 break;
674 }
675 case kArchTailCallAddress: {
676 CHECK(!instr->InputAt(0)->IsImmediate());
677 __ Jump(i.InputRegister(0));
678 unwinding_info_writer_.MarkBlockWillExit();
679 frame_access_state()->ClearSPDelta();
680 frame_access_state()->SetFrameAccessToDefault();
681 break;
682 }
683 case kArchCallJSFunction: {
684 EnsureSpaceForLazyDeopt();
685 Register func = i.InputRegister(0);
686 if (FLAG_debug_code) {
687 // Check the function's context matches the context argument.
688 UseScratchRegisterScope scope(masm());
689 Register temp = scope.AcquireX();
690 __ Ldr(temp, FieldMemOperand(func, JSFunction::kContextOffset));
691 __ cmp(cp, temp);
692 __ Assert(eq, kWrongFunctionContext);
693 }
694 __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
695 __ Call(x10);
696 RecordCallPosition(instr);
697 // TODO(titzer): this is ugly. JSSP should be a caller-save register
698 // in this case, but it is not possible to express in the register
699 // allocator.
700 CallDescriptor::Flags flags(MiscField::decode(opcode));
701 if (flags & CallDescriptor::kRestoreJSSP) {
702 __ Ldr(jssp, MemOperand(csp));
703 __ Mov(csp, jssp);
704 }
705 if (flags & CallDescriptor::kRestoreCSP) {
706 __ Mov(csp, jssp);
707 __ AssertCspAligned();
708 }
709 frame_access_state()->ClearSPDelta();
710 break;
711 }
712 case kArchTailCallJSFunctionFromJSFunction: {
713 Register func = i.InputRegister(0);
714 if (FLAG_debug_code) {
715 // Check the function's context matches the context argument.
716 UseScratchRegisterScope scope(masm());
717 Register temp = scope.AcquireX();
718 __ Ldr(temp, FieldMemOperand(func, JSFunction::kContextOffset));
719 __ cmp(cp, temp);
720 __ Assert(eq, kWrongFunctionContext);
721 }
722 AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
723 i.TempRegister(0), i.TempRegister(1),
724 i.TempRegister(2));
725 __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
726 __ Jump(x10);
727 frame_access_state()->ClearSPDelta();
728 frame_access_state()->SetFrameAccessToDefault();
729 break;
730 }
731 case kArchPrepareCallCFunction:
732 // We don't need kArchPrepareCallCFunction on arm64 as the instruction
733 // selector has already performed a Claim to reserve space on the stack
734 // and to guarantee correct alignment of the stack pointer.
735 UNREACHABLE();
736 break;
737 case kArchPrepareTailCall:
738 AssemblePrepareTailCall();
739 break;
740 case kArchCallCFunction: {
741 int const num_parameters = MiscField::decode(instr->opcode());
742 if (instr->InputAt(0)->IsImmediate()) {
743 ExternalReference ref = i.InputExternalReference(0);
744 __ CallCFunction(ref, num_parameters, 0);
745 } else {
746 Register func = i.InputRegister(0);
747 __ CallCFunction(func, num_parameters, 0);
748 }
749 // CallCFunction only supports register arguments so we never need to call
750 // frame()->ClearOutgoingParameterSlots() here.
751 DCHECK(frame_access_state()->sp_delta() == 0);
752 break;
753 }
754 case kArchJmp:
755 AssembleArchJump(i.InputRpo(0));
756 break;
757 case kArchTableSwitch:
758 AssembleArchTableSwitch(instr);
759 break;
760 case kArchLookupSwitch:
761 AssembleArchLookupSwitch(instr);
762 break;
763 case kArchDebugBreak:
764 __ Debug("kArchDebugBreak", 0, BREAK);
765 break;
766 case kArchComment: {
767 Address comment_string = i.InputExternalReference(0).address();
768 __ RecordComment(reinterpret_cast<const char*>(comment_string));
769 break;
770 }
771 case kArchNop:
772 case kArchThrowTerminator:
773 // don't emit code for nops.
774 break;
775 case kArchDeoptimize: {
776 int deopt_state_id =
777 BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
778 Deoptimizer::BailoutType bailout_type =
779 Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
780 CodeGenResult result = AssembleDeoptimizerCall(
781 deopt_state_id, bailout_type, current_source_position_);
782 if (result != kSuccess) return result;
783 break;
784 }
785 case kArchRet:
786 AssembleReturn(instr->InputAt(0));
787 break;
788 case kArchStackPointer:
789 __ mov(i.OutputRegister(), masm()->StackPointer());
790 break;
791 case kArchFramePointer:
792 __ mov(i.OutputRegister(), fp);
793 break;
794 case kArchParentFramePointer:
795 if (frame_access_state()->has_frame()) {
796 __ ldr(i.OutputRegister(), MemOperand(fp, 0));
797 } else {
798 __ mov(i.OutputRegister(), fp);
799 }
800 break;
801 case kArchTruncateDoubleToI:
802 __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
803 break;
804 case kArchStoreWithWriteBarrier: {
805 RecordWriteMode mode =
806 static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
807 AddressingMode addressing_mode =
808 AddressingModeField::decode(instr->opcode());
809 Register object = i.InputRegister(0);
810 Operand index(0);
811 if (addressing_mode == kMode_MRI) {
812 index = Operand(i.InputInt64(1));
813 } else {
814 DCHECK_EQ(addressing_mode, kMode_MRR);
815 index = Operand(i.InputRegister(1));
816 }
817 Register value = i.InputRegister(2);
818 Register scratch0 = i.TempRegister(0);
819 Register scratch1 = i.TempRegister(1);
820 auto ool = new (zone())
821 OutOfLineRecordWrite(this, object, index, value, scratch0, scratch1,
822 mode, &unwinding_info_writer_);
823 __ Str(value, MemOperand(object, index));
824 __ CheckPageFlagSet(object, scratch0,
825 MemoryChunk::kPointersFromHereAreInterestingMask,
826 ool->entry());
827 __ Bind(ool->exit());
828 break;
829 }
830 case kArchStackSlot: {
831 FrameOffset offset =
832 frame_access_state()->GetFrameOffset(i.InputInt32(0));
833 Register base;
834 if (offset.from_stack_pointer()) {
835 base = __ StackPointer();
836 } else {
837 base = fp;
838 }
839 __ Add(i.OutputRegister(0), base, Operand(offset.offset()));
840 break;
841 }
842 case kIeee754Float64Acos:
843 ASSEMBLE_IEEE754_UNOP(acos);
844 break;
845 case kIeee754Float64Acosh:
846 ASSEMBLE_IEEE754_UNOP(acosh);
847 break;
848 case kIeee754Float64Asin:
849 ASSEMBLE_IEEE754_UNOP(asin);
850 break;
851 case kIeee754Float64Asinh:
852 ASSEMBLE_IEEE754_UNOP(asinh);
853 break;
854 case kIeee754Float64Atan:
855 ASSEMBLE_IEEE754_UNOP(atan);
856 break;
857 case kIeee754Float64Atanh:
858 ASSEMBLE_IEEE754_UNOP(atanh);
859 break;
860 case kIeee754Float64Atan2:
861 ASSEMBLE_IEEE754_BINOP(atan2);
862 break;
863 case kIeee754Float64Cos:
864 ASSEMBLE_IEEE754_UNOP(cos);
865 break;
866 case kIeee754Float64Cosh:
867 ASSEMBLE_IEEE754_UNOP(cosh);
868 break;
869 case kIeee754Float64Cbrt:
870 ASSEMBLE_IEEE754_UNOP(cbrt);
871 break;
872 case kIeee754Float64Exp:
873 ASSEMBLE_IEEE754_UNOP(exp);
874 break;
875 case kIeee754Float64Expm1:
876 ASSEMBLE_IEEE754_UNOP(expm1);
877 break;
878 case kIeee754Float64Log:
879 ASSEMBLE_IEEE754_UNOP(log);
880 break;
881 case kIeee754Float64Log1p:
882 ASSEMBLE_IEEE754_UNOP(log1p);
883 break;
884 case kIeee754Float64Log2:
885 ASSEMBLE_IEEE754_UNOP(log2);
886 break;
887 case kIeee754Float64Log10:
888 ASSEMBLE_IEEE754_UNOP(log10);
889 break;
890 case kIeee754Float64Pow: {
891 MathPowStub stub(isolate(), MathPowStub::DOUBLE);
892 __ CallStub(&stub);
893 break;
894 }
895 case kIeee754Float64Sin:
896 ASSEMBLE_IEEE754_UNOP(sin);
897 break;
898 case kIeee754Float64Sinh:
899 ASSEMBLE_IEEE754_UNOP(sinh);
900 break;
901 case kIeee754Float64Tan:
902 ASSEMBLE_IEEE754_UNOP(tan);
903 break;
904 case kIeee754Float64Tanh:
905 ASSEMBLE_IEEE754_UNOP(tanh);
906 break;
907 case kArm64Float32RoundDown:
908 __ Frintm(i.OutputFloat32Register(), i.InputFloat32Register(0));
909 break;
910 case kArm64Float64RoundDown:
911 __ Frintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
912 break;
913 case kArm64Float32RoundUp:
914 __ Frintp(i.OutputFloat32Register(), i.InputFloat32Register(0));
915 break;
916 case kArm64Float64RoundUp:
917 __ Frintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
918 break;
919 case kArm64Float64RoundTiesAway:
920 __ Frinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
921 break;
922 case kArm64Float32RoundTruncate:
923 __ Frintz(i.OutputFloat32Register(), i.InputFloat32Register(0));
924 break;
925 case kArm64Float64RoundTruncate:
926 __ Frintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
927 break;
928 case kArm64Float32RoundTiesEven:
929 __ Frintn(i.OutputFloat32Register(), i.InputFloat32Register(0));
930 break;
931 case kArm64Float64RoundTiesEven:
932 __ Frintn(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
933 break;
934 case kArm64Add:
935 if (FlagsModeField::decode(opcode) != kFlags_none) {
936 __ Adds(i.OutputRegister(), i.InputOrZeroRegister64(0),
937 i.InputOperand2_64(1));
938 } else {
939 __ Add(i.OutputRegister(), i.InputOrZeroRegister64(0),
940 i.InputOperand2_64(1));
941 }
942 break;
943 case kArm64Add32:
944 if (FlagsModeField::decode(opcode) != kFlags_none) {
945 __ Adds(i.OutputRegister32(), i.InputOrZeroRegister32(0),
946 i.InputOperand2_32(1));
947 } else {
948 __ Add(i.OutputRegister32(), i.InputOrZeroRegister32(0),
949 i.InputOperand2_32(1));
950 }
951 break;
952 case kArm64And:
953 if (FlagsModeField::decode(opcode) != kFlags_none) {
954 // The ands instruction only sets N and Z, so only the following
955 // conditions make sense.
956 DCHECK(FlagsConditionField::decode(opcode) == kEqual ||
957 FlagsConditionField::decode(opcode) == kNotEqual ||
958 FlagsConditionField::decode(opcode) == kPositiveOrZero ||
959 FlagsConditionField::decode(opcode) == kNegative);
960 __ Ands(i.OutputRegister(), i.InputOrZeroRegister64(0),
961 i.InputOperand2_64(1));
962 } else {
963 __ And(i.OutputRegister(), i.InputOrZeroRegister64(0),
964 i.InputOperand2_64(1));
965 }
966 break;
967 case kArm64And32:
968 if (FlagsModeField::decode(opcode) != kFlags_none) {
969 // The ands instruction only sets N and Z, so only the following
970 // conditions make sense.
971 DCHECK(FlagsConditionField::decode(opcode) == kEqual ||
972 FlagsConditionField::decode(opcode) == kNotEqual ||
973 FlagsConditionField::decode(opcode) == kPositiveOrZero ||
974 FlagsConditionField::decode(opcode) == kNegative);
975 __ Ands(i.OutputRegister32(), i.InputOrZeroRegister32(0),
976 i.InputOperand2_32(1));
977 } else {
978 __ And(i.OutputRegister32(), i.InputOrZeroRegister32(0),
979 i.InputOperand2_32(1));
980 }
981 break;
982 case kArm64Bic:
983 __ Bic(i.OutputRegister(), i.InputOrZeroRegister64(0),
984 i.InputOperand2_64(1));
985 break;
986 case kArm64Bic32:
987 __ Bic(i.OutputRegister32(), i.InputOrZeroRegister32(0),
988 i.InputOperand2_32(1));
989 break;
990 case kArm64Mul:
991 __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
992 break;
993 case kArm64Mul32:
994 __ Mul(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
995 break;
996 case kArm64Smull:
997 __ Smull(i.OutputRegister(), i.InputRegister32(0), i.InputRegister32(1));
998 break;
999 case kArm64Umull:
1000 __ Umull(i.OutputRegister(), i.InputRegister32(0), i.InputRegister32(1));
1001 break;
1002 case kArm64Madd:
1003 __ Madd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
1004 i.InputRegister(2));
1005 break;
1006 case kArm64Madd32:
1007 __ Madd(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1),
1008 i.InputRegister32(2));
1009 break;
1010 case kArm64Msub:
1011 __ Msub(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
1012 i.InputRegister(2));
1013 break;
1014 case kArm64Msub32:
1015 __ Msub(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1),
1016 i.InputRegister32(2));
1017 break;
1018 case kArm64Mneg:
1019 __ Mneg(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
1020 break;
1021 case kArm64Mneg32:
1022 __ Mneg(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
1023 break;
1024 case kArm64Idiv:
1025 __ Sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
1026 break;
1027 case kArm64Idiv32:
1028 __ Sdiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
1029 break;
1030 case kArm64Udiv:
1031 __ Udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
1032 break;
1033 case kArm64Udiv32:
1034 __ Udiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
1035 break;
1036 case kArm64Imod: {
1037 UseScratchRegisterScope scope(masm());
1038 Register temp = scope.AcquireX();
1039 __ Sdiv(temp, i.InputRegister(0), i.InputRegister(1));
1040 __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
1041 break;
1042 }
1043 case kArm64Imod32: {
1044 UseScratchRegisterScope scope(masm());
1045 Register temp = scope.AcquireW();
1046 __ Sdiv(temp, i.InputRegister32(0), i.InputRegister32(1));
1047 __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
1048 i.InputRegister32(0));
1049 break;
1050 }
1051 case kArm64Umod: {
1052 UseScratchRegisterScope scope(masm());
1053 Register temp = scope.AcquireX();
1054 __ Udiv(temp, i.InputRegister(0), i.InputRegister(1));
1055 __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
1056 break;
1057 }
1058 case kArm64Umod32: {
1059 UseScratchRegisterScope scope(masm());
1060 Register temp = scope.AcquireW();
1061 __ Udiv(temp, i.InputRegister32(0), i.InputRegister32(1));
1062 __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
1063 i.InputRegister32(0));
1064 break;
1065 }
1066 case kArm64Not:
1067 __ Mvn(i.OutputRegister(), i.InputOperand(0));
1068 break;
1069 case kArm64Not32:
1070 __ Mvn(i.OutputRegister32(), i.InputOperand32(0));
1071 break;
1072 case kArm64Or:
1073 __ Orr(i.OutputRegister(), i.InputOrZeroRegister64(0),
1074 i.InputOperand2_64(1));
1075 break;
1076 case kArm64Or32:
1077 __ Orr(i.OutputRegister32(), i.InputOrZeroRegister32(0),
1078 i.InputOperand2_32(1));
1079 break;
1080 case kArm64Orn:
1081 __ Orn(i.OutputRegister(), i.InputOrZeroRegister64(0),
1082 i.InputOperand2_64(1));
1083 break;
1084 case kArm64Orn32:
1085 __ Orn(i.OutputRegister32(), i.InputOrZeroRegister32(0),
1086 i.InputOperand2_32(1));
1087 break;
1088 case kArm64Eor:
1089 __ Eor(i.OutputRegister(), i.InputOrZeroRegister64(0),
1090 i.InputOperand2_64(1));
1091 break;
1092 case kArm64Eor32:
1093 __ Eor(i.OutputRegister32(), i.InputOrZeroRegister32(0),
1094 i.InputOperand2_32(1));
1095 break;
1096 case kArm64Eon:
1097 __ Eon(i.OutputRegister(), i.InputOrZeroRegister64(0),
1098 i.InputOperand2_64(1));
1099 break;
1100 case kArm64Eon32:
1101 __ Eon(i.OutputRegister32(), i.InputOrZeroRegister32(0),
1102 i.InputOperand2_32(1));
1103 break;
1104 case kArm64Sub:
1105 if (FlagsModeField::decode(opcode) != kFlags_none) {
1106 __ Subs(i.OutputRegister(), i.InputOrZeroRegister64(0),
1107 i.InputOperand2_64(1));
1108 } else {
1109 __ Sub(i.OutputRegister(), i.InputOrZeroRegister64(0),
1110 i.InputOperand2_64(1));
1111 }
1112 break;
1113 case kArm64Sub32:
1114 if (FlagsModeField::decode(opcode) != kFlags_none) {
1115 __ Subs(i.OutputRegister32(), i.InputOrZeroRegister32(0),
1116 i.InputOperand2_32(1));
1117 } else {
1118 __ Sub(i.OutputRegister32(), i.InputOrZeroRegister32(0),
1119 i.InputOperand2_32(1));
1120 }
1121 break;
1122 case kArm64Lsl:
1123 ASSEMBLE_SHIFT(Lsl, 64);
1124 break;
1125 case kArm64Lsl32:
1126 ASSEMBLE_SHIFT(Lsl, 32);
1127 break;
1128 case kArm64Lsr:
1129 ASSEMBLE_SHIFT(Lsr, 64);
1130 break;
1131 case kArm64Lsr32:
1132 ASSEMBLE_SHIFT(Lsr, 32);
1133 break;
1134 case kArm64Asr:
1135 ASSEMBLE_SHIFT(Asr, 64);
1136 break;
1137 case kArm64Asr32:
1138 ASSEMBLE_SHIFT(Asr, 32);
1139 break;
1140 case kArm64Ror:
1141 ASSEMBLE_SHIFT(Ror, 64);
1142 break;
1143 case kArm64Ror32:
1144 ASSEMBLE_SHIFT(Ror, 32);
1145 break;
1146 case kArm64Mov32:
1147 __ Mov(i.OutputRegister32(), i.InputRegister32(0));
1148 break;
1149 case kArm64Sxtb32:
1150 __ Sxtb(i.OutputRegister32(), i.InputRegister32(0));
1151 break;
1152 case kArm64Sxth32:
1153 __ Sxth(i.OutputRegister32(), i.InputRegister32(0));
1154 break;
1155 case kArm64Sxtw:
1156 __ Sxtw(i.OutputRegister(), i.InputRegister32(0));
1157 break;
1158 case kArm64Sbfx32:
1159 __ Sbfx(i.OutputRegister32(), i.InputRegister32(0), i.InputInt5(1),
1160 i.InputInt5(2));
1161 break;
1162 case kArm64Ubfx:
1163 __ Ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt6(1),
1164 i.InputInt6(2));
1165 break;
1166 case kArm64Ubfx32:
1167 __ Ubfx(i.OutputRegister32(), i.InputRegister32(0), i.InputInt5(1),
1168 i.InputInt5(2));
1169 break;
1170 case kArm64Ubfiz32:
1171 __ Ubfiz(i.OutputRegister32(), i.InputRegister32(0), i.InputInt5(1),
1172 i.InputInt5(2));
1173 break;
1174 case kArm64Bfi:
1175 __ Bfi(i.OutputRegister(), i.InputRegister(1), i.InputInt6(2),
1176 i.InputInt6(3));
1177 break;
1178 case kArm64TestAndBranch32:
1179 case kArm64TestAndBranch:
1180 // Pseudo instructions turned into tbz/tbnz in AssembleArchBranch.
1181 break;
1182 case kArm64CompareAndBranch32:
1183 case kArm64CompareAndBranch:
1184 // Pseudo instruction turned into cbz/cbnz in AssembleArchBranch.
1185 break;
1186 case kArm64ClaimCSP: {
1187 int count = RoundUp(i.InputInt32(0), 2);
1188 Register prev = __ StackPointer();
1189 if (prev.Is(jssp)) {
1190 // TODO(titzer): make this a macro-assembler method.
1191 // Align the CSP and store the previous JSSP on the stack.
1192 UseScratchRegisterScope scope(masm());
1193 Register tmp = scope.AcquireX();
1194
1195 int sp_alignment = __ ActivationFrameAlignment();
1196 __ Sub(tmp, jssp, kPointerSize);
1197 __ And(tmp, tmp, Operand(~static_cast<uint64_t>(sp_alignment - 1)));
1198 __ Mov(csp, tmp);
1199 __ Str(jssp, MemOperand(csp));
1200 if (count > 0) {
1201 __ SetStackPointer(csp);
1202 __ Claim(count);
1203 __ SetStackPointer(prev);
1204 }
1205 } else {
1206 __ AssertCspAligned();
1207 if (count > 0) {
1208 __ Claim(count);
1209 frame_access_state()->IncreaseSPDelta(count);
1210 }
1211 }
1212 break;
1213 }
1214 case kArm64ClaimJSSP: {
1215 int count = i.InputInt32(0);
1216 if (csp.Is(__ StackPointer())) {
1217 // No JSSP is set up. Compute it from the CSP.
1218 __ AssertCspAligned();
1219 if (count > 0) {
1220 int even = RoundUp(count, 2);
1221 __ Sub(jssp, csp, count * kPointerSize);
1222 __ Sub(csp, csp, even * kPointerSize); // Must always be aligned.
1223 frame_access_state()->IncreaseSPDelta(even);
1224 } else {
1225 __ Mov(jssp, csp);
1226 }
1227 } else {
1228 // JSSP is the current stack pointer; just use a regular Claim().
1229 __ Claim(count);
1230 frame_access_state()->IncreaseSPDelta(count);
1231 }
1232 break;
1233 }
1234 case kArm64PokeCSP: // fall through
1235 case kArm64PokeJSSP: {
1236 Register prev = __ StackPointer();
1237 __ SetStackPointer(arch_opcode == kArm64PokeCSP ? csp : jssp);
1238 Operand operand(i.InputInt32(1) * kPointerSize);
1239 if (instr->InputAt(0)->IsFPRegister()) {
1240 __ Poke(i.InputFloat64Register(0), operand);
1241 } else {
1242 __ Poke(i.InputRegister(0), operand);
1243 }
1244 __ SetStackPointer(prev);
1245 break;
1246 }
1247 case kArm64PokePair: {
1248 int slot = i.InputInt32(2) - 1;
1249 if (instr->InputAt(0)->IsFPRegister()) {
1250 __ PokePair(i.InputFloat64Register(1), i.InputFloat64Register(0),
1251 slot * kPointerSize);
1252 } else {
1253 __ PokePair(i.InputRegister(1), i.InputRegister(0),
1254 slot * kPointerSize);
1255 }
1256 break;
1257 }
1258 case kArm64Clz:
1259 __ Clz(i.OutputRegister64(), i.InputRegister64(0));
1260 break;
1261 case kArm64Clz32:
1262 __ Clz(i.OutputRegister32(), i.InputRegister32(0));
1263 break;
1264 case kArm64Rbit:
1265 __ Rbit(i.OutputRegister64(), i.InputRegister64(0));
1266 break;
1267 case kArm64Rbit32:
1268 __ Rbit(i.OutputRegister32(), i.InputRegister32(0));
1269 break;
1270 case kArm64Cmp:
1271 __ Cmp(i.InputOrZeroRegister64(0), i.InputOperand2_64(1));
1272 break;
1273 case kArm64Cmp32:
1274 __ Cmp(i.InputOrZeroRegister32(0), i.InputOperand2_32(1));
1275 break;
1276 case kArm64Cmn:
1277 __ Cmn(i.InputOrZeroRegister64(0), i.InputOperand2_64(1));
1278 break;
1279 case kArm64Cmn32:
1280 __ Cmn(i.InputOrZeroRegister32(0), i.InputOperand2_32(1));
1281 break;
1282 case kArm64Tst:
1283 __ Tst(i.InputOrZeroRegister64(0), i.InputOperand(1));
1284 break;
1285 case kArm64Tst32:
1286 __ Tst(i.InputOrZeroRegister32(0), i.InputOperand32(1));
1287 break;
1288 case kArm64Float32Cmp:
1289 if (instr->InputAt(1)->IsFPRegister()) {
1290 __ Fcmp(i.InputFloat32Register(0), i.InputFloat32Register(1));
1291 } else {
1292 DCHECK(instr->InputAt(1)->IsImmediate());
1293 // 0.0 is the only immediate supported by fcmp instructions.
1294 DCHECK(i.InputFloat32(1) == 0.0f);
1295 __ Fcmp(i.InputFloat32Register(0), i.InputFloat32(1));
1296 }
1297 break;
1298 case kArm64Float32Add:
1299 __ Fadd(i.OutputFloat32Register(), i.InputFloat32Register(0),
1300 i.InputFloat32Register(1));
1301 break;
1302 case kArm64Float32Sub:
1303 __ Fsub(i.OutputFloat32Register(), i.InputFloat32Register(0),
1304 i.InputFloat32Register(1));
1305 break;
1306 case kArm64Float32Mul:
1307 __ Fmul(i.OutputFloat32Register(), i.InputFloat32Register(0),
1308 i.InputFloat32Register(1));
1309 break;
1310 case kArm64Float32Div:
1311 __ Fdiv(i.OutputFloat32Register(), i.InputFloat32Register(0),
1312 i.InputFloat32Register(1));
1313 break;
1314 case kArm64Float32Abs:
1315 __ Fabs(i.OutputFloat32Register(), i.InputFloat32Register(0));
1316 break;
1317 case kArm64Float32Neg:
1318 __ Fneg(i.OutputFloat32Register(), i.InputFloat32Register(0));
1319 break;
1320 case kArm64Float32Sqrt:
1321 __ Fsqrt(i.OutputFloat32Register(), i.InputFloat32Register(0));
1322 break;
1323 case kArm64Float64Cmp:
1324 if (instr->InputAt(1)->IsFPRegister()) {
1325 __ Fcmp(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
1326 } else {
1327 DCHECK(instr->InputAt(1)->IsImmediate());
1328 // 0.0 is the only immediate supported by fcmp instructions.
1329 DCHECK(i.InputDouble(1) == 0.0);
1330 __ Fcmp(i.InputDoubleRegister(0), i.InputDouble(1));
1331 }
1332 break;
1333 case kArm64Float64Add:
1334 __ Fadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1335 i.InputDoubleRegister(1));
1336 break;
1337 case kArm64Float64Sub:
1338 __ Fsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1339 i.InputDoubleRegister(1));
1340 break;
1341 case kArm64Float64Mul:
1342 __ Fmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1343 i.InputDoubleRegister(1));
1344 break;
1345 case kArm64Float64Div:
1346 __ Fdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1347 i.InputDoubleRegister(1));
1348 break;
1349 case kArm64Float64Mod: {
1350 // TODO(dcarney): implement directly. See note in lithium-codegen-arm64.cc
1351 FrameScope scope(masm(), StackFrame::MANUAL);
1352 DCHECK(d0.is(i.InputDoubleRegister(0)));
1353 DCHECK(d1.is(i.InputDoubleRegister(1)));
1354 DCHECK(d0.is(i.OutputDoubleRegister()));
1355 // TODO(dcarney): make sure this saves all relevant registers.
1356 __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
1357 0, 2);
1358 break;
1359 }
1360 case kArm64Float32Max: {
1361 __ Fmax(i.OutputFloat32Register(), i.InputFloat32Register(0),
1362 i.InputFloat32Register(1));
1363 break;
1364 }
1365 case kArm64Float64Max: {
1366 __ Fmax(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1367 i.InputDoubleRegister(1));
1368 break;
1369 }
1370 case kArm64Float32Min: {
1371 __ Fmin(i.OutputFloat32Register(), i.InputFloat32Register(0),
1372 i.InputFloat32Register(1));
1373 break;
1374 }
1375 case kArm64Float64Min: {
1376 __ Fmin(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
1377 i.InputDoubleRegister(1));
1378 break;
1379 }
1380 case kArm64Float64Abs:
1381 __ Fabs(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1382 break;
1383 case kArm64Float64Neg:
1384 __ Fneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1385 break;
1386 case kArm64Float64Sqrt:
1387 __ Fsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1388 break;
1389 case kArm64Float32ToFloat64:
1390 __ Fcvt(i.OutputDoubleRegister(), i.InputDoubleRegister(0).S());
1391 break;
1392 case kArm64Float64ToFloat32:
1393 __ Fcvt(i.OutputDoubleRegister().S(), i.InputDoubleRegister(0));
1394 break;
1395 case kArm64Float32ToInt32:
1396 __ Fcvtzs(i.OutputRegister32(), i.InputFloat32Register(0));
1397 // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
1398 // because INT32_MIN allows easier out-of-bounds detection.
1399 __ Cmn(i.OutputRegister32(), 1);
1400 __ Csinc(i.OutputRegister32(), i.OutputRegister32(), i.OutputRegister32(),
1401 vc);
1402 break;
1403 case kArm64Float64ToInt32:
1404 __ Fcvtzs(i.OutputRegister32(), i.InputDoubleRegister(0));
1405 break;
1406 case kArm64Float32ToUint32:
1407 __ Fcvtzu(i.OutputRegister32(), i.InputFloat32Register(0));
1408 // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
1409 // because 0 allows easier out-of-bounds detection.
1410 __ Cmn(i.OutputRegister32(), 1);
1411 __ Adc(i.OutputRegister32(), i.OutputRegister32(), Operand(0));
1412 break;
1413 case kArm64Float64ToUint32:
1414 __ Fcvtzu(i.OutputRegister32(), i.InputDoubleRegister(0));
1415 break;
1416 case kArm64Float32ToInt64:
1417 __ Fcvtzs(i.OutputRegister64(), i.InputFloat32Register(0));
1418 if (i.OutputCount() > 1) {
1419 __ Mov(i.OutputRegister(1), 1);
1420 Label done;
1421 __ Cmp(i.OutputRegister(0), 1);
1422 __ Ccmp(i.OutputRegister(0), -1, VFlag, vc);
1423 __ Fccmp(i.InputFloat32Register(0), i.InputFloat32Register(0), VFlag,
1424 vc);
1425 __ B(vc, &done);
1426 __ Fcmp(i.InputFloat32Register(0), static_cast<float>(INT64_MIN));
1427 __ Cset(i.OutputRegister(1), eq);
1428 __ Bind(&done);
1429 }
1430 break;
1431 case kArm64Float64ToInt64:
1432 __ Fcvtzs(i.OutputRegister(0), i.InputDoubleRegister(0));
1433 if (i.OutputCount() > 1) {
1434 __ Mov(i.OutputRegister(1), 1);
1435 Label done;
1436 __ Cmp(i.OutputRegister(0), 1);
1437 __ Ccmp(i.OutputRegister(0), -1, VFlag, vc);
1438 __ Fccmp(i.InputDoubleRegister(0), i.InputDoubleRegister(0), VFlag, vc);
1439 __ B(vc, &done);
1440 __ Fcmp(i.InputDoubleRegister(0), static_cast<double>(INT64_MIN));
1441 __ Cset(i.OutputRegister(1), eq);
1442 __ Bind(&done);
1443 }
1444 break;
1445 case kArm64Float32ToUint64:
1446 __ Fcvtzu(i.OutputRegister64(), i.InputFloat32Register(0));
1447 if (i.OutputCount() > 1) {
1448 __ Fcmp(i.InputFloat32Register(0), -1.0);
1449 __ Ccmp(i.OutputRegister(0), -1, ZFlag, gt);
1450 __ Cset(i.OutputRegister(1), ne);
1451 }
1452 break;
1453 case kArm64Float64ToUint64:
1454 __ Fcvtzu(i.OutputRegister64(), i.InputDoubleRegister(0));
1455 if (i.OutputCount() > 1) {
1456 __ Fcmp(i.InputDoubleRegister(0), -1.0);
1457 __ Ccmp(i.OutputRegister(0), -1, ZFlag, gt);
1458 __ Cset(i.OutputRegister(1), ne);
1459 }
1460 break;
1461 case kArm64Int32ToFloat32:
1462 __ Scvtf(i.OutputFloat32Register(), i.InputRegister32(0));
1463 break;
1464 case kArm64Int32ToFloat64:
1465 __ Scvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
1466 break;
1467 case kArm64Int64ToFloat32:
1468 __ Scvtf(i.OutputDoubleRegister().S(), i.InputRegister64(0));
1469 break;
1470 case kArm64Int64ToFloat64:
1471 __ Scvtf(i.OutputDoubleRegister(), i.InputRegister64(0));
1472 break;
1473 case kArm64Uint32ToFloat32:
1474 __ Ucvtf(i.OutputFloat32Register(), i.InputRegister32(0));
1475 break;
1476 case kArm64Uint32ToFloat64:
1477 __ Ucvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
1478 break;
1479 case kArm64Uint64ToFloat32:
1480 __ Ucvtf(i.OutputDoubleRegister().S(), i.InputRegister64(0));
1481 break;
1482 case kArm64Uint64ToFloat64:
1483 __ Ucvtf(i.OutputDoubleRegister(), i.InputRegister64(0));
1484 break;
1485 case kArm64Float64ExtractLowWord32:
1486 __ Fmov(i.OutputRegister32(), i.InputFloat32Register(0));
1487 break;
1488 case kArm64Float64ExtractHighWord32:
1489 // TODO(arm64): This should use MOV (to general) when NEON is supported.
1490 __ Fmov(i.OutputRegister(), i.InputFloat64Register(0));
1491 __ Lsr(i.OutputRegister(), i.OutputRegister(), 32);
1492 break;
1493 case kArm64Float64InsertLowWord32: {
1494 // TODO(arm64): This should use MOV (from general) when NEON is supported.
1495 UseScratchRegisterScope scope(masm());
1496 Register tmp = scope.AcquireX();
1497 __ Fmov(tmp, i.InputFloat64Register(0));
1498 __ Bfi(tmp, i.InputRegister(1), 0, 32);
1499 __ Fmov(i.OutputFloat64Register(), tmp);
1500 break;
1501 }
1502 case kArm64Float64InsertHighWord32: {
1503 // TODO(arm64): This should use MOV (from general) when NEON is supported.
1504 UseScratchRegisterScope scope(masm());
1505 Register tmp = scope.AcquireX();
1506 __ Fmov(tmp.W(), i.InputFloat32Register(0));
1507 __ Bfi(tmp, i.InputRegister(1), 32, 32);
1508 __ Fmov(i.OutputFloat64Register(), tmp);
1509 break;
1510 }
1511 case kArm64Float64MoveU64:
1512 __ Fmov(i.OutputFloat64Register(), i.InputRegister(0));
1513 break;
1514 case kArm64Float64SilenceNaN:
1515 __ CanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1516 break;
1517 case kArm64U64MoveFloat64:
1518 __ Fmov(i.OutputRegister(), i.InputDoubleRegister(0));
1519 break;
1520 case kArm64Ldrb:
1521 __ Ldrb(i.OutputRegister(), i.MemoryOperand());
1522 break;
1523 case kArm64Ldrsb:
1524 __ Ldrsb(i.OutputRegister(), i.MemoryOperand());
1525 break;
1526 case kArm64Strb:
1527 __ Strb(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
1528 break;
1529 case kArm64Ldrh:
1530 __ Ldrh(i.OutputRegister(), i.MemoryOperand());
1531 break;
1532 case kArm64Ldrsh:
1533 __ Ldrsh(i.OutputRegister(), i.MemoryOperand());
1534 break;
1535 case kArm64Strh:
1536 __ Strh(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
1537 break;
1538 case kArm64Ldrsw:
1539 __ Ldrsw(i.OutputRegister(), i.MemoryOperand());
1540 break;
1541 case kArm64LdrW:
1542 __ Ldr(i.OutputRegister32(), i.MemoryOperand());
1543 break;
1544 case kArm64StrW:
1545 __ Str(i.InputOrZeroRegister32(0), i.MemoryOperand(1));
1546 break;
1547 case kArm64Ldr:
1548 __ Ldr(i.OutputRegister(), i.MemoryOperand());
1549 break;
1550 case kArm64Str:
1551 __ Str(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
1552 break;
1553 case kArm64LdrS:
1554 __ Ldr(i.OutputDoubleRegister().S(), i.MemoryOperand());
1555 break;
1556 case kArm64StrS:
1557 __ Str(i.InputFloat32OrZeroRegister(0), i.MemoryOperand(1));
1558 break;
1559 case kArm64LdrD:
1560 __ Ldr(i.OutputDoubleRegister(), i.MemoryOperand());
1561 break;
1562 case kArm64StrD:
1563 __ Str(i.InputFloat64OrZeroRegister(0), i.MemoryOperand(1));
1564 break;
1565 case kCheckedLoadInt8:
1566 ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrsb);
1567 break;
1568 case kCheckedLoadUint8:
1569 ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrb);
1570 break;
1571 case kCheckedLoadInt16:
1572 ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrsh);
1573 break;
1574 case kCheckedLoadUint16:
1575 ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrh);
1576 break;
1577 case kCheckedLoadWord32:
1578 ASSEMBLE_CHECKED_LOAD_INTEGER(Ldr);
1579 break;
1580 case kCheckedLoadWord64:
1581 ASSEMBLE_CHECKED_LOAD_INTEGER_64(Ldr);
1582 break;
1583 case kCheckedLoadFloat32:
1584 ASSEMBLE_CHECKED_LOAD_FLOAT(32);
1585 break;
1586 case kCheckedLoadFloat64:
1587 ASSEMBLE_CHECKED_LOAD_FLOAT(64);
1588 break;
1589 case kCheckedStoreWord8:
1590 ASSEMBLE_CHECKED_STORE_INTEGER(Strb);
1591 break;
1592 case kCheckedStoreWord16:
1593 ASSEMBLE_CHECKED_STORE_INTEGER(Strh);
1594 break;
1595 case kCheckedStoreWord32:
1596 ASSEMBLE_CHECKED_STORE_INTEGER(Str);
1597 break;
1598 case kCheckedStoreWord64:
1599 ASSEMBLE_CHECKED_STORE_INTEGER_64(Str);
1600 break;
1601 case kCheckedStoreFloat32:
1602 ASSEMBLE_CHECKED_STORE_FLOAT(32);
1603 break;
1604 case kCheckedStoreFloat64:
1605 ASSEMBLE_CHECKED_STORE_FLOAT(64);
1606 break;
1607 case kAtomicLoadInt8:
1608 ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldrsb);
1609 break;
1610 case kAtomicLoadUint8:
1611 ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldrb);
1612 break;
1613 case kAtomicLoadInt16:
1614 ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldrsh);
1615 break;
1616 case kAtomicLoadUint16:
1617 ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldrh);
1618 break;
1619 case kAtomicLoadWord32:
1620 __ Ldr(i.OutputRegister32(),
1621 MemOperand(i.InputRegister(0), i.InputRegister(1)));
1622 __ Dmb(InnerShareable, BarrierAll);
1623 break;
1624 case kAtomicStoreWord8:
1625 ASSEMBLE_ATOMIC_STORE_INTEGER(Strb);
1626 break;
1627 case kAtomicStoreWord16:
1628 ASSEMBLE_ATOMIC_STORE_INTEGER(Strh);
1629 break;
1630 case kAtomicStoreWord32:
1631 __ Dmb(InnerShareable, BarrierAll);
1632 __ Str(i.InputRegister32(2),
1633 MemOperand(i.InputRegister(0), i.InputRegister(1)));
1634 __ Dmb(InnerShareable, BarrierAll);
1635 break;
1636 }
1637 return kSuccess;
1638 } // NOLINT(readability/fn_size)
1639
1640
1641 // Assemble branches after this instruction.
1642 void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
1643 Arm64OperandConverter i(this, instr);
1644 Label* tlabel = branch->true_label;
1645 Label* flabel = branch->false_label;
1646 FlagsCondition condition = branch->condition;
1647 ArchOpcode opcode = instr->arch_opcode();
1648
1649 if (opcode == kArm64CompareAndBranch32) {
1650 switch (condition) {
1651 case kEqual:
1652 __ Cbz(i.InputRegister32(0), tlabel);
1653 break;
1654 case kNotEqual:
1655 __ Cbnz(i.InputRegister32(0), tlabel);
1656 break;
1657 default:
1658 UNREACHABLE();
1659 }
1660 } else if (opcode == kArm64CompareAndBranch) {
1661 switch (condition) {
1662 case kEqual:
1663 __ Cbz(i.InputRegister64(0), tlabel);
1664 break;
1665 case kNotEqual:
1666 __ Cbnz(i.InputRegister64(0), tlabel);
1667 break;
1668 default:
1669 UNREACHABLE();
1670 }
1671 } else if (opcode == kArm64TestAndBranch32) {
1672 switch (condition) {
1673 case kEqual:
1674 __ Tbz(i.InputRegister32(0), i.InputInt5(1), tlabel);
1675 break;
1676 case kNotEqual:
1677 __ Tbnz(i.InputRegister32(0), i.InputInt5(1), tlabel);
1678 break;
1679 default:
1680 UNREACHABLE();
1681 }
1682 } else if (opcode == kArm64TestAndBranch) {
1683 switch (condition) {
1684 case kEqual:
1685 __ Tbz(i.InputRegister64(0), i.InputInt6(1), tlabel);
1686 break;
1687 case kNotEqual:
1688 __ Tbnz(i.InputRegister64(0), i.InputInt6(1), tlabel);
1689 break;
1690 default:
1691 UNREACHABLE();
1692 }
1693 } else {
1694 Condition cc = FlagsConditionToCondition(condition);
1695 __ B(cc, tlabel);
1696 }
1697 if (!branch->fallthru) __ B(flabel); // no fallthru to flabel.
1698 }
1699
1700
AssembleArchJump(RpoNumber target)1701 void CodeGenerator::AssembleArchJump(RpoNumber target) {
1702 if (!IsNextInAssemblyOrder(target)) __ B(GetLabel(target));
1703 }
1704
1705
1706 // Assemble boolean materializations after this instruction.
AssembleArchBoolean(Instruction * instr,FlagsCondition condition)1707 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
1708 FlagsCondition condition) {
1709 Arm64OperandConverter i(this, instr);
1710
1711 // Materialize a full 64-bit 1 or 0 value. The result register is always the
1712 // last output of the instruction.
1713 DCHECK_NE(0u, instr->OutputCount());
1714 Register reg = i.OutputRegister(instr->OutputCount() - 1);
1715 Condition cc = FlagsConditionToCondition(condition);
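  // Cset writes 1 to |reg| if |cc| holds and 0 otherwise (e.g. kEqual
  // materializes as "cset reg, eq").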
  __ Cset(reg, cc);
}


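// Assemble a lookup switch as a linear sequence of 32-bit compares and
// branches; input 1 holds the default block, reached when no case value
// matches.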
void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
  Arm64OperandConverter i(this, instr);
  Register input = i.InputRegister32(0);
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    __ Cmp(input, i.InputInt32(index + 0));
    __ B(eq, GetLabel(i.InputRpo(index + 1)));
  }
  AssembleArchJump(i.InputRpo(1));
}


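// Assemble a table switch as an inline jump table: if the input is in range,
// branch to the matching entry of a table of B instructions emitted right
// after the dispatch sequence (each entry is one 4-byte instruction, hence
// the UXTW #2 scaling); out-of-range inputs branch to the default block.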
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  Arm64OperandConverter i(this, instr);
  UseScratchRegisterScope scope(masm());
  Register input = i.InputRegister32(0);
  Register temp = scope.AcquireX();
  size_t const case_count = instr->InputCount() - 2;
  Label table;
  __ Cmp(input, case_count);
  __ B(hs, GetLabel(i.InputRpo(1)));
  __ Adr(temp, &table);
  __ Add(temp, temp, Operand(input, UXTW, 2));
  __ Br(temp);
  __ StartBlockPools();
  __ Bind(&table);
  for (size_t index = 0; index < case_count; ++index) {
    __ B(GetLabel(i.InputRpo(index + 2)));
  }
  __ EndBlockPools();
}

CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
    int deoptimization_id, Deoptimizer::BailoutType bailout_type,
    SourcePosition pos) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, bailout_type);
  if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
  DeoptimizeReason deoptimization_reason =
      GetDeoptimizationReason(deoptimization_id);
  __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
  return kSuccess;
}

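// Finish the frame layout: align it to 16 bytes, select csp or jssp as the
// stack pointer depending on the call descriptor, and reserve spill slots for
// the callee-saved FP and general registers.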
void CodeGenerator::FinishFrame(Frame* frame) {
  frame->AlignFrame(16);
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();

  if (descriptor->UseNativeStack() || descriptor->IsCFunctionCall()) {
    __ SetStackPointer(csp);
  } else {
    __ SetStackPointer(jssp);
  }

  // Save FP registers.
  CPURegList saves_fp = CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
                                   descriptor->CalleeSavedFPRegisters());
  int saved_count = saves_fp.Count();
  if (saved_count != 0) {
    DCHECK(saves_fp.list() == CPURegList::GetCalleeSavedFP().list());
    frame->AllocateSavedCalleeRegisterSlots(saved_count *
                                            (kDoubleSize / kPointerSize));
  }

  CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
                                descriptor->CalleeSavedRegisters());
  saved_count = saves.Count();
  if (saved_count != 0) {
    frame->AllocateSavedCalleeRegisterSlots(saved_count);
  }
}

void CodeGenerator::AssembleConstructFrame() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->UseNativeStack()) {
    __ AssertCspAligned();
  }

  int fixed_frame_size = descriptor->CalculateFixedFrameSize();
  int shrink_slots =
      frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();

  if (frame_access_state()->has_frame()) {
    // Link the frame
    if (descriptor->IsJSFunctionCall()) {
      DCHECK(!descriptor->UseNativeStack());
      __ Prologue(this->info()->GeneratePreagedPrologue());
    } else {
      __ Push(lr, fp);
      __ Mov(fp, masm_.StackPointer());
    }
    if (!info()->GeneratePreagedPrologue()) {
      unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());
    }

    // Create OSR entry if applicable
    if (info()->is_osr()) {
      // TurboFan OSR-compiled functions cannot be entered directly.
      __ Abort(kShouldNotDirectlyEnterOsrFunction);

      // Unoptimized code jumps directly to this entrypoint while the
      // unoptimized frame is still on the stack. Optimized code uses OSR
      // values directly from the unoptimized frame. Thus, all that needs to
      // be done is to allocate the remaining stack slots.
      if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
      osr_pc_offset_ = __ pc_offset();
      shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
    }

    // Build remainder of frame, including accounting for and filling-in
    // frame-specific header information, e.g. claiming the extra slot that
    // other platforms explicitly push for STUB frames and frames recording
    // their argument count.
    __ Claim(shrink_slots + (fixed_frame_size & 1));
    if (descriptor->PushArgumentCount()) {
      __ Str(kJavaScriptCallArgCountRegister,
             MemOperand(fp, OptimizedBuiltinFrameConstants::kArgCOffset));
    }
    bool is_stub_frame =
        !descriptor->IsJSFunctionCall() && !descriptor->IsCFunctionCall();
    if (is_stub_frame) {
      UseScratchRegisterScope temps(masm());
      Register temp = temps.AcquireX();
      __ Mov(temp, Smi::FromInt(info()->GetOutputStackFrameType()));
      __ Str(temp, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
    }
  }

  // Save FP registers.
  CPURegList saves_fp = CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
                                   descriptor->CalleeSavedFPRegisters());
  int saved_count = saves_fp.Count();
  if (saved_count != 0) {
    DCHECK(saves_fp.list() == CPURegList::GetCalleeSavedFP().list());
    __ PushCPURegList(saves_fp);
  }
  // Save registers.
  // TODO(palfia): TF save list is not in sync with
  // CPURegList::GetCalleeSaved(): x30 is missing.
  // DCHECK(saves.list() == CPURegList::GetCalleeSaved().list());
  CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
                                descriptor->CalleeSavedRegisters());
  saved_count = saves.Count();
  if (saved_count != 0) {
    __ PushCPURegList(saves);
  }
}

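// Assemble the return sequence: restore callee-saved registers, tear down the
// frame if one was built, drop the stack parameters (plus any dynamic pop
// count passed in |pop|), and return.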
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();

  // Restore registers.
  CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
                                descriptor->CalleeSavedRegisters());
  if (saves.Count() != 0) {
    __ PopCPURegList(saves);
  }

  // Restore fp registers.
  CPURegList saves_fp = CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
                                   descriptor->CalleeSavedFPRegisters());
  if (saves_fp.Count() != 0) {
    __ PopCPURegList(saves_fp);
  }

  unwinding_info_writer_.MarkBlockWillExit();

  Arm64OperandConverter g(this, nullptr);
  int pop_count = static_cast<int>(descriptor->StackParameterCount());
  if (descriptor->IsCFunctionCall()) {
    AssembleDeconstructFrame();
  } else if (frame_access_state()->has_frame()) {
    // Canonicalize JSFunction return sites for now unless they have a
    // variable number of stack slot pops.
    if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
      if (return_label_.is_bound()) {
        __ B(&return_label_);
        return;
      } else {
        __ Bind(&return_label_);
        AssembleDeconstructFrame();
        if (descriptor->UseNativeStack()) {
          pop_count += (pop_count & 1);  // align
        }
      }
    } else {
      AssembleDeconstructFrame();
      if (descriptor->UseNativeStack()) {
        pop_count += (pop_count & 1);  // align
      }
    }
  } else if (descriptor->UseNativeStack()) {
    pop_count += (pop_count & 1);  // align
  }

  if (pop->IsImmediate()) {
    DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
    pop_count += g.ToConstant(pop).ToInt32();
    __ Drop(pop_count);
  } else {
    Register pop_reg = g.ToRegister(pop);
    __ Add(pop_reg, pop_reg, pop_count);
    __ Drop(pop_reg);
  }

  if (descriptor->UseNativeStack()) {
    __ AssertCspAligned();
  }
  __ Ret();
}


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  Arm64OperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ Mov(g.ToRegister(destination), src);
    } else {
      __ Str(src, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsStackSlot()) {
    MemOperand src = g.ToMemOperand(source, masm());
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    if (destination->IsRegister()) {
      __ Ldr(g.ToRegister(destination), src);
    } else {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireX();
      __ Ldr(temp, src);
      __ Str(temp, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsConstant()) {
    Constant src = g.ToConstant(ConstantOperand::cast(source));
    if (destination->IsRegister() || destination->IsStackSlot()) {
      UseScratchRegisterScope scope(masm());
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                                : scope.AcquireX();
      if (src.type() == Constant::kHeapObject) {
        Handle<HeapObject> src_object = src.ToHeapObject();
        Heap::RootListIndex index;
        if (IsMaterializableFromRoot(src_object, &index)) {
          __ LoadRoot(dst, index);
        } else {
          __ LoadObject(dst, src_object);
        }
      } else {
        __ Mov(dst, g.ToImmediate(source));
      }
      if (destination->IsStackSlot()) {
        __ Str(dst, g.ToMemOperand(destination, masm()));
      }
    } else if (src.type() == Constant::kFloat32) {
      if (destination->IsFPRegister()) {
        FPRegister dst = g.ToDoubleRegister(destination).S();
        __ Fmov(dst, src.ToFloat32());
      } else {
        DCHECK(destination->IsFPStackSlot());
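        // A float32 zero has an all-zero bit pattern, so it can be stored
        // directly from wzr without going through an FP scratch register.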
        if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
          __ Str(wzr, g.ToMemOperand(destination, masm()));
        } else {
          UseScratchRegisterScope scope(masm());
          FPRegister temp = scope.AcquireS();
          __ Fmov(temp, src.ToFloat32());
          __ Str(temp, g.ToMemOperand(destination, masm()));
        }
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      if (destination->IsFPRegister()) {
        FPRegister dst = g.ToDoubleRegister(destination);
        __ Fmov(dst, src.ToFloat64());
      } else {
        DCHECK(destination->IsFPStackSlot());
        if (bit_cast<int64_t>(src.ToFloat64()) == 0) {
          __ Str(xzr, g.ToMemOperand(destination, masm()));
        } else {
          UseScratchRegisterScope scope(masm());
          FPRegister temp = scope.AcquireD();
          __ Fmov(temp, src.ToFloat64());
          __ Str(temp, g.ToMemOperand(destination, masm()));
        }
      }
    }
  } else if (source->IsFPRegister()) {
    FPRegister src = g.ToDoubleRegister(source);
    if (destination->IsFPRegister()) {
      FPRegister dst = g.ToDoubleRegister(destination);
      __ Fmov(dst, src);
    } else {
      DCHECK(destination->IsFPStackSlot());
      __ Str(src, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsFPStackSlot()) {
    DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
    MemOperand src = g.ToMemOperand(source, masm());
    if (destination->IsFPRegister()) {
      __ Ldr(g.ToDoubleRegister(destination), src);
    } else {
      UseScratchRegisterScope scope(masm());
      FPRegister temp = scope.AcquireD();
      __ Ldr(temp, src);
      __ Str(temp, g.ToMemOperand(destination, masm()));
    }
  } else {
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  Arm64OperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    UseScratchRegisterScope scope(masm());
    Register temp = scope.AcquireX();
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ Mov(temp, src);
      __ Mov(src, dst);
      __ Mov(dst, temp);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination, masm());
      __ Mov(temp, src);
      __ Ldr(src, dst);
      __ Str(temp, dst);
    }
  } else if (source->IsStackSlot() || source->IsFPStackSlot()) {
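    // Swap two stack slots through a pair of FP scratch registers; only the
    // raw 64-bit contents matter here, so this handles integer and FP slots
    // alike.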
    UseScratchRegisterScope scope(masm());
    DoubleRegister temp_0 = scope.AcquireD();
    DoubleRegister temp_1 = scope.AcquireD();
    MemOperand src = g.ToMemOperand(source, masm());
    MemOperand dst = g.ToMemOperand(destination, masm());
    __ Ldr(temp_0, src);
    __ Ldr(temp_1, dst);
    __ Str(temp_0, dst);
    __ Str(temp_1, src);
  } else if (source->IsFPRegister()) {
    UseScratchRegisterScope scope(masm());
    FPRegister temp = scope.AcquireD();
    FPRegister src = g.ToDoubleRegister(source);
    if (destination->IsFPRegister()) {
      FPRegister dst = g.ToDoubleRegister(destination);
      __ Fmov(temp, src);
      __ Fmov(src, dst);
      __ Fmov(dst, temp);
    } else {
      DCHECK(destination->IsFPStackSlot());
      MemOperand dst = g.ToMemOperand(destination, masm());
      __ Fmov(temp, src);
      __ Ldr(src, dst);
      __ Str(temp, dst);
    }
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
  // On 64-bit ARM we emit the jump tables inline.
  UNREACHABLE();
}


void CodeGenerator::EnsureSpaceForLazyDeopt() {
  if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
    return;
  }

  int space_needed = Deoptimizer::patch_size();
  // Ensure that we have enough space after the previous lazy-bailout
  // instruction for patching the code here.
  intptr_t current_pc = masm()->pc_offset();

  if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
    intptr_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
    DCHECK((padding_size % kInstructionSize) == 0);
    InstructionAccurateScope instruction_accurate(
        masm(), padding_size / kInstructionSize);

    while (padding_size > 0) {
      __ nop();
      padding_size -= kInstructionSize;
    }
  }
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8