// [Code-browser navigation artifacts (Home / Line# / Scopes# / Navigate /
//  Raw / Download) commented out so this file remains valid C++.]
1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/compiler/code-generator.h"
6 
7 #include "src/compilation-info.h"
8 #include "src/compiler/code-generator-impl.h"
9 #include "src/compiler/gap-resolver.h"
10 #include "src/compiler/node-matchers.h"
11 #include "src/compiler/osr.h"
12 #include "src/ppc/macro-assembler-ppc.h"
13 
14 namespace v8 {
15 namespace internal {
16 namespace compiler {
17 
18 #define __ masm()->
19 
20 
21 #define kScratchReg r11
22 
23 
24 // Adds PPC-specific methods to convert InstructionOperands.
25 class PPCOperandConverter final : public InstructionOperandConverter {
26  public:
PPCOperandConverter(CodeGenerator * gen,Instruction * instr)27   PPCOperandConverter(CodeGenerator* gen, Instruction* instr)
28       : InstructionOperandConverter(gen, instr) {}
29 
OutputCount()30   size_t OutputCount() { return instr_->OutputCount(); }
31 
OutputRCBit() const32   RCBit OutputRCBit() const {
33     switch (instr_->flags_mode()) {
34       case kFlags_branch:
35       case kFlags_deoptimize:
36       case kFlags_set:
37         return SetRC;
38       case kFlags_none:
39         return LeaveRC;
40     }
41     UNREACHABLE();
42     return LeaveRC;
43   }
44 
CompareLogical() const45   bool CompareLogical() const {
46     switch (instr_->flags_condition()) {
47       case kUnsignedLessThan:
48       case kUnsignedGreaterThanOrEqual:
49       case kUnsignedLessThanOrEqual:
50       case kUnsignedGreaterThan:
51         return true;
52       default:
53         return false;
54     }
55     UNREACHABLE();
56     return false;
57   }
58 
InputImmediate(size_t index)59   Operand InputImmediate(size_t index) {
60     Constant constant = ToConstant(instr_->InputAt(index));
61     switch (constant.type()) {
62       case Constant::kInt32:
63         return Operand(constant.ToInt32());
64       case Constant::kFloat32:
65         return Operand(
66             isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
67       case Constant::kFloat64:
68         return Operand(
69             isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
70       case Constant::kInt64:
71 #if V8_TARGET_ARCH_PPC64
72         return Operand(constant.ToInt64());
73 #endif
74       case Constant::kExternalReference:
75       case Constant::kHeapObject:
76       case Constant::kRpoNumber:
77         break;
78     }
79     UNREACHABLE();
80     return Operand::Zero();
81   }
82 
MemoryOperand(AddressingMode * mode,size_t * first_index)83   MemOperand MemoryOperand(AddressingMode* mode, size_t* first_index) {
84     const size_t index = *first_index;
85     *mode = AddressingModeField::decode(instr_->opcode());
86     switch (*mode) {
87       case kMode_None:
88         break;
89       case kMode_MRI:
90         *first_index += 2;
91         return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
92       case kMode_MRR:
93         *first_index += 2;
94         return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
95     }
96     UNREACHABLE();
97     return MemOperand(r0);
98   }
99 
MemoryOperand(AddressingMode * mode,size_t first_index=0)100   MemOperand MemoryOperand(AddressingMode* mode, size_t first_index = 0) {
101     return MemoryOperand(mode, &first_index);
102   }
103 
ToMemOperand(InstructionOperand * op) const104   MemOperand ToMemOperand(InstructionOperand* op) const {
105     DCHECK_NOT_NULL(op);
106     DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
107     return SlotToMemOperand(AllocatedOperand::cast(op)->index());
108   }
109 
SlotToMemOperand(int slot) const110   MemOperand SlotToMemOperand(int slot) const {
111     FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
112     return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
113   }
114 };
115 
116 
HasRegisterInput(Instruction * instr,size_t index)117 static inline bool HasRegisterInput(Instruction* instr, size_t index) {
118   return instr->InputAt(index)->IsRegister();
119 }
120 
121 
122 namespace {
123 
// Out-of-line stub that materializes a quiet float (32-bit) NaN into
// |result|; used as the failure path of checked float loads.
class OutOfLineLoadNAN32 final : public OutOfLineCode {
 public:
  OutOfLineLoadNAN32(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    __ LoadDoubleLiteral(result_, std::numeric_limits<float>::quiet_NaN(),
                         kScratchReg);
  }

 private:
  DoubleRegister const result_;
};
137 
138 
// Out-of-line stub that materializes a quiet double (64-bit) NaN into
// |result|; used as the failure path of checked double loads.
class OutOfLineLoadNAN64 final : public OutOfLineCode {
 public:
  OutOfLineLoadNAN64(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    __ LoadDoubleLiteral(result_, std::numeric_limits<double>::quiet_NaN(),
                         kScratchReg);
  }

 private:
  DoubleRegister const result_;
};
152 
153 
// Out-of-line stub that zeroes |result|; used as the failure path of checked
// integer loads.
class OutOfLineLoadZero final : public OutOfLineCode {
 public:
  OutOfLineLoadZero(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final { __ li(result_, Operand::Zero()); }

 private:
  Register const result_;
};
164 
165 
// Out-of-line slow path of a write barrier: calls the RecordWriteStub for a
// store of |value| into |object| at either a register offset or an immediate
// offset (exactly one of the two constructors is used per site).
class OutOfLineRecordWrite final : public OutOfLineCode {
 public:
  // Register-offset form: the store address is object_ + offset_.
  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register offset,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode)
      : OutOfLineCode(gen),
        object_(object),
        offset_(offset),
        offset_immediate_(0),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode),
        must_save_lr_(!gen->frame_access_state()->has_frame()) {}

  // Immediate-offset form: the store address is object_ + offset_immediate_.
  OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode)
      : OutOfLineCode(gen),
        object_(object),
        offset_(no_reg),
        offset_immediate_(offset),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode),
        must_save_lr_(!gen->frame_access_state()->has_frame()) {}

  void Generate() final {
    // Smis never need a write barrier (unless the mode says the value is
    // known to be a pointer/map).
    if (mode_ > RecordWriteMode::kValueIsPointer) {
      __ JumpIfSmi(value_, exit());
    }
    // Skip the stub call when the target page is not interested in incoming
    // pointers.
    __ CheckPageFlag(value_, scratch0_,
                     MemoryChunk::kPointersToHereAreInterestingMask, eq,
                     exit());
    RememberedSetAction const remembered_set_action =
        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
                                             : OMIT_REMEMBERED_SET;
    SaveFPRegsMode const save_fp_mode =
        frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
    if (must_save_lr_) {
      // We need to save and restore lr if the frame was elided.
      __ mflr(scratch1_);
      __ Push(scratch1_);
    }
    RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
                         remembered_set_action, save_fp_mode);
    // Materialize the slot address into scratch1_ for the stub.
    if (offset_.is(no_reg)) {
      __ addi(scratch1_, object_, Operand(offset_immediate_));
    } else {
      DCHECK_EQ(0, offset_immediate_);
      __ add(scratch1_, object_, offset_);
    }
    if (must_save_lr_ && FLAG_enable_embedded_constant_pool) {
      // NOTE(review): presumably the embedded constant pool pointer is not
      // valid while lr is saved here — confirm against the PPC macro
      // assembler's constant-pool handling.
      ConstantPoolUnavailableScope constant_pool_unavailable(masm());
      __ CallStub(&stub);
    } else {
      __ CallStub(&stub);
    }
    if (must_save_lr_) {
      // We need to save and restore lr if the frame was elided.
      __ Pop(scratch1_);
      __ mtlr(scratch1_);
    }
  }

 private:
  Register const object_;
  Register const offset_;
  int32_t const offset_immediate_;  // Valid if offset_.is(no_reg).
  Register const value_;
  Register const scratch0_;
  Register const scratch1_;
  RecordWriteMode const mode_;
  bool must_save_lr_;  // True when the frame was elided at this site.
};
242 
243 
// Maps a platform-independent FlagsCondition onto the PPC condition code
// that the preceding compare/record-form instruction establishes. |op| is
// consulted only for overflow conditions, which are supported for add/sub
// variants exclusively.
Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
  switch (condition) {
    case kEqual:
      return eq;
    case kNotEqual:
      return ne;
    case kSignedLessThan:
    case kUnsignedLessThan:
      return lt;
    case kSignedGreaterThanOrEqual:
    case kUnsignedGreaterThanOrEqual:
      return ge;
    case kSignedLessThanOrEqual:
    case kUnsignedLessThanOrEqual:
      return le;
    case kSignedGreaterThan:
    case kUnsignedGreaterThan:
      return gt;
    case kOverflow:
      // Overflow checked for add/sub only.
      // (The overflow-check sequences leave "overflow" encoded as lt in CR0.)
      switch (op) {
#if V8_TARGET_ARCH_PPC64
        case kPPC_Add:
        case kPPC_Sub:
#endif
        case kPPC_AddWithOverflow32:
        case kPPC_SubWithOverflow32:
          return lt;
        default:
          break;
      }
      break;
    case kNotOverflow:
      switch (op) {
#if V8_TARGET_ARCH_PPC64
        case kPPC_Add:
        case kPPC_Sub:
#endif
        case kPPC_AddWithOverflow32:
        case kPPC_SubWithOverflow32:
          return ge;
        default:
          break;
      }
      break;
    default:
      break;
  }
  UNREACHABLE();
  return kNoCondition;
}
295 
296 }  // namespace
297 
// One-input FP op; optionally rounds the double result to single precision
// with frsp.
#define ASSEMBLE_FLOAT_UNOP_RC(asm_instr, round)                     \
  do {                                                               \
    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                 i.OutputRCBit());                                   \
    if (round) {                                                     \
      __ frsp(i.OutputDoubleRegister(), i.OutputDoubleRegister());   \
    }                                                                \
  } while (0)

// Two-input FP op; optionally rounds the double result to single precision
// with frsp.
#define ASSEMBLE_FLOAT_BINOP_RC(asm_instr, round)                    \
  do {                                                               \
    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                 i.InputDoubleRegister(1), i.OutputRCBit());         \
    if (round) {                                                     \
      __ frsp(i.OutputDoubleRegister(), i.OutputDoubleRegister());   \
    }                                                                \
  } while (0)

// Integer binop: picks the register or immediate form based on input 1.
#define ASSEMBLE_BINOP(asm_instr_reg, asm_instr_imm)           \
  do {                                                         \
    if (HasRegisterInput(instr, 1)) {                          \
      __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
                       i.InputRegister(1));                    \
    } else {                                                   \
      __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
                       i.InputImmediate(1));                   \
    }                                                          \
  } while (0)


// As ASSEMBLE_BINOP, but also forwards the record bit (CR0 update).
#define ASSEMBLE_BINOP_RC(asm_instr_reg, asm_instr_imm)        \
  do {                                                         \
    if (HasRegisterInput(instr, 1)) {                          \
      __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
                       i.InputRegister(1), i.OutputRCBit());   \
    } else {                                                   \
      __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
                       i.InputImmediate(1), i.OutputRCBit());  \
    }                                                          \
  } while (0)


// As ASSEMBLE_BINOP_RC, but the immediate form takes a raw int32 rather than
// an Operand (e.g. for shift amounts).
#define ASSEMBLE_BINOP_INT_RC(asm_instr_reg, asm_instr_imm)    \
  do {                                                         \
    if (HasRegisterInput(instr, 1)) {                          \
      __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
                       i.InputRegister(1), i.OutputRCBit());   \
    } else {                                                   \
      __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
                       i.InputInt32(1), i.OutputRCBit());      \
    }                                                          \
  } while (0)
350 
351 
// Add with overflow detection; the overflow indication is left in
// kScratchReg by AddAndCheckForOverflow.
#define ASSEMBLE_ADD_WITH_OVERFLOW()                                    \
  do {                                                                  \
    if (HasRegisterInput(instr, 1)) {                                   \
      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                i.InputRegister(1), kScratchReg, r0);   \
    } else {                                                            \
      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                i.InputInt32(1), kScratchReg, r0);      \
    }                                                                   \
  } while (0)


// Subtract with overflow detection. The immediate form is implemented as an
// add of the negated immediate.
// NOTE(review): -i.InputInt32(1) overflows for INT32_MIN — confirm the
// instruction selector restricts immediates to a safe range here.
#define ASSEMBLE_SUB_WITH_OVERFLOW()                                    \
  do {                                                                  \
    if (HasRegisterInput(instr, 1)) {                                   \
      __ SubAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                i.InputRegister(1), kScratchReg, r0);   \
    } else {                                                            \
      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                -i.InputInt32(1), kScratchReg, r0);     \
    }                                                                   \
  } while (0)


#if V8_TARGET_ARCH_PPC64
// On 64-bit, the 32-bit variants additionally sign-extend the overflow
// indicator and set CR0 from it (extsw with SetRC).
#define ASSEMBLE_ADD_WITH_OVERFLOW32()         \
  do {                                         \
    ASSEMBLE_ADD_WITH_OVERFLOW();              \
    __ extsw(kScratchReg, kScratchReg, SetRC); \
  } while (0)

#define ASSEMBLE_SUB_WITH_OVERFLOW32()         \
  do {                                         \
    ASSEMBLE_SUB_WITH_OVERFLOW();              \
    __ extsw(kScratchReg, kScratchReg, SetRC); \
  } while (0)
#else
// On 32-bit PPC the plain variants already operate on 32-bit values.
#define ASSEMBLE_ADD_WITH_OVERFLOW32 ASSEMBLE_ADD_WITH_OVERFLOW
#define ASSEMBLE_SUB_WITH_OVERFLOW32 ASSEMBLE_SUB_WITH_OVERFLOW
#endif
392 
393 
// Integer compare into cr0. Chooses signed vs logical (unsigned) compare via
// CompareLogical(), and the immediate mnemonic via token pasting: the
// immediate forms are cmp_instr##i / cmpl_instr##i (e.g. cmpw -> cmpwi).
#define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr)                        \
  do {                                                                 \
    const CRegister cr = cr0;                                          \
    if (HasRegisterInput(instr, 1)) {                                  \
      if (i.CompareLogical()) {                                        \
        __ cmpl_instr(i.InputRegister(0), i.InputRegister(1), cr);     \
      } else {                                                         \
        __ cmp_instr(i.InputRegister(0), i.InputRegister(1), cr);      \
      }                                                                \
    } else {                                                           \
      if (i.CompareLogical()) {                                        \
        __ cmpl_instr##i(i.InputRegister(0), i.InputImmediate(1), cr); \
      } else {                                                         \
        __ cmp_instr##i(i.InputRegister(0), i.InputImmediate(1), cr);  \
      }                                                                \
    }                                                                  \
    DCHECK_EQ(SetRC, i.OutputRCBit());                                 \
  } while (0)


// FP compare into cr0.
#define ASSEMBLE_FLOAT_COMPARE(cmp_instr)                                 \
  do {                                                                    \
    const CRegister cr = cr0;                                             \
    __ cmp_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1), cr); \
    DCHECK_EQ(SetRC, i.OutputRCBit());                                    \
  } while (0)


// Integer modulo computed as a - (a / b) * b, since PPC has no modulo
// instruction.
#define ASSEMBLE_MODULO(div_instr, mul_instr)                        \
  do {                                                               \
    const Register scratch = kScratchReg;                            \
    __ div_instr(scratch, i.InputRegister(0), i.InputRegister(1));   \
    __ mul_instr(scratch, scratch, i.InputRegister(1));              \
    __ sub(i.OutputRegister(), i.InputRegister(0), scratch, LeaveOE, \
           i.OutputRCBit());                                         \
  } while (0)
430 
431 
// Double-precision fmod via a C runtime call (two double args, double
// result).
#define ASSEMBLE_FLOAT_MODULO()                                               \
  do {                                                                        \
    FrameScope scope(masm(), StackFrame::MANUAL);                             \
    __ PrepareCallCFunction(0, 2, kScratchReg);                               \
    __ MovToFloatParameters(i.InputDoubleRegister(0),                         \
                            i.InputDoubleRegister(1));                        \
    __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), \
                     0, 2);                                                   \
    __ MovFromFloatResult(i.OutputDoubleRegister());                          \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                                      \
  } while (0)

// One-argument ieee754 math function (sin, cos, log, ...) via a C runtime
// call.
#define ASSEMBLE_IEEE754_UNOP(name)                                            \
  do {                                                                         \
    /* TODO(bmeurer): We should really get rid of this special instruction, */ \
    /* and generate a CallAddress instruction instead. */                      \
    FrameScope scope(masm(), StackFrame::MANUAL);                              \
    __ PrepareCallCFunction(0, 1, kScratchReg);                                \
    __ MovToFloatParameter(i.InputDoubleRegister(0));                          \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()),  \
                     0, 1);                                                    \
    /* Move the result in the double result register. */                       \
    __ MovFromFloatResult(i.OutputDoubleRegister());                           \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                                       \
  } while (0)

// Two-argument ieee754 math function (atan2, pow, ...) via a C runtime call.
#define ASSEMBLE_IEEE754_BINOP(name)                                           \
  do {                                                                         \
    /* TODO(bmeurer): We should really get rid of this special instruction, */ \
    /* and generate a CallAddress instruction instead. */                      \
    FrameScope scope(masm(), StackFrame::MANUAL);                              \
    __ PrepareCallCFunction(0, 2, kScratchReg);                                \
    __ MovToFloatParameters(i.InputDoubleRegister(0),                          \
                            i.InputDoubleRegister(1));                         \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()),  \
                     0, 2);                                                    \
    /* Move the result in the double result register. */                       \
    __ MovFromFloatResult(i.OutputDoubleRegister());                           \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                                       \
  } while (0)
472 
// Computes max(left, right) with IEEE-754 semantics:
//  - if either operand is NaN, the NaN operand is returned (left when left is
//    the NaN, detected by the self-compare at check_nan_left);
//  - when both operands compare equal to zero, their sum is returned, which
//    yields +0 unless both are -0 ((+0) + (-0) == +0, (-0) + (-0) == -0).
// Fix: removed the stray trailing '\' after "} while (0)" which continued the
// macro onto the following (blank) line — fragile, and inconsistent with the
// other ASSEMBLE_* macros in this file.
#define ASSEMBLE_FLOAT_MAX()                                           \
  do {                                                                 \
    DoubleRegister left_reg = i.InputDoubleRegister(0);                \
    DoubleRegister right_reg = i.InputDoubleRegister(1);               \
    DoubleRegister result_reg = i.OutputDoubleRegister();              \
    Label check_nan_left, check_zero, return_left, return_right, done; \
    __ fcmpu(left_reg, right_reg);                                     \
    __ bunordered(&check_nan_left);                                    \
    __ beq(&check_zero);                                               \
    __ bge(&return_left);                                              \
    __ b(&return_right);                                               \
                                                                       \
    __ bind(&check_zero);                                              \
    __ fcmpu(left_reg, kDoubleRegZero);                                \
    /* left == right != 0. */                                          \
    __ bne(&return_left);                                              \
    /* At this point, both left and right are either 0 or -0. */       \
    __ fadd(result_reg, left_reg, right_reg);                          \
    __ b(&done);                                                       \
                                                                       \
    __ bind(&check_nan_left);                                          \
    __ fcmpu(left_reg, left_reg);                                      \
    /* left == NaN. */                                                 \
    __ bunordered(&return_left);                                       \
    __ bind(&return_right);                                            \
    if (!right_reg.is(result_reg)) {                                   \
      __ fmr(result_reg, right_reg);                                   \
    }                                                                  \
    __ b(&done);                                                       \
                                                                       \
    __ bind(&return_left);                                             \
    if (!left_reg.is(result_reg)) {                                    \
      __ fmr(result_reg, left_reg);                                    \
    }                                                                  \
    __ bind(&done);                                                    \
  } while (0)
509 
510 
// Computes min(left, right) with IEEE-754 semantics: NaN operands propagate
// (left when left is the NaN), and min(+0, -0) is handled via the negated-sum
// trick described inline.
#define ASSEMBLE_FLOAT_MIN()                                                   \
  do {                                                                         \
    DoubleRegister left_reg = i.InputDoubleRegister(0);                        \
    DoubleRegister right_reg = i.InputDoubleRegister(1);                       \
    DoubleRegister result_reg = i.OutputDoubleRegister();                      \
    Label check_nan_left, check_zero, return_left, return_right, done;         \
    __ fcmpu(left_reg, right_reg);                                             \
    __ bunordered(&check_nan_left);                                            \
    __ beq(&check_zero);                                                       \
    __ ble(&return_left);                                                      \
    __ b(&return_right);                                                       \
                                                                               \
    __ bind(&check_zero);                                                      \
    __ fcmpu(left_reg, kDoubleRegZero);                                        \
    /* left == right != 0. */                                                  \
    __ bne(&return_left);                                                      \
    /* At this point, both left and right are either 0 or -0. */               \
    /* Min: The algorithm is: -((-L) + (-R)), which in case of L and R being */\
    /* different registers is most efficiently expressed as -((-L) - R). */    \
    __ fneg(left_reg, left_reg);                                               \
    if (left_reg.is(right_reg)) {                                              \
      __ fadd(result_reg, left_reg, right_reg);                                \
    } else {                                                                   \
      __ fsub(result_reg, left_reg, right_reg);                                \
    }                                                                          \
    __ fneg(result_reg, result_reg);                                           \
    __ b(&done);                                                               \
                                                                               \
    __ bind(&check_nan_left);                                                  \
    __ fcmpu(left_reg, left_reg);                                              \
    /* left == NaN. */                                                         \
    __ bunordered(&return_left);                                               \
                                                                               \
    __ bind(&return_right);                                                    \
    if (!right_reg.is(result_reg)) {                                           \
      __ fmr(result_reg, right_reg);                                           \
    }                                                                          \
    __ b(&done);                                                               \
                                                                               \
    __ bind(&return_left);                                                     \
    if (!left_reg.is(result_reg)) {                                            \
      __ fmr(result_reg, left_reg);                                            \
    }                                                                          \
    __ bind(&done);                                                            \
  } while (0)
556 
557 
// FP load: picks the D-form (register+immediate, asm_instr) or X-form
// (register+register, asm_instrx) encoding from the addressing mode.
#define ASSEMBLE_LOAD_FLOAT(asm_instr, asm_instrx)    \
  do {                                                \
    DoubleRegister result = i.OutputDoubleRegister(); \
    AddressingMode mode = kMode_None;                 \
    MemOperand operand = i.MemoryOperand(&mode);      \
    if (mode == kMode_MRI) {                          \
      __ asm_instr(result, operand);                  \
    } else {                                          \
      __ asm_instrx(result, operand);                 \
    }                                                 \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());              \
  } while (0)


// Integer load: same D-form/X-form selection as ASSEMBLE_LOAD_FLOAT.
#define ASSEMBLE_LOAD_INTEGER(asm_instr, asm_instrx) \
  do {                                               \
    Register result = i.OutputRegister();            \
    AddressingMode mode = kMode_None;                \
    MemOperand operand = i.MemoryOperand(&mode);     \
    if (mode == kMode_MRI) {                         \
      __ asm_instr(result, operand);                 \
    } else {                                         \
      __ asm_instrx(result, operand);                \
    }                                                \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());             \
  } while (0)


// float32 store: rounds the double value to single precision in
// kScratchDoubleReg before storing.
#define ASSEMBLE_STORE_FLOAT32()                         \
  do {                                                   \
    size_t index = 0;                                    \
    AddressingMode mode = kMode_None;                    \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    DoubleRegister value = i.InputDoubleRegister(index); \
    __ frsp(kScratchDoubleReg, value);                   \
    if (mode == kMode_MRI) {                             \
      __ stfs(kScratchDoubleReg, operand);               \
    } else {                                             \
      __ stfsx(kScratchDoubleReg, operand);              \
    }                                                    \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                 \
  } while (0)


// float64 store.
#define ASSEMBLE_STORE_DOUBLE()                          \
  do {                                                   \
    size_t index = 0;                                    \
    AddressingMode mode = kMode_None;                    \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    DoubleRegister value = i.InputDoubleRegister(index); \
    if (mode == kMode_MRI) {                             \
      __ stfd(value, operand);                           \
    } else {                                             \
      __ stfdx(value, operand);                          \
    }                                                    \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                 \
  } while (0)


// Integer store: the value to store is the first input after the address
// inputs consumed by MemoryOperand.
#define ASSEMBLE_STORE_INTEGER(asm_instr, asm_instrx)    \
  do {                                                   \
    size_t index = 0;                                    \
    AddressingMode mode = kMode_None;                    \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    Register value = i.InputRegister(index);             \
    if (mode == kMode_MRI) {                             \
      __ asm_instr(value, operand);                      \
    } else {                                             \
      __ asm_instrx(value, operand);                     \
    }                                                    \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                 \
  } while (0)
630 
#if V8_TARGET_ARCH_PPC64
// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
// Zeroes the upper 32 bits of x so it can be used as an unsigned index.
#define CleanUInt32(x) __ ClearLeftImm(x, x, Operand(32))
#else
#define CleanUInt32(x)
#endif

// Checked FP load: compares the index register against the length (input 2)
// and branches to an out-of-line stub that produces a quiet NaN of the given
// |width| when the index is out of bounds.
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, asm_instrx, width)  \
  do {                                                             \
    DoubleRegister result = i.OutputDoubleRegister();              \
    size_t index = 0;                                              \
    AddressingMode mode = kMode_None;                              \
    /* NOTE(review): |index| is passed by value here, selecting */ \
    /* the single-argument overload; it is never advanced. */      \
    MemOperand operand = i.MemoryOperand(&mode, index);            \
    DCHECK_EQ(kMode_MRR, mode);                                    \
    Register offset = operand.rb();                                \
    if (HasRegisterInput(instr, 2)) {                              \
      __ cmplw(offset, i.InputRegister(2));                        \
    } else {                                                       \
      __ cmplwi(offset, i.InputImmediate(2));                      \
    }                                                              \
    auto ool = new (zone()) OutOfLineLoadNAN##width(this, result); \
    __ bge(ool->entry());                                          \
    /* The MRI branch is unreachable given the DCHECK above; */    \
    /* retained defensively (DCHECK is a no-op in release). */     \
    if (mode == kMode_MRI) {                                       \
      __ asm_instr(result, operand);                               \
    } else {                                                       \
      CleanUInt32(offset);                                         \
      __ asm_instrx(result, operand);                              \
    }                                                              \
    __ bind(ool->exit());                                          \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                           \
  } while (0)
662 
// Emits a bounds-checked integer load. Same shape as
// ASSEMBLE_CHECKED_LOAD_FLOAT, but on an out-of-bounds offset the
// out-of-line stub zeroes the result register instead of producing a NaN.
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr, asm_instrx) \
  do {                                                       \
    Register result = i.OutputRegister();                    \
    size_t index = 0;                                        \
    AddressingMode mode = kMode_None;                        \
    MemOperand operand = i.MemoryOperand(&mode, index);      \
    DCHECK_EQ(kMode_MRR, mode);                              \
    Register offset = operand.rb();                          \
    if (HasRegisterInput(instr, 2)) {                        \
      __ cmplw(offset, i.InputRegister(2));                  \
    } else {                                                 \
      __ cmplwi(offset, i.InputImmediate(2));                \
    }                                                        \
    auto ool = new (zone()) OutOfLineLoadZero(this, result); \
    __ bge(ool->entry());                                    \
    if (mode == kMode_MRI) {                                 \
      __ asm_instr(result, operand);                         \
    } else {                                                 \
      CleanUInt32(offset);                                   \
      __ asm_instrx(result, operand);                        \
    }                                                        \
    __ bind(ool->exit());                                    \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                     \
  } while (0)
687 
// Emits a bounds-checked float32 store. If the (unsigned) offset is >= the
// length (input 2), the store is skipped entirely. The double input (input 3)
// is first rounded to single precision with frsp into the scratch register
// before being stored.
#define ASSEMBLE_CHECKED_STORE_FLOAT32()                \
  do {                                                  \
    Label done;                                         \
    size_t index = 0;                                   \
    AddressingMode mode = kMode_None;                   \
    MemOperand operand = i.MemoryOperand(&mode, index); \
    DCHECK_EQ(kMode_MRR, mode);                         \
    Register offset = operand.rb();                     \
    if (HasRegisterInput(instr, 2)) {                   \
      __ cmplw(offset, i.InputRegister(2));             \
    } else {                                            \
      __ cmplwi(offset, i.InputImmediate(2));           \
    }                                                   \
    __ bge(&done);                                      \
    DoubleRegister value = i.InputDoubleRegister(3);    \
    __ frsp(kScratchDoubleReg, value);                  \
    if (mode == kMode_MRI) {                            \
      __ stfs(kScratchDoubleReg, operand);              \
    } else {                                            \
      CleanUInt32(offset);                              \
      __ stfsx(kScratchDoubleReg, operand);             \
    }                                                   \
    __ bind(&done);                                     \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                \
  } while (0)
713 
// Emits a bounds-checked float64 store: like ASSEMBLE_CHECKED_STORE_FLOAT32
// but stores the double input (input 3) directly, with no rounding step.
// Out-of-bounds offsets skip the store.
#define ASSEMBLE_CHECKED_STORE_DOUBLE()                 \
  do {                                                  \
    Label done;                                         \
    size_t index = 0;                                   \
    AddressingMode mode = kMode_None;                   \
    MemOperand operand = i.MemoryOperand(&mode, index); \
    DCHECK_EQ(kMode_MRR, mode);                         \
    Register offset = operand.rb();                     \
    if (HasRegisterInput(instr, 2)) {                   \
      __ cmplw(offset, i.InputRegister(2));             \
    } else {                                            \
      __ cmplwi(offset, i.InputImmediate(2));           \
    }                                                   \
    __ bge(&done);                                      \
    DoubleRegister value = i.InputDoubleRegister(3);    \
    if (mode == kMode_MRI) {                            \
      __ stfd(value, operand);                          \
    } else {                                            \
      CleanUInt32(offset);                              \
      __ stfdx(value, operand);                         \
    }                                                   \
    __ bind(&done);                                     \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                \
  } while (0)
738 
// Emits a bounds-checked integer store of input 3; out-of-bounds offsets
// (offset >= length input 2, unsigned compare) skip the store. Chooses the
// D-form or X-form store instruction by addressing mode, as in the
// unchecked ASSEMBLE_STORE_INTEGER.
#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr, asm_instrx) \
  do {                                                        \
    Label done;                                               \
    size_t index = 0;                                         \
    AddressingMode mode = kMode_None;                         \
    MemOperand operand = i.MemoryOperand(&mode, index);       \
    DCHECK_EQ(kMode_MRR, mode);                               \
    Register offset = operand.rb();                           \
    if (HasRegisterInput(instr, 2)) {                         \
      __ cmplw(offset, i.InputRegister(2));                   \
    } else {                                                  \
      __ cmplwi(offset, i.InputImmediate(2));                 \
    }                                                         \
    __ bge(&done);                                            \
    Register value = i.InputRegister(3);                      \
    if (mode == kMode_MRI) {                                  \
      __ asm_instr(value, operand);                           \
    } else {                                                  \
      CleanUInt32(offset);                                    \
      __ asm_instrx(value, operand);                          \
    }                                                         \
    __ bind(&done);                                           \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                      \
  } while (0)
763 
// Emits an atomic integer load: a full barrier (sync) before the load, then
// the cmp/bne/isync idiom after it. The compare of the result against itself
// always sets EQ, so the bne is never taken; its purpose is to create a
// control dependency on the loaded value so that the following isync orders
// later memory accesses after this load.
// NOTE(review): the label is bound before the cmp, so a (never-taken) branch
// would target the compare itself — matches the sibling PPC ports; confirm
// against the Power ISA barrier recipes if modifying.
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr, asm_instrx) \
  do {                                                      \
    Label done;                                             \
    Register result = i.OutputRegister();                   \
    AddressingMode mode = kMode_None;                       \
    MemOperand operand = i.MemoryOperand(&mode);            \
    __ sync();                                              \
    if (mode == kMode_MRI) {                                \
      __ asm_instr(result, operand);                        \
    } else {                                                \
      __ asm_instrx(result, operand);                       \
    }                                                       \
    __ bind(&done);                                         \
    __ cmp(result, result);                                 \
    __ bne(&done);                                          \
    __ isync();                                             \
  } while (0)
// Emits an atomic integer store: a full barrier (sync) is issued before the
// store so that all prior memory accesses are ordered before it. The store
// itself must leave CR0 untouched (LeaveRC).
#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr, asm_instrx)  \
  do {                                                        \
    size_t index = 0;                                         \
    AddressingMode mode = kMode_None;                         \
    MemOperand operand = i.MemoryOperand(&mode, &index);      \
    Register value = i.InputRegister(index);                  \
    __ sync();                                                \
    if (mode == kMode_MRI) {                                  \
      __ asm_instr(value, operand);                           \
    } else {                                                  \
      __ asm_instrx(value, operand);                          \
    }                                                         \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                      \
  } while (0)
795 
// Tears down the current stack frame via MacroAssembler::LeaveFrame.
void CodeGenerator::AssembleDeconstructFrame() {
  __ LeaveFrame(StackFrame::MANUAL);
}
799 
// Prepares for a tail call: if a frame has been built, restores the frame
// state first, then switches stack-slot addressing to be sp-relative.
void CodeGenerator::AssemblePrepareTailCall() {
  if (frame_access_state()->has_frame()) {
    __ RestoreFrameStateForTailCall();
  }
  frame_access_state()->SetFrameAccessToSP();
}
806 
// If the current frame is an arguments adaptor frame, drops it before a tail
// call by shuffling the callee arguments (count in |args_reg|) over the
// adaptor frame. All three scratch registers must be distinct from
// |args_reg| and from each other (checked by the DCHECK below).
void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
                                                     Register scratch1,
                                                     Register scratch2,
                                                     Register scratch3) {
  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
  Label done;

  // Check if current frame is an arguments adaptor frame.
  __ LoadP(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ CmpSmiLiteral(scratch1, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
  __ bne(&done);

  // Load arguments count from current arguments adaptor frame (note, it
  // does not include receiver).
  Register caller_args_count_reg = scratch1;
  __ LoadP(caller_args_count_reg,
           MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(caller_args_count_reg);

  ParameterCount callee_args_count(args_reg);
  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
                        scratch3);
  __ bind(&done);
}
831 
832 namespace {
833 
FlushPendingPushRegisters(MacroAssembler * masm,FrameAccessState * frame_access_state,ZoneVector<Register> * pending_pushes)834 void FlushPendingPushRegisters(MacroAssembler* masm,
835                                FrameAccessState* frame_access_state,
836                                ZoneVector<Register>* pending_pushes) {
837   switch (pending_pushes->size()) {
838     case 0:
839       break;
840     case 1:
841       masm->Push((*pending_pushes)[0]);
842       break;
843     case 2:
844       masm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
845       break;
846     case 3:
847       masm->Push((*pending_pushes)[0], (*pending_pushes)[1],
848                  (*pending_pushes)[2]);
849       break;
850     default:
851       UNREACHABLE();
852       break;
853   }
854   frame_access_state->IncreaseSPDelta(pending_pushes->size());
855   pending_pushes->resize(0);
856 }
857 
AddPendingPushRegister(MacroAssembler * masm,FrameAccessState * frame_access_state,ZoneVector<Register> * pending_pushes,Register reg)858 void AddPendingPushRegister(MacroAssembler* masm,
859                             FrameAccessState* frame_access_state,
860                             ZoneVector<Register>* pending_pushes,
861                             Register reg) {
862   pending_pushes->push_back(reg);
863   if (pending_pushes->size() == 3 || reg.is(ip)) {
864     FlushPendingPushRegisters(masm, frame_access_state, pending_pushes);
865   }
866 }
867 
AdjustStackPointerForTailCall(MacroAssembler * masm,FrameAccessState * state,int new_slot_above_sp,ZoneVector<Register> * pending_pushes=nullptr,bool allow_shrinkage=true)868 void AdjustStackPointerForTailCall(
869     MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp,
870     ZoneVector<Register>* pending_pushes = nullptr,
871     bool allow_shrinkage = true) {
872   int current_sp_offset = state->GetSPToFPSlotCount() +
873                           StandardFrameConstants::kFixedSlotCountAboveFp;
874   int stack_slot_delta = new_slot_above_sp - current_sp_offset;
875   if (stack_slot_delta > 0) {
876     if (pending_pushes != nullptr) {
877       FlushPendingPushRegisters(masm, state, pending_pushes);
878     }
879     masm->Add(sp, sp, -stack_slot_delta * kPointerSize, r0);
880     state->IncreaseSPDelta(stack_slot_delta);
881   } else if (allow_shrinkage && stack_slot_delta < 0) {
882     if (pending_pushes != nullptr) {
883       FlushPendingPushRegisters(masm, state, pending_pushes);
884     }
885     masm->Add(sp, sp, -stack_slot_delta * kPointerSize, r0);
886     state->IncreaseSPDelta(stack_slot_delta);
887   }
888 }
889 
890 }  // namespace
891 
// Before the gap moves of a tail call: turns a trailing run of
// push-compatible moves (immediates and scalars ending exactly at
// |first_unused_stack_slot|) into batched pushes, then adjusts sp to the
// expected slot count. Stack-slot and immediate sources are staged through
// ip; register sources are pushed directly.
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
                                              int first_unused_stack_slot) {
  CodeGenerator::PushTypeFlags flags(kImmediatePush | kScalarPush);
  ZoneVector<MoveOperands*> pushes(zone());
  GetPushCompatibleMoves(instr, flags, &pushes);

  if (!pushes.empty() &&
      (LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
       first_unused_stack_slot)) {
    PPCOperandConverter g(this, instr);
    ZoneVector<Register> pending_pushes(zone());
    for (auto move : pushes) {
      LocationOperand destination_location(
          LocationOperand::cast(move->destination()));
      InstructionOperand source(move->source());
      // Bring sp to the slot just above this push's destination, accounting
      // for pushes still queued but not yet emitted.
      AdjustStackPointerForTailCall(
          masm(), frame_access_state(),
          destination_location.index() - pending_pushes.size(),
          &pending_pushes);
      if (source.IsStackSlot()) {
        LocationOperand source_location(LocationOperand::cast(source));
        __ LoadP(ip, g.SlotToMemOperand(source_location.index()));
        AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
                               ip);
      } else if (source.IsRegister()) {
        LocationOperand source_location(LocationOperand::cast(source));
        AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
                               source_location.GetRegister());
      } else if (source.IsImmediate()) {
        // NOTE(review): the immediate appears to be expected in ip here —
        // confirm against GetPushCompatibleMoves/its callers.
        AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
                               ip);
      } else {
        // Pushes of non-scalar data types is not supported.
        UNIMPLEMENTED();
      }
      // The move has been materialized as a push; remove it from the gap.
      move->Eliminate();
    }
    FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes);
  }
  AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                first_unused_stack_slot, nullptr, false);
}
934 
// After the gap moves of a tail call: adjusts sp (growing or shrinking) so
// that exactly |first_unused_stack_slot| slots remain above it.
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
                                             int first_unused_stack_slot) {
  AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                first_unused_stack_slot);
}
940 
941 
942 // Assembles an instruction after register allocation, producing machine code.
AssembleArchInstruction(Instruction * instr)943 CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
944     Instruction* instr) {
945   PPCOperandConverter i(this, instr);
946   ArchOpcode opcode = ArchOpcodeField::decode(instr->opcode());
947 
948   switch (opcode) {
949     case kArchCallCodeObject: {
950       v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
951           masm());
952       EnsureSpaceForLazyDeopt();
953       if (HasRegisterInput(instr, 0)) {
954         __ addi(ip, i.InputRegister(0),
955                 Operand(Code::kHeaderSize - kHeapObjectTag));
956         __ Call(ip);
957       } else {
958         __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
959                 RelocInfo::CODE_TARGET);
960       }
961       RecordCallPosition(instr);
962       DCHECK_EQ(LeaveRC, i.OutputRCBit());
963       frame_access_state()->ClearSPDelta();
964       break;
965     }
966     case kArchTailCallCodeObjectFromJSFunction:
967     case kArchTailCallCodeObject: {
968       if (opcode == kArchTailCallCodeObjectFromJSFunction) {
969         AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
970                                          i.TempRegister(0), i.TempRegister(1),
971                                          i.TempRegister(2));
972       }
973       if (HasRegisterInput(instr, 0)) {
974         __ addi(ip, i.InputRegister(0),
975                 Operand(Code::kHeaderSize - kHeapObjectTag));
976         __ Jump(ip);
977       } else {
978         // We cannot use the constant pool to load the target since
979         // we've already restored the caller's frame.
980         ConstantPoolUnavailableScope constant_pool_unavailable(masm());
981         __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
982                 RelocInfo::CODE_TARGET);
983       }
984       DCHECK_EQ(LeaveRC, i.OutputRCBit());
985       frame_access_state()->ClearSPDelta();
986       frame_access_state()->SetFrameAccessToDefault();
987       break;
988     }
989     case kArchTailCallAddress: {
990       CHECK(!instr->InputAt(0)->IsImmediate());
991       __ Jump(i.InputRegister(0));
992       frame_access_state()->ClearSPDelta();
993       frame_access_state()->SetFrameAccessToDefault();
994       break;
995     }
996     case kArchCallJSFunction: {
997       v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
998           masm());
999       EnsureSpaceForLazyDeopt();
1000       Register func = i.InputRegister(0);
1001       if (FLAG_debug_code) {
1002         // Check the function's context matches the context argument.
1003         __ LoadP(kScratchReg,
1004                  FieldMemOperand(func, JSFunction::kContextOffset));
1005         __ cmp(cp, kScratchReg);
1006         __ Assert(eq, kWrongFunctionContext);
1007       }
1008       __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
1009       __ Call(ip);
1010       RecordCallPosition(instr);
1011       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1012       frame_access_state()->ClearSPDelta();
1013       break;
1014     }
1015     case kArchTailCallJSFunctionFromJSFunction: {
1016       Register func = i.InputRegister(0);
1017       if (FLAG_debug_code) {
1018         // Check the function's context matches the context argument.
1019         __ LoadP(kScratchReg,
1020                  FieldMemOperand(func, JSFunction::kContextOffset));
1021         __ cmp(cp, kScratchReg);
1022         __ Assert(eq, kWrongFunctionContext);
1023       }
1024       AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
1025                                        i.TempRegister(0), i.TempRegister(1),
1026                                        i.TempRegister(2));
1027       __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
1028       __ Jump(ip);
1029       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1030       frame_access_state()->ClearSPDelta();
1031       frame_access_state()->SetFrameAccessToDefault();
1032       break;
1033     }
1034     case kArchPrepareCallCFunction: {
1035       int const num_parameters = MiscField::decode(instr->opcode());
1036       __ PrepareCallCFunction(num_parameters, kScratchReg);
1037       // Frame alignment requires using FP-relative frame addressing.
1038       frame_access_state()->SetFrameAccessToFP();
1039       break;
1040     }
1041     case kArchPrepareTailCall:
1042       AssemblePrepareTailCall();
1043       break;
1044     case kArchComment: {
1045       Address comment_string = i.InputExternalReference(0).address();
1046       __ RecordComment(reinterpret_cast<const char*>(comment_string));
1047       break;
1048     }
1049     case kArchCallCFunction: {
1050       int const num_parameters = MiscField::decode(instr->opcode());
1051       if (instr->InputAt(0)->IsImmediate()) {
1052         ExternalReference ref = i.InputExternalReference(0);
1053         __ CallCFunction(ref, num_parameters);
1054       } else {
1055         Register func = i.InputRegister(0);
1056         __ CallCFunction(func, num_parameters);
1057       }
1058       frame_access_state()->SetFrameAccessToDefault();
1059       frame_access_state()->ClearSPDelta();
1060       break;
1061     }
1062     case kArchJmp:
1063       AssembleArchJump(i.InputRpo(0));
1064       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1065       break;
1066     case kArchLookupSwitch:
1067       AssembleArchLookupSwitch(instr);
1068       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1069       break;
1070     case kArchTableSwitch:
1071       AssembleArchTableSwitch(instr);
1072       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1073       break;
1074     case kArchDebugBreak:
1075       __ stop("kArchDebugBreak");
1076       break;
1077     case kArchNop:
1078     case kArchThrowTerminator:
1079       // don't emit code for nops.
1080       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1081       break;
1082     case kArchDeoptimize: {
1083       int deopt_state_id =
1084           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
1085       Deoptimizer::BailoutType bailout_type =
1086           Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
1087       CodeGenResult result = AssembleDeoptimizerCall(
1088           deopt_state_id, bailout_type, current_source_position_);
1089       if (result != kSuccess) return result;
1090       break;
1091     }
1092     case kArchRet:
1093       AssembleReturn(instr->InputAt(0));
1094       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1095       break;
1096     case kArchStackPointer:
1097       __ mr(i.OutputRegister(), sp);
1098       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1099       break;
1100     case kArchFramePointer:
1101       __ mr(i.OutputRegister(), fp);
1102       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1103       break;
1104     case kArchParentFramePointer:
1105       if (frame_access_state()->has_frame()) {
1106         __ LoadP(i.OutputRegister(), MemOperand(fp, 0));
1107       } else {
1108         __ mr(i.OutputRegister(), fp);
1109       }
1110       break;
1111     case kArchTruncateDoubleToI:
1112       // TODO(mbrandy): move slow call to stub out of line.
1113       __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
1114       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1115       break;
1116     case kArchStoreWithWriteBarrier: {
1117       RecordWriteMode mode =
1118           static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
1119       Register object = i.InputRegister(0);
1120       Register value = i.InputRegister(2);
1121       Register scratch0 = i.TempRegister(0);
1122       Register scratch1 = i.TempRegister(1);
1123       OutOfLineRecordWrite* ool;
1124 
1125       AddressingMode addressing_mode =
1126           AddressingModeField::decode(instr->opcode());
1127       if (addressing_mode == kMode_MRI) {
1128         int32_t offset = i.InputInt32(1);
1129         ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
1130                                                 scratch0, scratch1, mode);
1131         __ StoreP(value, MemOperand(object, offset));
1132       } else {
1133         DCHECK_EQ(kMode_MRR, addressing_mode);
1134         Register offset(i.InputRegister(1));
1135         ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
1136                                                 scratch0, scratch1, mode);
1137         __ StorePX(value, MemOperand(object, offset));
1138       }
1139       __ CheckPageFlag(object, scratch0,
1140                        MemoryChunk::kPointersFromHereAreInterestingMask, ne,
1141                        ool->entry());
1142       __ bind(ool->exit());
1143       break;
1144     }
1145     case kArchStackSlot: {
1146       FrameOffset offset =
1147           frame_access_state()->GetFrameOffset(i.InputInt32(0));
1148       __ addi(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
1149               Operand(offset.offset()));
1150       break;
1151     }
1152     case kPPC_And:
1153       if (HasRegisterInput(instr, 1)) {
1154         __ and_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
1155                 i.OutputRCBit());
1156       } else {
1157         __ andi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
1158       }
1159       break;
1160     case kPPC_AndComplement:
1161       __ andc(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
1162               i.OutputRCBit());
1163       break;
1164     case kPPC_Or:
1165       if (HasRegisterInput(instr, 1)) {
1166         __ orx(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
1167                i.OutputRCBit());
1168       } else {
1169         __ ori(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
1170         DCHECK_EQ(LeaveRC, i.OutputRCBit());
1171       }
1172       break;
1173     case kPPC_OrComplement:
1174       __ orc(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
1175              i.OutputRCBit());
1176       break;
1177     case kPPC_Xor:
1178       if (HasRegisterInput(instr, 1)) {
1179         __ xor_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
1180                 i.OutputRCBit());
1181       } else {
1182         __ xori(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
1183         DCHECK_EQ(LeaveRC, i.OutputRCBit());
1184       }
1185       break;
1186     case kPPC_ShiftLeft32:
1187       ASSEMBLE_BINOP_RC(slw, slwi);
1188       break;
1189 #if V8_TARGET_ARCH_PPC64
1190     case kPPC_ShiftLeft64:
1191       ASSEMBLE_BINOP_RC(sld, sldi);
1192       break;
1193 #endif
1194     case kPPC_ShiftRight32:
1195       ASSEMBLE_BINOP_RC(srw, srwi);
1196       break;
1197 #if V8_TARGET_ARCH_PPC64
1198     case kPPC_ShiftRight64:
1199       ASSEMBLE_BINOP_RC(srd, srdi);
1200       break;
1201 #endif
1202     case kPPC_ShiftRightAlg32:
1203       ASSEMBLE_BINOP_INT_RC(sraw, srawi);
1204       break;
1205 #if V8_TARGET_ARCH_PPC64
1206     case kPPC_ShiftRightAlg64:
1207       ASSEMBLE_BINOP_INT_RC(srad, sradi);
1208       break;
1209 #endif
1210 #if !V8_TARGET_ARCH_PPC64
1211     case kPPC_AddPair:
1212       // i.InputRegister(0) ... left low word.
1213       // i.InputRegister(1) ... left high word.
1214       // i.InputRegister(2) ... right low word.
1215       // i.InputRegister(3) ... right high word.
1216       __ addc(i.OutputRegister(0), i.InputRegister(0), i.InputRegister(2));
1217       __ adde(i.OutputRegister(1), i.InputRegister(1), i.InputRegister(3));
1218       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1219       break;
1220     case kPPC_SubPair:
1221       // i.InputRegister(0) ... left low word.
1222       // i.InputRegister(1) ... left high word.
1223       // i.InputRegister(2) ... right low word.
1224       // i.InputRegister(3) ... right high word.
1225       __ subc(i.OutputRegister(0), i.InputRegister(0), i.InputRegister(2));
1226       __ sube(i.OutputRegister(1), i.InputRegister(1), i.InputRegister(3));
1227       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1228       break;
1229     case kPPC_MulPair:
1230       // i.InputRegister(0) ... left low word.
1231       // i.InputRegister(1) ... left high word.
1232       // i.InputRegister(2) ... right low word.
1233       // i.InputRegister(3) ... right high word.
1234       __ mullw(i.TempRegister(0), i.InputRegister(0), i.InputRegister(3));
1235       __ mullw(i.TempRegister(1), i.InputRegister(2), i.InputRegister(1));
1236       __ add(i.TempRegister(0), i.TempRegister(0), i.TempRegister(1));
1237       __ mullw(i.OutputRegister(0), i.InputRegister(0), i.InputRegister(2));
1238       __ mulhwu(i.OutputRegister(1), i.InputRegister(0), i.InputRegister(2));
1239       __ add(i.OutputRegister(1), i.OutputRegister(1), i.TempRegister(0));
1240       break;
1241     case kPPC_ShiftLeftPair: {
1242       Register second_output =
1243           instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
1244       if (instr->InputAt(2)->IsImmediate()) {
1245         __ ShiftLeftPair(i.OutputRegister(0), second_output, i.InputRegister(0),
1246                          i.InputRegister(1), i.InputInt32(2));
1247       } else {
1248         __ ShiftLeftPair(i.OutputRegister(0), second_output, i.InputRegister(0),
1249                          i.InputRegister(1), kScratchReg, i.InputRegister(2));
1250       }
1251       break;
1252     }
1253     case kPPC_ShiftRightPair: {
1254       Register second_output =
1255           instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
1256       if (instr->InputAt(2)->IsImmediate()) {
1257         __ ShiftRightPair(i.OutputRegister(0), second_output,
1258                           i.InputRegister(0), i.InputRegister(1),
1259                           i.InputInt32(2));
1260       } else {
1261         __ ShiftRightPair(i.OutputRegister(0), second_output,
1262                           i.InputRegister(0), i.InputRegister(1), kScratchReg,
1263                           i.InputRegister(2));
1264       }
1265       break;
1266     }
1267     case kPPC_ShiftRightAlgPair: {
1268       Register second_output =
1269           instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
1270       if (instr->InputAt(2)->IsImmediate()) {
1271         __ ShiftRightAlgPair(i.OutputRegister(0), second_output,
1272                              i.InputRegister(0), i.InputRegister(1),
1273                              i.InputInt32(2));
1274       } else {
1275         __ ShiftRightAlgPair(i.OutputRegister(0), second_output,
1276                              i.InputRegister(0), i.InputRegister(1),
1277                              kScratchReg, i.InputRegister(2));
1278       }
1279       break;
1280     }
1281 #endif
1282     case kPPC_RotRight32:
1283       if (HasRegisterInput(instr, 1)) {
1284         __ subfic(kScratchReg, i.InputRegister(1), Operand(32));
1285         __ rotlw(i.OutputRegister(), i.InputRegister(0), kScratchReg,
1286                  i.OutputRCBit());
1287       } else {
1288         int sh = i.InputInt32(1);
1289         __ rotrwi(i.OutputRegister(), i.InputRegister(0), sh, i.OutputRCBit());
1290       }
1291       break;
1292 #if V8_TARGET_ARCH_PPC64
1293     case kPPC_RotRight64:
1294       if (HasRegisterInput(instr, 1)) {
1295         __ subfic(kScratchReg, i.InputRegister(1), Operand(64));
1296         __ rotld(i.OutputRegister(), i.InputRegister(0), kScratchReg,
1297                  i.OutputRCBit());
1298       } else {
1299         int sh = i.InputInt32(1);
1300         __ rotrdi(i.OutputRegister(), i.InputRegister(0), sh, i.OutputRCBit());
1301       }
1302       break;
1303 #endif
1304     case kPPC_Not:
1305       __ notx(i.OutputRegister(), i.InputRegister(0), i.OutputRCBit());
1306       break;
1307     case kPPC_RotLeftAndMask32:
1308       __ rlwinm(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
1309                 31 - i.InputInt32(2), 31 - i.InputInt32(3), i.OutputRCBit());
1310       break;
1311 #if V8_TARGET_ARCH_PPC64
1312     case kPPC_RotLeftAndClear64:
1313       __ rldic(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
1314                63 - i.InputInt32(2), i.OutputRCBit());
1315       break;
1316     case kPPC_RotLeftAndClearLeft64:
1317       __ rldicl(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
1318                 63 - i.InputInt32(2), i.OutputRCBit());
1319       break;
1320     case kPPC_RotLeftAndClearRight64:
1321       __ rldicr(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
1322                 63 - i.InputInt32(2), i.OutputRCBit());
1323       break;
1324 #endif
1325     case kPPC_Add:
1326 #if V8_TARGET_ARCH_PPC64
1327       if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
1328         ASSEMBLE_ADD_WITH_OVERFLOW();
1329       } else {
1330 #endif
1331         if (HasRegisterInput(instr, 1)) {
1332           __ add(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
1333                  LeaveOE, i.OutputRCBit());
1334         } else {
1335           __ addi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
1336           DCHECK_EQ(LeaveRC, i.OutputRCBit());
1337         }
1338 #if V8_TARGET_ARCH_PPC64
1339       }
1340 #endif
1341       break;
1342     case kPPC_AddWithOverflow32:
1343       ASSEMBLE_ADD_WITH_OVERFLOW32();
1344       break;
1345     case kPPC_AddDouble:
1346       ASSEMBLE_FLOAT_BINOP_RC(fadd, MiscField::decode(instr->opcode()));
1347       break;
1348     case kPPC_Sub:
1349 #if V8_TARGET_ARCH_PPC64
1350       if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
1351         ASSEMBLE_SUB_WITH_OVERFLOW();
1352       } else {
1353 #endif
1354         if (HasRegisterInput(instr, 1)) {
1355           __ sub(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
1356                  LeaveOE, i.OutputRCBit());
1357         } else {
1358           __ subi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
1359           DCHECK_EQ(LeaveRC, i.OutputRCBit());
1360         }
1361 #if V8_TARGET_ARCH_PPC64
1362       }
1363 #endif
1364       break;
1365     case kPPC_SubWithOverflow32:
1366       ASSEMBLE_SUB_WITH_OVERFLOW32();
1367       break;
1368     case kPPC_SubDouble:
1369       ASSEMBLE_FLOAT_BINOP_RC(fsub, MiscField::decode(instr->opcode()));
1370       break;
1371     case kPPC_Mul32:
1372       __ mullw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
1373                LeaveOE, i.OutputRCBit());
1374       break;
1375 #if V8_TARGET_ARCH_PPC64
1376     case kPPC_Mul64:
1377       __ mulld(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
1378                LeaveOE, i.OutputRCBit());
1379       break;
1380 #endif
1381 
1382     case kPPC_Mul32WithHigh32:
1383       if (i.OutputRegister(0).is(i.InputRegister(0)) ||
1384           i.OutputRegister(0).is(i.InputRegister(1)) ||
1385           i.OutputRegister(1).is(i.InputRegister(0)) ||
1386           i.OutputRegister(1).is(i.InputRegister(1))) {
1387         __ mullw(kScratchReg,
1388                  i.InputRegister(0), i.InputRegister(1));  // low
1389         __ mulhw(i.OutputRegister(1),
1390                  i.InputRegister(0), i.InputRegister(1));  // high
1391         __ mr(i.OutputRegister(0), kScratchReg);
1392       } else {
1393         __ mullw(i.OutputRegister(0),
1394                  i.InputRegister(0), i.InputRegister(1));  // low
1395         __ mulhw(i.OutputRegister(1),
1396                  i.InputRegister(0), i.InputRegister(1));  // high
1397       }
1398       break;
1399     case kPPC_MulHigh32:
1400       __ mulhw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
1401                i.OutputRCBit());
1402       break;
1403     case kPPC_MulHighU32:
1404       __ mulhwu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
1405                 i.OutputRCBit());
1406       break;
1407     case kPPC_MulDouble:
1408       ASSEMBLE_FLOAT_BINOP_RC(fmul, MiscField::decode(instr->opcode()));
1409       break;
1410     case kPPC_Div32:
1411       __ divw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
1412       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1413       break;
1414 #if V8_TARGET_ARCH_PPC64
1415     case kPPC_Div64:
1416       __ divd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
1417       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1418       break;
1419 #endif
1420     case kPPC_DivU32:
1421       __ divwu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
1422       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1423       break;
1424 #if V8_TARGET_ARCH_PPC64
1425     case kPPC_DivU64:
1426       __ divdu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
1427       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1428       break;
1429 #endif
1430     case kPPC_DivDouble:
1431       ASSEMBLE_FLOAT_BINOP_RC(fdiv, MiscField::decode(instr->opcode()));
1432       break;
1433     case kPPC_Mod32:
1434       ASSEMBLE_MODULO(divw, mullw);
1435       break;
1436 #if V8_TARGET_ARCH_PPC64
1437     case kPPC_Mod64:
1438       ASSEMBLE_MODULO(divd, mulld);
1439       break;
1440 #endif
1441     case kPPC_ModU32:
1442       ASSEMBLE_MODULO(divwu, mullw);
1443       break;
1444 #if V8_TARGET_ARCH_PPC64
1445     case kPPC_ModU64:
1446       ASSEMBLE_MODULO(divdu, mulld);
1447       break;
1448 #endif
1449     case kPPC_ModDouble:
1450       // TODO(bmeurer): We should really get rid of this special instruction,
1451       // and generate a CallAddress instruction instead.
1452       ASSEMBLE_FLOAT_MODULO();
1453       break;
1454     case kIeee754Float64Acos:
1455       ASSEMBLE_IEEE754_UNOP(acos);
1456       break;
1457     case kIeee754Float64Acosh:
1458       ASSEMBLE_IEEE754_UNOP(acosh);
1459       break;
1460     case kIeee754Float64Asin:
1461       ASSEMBLE_IEEE754_UNOP(asin);
1462       break;
1463     case kIeee754Float64Asinh:
1464       ASSEMBLE_IEEE754_UNOP(asinh);
1465       break;
1466     case kIeee754Float64Atan:
1467       ASSEMBLE_IEEE754_UNOP(atan);
1468       break;
1469     case kIeee754Float64Atan2:
1470       ASSEMBLE_IEEE754_BINOP(atan2);
1471       break;
1472     case kIeee754Float64Atanh:
1473       ASSEMBLE_IEEE754_UNOP(atanh);
1474       break;
1475     case kIeee754Float64Tan:
1476       ASSEMBLE_IEEE754_UNOP(tan);
1477       break;
1478     case kIeee754Float64Tanh:
1479       ASSEMBLE_IEEE754_UNOP(tanh);
1480       break;
1481     case kIeee754Float64Cbrt:
1482       ASSEMBLE_IEEE754_UNOP(cbrt);
1483       break;
1484     case kIeee754Float64Sin:
1485       ASSEMBLE_IEEE754_UNOP(sin);
1486       break;
1487     case kIeee754Float64Sinh:
1488       ASSEMBLE_IEEE754_UNOP(sinh);
1489       break;
1490     case kIeee754Float64Cos:
1491       ASSEMBLE_IEEE754_UNOP(cos);
1492       break;
1493     case kIeee754Float64Cosh:
1494       ASSEMBLE_IEEE754_UNOP(cosh);
1495       break;
1496     case kIeee754Float64Exp:
1497       ASSEMBLE_IEEE754_UNOP(exp);
1498       break;
1499     case kIeee754Float64Expm1:
1500       ASSEMBLE_IEEE754_UNOP(expm1);
1501       break;
1502     case kIeee754Float64Log:
1503       ASSEMBLE_IEEE754_UNOP(log);
1504       break;
1505     case kIeee754Float64Log1p:
1506       ASSEMBLE_IEEE754_UNOP(log1p);
1507       break;
1508     case kIeee754Float64Log2:
1509       ASSEMBLE_IEEE754_UNOP(log2);
1510       break;
1511     case kIeee754Float64Log10:
1512       ASSEMBLE_IEEE754_UNOP(log10);
1513       break;
1514     case kIeee754Float64Pow: {
1515       MathPowStub stub(isolate(), MathPowStub::DOUBLE);
1516       __ CallStub(&stub);
1517       __ Move(d1, d3);
1518       break;
1519     }
1520     case kPPC_Neg:
1521       __ neg(i.OutputRegister(), i.InputRegister(0), LeaveOE, i.OutputRCBit());
1522       break;
1523     case kPPC_MaxDouble:
1524       ASSEMBLE_FLOAT_MAX();
1525       break;
1526     case kPPC_MinDouble:
1527       ASSEMBLE_FLOAT_MIN();
1528       break;
1529     case kPPC_AbsDouble:
1530       ASSEMBLE_FLOAT_UNOP_RC(fabs, 0);
1531       break;
1532     case kPPC_SqrtDouble:
1533       ASSEMBLE_FLOAT_UNOP_RC(fsqrt, MiscField::decode(instr->opcode()));
1534       break;
1535     case kPPC_FloorDouble:
1536       ASSEMBLE_FLOAT_UNOP_RC(frim, MiscField::decode(instr->opcode()));
1537       break;
1538     case kPPC_CeilDouble:
1539       ASSEMBLE_FLOAT_UNOP_RC(frip, MiscField::decode(instr->opcode()));
1540       break;
1541     case kPPC_TruncateDouble:
1542       ASSEMBLE_FLOAT_UNOP_RC(friz, MiscField::decode(instr->opcode()));
1543       break;
1544     case kPPC_RoundDouble:
1545       ASSEMBLE_FLOAT_UNOP_RC(frin, MiscField::decode(instr->opcode()));
1546       break;
1547     case kPPC_NegDouble:
1548       ASSEMBLE_FLOAT_UNOP_RC(fneg, 0);
1549       break;
1550     case kPPC_Cntlz32:
1551       __ cntlzw_(i.OutputRegister(), i.InputRegister(0));
1552       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1553       break;
1554 #if V8_TARGET_ARCH_PPC64
1555     case kPPC_Cntlz64:
1556       __ cntlzd_(i.OutputRegister(), i.InputRegister(0));
1557       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1558       break;
1559 #endif
1560     case kPPC_Popcnt32:
1561       __ popcntw(i.OutputRegister(), i.InputRegister(0));
1562       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1563       break;
1564 #if V8_TARGET_ARCH_PPC64
1565     case kPPC_Popcnt64:
1566       __ popcntd(i.OutputRegister(), i.InputRegister(0));
1567       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1568       break;
1569 #endif
1570     case kPPC_Cmp32:
1571       ASSEMBLE_COMPARE(cmpw, cmplw);
1572       break;
1573 #if V8_TARGET_ARCH_PPC64
1574     case kPPC_Cmp64:
1575       ASSEMBLE_COMPARE(cmp, cmpl);
1576       break;
1577 #endif
1578     case kPPC_CmpDouble:
1579       ASSEMBLE_FLOAT_COMPARE(fcmpu);
1580       break;
1581     case kPPC_Tst32:
1582       if (HasRegisterInput(instr, 1)) {
1583         __ and_(r0, i.InputRegister(0), i.InputRegister(1), i.OutputRCBit());
1584       } else {
1585         __ andi(r0, i.InputRegister(0), i.InputImmediate(1));
1586       }
1587 #if V8_TARGET_ARCH_PPC64
1588       __ extsw(r0, r0, i.OutputRCBit());
1589 #endif
1590       DCHECK_EQ(SetRC, i.OutputRCBit());
1591       break;
1592 #if V8_TARGET_ARCH_PPC64
1593     case kPPC_Tst64:
1594       if (HasRegisterInput(instr, 1)) {
1595         __ and_(r0, i.InputRegister(0), i.InputRegister(1), i.OutputRCBit());
1596       } else {
1597         __ andi(r0, i.InputRegister(0), i.InputImmediate(1));
1598       }
1599       DCHECK_EQ(SetRC, i.OutputRCBit());
1600       break;
1601 #endif
1602     case kPPC_Float64SilenceNaN: {
1603       DoubleRegister value = i.InputDoubleRegister(0);
1604       DoubleRegister result = i.OutputDoubleRegister();
1605       __ CanonicalizeNaN(result, value);
1606       break;
1607     }
1608     case kPPC_Push:
1609       if (instr->InputAt(0)->IsFPRegister()) {
1610         __ stfdu(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
1611         frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
1612       } else {
1613         __ Push(i.InputRegister(0));
1614         frame_access_state()->IncreaseSPDelta(1);
1615       }
1616       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1617       break;
1618     case kPPC_PushFrame: {
1619       int num_slots = i.InputInt32(1);
1620       if (instr->InputAt(0)->IsFPRegister()) {
1621         LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
1622         if (op->representation() == MachineRepresentation::kFloat64) {
1623           __ StoreDoubleU(i.InputDoubleRegister(0),
1624                         MemOperand(sp, -num_slots * kPointerSize), r0);
1625         } else {
1626           DCHECK(op->representation() == MachineRepresentation::kFloat32);
1627           __ StoreSingleU(i.InputDoubleRegister(0),
1628                         MemOperand(sp, -num_slots * kPointerSize), r0);
1629         }
1630       } else {
1631         __ StorePU(i.InputRegister(0),
1632                    MemOperand(sp, -num_slots * kPointerSize), r0);
1633       }
1634       break;
1635     }
1636     case kPPC_StoreToStackSlot: {
1637       int slot = i.InputInt32(1);
1638       if (instr->InputAt(0)->IsFPRegister()) {
1639         LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
1640         if (op->representation() == MachineRepresentation::kFloat64) {
1641           __ StoreDouble(i.InputDoubleRegister(0),
1642                         MemOperand(sp, slot * kPointerSize), r0);
1643         } else {
1644           DCHECK(op->representation() == MachineRepresentation::kFloat32);
1645           __ StoreSingle(i.InputDoubleRegister(0),
1646                         MemOperand(sp, slot * kPointerSize), r0);
1647         }
1648       } else {
1649         __ StoreP(i.InputRegister(0), MemOperand(sp, slot * kPointerSize), r0);
1650       }
1651       break;
1652     }
1653     case kPPC_ExtendSignWord8:
1654       __ extsb(i.OutputRegister(), i.InputRegister(0));
1655       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1656       break;
1657     case kPPC_ExtendSignWord16:
1658       __ extsh(i.OutputRegister(), i.InputRegister(0));
1659       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1660       break;
1661 #if V8_TARGET_ARCH_PPC64
1662     case kPPC_ExtendSignWord32:
1663       __ extsw(i.OutputRegister(), i.InputRegister(0));
1664       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1665       break;
1666     case kPPC_Uint32ToUint64:
1667       // Zero extend
1668       __ clrldi(i.OutputRegister(), i.InputRegister(0), Operand(32));
1669       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1670       break;
1671     case kPPC_Int64ToInt32:
1672       __ extsw(i.OutputRegister(), i.InputRegister(0));
1673       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1674       break;
1675     case kPPC_Int64ToFloat32:
1676       __ ConvertInt64ToFloat(i.InputRegister(0), i.OutputDoubleRegister());
1677       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1678       break;
1679     case kPPC_Int64ToDouble:
1680       __ ConvertInt64ToDouble(i.InputRegister(0), i.OutputDoubleRegister());
1681       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1682       break;
1683     case kPPC_Uint64ToFloat32:
1684       __ ConvertUnsignedInt64ToFloat(i.InputRegister(0),
1685                                      i.OutputDoubleRegister());
1686       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1687       break;
1688     case kPPC_Uint64ToDouble:
1689       __ ConvertUnsignedInt64ToDouble(i.InputRegister(0),
1690                                       i.OutputDoubleRegister());
1691       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1692       break;
1693 #endif
1694     case kPPC_Int32ToFloat32:
1695       __ ConvertIntToFloat(i.InputRegister(0), i.OutputDoubleRegister());
1696       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1697       break;
1698     case kPPC_Int32ToDouble:
1699       __ ConvertIntToDouble(i.InputRegister(0), i.OutputDoubleRegister());
1700       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1701       break;
1702     case kPPC_Uint32ToFloat32:
1703       __ ConvertUnsignedIntToFloat(i.InputRegister(0),
1704                                    i.OutputDoubleRegister());
1705       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1706       break;
1707     case kPPC_Uint32ToDouble:
1708       __ ConvertUnsignedIntToDouble(i.InputRegister(0),
1709                                     i.OutputDoubleRegister());
1710       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1711       break;
1712     case kPPC_DoubleToInt32:
1713     case kPPC_DoubleToUint32:
1714     case kPPC_DoubleToInt64: {
1715 #if V8_TARGET_ARCH_PPC64
1716       bool check_conversion =
1717           (opcode == kPPC_DoubleToInt64 && i.OutputCount() > 1);
1718       if (check_conversion) {
1719         __ mtfsb0(VXCVI);  // clear FPSCR:VXCVI bit
1720       }
1721 #endif
1722       __ ConvertDoubleToInt64(i.InputDoubleRegister(0),
1723 #if !V8_TARGET_ARCH_PPC64
1724                               kScratchReg,
1725 #endif
1726                               i.OutputRegister(0), kScratchDoubleReg);
1727 #if V8_TARGET_ARCH_PPC64
1728       if (check_conversion) {
1729         // Set 2nd output to zero if conversion fails.
1730         CRegister cr = cr7;
1731         int crbit = v8::internal::Assembler::encode_crbit(
1732             cr, static_cast<CRBit>(VXCVI % CRWIDTH));
1733         __ mcrfs(cr, VXCVI);  // extract FPSCR field containing VXCVI into cr7
1734         if (CpuFeatures::IsSupported(ISELECT)) {
1735           __ li(i.OutputRegister(1), Operand(1));
1736           __ isel(i.OutputRegister(1), r0, i.OutputRegister(1), crbit);
1737         } else {
1738           __ li(i.OutputRegister(1), Operand::Zero());
1739           __ bc(v8::internal::Assembler::kInstrSize * 2, BT, crbit);
1740           __ li(i.OutputRegister(1), Operand(1));
1741         }
1742       }
1743 #endif
1744       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1745       break;
1746     }
1747 #if V8_TARGET_ARCH_PPC64
1748     case kPPC_DoubleToUint64: {
1749       bool check_conversion = (i.OutputCount() > 1);
1750       if (check_conversion) {
1751         __ mtfsb0(VXCVI);  // clear FPSCR:VXCVI bit
1752       }
1753       __ ConvertDoubleToUnsignedInt64(i.InputDoubleRegister(0),
1754                                       i.OutputRegister(0), kScratchDoubleReg);
1755       if (check_conversion) {
1756         // Set 2nd output to zero if conversion fails.
1757         CRegister cr = cr7;
1758         int crbit = v8::internal::Assembler::encode_crbit(
1759             cr, static_cast<CRBit>(VXCVI % CRWIDTH));
1760         __ mcrfs(cr, VXCVI);  // extract FPSCR field containing VXCVI into cr7
1761         if (CpuFeatures::IsSupported(ISELECT)) {
1762           __ li(i.OutputRegister(1), Operand(1));
1763           __ isel(i.OutputRegister(1), r0, i.OutputRegister(1), crbit);
1764         } else {
1765           __ li(i.OutputRegister(1), Operand::Zero());
1766           __ bc(v8::internal::Assembler::kInstrSize * 2, BT, crbit);
1767           __ li(i.OutputRegister(1), Operand(1));
1768         }
1769       }
1770       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1771       break;
1772     }
1773 #endif
1774     case kPPC_DoubleToFloat32:
1775       ASSEMBLE_FLOAT_UNOP_RC(frsp, 0);
1776       break;
1777     case kPPC_Float32ToDouble:
1778       // Nothing to do.
1779       __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
1780       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1781       break;
1782     case kPPC_DoubleExtractLowWord32:
1783       __ MovDoubleLowToInt(i.OutputRegister(), i.InputDoubleRegister(0));
1784       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1785       break;
1786     case kPPC_DoubleExtractHighWord32:
1787       __ MovDoubleHighToInt(i.OutputRegister(), i.InputDoubleRegister(0));
1788       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1789       break;
1790     case kPPC_DoubleInsertLowWord32:
1791       __ InsertDoubleLow(i.OutputDoubleRegister(), i.InputRegister(1), r0);
1792       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1793       break;
1794     case kPPC_DoubleInsertHighWord32:
1795       __ InsertDoubleHigh(i.OutputDoubleRegister(), i.InputRegister(1), r0);
1796       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1797       break;
1798     case kPPC_DoubleConstruct:
1799 #if V8_TARGET_ARCH_PPC64
1800       __ MovInt64ComponentsToDouble(i.OutputDoubleRegister(),
1801                                     i.InputRegister(0), i.InputRegister(1), r0);
1802 #else
1803       __ MovInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0),
1804                           i.InputRegister(1));
1805 #endif
1806       DCHECK_EQ(LeaveRC, i.OutputRCBit());
1807       break;
1808     case kPPC_BitcastFloat32ToInt32:
1809       __ MovFloatToInt(i.OutputRegister(), i.InputDoubleRegister(0));
1810       break;
1811     case kPPC_BitcastInt32ToFloat32:
1812       __ MovIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
1813       break;
1814 #if V8_TARGET_ARCH_PPC64
1815     case kPPC_BitcastDoubleToInt64:
1816       __ MovDoubleToInt64(i.OutputRegister(), i.InputDoubleRegister(0));
1817       break;
1818     case kPPC_BitcastInt64ToDouble:
1819       __ MovInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0));
1820       break;
1821 #endif
1822     case kPPC_LoadWordU8:
1823       ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
1824       break;
1825     case kPPC_LoadWordS8:
1826       ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
1827       __ extsb(i.OutputRegister(), i.OutputRegister());
1828       break;
1829     case kPPC_LoadWordU16:
1830       ASSEMBLE_LOAD_INTEGER(lhz, lhzx);
1831       break;
1832     case kPPC_LoadWordS16:
1833       ASSEMBLE_LOAD_INTEGER(lha, lhax);
1834       break;
1835     case kPPC_LoadWordU32:
1836       ASSEMBLE_LOAD_INTEGER(lwz, lwzx);
1837       break;
1838     case kPPC_LoadWordS32:
1839       ASSEMBLE_LOAD_INTEGER(lwa, lwax);
1840       break;
1841 #if V8_TARGET_ARCH_PPC64
1842     case kPPC_LoadWord64:
1843       ASSEMBLE_LOAD_INTEGER(ld, ldx);
1844       break;
1845 #endif
1846     case kPPC_LoadFloat32:
1847       ASSEMBLE_LOAD_FLOAT(lfs, lfsx);
1848       break;
1849     case kPPC_LoadDouble:
1850       ASSEMBLE_LOAD_FLOAT(lfd, lfdx);
1851       break;
1852     case kPPC_StoreWord8:
1853       ASSEMBLE_STORE_INTEGER(stb, stbx);
1854       break;
1855     case kPPC_StoreWord16:
1856       ASSEMBLE_STORE_INTEGER(sth, sthx);
1857       break;
1858     case kPPC_StoreWord32:
1859       ASSEMBLE_STORE_INTEGER(stw, stwx);
1860       break;
1861 #if V8_TARGET_ARCH_PPC64
1862     case kPPC_StoreWord64:
1863       ASSEMBLE_STORE_INTEGER(std, stdx);
1864       break;
1865 #endif
1866     case kPPC_StoreFloat32:
1867       ASSEMBLE_STORE_FLOAT32();
1868       break;
1869     case kPPC_StoreDouble:
1870       ASSEMBLE_STORE_DOUBLE();
1871       break;
1872     case kCheckedLoadInt8:
1873       ASSEMBLE_CHECKED_LOAD_INTEGER(lbz, lbzx);
1874       __ extsb(i.OutputRegister(), i.OutputRegister());
1875       break;
1876     case kCheckedLoadUint8:
1877       ASSEMBLE_CHECKED_LOAD_INTEGER(lbz, lbzx);
1878       break;
1879     case kCheckedLoadInt16:
1880       ASSEMBLE_CHECKED_LOAD_INTEGER(lha, lhax);
1881       break;
1882     case kCheckedLoadUint16:
1883       ASSEMBLE_CHECKED_LOAD_INTEGER(lhz, lhzx);
1884       break;
1885     case kCheckedLoadWord32:
1886       ASSEMBLE_CHECKED_LOAD_INTEGER(lwz, lwzx);
1887       break;
1888     case kCheckedLoadWord64:
1889 #if V8_TARGET_ARCH_PPC64
1890       ASSEMBLE_CHECKED_LOAD_INTEGER(ld, ldx);
1891 #else
1892       UNREACHABLE();
1893 #endif
1894       break;
1895     case kCheckedLoadFloat32:
1896       ASSEMBLE_CHECKED_LOAD_FLOAT(lfs, lfsx, 32);
1897       break;
1898     case kCheckedLoadFloat64:
1899       ASSEMBLE_CHECKED_LOAD_FLOAT(lfd, lfdx, 64);
1900       break;
1901     case kCheckedStoreWord8:
1902       ASSEMBLE_CHECKED_STORE_INTEGER(stb, stbx);
1903       break;
1904     case kCheckedStoreWord16:
1905       ASSEMBLE_CHECKED_STORE_INTEGER(sth, sthx);
1906       break;
1907     case kCheckedStoreWord32:
1908       ASSEMBLE_CHECKED_STORE_INTEGER(stw, stwx);
1909       break;
1910     case kCheckedStoreWord64:
1911 #if V8_TARGET_ARCH_PPC64
1912       ASSEMBLE_CHECKED_STORE_INTEGER(std, stdx);
1913 #else
1914       UNREACHABLE();
1915 #endif
1916       break;
1917     case kCheckedStoreFloat32:
1918       ASSEMBLE_CHECKED_STORE_FLOAT32();
1919       break;
1920     case kCheckedStoreFloat64:
1921       ASSEMBLE_CHECKED_STORE_DOUBLE();
1922       break;
1923 
1924     case kAtomicLoadInt8:
1925       ASSEMBLE_ATOMIC_LOAD_INTEGER(lbz, lbzx);
1926       __ extsb(i.OutputRegister(), i.OutputRegister());
1927       break;
1928     case kAtomicLoadUint8:
1929       ASSEMBLE_ATOMIC_LOAD_INTEGER(lbz, lbzx);
1930       break;
1931     case kAtomicLoadInt16:
1932       ASSEMBLE_ATOMIC_LOAD_INTEGER(lha, lhax);
1933       break;
1934     case kAtomicLoadUint16:
1935       ASSEMBLE_ATOMIC_LOAD_INTEGER(lhz, lhzx);
1936       break;
1937     case kAtomicLoadWord32:
1938       ASSEMBLE_ATOMIC_LOAD_INTEGER(lwz, lwzx);
1939       break;
1940 
1941     case kAtomicStoreWord8:
1942       ASSEMBLE_ATOMIC_STORE_INTEGER(stb, stbx);
1943       break;
1944     case kAtomicStoreWord16:
1945       ASSEMBLE_ATOMIC_STORE_INTEGER(sth, sthx);
1946       break;
1947     case kAtomicStoreWord32:
1948       ASSEMBLE_ATOMIC_STORE_INTEGER(stw, stwx);
1949       break;
1950     default:
1951       UNREACHABLE();
1952       break;
1953   }
1954   return kSuccess;
1955 }  // NOLINT(readability/fn_size)
1956 
1957 
1958 // Assembles branches after an instruction.
AssembleArchBranch(Instruction * instr,BranchInfo * branch)1959 void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
1960   PPCOperandConverter i(this, instr);
1961   Label* tlabel = branch->true_label;
1962   Label* flabel = branch->false_label;
1963   ArchOpcode op = instr->arch_opcode();
1964   FlagsCondition condition = branch->condition;
1965   CRegister cr = cr0;
1966 
1967   Condition cond = FlagsConditionToCondition(condition, op);
1968   if (op == kPPC_CmpDouble) {
1969     // check for unordered if necessary
1970     if (cond == le) {
1971       __ bunordered(flabel, cr);
1972       // Unnecessary for eq/lt since only FU bit will be set.
1973     } else if (cond == gt) {
1974       __ bunordered(tlabel, cr);
1975       // Unnecessary for ne/ge since only FU bit will be set.
1976     }
1977   }
1978   __ b(cond, tlabel, cr);
1979   if (!branch->fallthru) __ b(flabel);  // no fallthru to flabel.
1980 }
1981 
1982 
AssembleArchJump(RpoNumber target)1983 void CodeGenerator::AssembleArchJump(RpoNumber target) {
1984   if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
1985 }
1986 
1987 
1988 // Assembles boolean materializations after an instruction.
AssembleArchBoolean(Instruction * instr,FlagsCondition condition)1989 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
1990                                         FlagsCondition condition) {
1991   PPCOperandConverter i(this, instr);
1992   Label done;
1993   ArchOpcode op = instr->arch_opcode();
1994   CRegister cr = cr0;
1995   int reg_value = -1;
1996 
1997   // Materialize a full 32-bit 1 or 0 value. The result register is always the
1998   // last output of the instruction.
1999   DCHECK_NE(0u, instr->OutputCount());
2000   Register reg = i.OutputRegister(instr->OutputCount() - 1);
2001 
2002   Condition cond = FlagsConditionToCondition(condition, op);
2003   if (op == kPPC_CmpDouble) {
2004     // check for unordered if necessary
2005     if (cond == le) {
2006       reg_value = 0;
2007       __ li(reg, Operand::Zero());
2008       __ bunordered(&done, cr);
2009     } else if (cond == gt) {
2010       reg_value = 1;
2011       __ li(reg, Operand(1));
2012       __ bunordered(&done, cr);
2013     }
2014     // Unnecessary for eq/lt & ne/ge since only FU bit will be set.
2015   }
2016 
2017   if (CpuFeatures::IsSupported(ISELECT)) {
2018     switch (cond) {
2019       case eq:
2020       case lt:
2021       case gt:
2022         if (reg_value != 1) __ li(reg, Operand(1));
2023         __ li(kScratchReg, Operand::Zero());
2024         __ isel(cond, reg, reg, kScratchReg, cr);
2025         break;
2026       case ne:
2027       case ge:
2028       case le:
2029         if (reg_value != 1) __ li(reg, Operand(1));
2030         // r0 implies logical zero in this form
2031         __ isel(NegateCondition(cond), reg, r0, reg, cr);
2032         break;
2033     default:
2034       UNREACHABLE();
2035       break;
2036     }
2037   } else {
2038     if (reg_value != 0) __ li(reg, Operand::Zero());
2039     __ b(NegateCondition(cond), &done, cr);
2040     __ li(reg, Operand(1));
2041   }
2042   __ bind(&done);
2043 }
2044 
2045 
AssembleArchLookupSwitch(Instruction * instr)2046 void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
2047   PPCOperandConverter i(this, instr);
2048   Register input = i.InputRegister(0);
2049   for (size_t index = 2; index < instr->InputCount(); index += 2) {
2050     __ Cmpwi(input, Operand(i.InputInt32(index + 0)), r0);
2051     __ beq(GetLabel(i.InputRpo(index + 1)));
2052   }
2053   AssembleArchJump(i.InputRpo(1));
2054 }
2055 
2056 
AssembleArchTableSwitch(Instruction * instr)2057 void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
2058   PPCOperandConverter i(this, instr);
2059   Register input = i.InputRegister(0);
2060   int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2);
2061   Label** cases = zone()->NewArray<Label*>(case_count);
2062   for (int32_t index = 0; index < case_count; ++index) {
2063     cases[index] = GetLabel(i.InputRpo(index + 2));
2064   }
2065   Label* const table = AddJumpTable(cases, case_count);
2066   __ Cmpli(input, Operand(case_count), r0);
2067   __ bge(GetLabel(i.InputRpo(1)));
2068   __ mov_label_addr(kScratchReg, table);
2069   __ ShiftLeftImm(r0, input, Operand(kPointerSizeLog2));
2070   __ LoadPX(kScratchReg, MemOperand(kScratchReg, r0));
2071   __ Jump(kScratchReg);
2072 }
2073 
AssembleDeoptimizerCall(int deoptimization_id,Deoptimizer::BailoutType bailout_type,SourcePosition pos)2074 CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
2075     int deoptimization_id, Deoptimizer::BailoutType bailout_type,
2076     SourcePosition pos) {
2077   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
2078       isolate(), deoptimization_id, bailout_type);
2079   // TODO(turbofan): We should be able to generate better code by sharing the
2080   // actual final call site and just bl'ing to it here, similar to what we do
2081   // in the lithium backend.
2082   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
2083   DeoptimizeReason deoptimization_reason =
2084       GetDeoptimizationReason(deoptimization_id);
2085   __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
2086   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
2087   return kSuccess;
2088 }
2089 
FinishFrame(Frame * frame)2090 void CodeGenerator::FinishFrame(Frame* frame) {
2091   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
2092   const RegList double_saves = descriptor->CalleeSavedFPRegisters();
2093 
2094   // Save callee-saved Double registers.
2095   if (double_saves != 0) {
2096     frame->AlignSavedCalleeRegisterSlots();
2097     DCHECK(kNumCalleeSavedDoubles ==
2098            base::bits::CountPopulation32(double_saves));
2099     frame->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
2100                                              (kDoubleSize / kPointerSize));
2101   }
2102   // Save callee-saved registers.
2103   const RegList saves =
2104       FLAG_enable_embedded_constant_pool
2105           ? descriptor->CalleeSavedRegisters() & ~kConstantPoolRegister.bit()
2106           : descriptor->CalleeSavedRegisters();
2107   if (saves != 0) {
2108     // register save area does not include the fp or constant pool pointer.
2109     const int num_saves =
2110         kNumCalleeSaved - 1 - (FLAG_enable_embedded_constant_pool ? 1 : 0);
2111     DCHECK(num_saves == base::bits::CountPopulation32(saves));
2112     frame->AllocateSavedCalleeRegisterSlots(num_saves);
2113   }
2114 }
2115 
// Emits the stack-frame setup prologue for the generated code: establishes
// the appropriate frame type (C call, JS call, or stub), allocates the
// remaining stack slots, and pushes the callee-saved double and general
// registers whose slots were reserved in FinishFrame(). Push order here
// (doubles first, then GPRs) must mirror the pop order in AssembleReturn().
void CodeGenerator::AssembleConstructFrame() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (frame_access_state()->has_frame()) {
    if (descriptor->IsCFunctionCall()) {
      __ function_descriptor();
      __ mflr(r0);  // Copy the link register so the return address is saved.
      if (FLAG_enable_embedded_constant_pool) {
        __ Push(r0, fp, kConstantPoolRegister);
        // Adjust FP to point to saved FP.
        __ subi(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
      } else {
        __ Push(r0, fp);
        __ mr(fp, sp);
      }
    } else if (descriptor->IsJSFunctionCall()) {
      __ Prologue(this->info()->GeneratePreagedPrologue(), ip);
      if (descriptor->PushArgumentCount()) {
        __ Push(kJavaScriptCallArgCountRegister);
      }
    } else {
      StackFrame::Type type = info()->GetOutputStackFrameType();
      // TODO(mbrandy): Detect cases where ip is the entrypoint (for
      // efficient initialization of the constant pool pointer register).
      __ StubPrologue(type);
    }
  }

  // Slots still needed beyond the fixed frame established above.
  int shrink_slots =
      frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the unoptimized
    // frame is still on the stack. Optimized code uses OSR values directly from
    // the unoptimized frame. Thus, all that needs to be done is to allocate the
    // remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
  }

  const RegList double_saves = descriptor->CalleeSavedFPRegisters();
  if (shrink_slots > 0) {
    // Grow the stack downward; r0 serves as scratch for large immediates.
    __ Add(sp, sp, -shrink_slots * kPointerSize, r0);
  }

  // Save callee-saved Double registers.
  if (double_saves != 0) {
    __ MultiPushDoubles(double_saves);
    DCHECK(kNumCalleeSavedDoubles ==
           base::bits::CountPopulation32(double_saves));
  }

  // Save callee-saved registers.
  const RegList saves =
      FLAG_enable_embedded_constant_pool
          ? descriptor->CalleeSavedRegisters() & ~kConstantPoolRegister.bit()
          : descriptor->CalleeSavedRegisters();
  if (saves != 0) {
    __ MultiPush(saves);
    // register save area does not include the fp or constant pool pointer.
  }
}
2180 
// Emits the function epilogue: restores callee-saved registers (GPRs first,
// then doubles — the reverse of the push order in AssembleConstructFrame()),
// tears down the frame, drops stack parameters, and returns. |pop| is either
// an immediate count of extra slots to drop or a register holding that count.
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int pop_count = static_cast<int>(descriptor->StackParameterCount());

  // Restore registers.
  const RegList saves =
      FLAG_enable_embedded_constant_pool
          ? descriptor->CalleeSavedRegisters() & ~kConstantPoolRegister.bit()
          : descriptor->CalleeSavedRegisters();
  if (saves != 0) {
    __ MultiPop(saves);
  }

  // Restore double registers.
  const RegList double_saves = descriptor->CalleeSavedFPRegisters();
  if (double_saves != 0) {
    __ MultiPopDoubles(double_saves);
  }
  PPCOperandConverter g(this, nullptr);

  if (descriptor->IsCFunctionCall()) {
    AssembleDeconstructFrame();
  } else if (frame_access_state()->has_frame()) {
    // Canonicalize JSFunction return sites for now unless they have a variable
    // number of stack slot pops: all fixed-pop returns share one bound
    // return_label_ so only the first return site emits the teardown code.
    if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
      if (return_label_.is_bound()) {
        __ b(&return_label_);
        return;
      } else {
        __ bind(&return_label_);
        AssembleDeconstructFrame();
      }
    } else {
      AssembleDeconstructFrame();
    }
  }
  // Drop the fixed stack parameters plus any variable pop amount.
  if (pop->IsImmediate()) {
    DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
    pop_count += g.ToConstant(pop).ToInt32();
  } else {
    __ Drop(g.ToRegister(pop));
  }
  __ Drop(pop_count);
  __ Ret();
}
2227 
2228 
// Emits a move from |source| to |destination| on behalf of the gap resolver.
// Handles register, stack-slot, and constant sources for both general and FP
// operands. kScratchReg/kScratchDoubleReg serve as temporaries for
// memory-to-memory moves; r0 is the scratch register for MemOperand access.
void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  PPCOperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ Move(g.ToRegister(destination), src);
    } else {
      __ StoreP(src, g.ToMemOperand(destination), r0);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsRegister()) {
      __ LoadP(g.ToRegister(destination), src, r0);
    } else {
      // Slot-to-slot move goes through the scratch register.
      Register temp = kScratchReg;
      __ LoadP(temp, src, r0);
      __ StoreP(temp, g.ToMemOperand(destination), r0);
    }
  } else if (source->IsConstant()) {
    Constant src = g.ToConstant(source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      // Materialize the constant into the destination register, or into the
      // scratch register if the destination is a stack slot.
      Register dst =
          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
      switch (src.type()) {
        case Constant::kInt32:
          // WASM references carry their relocation mode into the emitted
          // Operand so relocation info is recorded for later patching.
#if V8_TARGET_ARCH_PPC64
          if (src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
#else
          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
              src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
              src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
#endif
            __ mov(dst, Operand(src.ToInt32(), src.rmode()));
          } else {
            __ mov(dst, Operand(src.ToInt32()));
          }
          break;
        case Constant::kInt64:
#if V8_TARGET_ARCH_PPC64
          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
              src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
            __ mov(dst, Operand(src.ToInt64(), src.rmode()));
          } else {
            DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
#endif
            __ mov(dst, Operand(src.ToInt64()));
#if V8_TARGET_ARCH_PPC64
          }
#endif
          break;
        case Constant::kFloat32:
          // FP constants headed for a GPR/stack slot become tenured heap
          // numbers rather than raw bit patterns.
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kFloat64:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ mov(dst, Operand(src.ToExternalReference()));
          break;
        case Constant::kHeapObject: {
          Handle<HeapObject> src_object = src.ToHeapObject();
          Heap::RootListIndex index;
          // Prefer loading from the root list over embedding the handle.
          if (IsMaterializableFromRoot(src_object, &index)) {
            __ LoadRoot(dst, index);
          } else {
            __ Move(dst, src_object);
          }
          break;
        }
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(dcarney): loading RPO constants on PPC.
          break;
      }
      if (destination->IsStackSlot()) {
        __ StoreP(dst, g.ToMemOperand(destination), r0);
      }
    } else {
      // FP-typed constant into an FP register or FP stack slot.
      DoubleRegister dst = destination->IsFPRegister()
                               ? g.ToDoubleRegister(destination)
                               : kScratchDoubleReg;
      double value = (src.type() == Constant::kFloat32) ? src.ToFloat32()
                                                        : src.ToFloat64();
      __ LoadDoubleLiteral(dst, value, kScratchReg);
      if (destination->IsFPStackSlot()) {
        __ StoreDouble(dst, g.ToMemOperand(destination), r0);
      }
    }
  } else if (source->IsFPRegister()) {
    DoubleRegister src = g.ToDoubleRegister(source);
    if (destination->IsFPRegister()) {
      DoubleRegister dst = g.ToDoubleRegister(destination);
      __ Move(dst, src);
    } else {
      DCHECK(destination->IsFPStackSlot());
      // Use single- or double-width stores to match the operand's
      // machine representation.
      LocationOperand* op = LocationOperand::cast(source);
      if (op->representation() == MachineRepresentation::kFloat64) {
        __ StoreDouble(src, g.ToMemOperand(destination), r0);
      } else {
        __ StoreSingle(src, g.ToMemOperand(destination), r0);
      }
    }
  } else if (source->IsFPStackSlot()) {
    DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsFPRegister()) {
      LocationOperand* op = LocationOperand::cast(source);
      if (op->representation() == MachineRepresentation::kFloat64) {
        __ LoadDouble(g.ToDoubleRegister(destination), src, r0);
      } else {
        __ LoadSingle(g.ToDoubleRegister(destination), src, r0);
      }
    } else {
      // FP slot-to-slot move goes through the scratch double register.
      LocationOperand* op = LocationOperand::cast(source);
      DoubleRegister temp = kScratchDoubleReg;
      if (op->representation() == MachineRepresentation::kFloat64) {
        __ LoadDouble(temp, src, r0);
        __ StoreDouble(temp, g.ToMemOperand(destination), r0);
      } else {
        __ LoadSingle(temp, src, r0);
        __ StoreSingle(temp, g.ToMemOperand(destination), r0);
      }
    }
  } else {
    UNREACHABLE();
  }
}
2362 
2363 
// Swaps the contents of |source| and |destination| on behalf of the gap
// resolver (used to break cycles in parallel moves). kScratchReg and
// kScratchDoubleReg — plus r0/d0 for memory-to-memory swaps — serve as
// temporaries.
void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  PPCOperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    Register temp = kScratchReg;
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ mr(temp, src);
      __ mr(src, dst);
      __ mr(dst, temp);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ mr(temp, src);
      __ LoadP(src, dst);
      __ StoreP(temp, dst);
    }
#if V8_TARGET_ARCH_PPC64
  // On 64-bit targets FP stack slots are also swapped via the pointer-sized
  // load/store sequence below; 32-bit targets handle them separately at the
  // bottom of this function.
  } else if (source->IsStackSlot() || source->IsFPStackSlot()) {
#else
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsStackSlot());
#endif
    Register temp_0 = kScratchReg;
    Register temp_1 = r0;
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    __ LoadP(temp_0, src);
    __ LoadP(temp_1, dst);
    __ StoreP(temp_0, dst);
    __ StoreP(temp_1, src);
  } else if (source->IsFPRegister()) {
    DoubleRegister temp = kScratchDoubleReg;
    DoubleRegister src = g.ToDoubleRegister(source);
    if (destination->IsFPRegister()) {
      DoubleRegister dst = g.ToDoubleRegister(destination);
      __ fmr(temp, src);
      __ fmr(src, dst);
      __ fmr(dst, temp);
    } else {
      DCHECK(destination->IsFPStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ fmr(temp, src);
      __ lfd(src, dst);
      __ stfd(temp, dst);
    }
#if !V8_TARGET_ARCH_PPC64
  // 32-bit targets need a dedicated double-register sequence for
  // FP-slot/FP-slot swaps (on 64-bit these were handled above).
  } else if (source->IsFPStackSlot()) {
    DCHECK(destination->IsFPStackSlot());
    DoubleRegister temp_0 = kScratchDoubleReg;
    DoubleRegister temp_1 = d0;
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    __ lfd(temp_0, src);
    __ lfd(temp_1, dst);
    __ stfd(temp_0, dst);
    __ stfd(temp_1, src);
#endif
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}
2431 
2432 
2433 void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
2434   for (size_t index = 0; index < target_count; ++index) {
2435     __ emit_label_addr(targets[index]);
2436   }
2437 }
2438 
2439 
// Pads the instruction stream with nops so that at least
// Deoptimizer::patch_size() bytes separate this point from the previous
// lazy-bailout instruction, leaving room for the code here to be patched.
void CodeGenerator::EnsureSpaceForLazyDeopt() {
  if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
    return;
  }

  int space_needed = Deoptimizer::patch_size();
  // Ensure that we have enough space after the previous lazy-bailout
  // instruction for patching the code here.
  int current_pc = masm()->pc_offset();
  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
    // Block trampoline pool emission for duration of padding, so the
    // assembler cannot interleave a trampoline among the nops.
    v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
        masm());
    int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
    // PPC instructions are fixed-width, so the padding must be a whole
    // number of instructions.
    DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
    while (padding_size > 0) {
      __ nop();
      padding_size -= v8::internal::Assembler::kInstrSize;
    }
  }
}
2461 
2462 #undef __
2463 
2464 }  // namespace compiler
2465 }  // namespace internal
2466 }  // namespace v8
2467