// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/assembler-inl.h"
#include "src/callable.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/double.h"
#include "src/optimized-compilation-info.h"
#include "src/ppc/macro-assembler-ppc.h"
#include "src/wasm/wasm-objects.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ tasm()->

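// r11 serves as the dedicated scratch register for the code emitted in this
// file.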
#define kScratchReg r11


// Adds PPC-specific methods to convert InstructionOperands.
class PPCOperandConverter final : public InstructionOperandConverter {
 public:
  PPCOperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  size_t OutputCount() { return instr_->OutputCount(); }

  RCBit OutputRCBit() const {
    switch (instr_->flags_mode()) {
      case kFlags_branch:
      case kFlags_branch_and_poison:
      case kFlags_deoptimize:
      case kFlags_deoptimize_and_poison:
      case kFlags_set:
      case kFlags_trap:
        return SetRC;
      case kFlags_none:
        return LeaveRC;
    }
    UNREACHABLE();
  }

  bool CompareLogical() const {
    switch (instr_->flags_condition()) {
      case kUnsignedLessThan:
      case kUnsignedGreaterThanOrEqual:
      case kUnsignedLessThanOrEqual:
      case kUnsignedGreaterThan:
        return true;
      default:
        return false;
    }
    UNREACHABLE();
  }

  Operand InputImmediate(size_t index) {
    Constant constant = ToConstant(instr_->InputAt(index));
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kFloat32:
        return Operand::EmbeddedNumber(constant.ToFloat32());
      case Constant::kFloat64:
        return Operand::EmbeddedNumber(constant.ToFloat64().value());
      case Constant::kInt64:
#if V8_TARGET_ARCH_PPC64
        return Operand(constant.ToInt64());
#endif
      case Constant::kExternalReference:
      case Constant::kHeapObject:
      case Constant::kRpoNumber:
        break;
    }
    UNREACHABLE();
  }

  MemOperand MemoryOperand(AddressingMode* mode, size_t* first_index) {
    const size_t index = *first_index;
    *mode = AddressingModeField::decode(instr_->opcode());
    switch (*mode) {
      case kMode_None:
        break;
      case kMode_MRI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_MRR:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
    }
    UNREACHABLE();
  }

  MemOperand MemoryOperand(AddressingMode* mode, size_t first_index = 0) {
    return MemoryOperand(mode, &first_index);
  }

  MemOperand ToMemOperand(InstructionOperand* op) const {
    DCHECK_NOT_NULL(op);
    DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
    return SlotToMemOperand(AllocatedOperand::cast(op)->index());
  }

  MemOperand SlotToMemOperand(int slot) const {
    FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
  }
};


static inline bool HasRegisterInput(Instruction* instr, size_t index) {
  return instr->InputAt(index)->IsRegister();
}


namespace {

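// Out-of-line slow path for the write barrier: skips the stub call for Smis
// and for values on pages that are not of interest, computes the slot
// address, and calls the record-write stub (saving lr by hand when the frame
// was elided).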
class OutOfLineRecordWrite final : public OutOfLineCode {
 public:
  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register offset,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode)
      : OutOfLineCode(gen),
        object_(object),
        offset_(offset),
        offset_immediate_(0),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode),
        must_save_lr_(!gen->frame_access_state()->has_frame()),
        zone_(gen->zone()) {}

  OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode)
      : OutOfLineCode(gen),
        object_(object),
        offset_(no_reg),
        offset_immediate_(offset),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode),
        must_save_lr_(!gen->frame_access_state()->has_frame()),
        zone_(gen->zone()) {}

  void SaveRegisters(RegList registers) {
    DCHECK_LT(0, NumRegs(registers));
    RegList regs = 0;
    for (int i = 0; i < Register::kNumRegisters; ++i) {
      if ((registers >> i) & 1u) {
        regs |= Register::from_code(i).bit();
      }
    }

    __ MultiPush(regs);
  }

  void RestoreRegisters(RegList registers) {
    DCHECK_LT(0, NumRegs(registers));
    RegList regs = 0;
    for (int i = 0; i < Register::kNumRegisters; ++i) {
      if ((registers >> i) & 1u) {
        regs |= Register::from_code(i).bit();
      }
    }
    __ MultiPop(regs);
  }

  void Generate() final {
    ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
    if (mode_ > RecordWriteMode::kValueIsPointer) {
      __ JumpIfSmi(value_, exit());
    }
    __ CheckPageFlag(value_, scratch0_,
                     MemoryChunk::kPointersToHereAreInterestingMask, eq,
                     exit());
    if (offset_ == no_reg) {
      __ addi(scratch1_, object_, Operand(offset_immediate_));
    } else {
      DCHECK_EQ(0, offset_immediate_);
      __ add(scratch1_, object_, offset_);
    }
    RememberedSetAction const remembered_set_action =
        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
                                             : OMIT_REMEMBERED_SET;
    SaveFPRegsMode const save_fp_mode =
        frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
    if (must_save_lr_) {
      // We need to save and restore lr if the frame was elided.
      __ mflr(scratch0_);
      __ Push(scratch0_);
    }
    __ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
                           save_fp_mode);
    if (must_save_lr_) {
      // We need to save and restore lr if the frame was elided.
      __ Pop(scratch0_);
      __ mtlr(scratch0_);
    }
  }

 private:
  Register const object_;
  Register const offset_;
  int32_t const offset_immediate_;  // Valid if offset_ == no_reg.
  Register const value_;
  Register const scratch0_;
  Register const scratch1_;
  RecordWriteMode const mode_;
  bool must_save_lr_;
  Zone* zone_;
};


Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
  switch (condition) {
    case kEqual:
      return eq;
    case kNotEqual:
      return ne;
    case kSignedLessThan:
    case kUnsignedLessThan:
      return lt;
    case kSignedGreaterThanOrEqual:
    case kUnsignedGreaterThanOrEqual:
      return ge;
    case kSignedLessThanOrEqual:
    case kUnsignedLessThanOrEqual:
      return le;
    case kSignedGreaterThan:
    case kUnsignedGreaterThan:
      return gt;
    case kOverflow:
      // Overflow checked for add/sub only.
      switch (op) {
#if V8_TARGET_ARCH_PPC64
        case kPPC_Add32:
        case kPPC_Add64:
        case kPPC_Sub:
#endif
        case kPPC_AddWithOverflow32:
        case kPPC_SubWithOverflow32:
          return lt;
        default:
          break;
      }
      break;
    case kNotOverflow:
      switch (op) {
#if V8_TARGET_ARCH_PPC64
        case kPPC_Add32:
        case kPPC_Add64:
        case kPPC_Sub:
#endif
        case kPPC_AddWithOverflow32:
        case kPPC_SubWithOverflow32:
          return ge;
        default:
          break;
      }
      break;
    default:
      break;
  }
  UNREACHABLE();
}

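// Spectre mitigation: masks a freshly loaded value with the speculation
// poison register when the instruction is marked as a poisoned access.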
void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
                                   PPCOperandConverter& i) {
  const MemoryAccessMode access_mode =
      static_cast<MemoryAccessMode>(MiscField::decode(instr->opcode()));
  if (access_mode == kMemoryAccessPoisoned) {
    Register value = i.OutputRegister();
    codegen->tasm()->and_(value, value, kSpeculationPoisonRegister);
  }
}

}  // namespace

#define ASSEMBLE_FLOAT_UNOP_RC(asm_instr, round)                     \
  do {                                                               \
    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                 i.OutputRCBit());                                   \
    if (round) {                                                     \
      __ frsp(i.OutputDoubleRegister(), i.OutputDoubleRegister());   \
    }                                                                \
  } while (0)

#define ASSEMBLE_FLOAT_BINOP_RC(asm_instr, round)                    \
  do {                                                               \
    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                 i.InputDoubleRegister(1), i.OutputRCBit());         \
    if (round) {                                                     \
      __ frsp(i.OutputDoubleRegister(), i.OutputDoubleRegister());   \
    }                                                                \
  } while (0)

#define ASSEMBLE_BINOP(asm_instr_reg, asm_instr_imm)           \
  do {                                                         \
    if (HasRegisterInput(instr, 1)) {                          \
      __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
                       i.InputRegister(1));                    \
    } else {                                                   \
      __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
                       i.InputImmediate(1));                   \
    }                                                          \
  } while (0)


#define ASSEMBLE_BINOP_RC(asm_instr_reg, asm_instr_imm)        \
  do {                                                         \
    if (HasRegisterInput(instr, 1)) {                          \
      __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
                       i.InputRegister(1), i.OutputRCBit());   \
    } else {                                                   \
      __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
                       i.InputImmediate(1), i.OutputRCBit());  \
    }                                                          \
  } while (0)


#define ASSEMBLE_BINOP_INT_RC(asm_instr_reg, asm_instr_imm)    \
  do {                                                         \
    if (HasRegisterInput(instr, 1)) {                          \
      __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
                       i.InputRegister(1), i.OutputRCBit());   \
    } else {                                                   \
      __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
                       i.InputInt32(1), i.OutputRCBit());      \
    }                                                          \
  } while (0)


#define ASSEMBLE_ADD_WITH_OVERFLOW()                                    \
  do {                                                                  \
    if (HasRegisterInput(instr, 1)) {                                   \
      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                i.InputRegister(1), kScratchReg, r0);   \
    } else {                                                            \
      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                i.InputInt32(1), kScratchReg, r0);      \
    }                                                                   \
  } while (0)


#define ASSEMBLE_SUB_WITH_OVERFLOW()                                    \
  do {                                                                  \
    if (HasRegisterInput(instr, 1)) {                                   \
      __ SubAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                i.InputRegister(1), kScratchReg, r0);   \
    } else {                                                            \
      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                -i.InputInt32(1), kScratchReg, r0);     \
    }                                                                   \
  } while (0)


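// On PPC64 the 32-bit overflow indicator left in kScratchReg is sign-extended
// with SetRC so that cr0 reflects its sign; FlagsConditionToCondition then
// maps kOverflow to lt and kNotOverflow to ge for these opcodes.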
#if V8_TARGET_ARCH_PPC64
#define ASSEMBLE_ADD_WITH_OVERFLOW32()         \
  do {                                         \
    ASSEMBLE_ADD_WITH_OVERFLOW();              \
    __ extsw(kScratchReg, kScratchReg, SetRC); \
  } while (0)

#define ASSEMBLE_SUB_WITH_OVERFLOW32()         \
  do {                                         \
    ASSEMBLE_SUB_WITH_OVERFLOW();              \
    __ extsw(kScratchReg, kScratchReg, SetRC); \
  } while (0)
#else
#define ASSEMBLE_ADD_WITH_OVERFLOW32 ASSEMBLE_ADD_WITH_OVERFLOW
#define ASSEMBLE_SUB_WITH_OVERFLOW32 ASSEMBLE_SUB_WITH_OVERFLOW
#endif


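// The ##i token pasting below forms the immediate compare variants from the
// register mnemonics (e.g. cmpw -> cmpwi, cmplw -> cmplwi).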
#define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr)                        \
  do {                                                                 \
    const CRegister cr = cr0;                                          \
    if (HasRegisterInput(instr, 1)) {                                  \
      if (i.CompareLogical()) {                                        \
        __ cmpl_instr(i.InputRegister(0), i.InputRegister(1), cr);     \
      } else {                                                         \
        __ cmp_instr(i.InputRegister(0), i.InputRegister(1), cr);      \
      }                                                                \
    } else {                                                           \
      if (i.CompareLogical()) {                                        \
        __ cmpl_instr##i(i.InputRegister(0), i.InputImmediate(1), cr); \
      } else {                                                         \
        __ cmp_instr##i(i.InputRegister(0), i.InputImmediate(1), cr);  \
      }                                                                \
    }                                                                  \
    DCHECK_EQ(SetRC, i.OutputRCBit());                                 \
  } while (0)


#define ASSEMBLE_FLOAT_COMPARE(cmp_instr)                                 \
  do {                                                                    \
    const CRegister cr = cr0;                                             \
    __ cmp_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1), cr); \
    DCHECK_EQ(SetRC, i.OutputRCBit());                                    \
  } while (0)


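// Fallback for CPUs without the modulo instructions (modsw and friends):
// remainder = dividend - (dividend / divisor) * divisor.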
#define ASSEMBLE_MODULO(div_instr, mul_instr)                        \
  do {                                                               \
    const Register scratch = kScratchReg;                            \
    __ div_instr(scratch, i.InputRegister(0), i.InputRegister(1));   \
    __ mul_instr(scratch, scratch, i.InputRegister(1));              \
    __ sub(i.OutputRegister(), i.InputRegister(0), scratch, LeaveOE, \
           i.OutputRCBit());                                         \
  } while (0)

#define ASSEMBLE_FLOAT_MODULO()                                             \
  do {                                                                      \
    FrameScope scope(tasm(), StackFrame::MANUAL);                           \
    __ PrepareCallCFunction(0, 2, kScratchReg);                             \
    __ MovToFloatParameters(i.InputDoubleRegister(0),                       \
                            i.InputDoubleRegister(1));                      \
    __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2); \
    __ MovFromFloatResult(i.OutputDoubleRegister());                        \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                                    \
  } while (0)

#define ASSEMBLE_IEEE754_UNOP(name)                                            \
  do {                                                                         \
    /* TODO(bmeurer): We should really get rid of this special instruction, */ \
    /* and generate a CallAddress instruction instead. */                      \
    FrameScope scope(tasm(), StackFrame::MANUAL);                              \
    __ PrepareCallCFunction(0, 1, kScratchReg);                                \
    __ MovToFloatParameter(i.InputDoubleRegister(0));                          \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1);    \
    /* Move the result into the double result register. */                     \
    __ MovFromFloatResult(i.OutputDoubleRegister());                           \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                                       \
  } while (0)

#define ASSEMBLE_IEEE754_BINOP(name)                                           \
  do {                                                                         \
    /* TODO(bmeurer): We should really get rid of this special instruction, */ \
    /* and generate a CallAddress instruction instead. */                      \
    FrameScope scope(tasm(), StackFrame::MANUAL);                              \
    __ PrepareCallCFunction(0, 2, kScratchReg);                                \
    __ MovToFloatParameters(i.InputDoubleRegister(0),                          \
                            i.InputDoubleRegister(1));                         \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2);    \
    /* Move the result into the double result register. */                     \
    __ MovFromFloatResult(i.OutputDoubleRegister());                           \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                                       \
  } while (0)

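// IEEE 754 maximum with correct NaN and +/-0 handling: NaN operands are
// caught by the unordered branches, and the +/-0 case is resolved by fadd,
// since +0.0 + -0.0 is +0.0 (the maximum).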
#define ASSEMBLE_FLOAT_MAX()                                           \
  do {                                                                 \
    DoubleRegister left_reg = i.InputDoubleRegister(0);                \
    DoubleRegister right_reg = i.InputDoubleRegister(1);               \
    DoubleRegister result_reg = i.OutputDoubleRegister();              \
    Label check_nan_left, check_zero, return_left, return_right, done; \
    __ fcmpu(left_reg, right_reg);                                     \
    __ bunordered(&check_nan_left);                                    \
    __ beq(&check_zero);                                               \
    __ bge(&return_left);                                              \
    __ b(&return_right);                                               \
                                                                       \
    __ bind(&check_zero);                                              \
    __ fcmpu(left_reg, kDoubleRegZero);                                \
    /* left == right != 0. */                                          \
    __ bne(&return_left);                                              \
    /* At this point, both left and right are either 0 or -0. */       \
    __ fadd(result_reg, left_reg, right_reg);                          \
    __ b(&done);                                                       \
                                                                       \
    __ bind(&check_nan_left);                                          \
    __ fcmpu(left_reg, left_reg);                                      \
    /* left == NaN. */                                                 \
    __ bunordered(&return_left);                                       \
    __ bind(&return_right);                                            \
    if (right_reg != result_reg) {                                     \
      __ fmr(result_reg, right_reg);                                   \
    }                                                                  \
    __ b(&done);                                                       \
                                                                       \
    __ bind(&return_left);                                             \
    if (left_reg != result_reg) {                                      \
      __ fmr(result_reg, left_reg);                                    \
    }                                                                  \
    __ bind(&done);                                                    \
  } while (0)

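// IEEE 754 minimum, mirroring the maximum above; the negate/add/negate
// sequence turns the +0.0 produced for the +/-0 case into -0.0 (the minimum).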
#define ASSEMBLE_FLOAT_MIN()                                              \
  do {                                                                    \
    DoubleRegister left_reg = i.InputDoubleRegister(0);                   \
    DoubleRegister right_reg = i.InputDoubleRegister(1);                  \
    DoubleRegister result_reg = i.OutputDoubleRegister();                 \
    Label check_nan_left, check_zero, return_left, return_right, done;    \
    __ fcmpu(left_reg, right_reg);                                        \
    __ bunordered(&check_nan_left);                                       \
    __ beq(&check_zero);                                                  \
    __ ble(&return_left);                                                 \
    __ b(&return_right);                                                  \
                                                                          \
    __ bind(&check_zero);                                                 \
    __ fcmpu(left_reg, kDoubleRegZero);                                   \
    /* left == right != 0. */                                             \
    __ bne(&return_left);                                                 \
    /* At this point, both left and right are either 0 or -0. */          \
    /* Min: The algorithm is: -((-L) + (-R)), which in case of L and R */ \
    /* being different registers is most efficiently expressed */         \
    /* as -((-L) - R). */                                                 \
    __ fneg(left_reg, left_reg);                                          \
    if (left_reg == right_reg) {                                          \
      __ fadd(result_reg, left_reg, right_reg);                           \
    } else {                                                              \
      __ fsub(result_reg, left_reg, right_reg);                           \
    }                                                                     \
    __ fneg(result_reg, result_reg);                                      \
    __ b(&done);                                                          \
                                                                          \
    __ bind(&check_nan_left);                                             \
    __ fcmpu(left_reg, left_reg);                                         \
    /* left == NaN. */                                                    \
    __ bunordered(&return_left);                                          \
                                                                          \
    __ bind(&return_right);                                               \
    if (right_reg != result_reg) {                                        \
      __ fmr(result_reg, right_reg);                                      \
    }                                                                     \
    __ b(&done);                                                          \
                                                                          \
    __ bind(&return_left);                                                \
    if (left_reg != result_reg) {                                         \
      __ fmr(result_reg, left_reg);                                       \
    }                                                                     \
    __ bind(&done);                                                       \
  } while (0)

#define ASSEMBLE_LOAD_FLOAT(asm_instr, asm_instrx)    \
  do {                                                \
    DoubleRegister result = i.OutputDoubleRegister(); \
    AddressingMode mode = kMode_None;                 \
    MemOperand operand = i.MemoryOperand(&mode);      \
    if (mode == kMode_MRI) {                          \
      __ asm_instr(result, operand);                  \
    } else {                                          \
      __ asm_instrx(result, operand);                 \
    }                                                 \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());              \
  } while (0)


#define ASSEMBLE_LOAD_INTEGER(asm_instr, asm_instrx) \
  do {                                               \
    Register result = i.OutputRegister();            \
    AddressingMode mode = kMode_None;                \
    MemOperand operand = i.MemoryOperand(&mode);     \
    if (mode == kMode_MRI) {                         \
      __ asm_instr(result, operand);                 \
    } else {                                         \
      __ asm_instrx(result, operand);                \
    }                                                \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());             \
  } while (0)


#define ASSEMBLE_STORE_FLOAT32()                         \
  do {                                                   \
    size_t index = 0;                                    \
    AddressingMode mode = kMode_None;                    \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    DoubleRegister value = i.InputDoubleRegister(index); \
    /* removed frsp as instruction-selector checked */   \
    /* value to be kFloat32 */                           \
    if (mode == kMode_MRI) {                             \
      __ stfs(value, operand);                           \
    } else {                                             \
      __ stfsx(value, operand);                          \
    }                                                    \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                 \
  } while (0)


#define ASSEMBLE_STORE_DOUBLE()                          \
  do {                                                   \
    size_t index = 0;                                    \
    AddressingMode mode = kMode_None;                    \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    DoubleRegister value = i.InputDoubleRegister(index); \
    if (mode == kMode_MRI) {                             \
      __ stfd(value, operand);                           \
    } else {                                             \
      __ stfdx(value, operand);                          \
    }                                                    \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                 \
  } while (0)


#define ASSEMBLE_STORE_INTEGER(asm_instr, asm_instrx)    \
  do {                                                   \
    size_t index = 0;                                    \
    AddressingMode mode = kMode_None;                    \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    Register value = i.InputRegister(index);             \
    if (mode == kMode_MRI) {                             \
      __ asm_instr(value, operand);                      \
    } else {                                             \
      __ asm_instrx(value, operand);                     \
    }                                                    \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                 \
  } while (0)

#if V8_TARGET_ARCH_PPC64
// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define CleanUInt32(x) __ ClearLeftImm(x, x, Operand(32))
#else
#define CleanUInt32(x)
#endif

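// The lwsync after atomic loads and the lwsync/sync pair around atomic
// stores are the PPC barriers that give these accesses their expected
// sequentially consistent ordering.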
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr, asm_instrx) \
  do {                                                      \
    Label done;                                             \
    Register result = i.OutputRegister();                   \
    AddressingMode mode = kMode_None;                       \
    MemOperand operand = i.MemoryOperand(&mode);            \
    if (mode == kMode_MRI) {                                \
      __ asm_instr(result, operand);                        \
    } else {                                                \
      __ asm_instrx(result, operand);                       \
    }                                                       \
    __ lwsync();                                            \
  } while (0)
#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr, asm_instrx) \
  do {                                                       \
    size_t index = 0;                                        \
    AddressingMode mode = kMode_None;                        \
    MemOperand operand = i.MemoryOperand(&mode, &index);     \
    Register value = i.InputRegister(index);                 \
    __ lwsync();                                             \
    if (mode == kMode_MRI) {                                 \
      __ asm_instr(value, operand);                          \
    } else {                                                 \
      __ asm_instrx(value, operand);                         \
    }                                                        \
    __ sync();                                               \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                     \
  } while (0)
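// load_instr/store_instr are load-reserve/store-conditional pairs (e.g.
// lwarx/stwcx.); the bne on cr0 loops until the store-conditional succeeds.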
#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_instr, store_instr)       \
  do {                                                                  \
    Label exchange;                                                     \
    __ bind(&exchange);                                                 \
    __ load_instr(i.OutputRegister(0),                                  \
                  MemOperand(i.InputRegister(0), i.InputRegister(1)));  \
    __ store_instr(i.InputRegister(2),                                  \
                   MemOperand(i.InputRegister(0), i.InputRegister(1))); \
    __ bne(&exchange, cr0);                                             \
  } while (0)

#define ASSEMBLE_ATOMIC_BINOP(bin_inst, load_inst, store_inst)               \
  do {                                                                       \
    MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
    Label binop;                                                             \
    __ bind(&binop);                                                         \
    __ load_inst(i.OutputRegister(), operand);                               \
    __ bin_inst(i.InputRegister(2), i.OutputRegister(), i.InputRegister(2)); \
    __ store_inst(i.InputRegister(2), operand);                              \
    __ bne(&binop, cr0);                                                     \
  } while (false)

#define ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(bin_inst, load_inst, store_inst,      \
                                       ext_instr)                            \
  do {                                                                       \
    MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
    Label binop;                                                             \
    __ bind(&binop);                                                         \
    __ load_inst(i.OutputRegister(), operand);                               \
    __ ext_instr(i.OutputRegister(), i.OutputRegister());                    \
    __ bin_inst(i.InputRegister(2), i.OutputRegister(), i.InputRegister(2)); \
    __ store_inst(i.InputRegister(2), operand);                              \
    __ bne(&binop, cr0);                                                     \
  } while (false)

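// Compare-exchange loops: bail out to exit as soon as the loaded value
// differs from the expected value in InputRegister(2); otherwise retry the
// store-conditional of InputRegister(3) until it succeeds.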
#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmp_inst, load_inst, store_inst)    \
  do {                                                                       \
    MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
    Label loop;                                                              \
    Label exit;                                                              \
    __ bind(&loop);                                                          \
    __ load_inst(i.OutputRegister(), operand);                               \
    __ cmp_inst(i.OutputRegister(), i.InputRegister(2));                     \
    __ bne(&exit, cr0);                                                      \
    __ store_inst(i.InputRegister(3), operand);                              \
    __ bne(&loop, cr0);                                                      \
    __ bind(&exit);                                                          \
  } while (false)

#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_SIGN_EXT(cmp_inst, load_inst,       \
                                                  store_inst, ext_instr)     \
  do {                                                                       \
    MemOperand operand = MemOperand(i.InputRegister(0), i.InputRegister(1)); \
    Label loop;                                                              \
    Label exit;                                                              \
    __ bind(&loop);                                                          \
    __ load_inst(i.OutputRegister(), operand);                               \
    __ ext_instr(i.OutputRegister(), i.OutputRegister());                    \
    __ cmp_inst(i.OutputRegister(), i.InputRegister(2));                     \
    __ bne(&exit, cr0);                                                      \
    __ store_inst(i.InputRegister(3), operand);                              \
    __ bne(&loop, cr0);                                                      \
    __ bind(&exit);                                                          \
  } while (false)

void CodeGenerator::AssembleDeconstructFrame() {
  __ LeaveFrame(StackFrame::MANUAL);
}

void CodeGenerator::AssemblePrepareTailCall() {
  if (frame_access_state()->has_frame()) {
    __ RestoreFrameStateForTailCall();
  }
  frame_access_state()->SetFrameAccessToSP();
}

void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
                                                     Register scratch1,
                                                     Register scratch2,
                                                     Register scratch3) {
  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
  Label done;

  // Check if current frame is an arguments adaptor frame.
  __ LoadP(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ cmpi(scratch1,
          Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
  __ bne(&done);

  // Load arguments count from current arguments adaptor frame (note, it
  // does not include receiver).
  Register caller_args_count_reg = scratch1;
  __ LoadP(caller_args_count_reg,
           MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(caller_args_count_reg);

  ParameterCount callee_args_count(args_reg);
  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
                        scratch3);
  __ bind(&done);
}

namespace {

void FlushPendingPushRegisters(TurboAssembler* tasm,
                               FrameAccessState* frame_access_state,
                               ZoneVector<Register>* pending_pushes) {
  switch (pending_pushes->size()) {
    case 0:
      break;
    case 1:
      tasm->Push((*pending_pushes)[0]);
      break;
    case 2:
      tasm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
      break;
    case 3:
      tasm->Push((*pending_pushes)[0], (*pending_pushes)[1],
                 (*pending_pushes)[2]);
      break;
    default:
      UNREACHABLE();
      break;
  }
  frame_access_state->IncreaseSPDelta(pending_pushes->size());
  pending_pushes->clear();
}

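// Moves sp so that the slot just above it matches new_slot_above_sp,
// flushing any deferred pushes first so they land in the right place;
// shrinking the stack is optional.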
void AdjustStackPointerForTailCall(
    TurboAssembler* tasm, FrameAccessState* state, int new_slot_above_sp,
    ZoneVector<Register>* pending_pushes = nullptr,
    bool allow_shrinkage = true) {
  int current_sp_offset = state->GetSPToFPSlotCount() +
                          StandardFrameConstants::kFixedSlotCountAboveFp;
  int stack_slot_delta = new_slot_above_sp - current_sp_offset;
  if (stack_slot_delta > 0) {
    if (pending_pushes != nullptr) {
      FlushPendingPushRegisters(tasm, state, pending_pushes);
    }
    tasm->Add(sp, sp, -stack_slot_delta * kPointerSize, r0);
    state->IncreaseSPDelta(stack_slot_delta);
  } else if (allow_shrinkage && stack_slot_delta < 0) {
    if (pending_pushes != nullptr) {
      FlushPendingPushRegisters(tasm, state, pending_pushes);
    }
    tasm->Add(sp, sp, -stack_slot_delta * kPointerSize, r0);
    state->IncreaseSPDelta(stack_slot_delta);
  }
}

}  // namespace

void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
                                              int first_unused_stack_slot) {
  ZoneVector<MoveOperands*> pushes(zone());
  GetPushCompatibleMoves(instr, kRegisterPush, &pushes);

  if (!pushes.empty() &&
      (LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
       first_unused_stack_slot)) {
    PPCOperandConverter g(this, instr);
    ZoneVector<Register> pending_pushes(zone());
    for (auto move : pushes) {
      LocationOperand destination_location(
          LocationOperand::cast(move->destination()));
      InstructionOperand source(move->source());
      AdjustStackPointerForTailCall(
          tasm(), frame_access_state(),
          destination_location.index() - pending_pushes.size(),
          &pending_pushes);
      // Pushes of non-register data types are not supported.
      DCHECK(source.IsRegister());
      LocationOperand source_location(LocationOperand::cast(source));
      pending_pushes.push_back(source_location.GetRegister());
      // TODO(arm): We can push more than 3 registers at once. Add support in
      // the macro-assembler for pushing a list of registers.
      if (pending_pushes.size() == 3) {
        FlushPendingPushRegisters(tasm(), frame_access_state(),
                                  &pending_pushes);
      }
      move->Eliminate();
    }
    FlushPendingPushRegisters(tasm(), frame_access_state(), &pending_pushes);
  }
  AdjustStackPointerForTailCall(tasm(), frame_access_state(),
                                first_unused_stack_slot, nullptr, false);
}

void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
                                             int first_unused_stack_slot) {
  AdjustStackPointerForTailCall(tasm(), frame_access_state(),
                                first_unused_stack_slot);
}

// Check that {kJavaScriptCallCodeStartRegister} is correct.
void CodeGenerator::AssembleCodeStartRegisterCheck() {
  Register scratch = kScratchReg;
  __ ComputeCodeStartAddress(scratch);
  __ cmp(scratch, kJavaScriptCallCodeStartRegister);
  __ Assert(eq, AbortReason::kWrongFunctionCodeStart);
}

// Check if the code object is marked for deoptimization. If it is, then it
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we
// need to:
//    1. read from memory the word that contains that bit, which can be found
//       in the flags in the referenced {CodeDataContainer} object;
//    2. test kMarkedForDeoptimizationBit in those flags; and
//    3. if it is not zero then it jumps to the builtin.
void CodeGenerator::BailoutIfDeoptimized() {
  if (FLAG_debug_code) {
    // Check that {kJavaScriptCallCodeStartRegister} is correct.
    __ ComputeCodeStartAddress(ip);
    __ cmp(ip, kJavaScriptCallCodeStartRegister);
    __ Assert(eq, AbortReason::kWrongFunctionCodeStart);
  }

  int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
  __ LoadP(r11, MemOperand(kJavaScriptCallCodeStartRegister, offset));
  __ LoadWordArith(
      r11, FieldMemOperand(r11, CodeDataContainer::kKindSpecificFlagsOffset));
  __ TestBit(r11, Code::kMarkedForDeoptimizationBit);
  // Ensure we're not serializing (otherwise we'd need to use an indirection to
  // access the builtin below).
  DCHECK(!isolate()->ShouldLoadConstantsFromRootList());
  Handle<Code> code = isolate()->builtins()->builtin_handle(
      Builtins::kCompileLazyDeoptimizedCode);
  __ Jump(code, RelocInfo::CODE_TARGET, ne, cr0);
}

void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
  Register scratch = kScratchReg;

  __ ComputeCodeStartAddress(scratch);

  // Calculate a mask which has all bits set in the normal case, but has all
  // bits cleared if we are speculatively executing the wrong PC.
  __ cmp(kJavaScriptCallCodeStartRegister, scratch);
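  // scratch = 0; notx then writes ~0 (all ones) to the poison register, and
  // isel keeps that all-ones mask when the PC check succeeded (eq) or picks
  // the zero in scratch otherwise.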
  __ li(scratch, Operand::Zero());
  __ notx(kSpeculationPoisonRegister, scratch);
  __ isel(eq, kSpeculationPoisonRegister,
          kSpeculationPoisonRegister, scratch);
}

void CodeGenerator::AssembleRegisterArgumentPoisoning() {
  __ and_(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
  __ and_(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
  __ and_(sp, sp, kSpeculationPoisonRegister);
}

// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
    Instruction* instr) {
  PPCOperandConverter i(this, instr);
  ArchOpcode opcode = ArchOpcodeField::decode(instr->opcode());

  switch (opcode) {
    case kArchCallCodeObject: {
      v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
          tasm());
      if (HasRegisterInput(instr, 0)) {
        Register reg = i.InputRegister(0);
        DCHECK_IMPLIES(
            HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
            reg == kJavaScriptCallCodeStartRegister);
        __ addi(reg, reg, Operand(Code::kHeaderSize - kHeapObjectTag));
        __ Call(reg);
      } else {
        __ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
      }
      RecordCallPosition(instr);
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchCallWasmFunction: {
      // We must not share code targets for calls to builtins for wasm code, as
      // they might need to be patched individually.
      if (instr->InputAt(0)->IsImmediate()) {
        Constant constant = i.ToConstant(instr->InputAt(0));
#ifdef V8_TARGET_ARCH_PPC64
        Address wasm_code = static_cast<Address>(constant.ToInt64());
#else
        Address wasm_code = static_cast<Address>(constant.ToInt32());
#endif
        __ Call(wasm_code, constant.rmode());
      } else {
        __ Call(i.InputRegister(0));
      }
      RecordCallPosition(instr);
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchTailCallCodeObjectFromJSFunction:
    case kArchTailCallCodeObject: {
      if (opcode == kArchTailCallCodeObjectFromJSFunction) {
        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                         i.TempRegister(0), i.TempRegister(1),
                                         i.TempRegister(2));
      }
      if (HasRegisterInput(instr, 0)) {
        Register reg = i.InputRegister(0);
        DCHECK_IMPLIES(
            HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
            reg == kJavaScriptCallCodeStartRegister);
        __ addi(reg, reg, Operand(Code::kHeaderSize - kHeapObjectTag));
        __ Jump(reg);
      } else {
        // We cannot use the constant pool to load the target since
        // we've already restored the caller's frame.
        ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
        __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
      }
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      frame_access_state()->ClearSPDelta();
      frame_access_state()->SetFrameAccessToDefault();
      break;
    }
    case kArchTailCallWasm: {
      // We must not share code targets for calls to builtins for wasm code, as
      // they might need to be patched individually.
      if (instr->InputAt(0)->IsImmediate()) {
        Constant constant = i.ToConstant(instr->InputAt(0));
#ifdef V8_TARGET_ARCH_PPC64
        Address wasm_code = static_cast<Address>(constant.ToInt64());
#else
        Address wasm_code = static_cast<Address>(constant.ToInt32());
#endif
        __ Jump(wasm_code, constant.rmode());
      } else {
        __ Jump(i.InputRegister(0));
      }
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      frame_access_state()->ClearSPDelta();
      frame_access_state()->SetFrameAccessToDefault();
      break;
    }
    case kArchTailCallAddress: {
      CHECK(!instr->InputAt(0)->IsImmediate());
      Register reg = i.InputRegister(0);
      DCHECK_IMPLIES(
          HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
          reg == kJavaScriptCallCodeStartRegister);
      __ Jump(reg);
      frame_access_state()->ClearSPDelta();
      frame_access_state()->SetFrameAccessToDefault();
      break;
    }
    case kArchCallJSFunction: {
      v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
          tasm());
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ LoadP(kScratchReg,
                 FieldMemOperand(func, JSFunction::kContextOffset));
        __ cmp(cp, kScratchReg);
        __ Assert(eq, AbortReason::kWrongFunctionContext);
      }
      static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
      __ LoadP(r5, FieldMemOperand(func, JSFunction::kCodeOffset));
      __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
      __ Call(r5);
      RecordCallPosition(instr);
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchPrepareCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      __ PrepareCallCFunction(num_parameters, kScratchReg);
      // Frame alignment requires using FP-relative frame addressing.
      frame_access_state()->SetFrameAccessToFP();
      break;
    }
    case kArchSaveCallerRegisters: {
      fp_mode_ =
          static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
      DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
      // kReturnRegister0 should have been saved before entering the stub.
      int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
      DCHECK_EQ(0, bytes % kPointerSize);
      DCHECK_EQ(0, frame_access_state()->sp_delta());
      frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
      DCHECK(!caller_registers_saved_);
      caller_registers_saved_ = true;
      break;
    }
    case kArchRestoreCallerRegisters: {
      DCHECK(fp_mode_ ==
             static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
      DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
      // Don't overwrite the returned value.
      int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
      frame_access_state()->IncreaseSPDelta(-(bytes / kPointerSize));
      DCHECK_EQ(0, frame_access_state()->sp_delta());
      DCHECK(caller_registers_saved_);
      caller_registers_saved_ = false;
      break;
    }
    case kArchPrepareTailCall:
      AssemblePrepareTailCall();
      break;
    case kArchComment:
#ifdef V8_TARGET_ARCH_PPC64
      __ RecordComment(reinterpret_cast<const char*>(i.InputInt64(0)));
#else
      __ RecordComment(reinterpret_cast<const char*>(i.InputInt32(0)));
#endif
      break;
    case kArchCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      if (instr->InputAt(0)->IsImmediate()) {
        ExternalReference ref = i.InputExternalReference(0);
        __ CallCFunction(ref, num_parameters);
      } else {
        Register func = i.InputRegister(0);
        __ CallCFunction(func, num_parameters);
      }
      frame_access_state()->SetFrameAccessToDefault();
      // Ideally, we should decrement SP delta to match the change of stack
      // pointer in CallCFunction. However, for certain architectures (e.g.
      // ARM), there may be more strict alignment requirement, causing old SP
      // to be saved on the stack. In those cases, we can not calculate the SP
      // delta statically.
      frame_access_state()->ClearSPDelta();
      if (caller_registers_saved_) {
        // Need to re-sync SP delta introduced in kArchSaveCallerRegisters.
        // Here, we assume the sequence to be:
        //   kArchSaveCallerRegisters;
        //   kArchCallCFunction;
        //   kArchRestoreCallerRegisters;
        int bytes =
            __ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
        frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
      }
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kArchBinarySearchSwitch:
      AssembleArchBinarySearchSwitch(instr);
      break;
    case kArchLookupSwitch:
      AssembleArchLookupSwitch(instr);
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kArchDebugAbort:
      DCHECK(i.InputRegister(0) == r4);
      if (!frame_access_state()->has_frame()) {
        // We don't actually want to generate a pile of code for this, so just
        // claim there is a stack frame, without generating one.
        FrameScope scope(tasm(), StackFrame::NONE);
        __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
                RelocInfo::CODE_TARGET);
      } else {
        __ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
                RelocInfo::CODE_TARGET);
      }
      __ stop("kArchDebugAbort");
      break;
    case kArchDebugBreak:
      __ stop("kArchDebugBreak");
      break;
    case kArchNop:
    case kArchThrowTerminator:
      // don't emit code for nops.
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kArchDeoptimize: {
      int deopt_state_id =
          BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
      CodeGenResult result =
          AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
      if (result != kSuccess) return result;
      break;
    }
    case kArchRet:
      AssembleReturn(instr->InputAt(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kArchStackPointer:
      __ mr(i.OutputRegister(), sp);
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kArchFramePointer:
      __ mr(i.OutputRegister(), fp);
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kArchParentFramePointer:
      if (frame_access_state()->has_frame()) {
        __ LoadP(i.OutputRegister(), MemOperand(fp, 0));
      } else {
        __ mr(i.OutputRegister(), fp);
      }
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
                           i.InputDoubleRegister(0), DetermineStubCallMode());
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kArchStoreWithWriteBarrier: {
      RecordWriteMode mode =
          static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
      Register object = i.InputRegister(0);
      Register value = i.InputRegister(2);
      Register scratch0 = i.TempRegister(0);
      Register scratch1 = i.TempRegister(1);
      OutOfLineRecordWrite* ool;

      AddressingMode addressing_mode =
          AddressingModeField::decode(instr->opcode());
      if (addressing_mode == kMode_MRI) {
        int32_t offset = i.InputInt32(1);
        ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
                                                scratch0, scratch1, mode);
        __ StoreP(value, MemOperand(object, offset));
      } else {
        DCHECK_EQ(kMode_MRR, addressing_mode);
        Register offset(i.InputRegister(1));
        ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
                                                scratch0, scratch1, mode);
        __ StorePX(value, MemOperand(object, offset));
      }
      __ CheckPageFlag(object, scratch0,
                       MemoryChunk::kPointersFromHereAreInterestingMask, ne,
                       ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kArchStackSlot: {
      FrameOffset offset =
          frame_access_state()->GetFrameOffset(i.InputInt32(0));
      __ addi(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
              Operand(offset.offset()));
      break;
    }
    case kArchWordPoisonOnSpeculation:
      __ and_(i.OutputRegister(), i.InputRegister(0),
              kSpeculationPoisonRegister);
      break;
    case kPPC_And:
      if (HasRegisterInput(instr, 1)) {
        __ and_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
                i.OutputRCBit());
      } else {
        __ andi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
      }
      break;
    case kPPC_AndComplement:
      __ andc(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
              i.OutputRCBit());
      break;
    case kPPC_Or:
      if (HasRegisterInput(instr, 1)) {
        __ orx(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               i.OutputRCBit());
      } else {
        __ ori(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
        DCHECK_EQ(LeaveRC, i.OutputRCBit());
      }
      break;
    case kPPC_OrComplement:
      __ orc(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.OutputRCBit());
      break;
    case kPPC_Xor:
      if (HasRegisterInput(instr, 1)) {
        __ xor_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
                i.OutputRCBit());
      } else {
        __ xori(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
        DCHECK_EQ(LeaveRC, i.OutputRCBit());
      }
      break;
    case kPPC_ShiftLeft32:
      ASSEMBLE_BINOP_RC(slw, slwi);
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_ShiftLeft64:
      ASSEMBLE_BINOP_RC(sld, sldi);
      break;
#endif
    case kPPC_ShiftRight32:
      ASSEMBLE_BINOP_RC(srw, srwi);
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_ShiftRight64:
      ASSEMBLE_BINOP_RC(srd, srdi);
      break;
#endif
    case kPPC_ShiftRightAlg32:
      ASSEMBLE_BINOP_INT_RC(sraw, srawi);
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_ShiftRightAlg64:
      ASSEMBLE_BINOP_INT_RC(srad, sradi);
      break;
#endif
#if !V8_TARGET_ARCH_PPC64
    case kPPC_AddPair:
      // i.InputRegister(0) ... left low word.
      // i.InputRegister(1) ... left high word.
      // i.InputRegister(2) ... right low word.
      // i.InputRegister(3) ... right high word.
      __ addc(i.OutputRegister(0), i.InputRegister(0), i.InputRegister(2));
      __ adde(i.OutputRegister(1), i.InputRegister(1), i.InputRegister(3));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_SubPair:
      // i.InputRegister(0) ... left low word.
      // i.InputRegister(1) ... left high word.
      // i.InputRegister(2) ... right low word.
      // i.InputRegister(3) ... right high word.
      __ subc(i.OutputRegister(0), i.InputRegister(0), i.InputRegister(2));
      __ sube(i.OutputRegister(1), i.InputRegister(1), i.InputRegister(3));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_MulPair:
      // i.InputRegister(0) ... left low word.
      // i.InputRegister(1) ... left high word.
      // i.InputRegister(2) ... right low word.
      // i.InputRegister(3) ... right high word.
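      // 64-bit product from 32-bit halves: the two cross products are summed
      // into the high word together with the carry word (mulhwu) of the low
      // halves.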
      __ mullw(i.TempRegister(0), i.InputRegister(0), i.InputRegister(3));
      __ mullw(i.TempRegister(1), i.InputRegister(2), i.InputRegister(1));
      __ add(i.TempRegister(0), i.TempRegister(0), i.TempRegister(1));
      __ mullw(i.OutputRegister(0), i.InputRegister(0), i.InputRegister(2));
      __ mulhwu(i.OutputRegister(1), i.InputRegister(0), i.InputRegister(2));
      __ add(i.OutputRegister(1), i.OutputRegister(1), i.TempRegister(0));
      break;
    case kPPC_ShiftLeftPair: {
      Register second_output =
          instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
      if (instr->InputAt(2)->IsImmediate()) {
        __ ShiftLeftPair(i.OutputRegister(0), second_output, i.InputRegister(0),
                         i.InputRegister(1), i.InputInt32(2));
      } else {
        __ ShiftLeftPair(i.OutputRegister(0), second_output, i.InputRegister(0),
                         i.InputRegister(1), kScratchReg, i.InputRegister(2));
      }
      break;
    }
    case kPPC_ShiftRightPair: {
      Register second_output =
          instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
      if (instr->InputAt(2)->IsImmediate()) {
        __ ShiftRightPair(i.OutputRegister(0), second_output,
                          i.InputRegister(0), i.InputRegister(1),
                          i.InputInt32(2));
      } else {
        __ ShiftRightPair(i.OutputRegister(0), second_output,
                          i.InputRegister(0), i.InputRegister(1), kScratchReg,
                          i.InputRegister(2));
      }
      break;
    }
    case kPPC_ShiftRightAlgPair: {
      Register second_output =
          instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
      if (instr->InputAt(2)->IsImmediate()) {
        __ ShiftRightAlgPair(i.OutputRegister(0), second_output,
                             i.InputRegister(0), i.InputRegister(1),
                             i.InputInt32(2));
      } else {
        __ ShiftRightAlgPair(i.OutputRegister(0), second_output,
                             i.InputRegister(0), i.InputRegister(1),
                             kScratchReg, i.InputRegister(2));
      }
      break;
    }
#endif
    case kPPC_RotRight32:
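      // PPC only has rotate-left instructions; a right rotation by n is
      // emitted as a left rotation by 32 - n (64 - n for the 64-bit variant).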
1331 if (HasRegisterInput(instr, 1)) {
1332 __ subfic(kScratchReg, i.InputRegister(1), Operand(32));
1333 __ rotlw(i.OutputRegister(), i.InputRegister(0), kScratchReg,
1334 i.OutputRCBit());
1335 } else {
1336 int sh = i.InputInt32(1);
1337 __ rotrwi(i.OutputRegister(), i.InputRegister(0), sh, i.OutputRCBit());
1338 }
1339 break;
1340 #if V8_TARGET_ARCH_PPC64
1341 case kPPC_RotRight64:
1342 if (HasRegisterInput(instr, 1)) {
1343 __ subfic(kScratchReg, i.InputRegister(1), Operand(64));
1344 __ rotld(i.OutputRegister(), i.InputRegister(0), kScratchReg,
1345 i.OutputRCBit());
1346 } else {
1347 int sh = i.InputInt32(1);
1348 __ rotrdi(i.OutputRegister(), i.InputRegister(0), sh, i.OutputRCBit());
1349 }
1350 break;
1351 #endif
1352 case kPPC_Not:
1353 __ notx(i.OutputRegister(), i.InputRegister(0), i.OutputRCBit());
1354 break;
1355 case kPPC_RotLeftAndMask32:
1356 __ rlwinm(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
1357 31 - i.InputInt32(2), 31 - i.InputInt32(3), i.OutputRCBit());
1358 break;
1359 #if V8_TARGET_ARCH_PPC64
1360 case kPPC_RotLeftAndClear64:
1361 __ rldic(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
1362 63 - i.InputInt32(2), i.OutputRCBit());
1363 break;
1364 case kPPC_RotLeftAndClearLeft64:
1365 __ rldicl(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
1366 63 - i.InputInt32(2), i.OutputRCBit());
1367 break;
1368 case kPPC_RotLeftAndClearRight64:
1369 __ rldicr(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
1370 63 - i.InputInt32(2), i.OutputRCBit());
1371 break;
1372 #endif
1373 case kPPC_Add32:
1374 #if V8_TARGET_ARCH_PPC64
1375 if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
1376 ASSEMBLE_ADD_WITH_OVERFLOW();
1377 } else {
1378 #endif
1379 if (HasRegisterInput(instr, 1)) {
1380 __ add(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
1381 LeaveOE, i.OutputRCBit());
1382 } else {
1383 __ addi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
1384 DCHECK_EQ(LeaveRC, i.OutputRCBit());
1385 }
1386 __ extsw(i.OutputRegister(), i.OutputRegister());
1387 #if V8_TARGET_ARCH_PPC64
1388 }
1389 #endif
1390 break;
1391 #if V8_TARGET_ARCH_PPC64
1392 case kPPC_Add64:
1393 if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
1394 ASSEMBLE_ADD_WITH_OVERFLOW();
1395 } else {
1396 if (HasRegisterInput(instr, 1)) {
1397 __ add(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
1398 LeaveOE, i.OutputRCBit());
1399 } else {
1400 __ addi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
1401 DCHECK_EQ(LeaveRC, i.OutputRCBit());
1402 }
1403 }
1404 break;
1405 #endif
1406 case kPPC_AddWithOverflow32:
1407 ASSEMBLE_ADD_WITH_OVERFLOW32();
1408 break;
1409 case kPPC_AddDouble:
1410 ASSEMBLE_FLOAT_BINOP_RC(fadd, MiscField::decode(instr->opcode()));
1411 break;
1412 case kPPC_Sub:
1413 #if V8_TARGET_ARCH_PPC64
1414 if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
1415 ASSEMBLE_SUB_WITH_OVERFLOW();
1416 } else {
1417 #endif
1418 if (HasRegisterInput(instr, 1)) {
1419 __ sub(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
1420 LeaveOE, i.OutputRCBit());
1421 } else {
1422 if (is_int16(i.InputImmediate(1).immediate())) {
1423 __ subi(i.OutputRegister(), i.InputRegister(0),
1424 i.InputImmediate(1));
1425 DCHECK_EQ(LeaveRC, i.OutputRCBit());
1426 } else {
1427 __ mov(kScratchReg, i.InputImmediate(1));
1428 __ sub(i.OutputRegister(), i.InputRegister(0), kScratchReg, LeaveOE,
1429 i.OutputRCBit());
1430 }
1431 }
1432 #if V8_TARGET_ARCH_PPC64
1433 }
1434 #endif
1435 break;
1436 case kPPC_SubWithOverflow32:
1437 ASSEMBLE_SUB_WITH_OVERFLOW32();
1438 break;
1439 case kPPC_SubDouble:
1440 ASSEMBLE_FLOAT_BINOP_RC(fsub, MiscField::decode(instr->opcode()));
1441 break;
1442 case kPPC_Mul32:
1443 __ mullw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
1444 LeaveOE, i.OutputRCBit());
1445 break;
1446 #if V8_TARGET_ARCH_PPC64
1447 case kPPC_Mul64:
1448 __ mulld(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
1449 LeaveOE, i.OutputRCBit());
1450 break;
1451 #endif
1452
1453 case kPPC_Mul32WithHigh32:
1454 if (i.OutputRegister(0) == i.InputRegister(0) ||
1455 i.OutputRegister(0) == i.InputRegister(1) ||
1456 i.OutputRegister(1) == i.InputRegister(0) ||
1457 i.OutputRegister(1) == i.InputRegister(1)) {
1458 __ mullw(kScratchReg,
1459 i.InputRegister(0), i.InputRegister(1)); // low
1460 __ mulhw(i.OutputRegister(1),
1461 i.InputRegister(0), i.InputRegister(1)); // high
1462 __ mr(i.OutputRegister(0), kScratchReg);
1463 } else {
1464 __ mullw(i.OutputRegister(0),
1465 i.InputRegister(0), i.InputRegister(1)); // low
1466 __ mulhw(i.OutputRegister(1),
1467 i.InputRegister(0), i.InputRegister(1)); // high
1468 }
1469 break;
1470 case kPPC_MulHigh32:
1471 __ mulhw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
1472 i.OutputRCBit());
1473 break;
1474 case kPPC_MulHighU32:
1475 __ mulhwu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
1476 i.OutputRCBit());
1477 break;
1478 case kPPC_MulDouble:
1479 ASSEMBLE_FLOAT_BINOP_RC(fmul, MiscField::decode(instr->opcode()));
1480 break;
1481 case kPPC_Div32:
1482 __ divw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
1483 DCHECK_EQ(LeaveRC, i.OutputRCBit());
1484 break;
1485 #if V8_TARGET_ARCH_PPC64
1486 case kPPC_Div64:
1487 __ divd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
1488 DCHECK_EQ(LeaveRC, i.OutputRCBit());
1489 break;
1490 #endif
1491 case kPPC_DivU32:
1492 __ divwu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
1493 DCHECK_EQ(LeaveRC, i.OutputRCBit());
1494 break;
1495 #if V8_TARGET_ARCH_PPC64
1496 case kPPC_DivU64:
1497 __ divdu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
1498 DCHECK_EQ(LeaveRC, i.OutputRCBit());
1499 break;
1500 #endif
1501 case kPPC_DivDouble:
1502 ASSEMBLE_FLOAT_BINOP_RC(fdiv, MiscField::decode(instr->opcode()));
1503 break;
1504 case kPPC_Mod32:
1505 if (CpuFeatures::IsSupported(MODULO)) {
1506 __ modsw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
1507 } else {
1508 ASSEMBLE_MODULO(divw, mullw);
1509 }
1510 break;
1511 #if V8_TARGET_ARCH_PPC64
1512 case kPPC_Mod64:
1513 if (CpuFeatures::IsSupported(MODULO)) {
1514 __ modsd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
1515 } else {
1516 ASSEMBLE_MODULO(divd, mulld);
1517 }
1518 break;
1519 #endif
1520 case kPPC_ModU32:
1521 if (CpuFeatures::IsSupported(MODULO)) {
1522 __ moduw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
1523 } else {
1524 ASSEMBLE_MODULO(divwu, mullw);
1525 }
1526 break;
1527 #if V8_TARGET_ARCH_PPC64
    case kPPC_ModU64:
      if (CpuFeatures::IsSupported(MODULO)) {
        __ modud(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        ASSEMBLE_MODULO(divdu, mulld);
      }
      break;
#endif
    case kPPC_ModDouble:
      // TODO(bmeurer): We should really get rid of this special instruction,
      // and generate a CallAddress instruction instead.
      ASSEMBLE_FLOAT_MODULO();
      break;
    case kIeee754Float64Acos:
      ASSEMBLE_IEEE754_UNOP(acos);
      break;
    case kIeee754Float64Acosh:
      ASSEMBLE_IEEE754_UNOP(acosh);
      break;
    case kIeee754Float64Asin:
      ASSEMBLE_IEEE754_UNOP(asin);
      break;
    case kIeee754Float64Asinh:
      ASSEMBLE_IEEE754_UNOP(asinh);
      break;
    case kIeee754Float64Atan:
      ASSEMBLE_IEEE754_UNOP(atan);
      break;
    case kIeee754Float64Atan2:
      ASSEMBLE_IEEE754_BINOP(atan2);
      break;
    case kIeee754Float64Atanh:
      ASSEMBLE_IEEE754_UNOP(atanh);
      break;
    case kIeee754Float64Tan:
      ASSEMBLE_IEEE754_UNOP(tan);
      break;
    case kIeee754Float64Tanh:
      ASSEMBLE_IEEE754_UNOP(tanh);
      break;
    case kIeee754Float64Cbrt:
      ASSEMBLE_IEEE754_UNOP(cbrt);
      break;
    case kIeee754Float64Sin:
      ASSEMBLE_IEEE754_UNOP(sin);
      break;
    case kIeee754Float64Sinh:
      ASSEMBLE_IEEE754_UNOP(sinh);
      break;
    case kIeee754Float64Cos:
      ASSEMBLE_IEEE754_UNOP(cos);
      break;
    case kIeee754Float64Cosh:
      ASSEMBLE_IEEE754_UNOP(cosh);
      break;
    case kIeee754Float64Exp:
      ASSEMBLE_IEEE754_UNOP(exp);
      break;
    case kIeee754Float64Expm1:
      ASSEMBLE_IEEE754_UNOP(expm1);
      break;
    case kIeee754Float64Log:
      ASSEMBLE_IEEE754_UNOP(log);
      break;
    case kIeee754Float64Log1p:
      ASSEMBLE_IEEE754_UNOP(log1p);
      break;
    case kIeee754Float64Log2:
      ASSEMBLE_IEEE754_UNOP(log2);
      break;
    case kIeee754Float64Log10:
      ASSEMBLE_IEEE754_UNOP(log10);
      break;
    case kIeee754Float64Pow: {
      __ Call(BUILTIN_CODE(isolate(), MathPowInternal),
              RelocInfo::CODE_TARGET);
      __ Move(d1, d3);
      break;
    }
    case kPPC_Neg:
      __ neg(i.OutputRegister(), i.InputRegister(0), LeaveOE, i.OutputRCBit());
      break;
    case kPPC_MaxDouble:
      ASSEMBLE_FLOAT_MAX();
      break;
    case kPPC_MinDouble:
      ASSEMBLE_FLOAT_MIN();
      break;
    case kPPC_AbsDouble:
      ASSEMBLE_FLOAT_UNOP_RC(fabs, 0);
      break;
    case kPPC_SqrtDouble:
      ASSEMBLE_FLOAT_UNOP_RC(fsqrt, MiscField::decode(instr->opcode()));
      break;
    case kPPC_FloorDouble:
      ASSEMBLE_FLOAT_UNOP_RC(frim, MiscField::decode(instr->opcode()));
      break;
    case kPPC_CeilDouble:
      ASSEMBLE_FLOAT_UNOP_RC(frip, MiscField::decode(instr->opcode()));
      break;
    case kPPC_TruncateDouble:
      ASSEMBLE_FLOAT_UNOP_RC(friz, MiscField::decode(instr->opcode()));
      break;
    case kPPC_RoundDouble:
      ASSEMBLE_FLOAT_UNOP_RC(frin, MiscField::decode(instr->opcode()));
      break;
    case kPPC_NegDouble:
      ASSEMBLE_FLOAT_UNOP_RC(fneg, 0);
      break;
    case kPPC_Cntlz32:
      __ cntlzw(i.OutputRegister(), i.InputRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_Cntlz64:
      __ cntlzd(i.OutputRegister(), i.InputRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
#endif
    case kPPC_Popcnt32:
      __ popcntw(i.OutputRegister(), i.InputRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_Popcnt64:
      __ popcntd(i.OutputRegister(), i.InputRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
#endif
    case kPPC_Cmp32:
      ASSEMBLE_COMPARE(cmpw, cmplw);
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_Cmp64:
      ASSEMBLE_COMPARE(cmp, cmpl);
      break;
#endif
    case kPPC_CmpDouble:
      ASSEMBLE_FLOAT_COMPARE(fcmpu);
      break;
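    // Note on the Tst cases below: the AND result is written to r0 only so
    // that CR0 gets set; r0 serves as a scratch register here. On 64-bit
    // targets the 32-bit variant sign-extends first (with the RC bit set)
    // so that CR0 reflects the 32-bit value being tested.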
    case kPPC_Tst32:
      if (HasRegisterInput(instr, 1)) {
        __ and_(r0, i.InputRegister(0), i.InputRegister(1), i.OutputRCBit());
      } else {
        __ andi(r0, i.InputRegister(0), i.InputImmediate(1));
      }
#if V8_TARGET_ARCH_PPC64
      __ extsw(r0, r0, i.OutputRCBit());
#endif
      DCHECK_EQ(SetRC, i.OutputRCBit());
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_Tst64:
      if (HasRegisterInput(instr, 1)) {
        __ and_(r0, i.InputRegister(0), i.InputRegister(1), i.OutputRCBit());
      } else {
        __ andi(r0, i.InputRegister(0), i.InputImmediate(1));
      }
      DCHECK_EQ(SetRC, i.OutputRCBit());
      break;
#endif
    case kPPC_Float64SilenceNaN: {
      DoubleRegister value = i.InputDoubleRegister(0);
      DoubleRegister result = i.OutputDoubleRegister();
      __ CanonicalizeNaN(result, value);
      break;
    }
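    // The 'U'-suffixed stores below (StorePU/StoreDoubleU/StoreSingleU) map
    // to the PPC store-with-update forms, which store the value and update
    // sp to the new address in a single instruction, so the push needs no
    // separate stack pointer adjustment.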
    case kPPC_Push:
      if (instr->InputAt(0)->IsFPRegister()) {
        LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
        if (op->representation() == MachineRepresentation::kFloat64) {
          __ StoreDoubleU(i.InputDoubleRegister(0),
                          MemOperand(sp, -kDoubleSize), r0);
          frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
        } else {
          DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
          __ StoreSingleU(i.InputDoubleRegister(0),
                          MemOperand(sp, -kPointerSize), r0);
          frame_access_state()->IncreaseSPDelta(1);
        }
      } else {
        __ StorePU(i.InputRegister(0), MemOperand(sp, -kPointerSize), r0);
        frame_access_state()->IncreaseSPDelta(1);
      }
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_PushFrame: {
      int num_slots = i.InputInt32(1);
      if (instr->InputAt(0)->IsFPRegister()) {
        LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
        if (op->representation() == MachineRepresentation::kFloat64) {
          __ StoreDoubleU(i.InputDoubleRegister(0),
                          MemOperand(sp, -num_slots * kPointerSize), r0);
        } else {
          DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
          __ StoreSingleU(i.InputDoubleRegister(0),
                          MemOperand(sp, -num_slots * kPointerSize), r0);
        }
      } else {
        __ StorePU(i.InputRegister(0),
                   MemOperand(sp, -num_slots * kPointerSize), r0);
      }
      break;
    }
    case kPPC_StoreToStackSlot: {
      int slot = i.InputInt32(1);
      if (instr->InputAt(0)->IsFPRegister()) {
        LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
        if (op->representation() == MachineRepresentation::kFloat64) {
          __ StoreDouble(i.InputDoubleRegister(0),
                         MemOperand(sp, slot * kPointerSize), r0);
        } else {
          DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
          __ StoreSingle(i.InputDoubleRegister(0),
                         MemOperand(sp, slot * kPointerSize), r0);
        }
      } else {
        __ StoreP(i.InputRegister(0), MemOperand(sp, slot * kPointerSize), r0);
      }
      break;
    }
    case kPPC_ExtendSignWord8:
      __ extsb(i.OutputRegister(), i.InputRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_ExtendSignWord16:
      __ extsh(i.OutputRegister(), i.InputRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_ExtendSignWord32:
      __ extsw(i.OutputRegister(), i.InputRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_Uint32ToUint64:
      // Zero extend
      __ clrldi(i.OutputRegister(), i.InputRegister(0), Operand(32));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_Int64ToInt32:
      __ extsw(i.OutputRegister(), i.InputRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_Int64ToFloat32:
      __ ConvertInt64ToFloat(i.InputRegister(0), i.OutputDoubleRegister());
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_Int64ToDouble:
      __ ConvertInt64ToDouble(i.InputRegister(0), i.OutputDoubleRegister());
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_Uint64ToFloat32:
      __ ConvertUnsignedInt64ToFloat(i.InputRegister(0),
                                     i.OutputDoubleRegister());
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_Uint64ToDouble:
      __ ConvertUnsignedInt64ToDouble(i.InputRegister(0),
                                      i.OutputDoubleRegister());
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
#endif
    case kPPC_Int32ToFloat32:
      __ ConvertIntToFloat(i.InputRegister(0), i.OutputDoubleRegister());
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_Int32ToDouble:
      __ ConvertIntToDouble(i.InputRegister(0), i.OutputDoubleRegister());
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_Uint32ToFloat32:
      __ ConvertUnsignedIntToFloat(i.InputRegister(0),
                                   i.OutputDoubleRegister());
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_Uint32ToDouble:
      __ ConvertUnsignedIntToDouble(i.InputRegister(0),
                                    i.OutputDoubleRegister());
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
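    // The checked conversions below follow a common pattern: clear the
    // FPSCR VXCVI (invalid-operation-on-convert) bit, perform the convert,
    // then copy the FPSCR field containing VXCVI into cr7 with mcrfs so a
    // success flag (1 on success, 0 on overflow/NaN) can be written to the
    // optional second output, branchlessly via isel when available.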
    case kPPC_DoubleToInt32:
    case kPPC_DoubleToUint32:
    case kPPC_DoubleToInt64: {
#if V8_TARGET_ARCH_PPC64
      bool check_conversion =
          (opcode == kPPC_DoubleToInt64 && i.OutputCount() > 1);
      if (check_conversion) {
        __ mtfsb0(VXCVI);  // clear FPSCR:VXCVI bit
      }
#endif
      __ ConvertDoubleToInt64(i.InputDoubleRegister(0),
#if !V8_TARGET_ARCH_PPC64
                              kScratchReg,
#endif
                              i.OutputRegister(0), kScratchDoubleReg);
#if V8_TARGET_ARCH_PPC64
      if (check_conversion) {
        // Set 2nd output to zero if conversion fails.
        CRegister cr = cr7;
        int crbit = v8::internal::Assembler::encode_crbit(
            cr, static_cast<CRBit>(VXCVI % CRWIDTH));
        __ mcrfs(cr, VXCVI);  // extract FPSCR field containing VXCVI into cr7
        if (CpuFeatures::IsSupported(ISELECT)) {
          __ li(i.OutputRegister(1), Operand(1));
          __ isel(i.OutputRegister(1), r0, i.OutputRegister(1), crbit);
        } else {
          __ li(i.OutputRegister(1), Operand::Zero());
          __ bc(v8::internal::kInstrSize * 2, BT, crbit);
          __ li(i.OutputRegister(1), Operand(1));
        }
      }
#endif
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    }
#if V8_TARGET_ARCH_PPC64
    case kPPC_DoubleToUint64: {
      bool check_conversion = (i.OutputCount() > 1);
      if (check_conversion) {
        __ mtfsb0(VXCVI);  // clear FPSCR:VXCVI bit
      }
      __ ConvertDoubleToUnsignedInt64(i.InputDoubleRegister(0),
                                      i.OutputRegister(0), kScratchDoubleReg);
      if (check_conversion) {
        // Set 2nd output to zero if conversion fails.
        CRegister cr = cr7;
        int crbit = v8::internal::Assembler::encode_crbit(
            cr, static_cast<CRBit>(VXCVI % CRWIDTH));
        __ mcrfs(cr, VXCVI);  // extract FPSCR field containing VXCVI into cr7
        if (CpuFeatures::IsSupported(ISELECT)) {
          __ li(i.OutputRegister(1), Operand(1));
          __ isel(i.OutputRegister(1), r0, i.OutputRegister(1), crbit);
        } else {
          __ li(i.OutputRegister(1), Operand::Zero());
          __ bc(v8::internal::kInstrSize * 2, BT, crbit);
          __ li(i.OutputRegister(1), Operand(1));
        }
      }
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    }
#endif
    case kPPC_DoubleToFloat32:
      ASSEMBLE_FLOAT_UNOP_RC(frsp, 0);
      break;
    case kPPC_Float32ToDouble:
      // Nothing to do beyond a register move: float32 values are already
      // held in double format in FP registers.
      __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_DoubleExtractLowWord32:
      __ MovDoubleLowToInt(i.OutputRegister(), i.InputDoubleRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_DoubleExtractHighWord32:
      __ MovDoubleHighToInt(i.OutputRegister(), i.InputDoubleRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_DoubleInsertLowWord32:
      __ InsertDoubleLow(i.OutputDoubleRegister(), i.InputRegister(1), r0);
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_DoubleInsertHighWord32:
      __ InsertDoubleHigh(i.OutputDoubleRegister(), i.InputRegister(1), r0);
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_DoubleConstruct:
#if V8_TARGET_ARCH_PPC64
      __ MovInt64ComponentsToDouble(i.OutputDoubleRegister(),
                                    i.InputRegister(0), i.InputRegister(1),
                                    r0);
#else
      __ MovInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0),
                          i.InputRegister(1));
#endif
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_BitcastFloat32ToInt32:
      __ MovFloatToInt(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kPPC_BitcastInt32ToFloat32:
      __ MovIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_BitcastDoubleToInt64:
      __ MovDoubleToInt64(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kPPC_BitcastInt64ToDouble:
      __ MovInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0));
      break;
#endif
    case kPPC_LoadWordU8:
      ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
      EmitWordLoadPoisoningIfNeeded(this, instr, i);
      break;
    case kPPC_LoadWordS8:
      ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
      __ extsb(i.OutputRegister(), i.OutputRegister());
      EmitWordLoadPoisoningIfNeeded(this, instr, i);
      break;
    case kPPC_LoadWordU16:
      ASSEMBLE_LOAD_INTEGER(lhz, lhzx);
      EmitWordLoadPoisoningIfNeeded(this, instr, i);
      break;
    case kPPC_LoadWordS16:
      ASSEMBLE_LOAD_INTEGER(lha, lhax);
      EmitWordLoadPoisoningIfNeeded(this, instr, i);
      break;
    case kPPC_LoadWordU32:
      ASSEMBLE_LOAD_INTEGER(lwz, lwzx);
      EmitWordLoadPoisoningIfNeeded(this, instr, i);
      break;
    case kPPC_LoadWordS32:
      ASSEMBLE_LOAD_INTEGER(lwa, lwax);
      EmitWordLoadPoisoningIfNeeded(this, instr, i);
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_LoadWord64:
      ASSEMBLE_LOAD_INTEGER(ld, ldx);
      EmitWordLoadPoisoningIfNeeded(this, instr, i);
      break;
#endif
    case kPPC_LoadFloat32:
      ASSEMBLE_LOAD_FLOAT(lfs, lfsx);
      break;
    case kPPC_LoadDouble:
      ASSEMBLE_LOAD_FLOAT(lfd, lfdx);
      break;
    case kPPC_StoreWord8:
      ASSEMBLE_STORE_INTEGER(stb, stbx);
      break;
    case kPPC_StoreWord16:
      ASSEMBLE_STORE_INTEGER(sth, sthx);
      break;
    case kPPC_StoreWord32:
      ASSEMBLE_STORE_INTEGER(stw, stwx);
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_StoreWord64:
      ASSEMBLE_STORE_INTEGER(std, stdx);
      break;
#endif
    case kPPC_StoreFloat32:
      ASSEMBLE_STORE_FLOAT32();
      break;
    case kPPC_StoreDouble:
      ASSEMBLE_STORE_DOUBLE();
      break;
    case kWord32AtomicLoadInt8:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(lbz, lbzx);
      __ extsb(i.OutputRegister(), i.OutputRegister());
      break;
    case kWord32AtomicLoadUint8:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(lbz, lbzx);
      break;
    case kWord32AtomicLoadInt16:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(lha, lhax);
      break;
    case kWord32AtomicLoadUint16:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(lhz, lhzx);
      break;
    case kWord32AtomicLoadWord32:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(lwz, lwzx);
      break;

    case kWord32AtomicStoreWord8:
      ASSEMBLE_ATOMIC_STORE_INTEGER(stb, stbx);
      break;
    case kWord32AtomicStoreWord16:
      ASSEMBLE_ATOMIC_STORE_INTEGER(sth, sthx);
      break;
    case kWord32AtomicStoreWord32:
      ASSEMBLE_ATOMIC_STORE_INTEGER(stw, stwx);
      break;
    case kWord32AtomicExchangeInt8:
      ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lbarx, stbcx);
      __ extsb(i.OutputRegister(0), i.OutputRegister(0));
      break;
    case kWord32AtomicExchangeUint8:
      ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lbarx, stbcx);
      break;
    case kWord32AtomicExchangeInt16:
      ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lharx, sthcx);
      __ extsh(i.OutputRegister(0), i.OutputRegister(0));
      break;
    case kWord32AtomicExchangeUint16:
      ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lharx, sthcx);
      break;
    case kWord32AtomicExchangeWord32:
      ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lwarx, stwcx);
      break;

    case kWord32AtomicCompareExchangeInt8:
      ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_SIGN_EXT(cmp, lbarx, stbcx, extsb);
      break;
    case kWord32AtomicCompareExchangeUint8:
      ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmp, lbarx, stbcx);
      break;
    case kWord32AtomicCompareExchangeInt16:
      ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_SIGN_EXT(cmp, lharx, sthcx, extsh);
      break;
    case kWord32AtomicCompareExchangeUint16:
      ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmp, lharx, sthcx);
      break;
    case kWord32AtomicCompareExchangeWord32:
      ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(cmpw, lwarx, stwcx);
      break;

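// The atomic read-modify-write cases below expand to the standard PPC
// reservation loop: a load-and-reserve (lbarx/lharx/lwarx), the operation
// itself, and a store-conditional (stbcx./sthcx./stwcx.) that branches back
// and retries if the reservation was lost in the meantime.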
#define ATOMIC_BINOP_CASE(op, inst)                            \
  case kWord32Atomic##op##Int8:                                \
    ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(inst, lbarx, stbcx, extsb); \
    break;                                                     \
  case kWord32Atomic##op##Uint8:                               \
    ASSEMBLE_ATOMIC_BINOP(inst, lbarx, stbcx);                 \
    break;                                                     \
  case kWord32Atomic##op##Int16:                               \
    ASSEMBLE_ATOMIC_BINOP_SIGN_EXT(inst, lharx, sthcx, extsh); \
    break;                                                     \
  case kWord32Atomic##op##Uint16:                              \
    ASSEMBLE_ATOMIC_BINOP(inst, lharx, sthcx);                 \
    break;                                                     \
  case kWord32Atomic##op##Word32:                              \
    ASSEMBLE_ATOMIC_BINOP(inst, lwarx, stwcx);                 \
    break;
      ATOMIC_BINOP_CASE(Add, add)
      ATOMIC_BINOP_CASE(Sub, sub)
      ATOMIC_BINOP_CASE(And, and_)
      ATOMIC_BINOP_CASE(Or, orx)
      ATOMIC_BINOP_CASE(Xor, xor_)
#undef ATOMIC_BINOP_CASE

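    // kPPC_ByteRev32 uses the classic PPC byte-reversal idiom: rotating
    // left by 8 puts two of the four bytes in place, and two rlwimi
    // (rotate-then-insert-under-mask) instructions patch in the other two;
    // the final extsw keeps the canonical sign-extended 32-bit form.
    // kPPC_ByteRev64 applies the same trick to each 32-bit half and then
    // recombines the halves.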
    case kPPC_ByteRev32: {
      Register input = i.InputRegister(0);
      Register output = i.OutputRegister();
      Register temp1 = r0;
      __ rotlwi(temp1, input, 8);
      __ rlwimi(temp1, input, 24, 0, 7);
      __ rlwimi(temp1, input, 24, 16, 23);
      __ extsw(output, temp1);
      break;
    }
#ifdef V8_TARGET_ARCH_PPC64
    case kPPC_ByteRev64: {
      Register input = i.InputRegister(0);
      Register output = i.OutputRegister();
      Register temp1 = r0;
      Register temp2 = kScratchReg;
      Register temp3 = i.TempRegister(0);
      __ rldicl(temp1, input, 32, 32);
      __ rotlwi(temp2, input, 8);
      __ rlwimi(temp2, input, 24, 0, 7);
      __ rotlwi(temp3, temp1, 8);
      __ rlwimi(temp2, input, 24, 16, 23);
      __ rlwimi(temp3, temp1, 24, 0, 7);
      __ rlwimi(temp3, temp1, 24, 16, 23);
      __ rldicr(temp2, temp2, 32, 31);
      __ orx(output, temp2, temp3);
      break;
    }
#endif  // V8_TARGET_ARCH_PPC64
    default:
      UNREACHABLE();
      break;
  }
  return kSuccess;
}  // NOLINT(readability/fn_size)

// Assembles branches after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  PPCOperandConverter i(this, instr);
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  ArchOpcode op = instr->arch_opcode();
  FlagsCondition condition = branch->condition;
  CRegister cr = cr0;

  Condition cond = FlagsConditionToCondition(condition, op);
  if (op == kPPC_CmpDouble) {
    // check for unordered if necessary
    if (cond == le) {
      __ bunordered(flabel, cr);
      // Unnecessary for eq/lt since only FU bit will be set.
    } else if (cond == gt) {
      __ bunordered(tlabel, cr);
      // Unnecessary for ne/ge since only FU bit will be set.
    }
  }
  __ b(cond, tlabel, cr);
  if (!branch->fallthru) __ b(flabel);  // no fallthru to flabel.
}

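// Resets kSpeculationPoisonRegister to zero when the negation of the branch
// condition holds, i.e. when this code is reached along a mispredicted path,
// so that later poison-masked loads read zeroed values. The update is done
// with isel to avoid introducing another branch that could itself be
// speculated past.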
void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
                                            Instruction* instr) {
  // TODO(John) Handle float comparisons (kUnordered[Not]Equal).
  if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
    return;
  }

  ArchOpcode op = instr->arch_opcode();
  condition = NegateFlagsCondition(condition);
  __ li(kScratchReg, Operand::Zero());
  __ isel(FlagsConditionToCondition(condition, op), kSpeculationPoisonRegister,
          kScratchReg, kSpeculationPoisonRegister, cr0);
}

void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
                                            BranchInfo* branch) {
  AssembleArchBranch(instr, branch);
}

void CodeGenerator::AssembleArchJump(RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
}

void CodeGenerator::AssembleArchTrap(Instruction* instr,
                                     FlagsCondition condition) {
  class OutOfLineTrap final : public OutOfLineCode {
   public:
    OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
        : OutOfLineCode(gen), instr_(instr), gen_(gen) {}

    void Generate() final {
      PPCOperandConverter i(gen_, instr_);
      TrapId trap_id =
          static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
      GenerateCallToTrap(trap_id);
    }

   private:
    void GenerateCallToTrap(TrapId trap_id) {
      if (trap_id == TrapId::kInvalid) {
        // We cannot test calls to the runtime in cctest/test-run-wasm.
        // Therefore we emit a call to C here instead of a call to the runtime.
        // We use the context register as the scratch register, because we do
        // not have a context here.
        __ PrepareCallCFunction(0, 0, cp);
        __ CallCFunction(
            ExternalReference::wasm_call_trap_callback_for_testing(), 0);
        __ LeaveFrame(StackFrame::WASM_COMPILED);
        auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
        int pop_count =
            static_cast<int>(call_descriptor->StackParameterCount());
        __ Drop(pop_count);
        __ Ret();
      } else {
        gen_->AssembleSourcePosition(instr_);
        // A direct call to a wasm runtime stub defined in this module.
        // Just encode the stub index. This will be patched at relocation.
        __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
        ReferenceMap* reference_map =
            new (gen_->zone()) ReferenceMap(gen_->zone());
        gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
                              Safepoint::kNoLazyDeopt);
        if (FLAG_debug_code) {
          __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromWasmTrap));
        }
      }
    }

    Instruction* instr_;
    CodeGenerator* gen_;
  };
  auto ool = new (zone()) OutOfLineTrap(this, instr);
  Label* tlabel = ool->entry();
  Label end;

  ArchOpcode op = instr->arch_opcode();
  CRegister cr = cr0;
  Condition cond = FlagsConditionToCondition(condition, op);
  if (op == kPPC_CmpDouble) {
    // check for unordered if necessary
    if (cond == le) {
      __ bunordered(&end, cr);
      // Unnecessary for eq/lt since only FU bit will be set.
    } else if (cond == gt) {
      __ bunordered(tlabel, cr);
      // Unnecessary for ne/ge since only FU bit will be set.
    }
  }
  __ b(cond, tlabel, cr);
  __ bind(&end);
}

// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  PPCOperandConverter i(this, instr);
  Label done;
  ArchOpcode op = instr->arch_opcode();
  CRegister cr = cr0;
  int reg_value = -1;

  // Materialize a full 32-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  DCHECK_NE(0u, instr->OutputCount());
  Register reg = i.OutputRegister(instr->OutputCount() - 1);

  Condition cond = FlagsConditionToCondition(condition, op);
  if (op == kPPC_CmpDouble) {
    // check for unordered if necessary
    if (cond == le) {
      reg_value = 0;
      __ li(reg, Operand::Zero());
      __ bunordered(&done, cr);
    } else if (cond == gt) {
      reg_value = 1;
      __ li(reg, Operand(1));
      __ bunordered(&done, cr);
    }
    // Unnecessary for eq/lt & ne/ge since only FU bit will be set.
  }

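  // With ISELECT the 0/1 value is materialized without branching: load 1 up
  // front, then conditionally select either it or zero (for the negated
  // forms r0 reads as a literal zero in isel). Without ISELECT, fall back
  // to loading 0 and conditionally branching over the load of 1.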
  if (CpuFeatures::IsSupported(ISELECT)) {
    switch (cond) {
      case eq:
      case lt:
      case gt:
        if (reg_value != 1) __ li(reg, Operand(1));
        __ li(kScratchReg, Operand::Zero());
        __ isel(cond, reg, reg, kScratchReg, cr);
        break;
      case ne:
      case ge:
      case le:
        if (reg_value != 1) __ li(reg, Operand(1));
        // r0 implies logical zero in this form
        __ isel(NegateCondition(cond), reg, r0, reg, cr);
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    if (reg_value != 0) __ li(reg, Operand::Zero());
    __ b(NegateCondition(cond), &done, cr);
    __ li(reg, Operand(1));
  }
  __ bind(&done);
}

void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
  PPCOperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  std::vector<std::pair<int32_t, Label*>> cases;
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
  }
  AssembleArchBinarySearchSwitchRange(input, i.InputRpo(1), cases.data(),
                                      cases.data() + cases.size());
}

void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
  PPCOperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    __ Cmpwi(input, Operand(i.InputInt32(index + 0)), r0);
    __ beq(GetLabel(i.InputRpo(index + 1)));
  }
  AssembleArchJump(i.InputRpo(1));
}

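// Emits a bounds check against the case count (out-of-range inputs branch
// to the default block), then indexes into an in-code jump table built via
// AddJumpTable/AssembleJumpTable and jumps through kScratchReg.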
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  PPCOperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2);
  Label** cases = zone()->NewArray<Label*>(case_count);
  for (int32_t index = 0; index < case_count; ++index) {
    cases[index] = GetLabel(i.InputRpo(index + 2));
  }
  Label* const table = AddJumpTable(cases, case_count);
  __ Cmpli(input, Operand(case_count), r0);
  __ bge(GetLabel(i.InputRpo(1)));
  __ mov_label_addr(kScratchReg, table);
  __ ShiftLeftImm(r0, input, Operand(kPointerSizeLog2));
  __ LoadPX(kScratchReg, MemOperand(kScratchReg, r0));
  __ Jump(kScratchReg);
}

void CodeGenerator::FinishFrame(Frame* frame) {
  auto call_descriptor = linkage()->GetIncomingDescriptor();
  const RegList double_saves = call_descriptor->CalleeSavedFPRegisters();

  // Save callee-saved Double registers.
  if (double_saves != 0) {
    frame->AlignSavedCalleeRegisterSlots();
    DCHECK_EQ(kNumCalleeSavedDoubles,
              base::bits::CountPopulation(double_saves));
    frame->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
                                            (kDoubleSize / kPointerSize));
  }
  // Save callee-saved registers.
  const RegList saves = FLAG_enable_embedded_constant_pool
                            ? call_descriptor->CalleeSavedRegisters() &
                                  ~kConstantPoolRegister.bit()
                            : call_descriptor->CalleeSavedRegisters();
  if (saves != 0) {
    // register save area does not include the fp or constant pool pointer.
    const int num_saves =
        kNumCalleeSaved - 1 - (FLAG_enable_embedded_constant_pool ? 1 : 0);
    DCHECK(num_saves == base::bits::CountPopulation(saves));
    frame->AllocateSavedCalleeRegisterSlots(num_saves);
  }
}

void CodeGenerator::AssembleConstructFrame() {
  auto call_descriptor = linkage()->GetIncomingDescriptor();
  if (frame_access_state()->has_frame()) {
    if (call_descriptor->IsCFunctionCall()) {
      __ function_descriptor();
      __ mflr(r0);
      if (FLAG_enable_embedded_constant_pool) {
        __ Push(r0, fp, kConstantPoolRegister);
        // Adjust FP to point to saved FP.
        __ subi(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
      } else {
        __ Push(r0, fp);
        __ mr(fp, sp);
      }
    } else if (call_descriptor->IsJSFunctionCall()) {
      __ Prologue();
      if (call_descriptor->PushArgumentCount()) {
        __ Push(kJavaScriptCallArgCountRegister);
      }
    } else {
      StackFrame::Type type = info()->GetOutputStackFrameType();
      // TODO(mbrandy): Detect cases where ip is the entrypoint (for
      // efficient initialization of the constant pool pointer register).
      __ StubPrologue(type);
      if (call_descriptor->IsWasmFunctionCall()) {
        __ Push(kWasmInstanceRegister);
      }
    }
  }

  int shrink_slots = frame()->GetTotalFrameSlotCount() -
                     call_descriptor->CalculateFixedFrameSize();
  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the unoptimized
    // frame is still on the stack. Optimized code uses OSR values directly from
    // the unoptimized frame. Thus, all that needs to be done is to allocate the
    // remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    shrink_slots -= osr_helper()->UnoptimizedFrameSlots();
    ResetSpeculationPoison();
  }

  const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
  const RegList saves = FLAG_enable_embedded_constant_pool
                            ? call_descriptor->CalleeSavedRegisters() &
                                  ~kConstantPoolRegister.bit()
                            : call_descriptor->CalleeSavedRegisters();

  if (shrink_slots > 0) {
    if (info()->IsWasm() && shrink_slots > 128) {
      // For WebAssembly functions with big frames we have to do the stack
      // overflow check before we construct the frame. Otherwise we may not
      // have enough space on the stack to call the runtime for the stack
      // overflow.
      Label done;

      // If the frame is bigger than the stack, we throw the stack overflow
      // exception unconditionally. Thereby we can avoid the integer overflow
      // check in the condition code.
      if ((shrink_slots * kPointerSize) < (FLAG_stack_size * 1024)) {
        Register scratch = ip;
        __ LoadP(scratch,
                 FieldMemOperand(
                     kWasmInstanceRegister,
                     WasmInstanceObject::kRealStackLimitAddressOffset));
        __ LoadP(scratch, MemOperand(scratch), r0);
        __ Add(scratch, scratch, shrink_slots * kPointerSize, r0);
        __ cmpl(sp, scratch);
        __ bge(&done);
      }

      __ LoadP(r5,
               FieldMemOperand(kWasmInstanceRegister,
                               WasmInstanceObject::kCEntryStubOffset),
               r0);
      __ Move(cp, Smi::kZero);
      __ CallRuntimeWithCEntry(Runtime::kThrowWasmStackOverflow, r5);
      // We come from WebAssembly, there are no references for the GC.
      ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
      RecordSafepoint(reference_map, Safepoint::kSimple, 0,
                      Safepoint::kNoLazyDeopt);
      if (FLAG_debug_code) {
        __ stop(GetAbortReason(AbortReason::kUnexpectedReturnFromThrow));
      }

      __ bind(&done);
    }

    // Skip callee-saved and return slots, which are pushed below.
    shrink_slots -= base::bits::CountPopulation(saves);
    shrink_slots -= frame()->GetReturnSlotCount();
    shrink_slots -=
        (kDoubleSize / kPointerSize) * base::bits::CountPopulation(saves_fp);
    __ Add(sp, sp, -shrink_slots * kPointerSize, r0);
  }

  // Save callee-saved Double registers.
  if (saves_fp != 0) {
    __ MultiPushDoubles(saves_fp);
    DCHECK_EQ(kNumCalleeSavedDoubles, base::bits::CountPopulation(saves_fp));
  }

  // Save callee-saved registers.
  if (saves != 0) {
    __ MultiPush(saves);
    // register save area does not include the fp or constant pool pointer.
  }

  const int returns = frame()->GetReturnSlotCount();
  if (returns != 0) {
    // Create space for returns.
    __ Add(sp, sp, -returns * kPointerSize, r0);
  }
}

void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
  auto call_descriptor = linkage()->GetIncomingDescriptor();
  int pop_count = static_cast<int>(call_descriptor->StackParameterCount());

  const int returns = frame()->GetReturnSlotCount();
  if (returns != 0) {
    // Free up the space reserved for returns.
    __ Add(sp, sp, returns * kPointerSize, r0);
  }

  // Restore registers.
  const RegList saves = FLAG_enable_embedded_constant_pool
                            ? call_descriptor->CalleeSavedRegisters() &
                                  ~kConstantPoolRegister.bit()
                            : call_descriptor->CalleeSavedRegisters();
  if (saves != 0) {
    __ MultiPop(saves);
  }

  // Restore double registers.
  const RegList double_saves = call_descriptor->CalleeSavedFPRegisters();
  if (double_saves != 0) {
    __ MultiPopDoubles(double_saves);
  }
  PPCOperandConverter g(this, nullptr);

  if (call_descriptor->IsCFunctionCall()) {
    AssembleDeconstructFrame();
  } else if (frame_access_state()->has_frame()) {
    // Canonicalize JSFunction return sites for now unless they have a
    // variable number of stack slot pops.
    if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
      if (return_label_.is_bound()) {
        __ b(&return_label_);
        return;
      } else {
        __ bind(&return_label_);
        AssembleDeconstructFrame();
      }
    } else {
      AssembleDeconstructFrame();
    }
  }
  // Constant pool is unavailable since the frame has been destructed
  ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
  if (pop->IsImmediate()) {
    DCHECK(Constant::kInt32 == g.ToConstant(pop).type() ||
           Constant::kInt64 == g.ToConstant(pop).type());
    pop_count += g.ToConstant(pop).ToInt32();
  } else {
    __ Drop(g.ToRegister(pop));
  }
  __ Drop(pop_count);
  __ Ret();
}

void CodeGenerator::FinishCode() { __ EmitConstantPool(); }

void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  PPCOperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ Move(g.ToRegister(destination), src);
    } else {
      __ StoreP(src, g.ToMemOperand(destination), r0);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsRegister()) {
      __ LoadP(g.ToRegister(destination), src, r0);
    } else {
      Register temp = kScratchReg;
      __ LoadP(temp, src, r0);
      __ StoreP(temp, g.ToMemOperand(destination), r0);
    }
  } else if (source->IsConstant()) {
    Constant src = g.ToConstant(source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst =
          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
      switch (src.type()) {
        case Constant::kInt32:
#if V8_TARGET_ARCH_PPC64
          if (false) {
#else
          if (RelocInfo::IsWasmReference(src.rmode())) {
#endif
            __ mov(dst, Operand(src.ToInt32(), src.rmode()));
          } else {
            __ mov(dst, Operand(src.ToInt32()));
          }
          break;
        case Constant::kInt64:
#if V8_TARGET_ARCH_PPC64
          if (RelocInfo::IsWasmPtrReference(src.rmode())) {
            __ mov(dst, Operand(src.ToInt64(), src.rmode()));
          } else {
#endif
            __ mov(dst, Operand(src.ToInt64()));
#if V8_TARGET_ARCH_PPC64
          }
#endif
          break;
        case Constant::kFloat32:
          __ mov(dst, Operand::EmbeddedNumber(src.ToFloat32()));
          break;
        case Constant::kFloat64:
          __ mov(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
          break;
        case Constant::kExternalReference:
          __ Move(dst, src.ToExternalReference());
          break;
        case Constant::kHeapObject: {
          Handle<HeapObject> src_object = src.ToHeapObject();
          Heap::RootListIndex index;
          if (IsMaterializableFromRoot(src_object, &index)) {
            __ LoadRoot(dst, index);
          } else {
            __ Move(dst, src_object);
          }
          break;
        }
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(dcarney): loading RPO constants on PPC.
          break;
      }
      if (destination->IsStackSlot()) {
        __ StoreP(dst, g.ToMemOperand(destination), r0);
      }
    } else {
      DoubleRegister dst = destination->IsFPRegister()
                               ? g.ToDoubleRegister(destination)
                               : kScratchDoubleReg;
      Double value;
#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
      // Casting a single-precision sNaN to double precision quiets it
      // (converts it to a qNaN) on ia32/x64 hosts, so the widening is done
      // on the raw bits instead to preserve signaling NaNs.
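      // In the widening below, the sign and leading exponent bit come from
      // the 0xC0000000 mask, bit 30 is replicated to fill the wider float64
      // exponent, and the 23-bit payload (including the quiet bit) is
      // shifted into the top of the float64 mantissa, so a signaling NaN
      // stays signaling.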
      if (src.type() == Constant::kFloat32) {
        uint32_t val = src.ToFloat32AsInt();
        if ((val & 0x7F800000) == 0x7F800000) {
          uint64_t dval = static_cast<uint64_t>(val);
          dval = ((dval & 0xC0000000) << 32) | ((dval & 0x40000000) << 31) |
                 ((dval & 0x40000000) << 30) | ((dval & 0x7FFFFFFF) << 29);
          value = Double(dval);
        } else {
          value = Double(static_cast<double>(src.ToFloat32()));
        }
      } else {
        value = Double(src.ToFloat64());
      }
#else
      value = src.type() == Constant::kFloat32
                  ? Double(static_cast<double>(src.ToFloat32()))
                  : Double(src.ToFloat64());
#endif
      __ LoadDoubleLiteral(dst, value, kScratchReg);
      if (destination->IsDoubleStackSlot()) {
        __ StoreDouble(dst, g.ToMemOperand(destination), r0);
      } else if (destination->IsFloatStackSlot()) {
        __ StoreSingle(dst, g.ToMemOperand(destination), r0);
      }
    }
  } else if (source->IsFPRegister()) {
    DoubleRegister src = g.ToDoubleRegister(source);
    if (destination->IsFPRegister()) {
      DoubleRegister dst = g.ToDoubleRegister(destination);
      __ Move(dst, src);
    } else {
      DCHECK(destination->IsFPStackSlot());
      LocationOperand* op = LocationOperand::cast(source);
      if (op->representation() == MachineRepresentation::kFloat64) {
        __ StoreDouble(src, g.ToMemOperand(destination), r0);
      } else {
        __ StoreSingle(src, g.ToMemOperand(destination), r0);
      }
    }
  } else if (source->IsFPStackSlot()) {
    DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsFPRegister()) {
      LocationOperand* op = LocationOperand::cast(source);
      if (op->representation() == MachineRepresentation::kFloat64) {
        __ LoadDouble(g.ToDoubleRegister(destination), src, r0);
      } else {
        __ LoadSingle(g.ToDoubleRegister(destination), src, r0);
      }
    } else {
      LocationOperand* op = LocationOperand::cast(source);
      DoubleRegister temp = kScratchDoubleReg;
      if (op->representation() == MachineRepresentation::kFloat64) {
        __ LoadDouble(temp, src, r0);
        __ StoreDouble(temp, g.ToMemOperand(destination), r0);
      } else {
        __ LoadSingle(temp, src, r0);
        __ StoreSingle(temp, g.ToMemOperand(destination), r0);
      }
    }
  } else {
    UNREACHABLE();
  }
}
// Swaps the contents of source and destination.
// Source and destination can each be a Register, FloatRegister,
// DoubleRegister, StackSlot, FloatStackSlot, or DoubleStackSlot.
void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  PPCOperandConverter g(this, nullptr);
  if (source->IsRegister()) {
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ SwapP(src, g.ToRegister(destination), kScratchReg);
    } else {
      DCHECK(destination->IsStackSlot());
      __ SwapP(src, g.ToMemOperand(destination), kScratchReg);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsStackSlot());
    __ SwapP(g.ToMemOperand(source), g.ToMemOperand(destination), kScratchReg,
             r0);
  } else if (source->IsFloatRegister()) {
    DoubleRegister src = g.ToDoubleRegister(source);
    if (destination->IsFloatRegister()) {
      __ SwapFloat32(src, g.ToDoubleRegister(destination), kScratchDoubleReg);
    } else {
      DCHECK(destination->IsFloatStackSlot());
      __ SwapFloat32(src, g.ToMemOperand(destination), kScratchDoubleReg);
    }
  } else if (source->IsDoubleRegister()) {
    DoubleRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      __ SwapDouble(src, g.ToDoubleRegister(destination), kScratchDoubleReg);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      __ SwapDouble(src, g.ToMemOperand(destination), kScratchDoubleReg);
    }
  } else if (source->IsFloatStackSlot()) {
    DCHECK(destination->IsFloatStackSlot());
    __ SwapFloat32(g.ToMemOperand(source), g.ToMemOperand(destination),
                   kScratchDoubleReg, d0);
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleStackSlot());
    __ SwapDouble(g.ToMemOperand(source), g.ToMemOperand(destination),
                  kScratchDoubleReg, d0);
  } else if (source->IsSimd128Register()) {
    UNREACHABLE();
  } else {
    UNREACHABLE();
  }
}

void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
  for (size_t index = 0; index < target_count; ++index) {
    __ emit_label_addr(targets[index]);
  }
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8