// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
11 #include "src/mips/macro-assembler-mips.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->


// TODO(plind): Possibly avoid using these lithium names.
#define kScratchReg kLithiumScratchReg
#define kScratchReg2 kLithiumScratchReg2
#define kScratchDoubleReg kLithiumScratchDouble


// TODO(plind): consider renaming these macros.
#define TRACE_MSG(msg)                                                      \
  PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \
         __LINE__)

#define TRACE_UNIMPL()                                            \
  PrintF("UNIMPLEMENTED code_generator_mips64: %s at line %d\n",  \
         __FUNCTION__, __LINE__)


// Adds Mips-specific methods to convert InstructionOperands.
class MipsOperandConverter final : public InstructionOperandConverter {
 public:
  MipsOperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  FloatRegister OutputSingleRegister(size_t index = 0) {
    return ToSingleRegister(instr_->OutputAt(index));
  }

  FloatRegister InputSingleRegister(size_t index) {
    return ToSingleRegister(instr_->InputAt(index));
  }

  FloatRegister ToSingleRegister(InstructionOperand* op) {
    // The single- (float) and double-precision register namespaces are the
    // same on MIPS; both are typedefs of FPURegister.
    return ToDoubleRegister(op);
  }

  Register InputOrZeroRegister(size_t index) {
    if (instr_->InputAt(index)->IsImmediate()) {
      DCHECK((InputInt32(index) == 0));
      return zero_reg;
    }
    return InputRegister(index);
  }

  DoubleRegister InputOrZeroDoubleRegister(size_t index) {
    if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;

    return InputDoubleRegister(index);
  }

  DoubleRegister InputOrZeroSingleRegister(size_t index) {
    if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;

    return InputSingleRegister(index);
  }

  Operand InputImmediate(size_t index) {
    Constant constant = ToConstant(instr_->InputAt(index));
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kInt64:
        return Operand(constant.ToInt64());
      case Constant::kFloat32:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
      case Constant::kFloat64:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kExternalReference:
      case Constant::kHeapObject:
        // TODO(plind): Maybe we should handle ExtRef & HeapObj here?
        //    maybe not done on arm due to const pool ??
        break;
      case Constant::kRpoNumber:
        UNREACHABLE();  // TODO(titzer): RPO immediates on mips?
        break;
    }
    UNREACHABLE();
    return Operand(zero_reg);
  }

  Operand InputOperand(size_t index) {
    InstructionOperand* op = instr_->InputAt(index);
    if (op->IsRegister()) {
      return Operand(ToRegister(op));
    }
    return InputImmediate(index);
  }

  MemOperand MemoryOperand(size_t* first_index) {
    const size_t index = *first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
        break;
      case kMode_MRI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_MRR:
        // TODO(plind): r6 address mode, to be implemented ...
        UNREACHABLE();
    }
    UNREACHABLE();
    return MemOperand(no_reg);
  }

  MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); }

  MemOperand ToMemOperand(InstructionOperand* op) const {
    DCHECK_NOT_NULL(op);
    DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
    return SlotToMemOperand(AllocatedOperand::cast(op)->index());
  }

  MemOperand SlotToMemOperand(int slot) const {
    FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
  }
};

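// A minimal sketch of how the converter is typically used inside
// CodeGenerator::AssembleArchInstruction (illustration only, not extra
// functionality):
//
//   MipsOperandConverter i(this, instr);
//   __ Daddu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
//
// InputOperand() yields either a register or an immediate depending on what
// the instruction selector produced, so a single case handles both forms.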

static inline bool HasRegisterInput(Instruction* instr, size_t index) {
  return instr->InputAt(index)->IsRegister();
}


namespace {

class OutOfLineLoadSingle final : public OutOfLineCode {
 public:
  OutOfLineLoadSingle(CodeGenerator* gen, FloatRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    __ Move(result_, std::numeric_limits<float>::quiet_NaN());
  }

 private:
  FloatRegister const result_;
};


class OutOfLineLoadDouble final : public OutOfLineCode {
 public:
  OutOfLineLoadDouble(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    __ Move(result_, std::numeric_limits<double>::quiet_NaN());
  }

 private:
  DoubleRegister const result_;
};


class OutOfLineLoadInteger final : public OutOfLineCode {
 public:
  OutOfLineLoadInteger(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final { __ mov(result_, zero_reg); }

 private:
  Register const result_;
};


class OutOfLineRound : public OutOfLineCode {
 public:
  OutOfLineRound(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    // Handle rounding to zero case where sign has to be preserved.
    // High bits of double input already in kScratchReg.
    __ dsrl(at, kScratchReg, 31);
    __ dsll(at, at, 31);
    __ mthc1(at, result_);
  }

 private:
  DoubleRegister const result_;
};


class OutOfLineRound32 : public OutOfLineCode {
 public:
  OutOfLineRound32(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    // Handle rounding to zero case where sign has to be preserved.
    // High bits of float input already in kScratchReg.
    __ srl(at, kScratchReg, 31);
    __ sll(at, at, 31);
    __ mtc1(at, result_);
  }

 private:
  DoubleRegister const result_;
};


class OutOfLineRecordWrite final : public OutOfLineCode {
 public:
  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode)
      : OutOfLineCode(gen),
        object_(object),
        index_(index),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode),
        must_save_lr_(!gen->frame_access_state()->has_frame()) {}

  void Generate() final {
    if (mode_ > RecordWriteMode::kValueIsPointer) {
      __ JumpIfSmi(value_, exit());
    }
    __ CheckPageFlag(value_, scratch0_,
                     MemoryChunk::kPointersToHereAreInterestingMask, eq,
                     exit());
    RememberedSetAction const remembered_set_action =
        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
                                             : OMIT_REMEMBERED_SET;
    SaveFPRegsMode const save_fp_mode =
        frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
    if (must_save_lr_) {
      // We need to save and restore ra if the frame was elided.
      __ Push(ra);
    }
    RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
                         remembered_set_action, save_fp_mode);
    __ Daddu(scratch1_, object_, index_);
    __ CallStub(&stub);
    if (must_save_lr_) {
      __ Pop(ra);
    }
  }

 private:
  Register const object_;
  Register const index_;
  Register const value_;
  Register const scratch0_;
  Register const scratch1_;
  RecordWriteMode const mode_;
  bool must_save_lr_;
};

#define CREATE_OOL_CLASS(ool_name, masm_ool_name, T)                 \
  class ool_name final : public OutOfLineCode {                      \
   public:                                                           \
    ool_name(CodeGenerator* gen, T dst, T src1, T src2)              \
        : OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {} \
                                                                     \
    void Generate() final { __ masm_ool_name(dst_, src1_, src2_); }  \
                                                                     \
   private:                                                          \
    T const dst_;                                                    \
    T const src1_;                                                   \
    T const src2_;                                                   \
  }

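// For reference, CREATE_OOL_CLASS(OutOfLineFloat32Max, Float32MaxOutOfLine,
// FPURegister) expands (sketched) to an OutOfLineCode subclass whose
// Generate() simply defers to the macro assembler's slow path:
//
//   void Generate() final { __ Float32MaxOutOfLine(dst_, src1_, src2_); }
//
// The fast path is emitted inline (see kMips64Float32Max below) and branches
// here only for inputs it cannot decide directly, e.g. NaN operands.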
CREATE_OOL_CLASS(OutOfLineFloat32Max, Float32MaxOutOfLine, FPURegister);
CREATE_OOL_CLASS(OutOfLineFloat32Min, Float32MinOutOfLine, FPURegister);
CREATE_OOL_CLASS(OutOfLineFloat64Max, Float64MaxOutOfLine, FPURegister);
CREATE_OOL_CLASS(OutOfLineFloat64Min, Float64MinOutOfLine, FPURegister);

#undef CREATE_OOL_CLASS

Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
  switch (condition) {
    case kEqual:
      return eq;
    case kNotEqual:
      return ne;
    case kSignedLessThan:
      return lt;
    case kSignedGreaterThanOrEqual:
      return ge;
    case kSignedLessThanOrEqual:
      return le;
    case kSignedGreaterThan:
      return gt;
    case kUnsignedLessThan:
      return lo;
    case kUnsignedGreaterThanOrEqual:
      return hs;
    case kUnsignedLessThanOrEqual:
      return ls;
    case kUnsignedGreaterThan:
      return hi;
    case kUnorderedEqual:
    case kUnorderedNotEqual:
      break;
    default:
      break;
  }
  UNREACHABLE();
  return kNoCondition;
}


Condition FlagsConditionToConditionTst(FlagsCondition condition) {
  switch (condition) {
    case kNotEqual:
      return ne;
    case kEqual:
      return eq;
    default:
      break;
  }
  UNREACHABLE();
  return kNoCondition;
}


Condition FlagsConditionToConditionOvf(FlagsCondition condition) {
  switch (condition) {
    case kOverflow:
      return ne;
    case kNotOverflow:
      return eq;
    default:
      break;
  }
  UNREACHABLE();
  return kNoCondition;
}


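// Maps a FlagsCondition to the <predicate, FPUCondition> pair consumed by
// the FPU compare-and-branch sequences. Note the asymmetry below: the
// "greater than" conditions are encoded as the negation (predicate = false)
// of an *unordered* less-than test, so NaN operands end up on the correct
// side of the branch.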
FlagsConditionToConditionCmpFPU(bool & predicate,FlagsCondition condition)355 FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
356 FlagsCondition condition) {
357 switch (condition) {
358 case kEqual:
359 predicate = true;
360 return EQ;
361 case kNotEqual:
362 predicate = false;
363 return EQ;
364 case kUnsignedLessThan:
365 predicate = true;
366 return OLT;
367 case kUnsignedGreaterThanOrEqual:
368 predicate = false;
369 return ULT;
370 case kUnsignedLessThanOrEqual:
371 predicate = true;
372 return OLE;
373 case kUnsignedGreaterThan:
374 predicate = false;
375 return ULE;
376 case kUnorderedEqual:
377 case kUnorderedNotEqual:
378 predicate = true;
379 break;
380 default:
381 predicate = true;
382 break;
383 }
384 UNREACHABLE();
385 return kNoFPUCondition;
386 }
387
388 } // namespace
389 #define ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, length, out_of_bounds) \
390 do { \
391 if (!length.is_reg() && base::bits::IsPowerOfTwo64(length.immediate())) { \
392 __ And(kScratchReg, offset, Operand(~(length.immediate() - 1))); \
393 __ Branch(USE_DELAY_SLOT, out_of_bounds, ne, kScratchReg, \
394 Operand(zero_reg)); \
395 } else { \
396 __ Branch(USE_DELAY_SLOT, out_of_bounds, hs, offset, length); \
397 } \
398 } while (0)
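// In both bounds-check helpers, a power-of-two immediate length lets a single
// mask test replace the compare: for example, with length == 0x1000 the mask
// is ~0xfff, and (offset & ~0xfff) != 0 exactly when offset >= 0x1000, i.e.
// out of bounds.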

#define ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, length, out_of_bounds)        \
  do {                                                                        \
    if (!length.is_reg() && base::bits::IsPowerOfTwo64(length.immediate())) { \
      __ Or(kScratchReg, zero_reg, Operand(offset));                          \
      __ And(kScratchReg, kScratchReg, Operand(~(length.immediate() - 1)));   \
      __ Branch(out_of_bounds, ne, kScratchReg, Operand(zero_reg));           \
    } else {                                                                  \
      __ Branch(out_of_bounds, ls, length.rm(), Operand(offset));             \
    }                                                                         \
  } while (0)

#define ASSEMBLE_CHECKED_LOAD_FLOAT(width, asm_instr)                          \
  do {                                                                         \
    auto result = i.Output##width##Register();                                 \
    auto ool = new (zone()) OutOfLineLoad##width(this, result);                \
    if (instr->InputAt(0)->IsRegister()) {                                     \
      auto offset = i.InputRegister(0);                                        \
      ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, i.InputOperand(1), ool->entry()); \
      __ And(kScratchReg, offset, Operand(0xffffffff));                        \
      __ Daddu(kScratchReg, i.InputRegister(2), kScratchReg);                  \
      __ asm_instr(result, MemOperand(kScratchReg, 0));                        \
    } else {                                                                   \
      int offset = static_cast<int>(i.InputOperand(0).immediate());            \
      ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, i.InputOperand(1),               \
                                      ool->entry());                           \
      __ asm_instr(result, MemOperand(i.InputRegister(2), offset));            \
    }                                                                          \
    __ bind(ool->exit());                                                      \
  } while (0)

#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                               \
  do {                                                                         \
    auto result = i.OutputRegister();                                          \
    auto ool = new (zone()) OutOfLineLoadInteger(this, result);                \
    if (instr->InputAt(0)->IsRegister()) {                                     \
      auto offset = i.InputRegister(0);                                        \
      ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, i.InputOperand(1), ool->entry()); \
      __ And(kScratchReg, offset, Operand(0xffffffff));                        \
      __ Daddu(kScratchReg, i.InputRegister(2), kScratchReg);                  \
      __ asm_instr(result, MemOperand(kScratchReg, 0));                        \
    } else {                                                                   \
      int offset = static_cast<int>(i.InputOperand(0).immediate());            \
      ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, i.InputOperand(1),               \
                                      ool->entry());                           \
      __ asm_instr(result, MemOperand(i.InputRegister(2), offset));            \
    }                                                                          \
    __ bind(ool->exit());                                                      \
  } while (0)

#define ASSEMBLE_CHECKED_STORE_FLOAT(width, asm_instr)                   \
  do {                                                                   \
    Label done;                                                          \
    if (instr->InputAt(0)->IsRegister()) {                               \
      auto offset = i.InputRegister(0);                                  \
      auto value = i.InputOrZero##width##Register(2);                    \
      if (value.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {        \
        __ Move(kDoubleRegZero, 0.0);                                    \
      }                                                                  \
      ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, i.InputOperand(1), &done);  \
      __ And(kScratchReg, offset, Operand(0xffffffff));                  \
      __ Daddu(kScratchReg, i.InputRegister(3), kScratchReg);            \
      __ asm_instr(value, MemOperand(kScratchReg, 0));                   \
    } else {                                                             \
      int offset = static_cast<int>(i.InputOperand(0).immediate());      \
      auto value = i.InputOrZero##width##Register(2);                    \
      if (value.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {        \
        __ Move(kDoubleRegZero, 0.0);                                    \
      }                                                                  \
      ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, i.InputOperand(1), &done); \
      __ asm_instr(value, MemOperand(i.InputRegister(3), offset));       \
    }                                                                    \
    __ bind(&done);                                                      \
  } while (0)

#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)                        \
  do {                                                                   \
    Label done;                                                          \
    if (instr->InputAt(0)->IsRegister()) {                               \
      auto offset = i.InputRegister(0);                                  \
      auto value = i.InputOrZeroRegister(2);                             \
      ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, i.InputOperand(1), &done);  \
      __ And(kScratchReg, offset, Operand(0xffffffff));                  \
      __ Daddu(kScratchReg, i.InputRegister(3), kScratchReg);            \
      __ asm_instr(value, MemOperand(kScratchReg, 0));                   \
    } else {                                                             \
      int offset = static_cast<int>(i.InputOperand(0).immediate());      \
      auto value = i.InputOrZeroRegister(2);                             \
      ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, i.InputOperand(1), &done); \
      __ asm_instr(value, MemOperand(i.InputRegister(3), offset));       \
    }                                                                    \
    __ bind(&done);                                                      \
  } while (0)

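// The pre-r6 rounding sequences below share one trick: if the biased exponent
// is >= kExponentBias + kMantissaBits, the value is already integral (or
// NaN/Infinity), so the input can be returned unchanged. The out-of-line tail
// only restores the sign bit for results that truncate to zero, so that e.g.
// -0.5 rounds to -0.0 rather than +0.0.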
#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(mode)                                  \
  if (kArchVariant == kMips64r6) {                                             \
    __ cfc1(kScratchReg, FCSR);                                                \
    __ li(at, Operand(mode_##mode));                                           \
    __ ctc1(at, FCSR);                                                         \
    __ rint_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));             \
    __ ctc1(kScratchReg, FCSR);                                                \
  } else {                                                                     \
    auto ool = new (zone()) OutOfLineRound(this, i.OutputDoubleRegister());    \
    Label done;                                                                \
    __ mfhc1(kScratchReg, i.InputDoubleRegister(0));                           \
    __ Ext(at, kScratchReg, HeapNumber::kExponentShift,                        \
           HeapNumber::kExponentBits);                                         \
    __ Branch(USE_DELAY_SLOT, &done, hs, at,                                   \
              Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits)); \
    __ mov_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));              \
    __ mode##_l_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));         \
    __ dmfc1(at, i.OutputDoubleRegister());                                    \
    __ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg));        \
    __ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister());            \
    __ bind(ool->exit());                                                      \
    __ bind(&done);                                                            \
  }

#define ASSEMBLE_ROUND_FLOAT_TO_FLOAT(mode)                                   \
  if (kArchVariant == kMips64r6) {                                            \
    __ cfc1(kScratchReg, FCSR);                                               \
    __ li(at, Operand(mode_##mode));                                          \
    __ ctc1(at, FCSR);                                                        \
    __ rint_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));            \
    __ ctc1(kScratchReg, FCSR);                                               \
  } else {                                                                    \
    int32_t kFloat32ExponentBias = 127;                                       \
    int32_t kFloat32MantissaBits = 23;                                        \
    int32_t kFloat32ExponentBits = 8;                                         \
    auto ool = new (zone()) OutOfLineRound32(this, i.OutputDoubleRegister()); \
    Label done;                                                               \
    __ mfc1(kScratchReg, i.InputDoubleRegister(0));                           \
    __ Ext(at, kScratchReg, kFloat32MantissaBits, kFloat32ExponentBits);      \
    __ Branch(USE_DELAY_SLOT, &done, hs, at,                                  \
              Operand(kFloat32ExponentBias + kFloat32MantissaBits));          \
    __ mov_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));             \
    __ mode##_w_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));        \
    __ mfc1(at, i.OutputDoubleRegister());                                    \
    __ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg));       \
    __ cvt_s_w(i.OutputDoubleRegister(), i.OutputDoubleRegister());           \
    __ bind(ool->exit());                                                     \
    __ bind(&done);                                                           \
  }

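// The atomic helpers below implement sequentially consistent accesses the
// conservative way: a full sync barrier after the load, and barriers on both
// sides of the store.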
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr)          \
  do {                                                   \
    __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \
    __ sync();                                           \
  } while (0)

#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr)               \
  do {                                                         \
    __ sync();                                                 \
    __ asm_instr(i.InputOrZeroRegister(2), i.MemoryOperand()); \
    __ sync();                                                 \
  } while (0)

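// The IEEE754 helpers below route Math.* operations through the C fallbacks:
// arguments are moved into the FP parameter registers, the ieee754 runtime
// function is called, and the result is fetched from the FP result register.
// The FrameScope is MANUAL because PrepareCallCFunction does the frame and
// alignment work itself.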
#define ASSEMBLE_IEEE754_BINOP(name)                                          \
  do {                                                                        \
    FrameScope scope(masm(), StackFrame::MANUAL);                             \
    __ PrepareCallCFunction(0, 2, kScratchReg);                               \
    __ MovToFloatParameters(i.InputDoubleRegister(0),                         \
                            i.InputDoubleRegister(1));                        \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
                     0, 2);                                                   \
    /* Move the result in the double result register. */                     \
    __ MovFromFloatResult(i.OutputDoubleRegister());                          \
  } while (0)

#define ASSEMBLE_IEEE754_UNOP(name)                                          \
  do {                                                                       \
    FrameScope scope(masm(), StackFrame::MANUAL);                            \
    __ PrepareCallCFunction(0, 1, kScratchReg);                              \
    __ MovToFloatParameter(i.InputDoubleRegister(0));                        \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
                     0, 1);                                                  \
    /* Move the result in the double result register. */                    \
    __ MovFromFloatResult(i.OutputDoubleRegister());                         \
  } while (0)

void CodeGenerator::AssembleDeconstructFrame() {
  __ mov(sp, fp);
  __ Pop(ra, fp);
}

void CodeGenerator::AssemblePrepareTailCall() {
  if (frame_access_state()->has_frame()) {
    __ ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
    __ ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  }
  frame_access_state()->SetFrameAccessToSP();
}

void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
                                                     Register scratch1,
                                                     Register scratch2,
                                                     Register scratch3) {
  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
  Label done;

  // Check if current frame is an arguments adaptor frame.
  __ ld(scratch3, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ Branch(&done, ne, scratch3,
            Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));

  // Load arguments count from current arguments adaptor frame (note, it
  // does not include receiver).
  Register caller_args_count_reg = scratch1;
  __ ld(caller_args_count_reg,
        MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(caller_args_count_reg);

  ParameterCount callee_args_count(args_reg);
  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
                        scratch3);
  __ bind(&done);
}

namespace {

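// Grows or (optionally) shrinks the stack so that exactly new_slot_above_sp
// slots sit above sp before a tail call. A hypothetical example: with 3 slots
// currently above sp and new_slot_above_sp == 5, the delta is 2, so sp is
// dropped by 2 * kPointerSize.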
void AdjustStackPointerForTailCall(MacroAssembler* masm,
                                   FrameAccessState* state,
                                   int new_slot_above_sp,
                                   bool allow_shrinkage = true) {
  int current_sp_offset = state->GetSPToFPSlotCount() +
                          StandardFrameConstants::kFixedSlotCountAboveFp;
  int stack_slot_delta = new_slot_above_sp - current_sp_offset;
  if (stack_slot_delta > 0) {
    masm->Dsubu(sp, sp, stack_slot_delta * kPointerSize);
    state->IncreaseSPDelta(stack_slot_delta);
  } else if (allow_shrinkage && stack_slot_delta < 0) {
    masm->Daddu(sp, sp, -stack_slot_delta * kPointerSize);
    state->IncreaseSPDelta(stack_slot_delta);
  }
}

}  // namespace

void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
                                              int first_unused_stack_slot) {
  AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                first_unused_stack_slot, false);
}

void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
                                             int first_unused_stack_slot) {
  AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                first_unused_stack_slot);
}

// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
    Instruction* instr) {
  MipsOperandConverter i(this, instr);
  InstructionCode opcode = instr->opcode();
  ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
  switch (arch_opcode) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (instr->InputAt(0)->IsImmediate()) {
        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      } else {
        __ daddiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
        __ Call(at);
      }
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchTailCallCodeObjectFromJSFunction:
    case kArchTailCallCodeObject: {
      if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                         i.TempRegister(0), i.TempRegister(1),
                                         i.TempRegister(2));
      }
      if (instr->InputAt(0)->IsImmediate()) {
        __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      } else {
        __ daddiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
        __ Jump(at);
      }
      frame_access_state()->ClearSPDelta();
      frame_access_state()->SetFrameAccessToDefault();
      break;
    }
    case kArchTailCallAddress: {
      CHECK(!instr->InputAt(0)->IsImmediate());
      __ Jump(i.InputRegister(0));
      frame_access_state()->ClearSPDelta();
      frame_access_state()->SetFrameAccessToDefault();
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
        __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
      }
      __ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Call(at);
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchTailCallJSFunctionFromJSFunction: {
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
        __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
      }
      AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                       i.TempRegister(0), i.TempRegister(1),
                                       i.TempRegister(2));
      __ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Jump(at);
      frame_access_state()->ClearSPDelta();
      frame_access_state()->SetFrameAccessToDefault();
      break;
    }
    case kArchPrepareCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      __ PrepareCallCFunction(num_parameters, kScratchReg);
      // Frame alignment requires using FP-relative frame addressing.
      frame_access_state()->SetFrameAccessToFP();
      break;
    }
    case kArchPrepareTailCall:
      AssemblePrepareTailCall();
      break;
    case kArchCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      if (instr->InputAt(0)->IsImmediate()) {
        ExternalReference ref = i.InputExternalReference(0);
        __ CallCFunction(ref, num_parameters);
      } else {
        Register func = i.InputRegister(0);
        __ CallCFunction(func, num_parameters);
      }
      frame_access_state()->SetFrameAccessToDefault();
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      break;
    case kArchLookupSwitch:
      AssembleArchLookupSwitch(instr);
      break;
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      break;
    case kArchDebugBreak:
      __ stop("kArchDebugBreak");
      break;
    case kArchComment: {
      Address comment_string = i.InputExternalReference(0).address();
      __ RecordComment(reinterpret_cast<const char*>(comment_string));
      break;
    }
    case kArchNop:
    case kArchThrowTerminator:
      // don't emit code for nops.
      break;
    case kArchDeoptimize: {
      int deopt_state_id =
          BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
      CodeGenResult result =
          AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
      if (result != kSuccess) return result;
      break;
    }
    case kArchRet:
      AssembleReturn(instr->InputAt(0));
      break;
    case kArchStackPointer:
      __ mov(i.OutputRegister(), sp);
      break;
    case kArchFramePointer:
      __ mov(i.OutputRegister(), fp);
      break;
    case kArchParentFramePointer:
      if (frame_access_state()->has_frame()) {
        __ ld(i.OutputRegister(), MemOperand(fp, 0));
      } else {
        __ mov(i.OutputRegister(), fp);
      }
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kArchStoreWithWriteBarrier: {
      RecordWriteMode mode =
          static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      Register scratch0 = i.TempRegister(0);
      Register scratch1 = i.TempRegister(1);
      auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
                                                   scratch0, scratch1, mode);
      __ Daddu(at, object, index);
      __ sd(value, MemOperand(at));
      __ CheckPageFlag(object, scratch0,
                       MemoryChunk::kPointersFromHereAreInterestingMask, ne,
                       ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kArchStackSlot: {
      FrameOffset offset =
          frame_access_state()->GetFrameOffset(i.InputInt32(0));
      __ Daddu(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
               Operand(offset.offset()));
      break;
    }
    case kIeee754Float64Acos:
      ASSEMBLE_IEEE754_UNOP(acos);
      break;
    case kIeee754Float64Acosh:
      ASSEMBLE_IEEE754_UNOP(acosh);
      break;
    case kIeee754Float64Asin:
      ASSEMBLE_IEEE754_UNOP(asin);
      break;
    case kIeee754Float64Asinh:
      ASSEMBLE_IEEE754_UNOP(asinh);
      break;
    case kIeee754Float64Atan:
      ASSEMBLE_IEEE754_UNOP(atan);
      break;
    case kIeee754Float64Atanh:
      ASSEMBLE_IEEE754_UNOP(atanh);
      break;
    case kIeee754Float64Atan2:
      ASSEMBLE_IEEE754_BINOP(atan2);
      break;
    case kIeee754Float64Cos:
      ASSEMBLE_IEEE754_UNOP(cos);
      break;
    case kIeee754Float64Cosh:
      ASSEMBLE_IEEE754_UNOP(cosh);
      break;
    case kIeee754Float64Cbrt:
      ASSEMBLE_IEEE754_UNOP(cbrt);
      break;
    case kIeee754Float64Exp:
      ASSEMBLE_IEEE754_UNOP(exp);
      break;
    case kIeee754Float64Expm1:
      ASSEMBLE_IEEE754_UNOP(expm1);
      break;
    case kIeee754Float64Log:
      ASSEMBLE_IEEE754_UNOP(log);
      break;
    case kIeee754Float64Log1p:
      ASSEMBLE_IEEE754_UNOP(log1p);
      break;
    case kIeee754Float64Log2:
      ASSEMBLE_IEEE754_UNOP(log2);
      break;
    case kIeee754Float64Log10:
      ASSEMBLE_IEEE754_UNOP(log10);
      break;
    case kIeee754Float64Pow: {
      MathPowStub stub(isolate(), MathPowStub::DOUBLE);
      __ CallStub(&stub);
      break;
    }
    case kIeee754Float64Sin:
      ASSEMBLE_IEEE754_UNOP(sin);
      break;
    case kIeee754Float64Sinh:
      ASSEMBLE_IEEE754_UNOP(sinh);
      break;
    case kIeee754Float64Tan:
      ASSEMBLE_IEEE754_UNOP(tan);
      break;
    case kIeee754Float64Tanh:
      ASSEMBLE_IEEE754_UNOP(tanh);
      break;
    case kMips64Add:
      __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dadd:
      __ Daddu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64DaddOvf:
      // Pseudo-instruction used for overflow/branch. No opcode emitted here.
      break;
    case kMips64Sub:
      __ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dsub:
      __ Dsubu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64DsubOvf:
      // Pseudo-instruction used for overflow/branch. No opcode emitted here.
      break;
    case kMips64Mul:
      __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64MulOvf:
      // Pseudo-instruction used for overflow/branch. No opcode emitted here.
      break;
    case kMips64MulHigh:
      __ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64MulHighU:
      __ Mulhu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64DMulHigh:
      __ Dmulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Div:
      __ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      if (kArchVariant == kMips64r6) {
        __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
      }
      break;
    case kMips64DivU:
      __ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      if (kArchVariant == kMips64r6) {
        __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
      }
      break;
    case kMips64Mod:
      __ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64ModU:
      __ Modu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dmul:
      __ Dmul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Ddiv:
      __ Ddiv(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      if (kArchVariant == kMips64r6) {
        __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
      }
      break;
    case kMips64DdivU:
      __ Ddivu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      if (kArchVariant == kMips64r6) {
        __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1));
      }
      break;
    case kMips64Dmod:
      __ Dmod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64DmodU:
      __ Dmodu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dlsa:
      DCHECK(instr->InputAt(2)->IsImmediate());
      __ Dlsa(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
              i.InputInt8(2));
      break;
    case kMips64Lsa:
      DCHECK(instr->InputAt(2)->IsImmediate());
      __ Lsa(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.InputInt8(2));
      break;
    case kMips64And:
      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
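    // The 32-bit variants below first emit "sll reg, reg, 0x0". On MIPS64
    // that is not a no-op: it sign-extends the low 32 bits into the upper
    // half, re-establishing the canonical form that 32-bit values must have
    // before a 32-bit operation.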
    case kMips64And32:
      if (instr->InputAt(1)->IsRegister()) {
        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
        __ sll(i.InputRegister(1), i.InputRegister(1), 0x0);
        __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      } else {
        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
        __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kMips64Or:
      __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Or32:
      if (instr->InputAt(1)->IsRegister()) {
        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
        __ sll(i.InputRegister(1), i.InputRegister(1), 0x0);
        __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      } else {
        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
        __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kMips64Nor:
      if (instr->InputAt(1)->IsRegister()) {
        __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      } else {
        DCHECK(i.InputOperand(1).immediate() == 0);
        __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
      }
      break;
    case kMips64Nor32:
      if (instr->InputAt(1)->IsRegister()) {
        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
        __ sll(i.InputRegister(1), i.InputRegister(1), 0x0);
        __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      } else {
        DCHECK(i.InputOperand(1).immediate() == 0);
        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
        __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
      }
      break;
    case kMips64Xor:
      __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Xor32:
      if (instr->InputAt(1)->IsRegister()) {
        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
        __ sll(i.InputRegister(1), i.InputRegister(1), 0x0);
        __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      } else {
        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
        __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      }
      break;
    case kMips64Clz:
      __ Clz(i.OutputRegister(), i.InputRegister(0));
      break;
    case kMips64Dclz:
      __ dclz(i.OutputRegister(), i.InputRegister(0));
      break;
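    // Ctz is synthesized from clz: x & -x isolates the lowest set bit, and
    // ctz(x) == 31 - clz(x & -x) for nonzero x. E.g. for x == 0b01101000,
    // x & -x == 0b00001000, clz == 28, so ctz == 3. Zero inputs are handled
    // separately and yield the word width.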
    case kMips64Ctz: {
      Register reg1 = kScratchReg;
      Register reg2 = kScratchReg2;
      Label skip_for_zero;
      Label end;
      // Branch if the operand is zero.
      __ Branch(&skip_for_zero, eq, i.InputRegister(0), Operand(zero_reg));
      // Isolate the lowest set bit and count the zeros above it.
      __ Subu(reg2, zero_reg, i.InputRegister(0));
      __ And(reg2, reg2, i.InputRegister(0));
      __ clz(reg2, reg2);
      // The trailing-zero count is 31 minus that leading-zero count.
      __ li(reg1, 0x1F);
      __ Subu(i.OutputRegister(), reg1, reg2);
      __ Branch(&end);
      __ bind(&skip_for_zero);
      // If the operand is zero, return the word length as the result.
      __ li(i.OutputRegister(), 0x20);
      __ bind(&end);
    } break;
    case kMips64Dctz: {
      Register reg1 = kScratchReg;
      Register reg2 = kScratchReg2;
      Label skip_for_zero;
      Label end;
      // Branch if the operand is zero.
      __ Branch(&skip_for_zero, eq, i.InputRegister(0), Operand(zero_reg));
      // Isolate the lowest set bit and count the zeros above it.
      __ Dsubu(reg2, zero_reg, i.InputRegister(0));
      __ And(reg2, reg2, i.InputRegister(0));
      __ dclz(reg2, reg2);
      // The trailing-zero count is 63 minus that leading-zero count.
      __ li(reg1, 0x3F);
      __ Subu(i.OutputRegister(), reg1, reg2);
      __ Branch(&end);
      __ bind(&skip_for_zero);
      // If the operand is zero, return the word length as the result.
      __ li(i.OutputRegister(), 0x40);
      __ bind(&end);
    } break;
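    // Population count via the classic SWAR reduction: pairwise mask-and-add
    // steps fold the count of set bits into progressively wider fields. For
    // one nibble, 0b1101 -> per-2-bit counts 0b1001 (2 and 1) -> per-4-bit
    // count 0b0011 (3).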
    case kMips64Popcnt: {
      Register reg1 = kScratchReg;
      Register reg2 = kScratchReg2;
      uint32_t m1 = 0x55555555;
      uint32_t m2 = 0x33333333;
      uint32_t m4 = 0x0f0f0f0f;
      uint32_t m8 = 0x00ff00ff;
      uint32_t m16 = 0x0000ffff;

      // Put count of ones in every 2 bits into those 2 bits.
      __ li(at, m1);
      __ dsrl(reg1, i.InputRegister(0), 1);
      __ And(reg2, i.InputRegister(0), at);
      __ And(reg1, reg1, at);
      __ Daddu(reg1, reg1, reg2);

      // Put count of ones in every 4 bits into those 4 bits.
      __ li(at, m2);
      __ dsrl(reg2, reg1, 2);
      __ And(reg2, reg2, at);
      __ And(reg1, reg1, at);
      __ Daddu(reg1, reg1, reg2);

      // Put count of ones in every 8 bits into those 8 bits.
      __ li(at, m4);
      __ dsrl(reg2, reg1, 4);
      __ And(reg2, reg2, at);
      __ And(reg1, reg1, at);
      __ Daddu(reg1, reg1, reg2);

      // Put count of ones in every 16 bits into those 16 bits.
      __ li(at, m8);
      __ dsrl(reg2, reg1, 8);
      __ And(reg2, reg2, at);
      __ And(reg1, reg1, at);
      __ Daddu(reg1, reg1, reg2);

      // Calculate total number of ones.
      __ li(at, m16);
      __ dsrl(reg2, reg1, 16);
      __ And(reg2, reg2, at);
      __ And(reg1, reg1, at);
      __ Daddu(i.OutputRegister(), reg1, reg2);
    } break;
    case kMips64Dpopcnt: {
      Register reg1 = kScratchReg;
      Register reg2 = kScratchReg2;
      uint64_t m1 = 0x5555555555555555;
      uint64_t m2 = 0x3333333333333333;
      uint64_t m4 = 0x0f0f0f0f0f0f0f0f;
      uint64_t m8 = 0x00ff00ff00ff00ff;
      uint64_t m16 = 0x0000ffff0000ffff;
      uint64_t m32 = 0x00000000ffffffff;

      // Put count of ones in every 2 bits into those 2 bits.
      __ li(at, m1);
      __ dsrl(reg1, i.InputRegister(0), 1);
      __ and_(reg2, i.InputRegister(0), at);
      __ and_(reg1, reg1, at);
      __ Daddu(reg1, reg1, reg2);

      // Put count of ones in every 4 bits into those 4 bits.
      __ li(at, m2);
      __ dsrl(reg2, reg1, 2);
      __ and_(reg2, reg2, at);
      __ and_(reg1, reg1, at);
      __ Daddu(reg1, reg1, reg2);

      // Put count of ones in every 8 bits into those 8 bits.
      __ li(at, m4);
      __ dsrl(reg2, reg1, 4);
      __ and_(reg2, reg2, at);
      __ and_(reg1, reg1, at);
      __ Daddu(reg1, reg1, reg2);

      // Put count of ones in every 16 bits into those 16 bits.
      __ li(at, m8);
      __ dsrl(reg2, reg1, 8);
      __ and_(reg2, reg2, at);
      __ and_(reg1, reg1, at);
      __ Daddu(reg1, reg1, reg2);

      // Put count of ones in every 32 bits into those 32 bits.
      __ li(at, m16);
      __ dsrl(reg2, reg1, 16);
      __ and_(reg2, reg2, at);
      __ and_(reg1, reg1, at);
      __ Daddu(reg1, reg1, reg2);

      // Calculate total number of ones.
      __ li(at, m32);
      __ dsrl32(reg2, reg1, 0);
      __ and_(reg2, reg2, at);
      __ and_(reg1, reg1, at);
      __ Daddu(i.OutputRegister(), reg1, reg2);
    } break;
    case kMips64Shl:
      if (instr->InputAt(1)->IsRegister()) {
        __ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        __ sll(i.OutputRegister(), i.InputRegister(0),
               static_cast<uint16_t>(imm));
      }
      break;
    case kMips64Shr:
      if (instr->InputAt(1)->IsRegister()) {
        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
        __ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
        __ srl(i.OutputRegister(), i.InputRegister(0),
               static_cast<uint16_t>(imm));
      }
      break;
    case kMips64Sar:
      if (instr->InputAt(1)->IsRegister()) {
        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
        __ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        __ sll(i.InputRegister(0), i.InputRegister(0), 0x0);
        __ sra(i.OutputRegister(), i.InputRegister(0),
               static_cast<uint16_t>(imm));
      }
      break;
    case kMips64Ext:
      __ Ext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
             i.InputInt8(2));
      break;
    case kMips64Ins:
      if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
        __ Ins(i.OutputRegister(), zero_reg, i.InputInt8(1), i.InputInt8(2));
      } else {
        __ Ins(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
               i.InputInt8(2));
      }
      break;
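    // MIPS64 encodes bit-field positions and sizes in 5-bit instruction
    // fields, so a 64-bit extract needs one of three encodings: dext
    // (pos < 32, size <= 32), dextm (size > 32), or dextu (pos >= 32). The
    // case below picks the variant that can represent the requested field.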
    case kMips64Dext: {
      int16_t pos = i.InputInt8(1);
      int16_t size = i.InputInt8(2);
      if (size > 0 && size <= 32 && pos >= 0 && pos < 32) {
        __ Dext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
                i.InputInt8(2));
      } else if (size > 32 && size <= 64 && pos > 0 && pos < 32) {
        __ Dextm(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
                 i.InputInt8(2));
      } else {
        DCHECK(size > 0 && size <= 32 && pos >= 32 && pos < 64);
        __ Dextu(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
                 i.InputInt8(2));
      }
      break;
    }
    case kMips64Dins:
      if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
        __ Dins(i.OutputRegister(), zero_reg, i.InputInt8(1), i.InputInt8(2));
      } else {
        __ Dins(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
                i.InputInt8(2));
      }
      break;
    case kMips64Dshl:
      if (instr->InputAt(1)->IsRegister()) {
        __ dsllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        if (imm < 32) {
          __ dsll(i.OutputRegister(), i.InputRegister(0),
                  static_cast<uint16_t>(imm));
        } else {
          __ dsll32(i.OutputRegister(), i.InputRegister(0),
                    static_cast<uint16_t>(imm - 32));
        }
      }
      break;
    case kMips64Dshr:
      if (instr->InputAt(1)->IsRegister()) {
        __ dsrlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        if (imm < 32) {
          __ dsrl(i.OutputRegister(), i.InputRegister(0),
                  static_cast<uint16_t>(imm));
        } else {
          __ dsrl32(i.OutputRegister(), i.InputRegister(0),
                    static_cast<uint16_t>(imm - 32));
        }
      }
      break;
    case kMips64Dsar:
      if (instr->InputAt(1)->IsRegister()) {
        __ dsrav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      } else {
        int64_t imm = i.InputOperand(1).immediate();
        if (imm < 32) {
          __ dsra(i.OutputRegister(), i.InputRegister(0), imm);
        } else {
          __ dsra32(i.OutputRegister(), i.InputRegister(0), imm - 32);
        }
      }
      break;
    case kMips64Ror:
      __ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Dror:
      __ Dror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
      break;
    case kMips64Tst:
      // Pseudo-instruction used for cmp/branch. No opcode emitted here.
      break;
    case kMips64Cmp:
      // Pseudo-instruction used for cmp/branch. No opcode emitted here.
      break;
    case kMips64Mov:
      // TODO(plind): Should we combine mov/li like this, or use separate instr?
      //    - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType
      if (HasRegisterInput(instr, 0)) {
        __ mov(i.OutputRegister(), i.InputRegister(0));
      } else {
        __ li(i.OutputRegister(), i.InputOperand(0));
      }
      break;

    case kMips64CmpS:
      // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
      break;
    case kMips64AddS:
      // TODO(plind): add special case: combine mult & add.
      __ add_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64SubS:
      __ sub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64MulS:
      // TODO(plind): add special case: right op is -1.0, see arm port.
      __ mul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64DivS:
      __ div_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64ModS: {
      // TODO(bmeurer): We should really get rid of this special instruction,
      // and generate a CallAddress instruction instead.
      FrameScope scope(masm(), StackFrame::MANUAL);
      __ PrepareCallCFunction(0, 2, kScratchReg);
      __ MovToFloatParameters(i.InputDoubleRegister(0),
                              i.InputDoubleRegister(1));
      // TODO(balazs.kilvady): implement mod_two_floats_operation(isolate())
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                       0, 2);
      // Move the result in the double result register.
      __ MovFromFloatResult(i.OutputSingleRegister());
      break;
    }
    case kMips64AbsS:
      __ abs_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
      break;
    case kMips64NegS:
      __ Neg_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
      break;
    case kMips64SqrtS: {
      __ sqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMips64MaxS:
      __ max_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64MinS:
      __ min_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64CmpD:
      // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
      break;
    case kMips64AddD:
      // TODO(plind): add special case: combine mult & add.
      __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64SubD:
      __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64MaddS:
      __ Madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
                i.InputFloatRegister(1), i.InputFloatRegister(2),
                kScratchDoubleReg);
      break;
    case kMips64MaddD:
      __ Madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputDoubleRegister(1), i.InputDoubleRegister(2),
                kScratchDoubleReg);
      break;
    case kMips64MsubS:
      __ Msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
                i.InputFloatRegister(1), i.InputFloatRegister(2),
                kScratchDoubleReg);
      break;
    case kMips64MsubD:
      __ Msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                i.InputDoubleRegister(1), i.InputDoubleRegister(2),
                kScratchDoubleReg);
      break;
    case kMips64MulD:
      // TODO(plind): add special case: right op is -1.0, see arm port.
      __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64DivD:
      __ div_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64ModD: {
      // TODO(bmeurer): We should really get rid of this special instruction,
      // and generate a CallAddress instruction instead.
      FrameScope scope(masm(), StackFrame::MANUAL);
      __ PrepareCallCFunction(0, 2, kScratchReg);
      __ MovToFloatParameters(i.InputDoubleRegister(0),
                              i.InputDoubleRegister(1));
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                       0, 2);
      // Move the result in the double result register.
      __ MovFromFloatResult(i.OutputDoubleRegister());
      break;
    }
    case kMips64AbsD:
      __ abs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kMips64NegD:
      __ Neg_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kMips64SqrtD: {
      __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    }
    case kMips64MaxD:
      __ max_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64MinD:
      __ min_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1));
      break;
    case kMips64Float64RoundDown: {
      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor);
      break;
    }
    case kMips64Float32RoundDown: {
      ASSEMBLE_ROUND_FLOAT_TO_FLOAT(floor);
      break;
    }
    case kMips64Float64RoundTruncate: {
      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc);
      break;
    }
    case kMips64Float32RoundTruncate: {
      ASSEMBLE_ROUND_FLOAT_TO_FLOAT(trunc);
      break;
    }
    case kMips64Float64RoundUp: {
      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil);
      break;
    }
    case kMips64Float32RoundUp: {
      ASSEMBLE_ROUND_FLOAT_TO_FLOAT(ceil);
      break;
    }
    case kMips64Float64RoundTiesEven: {
      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(round);
      break;
    }
    case kMips64Float32RoundTiesEven: {
      ASSEMBLE_ROUND_FLOAT_TO_FLOAT(round);
      break;
    }
    case kMips64Float32Max: {
      FPURegister dst = i.OutputSingleRegister();
      FPURegister src1 = i.InputSingleRegister(0);
      FPURegister src2 = i.InputSingleRegister(1);
      auto ool = new (zone()) OutOfLineFloat32Max(this, dst, src1, src2);
      __ Float32Max(dst, src1, src2, ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kMips64Float64Max: {
      FPURegister dst = i.OutputDoubleRegister();
      FPURegister src1 = i.InputDoubleRegister(0);
      FPURegister src2 = i.InputDoubleRegister(1);
      auto ool = new (zone()) OutOfLineFloat64Max(this, dst, src1, src2);
      __ Float64Max(dst, src1, src2, ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kMips64Float32Min: {
      FPURegister dst = i.OutputSingleRegister();
      FPURegister src1 = i.InputSingleRegister(0);
      FPURegister src2 = i.InputSingleRegister(1);
      auto ool = new (zone()) OutOfLineFloat32Min(this, dst, src1, src2);
      __ Float32Min(dst, src1, src2, ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kMips64Float64Min: {
      FPURegister dst = i.OutputDoubleRegister();
      FPURegister src1 = i.InputDoubleRegister(0);
      FPURegister src2 = i.InputDoubleRegister(1);
      auto ool = new (zone()) OutOfLineFloat64Min(this, dst, src1, src2);
      __ Float64Min(dst, src1, src2, ool->entry());
      __ bind(ool->exit());
      break;
    }
    case kMips64Float64SilenceNaN:
      __ FPUCanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kMips64CvtSD:
      __ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
      break;
    case kMips64CvtDS:
      __ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
      break;
    case kMips64CvtDW: {
      FPURegister scratch = kScratchDoubleReg;
      __ mtc1(i.InputRegister(0), scratch);
      __ cvt_d_w(i.OutputDoubleRegister(), scratch);
      break;
    }
    case kMips64CvtSW: {
      FPURegister scratch = kScratchDoubleReg;
      __ mtc1(i.InputRegister(0), scratch);
      __ cvt_s_w(i.OutputDoubleRegister(), scratch);
      break;
    }
    case kMips64CvtSUw: {
      __ Cvt_s_uw(i.OutputDoubleRegister(), i.InputRegister(0));
      break;
    }
    case kMips64CvtSL: {
      FPURegister scratch = kScratchDoubleReg;
      __ dmtc1(i.InputRegister(0), scratch);
      __ cvt_s_l(i.OutputDoubleRegister(), scratch);
      break;
    }
    case kMips64CvtDL: {
      FPURegister scratch = kScratchDoubleReg;
      __ dmtc1(i.InputRegister(0), scratch);
      __ cvt_d_l(i.OutputDoubleRegister(), scratch);
      break;
    }
    case kMips64CvtDUw: {
      __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0));
      break;
    }
    case kMips64CvtDUl: {
      __ Cvt_d_ul(i.OutputDoubleRegister(), i.InputRegister(0));
      break;
    }
    case kMips64CvtSUl: {
      __ Cvt_s_ul(i.OutputDoubleRegister(), i.InputRegister(0));
      break;
    }
    case kMips64FloorWD: {
      FPURegister scratch = kScratchDoubleReg;
      __ floor_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64CeilWD: {
      FPURegister scratch = kScratchDoubleReg;
      __ ceil_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64RoundWD: {
      FPURegister scratch = kScratchDoubleReg;
      __ round_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64TruncWD: {
      FPURegister scratch = kScratchDoubleReg;
      // Other arches use round to zero here, so we follow.
      __ trunc_w_d(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64FloorWS: {
      FPURegister scratch = kScratchDoubleReg;
      __ floor_w_s(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64CeilWS: {
      FPURegister scratch = kScratchDoubleReg;
      __ ceil_w_s(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64RoundWS: {
      FPURegister scratch = kScratchDoubleReg;
      __ round_w_s(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      break;
    }
    case kMips64TruncWS: {
      FPURegister scratch = kScratchDoubleReg;
      __ trunc_w_s(scratch, i.InputDoubleRegister(0));
      __ mfc1(i.OutputRegister(), scratch);
      // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
      // because INT32_MIN allows easier out-of-bounds detection.
      __ addiu(kScratchReg, i.OutputRegister(), 1);
      __ slt(kScratchReg2, kScratchReg, i.OutputRegister());
      __ Movn(i.OutputRegister(), kScratchReg, kScratchReg2);
      break;
    }
    case kMips64TruncLS: {
      FPURegister scratch = kScratchDoubleReg;
      Register tmp_fcsr = kScratchReg;
      Register result = kScratchReg2;

      bool load_status = instr->OutputCount() > 1;
      if (load_status) {
        // Save FCSR.
        __ cfc1(tmp_fcsr, FCSR);
        // Clear FPU flags.
        __ ctc1(zero_reg, FCSR);
      }
      // Other arches use round to zero here, so we follow.
      __ trunc_l_s(scratch, i.InputDoubleRegister(0));
      __ dmfc1(i.OutputRegister(), scratch);
      if (load_status) {
        __ cfc1(result, FCSR);
        // Check for overflow and NaNs.
        __ andi(result, result,
                (kFCSROverflowFlagMask | kFCSRInvalidOpFlagMask));
        __ Slt(result, zero_reg, result);
        __ xori(result, result, 1);
        __ mov(i.OutputRegister(1), result);
        // Restore FCSR
        __ ctc1(tmp_fcsr, FCSR);
      }
      break;
    }
    case kMips64TruncLD: {
      FPURegister scratch = kScratchDoubleReg;
      Register tmp_fcsr = kScratchReg;
      Register result = kScratchReg2;

      bool load_status = instr->OutputCount() > 1;
      if (load_status) {
        // Save FCSR.
        __ cfc1(tmp_fcsr, FCSR);
        // Clear FPU flags.
        __ ctc1(zero_reg, FCSR);
      }
      // Other arches use round to zero here, so we follow.
      __ trunc_l_d(scratch, i.InputDoubleRegister(0));
      __ dmfc1(i.OutputRegister(0), scratch);
      if (load_status) {
        __ cfc1(result, FCSR);
        // Check for overflow and NaNs.
        __ andi(result, result,
                (kFCSROverflowFlagMask | kFCSRInvalidOpFlagMask));
        __ Slt(result, zero_reg, result);
        __ xori(result, result, 1);
        __ mov(i.OutputRegister(1), result);
        // Restore FCSR
        __ ctc1(tmp_fcsr, FCSR);
      }
      break;
    }
    case kMips64TruncUwD: {
      FPURegister scratch = kScratchDoubleReg;
      // TODO(plind): Fix wrong param order of Trunc_uw_d() macro-asm function.
      __ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
      break;
    }
    case kMips64TruncUwS: {
      FPURegister scratch = kScratchDoubleReg;
      // TODO(plind): Fix wrong param order of Trunc_uw_s() macro-asm function.
      __ Trunc_uw_s(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
      // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
      // because 0 allows easier out-of-bounds detection.
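      // Trunc_uw_s signals overflow with UINT32_MAX, which is sign-extended
      // to -1 in the output register; adding 1 then yields 0 in kScratchReg,
      // and Movz forces the output to 0 in exactly that case.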
      __ addiu(kScratchReg, i.OutputRegister(), 1);
      __ Movz(i.OutputRegister(), zero_reg, kScratchReg);
      break;
    }
    case kMips64TruncUlS: {
      FPURegister scratch = kScratchDoubleReg;
      Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
      // TODO(plind): Fix wrong param order of Trunc_ul_s() macro-asm function.
      __ Trunc_ul_s(i.InputDoubleRegister(0), i.OutputRegister(), scratch,
                    result);
      break;
    }
    case kMips64TruncUlD: {
      FPURegister scratch = kScratchDoubleReg;
      Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
      // TODO(plind): Fix wrong param order of Trunc_ul_d() macro-asm function.
      __ Trunc_ul_d(i.InputDoubleRegister(0), i.OutputRegister(0), scratch,
                    result);
      break;
    }
    case kMips64BitcastDL:
      __ dmfc1(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kMips64BitcastLD:
      __ dmtc1(i.InputRegister(0), i.OutputDoubleRegister());
      break;
    case kMips64Float64ExtractLowWord32:
      __ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kMips64Float64ExtractHighWord32:
      __ FmoveHigh(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kMips64Float64InsertLowWord32:
      __ FmoveLow(i.OutputDoubleRegister(), i.InputRegister(1));
      break;
    case kMips64Float64InsertHighWord32:
      __ FmoveHigh(i.OutputDoubleRegister(), i.InputRegister(1));
      break;
    // ... more basic instructions ...

    case kMips64Seb:
      __ seb(i.OutputRegister(), i.InputRegister(0));
      break;
    case kMips64Seh:
      __ seh(i.OutputRegister(), i.InputRegister(0));
      break;
    case kMips64Lbu:
      __ lbu(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Lb:
      __ lb(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Sb:
      __ sb(i.InputOrZeroRegister(2), i.MemoryOperand());
      break;
    case kMips64Lhu:
      __ lhu(i.OutputRegister(), i.MemoryOperand());
      break;
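    // The capitalized U-prefixed macro-assembler ops below handle unaligned
    // accesses; on pre-r6 variants these typically expand to partial-word
    // load/store sequences, while r6 supports unaligned accesses natively.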
    case kMips64Ulhu:
      __ Ulhu(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Lh:
      __ lh(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Ulh:
      __ Ulh(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Sh:
      __ sh(i.InputOrZeroRegister(2), i.MemoryOperand());
      break;
    case kMips64Ush:
      __ Ush(i.InputOrZeroRegister(2), i.MemoryOperand(), kScratchReg);
      break;
    case kMips64Lw:
      __ lw(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Ulw:
      __ Ulw(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Lwu:
      __ lwu(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Ulwu:
      __ Ulwu(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Ld:
      __ ld(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Uld:
      __ Uld(i.OutputRegister(), i.MemoryOperand());
      break;
    case kMips64Sw:
      __ sw(i.InputOrZeroRegister(2), i.MemoryOperand());
      break;
    case kMips64Usw:
      __ Usw(i.InputOrZeroRegister(2), i.MemoryOperand());
      break;
    case kMips64Sd:
      __ sd(i.InputOrZeroRegister(2), i.MemoryOperand());
      break;
    case kMips64Usd:
      __ Usd(i.InputOrZeroRegister(2), i.MemoryOperand());
      break;
    case kMips64Lwc1: {
      __ lwc1(i.OutputSingleRegister(), i.MemoryOperand());
      break;
    }
    case kMips64Ulwc1: {
      __ Ulwc1(i.OutputSingleRegister(), i.MemoryOperand(), kScratchReg);
      break;
    }
    case kMips64Swc1: {
      size_t index = 0;
      MemOperand operand = i.MemoryOperand(&index);
      FPURegister ft = i.InputOrZeroSingleRegister(index);
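      // A zero immediate is canonicalized to kDoubleRegZero; materialize 0.0
      // into it lazily the first time it is needed.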
      if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
        __ Move(kDoubleRegZero, 0.0);
      }
      __ swc1(ft, operand);
      break;
    }
    case kMips64Uswc1: {
      size_t index = 0;
      MemOperand operand = i.MemoryOperand(&index);
      FPURegister ft = i.InputOrZeroSingleRegister(index);
      if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
        __ Move(kDoubleRegZero, 0.0);
      }
      __ Uswc1(ft, operand, kScratchReg);
      break;
    }
    case kMips64Ldc1:
      __ ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
      break;
    case kMips64Uldc1:
      __ Uldc1(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg);
      break;
    case kMips64Sdc1: {
      FPURegister ft = i.InputOrZeroDoubleRegister(2);
      if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
        __ Move(kDoubleRegZero, 0.0);
      }
      __ sdc1(ft, i.MemoryOperand());
      break;
    }
    case kMips64Usdc1: {
      FPURegister ft = i.InputOrZeroDoubleRegister(2);
      if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
        __ Move(kDoubleRegZero, 0.0);
      }
      __ Usdc1(ft, i.MemoryOperand(), kScratchReg);
      break;
    }
    case kMips64Push:
      if (instr->InputAt(0)->IsFPRegister()) {
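        // Store below sp first, then decrement it; the value lands in the
        // slot that the following Subu claims.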
        __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
        __ Subu(sp, sp, Operand(kDoubleSize));
        frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
      } else {
        __ Push(i.InputRegister(0));
        frame_access_state()->IncreaseSPDelta(1);
      }
      break;
    case kMips64StackClaim: {
      __ Dsubu(sp, sp, Operand(i.InputInt32(0)));
      frame_access_state()->IncreaseSPDelta(i.InputInt32(0) / kPointerSize);
      break;
    }
    case kMips64StoreToStackSlot: {
      if (instr->InputAt(0)->IsFPRegister()) {
        __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
      } else {
        __ sd(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
      }
      break;
    }
    case kMips64ByteSwap64: {
      __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 8);
      break;
    }
    case kMips64ByteSwap32: {
      __ ByteSwapUnsigned(i.OutputRegister(0), i.InputRegister(0), 4);
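      // The swapped word ends up in the upper 32 bits of the register; shift
      // it down, zero-extended, with dsrl32.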
      __ dsrl32(i.OutputRegister(0), i.OutputRegister(0), 0);
      break;
    }
    case kCheckedLoadInt8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lb);
      break;
    case kCheckedLoadUint8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lbu);
      break;
    case kCheckedLoadInt16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lh);
      break;
    case kCheckedLoadUint16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lhu);
      break;
    case kCheckedLoadWord32:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lw);
      break;
    case kCheckedLoadWord64:
      ASSEMBLE_CHECKED_LOAD_INTEGER(ld);
      break;
    case kCheckedLoadFloat32:
      ASSEMBLE_CHECKED_LOAD_FLOAT(Single, lwc1);
      break;
    case kCheckedLoadFloat64:
      ASSEMBLE_CHECKED_LOAD_FLOAT(Double, ldc1);
      break;
    case kCheckedStoreWord8:
      ASSEMBLE_CHECKED_STORE_INTEGER(sb);
      break;
    case kCheckedStoreWord16:
      ASSEMBLE_CHECKED_STORE_INTEGER(sh);
      break;
    case kCheckedStoreWord32:
      ASSEMBLE_CHECKED_STORE_INTEGER(sw);
      break;
    case kCheckedStoreWord64:
      ASSEMBLE_CHECKED_STORE_INTEGER(sd);
      break;
    case kCheckedStoreFloat32:
      ASSEMBLE_CHECKED_STORE_FLOAT(Single, swc1);
      break;
    case kCheckedStoreFloat64:
      ASSEMBLE_CHECKED_STORE_FLOAT(Double, sdc1);
      break;
    case kAtomicLoadInt8:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(lb);
      break;
    case kAtomicLoadUint8:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(lbu);
      break;
    case kAtomicLoadInt16:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(lh);
      break;
    case kAtomicLoadUint16:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(lhu);
      break;
    case kAtomicLoadWord32:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(lw);
      break;
    case kAtomicStoreWord8:
      ASSEMBLE_ATOMIC_STORE_INTEGER(sb);
      break;
    case kAtomicStoreWord16:
      ASSEMBLE_ATOMIC_STORE_INTEGER(sh);
      break;
    case kAtomicStoreWord32:
      ASSEMBLE_ATOMIC_STORE_INTEGER(sw);
      break;
    case kMips64AssertEqual:
      __ Assert(eq, static_cast<BailoutReason>(i.InputOperand(2).immediate()),
                i.InputRegister(0), Operand(i.InputRegister(1)));
      break;
  }
  return kSuccess;
}  // NOLINT(readability/fn_size)


#define UNSUPPORTED_COND(opcode, condition) \
  OFStream out(stdout); \
  out << "Unsupported " << #opcode << " condition: \"" << condition << "\""; \
  UNIMPLEMENTED();

static bool convertCondition(FlagsCondition condition, Condition& cc) {
  switch (condition) {
    case kEqual:
      cc = eq;
      return true;
    case kNotEqual:
      cc = ne;
      return true;
    case kUnsignedLessThan:
      cc = lt;
      return true;
    case kUnsignedGreaterThanOrEqual:
      cc = uge;
      return true;
    case kUnsignedLessThanOrEqual:
      cc = le;
      return true;
    case kUnsignedGreaterThan:
      cc = ugt;
      return true;
    default:
      break;
  }
  return false;
}

void AssembleBranchToLabels(CodeGenerator* gen, MacroAssembler* masm,
                            Instruction* instr, FlagsCondition condition,
                            Label* tlabel, Label* flabel, bool fallthru) {
#undef __
#define __ masm->
  MipsOperandConverter i(gen, instr);

  Condition cc = kNoCondition;
  // MIPS does not have condition code flags, so compare and branch are
  // implemented differently than on other architectures. The compare
  // operations emit MIPS pseudo-instructions, which are handled here by
  // branch instructions that do the actual comparison. It is essential that
  // the input registers to the compare pseudo-op are not modified before this
  // branch op, as they are tested here.

  if (instr->arch_opcode() == kMips64Tst) {
    cc = FlagsConditionToConditionTst(condition);
    __ And(at, i.InputRegister(0), i.InputOperand(1));
    __ Branch(tlabel, cc, at, Operand(zero_reg));
  } else if (instr->arch_opcode() == kMips64Dadd ||
             instr->arch_opcode() == kMips64Dsub) {
    cc = FlagsConditionToConditionOvf(condition);
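    // 32-bit overflow check: the upper word of the 64-bit result (dsra32)
    // differs from the sign-extension of the lower word (sra) iff the 32-bit
    // operation overflowed.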
    __ dsra32(kScratchReg, i.OutputRegister(), 0);
    __ sra(at, i.OutputRegister(), 31);
    __ Branch(tlabel, cc, at, Operand(kScratchReg));
  } else if (instr->arch_opcode() == kMips64DaddOvf) {
    switch (condition) {
      case kOverflow:
        __ DaddBranchOvf(i.OutputRegister(), i.InputRegister(0),
                         i.InputOperand(1), tlabel, flabel);
        break;
      case kNotOverflow:
        __ DaddBranchOvf(i.OutputRegister(), i.InputRegister(0),
                         i.InputOperand(1), flabel, tlabel);
        break;
      default:
        UNSUPPORTED_COND(kMips64DaddOvf, condition);
        break;
    }
  } else if (instr->arch_opcode() == kMips64DsubOvf) {
    switch (condition) {
      case kOverflow:
        __ DsubBranchOvf(i.OutputRegister(), i.InputRegister(0),
                         i.InputOperand(1), tlabel, flabel);
        break;
      case kNotOverflow:
        __ DsubBranchOvf(i.OutputRegister(), i.InputRegister(0),
                         i.InputOperand(1), flabel, tlabel);
        break;
      default:
        UNSUPPORTED_COND(kMips64DsubOvf, condition);
        break;
    }
  } else if (instr->arch_opcode() == kMips64MulOvf) {
    switch (condition) {
      case kOverflow: {
        __ MulBranchOvf(i.OutputRegister(), i.InputRegister(0),
                        i.InputOperand(1), tlabel, flabel, kScratchReg);
      } break;
      case kNotOverflow: {
        __ MulBranchOvf(i.OutputRegister(), i.InputRegister(0),
                        i.InputOperand(1), flabel, tlabel, kScratchReg);
      } break;
      default:
        UNSUPPORTED_COND(kMips64MulOvf, condition);
        break;
    }
  } else if (instr->arch_opcode() == kMips64Cmp) {
    cc = FlagsConditionToConditionCmp(condition);
    __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
  } else if (instr->arch_opcode() == kMips64CmpS) {
    if (!convertCondition(condition, cc)) {
      UNSUPPORTED_COND(kMips64CmpS, condition);
    }
    FPURegister left = i.InputOrZeroSingleRegister(0);
    FPURegister right = i.InputOrZeroSingleRegister(1);
    if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
        !__ IsDoubleZeroRegSet()) {
      __ Move(kDoubleRegZero, 0.0);
    }
    __ BranchF32(tlabel, nullptr, cc, left, right);
  } else if (instr->arch_opcode() == kMips64CmpD) {
    if (!convertCondition(condition, cc)) {
      UNSUPPORTED_COND(kMips64CmpD, condition);
    }
    FPURegister left = i.InputOrZeroDoubleRegister(0);
    FPURegister right = i.InputOrZeroDoubleRegister(1);
    if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
        !__ IsDoubleZeroRegSet()) {
      __ Move(kDoubleRegZero, 0.0);
    }
    __ BranchF64(tlabel, nullptr, cc, left, right);
  } else {
    PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
           instr->arch_opcode());
    UNIMPLEMENTED();
  }
  if (!fallthru) __ Branch(flabel);  // no fallthru to flabel.
#undef __
#define __ masm()->
}

// Assembles branches after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;

  AssembleBranchToLabels(this, masm(), instr, branch->condition, tlabel, flabel,
                         branch->fallthru);
}


void CodeGenerator::AssembleArchJump(RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
}

void CodeGenerator::AssembleArchTrap(Instruction* instr,
                                     FlagsCondition condition) {
  class OutOfLineTrap final : public OutOfLineCode {
   public:
    OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
        : OutOfLineCode(gen),
          frame_elided_(frame_elided),
          instr_(instr),
          gen_(gen) {}
    void Generate() final {
      MipsOperandConverter i(gen_, instr_);
      Builtins::Name trap_id =
          static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));
      bool old_has_frame = __ has_frame();
      if (frame_elided_) {
        __ set_has_frame(true);
        __ EnterFrame(StackFrame::WASM_COMPILED);
      }
      GenerateCallToTrap(trap_id);
      if (frame_elided_) {
        __ set_has_frame(old_has_frame);
      }
    }

   private:
    void GenerateCallToTrap(Builtins::Name trap_id) {
      if (trap_id == Builtins::builtin_count) {
        // We cannot test calls to the runtime in cctest/test-run-wasm.
        // Therefore we emit a call to C here instead of a call to the runtime.
        // We use the context register as the scratch register, because we do
        // not have a context here.
        __ PrepareCallCFunction(0, 0, cp);
        __ CallCFunction(
            ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
            0);
        __ LeaveFrame(StackFrame::WASM_COMPILED);
        __ Ret();
      } else {
        gen_->AssembleSourcePosition(instr_);
        __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
                RelocInfo::CODE_TARGET);
        ReferenceMap* reference_map =
            new (gen_->zone()) ReferenceMap(gen_->zone());
        gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
                              Safepoint::kNoLazyDeopt);
        if (FLAG_debug_code) {
          __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
        }
      }
    }
    bool frame_elided_;
    Instruction* instr_;
    CodeGenerator* gen_;
  };
  bool frame_elided = !frame_access_state()->has_frame();
  auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
  Label* tlabel = ool->entry();
  AssembleBranchToLabels(this, masm(), instr, condition, tlabel, nullptr, true);
}

// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  MipsOperandConverter i(this, instr);
  Label done;

  // Materialize a full 32-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label false_value;
  DCHECK_NE(0u, instr->OutputCount());
  Register result = i.OutputRegister(instr->OutputCount() - 1);
  Condition cc = kNoCondition;
  // MIPS does not have condition code flags, so compare and branch are
  // implemented differently than on other architectures. The compare
  // operations emit MIPS pseudo-instructions, which are checked and handled
  // here.

  if (instr->arch_opcode() == kMips64Tst) {
    cc = FlagsConditionToConditionTst(condition);
    if (instr->InputAt(1)->IsImmediate() &&
        base::bits::IsPowerOfTwo64(i.InputOperand(1).immediate())) {
      uint16_t pos =
          base::bits::CountTrailingZeros64(i.InputOperand(1).immediate());
      __ ExtractBits(result, i.InputRegister(0), pos, 1);
    } else {
      __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1));
      __ Sltu(result, zero_reg, kScratchReg);
    }
    if (cc == eq) {
      // Sltu produces 0 for equality, invert the result.
      __ xori(result, result, 1);
    }
    return;
  } else if (instr->arch_opcode() == kMips64Dadd ||
             instr->arch_opcode() == kMips64Dsub) {
    cc = FlagsConditionToConditionOvf(condition);
    // The overflow check yields 1 or 0 in result: overflow occurred iff the
    // sign bit of the 64-bit result differs from bit 31 of its lower word.
    __ dsrl32(kScratchReg, i.OutputRegister(), 31);
    __ srl(at, i.OutputRegister(), 31);
    __ xor_(result, kScratchReg, at);
    if (cc == eq)  // Toggle result for not overflow.
      __ xori(result, result, 1);
    return;
  } else if (instr->arch_opcode() == kMips64DaddOvf ||
             instr->arch_opcode() == kMips64DsubOvf ||
             instr->arch_opcode() == kMips64MulOvf) {
    Label flabel, tlabel;
    switch (instr->arch_opcode()) {
      case kMips64DaddOvf:
        __ DaddBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
                           i.InputOperand(1), &flabel);

        break;
      case kMips64DsubOvf:
        __ DsubBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
                           i.InputOperand(1), &flabel);
        break;
      case kMips64MulOvf:
        __ MulBranchNoOvf(i.OutputRegister(), i.InputRegister(0),
                          i.InputOperand(1), &flabel, kScratchReg);
        break;
      default:
        UNREACHABLE();
        break;
    }
    __ li(result, 1);
    __ Branch(&tlabel);
    __ bind(&flabel);
    __ li(result, 0);
    __ bind(&tlabel);
  } else if (instr->arch_opcode() == kMips64Cmp) {
    cc = FlagsConditionToConditionCmp(condition);
    switch (cc) {
      case eq:
      case ne: {
        Register left = i.InputRegister(0);
        Operand right = i.InputOperand(1);
        Register select;
        if (instr->InputAt(1)->IsImmediate() && right.immediate() == 0) {
          // Pass left operand if right is zero.
          select = left;
        } else {
          __ Dsubu(kScratchReg, left, right);
          select = kScratchReg;
        }
        __ Sltu(result, zero_reg, select);
        if (cc == eq) {
          // Sltu produces 0 for equality, invert the result.
          __ xori(result, result, 1);
        }
      } break;
      case lt:
      case ge: {
        Register left = i.InputRegister(0);
        Operand right = i.InputOperand(1);
        __ Slt(result, left, right);
        if (cc == ge) {
          __ xori(result, result, 1);
        }
      } break;
      case gt:
      case le: {
        Register left = i.InputRegister(1);
        Operand right = i.InputOperand(0);
        __ Slt(result, left, right);
        if (cc == le) {
          __ xori(result, result, 1);
        }
      } break;
      case lo:
      case hs: {
        Register left = i.InputRegister(0);
        Operand right = i.InputOperand(1);
        __ Sltu(result, left, right);
        if (cc == hs) {
          __ xori(result, result, 1);
        }
      } break;
      case hi:
      case ls: {
        Register left = i.InputRegister(1);
        Operand right = i.InputOperand(0);
        __ Sltu(result, left, right);
        if (cc == ls) {
          __ xori(result, result, 1);
        }
      } break;
      default:
        UNREACHABLE();
    }
    return;
  } else if (instr->arch_opcode() == kMips64CmpD ||
             instr->arch_opcode() == kMips64CmpS) {
    FPURegister left = i.InputOrZeroDoubleRegister(0);
    FPURegister right = i.InputOrZeroDoubleRegister(1);
    if ((left.is(kDoubleRegZero) || right.is(kDoubleRegZero)) &&
        !__ IsDoubleZeroRegSet()) {
      __ Move(kDoubleRegZero, 0.0);
    }
    bool predicate;
    FPUCondition cc = FlagsConditionToConditionCmpFPU(predicate, condition);
    if (kArchVariant != kMips64r6) {
      __ li(result, Operand(1));
      if (instr->arch_opcode() == kMips64CmpD) {
        __ c(cc, D, left, right);
      } else {
        DCHECK(instr->arch_opcode() == kMips64CmpS);
        __ c(cc, S, left, right);
      }
      if (predicate) {
        __ Movf(result, zero_reg);
      } else {
        __ Movt(result, zero_reg);
      }
    } else {
      if (instr->arch_opcode() == kMips64CmpD) {
        __ cmp(cc, L, kDoubleCompareReg, left, right);
      } else {
        DCHECK(instr->arch_opcode() == kMips64CmpS);
        __ cmp(cc, W, kDoubleCompareReg, left, right);
      }
      __ dmfc1(result, kDoubleCompareReg);
      __ andi(result, result, 1);  // Cmp returns all 1's/0's, use only LSB.

      if (!predicate)  // Toggle result for not equal.
        __ xori(result, result, 1);
    }
    return;
  } else {
    PrintF("AssembleArchBoolean: Unimplemented arch_opcode: %d\n",
           instr->arch_opcode());
    TRACE_UNIMPL();
    UNIMPLEMENTED();
  }
}


void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
  MipsOperandConverter i(this, instr);
  Register input = i.InputRegister(0);
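  // Linear search over the (value, target) pairs; the default target is
  // InputRpo(1).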
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    __ li(at, Operand(i.InputInt32(index + 0)));
    __ beq(input, at, GetLabel(i.InputRpo(index + 1)));
  }
  __ nop();  // Branch delay slot of the last beq.
  AssembleArchJump(i.InputRpo(1));
}

void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  MipsOperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  size_t const case_count = instr->InputCount() - 2;
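  // Unsigned bounds check: out-of-range inputs jump to the default target
  // (InputRpo(1)); in-range inputs index into the inline jump table.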

  __ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count));
  __ GenerateSwitchTable(input, case_count, [&i, this](size_t index) {
    return GetLabel(i.InputRpo(index + 2));
  });
}

CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
    int deoptimization_id, SourcePosition pos) {
  DeoptimizeKind deoptimization_kind = GetDeoptimizationKind(deoptimization_id);
  DeoptimizeReason deoptimization_reason =
      GetDeoptimizationReason(deoptimization_id);
  Deoptimizer::BailoutType bailout_type =
      deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
                                                   : Deoptimizer::EAGER;
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, bailout_type);
  if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
  __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
  return kSuccess;
}

void CodeGenerator::FinishFrame(Frame* frame) {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();

  const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
  if (saves_fpu != 0) {
    int count = base::bits::CountPopulation32(saves_fpu);
    DCHECK(kNumCalleeSavedFPU == count);
    frame->AllocateSavedCalleeRegisterSlots(count *
                                            (kDoubleSize / kPointerSize));
  }

  const RegList saves = descriptor->CalleeSavedRegisters();
  if (saves != 0) {
    int count = base::bits::CountPopulation32(saves);
    DCHECK(kNumCalleeSaved == count + 1);
    frame->AllocateSavedCalleeRegisterSlots(count);
  }
}

void CodeGenerator::AssembleConstructFrame() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (frame_access_state()->has_frame()) {
    if (descriptor->IsCFunctionCall()) {
      __ Push(ra, fp);
      __ mov(fp, sp);
    } else if (descriptor->IsJSFunctionCall()) {
      __ Prologue(this->info()->GeneratePreagedPrologue());
      if (descriptor->PushArgumentCount()) {
        __ Push(kJavaScriptCallArgCountRegister);
      }
    } else {
      __ StubPrologue(info()->GetOutputStackFrameType());
    }
  }

  int shrink_slots =
      frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();

  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the unoptimized
    // frame is still on the stack. Optimized code uses OSR values directly from
    // the unoptimized frame. Thus, all that needs to be done is to allocate the
    // remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
  }

  if (shrink_slots > 0) {
    __ Dsubu(sp, sp, Operand(shrink_slots * kPointerSize));
  }

  const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
  if (saves_fpu != 0) {
    // Save callee-saved FPU registers.
    __ MultiPushFPU(saves_fpu);
    DCHECK(kNumCalleeSavedFPU == base::bits::CountPopulation32(saves_fpu));
  }

  const RegList saves = descriptor->CalleeSavedRegisters();
  if (saves != 0) {
    // Save callee-saved registers.
    __ MultiPush(saves);
    DCHECK(kNumCalleeSaved == base::bits::CountPopulation32(saves) + 1);
  }
}

void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();

  // Restore GP registers.
  const RegList saves = descriptor->CalleeSavedRegisters();
  if (saves != 0) {
    __ MultiPop(saves);
  }

  // Restore FPU registers.
  const RegList saves_fpu = descriptor->CalleeSavedFPRegisters();
  if (saves_fpu != 0) {
    __ MultiPopFPU(saves_fpu);
  }

  MipsOperandConverter g(this, nullptr);
  if (descriptor->IsCFunctionCall()) {
    AssembleDeconstructFrame();
  } else if (frame_access_state()->has_frame()) {
    // Canonicalize JSFunction return sites for now unless they have a
    // variable number of stack slot pops.
    if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
      if (return_label_.is_bound()) {
        __ Branch(&return_label_);
        return;
      } else {
        __ bind(&return_label_);
        AssembleDeconstructFrame();
      }
    } else {
      AssembleDeconstructFrame();
    }
  }
  int pop_count = static_cast<int>(descriptor->StackParameterCount());
  if (pop->IsImmediate()) {
    DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
    pop_count += g.ToConstant(pop).ToInt32();
  } else {
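    // Dynamic pop count: scale the slot count to bytes and bump sp directly
    // before the final return.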
    Register pop_reg = g.ToRegister(pop);
    __ dsll(pop_reg, pop_reg, kPointerSizeLog2);
    __ Daddu(sp, sp, pop_reg);
  }
  if (pop_count != 0) {
    __ DropAndRet(pop_count);
  } else {
    __ Ret();
  }
}


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  MipsOperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ mov(g.ToRegister(destination), src);
    } else {
      __ sd(src, g.ToMemOperand(destination));
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsRegister()) {
      __ ld(g.ToRegister(destination), src);
    } else {
      Register temp = kScratchReg;
      __ ld(temp, src);
      __ sd(temp, g.ToMemOperand(destination));
    }
  } else if (source->IsConstant()) {
    Constant src = g.ToConstant(source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst =
          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
      switch (src.type()) {
        case Constant::kInt32:
          if (RelocInfo::IsWasmSizeReference(src.rmode())) {
            __ li(dst, Operand(src.ToInt32(), src.rmode()));
          } else {
            __ li(dst, Operand(src.ToInt32()));
          }
          break;
        case Constant::kFloat32:
          __ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kInt64:
          if (RelocInfo::IsWasmPtrReference(src.rmode())) {
            __ li(dst, Operand(src.ToInt64(), src.rmode()));
          } else {
            DCHECK(!RelocInfo::IsWasmSizeReference(src.rmode()));
            __ li(dst, Operand(src.ToInt64()));
          }
          break;
        case Constant::kFloat64:
          __ li(dst, isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ li(dst, Operand(src.ToExternalReference()));
          break;
        case Constant::kHeapObject: {
          Handle<HeapObject> src_object = src.ToHeapObject();
          Heap::RootListIndex index;
          if (IsMaterializableFromRoot(src_object, &index)) {
            __ LoadRoot(dst, index);
          } else {
            __ li(dst, src_object);
          }
          break;
        }
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(titzer): loading RPO numbers on mips64.
          break;
      }
      if (destination->IsStackSlot()) __ sd(dst, g.ToMemOperand(destination));
    } else if (src.type() == Constant::kFloat32) {
      if (destination->IsFPStackSlot()) {
        MemOperand dst = g.ToMemOperand(destination);
        if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
          __ sw(zero_reg, dst);
        } else {
          __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
          __ sw(at, dst);
        }
      } else {
        DCHECK(destination->IsFPRegister());
        FloatRegister dst = g.ToSingleRegister(destination);
        __ Move(dst, src.ToFloat32());
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      DoubleRegister dst = destination->IsFPRegister()
                               ? g.ToDoubleRegister(destination)
                               : kScratchDoubleReg;
      __ Move(dst, src.ToFloat64());
      if (destination->IsFPStackSlot()) {
        __ sdc1(dst, g.ToMemOperand(destination));
      }
    }
  } else if (source->IsFPRegister()) {
    FPURegister src = g.ToDoubleRegister(source);
    if (destination->IsFPRegister()) {
      FPURegister dst = g.ToDoubleRegister(destination);
      __ Move(dst, src);
    } else {
      DCHECK(destination->IsFPStackSlot());
      __ sdc1(src, g.ToMemOperand(destination));
    }
  } else if (source->IsFPStackSlot()) {
    DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsFPRegister()) {
      __ ldc1(g.ToDoubleRegister(destination), src);
    } else {
      FPURegister temp = kScratchDoubleReg;
      __ ldc1(temp, src);
      __ sdc1(temp, g.ToMemOperand(destination));
    }
  } else {
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  MipsOperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    Register temp = kScratchReg;
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ Move(temp, src);
      __ Move(src, dst);
      __ Move(dst, temp);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ mov(temp, src);
      __ ld(src, dst);
      __ sd(temp, dst);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsStackSlot());
    Register temp_0 = kScratchReg;
    Register temp_1 = kScratchReg2;
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    __ ld(temp_0, src);
    __ ld(temp_1, dst);
    __ sd(temp_0, dst);
    __ sd(temp_1, src);
  } else if (source->IsFPRegister()) {
    FPURegister temp = kScratchDoubleReg;
    FPURegister src = g.ToDoubleRegister(source);
    if (destination->IsFPRegister()) {
      FPURegister dst = g.ToDoubleRegister(destination);
      __ Move(temp, src);
      __ Move(src, dst);
      __ Move(dst, temp);
    } else {
      DCHECK(destination->IsFPStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ Move(temp, src);
      __ ldc1(src, dst);
      __ sdc1(temp, dst);
    }
  } else if (source->IsFPStackSlot()) {
    DCHECK(destination->IsFPStackSlot());
    Register temp_0 = kScratchReg;
    FPURegister temp_1 = kScratchDoubleReg;
    MemOperand src0 = g.ToMemOperand(source);
    MemOperand src1(src0.rm(), src0.offset() + kIntSize);
    MemOperand dst0 = g.ToMemOperand(destination);
    MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
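    // Swap using one FPU temp (holds the destination double) and one GP temp
    // (copies the source to the destination 32 bits at a time).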
    __ ldc1(temp_1, dst0);  // Save destination in temp_1.
    __ lw(temp_0, src0);    // Then use temp_0 to copy source to destination.
    __ sw(temp_0, dst0);
    __ lw(temp_0, src1);
    __ sw(temp_0, dst1);
    __ sdc1(temp_1, src0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
  // On 64-bit MIPS we emit the jump tables inline.
  UNREACHABLE();
}


void CodeGenerator::EnsureSpaceForLazyDeopt() {
  if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
    return;
  }

  int space_needed = Deoptimizer::patch_size();
  // Ensure that we have enough space after the previous lazy-bailout
  // instruction for patching the code here.
  int current_pc = masm()->pc_offset();
  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
    // Block trampoline pool emission for the duration of the padding.
    v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
        masm());
    int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
    DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
    while (padding_size > 0) {
      __ nop();
      padding_size -= v8::internal::Assembler::kInstrSize;
    }
  }
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8