1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/base/adapters.h"
6 #include "src/base/bits.h"
7 #include "src/compiler/instruction-selector-impl.h"
8 #include "src/compiler/node-matchers.h"
9 #include "src/compiler/node-properties.h"
10
11 namespace v8 {
12 namespace internal {
13 namespace compiler {
14
15 #define TRACE_UNIMPL() \
16 PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
17
18 #define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
19
20
21 // Adds Mips-specific methods for generating InstructionOperands.
22 class Mips64OperandGenerator final : public OperandGenerator {
23 public:
Mips64OperandGenerator(InstructionSelector * selector)24 explicit Mips64OperandGenerator(InstructionSelector* selector)
25 : OperandGenerator(selector) {}
26
UseOperand(Node * node,InstructionCode opcode)27 InstructionOperand UseOperand(Node* node, InstructionCode opcode) {
28 if (CanBeImmediate(node, opcode)) {
29 return UseImmediate(node);
30 }
31 return UseRegister(node);
32 }
33
34 // Use the zero register if the node has the immediate value zero, otherwise
35 // assign a register.
UseRegisterOrImmediateZero(Node * node)36 InstructionOperand UseRegisterOrImmediateZero(Node* node) {
37 if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
38 (IsFloatConstant(node) &&
39 (bit_cast<int64_t>(GetFloatConstantValue(node)) == 0))) {
40 return UseImmediate(node);
41 }
42 return UseRegister(node);
43 }
44
IsIntegerConstant(Node * node)45 bool IsIntegerConstant(Node* node) {
46 return (node->opcode() == IrOpcode::kInt32Constant) ||
47 (node->opcode() == IrOpcode::kInt64Constant);
48 }
49
GetIntegerConstantValue(Node * node)50 int64_t GetIntegerConstantValue(Node* node) {
51 if (node->opcode() == IrOpcode::kInt32Constant) {
52 return OpParameter<int32_t>(node->op());
53 }
54 DCHECK_EQ(IrOpcode::kInt64Constant, node->opcode());
55 return OpParameter<int64_t>(node->op());
56 }
57
IsFloatConstant(Node * node)58 bool IsFloatConstant(Node* node) {
59 return (node->opcode() == IrOpcode::kFloat32Constant) ||
60 (node->opcode() == IrOpcode::kFloat64Constant);
61 }
62
GetFloatConstantValue(Node * node)63 double GetFloatConstantValue(Node* node) {
64 if (node->opcode() == IrOpcode::kFloat32Constant) {
65 return OpParameter<float>(node->op());
66 }
67 DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
68 return OpParameter<double>(node->op());
69 }
70
CanBeImmediate(Node * node,InstructionCode mode)71 bool CanBeImmediate(Node* node, InstructionCode mode) {
72 return IsIntegerConstant(node) &&
73 CanBeImmediate(GetIntegerConstantValue(node), mode);
74 }
75
CanBeImmediate(int64_t value,InstructionCode opcode)76 bool CanBeImmediate(int64_t value, InstructionCode opcode) {
77 switch (ArchOpcodeField::decode(opcode)) {
78 case kMips64Shl:
79 case kMips64Sar:
80 case kMips64Shr:
81 return is_uint5(value);
82 case kMips64Dshl:
83 case kMips64Dsar:
84 case kMips64Dshr:
85 return is_uint6(value);
86 case kMips64Add:
87 case kMips64And32:
88 case kMips64And:
89 case kMips64Dadd:
90 case kMips64Or32:
91 case kMips64Or:
92 case kMips64Tst:
93 case kMips64Xor:
94 return is_uint16(value);
95 case kMips64Lb:
96 case kMips64Lbu:
97 case kMips64Sb:
98 case kMips64Lh:
99 case kMips64Lhu:
100 case kMips64Sh:
101 case kMips64Lw:
102 case kMips64Sw:
103 case kMips64Ld:
104 case kMips64Sd:
105 case kMips64Lwc1:
106 case kMips64Swc1:
107 case kMips64Ldc1:
108 case kMips64Sdc1:
109 return is_int32(value);
110 default:
111 return is_int16(value);
112 }
113 }
114
115 private:
ImmediateFitsAddrMode1Instruction(int32_t imm) const116 bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
117 TRACE_UNIMPL();
118 return false;
119 }
120 };
121
122
VisitRR(InstructionSelector * selector,ArchOpcode opcode,Node * node)123 static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
124 Node* node) {
125 Mips64OperandGenerator g(selector);
126 selector->Emit(opcode, g.DefineAsRegister(node),
127 g.UseRegister(node->InputAt(0)));
128 }
129
VisitRRI(InstructionSelector * selector,ArchOpcode opcode,Node * node)130 static void VisitRRI(InstructionSelector* selector, ArchOpcode opcode,
131 Node* node) {
132 Mips64OperandGenerator g(selector);
133 int32_t imm = OpParameter<int32_t>(node->op());
134 selector->Emit(opcode, g.DefineAsRegister(node),
135 g.UseRegister(node->InputAt(0)), g.UseImmediate(imm));
136 }
137
VisitRRIR(InstructionSelector * selector,ArchOpcode opcode,Node * node)138 static void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode,
139 Node* node) {
140 Mips64OperandGenerator g(selector);
141 int32_t imm = OpParameter<int32_t>(node->op());
142 selector->Emit(opcode, g.DefineAsRegister(node),
143 g.UseRegister(node->InputAt(0)), g.UseImmediate(imm),
144 g.UseRegister(node->InputAt(1)));
145 }
146
VisitRRR(InstructionSelector * selector,ArchOpcode opcode,Node * node)147 static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
148 Node* node) {
149 Mips64OperandGenerator g(selector);
150 selector->Emit(opcode, g.DefineAsRegister(node),
151 g.UseRegister(node->InputAt(0)),
152 g.UseRegister(node->InputAt(1)));
153 }
154
VisitRRRR(InstructionSelector * selector,ArchOpcode opcode,Node * node)155 void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
156 Mips64OperandGenerator g(selector);
157 selector->Emit(
158 opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
159 g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2)));
160 }
161
VisitRRO(InstructionSelector * selector,ArchOpcode opcode,Node * node)162 static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
163 Node* node) {
164 Mips64OperandGenerator g(selector);
165 selector->Emit(opcode, g.DefineAsRegister(node),
166 g.UseRegister(node->InputAt(0)),
167 g.UseOperand(node->InputAt(1), opcode));
168 }
169
// Matches the pattern (Load64(base, offset) >> 32) so it can be replaced by a
// single 32-bit sign-extending load of the high word.
struct ExtendingLoadMatcher {
  ExtendingLoadMatcher(Node* node, InstructionSelector* selector)
      : matches_(false), selector_(selector), base_(nullptr), immediate_(0) {
    Initialize(node);
  }

  // True when the pattern was recognized; the accessors below are only valid
  // in that case (each DCHECKs it).
  bool Matches() const { return matches_; }

  Node* base() const {
    DCHECK(Matches());
    return base_;
  }
  int64_t immediate() const {
    DCHECK(Matches());
    return immediate_;
  }
  ArchOpcode opcode() const {
    DCHECK(Matches());
    return opcode_;
  }

 private:
  bool matches_;                  // Set only when the full pattern matched.
  InstructionSelector* selector_;
  Node* base_;                    // Base address input of the matched load.
  int64_t immediate_;             // Offset of the high word of the value.
  ArchOpcode opcode_;             // Replacement load opcode (kMips64Lw).

  void Initialize(Node* node) {
    Int64BinopMatcher m(node);
    // When loading a 64-bit value and shifting by 32, we should
    // just load and sign-extend the interesting 4 bytes instead.
    // This happens, for example, when we're loading and untagging SMIs.
    DCHECK(m.IsWord64Sar());
    // The load must be covered (used only by this shift) so it is safe to
    // replace it rather than emit it separately.
    if (m.left().IsLoad() && m.right().Is(32) &&
        selector_->CanCover(m.node(), m.left().node())) {
      MachineRepresentation rep =
          LoadRepresentationOf(m.left().node()->op()).representation();
      // The matched load is expected to be 8 bytes wide.
      DCHECK_EQ(3, ElementSizeLog2Of(rep));
      if (rep != MachineRepresentation::kTaggedSigned &&
          rep != MachineRepresentation::kTaggedPointer &&
          rep != MachineRepresentation::kTagged &&
          rep != MachineRepresentation::kWord64) {
        return;
      }

      Mips64OperandGenerator g(selector_);
      Node* load = m.left().node();
      Node* offset = load->InputAt(1);
      base_ = load->InputAt(0);
      opcode_ = kMips64Lw;
      if (g.CanBeImmediate(offset, opcode_)) {
        // The high 32 bits sit at offset + 4 on little-endian targets and at
        // the original offset on big-endian targets.
#if defined(V8_TARGET_LITTLE_ENDIAN)
        immediate_ = g.GetIntegerConstantValue(offset) + 4;
#elif defined(V8_TARGET_BIG_ENDIAN)
        immediate_ = g.GetIntegerConstantValue(offset);
#endif
        // The adjusted offset must itself still fit Lw's immediate field.
        matches_ = g.CanBeImmediate(immediate_, kMips64Lw);
      }
    }
  }
};
232
TryEmitExtendingLoad(InstructionSelector * selector,Node * node,Node * output_node)233 bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node,
234 Node* output_node) {
235 ExtendingLoadMatcher m(node, selector);
236 Mips64OperandGenerator g(selector);
237 if (m.Matches()) {
238 InstructionOperand inputs[2];
239 inputs[0] = g.UseRegister(m.base());
240 InstructionCode opcode =
241 m.opcode() | AddressingModeField::encode(kMode_MRI);
242 DCHECK(is_int32(m.immediate()));
243 inputs[1] = g.TempImmediate(static_cast<int32_t>(m.immediate()));
244 InstructionOperand outputs[] = {g.DefineAsRegister(output_node)};
245 selector->Emit(opcode, arraysize(outputs), outputs, arraysize(inputs),
246 inputs);
247 return true;
248 }
249 return false;
250 }
251
TryMatchImmediate(InstructionSelector * selector,InstructionCode * opcode_return,Node * node,size_t * input_count_return,InstructionOperand * inputs)252 bool TryMatchImmediate(InstructionSelector* selector,
253 InstructionCode* opcode_return, Node* node,
254 size_t* input_count_return, InstructionOperand* inputs) {
255 Mips64OperandGenerator g(selector);
256 if (g.CanBeImmediate(node, *opcode_return)) {
257 *opcode_return |= AddressingModeField::encode(kMode_MRI);
258 inputs[0] = g.UseImmediate(node);
259 *input_count_return = 1;
260 return true;
261 }
262 return false;
263 }
264
VisitBinop(InstructionSelector * selector,Node * node,InstructionCode opcode,bool has_reverse_opcode,InstructionCode reverse_opcode,FlagsContinuation * cont)265 static void VisitBinop(InstructionSelector* selector, Node* node,
266 InstructionCode opcode, bool has_reverse_opcode,
267 InstructionCode reverse_opcode,
268 FlagsContinuation* cont) {
269 Mips64OperandGenerator g(selector);
270 Int32BinopMatcher m(node);
271 InstructionOperand inputs[2];
272 size_t input_count = 0;
273 InstructionOperand outputs[1];
274 size_t output_count = 0;
275
276 if (TryMatchImmediate(selector, &opcode, m.right().node(), &input_count,
277 &inputs[1])) {
278 inputs[0] = g.UseRegister(m.left().node());
279 input_count++;
280 } else if (has_reverse_opcode &&
281 TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
282 &input_count, &inputs[1])) {
283 inputs[0] = g.UseRegister(m.right().node());
284 opcode = reverse_opcode;
285 input_count++;
286 } else {
287 inputs[input_count++] = g.UseRegister(m.left().node());
288 inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
289 }
290
291 if (cont->IsDeoptimize()) {
292 // If we can deoptimize as a result of the binop, we need to make sure that
293 // the deopt inputs are not overwritten by the binop result. One way
294 // to achieve that is to declare the output register as same-as-first.
295 outputs[output_count++] = g.DefineSameAsFirst(node);
296 } else {
297 outputs[output_count++] = g.DefineAsRegister(node);
298 }
299
300 DCHECK_NE(0u, input_count);
301 DCHECK_EQ(1u, output_count);
302 DCHECK_GE(arraysize(inputs), input_count);
303 DCHECK_GE(arraysize(outputs), output_count);
304
305 selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
306 inputs, cont);
307 }
308
VisitBinop(InstructionSelector * selector,Node * node,InstructionCode opcode,bool has_reverse_opcode,InstructionCode reverse_opcode)309 static void VisitBinop(InstructionSelector* selector, Node* node,
310 InstructionCode opcode, bool has_reverse_opcode,
311 InstructionCode reverse_opcode) {
312 FlagsContinuation cont;
313 VisitBinop(selector, node, opcode, has_reverse_opcode, reverse_opcode, &cont);
314 }
315
VisitBinop(InstructionSelector * selector,Node * node,InstructionCode opcode,FlagsContinuation * cont)316 static void VisitBinop(InstructionSelector* selector, Node* node,
317 InstructionCode opcode, FlagsContinuation* cont) {
318 VisitBinop(selector, node, opcode, false, kArchNop, cont);
319 }
320
VisitBinop(InstructionSelector * selector,Node * node,InstructionCode opcode)321 static void VisitBinop(InstructionSelector* selector, Node* node,
322 InstructionCode opcode) {
323 VisitBinop(selector, node, opcode, false, kArchNop);
324 }
325
VisitStackSlot(Node * node)326 void InstructionSelector::VisitStackSlot(Node* node) {
327 StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
328 int alignment = rep.alignment();
329 int slot = frame_->AllocateSpillSlot(rep.size(), alignment);
330 OperandGenerator g(this);
331
332 Emit(kArchStackSlot, g.DefineAsRegister(node),
333 sequence()->AddImmediate(Constant(slot)),
334 sequence()->AddImmediate(Constant(alignment)), 0, nullptr);
335 }
336
VisitDebugAbort(Node * node)337 void InstructionSelector::VisitDebugAbort(Node* node) {
338 Mips64OperandGenerator g(this);
339 Emit(kArchDebugAbort, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
340 }
341
EmitLoad(InstructionSelector * selector,Node * node,InstructionCode opcode,Node * output=nullptr)342 void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
343 Node* output = nullptr) {
344 Mips64OperandGenerator g(selector);
345 Node* base = node->InputAt(0);
346 Node* index = node->InputAt(1);
347
348 if (g.CanBeImmediate(index, opcode)) {
349 selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
350 g.DefineAsRegister(output == nullptr ? node : output),
351 g.UseRegister(base), g.UseImmediate(index));
352 } else {
353 InstructionOperand addr_reg = g.TempRegister();
354 selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
355 addr_reg, g.UseRegister(index), g.UseRegister(base));
356 // Emit desired load opcode, using temp addr_reg.
357 selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
358 g.DefineAsRegister(output == nullptr ? node : output),
359 addr_reg, g.TempImmediate(0));
360 }
361 }
362
VisitLoad(Node * node)363 void InstructionSelector::VisitLoad(Node* node) {
364 LoadRepresentation load_rep = LoadRepresentationOf(node->op());
365
366 InstructionCode opcode = kArchNop;
367 switch (load_rep.representation()) {
368 case MachineRepresentation::kFloat32:
369 opcode = kMips64Lwc1;
370 break;
371 case MachineRepresentation::kFloat64:
372 opcode = kMips64Ldc1;
373 break;
374 case MachineRepresentation::kBit: // Fall through.
375 case MachineRepresentation::kWord8:
376 opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
377 break;
378 case MachineRepresentation::kWord16:
379 opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
380 break;
381 case MachineRepresentation::kWord32:
382 opcode = load_rep.IsUnsigned() ? kMips64Lwu : kMips64Lw;
383 break;
384 case MachineRepresentation::kTaggedSigned: // Fall through.
385 case MachineRepresentation::kTaggedPointer: // Fall through.
386 case MachineRepresentation::kTagged: // Fall through.
387 case MachineRepresentation::kWord64:
388 opcode = kMips64Ld;
389 break;
390 case MachineRepresentation::kSimd128:
391 opcode = kMips64MsaLd;
392 break;
393 case MachineRepresentation::kNone:
394 UNREACHABLE();
395 return;
396 }
397 if (node->opcode() == IrOpcode::kPoisonedLoad) {
398 CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
399 opcode |= MiscField::encode(kMemoryAccessPoisoned);
400 }
401
402 EmitLoad(this, node, opcode);
403 }
404
VisitPoisonedLoad(Node * node)405 void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
406
VisitProtectedLoad(Node * node)407 void InstructionSelector::VisitProtectedLoad(Node* node) {
408 // TODO(eholk)
409 UNIMPLEMENTED();
410 }
411
VisitStore(Node * node)412 void InstructionSelector::VisitStore(Node* node) {
413 Mips64OperandGenerator g(this);
414 Node* base = node->InputAt(0);
415 Node* index = node->InputAt(1);
416 Node* value = node->InputAt(2);
417
418 StoreRepresentation store_rep = StoreRepresentationOf(node->op());
419 WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
420 MachineRepresentation rep = store_rep.representation();
421
422 // TODO(mips): I guess this could be done in a better way.
423 if (write_barrier_kind != kNoWriteBarrier) {
424 DCHECK(CanBeTaggedPointer(rep));
425 InstructionOperand inputs[3];
426 size_t input_count = 0;
427 inputs[input_count++] = g.UseUniqueRegister(base);
428 inputs[input_count++] = g.UseUniqueRegister(index);
429 inputs[input_count++] = g.UseUniqueRegister(value);
430 RecordWriteMode record_write_mode = RecordWriteMode::kValueIsAny;
431 switch (write_barrier_kind) {
432 case kNoWriteBarrier:
433 UNREACHABLE();
434 break;
435 case kMapWriteBarrier:
436 record_write_mode = RecordWriteMode::kValueIsMap;
437 break;
438 case kPointerWriteBarrier:
439 record_write_mode = RecordWriteMode::kValueIsPointer;
440 break;
441 case kFullWriteBarrier:
442 record_write_mode = RecordWriteMode::kValueIsAny;
443 break;
444 }
445 InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
446 size_t const temp_count = arraysize(temps);
447 InstructionCode code = kArchStoreWithWriteBarrier;
448 code |= MiscField::encode(static_cast<int>(record_write_mode));
449 Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
450 } else {
451 ArchOpcode opcode = kArchNop;
452 switch (rep) {
453 case MachineRepresentation::kFloat32:
454 opcode = kMips64Swc1;
455 break;
456 case MachineRepresentation::kFloat64:
457 opcode = kMips64Sdc1;
458 break;
459 case MachineRepresentation::kBit: // Fall through.
460 case MachineRepresentation::kWord8:
461 opcode = kMips64Sb;
462 break;
463 case MachineRepresentation::kWord16:
464 opcode = kMips64Sh;
465 break;
466 case MachineRepresentation::kWord32:
467 opcode = kMips64Sw;
468 break;
469 case MachineRepresentation::kTaggedSigned: // Fall through.
470 case MachineRepresentation::kTaggedPointer: // Fall through.
471 case MachineRepresentation::kTagged: // Fall through.
472 case MachineRepresentation::kWord64:
473 opcode = kMips64Sd;
474 break;
475 case MachineRepresentation::kSimd128:
476 opcode = kMips64MsaSt;
477 break;
478 case MachineRepresentation::kNone:
479 UNREACHABLE();
480 return;
481 }
482
483 if (g.CanBeImmediate(index, opcode)) {
484 Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
485 g.UseRegister(base), g.UseImmediate(index),
486 g.UseRegisterOrImmediateZero(value));
487 } else {
488 InstructionOperand addr_reg = g.TempRegister();
489 Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
490 g.UseRegister(index), g.UseRegister(base));
491 // Emit desired store opcode, using temp addr_reg.
492 Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
493 addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
494 }
495 }
496 }
497
VisitProtectedStore(Node * node)498 void InstructionSelector::VisitProtectedStore(Node* node) {
499 // TODO(eholk)
500 UNIMPLEMENTED();
501 }
502
VisitWord32And(Node * node)503 void InstructionSelector::VisitWord32And(Node* node) {
504 Mips64OperandGenerator g(this);
505 Int32BinopMatcher m(node);
506 if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
507 m.right().HasValue()) {
508 uint32_t mask = m.right().Value();
509 uint32_t mask_width = base::bits::CountPopulation(mask);
510 uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
511 if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
512 // The mask must be contiguous, and occupy the least-significant bits.
513 DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
514
515 // Select Ext for And(Shr(x, imm), mask) where the mask is in the least
516 // significant bits.
517 Int32BinopMatcher mleft(m.left().node());
518 if (mleft.right().HasValue()) {
519 // Any shift value can match; int32 shifts use `value % 32`.
520 uint32_t lsb = mleft.right().Value() & 0x1F;
521
522 // Ext cannot extract bits past the register size, however since
523 // shifting the original value would have introduced some zeros we can
524 // still use Ext with a smaller mask and the remaining bits will be
525 // zeros.
526 if (lsb + mask_width > 32) mask_width = 32 - lsb;
527
528 Emit(kMips64Ext, g.DefineAsRegister(node),
529 g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
530 g.TempImmediate(mask_width));
531 return;
532 }
533 // Other cases fall through to the normal And operation.
534 }
535 }
536 if (m.right().HasValue()) {
537 uint32_t mask = m.right().Value();
538 uint32_t shift = base::bits::CountPopulation(~mask);
539 uint32_t msb = base::bits::CountLeadingZeros32(~mask);
540 if (shift != 0 && shift != 32 && msb + shift == 32) {
541 // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
542 // and remove constant loading of inverted mask.
543 Emit(kMips64Ins, g.DefineSameAsFirst(node),
544 g.UseRegister(m.left().node()), g.TempImmediate(0),
545 g.TempImmediate(shift));
546 return;
547 }
548 }
549 VisitBinop(this, node, kMips64And32, true, kMips64And32);
550 }
551
552
VisitWord64And(Node * node)553 void InstructionSelector::VisitWord64And(Node* node) {
554 Mips64OperandGenerator g(this);
555 Int64BinopMatcher m(node);
556 if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
557 m.right().HasValue()) {
558 uint64_t mask = m.right().Value();
559 uint32_t mask_width = base::bits::CountPopulation(mask);
560 uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
561 if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
562 // The mask must be contiguous, and occupy the least-significant bits.
563 DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
564
565 // Select Dext for And(Shr(x, imm), mask) where the mask is in the least
566 // significant bits.
567 Int64BinopMatcher mleft(m.left().node());
568 if (mleft.right().HasValue()) {
569 // Any shift value can match; int64 shifts use `value % 64`.
570 uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3F);
571
572 // Dext cannot extract bits past the register size, however since
573 // shifting the original value would have introduced some zeros we can
574 // still use Dext with a smaller mask and the remaining bits will be
575 // zeros.
576 if (lsb + mask_width > 64) mask_width = 64 - lsb;
577
578 if (lsb == 0 && mask_width == 64) {
579 Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(mleft.left().node()));
580 } else {
581 Emit(kMips64Dext, g.DefineAsRegister(node),
582 g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
583 g.TempImmediate(static_cast<int32_t>(mask_width)));
584 }
585 return;
586 }
587 // Other cases fall through to the normal And operation.
588 }
589 }
590 if (m.right().HasValue()) {
591 uint64_t mask = m.right().Value();
592 uint32_t shift = base::bits::CountPopulation(~mask);
593 uint32_t msb = base::bits::CountLeadingZeros64(~mask);
594 if (shift != 0 && shift < 32 && msb + shift == 64) {
595 // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
596 // and remove constant loading of inverted mask. Dins cannot insert bits
597 // past word size, so shifts smaller than 32 are covered.
598 Emit(kMips64Dins, g.DefineSameAsFirst(node),
599 g.UseRegister(m.left().node()), g.TempImmediate(0),
600 g.TempImmediate(shift));
601 return;
602 }
603 }
604 VisitBinop(this, node, kMips64And, true, kMips64And);
605 }
606
607
VisitWord32Or(Node * node)608 void InstructionSelector::VisitWord32Or(Node* node) {
609 VisitBinop(this, node, kMips64Or32, true, kMips64Or32);
610 }
611
612
VisitWord64Or(Node * node)613 void InstructionSelector::VisitWord64Or(Node* node) {
614 VisitBinop(this, node, kMips64Or, true, kMips64Or);
615 }
616
617
VisitWord32Xor(Node * node)618 void InstructionSelector::VisitWord32Xor(Node* node) {
619 Int32BinopMatcher m(node);
620 if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
621 m.right().Is(-1)) {
622 Int32BinopMatcher mleft(m.left().node());
623 if (!mleft.right().HasValue()) {
624 Mips64OperandGenerator g(this);
625 Emit(kMips64Nor32, g.DefineAsRegister(node),
626 g.UseRegister(mleft.left().node()),
627 g.UseRegister(mleft.right().node()));
628 return;
629 }
630 }
631 if (m.right().Is(-1)) {
632 // Use Nor for bit negation and eliminate constant loading for xori.
633 Mips64OperandGenerator g(this);
634 Emit(kMips64Nor32, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
635 g.TempImmediate(0));
636 return;
637 }
638 VisitBinop(this, node, kMips64Xor32, true, kMips64Xor32);
639 }
640
641
VisitWord64Xor(Node * node)642 void InstructionSelector::VisitWord64Xor(Node* node) {
643 Int64BinopMatcher m(node);
644 if (m.left().IsWord64Or() && CanCover(node, m.left().node()) &&
645 m.right().Is(-1)) {
646 Int64BinopMatcher mleft(m.left().node());
647 if (!mleft.right().HasValue()) {
648 Mips64OperandGenerator g(this);
649 Emit(kMips64Nor, g.DefineAsRegister(node),
650 g.UseRegister(mleft.left().node()),
651 g.UseRegister(mleft.right().node()));
652 return;
653 }
654 }
655 if (m.right().Is(-1)) {
656 // Use Nor for bit negation and eliminate constant loading for xori.
657 Mips64OperandGenerator g(this);
658 Emit(kMips64Nor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
659 g.TempImmediate(0));
660 return;
661 }
662 VisitBinop(this, node, kMips64Xor, true, kMips64Xor);
663 }
664
665
VisitWord32Shl(Node * node)666 void InstructionSelector::VisitWord32Shl(Node* node) {
667 Int32BinopMatcher m(node);
668 if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
669 m.right().IsInRange(1, 31)) {
670 Mips64OperandGenerator g(this);
671 Int32BinopMatcher mleft(m.left().node());
672 // Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is
673 // contiguous, and the shift immediate non-zero.
674 if (mleft.right().HasValue()) {
675 uint32_t mask = mleft.right().Value();
676 uint32_t mask_width = base::bits::CountPopulation(mask);
677 uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
678 if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
679 uint32_t shift = m.right().Value();
680 DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
681 DCHECK_NE(0u, shift);
682 if ((shift + mask_width) >= 32) {
683 // If the mask is contiguous and reaches or extends beyond the top
684 // bit, only the shift is needed.
685 Emit(kMips64Shl, g.DefineAsRegister(node),
686 g.UseRegister(mleft.left().node()),
687 g.UseImmediate(m.right().node()));
688 return;
689 }
690 }
691 }
692 }
693 VisitRRO(this, kMips64Shl, node);
694 }
695
696
VisitWord32Shr(Node * node)697 void InstructionSelector::VisitWord32Shr(Node* node) {
698 Int32BinopMatcher m(node);
699 if (m.left().IsWord32And() && m.right().HasValue()) {
700 uint32_t lsb = m.right().Value() & 0x1F;
701 Int32BinopMatcher mleft(m.left().node());
702 if (mleft.right().HasValue() && mleft.right().Value() != 0) {
703 // Select Ext for Shr(And(x, mask), imm) where the result of the mask is
704 // shifted into the least-significant bits.
705 uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
706 unsigned mask_width = base::bits::CountPopulation(mask);
707 unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
708 if ((mask_msb + mask_width + lsb) == 32) {
709 Mips64OperandGenerator g(this);
710 DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
711 Emit(kMips64Ext, g.DefineAsRegister(node),
712 g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
713 g.TempImmediate(mask_width));
714 return;
715 }
716 }
717 }
718 VisitRRO(this, kMips64Shr, node);
719 }
720
721
VisitWord32Sar(Node * node)722 void InstructionSelector::VisitWord32Sar(Node* node) {
723 Int32BinopMatcher m(node);
724 if (m.left().IsWord32Shl() && CanCover(node, m.left().node())) {
725 Int32BinopMatcher mleft(m.left().node());
726 if (m.right().HasValue() && mleft.right().HasValue()) {
727 Mips64OperandGenerator g(this);
728 uint32_t sar = m.right().Value();
729 uint32_t shl = mleft.right().Value();
730 if ((sar == shl) && (sar == 16)) {
731 Emit(kMips64Seh, g.DefineAsRegister(node),
732 g.UseRegister(mleft.left().node()));
733 return;
734 } else if ((sar == shl) && (sar == 24)) {
735 Emit(kMips64Seb, g.DefineAsRegister(node),
736 g.UseRegister(mleft.left().node()));
737 return;
738 } else if ((sar == shl) && (sar == 32)) {
739 Emit(kMips64Shl, g.DefineAsRegister(node),
740 g.UseRegister(mleft.left().node()), g.TempImmediate(0));
741 return;
742 }
743 }
744 }
745 VisitRRO(this, kMips64Sar, node);
746 }
747
748
VisitWord64Shl(Node * node)749 void InstructionSelector::VisitWord64Shl(Node* node) {
750 Mips64OperandGenerator g(this);
751 Int64BinopMatcher m(node);
752 if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
753 m.right().IsInRange(32, 63) && CanCover(node, m.left().node())) {
754 // There's no need to sign/zero-extend to 64-bit if we shift out the upper
755 // 32 bits anyway.
756 Emit(kMips64Dshl, g.DefineSameAsFirst(node),
757 g.UseRegister(m.left().node()->InputAt(0)),
758 g.UseImmediate(m.right().node()));
759 return;
760 }
761 if (m.left().IsWord64And() && CanCover(node, m.left().node()) &&
762 m.right().IsInRange(1, 63)) {
763 // Match Word64Shl(Word64And(x, mask), imm) to Dshl where the mask is
764 // contiguous, and the shift immediate non-zero.
765 Int64BinopMatcher mleft(m.left().node());
766 if (mleft.right().HasValue()) {
767 uint64_t mask = mleft.right().Value();
768 uint32_t mask_width = base::bits::CountPopulation(mask);
769 uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
770 if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
771 uint64_t shift = m.right().Value();
772 DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
773 DCHECK_NE(0u, shift);
774
775 if ((shift + mask_width) >= 64) {
776 // If the mask is contiguous and reaches or extends beyond the top
777 // bit, only the shift is needed.
778 Emit(kMips64Dshl, g.DefineAsRegister(node),
779 g.UseRegister(mleft.left().node()),
780 g.UseImmediate(m.right().node()));
781 return;
782 }
783 }
784 }
785 }
786 VisitRRO(this, kMips64Dshl, node);
787 }
788
789
VisitWord64Shr(Node * node)790 void InstructionSelector::VisitWord64Shr(Node* node) {
791 Int64BinopMatcher m(node);
792 if (m.left().IsWord64And() && m.right().HasValue()) {
793 uint32_t lsb = m.right().Value() & 0x3F;
794 Int64BinopMatcher mleft(m.left().node());
795 if (mleft.right().HasValue() && mleft.right().Value() != 0) {
796 // Select Dext for Shr(And(x, mask), imm) where the result of the mask is
797 // shifted into the least-significant bits.
798 uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
799 unsigned mask_width = base::bits::CountPopulation(mask);
800 unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
801 if ((mask_msb + mask_width + lsb) == 64) {
802 Mips64OperandGenerator g(this);
803 DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask));
804 Emit(kMips64Dext, g.DefineAsRegister(node),
805 g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
806 g.TempImmediate(mask_width));
807 return;
808 }
809 }
810 }
811 VisitRRO(this, kMips64Dshr, node);
812 }
813
814
VisitWord64Sar(Node * node)815 void InstructionSelector::VisitWord64Sar(Node* node) {
816 if (TryEmitExtendingLoad(this, node, node)) return;
817 VisitRRO(this, kMips64Dsar, node);
818 }
819
820
VisitWord32Ror(Node * node)821 void InstructionSelector::VisitWord32Ror(Node* node) {
822 VisitRRO(this, kMips64Ror, node);
823 }
824
825
VisitWord32Clz(Node * node)826 void InstructionSelector::VisitWord32Clz(Node* node) {
827 VisitRR(this, kMips64Clz, node);
828 }
829
830
VisitWord32ReverseBits(Node * node)831 void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
832
833
VisitWord64ReverseBits(Node * node)834 void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
835
VisitWord64ReverseBytes(Node * node)836 void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
837 Mips64OperandGenerator g(this);
838 Emit(kMips64ByteSwap64, g.DefineAsRegister(node),
839 g.UseRegister(node->InputAt(0)));
840 }
841
VisitWord32ReverseBytes(Node * node)842 void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
843 Mips64OperandGenerator g(this);
844 Emit(kMips64ByteSwap32, g.DefineAsRegister(node),
845 g.UseRegister(node->InputAt(0)));
846 }
847
VisitWord32Ctz(Node * node)848 void InstructionSelector::VisitWord32Ctz(Node* node) {
849 Mips64OperandGenerator g(this);
850 Emit(kMips64Ctz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
851 }
852
853
VisitWord64Ctz(Node * node)854 void InstructionSelector::VisitWord64Ctz(Node* node) {
855 Mips64OperandGenerator g(this);
856 Emit(kMips64Dctz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
857 }
858
859
VisitWord32Popcnt(Node * node)860 void InstructionSelector::VisitWord32Popcnt(Node* node) {
861 Mips64OperandGenerator g(this);
862 Emit(kMips64Popcnt, g.DefineAsRegister(node),
863 g.UseRegister(node->InputAt(0)));
864 }
865
866
VisitWord64Popcnt(Node * node)867 void InstructionSelector::VisitWord64Popcnt(Node* node) {
868 Mips64OperandGenerator g(this);
869 Emit(kMips64Dpopcnt, g.DefineAsRegister(node),
870 g.UseRegister(node->InputAt(0)));
871 }
872
873
// 64-bit rotate right, lowered to drotr.
void InstructionSelector::VisitWord64Ror(Node* node) {
  VisitRRO(this, kMips64Dror, node);
}
877
878
// 64-bit count-leading-zeros, lowered to dclz.
void InstructionSelector::VisitWord64Clz(Node* node) {
  VisitRR(this, kMips64Dclz, node);
}
882
883
// 32-bit addition. When one operand is a covered `x << imm` with a constant
// shift, the add and shift fuse into a single Lsa (left-shift-and-add);
// otherwise falls back to a commutative add that may take an immediate.
void InstructionSelector::VisitInt32Add(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);

  // Select Lsa for (left + (left_of_right << imm)).
  // The `!m.left().HasValue()` guard avoids pairing a constant with the
  // shifted operand, which a later constant fold would handle better.
  if (m.right().opcode() == IrOpcode::kWord32Shl &&
      CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    if (mright.right().HasValue() && !m.left().HasValue()) {
      int32_t shift_value = static_cast<int32_t>(mright.right().Value());
      Emit(kMips64Lsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(mright.left().node()), g.TempImmediate(shift_value));
      return;
    }
  }

  // Select Lsa for ((left_of_left << imm) + right); the mirror image of the
  // pattern above, since addition is commutative.
  if (m.left().opcode() == IrOpcode::kWord32Shl &&
      CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue() && !m.right().HasValue()) {
      int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
      Emit(kMips64Lsa, g.DefineAsRegister(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.TempImmediate(shift_value));
      return;
    }
  }
  // Generic case: commutative add, immediate operand allowed.
  VisitBinop(this, node, kMips64Add, true, kMips64Add);
}
914
915
// 64-bit addition; mirrors VisitInt32Add but fuses into Dlsa
// (doubleword left-shift-and-add) instead of Lsa.
void InstructionSelector::VisitInt64Add(Node* node) {
  Mips64OperandGenerator g(this);
  Int64BinopMatcher m(node);

  // Select Dlsa for (left + (left_of_right << imm)).
  if (m.right().opcode() == IrOpcode::kWord64Shl &&
      CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
    Int64BinopMatcher mright(m.right().node());
    if (mright.right().HasValue() && !m.left().HasValue()) {
      int32_t shift_value = static_cast<int32_t>(mright.right().Value());
      Emit(kMips64Dlsa, g.DefineAsRegister(node),
           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
           g.TempImmediate(shift_value));
      return;
    }
  }

  // Select Dlsa for ((left_of_left << imm) + right); commutative mirror of
  // the pattern above.
  if (m.left().opcode() == IrOpcode::kWord64Shl &&
      CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
    Int64BinopMatcher mleft(m.left().node());
    if (mleft.right().HasValue() && !m.right().HasValue()) {
      int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
      Emit(kMips64Dlsa, g.DefineAsRegister(node),
           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
           g.TempImmediate(shift_value));
      return;
    }
  }

  // Generic case: commutative doubleword add, immediate operand allowed.
  VisitBinop(this, node, kMips64Dadd, true, kMips64Dadd);
}
948
949
// 32-bit subtraction (not commutative, so no reversed-immediate form).
void InstructionSelector::VisitInt32Sub(Node* node) {
  VisitBinop(this, node, kMips64Sub);
}
953
954
// 64-bit subtraction.
void InstructionSelector::VisitInt64Sub(Node* node) {
  VisitBinop(this, node, kMips64Dsub);
}
958
959
// 32-bit multiplication. Strength-reduces multiplication by small positive
// constants (2^k, 2^k+1, 2^k-1) into shifts/Lsa, combines the Smi-untagging
// `x >> 32` pattern on both operands into DMulHigh, and otherwise emits a
// plain mul.
void InstructionSelector::VisitInt32Mul(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.right().HasValue() && m.right().Value() > 0) {
    uint32_t value = static_cast<uint32_t>(m.right().Value());
    // x * 2^k  =>  x << k.
    if (base::bits::IsPowerOfTwo(value)) {
      Emit(kMips64Shl | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value)));
      return;
    }
    // x * (2^k + 1)  =>  (x << k) + x, via Lsa.
    if (base::bits::IsPowerOfTwo(value - 1)) {
      Emit(kMips64Lsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value - 1)));
      return;
    }
    // x * (2^k - 1)  =>  (x << k) - x.
    if (base::bits::IsPowerOfTwo(value + 1)) {
      InstructionOperand temp = g.TempRegister();
      Emit(kMips64Shl | AddressingModeField::encode(kMode_None), temp,
           g.UseRegister(m.left().node()),
           g.TempImmediate(WhichPowerOf2(value + 1)));
      Emit(kMips64Sub | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
      return;
    }
  }
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (CanCover(node, left) && CanCover(node, right)) {
    if (left->opcode() == IrOpcode::kWord64Sar &&
        right->opcode() == IrOpcode::kWord64Sar) {
      Int64BinopMatcher leftInput(left), rightInput(right);
      if (leftInput.right().Is(32) && rightInput.right().Is(32)) {
        // Combine untagging shifts with Dmul high.
        Emit(kMips64DMulHigh, g.DefineSameAsFirst(node),
             g.UseRegister(leftInput.left().node()),
             g.UseRegister(rightInput.left().node()));
        return;
      }
    }
  }
  VisitRRR(this, kMips64Mul, node);
}
1004
1005
// High 32 bits of a signed 32x32 multiplication.
void InstructionSelector::VisitInt32MulHigh(Node* node) {
  VisitRRR(this, kMips64MulHigh, node);
}
1009
1010
// High 32 bits of an unsigned 32x32 multiplication.
void InstructionSelector::VisitUint32MulHigh(Node* node) {
  VisitRRR(this, kMips64MulHighU, node);
}
1014
1015
VisitInt64Mul(Node * node)1016 void InstructionSelector::VisitInt64Mul(Node* node) {
1017 Mips64OperandGenerator g(this);
1018 Int64BinopMatcher m(node);
1019 // TODO(dusmil): Add optimization for shifts larger than 32.
1020 if (m.right().HasValue() && m.right().Value() > 0) {
1021 uint32_t value = static_cast<uint32_t>(m.right().Value());
1022 if (base::bits::IsPowerOfTwo(value)) {
1023 Emit(kMips64Dshl | AddressingModeField::encode(kMode_None),
1024 g.DefineAsRegister(node), g.UseRegister(m.left().node()),
1025 g.TempImmediate(WhichPowerOf2(value)));
1026 return;
1027 }
1028 if (base::bits::IsPowerOfTwo(value - 1)) {
1029 // Dlsa macro will handle the shifting value out of bound cases.
1030 Emit(kMips64Dlsa, g.DefineAsRegister(node),
1031 g.UseRegister(m.left().node()), g.UseRegister(m.left().node()),
1032 g.TempImmediate(WhichPowerOf2(value - 1)));
1033 return;
1034 }
1035 if (base::bits::IsPowerOfTwo(value + 1)) {
1036 InstructionOperand temp = g.TempRegister();
1037 Emit(kMips64Dshl | AddressingModeField::encode(kMode_None), temp,
1038 g.UseRegister(m.left().node()),
1039 g.TempImmediate(WhichPowerOf2(value + 1)));
1040 Emit(kMips64Dsub | AddressingModeField::encode(kMode_None),
1041 g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
1042 return;
1043 }
1044 }
1045 Emit(kMips64Dmul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
1046 g.UseRegister(m.right().node()));
1047 }
1048
1049
// Signed 32-bit division. When both operands are covered Smi-untagging
// shifts (`x >> 32`), the shifts are folded away and a 64-bit Ddiv is used
// on the untagged values; otherwise a plain 32-bit div is emitted. The
// result is pinned to the dividend register (DefineSameAsFirst) to match
// the code generator's expectations for div.
void InstructionSelector::VisitInt32Div(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (CanCover(node, left) && CanCover(node, right)) {
    if (left->opcode() == IrOpcode::kWord64Sar &&
        right->opcode() == IrOpcode::kWord64Sar) {
      Int64BinopMatcher rightInput(right), leftInput(left);
      if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
        // Combine both shifted operands with Ddiv.
        Emit(kMips64Ddiv, g.DefineSameAsFirst(node),
             g.UseRegister(leftInput.left().node()),
             g.UseRegister(rightInput.left().node()));
        return;
      }
    }
  }
  Emit(kMips64Div, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}
1071
1072
VisitUint32Div(Node * node)1073 void InstructionSelector::VisitUint32Div(Node* node) {
1074 Mips64OperandGenerator g(this);
1075 Int32BinopMatcher m(node);
1076 Emit(kMips64DivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
1077 g.UseRegister(m.right().node()));
1078 }
1079
1080
// Signed 32-bit modulus. As with division, a pair of covered Smi-untagging
// shifts on both operands is folded into a 64-bit Dmod; otherwise a plain
// 32-bit mod is emitted.
void InstructionSelector::VisitInt32Mod(Node* node) {
  Mips64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (CanCover(node, left) && CanCover(node, right)) {
    if (left->opcode() == IrOpcode::kWord64Sar &&
        right->opcode() == IrOpcode::kWord64Sar) {
      Int64BinopMatcher rightInput(right), leftInput(left);
      if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
        // Combine both shifted operands with Dmod.
        Emit(kMips64Dmod, g.DefineSameAsFirst(node),
             g.UseRegister(leftInput.left().node()),
             g.UseRegister(rightInput.left().node()));
        return;
      }
    }
  }
  Emit(kMips64Mod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}
1102
1103
VisitUint32Mod(Node * node)1104 void InstructionSelector::VisitUint32Mod(Node* node) {
1105 Mips64OperandGenerator g(this);
1106 Int32BinopMatcher m(node);
1107 Emit(kMips64ModU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
1108 g.UseRegister(m.right().node()));
1109 }
1110
1111
VisitInt64Div(Node * node)1112 void InstructionSelector::VisitInt64Div(Node* node) {
1113 Mips64OperandGenerator g(this);
1114 Int64BinopMatcher m(node);
1115 Emit(kMips64Ddiv, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
1116 g.UseRegister(m.right().node()));
1117 }
1118
1119
VisitUint64Div(Node * node)1120 void InstructionSelector::VisitUint64Div(Node* node) {
1121 Mips64OperandGenerator g(this);
1122 Int64BinopMatcher m(node);
1123 Emit(kMips64DdivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
1124 g.UseRegister(m.right().node()));
1125 }
1126
1127
VisitInt64Mod(Node * node)1128 void InstructionSelector::VisitInt64Mod(Node* node) {
1129 Mips64OperandGenerator g(this);
1130 Int64BinopMatcher m(node);
1131 Emit(kMips64Dmod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
1132 g.UseRegister(m.right().node()));
1133 }
1134
1135
VisitUint64Mod(Node * node)1136 void InstructionSelector::VisitUint64Mod(Node* node) {
1137 Mips64OperandGenerator g(this);
1138 Int64BinopMatcher m(node);
1139 Emit(kMips64DmodU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
1140 g.UseRegister(m.right().node()));
1141 }
1142
1143
// float32 -> float64 widening (cvt.d.s).
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDS, node);
}
1147
1148
// int32 -> float32 conversion (cvt.s.w).
void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
  VisitRR(this, kMips64CvtSW, node);
}
1152
1153
// uint32 -> float32 conversion.
void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
  VisitRR(this, kMips64CvtSUw, node);
}
1157
1158
// int32 -> float64 conversion (cvt.d.w); exact, no rounding involved.
void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDW, node);
}
1162
1163
// uint32 -> float64 conversion; exact, no rounding involved.
void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDUw, node);
}
1167
1168
// float32 -> int32 truncating (round-toward-zero) conversion.
void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
  VisitRR(this, kMips64TruncWS, node);
}
1172
1173
// float32 -> uint32 truncating conversion.
void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
  VisitRR(this, kMips64TruncUwS, node);
}
1177
1178
// float64 -> int32 conversion. When the input is a covered explicit
// rounding node (or a float32 -> float64 widening of one), the rounding and
// the conversion fuse into the single MIPS convert instruction that applies
// that rounding mode; otherwise a truncating trunc.w.d is used.
void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  Mips64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  // Match ChangeFloat64ToInt32(Float64Round##OP) to corresponding instruction
  // which does rounding and conversion to integer format.
  if (CanCover(node, value)) {
    switch (value->opcode()) {
      case IrOpcode::kFloat64RoundDown:
        Emit(kMips64FloorWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundUp:
        Emit(kMips64CeilWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundTiesEven:
        Emit(kMips64RoundWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundTruncate:
        Emit(kMips64TruncWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      default:
        break;
    }
    if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) {
      Node* next = value->InputAt(0);
      if (CanCover(value, next)) {
        // Match ChangeFloat64ToInt32(ChangeFloat32ToFloat64(Float64Round##OP))
        // and convert straight from the single-precision value.
        switch (next->opcode()) {
          case IrOpcode::kFloat32RoundDown:
            Emit(kMips64FloorWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundUp:
            Emit(kMips64CeilWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundTiesEven:
            Emit(kMips64RoundWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundTruncate:
            Emit(kMips64TruncWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          default:
            // No explicit rounding: truncate directly from float32
            // (value->InputAt(0) == next), skipping the widening.
            Emit(kMips64TruncWS, g.DefineAsRegister(node),
                 g.UseRegister(value->InputAt(0)));
            return;
        }
      } else {
        // Match float32 -> float64 -> int32 representation change path.
        Emit(kMips64TruncWS, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      }
    }
  }
  VisitRR(this, kMips64TruncWD, node);
}
1241
1242
// float64 -> uint32 conversion (truncating).
void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  VisitRR(this, kMips64TruncUwD, node);
}
1246
// float64 -> uint64 conversion (truncating).
void InstructionSelector::VisitChangeFloat64ToUint64(Node* node) {
  VisitRR(this, kMips64TruncUlD, node);
}
1250
// float64 -> uint32 truncation; same lowering as ChangeFloat64ToUint32.
void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
  VisitRR(this, kMips64TruncUwD, node);
}
1254
VisitTryTruncateFloat32ToInt64(Node * node)1255 void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
1256 Mips64OperandGenerator g(this);
1257 InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1258 InstructionOperand outputs[2];
1259 size_t output_count = 0;
1260 outputs[output_count++] = g.DefineAsRegister(node);
1261
1262 Node* success_output = NodeProperties::FindProjection(node, 1);
1263 if (success_output) {
1264 outputs[output_count++] = g.DefineAsRegister(success_output);
1265 }
1266
1267 this->Emit(kMips64TruncLS, output_count, outputs, 1, inputs);
1268 }
1269
1270
VisitTryTruncateFloat64ToInt64(Node * node)1271 void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
1272 Mips64OperandGenerator g(this);
1273 InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1274 InstructionOperand outputs[2];
1275 size_t output_count = 0;
1276 outputs[output_count++] = g.DefineAsRegister(node);
1277
1278 Node* success_output = NodeProperties::FindProjection(node, 1);
1279 if (success_output) {
1280 outputs[output_count++] = g.DefineAsRegister(success_output);
1281 }
1282
1283 Emit(kMips64TruncLD, output_count, outputs, 1, inputs);
1284 }
1285
1286
VisitTryTruncateFloat32ToUint64(Node * node)1287 void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
1288 Mips64OperandGenerator g(this);
1289 InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1290 InstructionOperand outputs[2];
1291 size_t output_count = 0;
1292 outputs[output_count++] = g.DefineAsRegister(node);
1293
1294 Node* success_output = NodeProperties::FindProjection(node, 1);
1295 if (success_output) {
1296 outputs[output_count++] = g.DefineAsRegister(success_output);
1297 }
1298
1299 Emit(kMips64TruncUlS, output_count, outputs, 1, inputs);
1300 }
1301
1302
VisitTryTruncateFloat64ToUint64(Node * node)1303 void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
1304 Mips64OperandGenerator g(this);
1305
1306 InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1307 InstructionOperand outputs[2];
1308 size_t output_count = 0;
1309 outputs[output_count++] = g.DefineAsRegister(node);
1310
1311 Node* success_output = NodeProperties::FindProjection(node, 1);
1312 if (success_output) {
1313 outputs[output_count++] = g.DefineAsRegister(success_output);
1314 }
1315
1316 Emit(kMips64TruncUlD, output_count, outputs, 1, inputs);
1317 }
1318
1319
// int32 -> int64 sign extension. A covered load is turned directly into a
// sign-extending load; otherwise `sll reg, reg, 0` is emitted, which on
// MIPS64 sign-extends the low 32 bits into the full register.
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  Node* value = node->InputAt(0);
  if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) {
    // Generate sign-extending load.
    LoadRepresentation load_rep = LoadRepresentationOf(value->op());
    InstructionCode opcode = kArchNop;
    switch (load_rep.representation()) {
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
        break;
      case MachineRepresentation::kWord16:
        opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
        break;
      case MachineRepresentation::kWord32:
        // lw always sign-extends on MIPS64.
        opcode = kMips64Lw;
        break;
      default:
        UNREACHABLE();
        return;
    }
    EmitLoad(this, value, opcode, node);
  } else {
    Mips64OperandGenerator g(this);
    // Shift by zero sign-extends the 32-bit value into 64 bits.
    Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
         g.TempImmediate(0));
  }
}
1348
1349
// uint32 -> uint64 zero extension. Operations that already leave the upper
// 32 bits cleared (and zero-extending loads) are passed through as a no-op;
// everything else is zero-extended with a Dext of bits [0, 32).
void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  Mips64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  switch (value->opcode()) {
    // 32-bit operations will write their result in a 64 bit register,
    // clearing the top 32 bits of the destination register.
    case IrOpcode::kUint32Div:
    case IrOpcode::kUint32Mod:
    case IrOpcode::kUint32MulHigh: {
      Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
      return;
    }
    case IrOpcode::kLoad: {
      LoadRepresentation load_rep = LoadRepresentationOf(value->op());
      if (load_rep.IsUnsigned()) {
        switch (load_rep.representation()) {
          case MachineRepresentation::kWord8:
          case MachineRepresentation::kWord16:
          case MachineRepresentation::kWord32:
            // Unsigned loads already zero-extend.
            Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
            return;
          default:
            break;
        }
      }
      break;
    }
    default:
      break;
  }
  // General case: explicitly clear the upper 32 bits.
  Emit(kMips64Dext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
       g.TempImmediate(0), g.TempImmediate(32));
}
1383
1384
// int64 -> int32 truncation. A covered `x >> [32..63]` (the Smi-untagging
// pattern) is combined into a single Dsar, and an extending load may absorb
// the truncation entirely; the fallback extracts the low 32 bits with Ext.
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  Mips64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  if (CanCover(node, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord64Sar: {
        if (TryEmitExtendingLoad(this, value, node)) {
          return;
        } else {
          Int64BinopMatcher m(value);
          if (m.right().IsInRange(32, 63)) {
            // After smi untagging no need for truncate. Combine sequence.
            Emit(kMips64Dsar, g.DefineSameAsFirst(node),
                 g.UseRegister(m.left().node()),
                 g.UseImmediate(m.right().node()));
            return;
          }
        }
        break;
      }
      default:
        break;
    }
  }
  Emit(kMips64Ext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
       g.TempImmediate(0), g.TempImmediate(32));
}
1412
1413
// float64 -> float32 narrowing. A covered int32 -> float64 conversion is
// fused into a direct int32 -> float32 convert (cvt.s.w), which is exact
// and skips the intermediate double.
void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
  Mips64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to corresponding
  // instruction.
  if (CanCover(node, value) &&
      value->opcode() == IrOpcode::kChangeInt32ToFloat64) {
    Emit(kMips64CvtSW, g.DefineAsRegister(node),
         g.UseRegister(value->InputAt(0)));
    return;
  }
  VisitRR(this, kMips64CvtSD, node);
}
1427
// JS-semantics float64 -> int32 truncation (modulo 2^32, never traps);
// handled by the architecture-independent slow-path stub.
void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
  VisitRR(this, kArchTruncateDoubleToI, node);
}
1431
// float64 -> int32 round-toward-zero (trunc.w.d).
void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
  VisitRR(this, kMips64TruncWD, node);
}
1435
// int64 -> float32 conversion (cvt.s.l).
void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
  VisitRR(this, kMips64CvtSL, node);
}
1439
1440
// int64 -> float64 conversion (cvt.d.l).
void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDL, node);
}
1444
1445
// uint64 -> float32 conversion.
void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
  VisitRR(this, kMips64CvtSUl, node);
}
1449
1450
// uint64 -> float64 conversion.
void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
  VisitRR(this, kMips64CvtDUl, node);
}
1454
1455
// Reinterpret float32 bits as int32; reuses the low-word extraction
// instruction, which moves the FPR bit pattern to a GPR unchanged.
void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
  VisitRR(this, kMips64Float64ExtractLowWord32, node);
}
1459
1460
// Reinterpret float64 bits as int64 (dmfc1-style move).
void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
  VisitRR(this, kMips64BitcastDL, node);
}
1464
1465
// Reinterpret int32 bits as float32 by inserting them into the low word of
// an FP register. The inline-zero immediate operand stands in for the
// (unused) "current double value" input of the insert instruction.
void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
  Mips64OperandGenerator g(this);
  Emit(kMips64Float64InsertLowWord32, g.DefineAsRegister(node),
       ImmediateOperand(ImmediateOperand::INLINE, 0),
       g.UseRegister(node->InputAt(0)));
}
1472
1473
// Reinterpret int64 bits as float64 (dmtc1-style move).
void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
  VisitRR(this, kMips64BitcastLD, node);
}
1477
1478
// Single-precision addition.
void InstructionSelector::VisitFloat32Add(Node* node) {
  // Optimization with Madd.S(z, x, y) is intentionally removed.
  // See explanation for madd_s in assembler-mips64.cc.
  VisitRRR(this, kMips64AddS, node);
}
1484
1485
// Double-precision addition.
void InstructionSelector::VisitFloat64Add(Node* node) {
  // Optimization with Madd.D(z, x, y) is intentionally removed.
  // See explanation for madd_d in assembler-mips64.cc.
  VisitRRR(this, kMips64AddD, node);
}
1491
1492
// Single-precision subtraction.
void InstructionSelector::VisitFloat32Sub(Node* node) {
  // Optimization with Msub.S(z, x, y) is intentionally removed.
  // See explanation for madd_s in assembler-mips64.cc.
  VisitRRR(this, kMips64SubS, node);
}
1498
// Double-precision subtraction.
void InstructionSelector::VisitFloat64Sub(Node* node) {
  // Optimization with Msub.D(z, x, y) is intentionally removed.
  // See explanation for madd_d in assembler-mips64.cc.
  VisitRRR(this, kMips64SubD, node);
}
1504
// Single-precision multiplication.
void InstructionSelector::VisitFloat32Mul(Node* node) {
  VisitRRR(this, kMips64MulS, node);
}
1508
1509
// Double-precision multiplication.
void InstructionSelector::VisitFloat64Mul(Node* node) {
  VisitRRR(this, kMips64MulD, node);
}
1513
1514
// Single-precision division.
void InstructionSelector::VisitFloat32Div(Node* node) {
  VisitRRR(this, kMips64DivS, node);
}
1518
1519
// Double-precision division.
void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRR(this, kMips64DivD, node);
}
1523
1524
VisitFloat64Mod(Node * node)1525 void InstructionSelector::VisitFloat64Mod(Node* node) {
1526 Mips64OperandGenerator g(this);
1527 Emit(kMips64ModD, g.DefineAsFixed(node, f0),
1528 g.UseFixed(node->InputAt(0), f12),
1529 g.UseFixed(node->InputAt(1), f14))->MarkAsCall();
1530 }
1531
VisitFloat32Max(Node * node)1532 void InstructionSelector::VisitFloat32Max(Node* node) {
1533 Mips64OperandGenerator g(this);
1534 Emit(kMips64Float32Max, g.DefineAsRegister(node),
1535 g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
1536 }
1537
VisitFloat64Max(Node * node)1538 void InstructionSelector::VisitFloat64Max(Node* node) {
1539 Mips64OperandGenerator g(this);
1540 Emit(kMips64Float64Max, g.DefineAsRegister(node),
1541 g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
1542 }
1543
VisitFloat32Min(Node * node)1544 void InstructionSelector::VisitFloat32Min(Node* node) {
1545 Mips64OperandGenerator g(this);
1546 Emit(kMips64Float32Min, g.DefineAsRegister(node),
1547 g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
1548 }
1549
VisitFloat64Min(Node * node)1550 void InstructionSelector::VisitFloat64Min(Node* node) {
1551 Mips64OperandGenerator g(this);
1552 Emit(kMips64Float64Min, g.DefineAsRegister(node),
1553 g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
1554 }
1555
1556
// Single-precision absolute value.
void InstructionSelector::VisitFloat32Abs(Node* node) {
  VisitRR(this, kMips64AbsS, node);
}
1560
1561
// Double-precision absolute value.
void InstructionSelector::VisitFloat64Abs(Node* node) {
  VisitRR(this, kMips64AbsD, node);
}
1565
// Single-precision square root.
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
  VisitRR(this, kMips64SqrtS, node);
}
1569
1570
// Double-precision square root.
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  VisitRR(this, kMips64SqrtD, node);
}
1574
1575
// Single-precision round toward negative infinity (floor).
void InstructionSelector::VisitFloat32RoundDown(Node* node) {
  VisitRR(this, kMips64Float32RoundDown, node);
}
1579
1580
// Double-precision round toward negative infinity (floor).
void InstructionSelector::VisitFloat64RoundDown(Node* node) {
  VisitRR(this, kMips64Float64RoundDown, node);
}
1584
1585
// Single-precision round toward positive infinity (ceil).
void InstructionSelector::VisitFloat32RoundUp(Node* node) {
  VisitRR(this, kMips64Float32RoundUp, node);
}
1589
1590
// Double-precision round toward positive infinity (ceil).
void InstructionSelector::VisitFloat64RoundUp(Node* node) {
  VisitRR(this, kMips64Float64RoundUp, node);
}
1594
1595
// Single-precision round toward zero (truncate).
void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
  VisitRR(this, kMips64Float32RoundTruncate, node);
}
1599
1600
// Double-precision round toward zero (truncate).
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
  VisitRR(this, kMips64Float64RoundTruncate, node);
}
1604
1605
// Round-ties-away is not offered by this target's machine-operator set, so
// the node should never reach instruction selection here.
void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  UNREACHABLE();
}
1609
1610
// Single-precision round to nearest, ties to even.
void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
  VisitRR(this, kMips64Float32RoundTiesEven, node);
}
1614
1615
// Double-precision round to nearest, ties to even.
void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
  VisitRR(this, kMips64Float64RoundTiesEven, node);
}
1619
// Single-precision negation (sign-bit flip).
void InstructionSelector::VisitFloat32Neg(Node* node) {
  VisitRR(this, kMips64NegS, node);
}
1623
// Double-precision negation (sign-bit flip).
void InstructionSelector::VisitFloat64Neg(Node* node) {
  VisitRR(this, kMips64NegD, node);
}
1627
// Binary IEEE-754 math functions (pow, atan2, ...) are lowered to runtime
// calls. Arguments are fixed in f2/f4 and the result in f0, matching the
// register expectations of the generated call stub.
void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                   InstructionCode opcode) {
  Mips64OperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f2),
       g.UseFixed(node->InputAt(1), f4))
      ->MarkAsCall();
}
1635
// Unary IEEE-754 math functions (sin, log, exp, ...) are lowered to runtime
// calls with the argument fixed in f12 and the result in f0.
void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
                                                  InstructionCode opcode) {
  Mips64OperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12))
      ->MarkAsCall();
}
1642
// Materializes outgoing call arguments on the stack before a call.
// C calls: emits kArchPrepareCallCFunction and pokes every argument into a
// slot, skipping the reserved kCArgSlotCount shadow slots required by the
// MIPS O32/N64 C calling convention. JS/stub calls: claims the needed stack
// space once, then stores only the present (non-hole) arguments.
void InstructionSelector::EmitPrepareArguments(
    ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
    Node* node) {
  Mips64OperandGenerator g(this);

  // Prepare for C function call.
  if (call_descriptor->IsCFunctionCall()) {
    Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
                                         call_descriptor->ParameterCount())),
         0, nullptr, 0, nullptr);

    // Poke any stack arguments.
    int slot = kCArgSlotCount;
    for (PushParameter input : (*arguments)) {
      Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
           g.TempImmediate(slot << kPointerSizeLog2));
      ++slot;
    }
  } else {
    int push_count = static_cast<int>(call_descriptor->StackParameterCount());
    if (push_count > 0) {
      // Calculate needed space
      int stack_size = 0;
      for (PushParameter input : (*arguments)) {
        if (input.node) {
          stack_size += input.location.GetSizeInPointers();
        }
      }
      Emit(kMips64StackClaim, g.NoOutput(),
           g.TempImmediate(stack_size << kPointerSizeLog2));
    }
    // Store each argument at its slot index; null nodes are alignment holes
    // and take no store.
    for (size_t n = 0; n < arguments->size(); ++n) {
      PushParameter input = (*arguments)[n];
      if (input.node) {
        Emit(kMips64StoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
             g.TempImmediate(static_cast<int>(n << kPointerSizeLog2)));
      }
    }
  }
}
1683
// Reads multi-value call results back from caller frame slots after a call
// returns, emitting one kMips64Peek per stack-returned value. The slot
// offset counts down from the end of the frame (reverse_slot), advancing by
// each result's size even across alignment holes.
void InstructionSelector::EmitPrepareResults(
    ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
    Node* node) {
  Mips64OperandGenerator g(this);

  int reverse_slot = 0;
  for (PushParameter output : *results) {
    if (!output.location.IsCallerFrameSlot()) continue;
    // Skip any alignment holes in nodes.
    if (output.node != nullptr) {
      DCHECK(!call_descriptor->IsCFunctionCall());
      // Tag FP results so the register allocator uses FP registers.
      if (output.location.GetType() == MachineType::Float32()) {
        MarkAsFloat32(output.node);
      } else if (output.location.GetType() == MachineType::Float64()) {
        MarkAsFloat64(output.node);
      }
      Emit(kMips64Peek, g.DefineAsRegister(output.node),
           g.UseImmediate(reverse_slot));
    }
    reverse_slot += output.location.GetSizeInPointers();
  }
}
1706
// Tail-call targets must be materialized in a register on MIPS64.
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
1708
// Number of scratch registers the gap resolver may use when shuffling
// arguments for a tail call out of a JS function frame.
int InstructionSelector::GetTempsCountForTailCallFromJSFunction() { return 3; }
1710
// Selects an unaligned load. Picks the unaligned-load opcode for the
// machine representation (byte loads have no unaligned form and are
// unreachable here), then uses base+immediate addressing when the index
// fits, or computes the address into a temp register otherwise.
void InstructionSelector::VisitUnalignedLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  Mips64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  ArchOpcode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kMips64Ulwc1;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kMips64Uldc1;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      // Single-byte accesses are always aligned; no unaligned opcode exists.
      UNREACHABLE();
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsUnsigned() ? kMips64Ulhu : kMips64Ulh;
      break;
    case MachineRepresentation::kWord32:
      opcode = load_rep.IsUnsigned() ? kMips64Ulwu : kMips64Ulw;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:  // Fall through.
    case MachineRepresentation::kWord64:
      opcode = kMips64Uld;
      break;
    case MachineRepresentation::kSimd128:
      opcode = kMips64MsaLd;
      break;
    case MachineRepresentation::kNone:
      UNREACHABLE();
      return;
  }

  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
  }
}
1761
VisitUnalignedStore(Node * node)1762 void InstructionSelector::VisitUnalignedStore(Node* node) {
1763 Mips64OperandGenerator g(this);
1764 Node* base = node->InputAt(0);
1765 Node* index = node->InputAt(1);
1766 Node* value = node->InputAt(2);
1767
1768 UnalignedStoreRepresentation rep = UnalignedStoreRepresentationOf(node->op());
1769 ArchOpcode opcode = kArchNop;
1770 switch (rep) {
1771 case MachineRepresentation::kFloat32:
1772 opcode = kMips64Uswc1;
1773 break;
1774 case MachineRepresentation::kFloat64:
1775 opcode = kMips64Usdc1;
1776 break;
1777 case MachineRepresentation::kBit: // Fall through.
1778 case MachineRepresentation::kWord8:
1779 UNREACHABLE();
1780 break;
1781 case MachineRepresentation::kWord16:
1782 opcode = kMips64Ush;
1783 break;
1784 case MachineRepresentation::kWord32:
1785 opcode = kMips64Usw;
1786 break;
1787 case MachineRepresentation::kTaggedSigned: // Fall through.
1788 case MachineRepresentation::kTaggedPointer: // Fall through.
1789 case MachineRepresentation::kTagged: // Fall through.
1790 case MachineRepresentation::kWord64:
1791 opcode = kMips64Usd;
1792 break;
1793 case MachineRepresentation::kSimd128:
1794 opcode = kMips64MsaSt;
1795 break;
1796 case MachineRepresentation::kNone:
1797 UNREACHABLE();
1798 return;
1799 }
1800
1801 if (g.CanBeImmediate(index, opcode)) {
1802 Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
1803 g.UseRegister(base), g.UseImmediate(index),
1804 g.UseRegisterOrImmediateZero(value));
1805 } else {
1806 InstructionOperand addr_reg = g.TempRegister();
1807 Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
1808 g.UseRegister(index), g.UseRegister(base));
1809 // Emit desired store opcode, using temp addr_reg.
1810 Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
1811 addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
1812 }
1813 }
1814
1815 namespace {
1816
1817 // Shared routine for multiple compare operations.
VisitCompare(InstructionSelector * selector,InstructionCode opcode,InstructionOperand left,InstructionOperand right,FlagsContinuation * cont)1818 static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
1819 InstructionOperand left, InstructionOperand right,
1820 FlagsContinuation* cont) {
1821 selector->EmitWithContinuation(opcode, left, right, cont);
1822 }
1823
1824
1825 // Shared routine for multiple float32 compare operations.
VisitFloat32Compare(InstructionSelector * selector,Node * node,FlagsContinuation * cont)1826 void VisitFloat32Compare(InstructionSelector* selector, Node* node,
1827 FlagsContinuation* cont) {
1828 Mips64OperandGenerator g(selector);
1829 Float32BinopMatcher m(node);
1830 InstructionOperand lhs, rhs;
1831
1832 lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
1833 : g.UseRegister(m.left().node());
1834 rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
1835 : g.UseRegister(m.right().node());
1836 VisitCompare(selector, kMips64CmpS, lhs, rhs, cont);
1837 }
1838
1839
1840 // Shared routine for multiple float64 compare operations.
VisitFloat64Compare(InstructionSelector * selector,Node * node,FlagsContinuation * cont)1841 void VisitFloat64Compare(InstructionSelector* selector, Node* node,
1842 FlagsContinuation* cont) {
1843 Mips64OperandGenerator g(selector);
1844 Float64BinopMatcher m(node);
1845 InstructionOperand lhs, rhs;
1846
1847 lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
1848 : g.UseRegister(m.left().node());
1849 rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
1850 : g.UseRegister(m.right().node());
1851 VisitCompare(selector, kMips64CmpD, lhs, rhs, cont);
1852 }
1853
1854
1855 // Shared routine for multiple word compare operations.
VisitWordCompare(InstructionSelector * selector,Node * node,InstructionCode opcode,FlagsContinuation * cont,bool commutative)1856 void VisitWordCompare(InstructionSelector* selector, Node* node,
1857 InstructionCode opcode, FlagsContinuation* cont,
1858 bool commutative) {
1859 Mips64OperandGenerator g(selector);
1860 Node* left = node->InputAt(0);
1861 Node* right = node->InputAt(1);
1862
1863 // Match immediates on left or right side of comparison.
1864 if (g.CanBeImmediate(right, opcode)) {
1865 if (opcode == kMips64Tst) {
1866 VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
1867 cont);
1868 } else {
1869 switch (cont->condition()) {
1870 case kEqual:
1871 case kNotEqual:
1872 if (cont->IsSet()) {
1873 VisitCompare(selector, opcode, g.UseRegister(left),
1874 g.UseImmediate(right), cont);
1875 } else {
1876 VisitCompare(selector, opcode, g.UseRegister(left),
1877 g.UseRegister(right), cont);
1878 }
1879 break;
1880 case kSignedLessThan:
1881 case kSignedGreaterThanOrEqual:
1882 case kUnsignedLessThan:
1883 case kUnsignedGreaterThanOrEqual:
1884 VisitCompare(selector, opcode, g.UseRegister(left),
1885 g.UseImmediate(right), cont);
1886 break;
1887 default:
1888 VisitCompare(selector, opcode, g.UseRegister(left),
1889 g.UseRegister(right), cont);
1890 }
1891 }
1892 } else if (g.CanBeImmediate(left, opcode)) {
1893 if (!commutative) cont->Commute();
1894 if (opcode == kMips64Tst) {
1895 VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
1896 cont);
1897 } else {
1898 switch (cont->condition()) {
1899 case kEqual:
1900 case kNotEqual:
1901 if (cont->IsSet()) {
1902 VisitCompare(selector, opcode, g.UseRegister(right),
1903 g.UseImmediate(left), cont);
1904 } else {
1905 VisitCompare(selector, opcode, g.UseRegister(right),
1906 g.UseRegister(left), cont);
1907 }
1908 break;
1909 case kSignedLessThan:
1910 case kSignedGreaterThanOrEqual:
1911 case kUnsignedLessThan:
1912 case kUnsignedGreaterThanOrEqual:
1913 VisitCompare(selector, opcode, g.UseRegister(right),
1914 g.UseImmediate(left), cont);
1915 break;
1916 default:
1917 VisitCompare(selector, opcode, g.UseRegister(right),
1918 g.UseRegister(left), cont);
1919 }
1920 }
1921 } else {
1922 VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
1923 cont);
1924 }
1925 }
1926
IsNodeUnsigned(Node * n)1927 bool IsNodeUnsigned(Node* n) {
1928 NodeMatcher m(n);
1929
1930 if (m.IsLoad() || m.IsUnalignedLoad() || m.IsPoisonedLoad() ||
1931 m.IsProtectedLoad() || m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) {
1932 LoadRepresentation load_rep = LoadRepresentationOf(n->op());
1933 return load_rep.IsUnsigned();
1934 } else {
1935 return m.IsUint32Div() || m.IsUint32LessThan() ||
1936 m.IsUint32LessThanOrEqual() || m.IsUint32Mod() ||
1937 m.IsUint32MulHigh() || m.IsChangeFloat64ToUint32() ||
1938 m.IsTruncateFloat64ToUint32() || m.IsTruncateFloat32ToUint32();
1939 }
1940 }
1941
1942 // Shared routine for multiple word compare operations.
VisitFullWord32Compare(InstructionSelector * selector,Node * node,InstructionCode opcode,FlagsContinuation * cont)1943 void VisitFullWord32Compare(InstructionSelector* selector, Node* node,
1944 InstructionCode opcode, FlagsContinuation* cont) {
1945 Mips64OperandGenerator g(selector);
1946 InstructionOperand leftOp = g.TempRegister();
1947 InstructionOperand rightOp = g.TempRegister();
1948
1949 selector->Emit(kMips64Dshl, leftOp, g.UseRegister(node->InputAt(0)),
1950 g.TempImmediate(32));
1951 selector->Emit(kMips64Dshl, rightOp, g.UseRegister(node->InputAt(1)),
1952 g.TempImmediate(32));
1953
1954 VisitCompare(selector, opcode, leftOp, rightOp, cont);
1955 }
1956
// Emits the optimized Word32 compare (plain 64-bit compare, relying on
// operands being sign-extended in registers). With FLAG_debug_code, it first
// emits both the optimized comparison and a full (shifted) comparison as
// boolean-producing instructions and asserts at runtime that they agree,
// aborting with kUnsupportedNonPrimitiveCompare on mismatch.
void VisitOptimizedWord32Compare(InstructionSelector* selector, Node* node,
                                 InstructionCode opcode,
                                 FlagsContinuation* cont) {
  if (FLAG_debug_code) {
    Mips64OperandGenerator g(selector);
    InstructionOperand leftOp = g.TempRegister();
    InstructionOperand rightOp = g.TempRegister();
    InstructionOperand optimizedResult = g.TempRegister();
    InstructionOperand fullResult = g.TempRegister();
    FlagsCondition condition = cont->condition();
    // Same compare opcode, but forced into kFlags_set mode so both variants
    // materialize a boolean that can be checked for equality below.
    InstructionCode testOpcode = opcode |
                                 FlagsConditionField::encode(condition) |
                                 FlagsModeField::encode(kFlags_set);

    // Optimized variant: compare the registers directly.
    selector->Emit(testOpcode, optimizedResult, g.UseRegister(node->InputAt(0)),
                   g.UseRegister(node->InputAt(1)));

    // Full variant: shift both operands up by 32 first (see
    // VisitFullWord32Compare).
    selector->Emit(kMips64Dshl, leftOp, g.UseRegister(node->InputAt(0)),
                   g.TempImmediate(32));
    selector->Emit(kMips64Dshl, rightOp, g.UseRegister(node->InputAt(1)),
                   g.TempImmediate(32));
    selector->Emit(testOpcode, fullResult, leftOp, rightOp);

    // Abort if the two results disagree.
    selector->Emit(
        kMips64AssertEqual, g.NoOutput(), optimizedResult, fullResult,
        g.TempImmediate(
            static_cast<int>(AbortReason::kUnsupportedNonPrimitiveCompare)));
  }

  VisitWordCompare(selector, node, opcode, cont, false);
}
1988
VisitWord32Compare(InstructionSelector * selector,Node * node,FlagsContinuation * cont)1989 void VisitWord32Compare(InstructionSelector* selector, Node* node,
1990 FlagsContinuation* cont) {
1991 // MIPS64 doesn't support Word32 compare instructions. Instead it relies
1992 // that the values in registers are correctly sign-extended and uses
1993 // Word64 comparison instead. This behavior is correct in most cases,
1994 // but doesn't work when comparing signed with unsigned operands.
1995 // We could simulate full Word32 compare in all cases but this would
1996 // create an unnecessary overhead since unsigned integers are rarely
1997 // used in JavaScript.
1998 // The solution proposed here tries to match a comparison of signed
1999 // with unsigned operand, and perform full Word32Compare only
2000 // in those cases. Unfortunately, the solution is not complete because
2001 // it might skip cases where Word32 full compare is needed, so
2002 // basically it is a hack.
2003 if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1))) {
2004 VisitFullWord32Compare(selector, node, kMips64Cmp, cont);
2005 } else {
2006 VisitOptimizedWord32Compare(selector, node, kMips64Cmp, cont);
2007 }
2008 }
2009
2010
// Shared routine for multiple word64 compare operations; kMips64Cmp is
// dispatched as non-commutative.
void VisitWord64Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  VisitWordCompare(selector, node, kMips64Cmp, cont, false);
}
2015
2016
2017
// Emits a comparison of |value| against the immediate zero and attaches the
// flags continuation.
void EmitWordCompareZero(InstructionSelector* selector, Node* value,
                         FlagsContinuation* cont) {
  Mips64OperandGenerator g(selector);
  selector->EmitWithContinuation(kMips64Cmp, g.UseRegister(value),
                                 g.TempImmediate(0), cont);
}
2024
2025 } // namespace
2026
2027 // Shared routine for word comparisons against zero.
// Shared routine for word comparisons against zero. Tries to fuse the
// compare that produced |value| into the continuation (branch/set) instead
// of materializing a boolean and comparing it with zero.
void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
                                               FlagsContinuation* cont) {
  // Try to combine with comparisons against 0 by simply inverting the branch.
  // Each peeled-off (x == 0) wrapper negates the continuation once.
  while (CanCover(user, value)) {
    if (value->opcode() == IrOpcode::kWord32Equal) {
      Int32BinopMatcher m(value);
      if (!m.right().Is(0)) break;
      user = value;
      value = m.left().node();
    } else if (value->opcode() == IrOpcode::kWord64Equal) {
      Int64BinopMatcher m(value);
      if (!m.right().Is(0)) break;
      user = value;
      value = m.left().node();
    } else {
      break;
    }

    cont->Negate();
  }

  if (CanCover(user, value)) {
    switch (value->opcode()) {
      // 32-bit comparisons fuse into a Word32 compare.
      case IrOpcode::kWord32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kInt32LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kUint32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord32Compare(this, value, cont);
      // 64-bit comparisons fuse into a Word64 compare.
      case IrOpcode::kWord64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kInt64LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kInt64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kUint64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kUint64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord64Compare(this, value, cont);
      // Float comparisons use the unsigned condition encodings.
      case IrOpcode::kFloat32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kFloat64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation> is either nullptr, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch).
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (result == nullptr || IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kMips64Dadd, cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kMips64Dsub, cont);
              case IrOpcode::kInt32MulWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kMips64MulOvf, cont);
              case IrOpcode::kInt64AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kMips64DaddOvf, cont);
              case IrOpcode::kInt64SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kMips64DsubOvf, cont);
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kWord32And:
      case IrOpcode::kWord64And:
        // Bitwise AND fuses as a test instruction (commutative).
        return VisitWordCompare(this, value, kMips64Tst, cont, true);
      default:
        break;
    }
  }

  // Continuation could not be combined with a compare, emit compare against 0.
  EmitWordCompareZero(this, value, cont);
}
2144
// Lowers a switch either to a jump table (ArchTableSwitch) or to a binary
// search tree of conditional branches (ArchLookupSwitch), chosen by a simple
// size/time cost estimate.
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
  Mips64OperandGenerator g(this);
  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));

  // Emit either ArchTableSwitch or ArchLookupSwitch.
  if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
    static const size_t kMaxTableSwitchValueRange = 2 << 16;
    // Rough cost model: a table costs space proportional to the value range,
    // a lookup costs space/time proportional to the case count. Time is
    // weighted 3x against space in the comparison below.
    size_t table_space_cost = 10 + 2 * sw.value_range();
    size_t table_time_cost = 3;
    size_t lookup_space_cost = 2 + 2 * sw.case_count();
    size_t lookup_time_cost = sw.case_count();
    if (sw.case_count() > 0 &&
        table_space_cost + 3 * table_time_cost <=
            lookup_space_cost + 3 * lookup_time_cost &&
        sw.min_value() > std::numeric_limits<int32_t>::min() &&
        sw.value_range() <= kMaxTableSwitchValueRange) {
      InstructionOperand index_operand = value_operand;
      // Rebase the switch value so the table is indexed from zero.
      if (sw.min_value()) {
        index_operand = g.TempRegister();
        Emit(kMips64Sub, index_operand, value_operand,
             g.TempImmediate(sw.min_value()));
      }
      // Generate a table lookup.
      return EmitTableSwitch(sw, index_operand);
    }
  }

  // Generate a tree of conditional jumps.
  return EmitBinarySearchSwitch(sw, value_operand);
}
2175
2176
VisitWord32Equal(Node * const node)2177 void InstructionSelector::VisitWord32Equal(Node* const node) {
2178 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2179 Int32BinopMatcher m(node);
2180 if (m.right().Is(0)) {
2181 return VisitWordCompareZero(m.node(), m.left().node(), &cont);
2182 }
2183
2184 VisitWord32Compare(this, node, &cont);
2185 }
2186
2187
// Materializes a boolean for a signed 32-bit less-than comparison.
void InstructionSelector::VisitInt32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}
2192
2193
// Materializes a boolean for a signed 32-bit less-than-or-equal comparison.
void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}
2199
2200
// Materializes a boolean for an unsigned 32-bit less-than comparison.
void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}
2205
2206
// Materializes a boolean for an unsigned 32-bit less-than-or-equal
// comparison.
void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}
2212
2213
VisitInt32AddWithOverflow(Node * node)2214 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
2215 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2216 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
2217 return VisitBinop(this, node, kMips64Dadd, &cont);
2218 }
2219 FlagsContinuation cont;
2220 VisitBinop(this, node, kMips64Dadd, &cont);
2221 }
2222
2223
VisitInt32SubWithOverflow(Node * node)2224 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
2225 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2226 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
2227 return VisitBinop(this, node, kMips64Dsub, &cont);
2228 }
2229 FlagsContinuation cont;
2230 VisitBinop(this, node, kMips64Dsub, &cont);
2231 }
2232
VisitInt32MulWithOverflow(Node * node)2233 void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
2234 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2235 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
2236 return VisitBinop(this, node, kMips64MulOvf, &cont);
2237 }
2238 FlagsContinuation cont;
2239 VisitBinop(this, node, kMips64MulOvf, &cont);
2240 }
2241
VisitInt64AddWithOverflow(Node * node)2242 void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
2243 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2244 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
2245 return VisitBinop(this, node, kMips64DaddOvf, &cont);
2246 }
2247 FlagsContinuation cont;
2248 VisitBinop(this, node, kMips64DaddOvf, &cont);
2249 }
2250
2251
VisitInt64SubWithOverflow(Node * node)2252 void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
2253 if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2254 FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
2255 return VisitBinop(this, node, kMips64DsubOvf, &cont);
2256 }
2257 FlagsContinuation cont;
2258 VisitBinop(this, node, kMips64DsubOvf, &cont);
2259 }
2260
2261
VisitWord64Equal(Node * const node)2262 void InstructionSelector::VisitWord64Equal(Node* const node) {
2263 FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2264 Int64BinopMatcher m(node);
2265 if (m.right().Is(0)) {
2266 return VisitWordCompareZero(m.node(), m.left().node(), &cont);
2267 }
2268
2269 VisitWord64Compare(this, node, &cont);
2270 }
2271
2272
// Materializes a boolean for a signed 64-bit less-than comparison.
void InstructionSelector::VisitInt64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWord64Compare(this, node, &cont);
}
2277
2278
// Materializes a boolean for a signed 64-bit less-than-or-equal comparison.
void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWord64Compare(this, node, &cont);
}
2284
2285
// Materializes a boolean for an unsigned 64-bit less-than comparison.
void InstructionSelector::VisitUint64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWord64Compare(this, node, &cont);
}
2290
2291
// Materializes a boolean for an unsigned 64-bit less-than-or-equal
// comparison.
void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWord64Compare(this, node, &cont);
}
2297
2298
// Materializes a boolean for a float32 equality comparison.
void InstructionSelector::VisitFloat32Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat32Compare(this, node, &cont);
}
2303
2304
// Materializes a boolean for a float32 less-than comparison. Float compares
// use the unsigned condition encoding (see also VisitWordCompareZero).
void InstructionSelector::VisitFloat32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitFloat32Compare(this, node, &cont);
}
2309
2310
// Materializes a boolean for a float32 less-than-or-equal comparison, using
// the unsigned condition encoding.
void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitFloat32Compare(this, node, &cont);
}
2316
2317
// Materializes a boolean for a float64 equality comparison.
void InstructionSelector::VisitFloat64Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat64Compare(this, node, &cont);
}
2322
2323
// Materializes a boolean for a float64 less-than comparison, using the
// unsigned condition encoding.
void InstructionSelector::VisitFloat64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitFloat64Compare(this, node, &cont);
}
2328
2329
// Materializes a boolean for a float64 less-than-or-equal comparison, using
// the unsigned condition encoding.
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitFloat64Compare(this, node, &cont);
}
2335
2336
// Emits kMips64Float64ExtractLowWord32 in register-to-register form.
void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
  VisitRR(this, kMips64Float64ExtractLowWord32, node);
}
2340
2341
// Emits kMips64Float64ExtractHighWord32 in register-to-register form.
void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
  VisitRR(this, kMips64Float64ExtractHighWord32, node);
}
2345
// Emits kMips64Float64SilenceNaN in register-to-register form.
void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
  VisitRR(this, kMips64Float64SilenceNaN, node);
}
2349
VisitFloat64InsertLowWord32(Node * node)2350 void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
2351 Mips64OperandGenerator g(this);
2352 Node* left = node->InputAt(0);
2353 Node* right = node->InputAt(1);
2354 Emit(kMips64Float64InsertLowWord32, g.DefineSameAsFirst(node),
2355 g.UseRegister(left), g.UseRegister(right));
2356 }
2357
2358
VisitFloat64InsertHighWord32(Node * node)2359 void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
2360 Mips64OperandGenerator g(this);
2361 Node* left = node->InputAt(0);
2362 Node* right = node->InputAt(1);
2363 Emit(kMips64Float64InsertHighWord32, g.DefineSameAsFirst(node),
2364 g.UseRegister(left), g.UseRegister(right));
2365 }
2366
VisitWord32AtomicLoad(Node * node)2367 void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
2368 LoadRepresentation load_rep = LoadRepresentationOf(node->op());
2369 Mips64OperandGenerator g(this);
2370 Node* base = node->InputAt(0);
2371 Node* index = node->InputAt(1);
2372 ArchOpcode opcode = kArchNop;
2373 switch (load_rep.representation()) {
2374 case MachineRepresentation::kWord8:
2375 opcode =
2376 load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
2377 break;
2378 case MachineRepresentation::kWord16:
2379 opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
2380 : kWord32AtomicLoadUint16;
2381 break;
2382 case MachineRepresentation::kWord32:
2383 opcode = kWord32AtomicLoadWord32;
2384 break;
2385 default:
2386 UNREACHABLE();
2387 return;
2388 }
2389 if (g.CanBeImmediate(index, opcode)) {
2390 Emit(opcode | AddressingModeField::encode(kMode_MRI),
2391 g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
2392 } else {
2393 InstructionOperand addr_reg = g.TempRegister();
2394 Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
2395 g.UseRegister(index), g.UseRegister(base));
2396 // Emit desired load opcode, using temp addr_reg.
2397 Emit(opcode | AddressingModeField::encode(kMode_MRI),
2398 g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
2399 }
2400 }
2401
VisitWord32AtomicStore(Node * node)2402 void InstructionSelector::VisitWord32AtomicStore(Node* node) {
2403 MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
2404 Mips64OperandGenerator g(this);
2405 Node* base = node->InputAt(0);
2406 Node* index = node->InputAt(1);
2407 Node* value = node->InputAt(2);
2408 ArchOpcode opcode = kArchNop;
2409 switch (rep) {
2410 case MachineRepresentation::kWord8:
2411 opcode = kWord32AtomicStoreWord8;
2412 break;
2413 case MachineRepresentation::kWord16:
2414 opcode = kWord32AtomicStoreWord16;
2415 break;
2416 case MachineRepresentation::kWord32:
2417 opcode = kWord32AtomicStoreWord32;
2418 break;
2419 default:
2420 UNREACHABLE();
2421 return;
2422 }
2423
2424 if (g.CanBeImmediate(index, opcode)) {
2425 Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
2426 g.UseRegister(base), g.UseImmediate(index),
2427 g.UseRegisterOrImmediateZero(value));
2428 } else {
2429 InstructionOperand addr_reg = g.TempRegister();
2430 Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
2431 g.UseRegister(index), g.UseRegister(base));
2432 // Emit desired store opcode, using temp addr_reg.
2433 Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
2434 addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
2435 }
2436 }
2437
VisitWord32AtomicExchange(Node * node)2438 void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
2439 Mips64OperandGenerator g(this);
2440 Node* base = node->InputAt(0);
2441 Node* index = node->InputAt(1);
2442 Node* value = node->InputAt(2);
2443 ArchOpcode opcode = kArchNop;
2444 MachineType type = AtomicOpType(node->op());
2445 if (type == MachineType::Int8()) {
2446 opcode = kWord32AtomicExchangeInt8;
2447 } else if (type == MachineType::Uint8()) {
2448 opcode = kWord32AtomicExchangeUint8;
2449 } else if (type == MachineType::Int16()) {
2450 opcode = kWord32AtomicExchangeInt16;
2451 } else if (type == MachineType::Uint16()) {
2452 opcode = kWord32AtomicExchangeUint16;
2453 } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
2454 opcode = kWord32AtomicExchangeWord32;
2455 } else {
2456 UNREACHABLE();
2457 return;
2458 }
2459
2460 AddressingMode addressing_mode = kMode_MRI;
2461 InstructionOperand inputs[3];
2462 size_t input_count = 0;
2463 inputs[input_count++] = g.UseUniqueRegister(base);
2464 inputs[input_count++] = g.UseUniqueRegister(index);
2465 inputs[input_count++] = g.UseUniqueRegister(value);
2466 InstructionOperand outputs[1];
2467 outputs[0] = g.UseUniqueRegister(node);
2468 InstructionOperand temp[3];
2469 temp[0] = g.TempRegister();
2470 temp[1] = g.TempRegister();
2471 temp[2] = g.TempRegister();
2472 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
2473 Emit(code, 1, outputs, input_count, inputs, 3, temp);
2474 }
2475
VisitWord32AtomicCompareExchange(Node * node)2476 void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
2477 Mips64OperandGenerator g(this);
2478 Node* base = node->InputAt(0);
2479 Node* index = node->InputAt(1);
2480 Node* old_value = node->InputAt(2);
2481 Node* new_value = node->InputAt(3);
2482 ArchOpcode opcode = kArchNop;
2483 MachineType type = AtomicOpType(node->op());
2484 if (type == MachineType::Int8()) {
2485 opcode = kWord32AtomicCompareExchangeInt8;
2486 } else if (type == MachineType::Uint8()) {
2487 opcode = kWord32AtomicCompareExchangeUint8;
2488 } else if (type == MachineType::Int16()) {
2489 opcode = kWord32AtomicCompareExchangeInt16;
2490 } else if (type == MachineType::Uint16()) {
2491 opcode = kWord32AtomicCompareExchangeUint16;
2492 } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
2493 opcode = kWord32AtomicCompareExchangeWord32;
2494 } else {
2495 UNREACHABLE();
2496 return;
2497 }
2498
2499 AddressingMode addressing_mode = kMode_MRI;
2500 InstructionOperand inputs[4];
2501 size_t input_count = 0;
2502 inputs[input_count++] = g.UseUniqueRegister(base);
2503 inputs[input_count++] = g.UseUniqueRegister(index);
2504 inputs[input_count++] = g.UseUniqueRegister(old_value);
2505 inputs[input_count++] = g.UseUniqueRegister(new_value);
2506 InstructionOperand outputs[1];
2507 outputs[0] = g.UseUniqueRegister(node);
2508 InstructionOperand temp[3];
2509 temp[0] = g.TempRegister();
2510 temp[1] = g.TempRegister();
2511 temp[2] = g.TempRegister();
2512 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
2513 Emit(code, 1, outputs, input_count, inputs, 3, temp);
2514 }
2515
VisitWord32AtomicBinaryOperation(Node * node,ArchOpcode int8_op,ArchOpcode uint8_op,ArchOpcode int16_op,ArchOpcode uint16_op,ArchOpcode word32_op)2516 void InstructionSelector::VisitWord32AtomicBinaryOperation(
2517 Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
2518 ArchOpcode uint16_op, ArchOpcode word32_op) {
2519 Mips64OperandGenerator g(this);
2520 Node* base = node->InputAt(0);
2521 Node* index = node->InputAt(1);
2522 Node* value = node->InputAt(2);
2523 ArchOpcode opcode = kArchNop;
2524 MachineType type = AtomicOpType(node->op());
2525 if (type == MachineType::Int8()) {
2526 opcode = int8_op;
2527 } else if (type == MachineType::Uint8()) {
2528 opcode = uint8_op;
2529 } else if (type == MachineType::Int16()) {
2530 opcode = int16_op;
2531 } else if (type == MachineType::Uint16()) {
2532 opcode = uint16_op;
2533 } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
2534 opcode = word32_op;
2535 } else {
2536 UNREACHABLE();
2537 return;
2538 }
2539
2540 AddressingMode addressing_mode = kMode_MRI;
2541 InstructionOperand inputs[3];
2542 size_t input_count = 0;
2543 inputs[input_count++] = g.UseUniqueRegister(base);
2544 inputs[input_count++] = g.UseUniqueRegister(index);
2545 inputs[input_count++] = g.UseUniqueRegister(value);
2546 InstructionOperand outputs[1];
2547 outputs[0] = g.UseUniqueRegister(node);
2548 InstructionOperand temps[4];
2549 temps[0] = g.TempRegister();
2550 temps[1] = g.TempRegister();
2551 temps[2] = g.TempRegister();
2552 temps[3] = g.TempRegister();
2553 InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
2554 Emit(code, 1, outputs, input_count, inputs, 4, temps);
2555 }
2556
// Instantiates VisitWord32Atomic{Add,Sub,And,Or,Xor}; each forwards to
// VisitWord32AtomicBinaryOperation with the per-width opcodes for that
// operation.
#define VISIT_ATOMIC_BINOP(op)                                   \
  void InstructionSelector::VisitWord32Atomic##op(Node* node) {  \
    VisitWord32AtomicBinaryOperation(                            \
        node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
        kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16,     \
        kWord32Atomic##op##Word32);                              \
  }
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
VISIT_ATOMIC_BINOP(And)
VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
2570
// Abs-with-overflow is not implemented for MIPS64; the front end never
// produces these nodes for this architecture.
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
  UNREACHABLE();
}

void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
  UNREACHABLE();
}

// Speculation fences are not supported on MIPS64.
void InstructionSelector::VisitSpeculationFence(Node* node) { UNREACHABLE(); }
2580
// SIMD vector types for which the splat/extract-lane/replace-lane visitors
// below are instantiated.
#define SIMD_TYPE_LIST(V) \
  V(F32x4)                \
  V(I32x4)                \
  V(I16x8)                \
  V(I8x16)

// Unary SIMD operations paired with the machine opcode they lower to.
#define SIMD_UNOP_LIST(V)                                  \
  V(F32x4SConvertI32x4, kMips64F32x4SConvertI32x4)         \
  V(F32x4UConvertI32x4, kMips64F32x4UConvertI32x4)         \
  V(F32x4Abs, kMips64F32x4Abs)                             \
  V(F32x4Neg, kMips64F32x4Neg)                             \
  V(F32x4RecipApprox, kMips64F32x4RecipApprox)             \
  V(F32x4RecipSqrtApprox, kMips64F32x4RecipSqrtApprox)     \
  V(I32x4SConvertF32x4, kMips64I32x4SConvertF32x4)         \
  V(I32x4UConvertF32x4, kMips64I32x4UConvertF32x4)         \
  V(I32x4Neg, kMips64I32x4Neg)                             \
  V(I32x4SConvertI16x8Low, kMips64I32x4SConvertI16x8Low)   \
  V(I32x4SConvertI16x8High, kMips64I32x4SConvertI16x8High) \
  V(I32x4UConvertI16x8Low, kMips64I32x4UConvertI16x8Low)   \
  V(I32x4UConvertI16x8High, kMips64I32x4UConvertI16x8High) \
  V(I16x8Neg, kMips64I16x8Neg)                             \
  V(I16x8SConvertI8x16Low, kMips64I16x8SConvertI8x16Low)   \
  V(I16x8SConvertI8x16High, kMips64I16x8SConvertI8x16High) \
  V(I16x8UConvertI8x16Low, kMips64I16x8UConvertI8x16Low)   \
  V(I16x8UConvertI8x16High, kMips64I16x8UConvertI8x16High) \
  V(I8x16Neg, kMips64I8x16Neg)                             \
  V(S128Not, kMips64S128Not)                               \
  V(S1x4AnyTrue, kMips64S1x4AnyTrue)                       \
  V(S1x4AllTrue, kMips64S1x4AllTrue)                       \
  V(S1x8AnyTrue, kMips64S1x8AnyTrue)                       \
  V(S1x8AllTrue, kMips64S1x8AllTrue)                       \
  V(S1x16AnyTrue, kMips64S1x16AnyTrue)                     \
  V(S1x16AllTrue, kMips64S1x16AllTrue)

// SIMD shift operations; each Visit##Name lowers to the kMips64##Name opcode
// with an immediate shift amount.
#define SIMD_SHIFT_OP_LIST(V) \
  V(I32x4Shl)                 \
  V(I32x4ShrS)                \
  V(I32x4ShrU)                \
  V(I16x8Shl)                 \
  V(I16x8ShrS)                \
  V(I16x8ShrU)                \
  V(I8x16Shl)                 \
  V(I8x16ShrS)                \
  V(I8x16ShrU)

// Binary SIMD operations paired with the machine opcode they lower to.
#define SIMD_BINOP_LIST(V)                         \
  V(F32x4Add, kMips64F32x4Add)                     \
  V(F32x4AddHoriz, kMips64F32x4AddHoriz)           \
  V(F32x4Sub, kMips64F32x4Sub)                     \
  V(F32x4Mul, kMips64F32x4Mul)                     \
  V(F32x4Max, kMips64F32x4Max)                     \
  V(F32x4Min, kMips64F32x4Min)                     \
  V(F32x4Eq, kMips64F32x4Eq)                       \
  V(F32x4Ne, kMips64F32x4Ne)                       \
  V(F32x4Lt, kMips64F32x4Lt)                       \
  V(F32x4Le, kMips64F32x4Le)                       \
  V(I32x4Add, kMips64I32x4Add)                     \
  V(I32x4AddHoriz, kMips64I32x4AddHoriz)           \
  V(I32x4Sub, kMips64I32x4Sub)                     \
  V(I32x4Mul, kMips64I32x4Mul)                     \
  V(I32x4MaxS, kMips64I32x4MaxS)                   \
  V(I32x4MinS, kMips64I32x4MinS)                   \
  V(I32x4MaxU, kMips64I32x4MaxU)                   \
  V(I32x4MinU, kMips64I32x4MinU)                   \
  V(I32x4Eq, kMips64I32x4Eq)                       \
  V(I32x4Ne, kMips64I32x4Ne)                       \
  V(I32x4GtS, kMips64I32x4GtS)                     \
  V(I32x4GeS, kMips64I32x4GeS)                     \
  V(I32x4GtU, kMips64I32x4GtU)                     \
  V(I32x4GeU, kMips64I32x4GeU)                     \
  V(I16x8Add, kMips64I16x8Add)                     \
  V(I16x8AddSaturateS, kMips64I16x8AddSaturateS)   \
  V(I16x8AddSaturateU, kMips64I16x8AddSaturateU)   \
  V(I16x8AddHoriz, kMips64I16x8AddHoriz)           \
  V(I16x8Sub, kMips64I16x8Sub)                     \
  V(I16x8SubSaturateS, kMips64I16x8SubSaturateS)   \
  V(I16x8SubSaturateU, kMips64I16x8SubSaturateU)   \
  V(I16x8Mul, kMips64I16x8Mul)                     \
  V(I16x8MaxS, kMips64I16x8MaxS)                   \
  V(I16x8MinS, kMips64I16x8MinS)                   \
  V(I16x8MaxU, kMips64I16x8MaxU)                   \
  V(I16x8MinU, kMips64I16x8MinU)                   \
  V(I16x8Eq, kMips64I16x8Eq)                       \
  V(I16x8Ne, kMips64I16x8Ne)                       \
  V(I16x8GtS, kMips64I16x8GtS)                     \
  V(I16x8GeS, kMips64I16x8GeS)                     \
  V(I16x8GtU, kMips64I16x8GtU)                     \
  V(I16x8GeU, kMips64I16x8GeU)                     \
  V(I16x8SConvertI32x4, kMips64I16x8SConvertI32x4) \
  V(I16x8UConvertI32x4, kMips64I16x8UConvertI32x4) \
  V(I8x16Add, kMips64I8x16Add)                     \
  V(I8x16AddSaturateS, kMips64I8x16AddSaturateS)   \
  V(I8x16AddSaturateU, kMips64I8x16AddSaturateU)   \
  V(I8x16Sub, kMips64I8x16Sub)                     \
  V(I8x16SubSaturateS, kMips64I8x16SubSaturateS)   \
  V(I8x16SubSaturateU, kMips64I8x16SubSaturateU)   \
  V(I8x16Mul, kMips64I8x16Mul)                     \
  V(I8x16MaxS, kMips64I8x16MaxS)                   \
  V(I8x16MinS, kMips64I8x16MinS)                   \
  V(I8x16MaxU, kMips64I8x16MaxU)                   \
  V(I8x16MinU, kMips64I8x16MinU)                   \
  V(I8x16Eq, kMips64I8x16Eq)                       \
  V(I8x16Ne, kMips64I8x16Ne)                       \
  V(I8x16GtS, kMips64I8x16GtS)                     \
  V(I8x16GeS, kMips64I8x16GeS)                     \
  V(I8x16GtU, kMips64I8x16GtU)                     \
  V(I8x16GeU, kMips64I8x16GeU)                     \
  V(I8x16SConvertI16x8, kMips64I8x16SConvertI16x8) \
  V(I8x16UConvertI16x8, kMips64I8x16UConvertI16x8) \
  V(S128And, kMips64S128And)                       \
  V(S128Or, kMips64S128Or)                         \
  V(S128Xor, kMips64S128Xor)
2693
VisitS128Zero(Node * node)2694 void InstructionSelector::VisitS128Zero(Node* node) {
2695 Mips64OperandGenerator g(this);
2696 Emit(kMips64S128Zero, g.DefineSameAsFirst(node));
2697 }
2698
// Splat: replicate a scalar into every lane (register -> register).
#define SIMD_VISIT_SPLAT(Type)                               \
  void InstructionSelector::Visit##Type##Splat(Node* node) { \
    VisitRR(this, kMips64##Type##Splat, node);               \
  }
SIMD_TYPE_LIST(SIMD_VISIT_SPLAT)
#undef SIMD_VISIT_SPLAT

// Extract lane: register input plus an immediate lane index.
#define SIMD_VISIT_EXTRACT_LANE(Type)                              \
  void InstructionSelector::Visit##Type##ExtractLane(Node* node) { \
    VisitRRI(this, kMips64##Type##ExtractLane, node);              \
  }
SIMD_TYPE_LIST(SIMD_VISIT_EXTRACT_LANE)
#undef SIMD_VISIT_EXTRACT_LANE

// Replace lane: vector input, immediate lane index, scalar replacement.
#define SIMD_VISIT_REPLACE_LANE(Type)                              \
  void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
    VisitRRIR(this, kMips64##Type##ReplaceLane, node);             \
  }
SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
#undef SIMD_VISIT_REPLACE_LANE

// Unary SIMD ops: single register input and register output.
#define SIMD_VISIT_UNOP(Name, instruction)            \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRR(this, instruction, node);                 \
  }
SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
#undef SIMD_VISIT_UNOP

// SIMD shifts: register input plus an immediate shift amount.
#define SIMD_VISIT_SHIFT_OP(Name)                     \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRRI(this, kMips64##Name, node);              \
  }
SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
#undef SIMD_VISIT_SHIFT_OP

// Binary SIMD ops: two register inputs and a register output.
#define SIMD_VISIT_BINOP(Name, instruction)           \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRRR(this, instruction, node);                \
  }
SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
#undef SIMD_VISIT_BINOP
2740
// S128Select takes three vector inputs and lowers to a single
// three-operand select instruction.
void InstructionSelector::VisitS128Select(Node* node) {
  VisitRRRR(this, kMips64S128Select, node);
}
2744
2745 namespace {
2746
// Pairs a canonical 16-byte shuffle pattern with the architecture opcode
// that implements it directly.
struct ShuffleEntry {
  uint8_t shuffle[kSimd128Size];
  ArchOpcode opcode;
};
2751
// Shuffle patterns that map to a single instruction. Indices refer to the
// bytes of the two concatenated 16-byte inputs (0-31); for swizzles only the
// masked low bits are compared (see TryMatchArchShuffle).
static const ShuffleEntry arch_shuffles[] = {
    // 32-bit lane shuffles.
    {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
     kMips64S32x4InterleaveRight},
    {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
     kMips64S32x4InterleaveLeft},
    {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27},
     kMips64S32x4PackEven},
    {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31},
     kMips64S32x4PackOdd},
    {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27},
     kMips64S32x4InterleaveEven},
    {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31},
     kMips64S32x4InterleaveOdd},

    // 16-bit lane shuffles.
    {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
     kMips64S16x8InterleaveRight},
    {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
     kMips64S16x8InterleaveLeft},
    {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
     kMips64S16x8PackEven},
    {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
     kMips64S16x8PackOdd},
    {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29},
     kMips64S16x8InterleaveEven},
    {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31},
     kMips64S16x8InterleaveOdd},
    {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9},
     kMips64S16x4Reverse},
    {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13},
     kMips64S16x2Reverse},

    // 8-bit lane shuffles.
    {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
     kMips64S8x16InterleaveRight},
    {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
     kMips64S8x16InterleaveLeft},
    {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
     kMips64S8x16PackEven},
    {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
     kMips64S8x16PackOdd},
    {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
     kMips64S8x16InterleaveEven},
    {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
     kMips64S8x16InterleaveOdd},
    {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8},
     kMips64S8x8Reverse},
    {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12},
     kMips64S8x4Reverse},
    {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14},
     kMips64S8x2Reverse}};
2801
TryMatchArchShuffle(const uint8_t * shuffle,const ShuffleEntry * table,size_t num_entries,bool is_swizzle,ArchOpcode * opcode)2802 bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
2803 size_t num_entries, bool is_swizzle,
2804 ArchOpcode* opcode) {
2805 uint8_t mask = is_swizzle ? kSimd128Size - 1 : 2 * kSimd128Size - 1;
2806 for (size_t i = 0; i < num_entries; ++i) {
2807 const ShuffleEntry& entry = table[i];
2808 int j = 0;
2809 for (; j < kSimd128Size; ++j) {
2810 if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
2811 break;
2812 }
2813 }
2814 if (j == kSimd128Size) {
2815 *opcode = entry.opcode;
2816 return true;
2817 }
2818 }
2819 return false;
2820 }
2821
2822 } // namespace
2823
VisitS8x16Shuffle(Node * node)2824 void InstructionSelector::VisitS8x16Shuffle(Node* node) {
2825 uint8_t shuffle[kSimd128Size];
2826 bool is_swizzle;
2827 CanonicalizeShuffle(node, shuffle, &is_swizzle);
2828 uint8_t shuffle32x4[4];
2829 ArchOpcode opcode;
2830 if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
2831 is_swizzle, &opcode)) {
2832 VisitRRR(this, opcode, node);
2833 return;
2834 }
2835 Node* input0 = node->InputAt(0);
2836 Node* input1 = node->InputAt(1);
2837 uint8_t offset;
2838 Mips64OperandGenerator g(this);
2839 if (TryMatchConcat(shuffle, &offset)) {
2840 Emit(kMips64S8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input1),
2841 g.UseRegister(input0), g.UseImmediate(offset));
2842 return;
2843 }
2844 if (TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
2845 Emit(kMips64S32x4Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
2846 g.UseRegister(input1), g.UseImmediate(Pack4Lanes(shuffle32x4)));
2847 return;
2848 }
2849 Emit(kMips64S8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
2850 g.UseRegister(input1), g.UseImmediate(Pack4Lanes(shuffle)),
2851 g.UseImmediate(Pack4Lanes(shuffle + 4)),
2852 g.UseImmediate(Pack4Lanes(shuffle + 8)),
2853 g.UseImmediate(Pack4Lanes(shuffle + 12)));
2854 }
2855
VisitSignExtendWord8ToInt32(Node * node)2856 void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
2857 Mips64OperandGenerator g(this);
2858 Emit(kMips64Seb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
2859 }
2860
VisitSignExtendWord16ToInt32(Node * node)2861 void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
2862 Mips64OperandGenerator g(this);
2863 Emit(kMips64Seh, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
2864 }
2865
VisitSignExtendWord8ToInt64(Node * node)2866 void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) {
2867 Mips64OperandGenerator g(this);
2868 Emit(kMips64Seb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
2869 }
2870
VisitSignExtendWord16ToInt64(Node * node)2871 void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) {
2872 Mips64OperandGenerator g(this);
2873 Emit(kMips64Seh, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
2874 }
2875
VisitSignExtendWord32ToInt64(Node * node)2876 void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
2877 Mips64OperandGenerator g(this);
2878 Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
2879 g.TempImmediate(0));
2880 }
2881
2882 // static
2883 MachineOperatorBuilder::Flags
SupportedMachineOperatorFlags()2884 InstructionSelector::SupportedMachineOperatorFlags() {
2885 MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags;
2886 return flags | MachineOperatorBuilder::kWord32Ctz |
2887 MachineOperatorBuilder::kWord64Ctz |
2888 MachineOperatorBuilder::kWord32Popcnt |
2889 MachineOperatorBuilder::kWord64Popcnt |
2890 MachineOperatorBuilder::kWord32ShiftIsSafe |
2891 MachineOperatorBuilder::kInt32DivIsSafe |
2892 MachineOperatorBuilder::kUint32DivIsSafe |
2893 MachineOperatorBuilder::kFloat64RoundDown |
2894 MachineOperatorBuilder::kFloat32RoundDown |
2895 MachineOperatorBuilder::kFloat64RoundUp |
2896 MachineOperatorBuilder::kFloat32RoundUp |
2897 MachineOperatorBuilder::kFloat64RoundTruncate |
2898 MachineOperatorBuilder::kFloat32RoundTruncate |
2899 MachineOperatorBuilder::kFloat64RoundTiesEven |
2900 MachineOperatorBuilder::kFloat32RoundTiesEven;
2901 }
2902
2903 // static
2904 MachineOperatorBuilder::AlignmentRequirements
AlignmentRequirements()2905 InstructionSelector::AlignmentRequirements() {
2906 if (kArchVariant == kMips64r6) {
2907 return MachineOperatorBuilder::AlignmentRequirements::
2908 FullUnalignedAccessSupport();
2909 } else {
2910 DCHECK_EQ(kMips64r2, kArchVariant);
2911 return MachineOperatorBuilder::AlignmentRequirements::
2912 NoUnalignedAccessSupport();
2913 }
2914 }
2915
2916 #undef SIMD_BINOP_LIST
2917 #undef SIMD_SHIFT_OP_LIST
2918 #undef SIMD_UNOP_LIST
2919 #undef SIMD_TYPE_LIST
2920 #undef TRACE_UNIMPL
2921 #undef TRACE
2922
2923 } // namespace compiler
2924 } // namespace internal
2925 } // namespace v8
2926