// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/bits.h"
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"

namespace v8 {
namespace internal {
namespace compiler {

#define TRACE_UNIMPL() \
  PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)

#define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)

// Adds RISC-V-specific methods for generating InstructionOperands.
class RiscvOperandGenerator final : public OperandGenerator {
 public:
  explicit RiscvOperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  InstructionOperand UseOperand(Node* node, InstructionCode opcode) {
    if (CanBeImmediate(node, opcode)) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  // Use the zero register if the node has the immediate value zero, otherwise
  // assign a register.
  InstructionOperand UseRegisterOrImmediateZero(Node* node) {
    if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
        (IsFloatConstant(node) &&
         (bit_cast<int64_t>(GetFloatConstantValue(node)) == 0))) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  bool IsIntegerConstant(Node* node) {
    if (node->opcode() == IrOpcode::kNumberConstant) {
      const double value = OpParameter<double>(node->op());
      return bit_cast<int64_t>(value) == 0;
    }
    return (node->opcode() == IrOpcode::kInt32Constant) ||
           (node->opcode() == IrOpcode::kInt64Constant);
  }

  int64_t GetIntegerConstantValue(Node* node) {
    if (node->opcode() == IrOpcode::kInt32Constant) {
      return OpParameter<int32_t>(node->op());
    } else if (node->opcode() == IrOpcode::kInt64Constant) {
      return OpParameter<int64_t>(node->op());
    }
    DCHECK_EQ(node->opcode(), IrOpcode::kNumberConstant);
    const double value = OpParameter<double>(node->op());
    DCHECK_EQ(bit_cast<int64_t>(value), 0);
    return bit_cast<int64_t>(value);
  }

  bool IsFloatConstant(Node* node) {
    return (node->opcode() == IrOpcode::kFloat32Constant) ||
           (node->opcode() == IrOpcode::kFloat64Constant);
  }

  double GetFloatConstantValue(Node* node) {
    if (node->opcode() == IrOpcode::kFloat32Constant) {
      return OpParameter<float>(node->op());
    }
    DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
    return OpParameter<double>(node->op());
  }

  bool CanBeImmediate(Node* node, InstructionCode mode) {
    return IsIntegerConstant(node) &&
           CanBeImmediate(GetIntegerConstantValue(node), mode);
  }

  bool CanBeImmediate(int64_t value, InstructionCode opcode) {
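    // RISC-V shift instructions take 5-bit (32-bit shifts) or 6-bit (64-bit
    // shifts) amounts, and ALU instructions take a signed 12-bit I-type
    // immediate. Memory operands are allowed a wider 32-bit offset here, on
    // the assumption that the macro-assembler materializes offsets that do
    // not fit in 12 bits via a scratch register.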
    switch (ArchOpcodeField::decode(opcode)) {
      case kRiscvShl32:
      case kRiscvSar32:
      case kRiscvShr32:
        return is_uint5(value);
      case kRiscvShl64:
      case kRiscvSar64:
      case kRiscvShr64:
        return is_uint6(value);
      case kRiscvAdd32:
      case kRiscvAnd32:
      case kRiscvAnd:
      case kRiscvAdd64:
      case kRiscvOr32:
      case kRiscvOr:
      case kRiscvTst:
      case kRiscvXor:
        return is_int12(value);
      case kRiscvLb:
      case kRiscvLbu:
      case kRiscvSb:
      case kRiscvLh:
      case kRiscvLhu:
      case kRiscvSh:
      case kRiscvLw:
      case kRiscvSw:
      case kRiscvLd:
      case kRiscvSd:
      case kRiscvLoadFloat:
      case kRiscvStoreFloat:
      case kRiscvLoadDouble:
      case kRiscvStoreDouble:
        return is_int32(value);
      default:
        return is_int12(value);
    }
  }

 private:
  bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
    TRACE_UNIMPL();
    return false;
  }
};

static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
                    Node* node) {
  RiscvOperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)));
}

static void VisitRRI(InstructionSelector* selector, ArchOpcode opcode,
                     Node* node) {
  RiscvOperandGenerator g(selector);
  int32_t imm = OpParameter<int32_t>(node->op());
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)), g.UseImmediate(imm));
}

static void VisitSimdShift(InstructionSelector* selector, ArchOpcode opcode,
                           Node* node) {
  RiscvOperandGenerator g(selector);
  if (g.IsIntegerConstant(node->InputAt(1))) {
    selector->Emit(opcode, g.DefineAsRegister(node),
                   g.UseRegister(node->InputAt(0)),
                   g.UseImmediate(node->InputAt(1)));
  } else {
    selector->Emit(opcode, g.DefineAsRegister(node),
                   g.UseRegister(node->InputAt(0)),
                   g.UseRegister(node->InputAt(1)));
  }
}

static void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode,
                      Node* node) {
  RiscvOperandGenerator g(selector);
  int32_t imm = OpParameter<int32_t>(node->op());
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)), g.UseImmediate(imm),
                 g.UseRegister(node->InputAt(1)));
}

static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
                     Node* node) {
  RiscvOperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseRegister(node->InputAt(1)));
}

static void VisitUniqueRRR(InstructionSelector* selector, ArchOpcode opcode,
                           Node* node) {
  RiscvOperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseUniqueRegister(node->InputAt(0)),
                 g.UseUniqueRegister(node->InputAt(1)));
}

void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
  RiscvOperandGenerator g(selector);
  selector->Emit(
      opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
      g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2)));
}

static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
                     Node* node) {
  RiscvOperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseOperand(node->InputAt(1), opcode));
}

struct ExtendingLoadMatcher {
  ExtendingLoadMatcher(Node* node, InstructionSelector* selector)
      : matches_(false), selector_(selector), base_(nullptr), immediate_(0) {
    Initialize(node);
  }

  bool Matches() const { return matches_; }

  Node* base() const {
    DCHECK(Matches());
    return base_;
  }
  int64_t immediate() const {
    DCHECK(Matches());
    return immediate_;
  }
  ArchOpcode opcode() const {
    DCHECK(Matches());
    return opcode_;
  }

 private:
  bool matches_;
  InstructionSelector* selector_;
  Node* base_;
  int64_t immediate_;
  ArchOpcode opcode_;

  void Initialize(Node* node) {
    Int64BinopMatcher m(node);
    // When loading a 64-bit value and shifting by 32, we should
    // just load and sign-extend the interesting 4 bytes instead.
    // This happens, for example, when we're loading and untagging SMIs.
    DCHECK(m.IsWord64Sar());
    if (m.left().IsLoad() && m.right().Is(32) &&
        selector_->CanCover(m.node(), m.left().node())) {
      DCHECK_EQ(selector_->GetEffectLevel(node),
                selector_->GetEffectLevel(m.left().node()));
      MachineRepresentation rep =
          LoadRepresentationOf(m.left().node()->op()).representation();
      DCHECK_EQ(3, ElementSizeLog2Of(rep));
      if (rep != MachineRepresentation::kTaggedSigned &&
          rep != MachineRepresentation::kTaggedPointer &&
          rep != MachineRepresentation::kTagged &&
          rep != MachineRepresentation::kWord64) {
        return;
      }

      RiscvOperandGenerator g(selector_);
      Node* load = m.left().node();
      Node* offset = load->InputAt(1);
      base_ = load->InputAt(0);
      opcode_ = kRiscvLw;
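      // A 32-bit load of the upper word replaces the 64-bit load plus shift;
      // on little-endian targets that word sits 4 bytes past the offset.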
      if (g.CanBeImmediate(offset, opcode_)) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
        immediate_ = g.GetIntegerConstantValue(offset) + 4;
#elif defined(V8_TARGET_BIG_ENDIAN)
        immediate_ = g.GetIntegerConstantValue(offset);
#endif
        matches_ = g.CanBeImmediate(immediate_, kRiscvLw);
      }
    }
  }
};

bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node,
                          Node* output_node) {
  ExtendingLoadMatcher m(node, selector);
  RiscvOperandGenerator g(selector);
  if (m.Matches()) {
    InstructionOperand inputs[2];
    inputs[0] = g.UseRegister(m.base());
    InstructionCode opcode =
        m.opcode() | AddressingModeField::encode(kMode_MRI);
    DCHECK(is_int32(m.immediate()));
    inputs[1] = g.TempImmediate(static_cast<int32_t>(m.immediate()));
    InstructionOperand outputs[] = {g.DefineAsRegister(output_node)};
    selector->Emit(opcode, arraysize(outputs), outputs, arraysize(inputs),
                   inputs);
    return true;
  }
  return false;
}

bool TryMatchImmediate(InstructionSelector* selector,
                       InstructionCode* opcode_return, Node* node,
                       size_t* input_count_return, InstructionOperand* inputs) {
  RiscvOperandGenerator g(selector);
  if (g.CanBeImmediate(node, *opcode_return)) {
    *opcode_return |= AddressingModeField::encode(kMode_MRI);
    inputs[0] = g.UseImmediate(node);
    *input_count_return = 1;
    return true;
  }
  return false;
}

static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, bool has_reverse_opcode,
                       InstructionCode reverse_opcode,
                       FlagsContinuation* cont) {
  RiscvOperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand inputs[2];
  size_t input_count = 0;
  InstructionOperand outputs[1];
  size_t output_count = 0;
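  // Prefer folding a constant operand into the immediate slot: first try the
  // right operand, then, if the operation has a reverse opcode (i.e. the
  // operands may be swapped), the left one.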
  if (TryMatchImmediate(selector, &opcode, m.right().node(), &input_count,
                        &inputs[1])) {
    inputs[0] = g.UseRegisterOrImmediateZero(m.left().node());
    input_count++;
  } else if (has_reverse_opcode &&
             TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
                               &input_count, &inputs[1])) {
    inputs[0] = g.UseRegisterOrImmediateZero(m.right().node());
    opcode = reverse_opcode;
    input_count++;
  } else {
    inputs[input_count++] = g.UseRegister(m.left().node());
    inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
  }

  if (cont->IsDeoptimize()) {
    // If we can deoptimize as a result of the binop, we need to make sure that
    // the deopt inputs are not overwritten by the binop result. One way
    // to achieve that is to declare the output register as same-as-first.
    outputs[output_count++] = g.DefineSameAsFirst(node);
  } else {
    outputs[output_count++] = g.DefineAsRegister(node);
  }

  DCHECK_NE(0u, input_count);
  DCHECK_EQ(1u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
                                 inputs, cont);
}

static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, bool has_reverse_opcode,
                       InstructionCode reverse_opcode) {
  FlagsContinuation cont;
  VisitBinop(selector, node, opcode, has_reverse_opcode, reverse_opcode, &cont);
}

static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
  VisitBinop(selector, node, opcode, false, kArchNop, cont);
}

static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode) {
  VisitBinop(selector, node, opcode, false, kArchNop);
}

void InstructionSelector::VisitStackSlot(Node* node) {
  StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
  int alignment = rep.alignment();
  int slot = frame_->AllocateSpillSlot(rep.size(), alignment);
  OperandGenerator g(this);

  Emit(kArchStackSlot, g.DefineAsRegister(node),
       sequence()->AddImmediate(Constant(slot)),
       sequence()->AddImmediate(Constant(alignment)), 0, nullptr);
}

void InstructionSelector::VisitAbortCSADcheck(Node* node) {
  RiscvOperandGenerator g(this);
  Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
}

void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
              Node* output = nullptr) {
  RiscvOperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
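  // Loads from an external reference that is addressable relative to the
  // roots register can skip the base register entirely and use a single
  // root-relative immediate offset.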
  ExternalReferenceMatcher m(base);
  if (m.HasResolvedValue() && g.IsIntegerConstant(index) &&
      selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
    ptrdiff_t const delta =
        g.GetIntegerConstantValue(index) +
        TurboAssemblerBase::RootRegisterOffsetForExternalReference(
            selector->isolate(), m.ResolvedValue());
    // Check that the delta is a 32-bit integer due to the limitations of
    // immediate operands.
    if (is_int32(delta)) {
      opcode |= AddressingModeField::encode(kMode_Root);
      selector->Emit(opcode,
                     g.DefineAsRegister(output == nullptr ? node : output),
                     g.UseImmediate(static_cast<int32_t>(delta)));
      return;
    }
  }

  if (g.CanBeImmediate(index, opcode)) {
    selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
                   g.DefineAsRegister(output == nullptr ? node : output),
                   g.UseRegister(base), g.UseImmediate(index));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    selector->Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None),
                   addr_reg, g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
                   g.DefineAsRegister(output == nullptr ? node : output),
                   addr_reg, g.TempImmediate(0));
  }
}

void EmitS128Load(InstructionSelector* selector, Node* node,
                  InstructionCode opcode, VSew sew, Vlmul lmul) {
  RiscvOperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  if (g.CanBeImmediate(index, opcode)) {
    selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
                   g.DefineAsRegister(node), g.UseRegister(base),
                   g.UseImmediate(index), g.UseImmediate(sew),
                   g.UseImmediate(lmul));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    selector->Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None),
                   addr_reg, g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
                   g.DefineAsRegister(node), addr_reg, g.TempImmediate(0),
                   g.UseImmediate(sew), g.UseImmediate(lmul));
  }
}

void InstructionSelector::VisitStoreLane(Node* node) {
  StoreLaneParameters params = StoreLaneParametersOf(node->op());
  LoadStoreLaneParams f(params.rep, params.laneidx);
  InstructionCode opcode = kRiscvS128StoreLane;
  opcode |= MiscField::encode(f.sz);

  RiscvOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  InstructionOperand addr_reg = g.TempRegister();
  Emit(kRiscvAdd64, addr_reg, g.UseRegister(base), g.UseRegister(index));
  InstructionOperand inputs[4] = {
      g.UseRegister(node->InputAt(2)),
      g.UseImmediate(f.laneidx),
      addr_reg,
      g.TempImmediate(0),
  };
  opcode |= AddressingModeField::encode(kMode_MRI);
  Emit(opcode, 0, nullptr, 4, inputs);
}

void InstructionSelector::VisitLoadLane(Node* node) {
  LoadLaneParameters params = LoadLaneParametersOf(node->op());
  LoadStoreLaneParams f(params.rep.representation(), params.laneidx);
  InstructionCode opcode = kRiscvS128LoadLane;
  opcode |= MiscField::encode(f.sz);

  RiscvOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  InstructionOperand addr_reg = g.TempRegister();
  Emit(kRiscvAdd64, addr_reg, g.UseRegister(base), g.UseRegister(index));
  opcode |= AddressingModeField::encode(kMode_MRI);
  Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(2)),
       g.UseImmediate(params.laneidx), addr_reg, g.TempImmediate(0));
}

void InstructionSelector::VisitLoadTransform(Node* node) {
  LoadTransformParameters params = LoadTransformParametersOf(node->op());

  switch (params.transformation) {
    case LoadTransformation::kS128Load8Splat:
      EmitS128Load(this, node, kRiscvS128LoadSplat, E8, m1);
      break;
    case LoadTransformation::kS128Load16Splat:
      EmitS128Load(this, node, kRiscvS128LoadSplat, E16, m1);
      break;
    case LoadTransformation::kS128Load32Splat:
      EmitS128Load(this, node, kRiscvS128LoadSplat, E32, m1);
      break;
    case LoadTransformation::kS128Load64Splat:
      EmitS128Load(this, node, kRiscvS128LoadSplat, E64, m1);
      break;
    case LoadTransformation::kS128Load8x8S:
      EmitS128Load(this, node, kRiscvS128Load64ExtendS, E16, m1);
      break;
    case LoadTransformation::kS128Load8x8U:
      EmitS128Load(this, node, kRiscvS128Load64ExtendU, E16, m1);
      break;
    case LoadTransformation::kS128Load16x4S:
      EmitS128Load(this, node, kRiscvS128Load64ExtendS, E32, m1);
      break;
    case LoadTransformation::kS128Load16x4U:
      EmitS128Load(this, node, kRiscvS128Load64ExtendU, E32, m1);
      break;
    case LoadTransformation::kS128Load32x2S:
      EmitS128Load(this, node, kRiscvS128Load64ExtendS, E64, m1);
      break;
    case LoadTransformation::kS128Load32x2U:
      EmitS128Load(this, node, kRiscvS128Load64ExtendU, E64, m1);
      break;
    case LoadTransformation::kS128Load32Zero:
      EmitS128Load(this, node, kRiscvS128Load32Zero, E32, m1);
      break;
    case LoadTransformation::kS128Load64Zero:
      EmitS128Load(this, node, kRiscvS128Load64Zero, E64, m1);
      break;
    default:
      UNIMPLEMENTED();
  }
}

void InstructionSelector::VisitLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());

  InstructionCode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kRiscvLoadFloat;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kRiscvLoadDouble;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsUnsigned() ? kRiscvLbu : kRiscvLb;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsUnsigned() ? kRiscvLhu : kRiscvLh;
      break;
    case MachineRepresentation::kWord32:
      opcode = kRiscvLw;
      break;
#ifdef V8_COMPRESS_POINTERS
    case MachineRepresentation::kTaggedSigned:
      opcode = kRiscvLoadDecompressTaggedSigned;
      break;
    case MachineRepresentation::kTaggedPointer:
      opcode = kRiscvLoadDecompressTaggedPointer;
      break;
    case MachineRepresentation::kTagged:
      opcode = kRiscvLoadDecompressAnyTagged;
      break;
#else
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
#endif
    case MachineRepresentation::kWord64:
      opcode = kRiscvLd;
      break;
    case MachineRepresentation::kSimd128:
      opcode = kRiscvRvvLd;
      break;
    case MachineRepresentation::kCompressedPointer:
    case MachineRepresentation::kCompressed:
#ifdef V8_COMPRESS_POINTERS
      opcode = kRiscvLw;
      break;
#else
      // Fall through.
#endif
    case MachineRepresentation::kSandboxedPointer:
    case MachineRepresentation::kMapWord:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
  }

  EmitLoad(this, node, opcode);
}

void InstructionSelector::VisitProtectedLoad(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}

void InstructionSelector::VisitStore(Node* node) {
  RiscvOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
  MachineRepresentation rep = store_rep.representation();

  // TODO(riscv): I guess this could be done in a better way.
  if (write_barrier_kind != kNoWriteBarrier &&
      V8_LIKELY(!FLAG_disable_write_barriers)) {
    DCHECK(CanBeTaggedPointer(rep));
    InstructionOperand inputs[3];
    size_t input_count = 0;
    inputs[input_count++] = g.UseUniqueRegister(base);
    inputs[input_count++] = g.UseUniqueRegister(index);
    inputs[input_count++] = g.UseUniqueRegister(value);
    RecordWriteMode record_write_mode =
        WriteBarrierKindToRecordWriteMode(write_barrier_kind);
    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
    size_t const temp_count = arraysize(temps);
    InstructionCode code = kArchStoreWithWriteBarrier;
    code |= MiscField::encode(static_cast<int>(record_write_mode));
    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
  } else {
    ArchOpcode opcode;
    switch (rep) {
      case MachineRepresentation::kFloat32:
        opcode = kRiscvStoreFloat;
        break;
      case MachineRepresentation::kFloat64:
        opcode = kRiscvStoreDouble;
        break;
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = kRiscvSb;
        break;
      case MachineRepresentation::kWord16:
        opcode = kRiscvSh;
        break;
      case MachineRepresentation::kWord32:
        opcode = kRiscvSw;
        break;
      case MachineRepresentation::kTaggedSigned:   // Fall through.
      case MachineRepresentation::kTaggedPointer:  // Fall through.
      case MachineRepresentation::kTagged:
#ifdef V8_COMPRESS_POINTERS
        opcode = kRiscvStoreCompressTagged;
        break;
#endif
      case MachineRepresentation::kWord64:
        opcode = kRiscvSd;
        break;
      case MachineRepresentation::kSimd128:
        opcode = kRiscvRvvSt;
        break;
      case MachineRepresentation::kCompressedPointer:  // Fall through.
      case MachineRepresentation::kCompressed:
#ifdef V8_COMPRESS_POINTERS
        opcode = kRiscvStoreCompressTagged;
        break;
#else
        UNREACHABLE();
#endif
      case MachineRepresentation::kSandboxedPointer:
      case MachineRepresentation::kMapWord:  // Fall through.
      case MachineRepresentation::kNone:
        UNREACHABLE();
    }

    if (g.CanBeImmediate(index, opcode)) {
      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
           g.UseRegister(base), g.UseImmediate(index),
           g.UseRegisterOrImmediateZero(value));
    } else {
      InstructionOperand addr_reg = g.TempRegister();
      Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None), addr_reg,
           g.UseRegister(index), g.UseRegister(base));
      // Emit desired store opcode, using temp addr_reg.
      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
           addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
    }
  }
}

void InstructionSelector::VisitProtectedStore(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}

void InstructionSelector::VisitWord32And(Node* node) {
  VisitBinop(this, node, kRiscvAnd32, true, kRiscvAnd32);
}

void InstructionSelector::VisitWord64And(Node* node) {
  RiscvOperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
      m.right().HasResolvedValue()) {
    uint64_t mask = m.right().ResolvedValue();
    uint32_t mask_width = base::bits::CountPopulation(mask);
    uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
    if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
      // The mask must be contiguous, and occupy the least-significant bits.
      DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));

      // Select Dext for And(Shr(x, imm), mask) where the mask is in the least
      // significant bits.
      Int64BinopMatcher mleft(m.left().node());
      if (mleft.right().HasResolvedValue()) {
        // Any shift value can match; int64 shifts use `value % 64`.
        uint32_t lsb =
            static_cast<uint32_t>(mleft.right().ResolvedValue() & 0x3F);

        // Dext cannot extract bits past the register size, however since
        // shifting the original value would have introduced some zeros we can
        // still use Dext with a smaller mask and the remaining bits will be
        // zeros.
        if (lsb + mask_width > 64) mask_width = 64 - lsb;

        if (lsb == 0 && mask_width == 64) {
          Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(mleft.left().node()));
          return;
        }
      }
      // Other cases fall through to the normal And operation.
    }
  }
  VisitBinop(this, node, kRiscvAnd, true, kRiscvAnd);
}

void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kRiscvOr32, true, kRiscvOr32);
}

void InstructionSelector::VisitWord64Or(Node* node) {
  VisitBinop(this, node, kRiscvOr, true, kRiscvOr);
}

void InstructionSelector::VisitWord32Xor(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
      m.right().Is(-1)) {
    Int32BinopMatcher mleft(m.left().node());
    if (!mleft.right().HasResolvedValue()) {
      RiscvOperandGenerator g(this);
      Emit(kRiscvNor32, g.DefineAsRegister(node),
           g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
  }
  if (m.right().Is(-1)) {
    // Use Nor for bit negation and eliminate constant loading for xori.
    RiscvOperandGenerator g(this);
    Emit(kRiscvNor32, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
         g.TempImmediate(0));
    return;
  }
  VisitBinop(this, node, kRiscvXor32, true, kRiscvXor32);
}

void InstructionSelector::VisitWord64Xor(Node* node) {
  Int64BinopMatcher m(node);
  if (m.left().IsWord64Or() && CanCover(node, m.left().node()) &&
      m.right().Is(-1)) {
    Int64BinopMatcher mleft(m.left().node());
    if (!mleft.right().HasResolvedValue()) {
      RiscvOperandGenerator g(this);
      Emit(kRiscvNor, g.DefineAsRegister(node),
           g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
  }
  if (m.right().Is(-1)) {
    // Use Nor for bit negation and eliminate constant loading for xori.
    RiscvOperandGenerator g(this);
    Emit(kRiscvNor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
         g.TempImmediate(0));
    return;
  }
  VisitBinop(this, node, kRiscvXor, true, kRiscvXor);
}

void InstructionSelector::VisitWord32Shl(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
      m.right().IsInRange(1, 31)) {
    RiscvOperandGenerator g(this);
    Int32BinopMatcher mleft(m.left().node());
    // Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is
    // contiguous, and the shift immediate non-zero.
    if (mleft.right().HasResolvedValue()) {
      uint32_t mask = mleft.right().ResolvedValue();
      uint32_t mask_width = base::bits::CountPopulation(mask);
      uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
      if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
        uint32_t shift = m.right().ResolvedValue();
        DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
        DCHECK_NE(0u, shift);
        if ((shift + mask_width) >= 32) {
          // If the mask is contiguous and reaches or extends beyond the top
          // bit, only the shift is needed.
          Emit(kRiscvShl32, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()),
               g.UseImmediate(m.right().node()));
          return;
        }
      }
    }
  }
  VisitRRO(this, kRiscvShl32, node);
}

void InstructionSelector::VisitWord32Shr(Node* node) {
  VisitRRO(this, kRiscvShr32, node);
}

void InstructionSelector::VisitWord32Sar(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Shl() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    if (m.right().HasResolvedValue() && mleft.right().HasResolvedValue()) {
      RiscvOperandGenerator g(this);
      uint32_t sar = m.right().ResolvedValue();
      uint32_t shl = mleft.right().ResolvedValue();
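      // (x << K) >> K sign-extends the low (32 - K) bits, so shift pairs of
      // 16 and 24 are 16-bit and 8-bit sign-extensions respectively.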
      if ((sar == shl) && (sar == 16)) {
        Emit(kRiscvSignExtendShort, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()));
        return;
      } else if ((sar == shl) && (sar == 24)) {
        Emit(kRiscvSignExtendByte, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()));
        return;
      } else if ((sar == shl) && (sar == 32)) {
        Emit(kRiscvShl32, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(0));
        return;
      }
    }
  }
  VisitRRO(this, kRiscvSar32, node);
}

void InstructionSelector::VisitWord64Shl(Node* node) {
  RiscvOperandGenerator g(this);
  Int64BinopMatcher m(node);
  if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
      m.right().IsInRange(32, 63) && CanCover(node, m.left().node())) {
    // There's no need to sign/zero-extend to 64-bit if we shift out the upper
    // 32 bits anyway.
    Emit(kRiscvShl64, g.DefineSameAsFirst(node),
         g.UseRegister(m.left().node()->InputAt(0)),
         g.UseImmediate(m.right().node()));
    return;
  }
  if (m.left().IsWord64And() && CanCover(node, m.left().node()) &&
      m.right().IsInRange(1, 63)) {
    // Match Word64Shl(Word64And(x, mask), imm) to Dshl where the mask is
    // contiguous, and the shift immediate non-zero.
    Int64BinopMatcher mleft(m.left().node());
    if (mleft.right().HasResolvedValue()) {
      uint64_t mask = mleft.right().ResolvedValue();
      uint32_t mask_width = base::bits::CountPopulation(mask);
      uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
      if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
        uint64_t shift = m.right().ResolvedValue();
        DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
        DCHECK_NE(0u, shift);

        if ((shift + mask_width) >= 64) {
          // If the mask is contiguous and reaches or extends beyond the top
          // bit, only the shift is needed.
          Emit(kRiscvShl64, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()),
               g.UseImmediate(m.right().node()));
          return;
        }
      }
    }
  }
  VisitRRO(this, kRiscvShl64, node);
}

void InstructionSelector::VisitWord64Shr(Node* node) {
  VisitRRO(this, kRiscvShr64, node);
}

void InstructionSelector::VisitWord64Sar(Node* node) {
  if (TryEmitExtendingLoad(this, node, node)) return;
  VisitRRO(this, kRiscvSar64, node);
}

void InstructionSelector::VisitWord32Rol(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitWord64Rol(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitRRO(this, kRiscvRor32, node);
}

void InstructionSelector::VisitWord32Clz(Node* node) {
  VisitRR(this, kRiscvClz32, node);
}

void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
  RiscvOperandGenerator g(this);
  Emit(kRiscvByteSwap64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
  RiscvOperandGenerator g(this);
  Emit(kRiscvByteSwap32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitSimd128ReverseBytes(Node* node) {
  UNREACHABLE();
}

void InstructionSelector::VisitWord32Ctz(Node* node) {
  RiscvOperandGenerator g(this);
  Emit(kRiscvCtz32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitWord64Ctz(Node* node) {
  RiscvOperandGenerator g(this);
  Emit(kRiscvCtz64, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitWord32Popcnt(Node* node) {
  RiscvOperandGenerator g(this);
  Emit(kRiscvPopcnt32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitWord64Popcnt(Node* node) {
  RiscvOperandGenerator g(this);
  Emit(kRiscvPopcnt64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitWord64Ror(Node* node) {
  VisitRRO(this, kRiscvRor64, node);
}

void InstructionSelector::VisitWord64Clz(Node* node) {
  VisitRR(this, kRiscvClz64, node);
}

void InstructionSelector::VisitInt32Add(Node* node) {
  VisitBinop(this, node, kRiscvAdd32, true, kRiscvAdd32);
}

void InstructionSelector::VisitInt64Add(Node* node) {
  VisitBinop(this, node, kRiscvAdd64, true, kRiscvAdd64);
}

void InstructionSelector::VisitInt32Sub(Node* node) {
  VisitBinop(this, node, kRiscvSub32);
}

void InstructionSelector::VisitInt64Sub(Node* node) {
  VisitBinop(this, node, kRiscvSub64);
}

void InstructionSelector::VisitInt32Mul(Node* node) {
  RiscvOperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) {
    uint32_t value = static_cast<uint32_t>(m.right().ResolvedValue());
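    // Strength-reduce: x * 2^n is a shift, and x * (2^n - 1) is a shift
    // followed by a subtract, i.e. (x << n) - x.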
    if (base::bits::IsPowerOfTwo(value)) {
      Emit(kRiscvShl32 | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(base::bits::WhichPowerOfTwo(value)));
      return;
    }
    if (base::bits::IsPowerOfTwo(value + 1)) {
      InstructionOperand temp = g.TempRegister();
      Emit(kRiscvShl32 | AddressingModeField::encode(kMode_None), temp,
           g.UseRegister(m.left().node()),
           g.TempImmediate(base::bits::WhichPowerOfTwo(value + 1)));
      Emit(kRiscvSub32 | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
      return;
    }
  }
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (CanCover(node, left) && CanCover(node, right)) {
    if (left->opcode() == IrOpcode::kWord64Sar &&
        right->opcode() == IrOpcode::kWord64Sar) {
      Int64BinopMatcher leftInput(left), rightInput(right);
      if (leftInput.right().Is(32) && rightInput.right().Is(32)) {
        // Combine untagging shifts with Dmul high.
        Emit(kRiscvMulHigh64, g.DefineSameAsFirst(node),
             g.UseRegister(leftInput.left().node()),
             g.UseRegister(rightInput.left().node()));
        return;
      }
    }
  }
  VisitRRR(this, kRiscvMul32, node);
}

void InstructionSelector::VisitI32x4ExtAddPairwiseI16x8S(Node* node) {
  RiscvOperandGenerator g(this);
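  // Gather the even-indexed and odd-indexed lanes into the low halves of two
  // temporaries (the vrgather index constants select lanes 0,2,4,6 and
  // 1,3,5,7), then widening-add the two halves to form the pairwise sums.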
  InstructionOperand src1 = g.TempSimd128Register();
  InstructionOperand src2 = g.TempSimd128Register();
  InstructionOperand src = g.UseUniqueRegister(node->InputAt(0));
  Emit(kRiscvVrgather, src1, src, g.UseImmediate64(0x0006000400020000),
       g.UseImmediate(int8_t(E16)), g.UseImmediate(int8_t(m1)));
  Emit(kRiscvVrgather, src2, src, g.UseImmediate64(0x0007000500030001),
       g.UseImmediate(int8_t(E16)), g.UseImmediate(int8_t(m1)));
  Emit(kRiscvVwadd, g.DefineAsRegister(node), src1, src2,
       g.UseImmediate(int8_t(E16)), g.UseImmediate(int8_t(mf2)));
}

void InstructionSelector::VisitI32x4ExtAddPairwiseI16x8U(Node* node) {
  RiscvOperandGenerator g(this);
  InstructionOperand src1 = g.TempSimd128Register();
  InstructionOperand src2 = g.TempSimd128Register();
  InstructionOperand src = g.UseUniqueRegister(node->InputAt(0));
  Emit(kRiscvVrgather, src1, src, g.UseImmediate64(0x0006000400020000),
       g.UseImmediate(int8_t(E16)), g.UseImmediate(int8_t(m1)));
  Emit(kRiscvVrgather, src2, src, g.UseImmediate64(0x0007000500030001),
       g.UseImmediate(int8_t(E16)), g.UseImmediate(int8_t(m1)));
  Emit(kRiscvVwaddu, g.DefineAsRegister(node), src1, src2,
       g.UseImmediate(int8_t(E16)), g.UseImmediate(int8_t(mf2)));
}

void InstructionSelector::VisitI16x8ExtAddPairwiseI8x16S(Node* node) {
  RiscvOperandGenerator g(this);
  InstructionOperand src1 = g.TempSimd128Register();
  InstructionOperand src2 = g.TempSimd128Register();
  InstructionOperand src = g.UseUniqueRegister(node->InputAt(0));
  Emit(kRiscvVrgather, src1, src, g.UseImmediate64(0x0E0C0A0806040200),
       g.UseImmediate(int8_t(E8)), g.UseImmediate(int8_t(m1)));
  Emit(kRiscvVrgather, src2, src, g.UseImmediate64(0x0F0D0B0907050301),
       g.UseImmediate(int8_t(E8)), g.UseImmediate(int8_t(m1)));
  Emit(kRiscvVwadd, g.DefineAsRegister(node), src1, src2,
       g.UseImmediate(int8_t(E8)), g.UseImmediate(int8_t(mf2)));
}

void InstructionSelector::VisitI16x8ExtAddPairwiseI8x16U(Node* node) {
  RiscvOperandGenerator g(this);
  InstructionOperand src1 = g.TempSimd128Register();
  InstructionOperand src2 = g.TempSimd128Register();
  InstructionOperand src = g.UseUniqueRegister(node->InputAt(0));
  Emit(kRiscvVrgather, src1, src, g.UseImmediate64(0x0E0C0A0806040200),
       g.UseImmediate(int8_t(E8)), g.UseImmediate(int8_t(m1)));
  Emit(kRiscvVrgather, src2, src, g.UseImmediate64(0x0F0D0B0907050301),
       g.UseImmediate(int8_t(E8)), g.UseImmediate(int8_t(m1)));
  Emit(kRiscvVwaddu, g.DefineAsRegister(node), src1, src2,
       g.UseImmediate(int8_t(E8)), g.UseImmediate(int8_t(mf2)));
}

void InstructionSelector::VisitInt32MulHigh(Node* node) {
  VisitRRR(this, kRiscvMulHigh32, node);
}

void InstructionSelector::VisitUint32MulHigh(Node* node) {
  VisitRRR(this, kRiscvMulHighU32, node);
}

void InstructionSelector::VisitInt64Mul(Node* node) {
  RiscvOperandGenerator g(this);
  Int64BinopMatcher m(node);
  // TODO(dusmil): Add optimization for shifts larger than 32.
  if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) {
    uint64_t value = static_cast<uint64_t>(m.right().ResolvedValue());
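    // Same strength reduction as in VisitInt32Mul: a shift for powers of
    // two, a shift-and-subtract for (2^n - 1).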
    if (base::bits::IsPowerOfTwo(value)) {
      Emit(kRiscvShl64 | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(base::bits::WhichPowerOfTwo(value)));
      return;
    }
    if (base::bits::IsPowerOfTwo(value + 1)) {
      InstructionOperand temp = g.TempRegister();
      Emit(kRiscvShl64 | AddressingModeField::encode(kMode_None), temp,
           g.UseRegister(m.left().node()),
           g.TempImmediate(base::bits::WhichPowerOfTwo(value + 1)));
      Emit(kRiscvSub64 | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
      return;
    }
  }
  Emit(kRiscvMul64, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}

void InstructionSelector::VisitInt32Div(Node* node) {
  RiscvOperandGenerator g(this);
  Int32BinopMatcher m(node);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (CanCover(node, left) && CanCover(node, right)) {
    if (left->opcode() == IrOpcode::kWord64Sar &&
        right->opcode() == IrOpcode::kWord64Sar) {
      Int64BinopMatcher rightInput(right), leftInput(left);
      if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
        // Combine both shifted operands with Ddiv.
        Emit(kRiscvDiv64, g.DefineSameAsFirst(node),
             g.UseRegister(leftInput.left().node()),
             g.UseRegister(rightInput.left().node()));
        return;
      }
    }
  }
  Emit(kRiscvDiv32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}

void InstructionSelector::VisitUint32Div(Node* node) {
  RiscvOperandGenerator g(this);
  Int32BinopMatcher m(node);
  Emit(kRiscvDivU32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}

void InstructionSelector::VisitInt32Mod(Node* node) {
  RiscvOperandGenerator g(this);
  Int32BinopMatcher m(node);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (CanCover(node, left) && CanCover(node, right)) {
    if (left->opcode() == IrOpcode::kWord64Sar &&
        right->opcode() == IrOpcode::kWord64Sar) {
      Int64BinopMatcher rightInput(right), leftInput(left);
      if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
        // Combine both shifted operands with Dmod.
        Emit(kRiscvMod64, g.DefineSameAsFirst(node),
             g.UseRegister(leftInput.left().node()),
             g.UseRegister(rightInput.left().node()));
        return;
      }
    }
  }
  Emit(kRiscvMod32, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}

void InstructionSelector::VisitUint32Mod(Node* node) {
  RiscvOperandGenerator g(this);
  Int32BinopMatcher m(node);
  Emit(kRiscvModU32, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}

void InstructionSelector::VisitInt64Div(Node* node) {
  RiscvOperandGenerator g(this);
  Int64BinopMatcher m(node);
  Emit(kRiscvDiv64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}

void InstructionSelector::VisitUint64Div(Node* node) {
  RiscvOperandGenerator g(this);
  Int64BinopMatcher m(node);
  Emit(kRiscvDivU64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}

void InstructionSelector::VisitInt64Mod(Node* node) {
  RiscvOperandGenerator g(this);
  Int64BinopMatcher m(node);
  Emit(kRiscvMod64, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}

void InstructionSelector::VisitUint64Mod(Node* node) {
  RiscvOperandGenerator g(this);
  Int64BinopMatcher m(node);
  Emit(kRiscvModU64, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}

void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  VisitRR(this, kRiscvCvtDS, node);
}

void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
  VisitRR(this, kRiscvCvtSW, node);
}

void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
  VisitRR(this, kRiscvCvtSUw, node);
}

void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  VisitRR(this, kRiscvCvtDW, node);
}

void InstructionSelector::VisitChangeInt64ToFloat64(Node* node) {
  VisitRR(this, kRiscvCvtDL, node);
}

void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  VisitRR(this, kRiscvCvtDUw, node);
}

void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
  RiscvOperandGenerator g(this);
  InstructionCode opcode = kRiscvTruncWS;
  TruncateKind kind = OpParameter<TruncateKind>(node->op());
  if (kind == TruncateKind::kSetOverflowToMin) {
    opcode |= MiscField::encode(true);
  }
  Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
  RiscvOperandGenerator g(this);
  InstructionCode opcode = kRiscvTruncUwS;
  TruncateKind kind = OpParameter<TruncateKind>(node->op());
  if (kind == TruncateKind::kSetOverflowToMin) {
    opcode |= MiscField::encode(true);
  }
  Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  RiscvOperandGenerator g(this);
  Node* value = node->InputAt(0);
  // Match ChangeFloat64ToInt32(Float64Round##OP) to corresponding instruction
  // which does rounding and conversion to integer format.
  if (CanCover(node, value)) {
    switch (value->opcode()) {
      case IrOpcode::kFloat64RoundDown:
        Emit(kRiscvFloorWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundUp:
        Emit(kRiscvCeilWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundTiesEven:
        Emit(kRiscvRoundWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      case IrOpcode::kFloat64RoundTruncate:
        Emit(kRiscvTruncWD, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      default:
        break;
    }
    if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) {
      Node* next = value->InputAt(0);
      if (CanCover(value, next)) {
        // Match ChangeFloat64ToInt32(ChangeFloat32ToFloat64(Float64Round##OP))
        switch (next->opcode()) {
          case IrOpcode::kFloat32RoundDown:
            Emit(kRiscvFloorWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundUp:
            Emit(kRiscvCeilWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundTiesEven:
            Emit(kRiscvRoundWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          case IrOpcode::kFloat32RoundTruncate:
            Emit(kRiscvTruncWS, g.DefineAsRegister(node),
                 g.UseRegister(next->InputAt(0)));
            return;
          default:
            Emit(kRiscvTruncWS, g.DefineAsRegister(node),
                 g.UseRegister(value->InputAt(0)));
            return;
        }
      } else {
        // Match float32 -> float64 -> int32 representation change path.
        Emit(kRiscvTruncWS, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      }
    }
  }
  VisitRR(this, kRiscvTruncWD, node);
}

void InstructionSelector::VisitChangeFloat64ToInt64(Node* node) {
  VisitRR(this, kRiscvTruncLD, node);
}

void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  VisitRR(this, kRiscvTruncUwD, node);
}

void InstructionSelector::VisitChangeFloat64ToUint64(Node* node) {
  VisitRR(this, kRiscvTruncUlD, node);
}

void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
  VisitRR(this, kRiscvTruncUwD, node);
}

void InstructionSelector::VisitTruncateFloat64ToInt64(Node* node) {
  RiscvOperandGenerator g(this);
  InstructionCode opcode = kRiscvTruncLD;
  TruncateKind kind = OpParameter<TruncateKind>(node->op());
  if (kind == TruncateKind::kSetOverflowToMin) {
    opcode |= MiscField::encode(true);
  }
  Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
  RiscvOperandGenerator g(this);
  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  this->Emit(kRiscvTruncLS, output_count, outputs, 1, inputs);
}

void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
  RiscvOperandGenerator g(this);
  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kRiscvTruncLD, output_count, outputs, 1, inputs);
}

void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
  RiscvOperandGenerator g(this);
  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kRiscvTruncUlS, output_count, outputs, 1, inputs);
}

void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
  RiscvOperandGenerator g(this);

  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

  Node* success_output = NodeProperties::FindProjection(node, 1);
  if (success_output) {
    outputs[output_count++] = g.DefineAsRegister(success_output);
  }

  Emit(kRiscvTruncUlD, output_count, outputs, 1, inputs);
}

void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
  DCHECK(SmiValuesAre31Bits());
  DCHECK(COMPRESS_POINTERS_BOOL);
  RiscvOperandGenerator g(this);
  Emit(kRiscvZeroExtendWord, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void EmitSignExtendWord(InstructionSelector* selector, Node* node) {
  RiscvOperandGenerator g(selector);
  Node* value = node->InputAt(0);
  IrOpcode::Value lastOpCode = value->opcode();
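  // On RV64, 32-bit arithmetic and logical instructions are expected to keep
  // their results sign-extended to 64 bits (the RV64 *W instructions do this
  // by construction), so for the producers below a plain register transfer
  // (kArchNop) suffices and no explicit sign-extension is emitted.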
  if (lastOpCode == IrOpcode::kInt32Add || lastOpCode == IrOpcode::kInt32Sub ||
      lastOpCode == IrOpcode::kWord32And || lastOpCode == IrOpcode::kWord32Or ||
      lastOpCode == IrOpcode::kWord32Xor ||
      lastOpCode == IrOpcode::kWord32Shl ||
      lastOpCode == IrOpcode::kWord32Shr ||
      lastOpCode == IrOpcode::kWord32Sar ||
      lastOpCode == IrOpcode::kUint32Mod) {
    selector->Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
    return;
  }
  if (lastOpCode == IrOpcode::kInt32Mul) {
    Node* left = value->InputAt(0);
    Node* right = value->InputAt(1);
    if (selector->CanCover(value, left) && selector->CanCover(value, right)) {
      if (left->opcode() == IrOpcode::kWord64Sar &&
          right->opcode() == IrOpcode::kWord64Sar) {
        Int64BinopMatcher leftInput(left), rightInput(right);
        if (leftInput.right().Is(32) && rightInput.right().Is(32)) {
          selector->Emit(kRiscvSignExtendWord, g.DefineAsRegister(node),
                         g.UseRegister(value));
          return;
        }
      }
    }
    selector->Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
    return;
  }
  if (lastOpCode == IrOpcode::kInt32Mod) {
    Node* left = value->InputAt(0);
    Node* right = value->InputAt(1);
    if (selector->CanCover(value, left) && selector->CanCover(value, right)) {
      if (left->opcode() == IrOpcode::kWord64Sar &&
          right->opcode() == IrOpcode::kWord64Sar) {
        Int64BinopMatcher rightInput(right), leftInput(left);
        if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
          // Combine both shifted operands with Dmod.
          selector->Emit(kRiscvSignExtendWord, g.DefineAsRegister(node),
                         g.UseRegister(value));
          return;
        }
      }
    }
    selector->Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
    return;
  }
  selector->Emit(kRiscvSignExtendWord, g.DefineAsRegister(node),
                 g.UseRegister(value));
}

void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  Node* value = node->InputAt(0);
  if ((value->opcode() == IrOpcode::kLoad ||
       value->opcode() == IrOpcode::kLoadImmutable) &&
      CanCover(node, value)) {
    // Generate sign-extending load.
    LoadRepresentation load_rep = LoadRepresentationOf(value->op());
    InstructionCode opcode = kArchNop;
    switch (load_rep.representation()) {
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = load_rep.IsUnsigned() ? kRiscvLbu : kRiscvLb;
        break;
      case MachineRepresentation::kWord16:
        opcode = load_rep.IsUnsigned() ? kRiscvLhu : kRiscvLh;
        break;
      case MachineRepresentation::kWord32:
        opcode = kRiscvLw;
        break;
      default:
        UNREACHABLE();
    }
    EmitLoad(this, value, opcode, node);
  } else {
    EmitSignExtendWord(this, node);
  }
}

bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
  DCHECK_NE(node->opcode(), IrOpcode::kPhi);
  if (node->opcode() == IrOpcode::kLoad ||
      node->opcode() == IrOpcode::kLoadImmutable) {
    LoadRepresentation load_rep = LoadRepresentationOf(node->op());
    if (load_rep.IsUnsigned()) {
      switch (load_rep.representation()) {
        case MachineRepresentation::kWord8:
        case MachineRepresentation::kWord16:
          return true;
        default:
          return false;
      }
    }
  }

  // All other 32-bit operations sign-extend to the upper 32 bits.
  return false;
}

void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  RiscvOperandGenerator g(this);
  Node* value = node->InputAt(0);
  if (ZeroExtendsWord32ToWord64(value)) {
    Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
    return;
  }
  Emit(kRiscvZeroExtendWord, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  RiscvOperandGenerator g(this);
  Node* value = node->InputAt(0);
  if (CanCover(node, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord64Sar: {
        if (CanCover(value, value->InputAt(0)) &&
            TryEmitExtendingLoad(this, value, node)) {
          return;
        } else {
          Int64BinopMatcher m(value);
          if (m.right().IsInRange(32, 63)) {
            // After smi untagging, no truncation is needed; combine the
            // shift into this node.
            Emit(kRiscvSar64, g.DefineSameAsFirst(node),
                 g.UseRegister(m.left().node()),
                 g.UseImmediate(m.right().node()));
            return;
          }
        }
        break;
      }
      default:
        break;
    }
  }

  // The semantics of this machine IR are not entirely clear. For example, x86
  // zero-extends the truncated value, while ARM treats the truncation as a
  // nop and leaves the upper 32 bits undefined. On RISC-V we sign-extend the
  // truncated value, keeping the register in canonical sign-extended form.
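  // For example, truncating the int64 0xFFFF'FFFF'8000'0000 yields the int32
  // 0x8000'0000 (INT32_MIN), which is materialized in a register as the
  // sign-extended value 0xFFFF'FFFF'8000'0000 again.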
  EmitSignExtendWord(this, node);
}

void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
  RiscvOperandGenerator g(this);
  Node* value = node->InputAt(0);
  // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to corresponding
  // instruction.
  if (CanCover(node, value) &&
      value->opcode() == IrOpcode::kChangeInt32ToFloat64) {
    Emit(kRiscvCvtSW, g.DefineAsRegister(node),
         g.UseRegister(value->InputAt(0)));
    return;
  }
  VisitRR(this, kRiscvCvtSD, node);
}

void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
  VisitRR(this, kArchTruncateDoubleToI, node);
}

void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
  VisitRR(this, kRiscvTruncWD, node);
}

void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
  VisitRR(this, kRiscvCvtSL, node);
}

void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
  VisitRR(this, kRiscvCvtDL, node);
}

void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
  VisitRR(this, kRiscvCvtSUl, node);
}

void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
  VisitRR(this, kRiscvCvtDUl, node);
}

void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
  VisitRR(this, kRiscvBitcastFloat32ToInt32, node);
}

void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
  VisitRR(this, kRiscvBitcastDL, node);
}

void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
  VisitRR(this, kRiscvBitcastInt32ToFloat32, node);
}

void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
  VisitRR(this, kRiscvBitcastLD, node);
}

void InstructionSelector::VisitFloat32Add(Node* node) {
  VisitRRR(this, kRiscvAddS, node);
}

void InstructionSelector::VisitFloat64Add(Node* node) {
  VisitRRR(this, kRiscvAddD, node);
}

void InstructionSelector::VisitFloat32Sub(Node* node) {
  VisitRRR(this, kRiscvSubS, node);
}

void InstructionSelector::VisitFloat64Sub(Node* node) {
  VisitRRR(this, kRiscvSubD, node);
}

void InstructionSelector::VisitFloat32Mul(Node* node) {
  VisitRRR(this, kRiscvMulS, node);
}

void InstructionSelector::VisitFloat64Mul(Node* node) {
  VisitRRR(this, kRiscvMulD, node);
}

void InstructionSelector::VisitFloat32Div(Node* node) {
  VisitRRR(this, kRiscvDivS, node);
}

void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRR(this, kRiscvDivD, node);
}

void InstructionSelector::VisitFloat64Mod(Node* node) {
  RiscvOperandGenerator g(this);
  Emit(kRiscvModD, g.DefineAsFixed(node, fa0),
       g.UseFixed(node->InputAt(0), fa0), g.UseFixed(node->InputAt(1), fa1))
      ->MarkAsCall();
}

void InstructionSelector::VisitFloat32Max(Node* node) {
  RiscvOperandGenerator g(this);
  Emit(kRiscvFloat32Max, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}

void InstructionSelector::VisitFloat64Max(Node* node) {
  RiscvOperandGenerator g(this);
  Emit(kRiscvFloat64Max, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}

void InstructionSelector::VisitFloat32Min(Node* node) {
  RiscvOperandGenerator g(this);
  Emit(kRiscvFloat32Min, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}

void InstructionSelector::VisitFloat64Min(Node* node) {
  RiscvOperandGenerator g(this);
  Emit(kRiscvFloat64Min, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}

void InstructionSelector::VisitFloat32Abs(Node* node) {
  VisitRR(this, kRiscvAbsS, node);
}

void InstructionSelector::VisitFloat64Abs(Node* node) {
  VisitRR(this, kRiscvAbsD, node);
}

void InstructionSelector::VisitFloat32Sqrt(Node* node) {
  VisitRR(this, kRiscvSqrtS, node);
}

void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  VisitRR(this, kRiscvSqrtD, node);
}

void InstructionSelector::VisitFloat32RoundDown(Node* node) {
  VisitRR(this, kRiscvFloat32RoundDown, node);
}

void InstructionSelector::VisitFloat64RoundDown(Node* node) {
  VisitRR(this, kRiscvFloat64RoundDown, node);
}

void InstructionSelector::VisitFloat32RoundUp(Node* node) {
  VisitRR(this, kRiscvFloat32RoundUp, node);
}

void InstructionSelector::VisitFloat64RoundUp(Node* node) {
  VisitRR(this, kRiscvFloat64RoundUp, node);
}

void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
  VisitRR(this, kRiscvFloat32RoundTruncate, node);
}

void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
  VisitRR(this, kRiscvFloat64RoundTruncate, node);
}

void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  UNREACHABLE();
}

void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
  VisitRR(this, kRiscvFloat32RoundTiesEven, node);
}

void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
  VisitRR(this, kRiscvFloat64RoundTiesEven, node);
}

void InstructionSelector::VisitFloat32Neg(Node* node) {
  VisitRR(this, kRiscvNegS, node);
}

void InstructionSelector::VisitFloat64Neg(Node* node) {
  VisitRR(this, kRiscvNegD, node);
}

void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                   InstructionCode opcode) {
  RiscvOperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, fa0), g.UseFixed(node->InputAt(0), fa0),
       g.UseFixed(node->InputAt(1), fa1))
      ->MarkAsCall();
}

void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
                                                  InstructionCode opcode) {
  RiscvOperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, fa0), g.UseFixed(node->InputAt(0), fa1))
      ->MarkAsCall();
}

void InstructionSelector::EmitPrepareArguments(
    ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
    Node* node) {
  RiscvOperandGenerator g(this);

  // Prepare for C function call.
  if (call_descriptor->IsCFunctionCall()) {
    Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
                                         call_descriptor->ParameterCount())),
         0, nullptr, 0, nullptr);

    // Poke any stack arguments.
    int slot = kCArgSlotCount;
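    // Each argument occupies one pointer-sized stack slot, so
    // slot << kSystemPointerSizeLog2 converts the slot index to a byte
    // offset.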
    for (PushParameter input : (*arguments)) {
      Emit(kRiscvStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
           g.TempImmediate(slot << kSystemPointerSizeLog2));
      ++slot;
    }
  } else {
    int push_count = static_cast<int>(call_descriptor->ParameterSlotCount());
    if (push_count > 0) {
      // Calculate needed space.
      int stack_size = 0;
      for (PushParameter input : (*arguments)) {
        if (input.node) {
          stack_size += input.location.GetSizeInPointers();
        }
      }
      Emit(kRiscvStackClaim, g.NoOutput(),
           g.TempImmediate(stack_size << kSystemPointerSizeLog2));
    }
    for (size_t n = 0; n < arguments->size(); ++n) {
      PushParameter input = (*arguments)[n];
      if (input.node) {
        Emit(kRiscvStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node),
             g.TempImmediate(static_cast<int>(n << kSystemPointerSizeLog2)));
      }
    }
  }
}

void InstructionSelector::EmitPrepareResults(
    ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
    Node* node) {
  RiscvOperandGenerator g(this);

  int reverse_slot = 1;
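  // reverse_slot counts pointer-sized caller-frame return slots; kRiscvPeek
  // uses it to read each on-stack result back after the call returns.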
  for (PushParameter output : *results) {
    if (!output.location.IsCallerFrameSlot()) continue;
    // Skip any alignment holes in nodes.
    if (output.node != nullptr) {
      DCHECK(!call_descriptor->IsCFunctionCall());
      if (output.location.GetType() == MachineType::Float32()) {
        MarkAsFloat32(output.node);
      } else if (output.location.GetType() == MachineType::Float64()) {
        MarkAsFloat64(output.node);
      }
      Emit(kRiscvPeek, g.DefineAsRegister(output.node),
           g.UseImmediate(reverse_slot));
    }
    reverse_slot += output.location.GetSizeInPointers();
  }
}

bool InstructionSelector::IsTailCallAddressImmediate() { return false; }

void InstructionSelector::VisitUnalignedLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
  RiscvOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  ArchOpcode opcode;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kRiscvULoadFloat;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kRiscvULoadDouble;
      break;
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsUnsigned() ? kRiscvLbu : kRiscvLb;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsUnsigned() ? kRiscvUlhu : kRiscvUlh;
      break;
    case MachineRepresentation::kWord32:
      opcode = kRiscvUlw;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord64:
      opcode = kRiscvUld;
      break;
    case MachineRepresentation::kSimd128:
      opcode = kRiscvRvvLd;
      break;
    case MachineRepresentation::kBit:                // Fall through.
    case MachineRepresentation::kCompressedPointer:  // Fall through.
    case MachineRepresentation::kCompressed:         // Fall through.
    case MachineRepresentation::kSandboxedPointer:
    case MachineRepresentation::kMapWord:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
  }

  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI),
         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
  }
}

void InstructionSelector::VisitUnalignedStore(Node* node) {
  RiscvOperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  UnalignedStoreRepresentation rep = UnalignedStoreRepresentationOf(node->op());
  ArchOpcode opcode;
  switch (rep) {
    case MachineRepresentation::kFloat32:
      opcode = kRiscvUStoreFloat;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kRiscvUStoreDouble;
      break;
    case MachineRepresentation::kWord8:
      opcode = kRiscvSb;
      break;
    case MachineRepresentation::kWord16:
      opcode = kRiscvUsh;
      break;
    case MachineRepresentation::kWord32:
      opcode = kRiscvUsw;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord64:
      opcode = kRiscvUsd;
      break;
    case MachineRepresentation::kSimd128:
      opcode = kRiscvRvvSt;
      break;
    case MachineRepresentation::kBit:                // Fall through.
    case MachineRepresentation::kCompressedPointer:  // Fall through.
    case MachineRepresentation::kCompressed:         // Fall through.
    case MachineRepresentation::kSandboxedPointer:
    case MachineRepresentation::kMapWord:  // Fall through.
    case MachineRepresentation::kNone:
      UNREACHABLE();
  }

  if (g.CanBeImmediate(index, opcode)) {
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         g.UseRegister(base), g.UseImmediate(index),
         g.UseRegisterOrImmediateZero(value));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None), addr_reg,
         g.UseRegister(index), g.UseRegister(base));
    // Emit desired store opcode, using temp addr_reg.
    Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
         addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
  }
}

namespace {

// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                         InstructionOperand left, InstructionOperand right,
                         FlagsContinuation* cont) {
  selector->EmitWithContinuation(opcode, left, right, cont);
}

// Shared routine for multiple compare operations.
static void VisitWordCompareZero(InstructionSelector* selector,
                                 InstructionOperand value,
                                 FlagsContinuation* cont) {
  selector->EmitWithContinuation(kRiscvCmpZero, value, cont);
}

// Shared routine for multiple float32 compare operations.
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  RiscvOperandGenerator g(selector);
  Float32BinopMatcher m(node);
  InstructionOperand lhs, rhs;

  lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
                          : g.UseRegister(m.left().node());
  rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
                           : g.UseRegister(m.right().node());
  VisitCompare(selector, kRiscvCmpS, lhs, rhs, cont);
}

// Shared routine for multiple float64 compare operations.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  RiscvOperandGenerator g(selector);
  Float64BinopMatcher m(node);
  InstructionOperand lhs, rhs;

  lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
                          : g.UseRegister(m.left().node());
  rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
                           : g.UseRegister(m.right().node());
  VisitCompare(selector, kRiscvCmpD, lhs, rhs, cont);
}

// Shared routine for multiple word compare operations.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont,
                      bool commutative) {
  RiscvOperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  // If one of the two inputs is an immediate, make sure it's on the right.
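  // For example, (5 < x) becomes (x > 5): the operands are swapped and the
  // continuation's condition is commuted, so the immediate can be encoded on
  // the right-hand side.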
  if (!g.CanBeImmediate(right, opcode) && g.CanBeImmediate(left, opcode)) {
    cont->Commute();
    std::swap(left, right);
  }
  // Match immediates on right side of comparison.
  if (g.CanBeImmediate(right, opcode)) {
    if (opcode == kRiscvTst) {
      VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
                   cont);
    } else {
      switch (cont->condition()) {
        case kEqual:
        case kNotEqual:
          if (cont->IsSet()) {
            VisitCompare(selector, opcode, g.UseRegister(left),
                         g.UseImmediate(right), cont);
          } else {
            Int32BinopMatcher m(node, true);
            NumberBinopMatcher n(node, true);
            if (m.right().Is(0) || n.right().IsZero()) {
              VisitWordCompareZero(selector, g.UseRegisterOrImmediateZero(left),
                                   cont);
            } else {
              VisitCompare(selector, opcode, g.UseRegister(left),
                           g.UseRegister(right), cont);
            }
          }
          break;
        case kSignedLessThan:
        case kSignedGreaterThanOrEqual:
        case kUnsignedLessThan:
        case kUnsignedGreaterThanOrEqual: {
          Int32BinopMatcher m(node, true);
          if (m.right().Is(0)) {
            VisitWordCompareZero(selector, g.UseRegisterOrImmediateZero(left),
                                 cont);
          } else {
            VisitCompare(selector, opcode, g.UseRegister(left),
                         g.UseImmediate(right), cont);
          }
        } break;
        default:
          Int32BinopMatcher m(node, true);
          if (m.right().Is(0)) {
            VisitWordCompareZero(selector, g.UseRegisterOrImmediateZero(left),
                                 cont);
          } else {
            VisitCompare(selector, opcode, g.UseRegister(left),
                         g.UseRegister(right), cont);
          }
      }
    }
  } else {
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
                 cont);
  }
}

#ifndef V8_COMPRESS_POINTERS
bool IsNodeUnsigned(Node* n) {
  NodeMatcher m(n);

  if (m.IsLoad() || m.IsUnalignedLoad() || m.IsProtectedLoad()) {
    LoadRepresentation load_rep = LoadRepresentationOf(n->op());
    return load_rep.IsUnsigned();
  } else if (m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) {
    AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(n->op());
    LoadRepresentation load_rep = atomic_load_params.representation();
    return load_rep.IsUnsigned();
  } else {
    return m.IsUint32Div() || m.IsUint32LessThan() ||
           m.IsUint32LessThanOrEqual() || m.IsUint32Mod() ||
           m.IsUint32MulHigh() || m.IsChangeFloat64ToUint32() ||
           m.IsTruncateFloat64ToUint32() || m.IsTruncateFloat32ToUint32();
  }
}
#endif

// Shared routine for multiple word compare operations.
void VisitFullWord32Compare(InstructionSelector* selector, Node* node,
                            InstructionCode opcode, FlagsContinuation* cont) {
  RiscvOperandGenerator g(selector);
  InstructionOperand leftOp = g.TempRegister();
  InstructionOperand rightOp = g.TempRegister();

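  // Shift both operands into the upper 32 bits so that the 64-bit compare
  // only sees the original Word32 payloads: any stale upper bits are shifted
  // out, and both the signed and unsigned orderings of the low words are
  // preserved by comparing the shifted values.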
  selector->Emit(kRiscvShl64, leftOp, g.UseRegister(node->InputAt(0)),
                 g.TempImmediate(32));
  selector->Emit(kRiscvShl64, rightOp, g.UseRegister(node->InputAt(1)),
                 g.TempImmediate(32));

  VisitCompare(selector, opcode, leftOp, rightOp, cont);
}

#ifndef V8_COMPRESS_POINTERS
void VisitOptimizedWord32Compare(InstructionSelector* selector, Node* node,
                                 InstructionCode opcode,
                                 FlagsContinuation* cont) {
  if (FLAG_debug_code) {
    RiscvOperandGenerator g(selector);
    InstructionOperand leftOp = g.TempRegister();
    InstructionOperand rightOp = g.TempRegister();
    InstructionOperand optimizedResult = g.TempRegister();
    InstructionOperand fullResult = g.TempRegister();
    FlagsCondition condition = cont->condition();
    InstructionCode testOpcode = opcode |
                                 FlagsConditionField::encode(condition) |
                                 FlagsModeField::encode(kFlags_set);

    selector->Emit(testOpcode, optimizedResult, g.UseRegister(node->InputAt(0)),
                   g.UseRegister(node->InputAt(1)));

    selector->Emit(kRiscvShl64, leftOp, g.UseRegister(node->InputAt(0)),
                   g.TempImmediate(32));
    selector->Emit(kRiscvShl64, rightOp, g.UseRegister(node->InputAt(1)),
                   g.TempImmediate(32));
    selector->Emit(testOpcode, fullResult, leftOp, rightOp);

    selector->Emit(kRiscvAssertEqual, g.NoOutput(), optimizedResult, fullResult,
                   g.TempImmediate(static_cast<int>(
                       AbortReason::kUnsupportedNonPrimitiveCompare)));
  }

  VisitWordCompare(selector, node, opcode, cont, false);
}
#endif

void VisitWord32Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  // RISC-V doesn't support Word32 compare instructions. Instead, it relies
  // on the values in registers being correctly sign-extended and uses a
  // Word64 comparison. This behavior is correct in most cases, but it doesn't
  // work when comparing signed with unsigned operands. We could simulate a
  // full Word32 compare in all cases, but that would create unnecessary
  // overhead, since unsigned integers are rarely used in JavaScript. The
  // solution here is to try to match a comparison of a signed with an
  // unsigned operand and perform the full Word32 compare only in those cases.
  // This solution is not complete: it may miss cases where the full Word32
  // compare is needed, so it is essentially a heuristic.
  // When calling a host function from the simulator, if the function returns
  // an int32 value, the simulator does not sign-extend it to int64, because
  // the simulator cannot know whether the function returns an int32 or an
  // int64; so we need to do a full Word32 compare in that case as well.
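  // For example, with x = -1 held sign-extended as 0xFFFF'FFFF'FFFF'FFFF and
  // y = 0xFFFFFFFF produced zero-extended as 0x0000'0000'FFFF'FFFF, the low
  // 32 bits are identical, yet a plain 64-bit compare reports the values as
  // unequal; the mixed signed/unsigned case therefore needs the full Word32
  // compare.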
#ifndef V8_COMPRESS_POINTERS
#ifndef USE_SIMULATOR
  if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1))) {
#else
  if (IsNodeUnsigned(node->InputAt(0)) != IsNodeUnsigned(node->InputAt(1)) ||
      node->InputAt(0)->opcode() == IrOpcode::kCall ||
      node->InputAt(1)->opcode() == IrOpcode::kCall) {
#endif
    VisitFullWord32Compare(selector, node, kRiscvCmp, cont);
  } else {
    VisitOptimizedWord32Compare(selector, node, kRiscvCmp, cont);
  }
#else
  VisitFullWord32Compare(selector, node, kRiscvCmp, cont);
#endif
}

void VisitWord64Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  VisitWordCompare(selector, node, kRiscvCmp, cont, false);
}

void EmitWordCompareZero(InstructionSelector* selector, Node* value,
                         FlagsContinuation* cont) {
  RiscvOperandGenerator g(selector);
  selector->EmitWithContinuation(kRiscvCmpZero,
                                 g.UseRegisterOrImmediateZero(value), cont);
}

void VisitAtomicLoad(InstructionSelector* selector, Node* node,
                     ArchOpcode opcode, AtomicWidth width) {
  RiscvOperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  if (g.CanBeImmediate(index, opcode)) {
    selector->Emit(opcode | AddressingModeField::encode(kMode_MRI) |
                       AtomicWidthField::encode(width),
                   g.DefineAsRegister(node), g.UseRegister(base),
                   g.UseImmediate(index));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    selector->Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None),
                   addr_reg, g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    selector->Emit(opcode | AddressingModeField::encode(kMode_MRI) |
                       AtomicWidthField::encode(width),
                   g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
  }
}

void VisitAtomicStore(InstructionSelector* selector, Node* node,
                      ArchOpcode opcode, AtomicWidth width) {
  RiscvOperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  if (g.CanBeImmediate(index, opcode)) {
    selector->Emit(opcode | AddressingModeField::encode(kMode_MRI) |
                       AtomicWidthField::encode(width),
                   g.NoOutput(), g.UseRegister(base), g.UseImmediate(index),
                   g.UseRegisterOrImmediateZero(value));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    selector->Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None),
                   addr_reg, g.UseRegister(index), g.UseRegister(base));
    // Emit desired store opcode, using temp addr_reg.
    selector->Emit(opcode | AddressingModeField::encode(kMode_MRI) |
                       AtomicWidthField::encode(width),
                   g.NoOutput(), addr_reg, g.TempImmediate(0),
                   g.UseRegisterOrImmediateZero(value));
  }
}

void VisitAtomicExchange(InstructionSelector* selector, Node* node,
                         ArchOpcode opcode, AtomicWidth width) {
  RiscvOperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

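  // All inputs are used as unique registers, and three scratch registers are
  // reserved, because the code generator expands this instruction into a
  // retry loop (presumably lr/sc-based on RISC-V) that still reads its inputs
  // after the temporaries have been clobbered.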
  AddressingMode addressing_mode = kMode_MRI;
  InstructionOperand inputs[3];
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(base);
  inputs[input_count++] = g.UseUniqueRegister(index);
  inputs[input_count++] = g.UseUniqueRegister(value);
  InstructionOperand outputs[1];
  outputs[0] = g.UseUniqueRegister(node);
  InstructionOperand temp[3];
  temp[0] = g.TempRegister();
  temp[1] = g.TempRegister();
  temp[2] = g.TempRegister();
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
                         AtomicWidthField::encode(width);
  selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
}

void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
                                ArchOpcode opcode, AtomicWidth width) {
  RiscvOperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* old_value = node->InputAt(2);
  Node* new_value = node->InputAt(3);

  AddressingMode addressing_mode = kMode_MRI;
  InstructionOperand inputs[4];
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(base);
  inputs[input_count++] = g.UseUniqueRegister(index);
  inputs[input_count++] = g.UseUniqueRegister(old_value);
  inputs[input_count++] = g.UseUniqueRegister(new_value);
  InstructionOperand outputs[1];
  outputs[0] = g.UseUniqueRegister(node);
  InstructionOperand temp[3];
  temp[0] = g.TempRegister();
  temp[1] = g.TempRegister();
  temp[2] = g.TempRegister();
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
                         AtomicWidthField::encode(width);
  selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
}

void VisitAtomicBinop(InstructionSelector* selector, Node* node,
                      ArchOpcode opcode, AtomicWidth width) {
  RiscvOperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  AddressingMode addressing_mode = kMode_MRI;
  InstructionOperand inputs[3];
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(base);
  inputs[input_count++] = g.UseUniqueRegister(index);
  inputs[input_count++] = g.UseUniqueRegister(value);
  InstructionOperand outputs[1];
  outputs[0] = g.UseUniqueRegister(node);
  InstructionOperand temps[4];
  temps[0] = g.TempRegister();
  temps[1] = g.TempRegister();
  temps[2] = g.TempRegister();
  temps[3] = g.TempRegister();
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
                         AtomicWidthField::encode(width);
  selector->Emit(code, 1, outputs, input_count, inputs, 4, temps);
}

}  // namespace

void InstructionSelector::VisitStackPointerGreaterThan(
    Node* node, FlagsContinuation* cont) {
  StackCheckKind kind = StackCheckKindOf(node->op());
  InstructionCode opcode =
      kArchStackPointerGreaterThan | MiscField::encode(static_cast<int>(kind));

  RiscvOperandGenerator g(this);

  // No outputs.
  InstructionOperand* const outputs = nullptr;
  const int output_count = 0;

  // Applying an offset to this stack check requires a temp register. Offsets
  // are only applied to the first stack check. If applying an offset, we must
  // ensure the input and temp registers do not alias, thus kUniqueRegister.
  InstructionOperand temps[] = {g.TempRegister()};
  const int temp_count = (kind == StackCheckKind::kJSFunctionEntry ? 1 : 0);
  const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry)
                                 ? OperandGenerator::kUniqueRegister
                                 : OperandGenerator::kRegister;

  Node* const value = node->InputAt(0);
  InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)};
  static constexpr int input_count = arraysize(inputs);

  EmitWithContinuation(opcode, output_count, outputs, input_count, inputs,
                       temp_count, temps, cont);
}

// Shared routine for word comparisons against zero.
void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
                                               FlagsContinuation* cont) {
  // Try to combine with comparisons against 0 by simply inverting the branch.
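  // For example, Branch(Word32Equal(x, 0)) is folded into a negated branch on
  // x itself, so no explicit equality check needs to be emitted.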
  while (CanCover(user, value)) {
    if (value->opcode() == IrOpcode::kWord32Equal) {
      Int32BinopMatcher m(value);
      if (!m.right().Is(0)) break;
      user = value;
      value = m.left().node();
    } else if (value->opcode() == IrOpcode::kWord64Equal) {
      Int64BinopMatcher m(value);
      if (!m.right().Is(0)) break;
      user = value;
      value = m.left().node();
    } else {
      break;
    }

    cont->Negate();
  }

  if (CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kInt32LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kUint32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kWord64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kInt64LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kInt64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kUint64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kUint64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kFloat32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kFloat64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation>) is either nullptr, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch.
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (result == nullptr || IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kRiscvAdd64, cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kRiscvSub64, cont);
              case IrOpcode::kInt32MulWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kRiscvMulOvf32, cont);
              case IrOpcode::kInt64AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kRiscvAddOvf64, cont);
              case IrOpcode::kInt64SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kRiscvSubOvf64, cont);
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kWord32And:
      case IrOpcode::kWord64And:
        return VisitWordCompare(this, value, kRiscvTst, cont, true);
      case IrOpcode::kStackPointerGreaterThan:
        cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
        return VisitStackPointerGreaterThan(value, cont);
      default:
        break;
    }
  }

  // Continuation could not be combined with a compare; emit a compare
  // against 0.
  EmitWordCompareZero(this, value, cont);
}

void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
  RiscvOperandGenerator g(this);
  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));

  // Emit either ArchTableSwitch or ArchBinarySearchSwitch.
  if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
    static const size_t kMaxTableSwitchValueRange = 2 << 16;
    size_t table_space_cost = 10 + 2 * sw.value_range();
    size_t table_time_cost = 3;
    size_t lookup_space_cost = 2 + 2 * sw.case_count();
    size_t lookup_time_cost = sw.case_count();
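    // Worked example: 8 cases spanning the value range 0..31 give a table
    // cost of (10 + 2 * 32) + 3 * 3 = 83 against a lookup cost of
    // (2 + 2 * 8) + 3 * 8 = 42, so the binary-search switch is chosen below.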
    if (sw.case_count() > 0 &&
        table_space_cost + 3 * table_time_cost <=
            lookup_space_cost + 3 * lookup_time_cost &&
        sw.min_value() > std::numeric_limits<int32_t>::min() &&
        sw.value_range() <= kMaxTableSwitchValueRange) {
      InstructionOperand index_operand = value_operand;
      if (sw.min_value()) {
        index_operand = g.TempRegister();
        Emit(kRiscvSub32, index_operand, value_operand,
             g.TempImmediate(sw.min_value()));
      }
      // Generate a table lookup.
      return EmitTableSwitch(sw, index_operand);
    }
  }

  // Generate a tree of conditional jumps.
  return EmitBinarySearchSwitch(sw, value_operand);
}

void InstructionSelector::VisitWord32Equal(Node* const node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  Int32BinopMatcher m(node);
  if (m.right().Is(0)) {
    return VisitWordCompareZero(m.node(), m.left().node(), &cont);
  }

  VisitWord32Compare(this, node, &cont);
}

void InstructionSelector::VisitInt32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}

void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}

void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWord32Compare(this, node, &cont);
}

void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWord32Compare(this, node, &cont);
}

void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop(this, node, kRiscvAdd64, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kRiscvAdd64, &cont);
}

void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop(this, node, kRiscvSub64, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kRiscvSub64, &cont);
}

void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop(this, node, kRiscvMulOvf32, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kRiscvMulOvf32, &cont);
}

void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop(this, node, kRiscvAddOvf64, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kRiscvAddOvf64, &cont);
}

void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
  if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
    FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
    return VisitBinop(this, node, kRiscvSubOvf64, &cont);
  }
  FlagsContinuation cont;
  VisitBinop(this, node, kRiscvSubOvf64, &cont);
}

void InstructionSelector::VisitWord64Equal(Node* const node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  Int64BinopMatcher m(node);
  if (m.right().Is(0)) {
    return VisitWordCompareZero(m.node(), m.left().node(), &cont);
  }

  VisitWord64Compare(this, node, &cont);
}

void InstructionSelector::VisitInt64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
  VisitWord64Compare(this, node, &cont);
}

void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
  VisitWord64Compare(this, node, &cont);
}

void InstructionSelector::VisitUint64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitWord64Compare(this, node, &cont);
}

void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitWord64Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat32Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat32Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat32LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitFloat32Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitFloat32Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat64Equal(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
  VisitFloat64Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat64LessThan(Node* node) {
  FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
  VisitFloat64Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont =
      FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
  VisitFloat64Compare(this, node, &cont);
}

void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
  VisitRR(this, kRiscvFloat64ExtractLowWord32, node);
}

void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
  VisitRR(this, kRiscvFloat64ExtractHighWord32, node);
}

void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
  VisitRR(this, kRiscvFloat64SilenceNaN, node);
}

void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
  RiscvOperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  Emit(kRiscvFloat64InsertLowWord32, g.DefineSameAsFirst(node),
       g.UseRegister(left), g.UseRegister(right));
}

void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
  RiscvOperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  Emit(kRiscvFloat64InsertHighWord32, g.DefineSameAsFirst(node),
       g.UseRegister(left), g.UseRegister(right));
}

void InstructionSelector::VisitMemoryBarrier(Node* node) {
  RiscvOperandGenerator g(this);
  Emit(kRiscvSync, g.NoOutput());
}

void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
  AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
  LoadRepresentation load_rep = atomic_load_params.representation();
  ArchOpcode opcode;
  switch (load_rep.representation()) {
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kAtomicLoadWord32;
      break;
    default:
      UNREACHABLE();
  }
  VisitAtomicLoad(this, node, opcode, AtomicWidth::kWord32);
}

void InstructionSelector::VisitWord32AtomicStore(Node* node) {
  AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
  MachineRepresentation rep = store_params.representation();
  ArchOpcode opcode;
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kAtomicStoreWord8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kAtomicStoreWord16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kAtomicStoreWord32;
      break;
    default:
      UNREACHABLE();
  }

  VisitAtomicStore(this, node, opcode, AtomicWidth::kWord32);
}

void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
  AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
  LoadRepresentation load_rep = atomic_load_params.representation();
  ArchOpcode opcode;
  switch (load_rep.representation()) {
    case MachineRepresentation::kWord8:
      opcode = kAtomicLoadUint8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kAtomicLoadUint16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kAtomicLoadWord32;
      break;
    case MachineRepresentation::kWord64:
      opcode = kRiscvWord64AtomicLoadUint64;
      break;
#ifdef V8_COMPRESS_POINTERS
    case MachineRepresentation::kTaggedSigned:
      opcode = kRiscv64LdDecompressTaggedSigned;
      break;
    case MachineRepresentation::kTaggedPointer:
      opcode = kRiscv64LdDecompressTaggedPointer;
      break;
    case MachineRepresentation::kTagged:
      opcode = kRiscv64LdDecompressAnyTagged;
      break;
#else
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:
      if (kTaggedSize == 8) {
        opcode = kRiscvWord64AtomicLoadUint64;
      } else {
        opcode = kAtomicLoadWord32;
      }
      break;
#endif
    case MachineRepresentation::kCompressedPointer:  // Fall through.
    case MachineRepresentation::kCompressed:
      DCHECK(COMPRESS_POINTERS_BOOL);
      opcode = kAtomicLoadWord32;
      break;
    default:
      UNREACHABLE();
  }
  VisitAtomicLoad(this, node, opcode, AtomicWidth::kWord64);
}

void InstructionSelector::VisitWord64AtomicStore(Node* node) {
  AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
  MachineRepresentation rep = store_params.representation();
  ArchOpcode opcode;
  switch (rep) {
    case MachineRepresentation::kWord8:
      opcode = kAtomicStoreWord8;
      break;
    case MachineRepresentation::kWord16:
      opcode = kAtomicStoreWord16;
      break;
    case MachineRepresentation::kWord32:
      opcode = kAtomicStoreWord32;
      break;
    case MachineRepresentation::kWord64:
      opcode = kRiscvWord64AtomicStoreWord64;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:
      opcode = kRiscvWord64AtomicStoreWord64;
      break;
    case MachineRepresentation::kCompressedPointer:  // Fall through.
    case MachineRepresentation::kCompressed:
      CHECK(COMPRESS_POINTERS_BOOL);
      opcode = kAtomicStoreWord32;
      break;
    default:
      UNREACHABLE();
  }

  VisitAtomicStore(this, node, opcode, AtomicWidth::kWord64);
}

void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
  ArchOpcode opcode;
  MachineType type = AtomicOpType(node->op());
  if (type == MachineType::Int8()) {
    opcode = kAtomicExchangeInt8;
  } else if (type == MachineType::Uint8()) {
    opcode = kAtomicExchangeUint8;
  } else if (type == MachineType::Int16()) {
    opcode = kAtomicExchangeInt16;
  } else if (type == MachineType::Uint16()) {
    opcode = kAtomicExchangeUint16;
  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
    opcode = kAtomicExchangeWord32;
  } else {
    UNREACHABLE();
  }

  VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32);
}

void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
  ArchOpcode opcode;
  MachineType type = AtomicOpType(node->op());
  if (type == MachineType::Uint8()) {
    opcode = kAtomicExchangeUint8;
  } else if (type == MachineType::Uint16()) {
    opcode = kAtomicExchangeUint16;
  } else if (type == MachineType::Uint32()) {
    opcode = kAtomicExchangeWord32;
  } else if (type == MachineType::Uint64()) {
    opcode = kRiscvWord64AtomicExchangeUint64;
  } else {
    UNREACHABLE();
  }
  VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord64);
}

void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
  ArchOpcode opcode;
  MachineType type = AtomicOpType(node->op());
  if (type == MachineType::Int8()) {
    opcode = kAtomicCompareExchangeInt8;
  } else if (type == MachineType::Uint8()) {
    opcode = kAtomicCompareExchangeUint8;
  } else if (type == MachineType::Int16()) {
    opcode = kAtomicCompareExchangeInt16;
  } else if (type == MachineType::Uint16()) {
    opcode = kAtomicCompareExchangeUint16;
  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
    opcode = kAtomicCompareExchangeWord32;
  } else {
    UNREACHABLE();
  }

  VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord32);
}

void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
  ArchOpcode opcode;
  MachineType type = AtomicOpType(node->op());
  if (type == MachineType::Uint8()) {
    opcode = kAtomicCompareExchangeUint8;
  } else if (type == MachineType::Uint16()) {
    opcode = kAtomicCompareExchangeUint16;
  } else if (type == MachineType::Uint32()) {
    opcode = kAtomicCompareExchangeWord32;
  } else if (type == MachineType::Uint64()) {
    opcode = kRiscvWord64AtomicCompareExchangeUint64;
  } else {
    UNREACHABLE();
  }
  VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord64);
}

void InstructionSelector::VisitWord32AtomicBinaryOperation(
    Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
    ArchOpcode uint16_op, ArchOpcode word32_op) {
  ArchOpcode opcode;
  MachineType type = AtomicOpType(node->op());
  if (type == MachineType::Int8()) {
    opcode = int8_op;
  } else if (type == MachineType::Uint8()) {
    opcode = uint8_op;
  } else if (type == MachineType::Int16()) {
    opcode = int16_op;
  } else if (type == MachineType::Uint16()) {
    opcode = uint16_op;
  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
    opcode = word32_op;
  } else {
    UNREACHABLE();
  }

  VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord32);
}

#define VISIT_ATOMIC_BINOP(op)                                           \
  void InstructionSelector::VisitWord32Atomic##op(Node* node) {          \
    VisitWord32AtomicBinaryOperation(                                    \
        node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
        kAtomic##op##Uint16, kAtomic##op##Word32);                       \
  }
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
VISIT_ATOMIC_BINOP(And)
VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP

void InstructionSelector::VisitWord64AtomicBinaryOperation(
    Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op,
    ArchOpcode uint64_op) {
  ArchOpcode opcode;
  MachineType type = AtomicOpType(node->op());
  if (type == MachineType::Uint8()) {
    opcode = uint8_op;
  } else if (type == MachineType::Uint16()) {
    opcode = uint16_op;
  } else if (type == MachineType::Uint32()) {
    opcode = uint32_op;
  } else if (type == MachineType::Uint64()) {
    opcode = uint64_op;
  } else {
    UNREACHABLE();
  }
  VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord64);
}

#define VISIT_ATOMIC_BINOP(op)                                                 \
  void InstructionSelector::VisitWord64Atomic##op(Node* node) {                \
    VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8,                 \
                                     kAtomic##op##Uint16, kAtomic##op##Word32, \
                                     kRiscvWord64Atomic##op##Uint64);          \
  }
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
VISIT_ATOMIC_BINOP(And)
VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP

void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
  UNREACHABLE();
}

void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
  UNREACHABLE();
}

#define SIMD_TYPE_LIST(V) \
  V(F32x4)                \
  V(I64x2)                \
  V(I32x4)                \
  V(I16x8)                \
  V(I8x16)

#define SIMD_UNOP_LIST(V)                                       \
  V(F64x2Abs, kRiscvF64x2Abs)                                   \
  V(F64x2Neg, kRiscvF64x2Neg)                                   \
  V(F64x2Sqrt, kRiscvF64x2Sqrt)                                 \
  V(F64x2ConvertLowI32x4S, kRiscvF64x2ConvertLowI32x4S)         \
  V(F64x2ConvertLowI32x4U, kRiscvF64x2ConvertLowI32x4U)         \
  V(F64x2PromoteLowF32x4, kRiscvF64x2PromoteLowF32x4)           \
  V(F64x2Ceil, kRiscvF64x2Ceil)                                 \
  V(F64x2Floor, kRiscvF64x2Floor)                               \
  V(F64x2Trunc, kRiscvF64x2Trunc)                               \
  V(F64x2NearestInt, kRiscvF64x2NearestInt)                     \
  V(I64x2Neg, kRiscvI64x2Neg)                                   \
  V(I64x2Abs, kRiscvI64x2Abs)                                   \
  V(I64x2BitMask, kRiscvI64x2BitMask)                           \
  V(F32x4SConvertI32x4, kRiscvF32x4SConvertI32x4)               \
  V(F32x4UConvertI32x4, kRiscvF32x4UConvertI32x4)               \
  V(F32x4Abs, kRiscvF32x4Abs)                                   \
  V(F32x4Neg, kRiscvF32x4Neg)                                   \
  V(F32x4Sqrt, kRiscvF32x4Sqrt)                                 \
  V(F32x4RecipApprox, kRiscvF32x4RecipApprox)                   \
  V(F32x4RecipSqrtApprox, kRiscvF32x4RecipSqrtApprox)           \
  V(F32x4DemoteF64x2Zero, kRiscvF32x4DemoteF64x2Zero)           \
  V(F32x4Ceil, kRiscvF32x4Ceil)                                 \
  V(F32x4Floor, kRiscvF32x4Floor)                               \
  V(F32x4Trunc, kRiscvF32x4Trunc)                               \
  V(F32x4NearestInt, kRiscvF32x4NearestInt)                     \
  V(I32x4RelaxedTruncF32x4S, kRiscvI32x4SConvertF32x4)          \
  V(I32x4RelaxedTruncF32x4U, kRiscvI32x4UConvertF32x4)          \
  V(I32x4RelaxedTruncF64x2SZero, kRiscvI32x4TruncSatF64x2SZero) \
  V(I32x4RelaxedTruncF64x2UZero, kRiscvI32x4TruncSatF64x2UZero) \
  V(I64x2SConvertI32x4Low, kRiscvI64x2SConvertI32x4Low)         \
  V(I64x2SConvertI32x4High, kRiscvI64x2SConvertI32x4High)       \
  V(I64x2UConvertI32x4Low, kRiscvI64x2UConvertI32x4Low)         \
  V(I64x2UConvertI32x4High, kRiscvI64x2UConvertI32x4High)       \
  V(I32x4SConvertF32x4, kRiscvI32x4SConvertF32x4)               \
  V(I32x4UConvertF32x4, kRiscvI32x4UConvertF32x4)               \
  V(I32x4Neg, kRiscvI32x4Neg)                                   \
  V(I32x4SConvertI16x8Low, kRiscvI32x4SConvertI16x8Low)         \
  V(I32x4SConvertI16x8High, kRiscvI32x4SConvertI16x8High)       \
  V(I32x4UConvertI16x8Low, kRiscvI32x4UConvertI16x8Low)         \
  V(I32x4UConvertI16x8High, kRiscvI32x4UConvertI16x8High)       \
  V(I32x4Abs, kRiscvI32x4Abs)                                   \
  V(I32x4BitMask, kRiscvI32x4BitMask)                           \
  V(I32x4TruncSatF64x2SZero, kRiscvI32x4TruncSatF64x2SZero)     \
  V(I32x4TruncSatF64x2UZero, kRiscvI32x4TruncSatF64x2UZero)     \
  V(I16x8Neg, kRiscvI16x8Neg)                                   \
  V(I16x8SConvertI8x16Low, kRiscvI16x8SConvertI8x16Low)         \
  V(I16x8SConvertI8x16High, kRiscvI16x8SConvertI8x16High)       \
  V(I16x8UConvertI8x16Low, kRiscvI16x8UConvertI8x16Low)         \
  V(I16x8UConvertI8x16High, kRiscvI16x8UConvertI8x16High)       \
  V(I16x8Abs, kRiscvI16x8Abs)                                   \
  V(I16x8BitMask, kRiscvI16x8BitMask)                           \
  V(I8x16Neg, kRiscvI8x16Neg)                                   \
  V(I8x16Abs, kRiscvI8x16Abs)                                   \
  V(I8x16BitMask, kRiscvI8x16BitMask)                           \
  V(I8x16Popcnt, kRiscvI8x16Popcnt)                             \
  V(S128Not, kRiscvS128Not)                                     \
  V(V128AnyTrue, kRiscvV128AnyTrue)                             \
  V(I32x4AllTrue, kRiscvI32x4AllTrue)                           \
  V(I16x8AllTrue, kRiscvI16x8AllTrue)                           \
  V(I8x16AllTrue, kRiscvI8x16AllTrue)                           \
  V(I64x2AllTrue, kRiscvI64x2AllTrue)

#define SIMD_SHIFT_OP_LIST(V) \
  V(I64x2Shl)                 \
  V(I64x2ShrS)                \
  V(I64x2ShrU)                \
  V(I32x4Shl)                 \
  V(I32x4ShrS)                \
  V(I32x4ShrU)                \
  V(I16x8Shl)                 \
  V(I16x8ShrS)                \
  V(I16x8ShrU)                \
  V(I8x16Shl)                 \
  V(I8x16ShrS)                \
  V(I8x16ShrU)

#define SIMD_BINOP_LIST(V)                               \
  V(F64x2Add, kRiscvF64x2Add)                            \
  V(F64x2Sub, kRiscvF64x2Sub)                            \
  V(F64x2Mul, kRiscvF64x2Mul)                            \
  V(F64x2Div, kRiscvF64x2Div)                            \
  V(F64x2Min, kRiscvF64x2Min)                            \
  V(F64x2Max, kRiscvF64x2Max)                            \
  V(F64x2Eq, kRiscvF64x2Eq)                              \
  V(F64x2Ne, kRiscvF64x2Ne)                              \
  V(F64x2Lt, kRiscvF64x2Lt)                              \
  V(F64x2Le, kRiscvF64x2Le)                              \
  V(I64x2Eq, kRiscvI64x2Eq)                              \
  V(I64x2Ne, kRiscvI64x2Ne)                              \
  V(I64x2GtS, kRiscvI64x2GtS)                            \
  V(I64x2GeS, kRiscvI64x2GeS)                            \
  V(I64x2Add, kRiscvI64x2Add)                            \
  V(I64x2Sub, kRiscvI64x2Sub)                            \
  V(I64x2Mul, kRiscvI64x2Mul)                            \
  V(F32x4Add, kRiscvF32x4Add)                            \
  V(F32x4Sub, kRiscvF32x4Sub)                            \
  V(F32x4Mul, kRiscvF32x4Mul)                            \
  V(F32x4Div, kRiscvF32x4Div)                            \
  V(F32x4Max, kRiscvF32x4Max)                            \
  V(F32x4Min, kRiscvF32x4Min)                            \
  V(F32x4Eq, kRiscvF32x4Eq)                              \
  V(F32x4Ne, kRiscvF32x4Ne)                              \
  V(F32x4Lt, kRiscvF32x4Lt)                              \
  V(F32x4Le, kRiscvF32x4Le)                              \
  V(F32x4RelaxedMin, kRiscvF32x4Min)                     \
  V(F32x4RelaxedMax, kRiscvF32x4Max)                     \
  V(F64x2RelaxedMin, kRiscvF64x2Min)                     \
  V(F64x2RelaxedMax, kRiscvF64x2Max)                     \
  V(I32x4Add, kRiscvI32x4Add)                            \
  V(I32x4Sub, kRiscvI32x4Sub)                            \
  V(I32x4Mul, kRiscvI32x4Mul)                            \
  V(I32x4MaxS, kRiscvI32x4MaxS)                          \
  V(I32x4MinS, kRiscvI32x4MinS)                          \
  V(I32x4MaxU, kRiscvI32x4MaxU)                          \
  V(I32x4MinU, kRiscvI32x4MinU)                          \
  V(I32x4Eq, kRiscvI32x4Eq)                              \
  V(I32x4Ne, kRiscvI32x4Ne)                              \
  V(I32x4GtS, kRiscvI32x4GtS)                            \
  V(I32x4GeS, kRiscvI32x4GeS)                            \
  V(I32x4GtU, kRiscvI32x4GtU)                            \
  V(I32x4GeU, kRiscvI32x4GeU)                            \
  V(I16x8Add, kRiscvI16x8Add)                            \
  V(I16x8AddSatS, kRiscvI16x8AddSatS)                    \
  V(I16x8AddSatU, kRiscvI16x8AddSatU)                    \
  V(I16x8Sub, kRiscvI16x8Sub)                            \
  V(I16x8SubSatS, kRiscvI16x8SubSatS)                    \
  V(I16x8SubSatU, kRiscvI16x8SubSatU)                    \
  V(I16x8Mul, kRiscvI16x8Mul)                            \
  V(I16x8MaxS, kRiscvI16x8MaxS)                          \
  V(I16x8MinS, kRiscvI16x8MinS)                          \
  V(I16x8MaxU, kRiscvI16x8MaxU)                          \
  V(I16x8MinU, kRiscvI16x8MinU)                          \
  V(I16x8Eq, kRiscvI16x8Eq)                              \
  V(I16x8Ne, kRiscvI16x8Ne)                              \
  V(I16x8GtS, kRiscvI16x8GtS)                            \
  V(I16x8GeS, kRiscvI16x8GeS)                            \
  V(I16x8GtU, kRiscvI16x8GtU)                            \
  V(I16x8GeU, kRiscvI16x8GeU)                            \
  V(I16x8RoundingAverageU, kRiscvI16x8RoundingAverageU)  \
  V(I16x8Q15MulRSatS, kRiscvI16x8Q15MulRSatS)            \
  V(I16x8SConvertI32x4, kRiscvI16x8SConvertI32x4)        \
  V(I16x8UConvertI32x4, kRiscvI16x8UConvertI32x4)        \
  V(I8x16Add, kRiscvI8x16Add)                            \
  V(I8x16AddSatS, kRiscvI8x16AddSatS)                    \
  V(I8x16AddSatU, kRiscvI8x16AddSatU)                    \
  V(I8x16Sub, kRiscvI8x16Sub)                            \
  V(I8x16SubSatS, kRiscvI8x16SubSatS)                    \
  V(I8x16SubSatU, kRiscvI8x16SubSatU)                    \
  V(I8x16MaxS, kRiscvI8x16MaxS)                          \
  V(I8x16MinS, kRiscvI8x16MinS)                          \
  V(I8x16MaxU, kRiscvI8x16MaxU)                          \
  V(I8x16MinU, kRiscvI8x16MinU)                          \
  V(I8x16Eq, kRiscvI8x16Eq)                              \
  V(I8x16Ne, kRiscvI8x16Ne)                              \
  V(I8x16GtS, kRiscvI8x16GtS)                            \
  V(I8x16GeS, kRiscvI8x16GeS)                            \
  V(I8x16GtU, kRiscvI8x16GtU)                            \
  V(I8x16GeU, kRiscvI8x16GeU)                            \
  V(I8x16RoundingAverageU, kRiscvI8x16RoundingAverageU)  \
  V(I8x16SConvertI16x8, kRiscvI8x16SConvertI16x8)        \
  V(I8x16UConvertI16x8, kRiscvI8x16UConvertI16x8)        \
  V(S128And, kRiscvS128And)                              \
  V(S128Or, kRiscvS128Or)                                \
  V(S128Xor, kRiscvS128Xor)                              \
  V(S128AndNot, kRiscvS128AndNot)

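// Materializes a 128-bit constant; all-zeros and all-ones vectors get
// dedicated opcodes, anything else is passed as four 32-bit immediates.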
void InstructionSelector::VisitS128Const(Node* node) {
  RiscvOperandGenerator g(this);
  static const int kUint32Immediates = kSimd128Size / sizeof(uint32_t);
  uint32_t val[kUint32Immediates];
  memcpy(val, S128ImmediateParameterOf(node->op()).data(), kSimd128Size);
  // If all bytes are zeros or ones, avoid emitting code for generic constants.
  bool all_zeros = !(val[0] || val[1] || val[2] || val[3]);
  bool all_ones = val[0] == UINT32_MAX && val[1] == UINT32_MAX &&
                  val[2] == UINT32_MAX && val[3] == UINT32_MAX;
  InstructionOperand dst = g.DefineAsRegister(node);
  if (all_zeros) {
    Emit(kRiscvS128Zero, dst);
  } else if (all_ones) {
    Emit(kRiscvS128AllOnes, dst);
  } else {
    Emit(kRiscvS128Const, dst, g.UseImmediate(val[0]), g.UseImmediate(val[1]),
         g.UseImmediate(val[2]), g.UseImmediate(val[3]));
  }
}

void InstructionSelector::VisitS128Zero(Node* node) {
  RiscvOperandGenerator g(this);
  Emit(kRiscvS128Zero, g.DefineAsRegister(node));
}

#define SIMD_VISIT_SPLAT(Type)                               \
  void InstructionSelector::Visit##Type##Splat(Node* node) { \
    VisitRR(this, kRiscv##Type##Splat, node);                \
  }
SIMD_TYPE_LIST(SIMD_VISIT_SPLAT)
SIMD_VISIT_SPLAT(F64x2)
#undef SIMD_VISIT_SPLAT

#define SIMD_VISIT_EXTRACT_LANE(Type, Sign)                              \
  void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \
    VisitRRI(this, kRiscv##Type##ExtractLane##Sign, node);               \
  }
SIMD_VISIT_EXTRACT_LANE(F64x2, )
SIMD_VISIT_EXTRACT_LANE(F32x4, )
SIMD_VISIT_EXTRACT_LANE(I32x4, )
SIMD_VISIT_EXTRACT_LANE(I64x2, )
SIMD_VISIT_EXTRACT_LANE(I16x8, U)
SIMD_VISIT_EXTRACT_LANE(I16x8, S)
SIMD_VISIT_EXTRACT_LANE(I8x16, U)
SIMD_VISIT_EXTRACT_LANE(I8x16, S)
#undef SIMD_VISIT_EXTRACT_LANE

#define SIMD_VISIT_REPLACE_LANE(Type)                              \
  void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
    VisitRRIR(this, kRiscv##Type##ReplaceLane, node);              \
  }
SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
SIMD_VISIT_REPLACE_LANE(F64x2)
#undef SIMD_VISIT_REPLACE_LANE

#define SIMD_VISIT_UNOP(Name, instruction)            \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRR(this, instruction, node);                 \
  }
SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
#undef SIMD_VISIT_UNOP

#define SIMD_VISIT_SHIFT_OP(Name)                     \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitSimdShift(this, kRiscv##Name, node);         \
  }
SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
#undef SIMD_VISIT_SHIFT_OP

#define SIMD_VISIT_BINOP(Name, instruction)           \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRRR(this, instruction, node);                \
  }
SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
#undef SIMD_VISIT_BINOP

void InstructionSelector::VisitS128Select(Node* node) {
  VisitRRRR(this, kRiscvS128Select, node);
}

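// The relaxed lane selects may assume a canonical (all-zeros or all-ones)
// mask per lane, so lowering them to the full bitwise select is always
// correct.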
#define SIMD_VISIT_SELECT_LANE(Name)                  \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRRRR(this, kRiscvS128Select, node);          \
  }
SIMD_VISIT_SELECT_LANE(I8x16RelaxedLaneSelect)
SIMD_VISIT_SELECT_LANE(I16x8RelaxedLaneSelect)
SIMD_VISIT_SELECT_LANE(I32x4RelaxedLaneSelect)
SIMD_VISIT_SELECT_LANE(I64x2RelaxedLaneSelect)
#undef SIMD_VISIT_SELECT_LANE

#define VISIT_SIMD_QFMOP(Name, instruction)           \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRRRR(this, instruction, node);               \
  }
VISIT_SIMD_QFMOP(F64x2Qfma, kRiscvF64x2Qfma)
VISIT_SIMD_QFMOP(F64x2Qfms, kRiscvF64x2Qfms)
VISIT_SIMD_QFMOP(F32x4Qfma, kRiscvF32x4Qfma)
VISIT_SIMD_QFMOP(F32x4Qfms, kRiscvF32x4Qfms)
#undef VISIT_SIMD_QFMOP

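// Lowers I32x4.DotI16x8S with RVV: vwmul widens the sixteen 16-bit lanes
// into eight 32-bit products (an LMUL=2 register group), the two vcompress
// instructions split the even- and odd-indexed products apart, and the
// final vadd.vv sums each even/odd pair into the four result lanes.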
void InstructionSelector::VisitI32x4DotI16x8S(Node* node) {
  RiscvOperandGenerator g(this);
  InstructionOperand temp = g.TempFpRegister(v16);
  InstructionOperand temp1 = g.TempFpRegister(v14);
  InstructionOperand temp2 = g.TempFpRegister(v30);
  InstructionOperand dst = g.DefineAsRegister(node);
  this->Emit(kRiscvVwmul, temp, g.UseRegister(node->InputAt(0)),
             g.UseRegister(node->InputAt(1)), g.UseImmediate(E16),
             g.UseImmediate(m1));
  this->Emit(kRiscvVcompress, temp2, temp, g.UseImmediate(0b01010101),
             g.UseImmediate(E32), g.UseImmediate(m2));
  this->Emit(kRiscvVcompress, temp1, temp, g.UseImmediate(0b10101010),
             g.UseImmediate(E32), g.UseImmediate(m2));
  this->Emit(kRiscvVaddVv, dst, temp1, temp2, g.UseImmediate(E32),
             g.UseImmediate(m1));
}

namespace {

struct ShuffleEntry {
  uint8_t shuffle[kSimd128Size];
  ArchOpcode opcode;
};

// static const ShuffleEntry arch_shuffles[] = {
//     {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
//      kRiscvS32x4InterleaveRight},
//     {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
//      kRiscvS32x4InterleaveLeft},
//     {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27},
//      kRiscvS32x4PackEven},
//     {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31},
//      kRiscvS32x4PackOdd},
//     {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27},
//      kRiscvS32x4InterleaveEven},
//     {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31},
//      kRiscvS32x4InterleaveOdd},

//     {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
//      kRiscvS16x8InterleaveRight},
//     {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
//      kRiscvS16x8InterleaveLeft},
//     {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
//      kRiscvS16x8PackEven},
//     {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
//      kRiscvS16x8PackOdd},
//     {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29},
//      kRiscvS16x8InterleaveEven},
//     {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31},
//      kRiscvS16x8InterleaveOdd},
//     {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9},
//      kRiscvS16x4Reverse},
//     {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13},
//      kRiscvS16x2Reverse},

//     {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
//      kRiscvS8x16InterleaveRight},
//     {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
//      kRiscvS8x16InterleaveLeft},
//     {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
//      kRiscvS8x16PackEven},
//     {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
//      kRiscvS8x16PackOdd},
//     {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
//      kRiscvS8x16InterleaveEven},
//     {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
//      kRiscvS8x16InterleaveOdd},
//     {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8},
//      kRiscvS8x8Reverse},
//     {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12},
//      kRiscvS8x4Reverse},
//     {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14},
//      kRiscvS8x2Reverse}};

// bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
//                          size_t num_entries, bool is_swizzle,
//                          ArchOpcode* opcode) {
//   uint8_t mask = is_swizzle ? kSimd128Size - 1 : 2 * kSimd128Size - 1;
//   for (size_t i = 0; i < num_entries; ++i) {
//     const ShuffleEntry& entry = table[i];
//     int j = 0;
//     for (; j < kSimd128Size; ++j) {
//       if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
//         break;
//       }
//     }
//     if (j == kSimd128Size) {
//       *opcode = entry.opcode;
//       return true;
//     }
//   }
//   return false;
// }

}  // namespace

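// Pattern-matched shuffles (the table and matcher above) are currently
// disabled, so every shuffle is lowered to the generic kRiscvI8x16Shuffle,
// with the 16 byte indices packed into four 32-bit immediates.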
void InstructionSelector::VisitI8x16Shuffle(Node* node) {
  uint8_t shuffle[kSimd128Size];
  bool is_swizzle;
  CanonicalizeShuffle(node, shuffle, &is_swizzle);
  Node* input0 = node->InputAt(0);
  Node* input1 = node->InputAt(1);
  RiscvOperandGenerator g(this);
  // uint8_t shuffle32x4[4];
  // ArchOpcode opcode;
  // if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
  //                         is_swizzle, &opcode)) {
  //   VisitRRR(this, opcode, node);
  //   return;
  // }
  // uint8_t offset;
  // if (wasm::SimdShuffle::TryMatchConcat(shuffle, &offset)) {
  //   Emit(kRiscvS8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input1),
  //        g.UseRegister(input0), g.UseImmediate(offset));
  //   return;
  // }
  // if (wasm::SimdShuffle::TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
  //   Emit(kRiscvS32x4Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
  //        g.UseRegister(input1),
  //        g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle32x4)));
  //   return;
  // }
  Emit(kRiscvI8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
       g.UseRegister(input1),
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle)),
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 4)),
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 8)),
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 12)));
}

void InstructionSelector::VisitI8x16Swizzle(Node* node) {
  RiscvOperandGenerator g(this);
  InstructionOperand temps[] = {g.TempSimd128Register()};
  // We don't want input 0 or input 1 to be the same as the output, since
  // the output is modified before the calculation is done.
  Emit(kRiscvVrgather, g.DefineAsRegister(node),
       g.UseUniqueRegister(node->InputAt(0)),
       g.UseUniqueRegister(node->InputAt(1)), g.UseImmediate(E8),
       g.UseImmediate(m1), arraysize(temps), temps);
}

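// On RISC-V64 the 8- and 16-bit sign extensions yield a value that is
// properly sign-extended to the full 64-bit register, so the Int32 and
// Int64 visitors can share kRiscvSignExtendByte/kRiscvSignExtendShort.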
void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
  RiscvOperandGenerator g(this);
  Emit(kRiscvSignExtendByte, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
  RiscvOperandGenerator g(this);
  Emit(kRiscvSignExtendShort, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) {
  RiscvOperandGenerator g(this);
  Emit(kRiscvSignExtendByte, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) {
  RiscvOperandGenerator g(this);
  Emit(kRiscvSignExtendShort, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
  EmitSignExtendWord(this, node);
}

void InstructionSelector::VisitF32x4Pmin(Node* node) {
  VisitUniqueRRR(this, kRiscvF32x4Pmin, node);
}

void InstructionSelector::VisitF32x4Pmax(Node* node) {
  VisitUniqueRRR(this, kRiscvF32x4Pmax, node);
}

void InstructionSelector::VisitF64x2Pmin(Node* node) {
  VisitUniqueRRR(this, kRiscvF64x2Pmin, node);
}

void InstructionSelector::VisitF64x2Pmax(Node* node) {
  VisitUniqueRRR(this, kRiscvF64x2Pmax, node);
}

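// ExtMul lowering: the "Low" variants widen-multiply the low halves of both
// inputs directly (vwmul/vwmulu at fractional LMUL), while the "High"
// variants first shift the upper halves down with vslidedown and then apply
// the same widening multiply.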
#define VISIT_EXT_MUL(OPCODE1, OPCODE2, TYPE)                             \
  void InstructionSelector::Visit##OPCODE1##ExtMulLow##OPCODE2##S(        \
      Node* node) {                                                       \
    RiscvOperandGenerator g(this);                                        \
    Emit(kRiscvVwmul, g.DefineAsRegister(node),                           \
         g.UseUniqueRegister(node->InputAt(0)),                           \
         g.UseUniqueRegister(node->InputAt(1)), g.UseImmediate(E##TYPE),  \
         g.UseImmediate(mf2));                                            \
  }                                                                       \
  void InstructionSelector::Visit##OPCODE1##ExtMulHigh##OPCODE2##S(       \
      Node* node) {                                                       \
    RiscvOperandGenerator g(this);                                        \
    InstructionOperand t1 = g.TempFpRegister(v16);                        \
    Emit(kRiscvVslidedown, t1, g.UseUniqueRegister(node->InputAt(0)),     \
         g.UseImmediate(kRvvVLEN / TYPE / 2), g.UseImmediate(E##TYPE),    \
         g.UseImmediate(m1));                                             \
    InstructionOperand t2 = g.TempFpRegister(v17);                        \
    Emit(kRiscvVslidedown, t2, g.UseUniqueRegister(node->InputAt(1)),     \
         g.UseImmediate(kRvvVLEN / TYPE / 2), g.UseImmediate(E##TYPE),    \
         g.UseImmediate(m1));                                             \
    Emit(kRiscvVwmul, g.DefineAsRegister(node), t1, t2,                   \
         g.UseImmediate(E##TYPE), g.UseImmediate(mf2));                   \
  }                                                                       \
  void InstructionSelector::Visit##OPCODE1##ExtMulLow##OPCODE2##U(        \
      Node* node) {                                                       \
    RiscvOperandGenerator g(this);                                        \
    Emit(kRiscvVwmulu, g.DefineAsRegister(node),                          \
         g.UseUniqueRegister(node->InputAt(0)),                           \
         g.UseUniqueRegister(node->InputAt(1)), g.UseImmediate(E##TYPE),  \
         g.UseImmediate(mf2));                                            \
  }                                                                       \
  void InstructionSelector::Visit##OPCODE1##ExtMulHigh##OPCODE2##U(       \
      Node* node) {                                                       \
    RiscvOperandGenerator g(this);                                        \
    InstructionOperand t1 = g.TempFpRegister(v16);                        \
    Emit(kRiscvVslidedown, t1, g.UseUniqueRegister(node->InputAt(0)),     \
         g.UseImmediate(kRvvVLEN / TYPE / 2), g.UseImmediate(E##TYPE),    \
         g.UseImmediate(m1));                                             \
    InstructionOperand t2 = g.TempFpRegister(v17);                        \
    Emit(kRiscvVslidedown, t2, g.UseUniqueRegister(node->InputAt(1)),     \
         g.UseImmediate(kRvvVLEN / TYPE / 2), g.UseImmediate(E##TYPE),    \
         g.UseImmediate(m1));                                             \
    Emit(kRiscvVwmulu, g.DefineAsRegister(node), t1, t2,                  \
         g.UseImmediate(E##TYPE), g.UseImmediate(mf2));                   \
  }

VISIT_EXT_MUL(I64x2, I32x4, 32)
VISIT_EXT_MUL(I32x4, I16x8, 16)
VISIT_EXT_MUL(I16x8, I8x16, 8)
#undef VISIT_EXT_MUL

void InstructionSelector::AddOutputToSelectContinuation(OperandGenerator* g,
                                                        int first_input_index,
                                                        Node* node) {
  UNREACHABLE();
}

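// The flags returned here advertise which optional machine operators this
// backend implements natively (e.g. the float rounding modes); unlisted
// operators are lowered before instruction selection and never reach this
// selector.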
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags;
  return flags | MachineOperatorBuilder::kWord32ShiftIsSafe |
         MachineOperatorBuilder::kInt32DivIsSafe |
         MachineOperatorBuilder::kUint32DivIsSafe |
         MachineOperatorBuilder::kFloat64RoundDown |
         MachineOperatorBuilder::kFloat32RoundDown |
         MachineOperatorBuilder::kFloat64RoundUp |
         MachineOperatorBuilder::kFloat32RoundUp |
         MachineOperatorBuilder::kFloat64RoundTruncate |
         MachineOperatorBuilder::kFloat32RoundTruncate |
         MachineOperatorBuilder::kFloat64RoundTiesEven |
         MachineOperatorBuilder::kFloat32RoundTiesEven;
}

// static
MachineOperatorBuilder::AlignmentRequirements
InstructionSelector::AlignmentRequirements() {
#ifdef RISCV_HAS_NO_UNALIGNED
  return MachineOperatorBuilder::AlignmentRequirements::
      NoUnalignedAccessSupport();
#else
  return MachineOperatorBuilder::AlignmentRequirements::
      FullUnalignedAccessSupport();
#endif
}

#undef SIMD_BINOP_LIST
#undef SIMD_SHIFT_OP_LIST
#undef SIMD_UNOP_LIST
#undef SIMD_TYPE_LIST
#undef TRACE_UNIMPL
#undef TRACE

}  // namespace compiler
}  // namespace internal
}  // namespace v8