// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/bits.h"
#include "src/base/platform/wrappers.h"
#include "src/codegen/machine-type.h"
#include "src/compiler/backend/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"

namespace v8 {
namespace internal {
namespace compiler {

#define TRACE_UNIMPL() \
  PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)

#define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)

// Adds loong64-specific methods for generating InstructionOperands.
class Loong64OperandGenerator final : public OperandGenerator {
 public:
  explicit Loong64OperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  InstructionOperand UseOperand(Node* node, InstructionCode opcode) {
    if (CanBeImmediate(node, opcode)) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  // Use the zero register if the node has the immediate value zero, otherwise
  // assign a register.
  InstructionOperand UseRegisterOrImmediateZero(Node* node) {
    if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
        (IsFloatConstant(node) &&
         (bit_cast<int64_t>(GetFloatConstantValue(node)) == 0))) {
      return UseImmediate(node);
    }
    return UseRegister(node);
  }

  bool IsIntegerConstant(Node* node) {
    return (node->opcode() == IrOpcode::kInt32Constant) ||
           (node->opcode() == IrOpcode::kInt64Constant);
  }

  int64_t GetIntegerConstantValue(Node* node) {
    if (node->opcode() == IrOpcode::kInt32Constant) {
      return OpParameter<int32_t>(node->op());
    }
    DCHECK_EQ(IrOpcode::kInt64Constant, node->opcode());
    return OpParameter<int64_t>(node->op());
  }

  bool IsFloatConstant(Node* node) {
    return (node->opcode() == IrOpcode::kFloat32Constant) ||
           (node->opcode() == IrOpcode::kFloat64Constant);
  }

  double GetFloatConstantValue(Node* node) {
    if (node->opcode() == IrOpcode::kFloat32Constant) {
      return OpParameter<float>(node->op());
    }
    DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
    return OpParameter<double>(node->op());
  }

  bool CanBeImmediate(Node* node, InstructionCode mode) {
    return IsIntegerConstant(node) &&
           CanBeImmediate(GetIntegerConstantValue(node), mode);
  }

  bool CanBeImmediate(int64_t value, InstructionCode opcode) {
    switch (ArchOpcodeField::decode(opcode)) {
      case kLoong64Sll_w:
      case kLoong64Srl_w:
      case kLoong64Sra_w:
        return is_uint5(value);
      case kLoong64Sll_d:
      case kLoong64Srl_d:
      case kLoong64Sra_d:
        return is_uint6(value);
      case kLoong64And:
      case kLoong64And32:
      case kLoong64Or:
      case kLoong64Or32:
      case kLoong64Xor:
      case kLoong64Xor32:
      case kLoong64Tst:
        return is_uint12(value);
      case kLoong64Ld_b:
      case kLoong64Ld_bu:
      case kLoong64St_b:
      case kLoong64Ld_h:
      case kLoong64Ld_hu:
      case kLoong64St_h:
      case kLoong64Ld_w:
      case kLoong64Ld_wu:
      case kLoong64St_w:
      case kLoong64Ld_d:
      case kLoong64St_d:
      case kLoong64Fld_s:
      case kLoong64Fst_s:
      case kLoong64Fld_d:
      case kLoong64Fst_d:
        return is_int16(value);
      default:
        return is_int12(value);
    }
  }
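
  // Example: with the table above, CanBeImmediate(0x7FF, kLoong64And32) is
  // true (0x7FF fits an unsigned 12-bit field), while CanBeImmediate(-1,
  // kLoong64And32) is false, so -1 must be materialized in a register.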

 private:
  bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
    TRACE_UNIMPL();
    return false;
  }
};

static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
                    Node* node) {
  Loong64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)));
}

static void VisitRRI(InstructionSelector* selector, ArchOpcode opcode,
                     Node* node) {
  Loong64OperandGenerator g(selector);
  int32_t imm = OpParameter<int32_t>(node->op());
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)), g.UseImmediate(imm));
}

static void VisitSimdShift(InstructionSelector* selector, ArchOpcode opcode,
                           Node* node) {
  Loong64OperandGenerator g(selector);
  if (g.IsIntegerConstant(node->InputAt(1))) {
    selector->Emit(opcode, g.DefineAsRegister(node),
                   g.UseRegister(node->InputAt(0)),
                   g.UseImmediate(node->InputAt(1)));
  } else {
    selector->Emit(opcode, g.DefineAsRegister(node),
                   g.UseRegister(node->InputAt(0)),
                   g.UseRegister(node->InputAt(1)));
  }
}

static void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode,
                      Node* node) {
  Loong64OperandGenerator g(selector);
  int32_t imm = OpParameter<int32_t>(node->op());
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)), g.UseImmediate(imm),
                 g.UseRegister(node->InputAt(1)));
}

static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
                     Node* node) {
  Loong64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseRegister(node->InputAt(1)));
}

static void VisitUniqueRRR(InstructionSelector* selector, ArchOpcode opcode,
                           Node* node) {
  Loong64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseUniqueRegister(node->InputAt(0)),
                 g.UseUniqueRegister(node->InputAt(1)));
}

void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
  Loong64OperandGenerator g(selector);
  selector->Emit(
      opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
      g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2)));
}

static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
                     Node* node) {
  Loong64OperandGenerator g(selector);
  selector->Emit(opcode, g.DefineAsRegister(node),
                 g.UseRegister(node->InputAt(0)),
                 g.UseOperand(node->InputAt(1), opcode));
}

struct ExtendingLoadMatcher {
  ExtendingLoadMatcher(Node* node, InstructionSelector* selector)
      : matches_(false), selector_(selector), base_(nullptr), immediate_(0) {
    Initialize(node);
  }

  bool Matches() const { return matches_; }

  Node* base() const {
    DCHECK(Matches());
    return base_;
  }
  int64_t immediate() const {
    DCHECK(Matches());
    return immediate_;
  }
  ArchOpcode opcode() const {
    DCHECK(Matches());
    return opcode_;
  }

 private:
  bool matches_;
  InstructionSelector* selector_;
  Node* base_;
  int64_t immediate_;
  ArchOpcode opcode_;

  void Initialize(Node* node) {
    Int64BinopMatcher m(node);
    // When loading a 64-bit value and shifting by 32, we should
    // just load and sign-extend the interesting 4 bytes instead.
    // This happens, for example, when we're loading and untagging SMIs.
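    // On little-endian LoongArch64 the upper word sits at offset +4, so
    // Word64Sar(Load[kWord64](base, k), 32) becomes Ld_w(base, k + 4) below.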
    DCHECK(m.IsWord64Sar());
    if (m.left().IsLoad() && m.right().Is(32) &&
        selector_->CanCover(m.node(), m.left().node())) {
      DCHECK_EQ(selector_->GetEffectLevel(node),
                selector_->GetEffectLevel(m.left().node()));
      MachineRepresentation rep =
          LoadRepresentationOf(m.left().node()->op()).representation();
      DCHECK_EQ(3, ElementSizeLog2Of(rep));
      if (rep != MachineRepresentation::kTaggedSigned &&
          rep != MachineRepresentation::kTaggedPointer &&
          rep != MachineRepresentation::kTagged &&
          rep != MachineRepresentation::kWord64) {
        return;
      }

      Loong64OperandGenerator g(selector_);
      Node* load = m.left().node();
      Node* offset = load->InputAt(1);
      base_ = load->InputAt(0);
      opcode_ = kLoong64Ld_w;
      if (g.CanBeImmediate(offset, opcode_)) {
        immediate_ = g.GetIntegerConstantValue(offset) + 4;
        matches_ = g.CanBeImmediate(immediate_, kLoong64Ld_w);
      }
    }
  }
};

bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node,
                          Node* output_node) {
  ExtendingLoadMatcher m(node, selector);
  Loong64OperandGenerator g(selector);
  if (m.Matches()) {
    InstructionOperand inputs[2];
    inputs[0] = g.UseRegister(m.base());
    InstructionCode opcode =
        m.opcode() | AddressingModeField::encode(kMode_MRI);
    DCHECK(is_int32(m.immediate()));
    inputs[1] = g.TempImmediate(static_cast<int32_t>(m.immediate()));
    InstructionOperand outputs[] = {g.DefineAsRegister(output_node)};
    selector->Emit(opcode, arraysize(outputs), outputs, arraysize(inputs),
                   inputs);
    return true;
  }
  return false;
}

bool TryMatchImmediate(InstructionSelector* selector,
                       InstructionCode* opcode_return, Node* node,
                       size_t* input_count_return, InstructionOperand* inputs) {
  Loong64OperandGenerator g(selector);
  if (g.CanBeImmediate(node, *opcode_return)) {
    *opcode_return |= AddressingModeField::encode(kMode_MRI);
    inputs[0] = g.UseImmediate(node);
    *input_count_return = 1;
    return true;
  }
  return false;
}

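// Note: VisitBinop below passes &inputs[1] into TryMatchImmediate, so on
// success the immediate lands in the caller's second operand slot, leaving
// inputs[0] free for the register operand.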
static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, bool has_reverse_opcode,
                       InstructionCode reverse_opcode,
                       FlagsContinuation* cont) {
  Loong64OperandGenerator g(selector);
  Int32BinopMatcher m(node);
  InstructionOperand inputs[2];
  size_t input_count = 0;
  InstructionOperand outputs[1];
  size_t output_count = 0;

  if (TryMatchImmediate(selector, &opcode, m.right().node(), &input_count,
                        &inputs[1])) {
    inputs[0] = g.UseRegister(m.left().node());
    input_count++;
  } else if (has_reverse_opcode &&
             TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
                               &input_count, &inputs[1])) {
    inputs[0] = g.UseRegister(m.right().node());
    opcode = reverse_opcode;
    input_count++;
  } else {
    inputs[input_count++] = g.UseRegister(m.left().node());
    inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
  }

  outputs[output_count++] = g.DefineAsRegister(node);

  DCHECK_NE(0u, input_count);
  DCHECK_EQ(1u, output_count);
  DCHECK_GE(arraysize(inputs), input_count);
  DCHECK_GE(arraysize(outputs), output_count);

  selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
                                 inputs, cont);
}

static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, bool has_reverse_opcode,
                       InstructionCode reverse_opcode) {
  FlagsContinuation cont;
  VisitBinop(selector, node, opcode, has_reverse_opcode, reverse_opcode, &cont);
}

static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont) {
  VisitBinop(selector, node, opcode, false, kArchNop, cont);
}

static void VisitBinop(InstructionSelector* selector, Node* node,
                       InstructionCode opcode) {
  VisitBinop(selector, node, opcode, false, kArchNop);
}

void InstructionSelector::VisitStackSlot(Node* node) {
  StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
  int alignment = rep.alignment();
  int slot = frame_->AllocateSpillSlot(rep.size(), alignment);
  OperandGenerator g(this);

  Emit(kArchStackSlot, g.DefineAsRegister(node),
       sequence()->AddImmediate(Constant(slot)), 0, nullptr);
}

void InstructionSelector::VisitAbortCSADcheck(Node* node) {
  Loong64OperandGenerator g(this);
  Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
}

void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
              Node* output = nullptr) {
  Loong64OperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

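  // Loads from an external reference that is addressable via the root
  // register are folded into a single root-relative access (kMode_Root),
  // so the external base address never needs to be materialized.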
  ExternalReferenceMatcher m(base);
  if (m.HasResolvedValue() && g.IsIntegerConstant(index) &&
      selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
    ptrdiff_t const delta =
        g.GetIntegerConstantValue(index) +
        TurboAssemblerBase::RootRegisterOffsetForExternalReference(
            selector->isolate(), m.ResolvedValue());
    // Check that the delta is a 32-bit integer due to the limitations of
    // immediate operands.
    if (is_int32(delta)) {
      opcode |= AddressingModeField::encode(kMode_Root);
      selector->Emit(opcode,
                     g.DefineAsRegister(output == nullptr ? node : output),
                     g.UseImmediate(static_cast<int32_t>(delta)));
      return;
    }
  }

  if (g.CanBeImmediate(index, opcode)) {
    selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
                   g.DefineAsRegister(output == nullptr ? node : output),
                   g.UseRegister(base), g.UseImmediate(index));
  } else {
    selector->Emit(opcode | AddressingModeField::encode(kMode_MRR),
                   g.DefineAsRegister(output == nullptr ? node : output),
                   g.UseRegister(base), g.UseRegister(index));
  }
}

void InstructionSelector::VisitStoreLane(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitLoadLane(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitLoadTransform(Node* node) {
  LoadTransformParameters params = LoadTransformParametersOf(node->op());

  InstructionCode opcode = kArchNop;
  switch (params.transformation) {
      // TODO(LOONG_dev): LOONG64 S128 LoadSplat
    case LoadTransformation::kS128Load8Splat:
      opcode = kLoong64S128LoadSplat;
      break;
    case LoadTransformation::kS128Load16Splat:
      opcode = kLoong64S128LoadSplat;
      break;
    case LoadTransformation::kS128Load32Splat:
      opcode = kLoong64S128LoadSplat;
      break;
    case LoadTransformation::kS128Load64Splat:
      opcode = kLoong64S128LoadSplat;
      break;
    case LoadTransformation::kS128Load8x8S:
      opcode = kLoong64S128Load8x8S;
      break;
    case LoadTransformation::kS128Load8x8U:
      opcode = kLoong64S128Load8x8U;
      break;
    case LoadTransformation::kS128Load16x4S:
      opcode = kLoong64S128Load16x4S;
      break;
    case LoadTransformation::kS128Load16x4U:
      opcode = kLoong64S128Load16x4U;
      break;
    case LoadTransformation::kS128Load32x2S:
      opcode = kLoong64S128Load32x2S;
      break;
    case LoadTransformation::kS128Load32x2U:
      opcode = kLoong64S128Load32x2U;
      break;
    case LoadTransformation::kS128Load32Zero:
      opcode = kLoong64S128Load32Zero;
      break;
    case LoadTransformation::kS128Load64Zero:
      opcode = kLoong64S128Load64Zero;
      break;
    default:
      UNIMPLEMENTED();
  }

  EmitLoad(this, node, opcode);
}

void InstructionSelector::VisitLoad(Node* node) {
  LoadRepresentation load_rep = LoadRepresentationOf(node->op());

  InstructionCode opcode = kArchNop;
  switch (load_rep.representation()) {
    case MachineRepresentation::kFloat32:
      opcode = kLoong64Fld_s;
      break;
    case MachineRepresentation::kFloat64:
      opcode = kLoong64Fld_d;
      break;
    case MachineRepresentation::kBit:  // Fall through.
    case MachineRepresentation::kWord8:
      opcode = load_rep.IsUnsigned() ? kLoong64Ld_bu : kLoong64Ld_b;
      break;
    case MachineRepresentation::kWord16:
      opcode = load_rep.IsUnsigned() ? kLoong64Ld_hu : kLoong64Ld_h;
      break;
    case MachineRepresentation::kWord32:
      opcode = kLoong64Ld_w;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:         // Fall through.
    case MachineRepresentation::kWord64:
      opcode = kLoong64Ld_d;
      break;
    case MachineRepresentation::kCompressedPointer:  // Fall through.
    case MachineRepresentation::kCompressed:         // Fall through.
    case MachineRepresentation::kSandboxedPointer:   // Fall through.
    case MachineRepresentation::kMapWord:            // Fall through.
    case MachineRepresentation::kNone:
    case MachineRepresentation::kSimd128:
      UNREACHABLE();
  }

  EmitLoad(this, node, opcode);
}

void InstructionSelector::VisitProtectedLoad(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}

void InstructionSelector::VisitStore(Node* node) {
  Loong64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
  WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
  MachineRepresentation rep = store_rep.representation();

  if (FLAG_enable_unconditional_write_barriers && CanBeTaggedPointer(rep)) {
    write_barrier_kind = kFullWriteBarrier;
  }

  // TODO(loong64): I guess this could be done in a better way.
  if (write_barrier_kind != kNoWriteBarrier && !FLAG_disable_write_barriers) {
    DCHECK(CanBeTaggedPointer(rep));
    AddressingMode addressing_mode;
    InstructionOperand inputs[3];
    size_t input_count = 0;
    inputs[input_count++] = g.UseUniqueRegister(base);
    // OutOfLineRecordWrite uses the index in an arithmetic instruction, so
    // the immediate must fit the arithmetic range (kLoong64Add_d), not just
    // the load/store range.
    if (g.CanBeImmediate(index, kLoong64Add_d)) {
      inputs[input_count++] = g.UseImmediate(index);
      addressing_mode = kMode_MRI;
    } else {
      inputs[input_count++] = g.UseUniqueRegister(index);
      addressing_mode = kMode_MRR;
    }
    inputs[input_count++] = g.UseUniqueRegister(value);
    RecordWriteMode record_write_mode =
        WriteBarrierKindToRecordWriteMode(write_barrier_kind);
    InstructionCode code = kArchStoreWithWriteBarrier;
    code |= AddressingModeField::encode(addressing_mode);
    code |= MiscField::encode(static_cast<int>(record_write_mode));
    Emit(code, 0, nullptr, input_count, inputs);
  } else {
    ArchOpcode opcode;
    switch (rep) {
      case MachineRepresentation::kFloat32:
        opcode = kLoong64Fst_s;
        break;
      case MachineRepresentation::kFloat64:
        opcode = kLoong64Fst_d;
        break;
      case MachineRepresentation::kBit:  // Fall through.
      case MachineRepresentation::kWord8:
        opcode = kLoong64St_b;
        break;
      case MachineRepresentation::kWord16:
        opcode = kLoong64St_h;
        break;
      case MachineRepresentation::kWord32:
        opcode = kLoong64St_w;
        break;
      case MachineRepresentation::kTaggedSigned:   // Fall through.
      case MachineRepresentation::kTaggedPointer:  // Fall through.
      case MachineRepresentation::kTagged:         // Fall through.
      case MachineRepresentation::kWord64:
        opcode = kLoong64St_d;
        break;
      case MachineRepresentation::kCompressedPointer:  // Fall through.
      case MachineRepresentation::kCompressed:         // Fall through.
      case MachineRepresentation::kSandboxedPointer:   // Fall through.
      case MachineRepresentation::kMapWord:            // Fall through.
      case MachineRepresentation::kNone:
      case MachineRepresentation::kSimd128:
        UNREACHABLE();
    }

    ExternalReferenceMatcher m(base);
    if (m.HasResolvedValue() && g.IsIntegerConstant(index) &&
        CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
      ptrdiff_t const delta =
          g.GetIntegerConstantValue(index) +
          TurboAssemblerBase::RootRegisterOffsetForExternalReference(
              isolate(), m.ResolvedValue());
      // Check that the delta is a 32-bit integer due to the limitations of
      // immediate operands.
      if (is_int32(delta)) {
        Emit(opcode | AddressingModeField::encode(kMode_Root), g.NoOutput(),
             g.UseImmediate(static_cast<int32_t>(delta)), g.UseImmediate(0),
             g.UseRegisterOrImmediateZero(value));
        return;
      }
    }

    if (g.CanBeImmediate(index, opcode)) {
      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
           g.UseRegister(base), g.UseImmediate(index),
           g.UseRegisterOrImmediateZero(value));
    } else {
      Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
           g.UseRegister(base), g.UseRegister(index),
           g.UseRegisterOrImmediateZero(value));
    }
  }
}

void InstructionSelector::VisitProtectedStore(Node* node) {
  // TODO(eholk)
  UNIMPLEMENTED();
}

void InstructionSelector::VisitWord32And(Node* node) {
  Loong64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
      m.right().HasResolvedValue()) {
    uint32_t mask = m.right().ResolvedValue();
    uint32_t mask_width = base::bits::CountPopulation(mask);
    uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
    if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
      // The mask must be contiguous, and occupy the least-significant bits.
      DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));

      // Select Bstrpick_w for And(Shr(x, imm), mask) where the mask is in the
      // least significant bits.
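      // e.g. Word32And(Word32Shr(x, 8), 0xFFFF) extracts bits [8..23] of x,
      // which is exactly Bstrpick_w(x, lsb = 8, width = 16).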
      Int32BinopMatcher mleft(m.left().node());
      if (mleft.right().HasResolvedValue()) {
        // Any shift value can match; int32 shifts use `value % 32`.
        uint32_t lsb = mleft.right().ResolvedValue() & 0x1F;

        // Bstrpick_w cannot extract bits past the register size. However,
        // since shifting the original value would have introduced some zeros,
        // we can still use Bstrpick_w with a smaller mask and the remaining
        // bits will be zeros.
        if (lsb + mask_width > 32) mask_width = 32 - lsb;

        Emit(kLoong64Bstrpick_w, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(mask_width));
        return;
      }
      // Other cases fall through to the normal And operation.
    }
  }
  if (m.right().HasResolvedValue()) {
    uint32_t mask = m.right().ResolvedValue();
    uint32_t shift = base::bits::CountPopulation(~mask);
    uint32_t msb = base::bits::CountLeadingZeros32(~mask);
    if (shift != 0 && shift != 32 && msb + shift == 32) {
      // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
      // and remove constant loading of inverted mask.
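      // e.g. x & 0xFFFFFF00 clears the low 8 bits in place as
      // Bstrins_w(x, zero, lsb = 0, width = 8); no mask constant is loaded.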
      Emit(kLoong64Bstrins_w, g.DefineSameAsFirst(node),
           g.UseRegister(m.left().node()), g.TempImmediate(0),
           g.TempImmediate(shift));
      return;
    }
  }
  VisitBinop(this, node, kLoong64And32, true, kLoong64And32);
}

void InstructionSelector::VisitWord64And(Node* node) {
  Loong64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
      m.right().HasResolvedValue()) {
    uint64_t mask = m.right().ResolvedValue();
    uint32_t mask_width = base::bits::CountPopulation(mask);
    uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
    if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
      // The mask must be contiguous, and occupy the least-significant bits.
      DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));

      // Select Bstrpick_d for And(Shr(x, imm), mask) where the mask is in the
      // least significant bits.
      Int64BinopMatcher mleft(m.left().node());
      if (mleft.right().HasResolvedValue()) {
        // Any shift value can match; int64 shifts use `value % 64`.
        uint32_t lsb =
            static_cast<uint32_t>(mleft.right().ResolvedValue() & 0x3F);

        // Bstrpick_d cannot extract bits past the register size. However,
        // since shifting the original value would have introduced some zeros,
        // we can still use Bstrpick_d with a smaller mask and the remaining
        // bits will be zeros.
        if (lsb + mask_width > 64) mask_width = 64 - lsb;

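        // A full-width extract (lsb == 0, width == 64) is just the original
        // value, so emit a plain register move instead of a Bstrpick_d.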
        if (lsb == 0 && mask_width == 64) {
          Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(mleft.left().node()));
        } else {
          Emit(kLoong64Bstrpick_d, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
               g.TempImmediate(static_cast<int32_t>(mask_width)));
        }
        return;
      }
      // Other cases fall through to the normal And operation.
    }
  }
  if (m.right().HasResolvedValue()) {
    uint64_t mask = m.right().ResolvedValue();
    uint32_t shift = base::bits::CountPopulation(~mask);
    uint32_t msb = base::bits::CountLeadingZeros64(~mask);
    if (shift != 0 && shift < 32 && msb + shift == 64) {
      // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
      // and remove constant loading of the inverted mask. Bstrins_d cannot
      // insert bits past the word size, so shifts smaller than 32 are covered.
      Emit(kLoong64Bstrins_d, g.DefineSameAsFirst(node),
           g.UseRegister(m.left().node()), g.TempImmediate(0),
           g.TempImmediate(shift));
      return;
    }
  }
  VisitBinop(this, node, kLoong64And, true, kLoong64And);
}

void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kLoong64Or32, true, kLoong64Or32);
}

void InstructionSelector::VisitWord64Or(Node* node) {
  VisitBinop(this, node, kLoong64Or, true, kLoong64Or);
}

void InstructionSelector::VisitWord32Xor(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
      m.right().Is(-1)) {
    Int32BinopMatcher mleft(m.left().node());
    if (!mleft.right().HasResolvedValue()) {
      Loong64OperandGenerator g(this);
      Emit(kLoong64Nor32, g.DefineAsRegister(node),
           g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
  }
  if (m.right().Is(-1)) {
    // Use Nor for bit negation and eliminate constant loading for xori.
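    // x ^ -1 == ~x == Nor(x, 0), so no xori immediate needs materializing.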
    Loong64OperandGenerator g(this);
    Emit(kLoong64Nor32, g.DefineAsRegister(node),
         g.UseRegister(m.left().node()), g.TempImmediate(0));
    return;
  }
  VisitBinop(this, node, kLoong64Xor32, true, kLoong64Xor32);
}

void InstructionSelector::VisitWord64Xor(Node* node) {
  Int64BinopMatcher m(node);
  if (m.left().IsWord64Or() && CanCover(node, m.left().node()) &&
      m.right().Is(-1)) {
    Int64BinopMatcher mleft(m.left().node());
    if (!mleft.right().HasResolvedValue()) {
      Loong64OperandGenerator g(this);
      Emit(kLoong64Nor, g.DefineAsRegister(node),
           g.UseRegister(mleft.left().node()),
           g.UseRegister(mleft.right().node()));
      return;
    }
  }
  if (m.right().Is(-1)) {
    // Use Nor for bit negation and eliminate constant loading for xori.
    Loong64OperandGenerator g(this);
    Emit(kLoong64Nor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
         g.TempImmediate(0));
    return;
  }
  VisitBinop(this, node, kLoong64Xor, true, kLoong64Xor);
}

void InstructionSelector::VisitWord32Shl(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
      m.right().IsInRange(1, 31)) {
    Loong64OperandGenerator g(this);
    Int32BinopMatcher mleft(m.left().node());
    // Match Word32Shl(Word32And(x, mask), imm) to Sll_w where the mask is
    // contiguous, and the shift immediate non-zero.
    if (mleft.right().HasResolvedValue()) {
      uint32_t mask = mleft.right().ResolvedValue();
      uint32_t mask_width = base::bits::CountPopulation(mask);
      uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
      if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
        uint32_t shift = m.right().ResolvedValue();
        DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
        DCHECK_NE(0u, shift);
        if ((shift + mask_width) >= 32) {
          // If the mask is contiguous and reaches or extends beyond the top
          // bit, only the shift is needed.
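          // e.g. (x & 0xFFFF) << 16: every bit cleared by the And is shifted
          // out anyway, so a bare Sll_w by 16 suffices.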
          Emit(kLoong64Sll_w, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()),
               g.UseImmediate(m.right().node()));
          return;
        }
      }
    }
  }
  VisitRRO(this, kLoong64Sll_w, node);
}

void InstructionSelector::VisitWord32Shr(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32And() && m.right().HasResolvedValue()) {
    uint32_t lsb = m.right().ResolvedValue() & 0x1F;
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasResolvedValue() &&
        mleft.right().ResolvedValue() != 0) {
      // Select Bstrpick_w for Shr(And(x, mask), imm) where the result of the
      // mask is shifted into the least-significant bits.
      uint32_t mask = (mleft.right().ResolvedValue() >> lsb) << lsb;
      unsigned mask_width = base::bits::CountPopulation(mask);
      unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
      if ((mask_msb + mask_width + lsb) == 32) {
        Loong64OperandGenerator g(this);
        DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
        Emit(kLoong64Bstrpick_w, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(mask_width));
        return;
      }
    }
  }
  VisitRRO(this, kLoong64Srl_w, node);
}

void InstructionSelector::VisitWord32Sar(Node* node) {
  Int32BinopMatcher m(node);
  if (m.left().IsWord32Shl() && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    if (m.right().HasResolvedValue() && mleft.right().HasResolvedValue()) {
      Loong64OperandGenerator g(this);
      uint32_t sar = m.right().ResolvedValue();
      uint32_t shl = mleft.right().ResolvedValue();
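      // A matching shl/sar pair sign-extends the low bits, e.g.
      // (x << 16) >> 16 sign-extends the low half-word, which is exactly
      // Ext_w_h; (x << 24) >> 24 is Ext_w_b.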
      if ((sar == shl) && (sar == 16)) {
        Emit(kLoong64Ext_w_h, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()));
        return;
      } else if ((sar == shl) && (sar == 24)) {
        Emit(kLoong64Ext_w_b, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()));
        return;
      } else if ((sar == shl) && (sar == 32)) {
        Emit(kLoong64Sll_w, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(0));
        return;
      }
    }
  }
  VisitRRO(this, kLoong64Sra_w, node);
}

void InstructionSelector::VisitWord64Shl(Node* node) {
  Loong64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
      m.right().IsInRange(32, 63) && CanCover(node, m.left().node())) {
    // There's no need to sign/zero-extend to 64-bit if we shift out the upper
    // 32 bits anyway.
    Emit(kLoong64Sll_d, g.DefineAsRegister(node),
         g.UseRegister(m.left().node()->InputAt(0)),
         g.UseImmediate(m.right().node()));
    return;
  }
  if (m.left().IsWord64And() && CanCover(node, m.left().node()) &&
      m.right().IsInRange(1, 63)) {
    // Match Word64Shl(Word64And(x, mask), imm) to Sll_d where the mask is
    // contiguous, and the shift immediate non-zero.
    Int64BinopMatcher mleft(m.left().node());
    if (mleft.right().HasResolvedValue()) {
      uint64_t mask = mleft.right().ResolvedValue();
      uint32_t mask_width = base::bits::CountPopulation(mask);
      uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
      if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
        uint64_t shift = m.right().ResolvedValue();
        DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
        DCHECK_NE(0u, shift);

        if ((shift + mask_width) >= 64) {
          // If the mask is contiguous and reaches or extends beyond the top
          // bit, only the shift is needed.
          Emit(kLoong64Sll_d, g.DefineAsRegister(node),
               g.UseRegister(mleft.left().node()),
               g.UseImmediate(m.right().node()));
          return;
        }
      }
    }
  }
  VisitRRO(this, kLoong64Sll_d, node);
}

void InstructionSelector::VisitWord64Shr(Node* node) {
  Int64BinopMatcher m(node);
  if (m.left().IsWord64And() && m.right().HasResolvedValue()) {
    uint32_t lsb = m.right().ResolvedValue() & 0x3F;
    Int64BinopMatcher mleft(m.left().node());
    if (mleft.right().HasResolvedValue() &&
        mleft.right().ResolvedValue() != 0) {
      // Select Bstrpick_d for Shr(And(x, mask), imm) where the result of the
      // mask is shifted into the least-significant bits.
      uint64_t mask = (mleft.right().ResolvedValue() >> lsb) << lsb;
      unsigned mask_width = base::bits::CountPopulation(mask);
      unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
      if ((mask_msb + mask_width + lsb) == 64) {
        Loong64OperandGenerator g(this);
        DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask));
        Emit(kLoong64Bstrpick_d, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
             g.TempImmediate(mask_width));
        return;
      }
    }
  }
  VisitRRO(this, kLoong64Srl_d, node);
}

void InstructionSelector::VisitWord64Sar(Node* node) {
  if (TryEmitExtendingLoad(this, node, node)) return;
  VisitRRO(this, kLoong64Sra_d, node);
}

void InstructionSelector::VisitWord32Rol(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitWord64Rol(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitWord32Ror(Node* node) {
  VisitRRO(this, kLoong64Rotr_w, node);
}

void InstructionSelector::VisitWord64Ror(Node* node) {
  VisitRRO(this, kLoong64Rotr_d, node);
}

void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
  Loong64OperandGenerator g(this);
  Emit(kLoong64ByteSwap32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
  Loong64OperandGenerator g(this);
  Emit(kLoong64ByteSwap64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitSimd128ReverseBytes(Node* node) {
  UNREACHABLE();
}

void InstructionSelector::VisitWord32Clz(Node* node) {
  VisitRR(this, kLoong64Clz_w, node);
}

void InstructionSelector::VisitWord64Clz(Node* node) {
  VisitRR(this, kLoong64Clz_d, node);
}

void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitWord64Popcnt(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitInt32Add(Node* node) {
  Loong64OperandGenerator g(this);
  Int32BinopMatcher m(node);

  // Select Alsl_w for (left + (left_of_right << imm)).
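  // e.g. a + (b << 2) => Alsl_w(b, a, 2), which computes (b << 2) + a in a
  // single instruction.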
  if (m.right().opcode() == IrOpcode::kWord32Shl &&
      CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
    Int32BinopMatcher mright(m.right().node());
    if (mright.right().HasResolvedValue() && !m.left().HasResolvedValue()) {
      int32_t shift_value =
          static_cast<int32_t>(mright.right().ResolvedValue());
      if (shift_value > 0 && shift_value <= 31) {
        Emit(kLoong64Alsl_w, g.DefineAsRegister(node),
             g.UseRegister(mright.left().node()),
             g.UseRegister(m.left().node()), g.TempImmediate(shift_value));
        return;
      }
    }
  }

  // Select Alsl_w for ((left_of_left << imm) + right).
  if (m.left().opcode() == IrOpcode::kWord32Shl &&
      CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
    Int32BinopMatcher mleft(m.left().node());
    if (mleft.right().HasResolvedValue() && !m.right().HasResolvedValue()) {
      int32_t shift_value = static_cast<int32_t>(mleft.right().ResolvedValue());
      if (shift_value > 0 && shift_value <= 31) {
        Emit(kLoong64Alsl_w, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()),
             g.UseRegister(m.right().node()), g.TempImmediate(shift_value));
        return;
      }
    }
  }

  VisitBinop(this, node, kLoong64Add_w, true, kLoong64Add_w);
}

void InstructionSelector::VisitInt64Add(Node* node) {
  Loong64OperandGenerator g(this);
  Int64BinopMatcher m(node);

  // Select Alsl_d for (left + (left_of_right << imm)).
  if (m.right().opcode() == IrOpcode::kWord64Shl &&
      CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
    Int64BinopMatcher mright(m.right().node());
    if (mright.right().HasResolvedValue() && !m.left().HasResolvedValue()) {
      int32_t shift_value =
          static_cast<int32_t>(mright.right().ResolvedValue());
      if (shift_value > 0 && shift_value <= 31) {
        Emit(kLoong64Alsl_d, g.DefineAsRegister(node),
             g.UseRegister(mright.left().node()),
             g.UseRegister(m.left().node()), g.TempImmediate(shift_value));
        return;
      }
    }
  }

  // Select Alsl_d for ((left_of_left << imm) + right).
  if (m.left().opcode() == IrOpcode::kWord64Shl &&
      CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
    Int64BinopMatcher mleft(m.left().node());
    if (mleft.right().HasResolvedValue() && !m.right().HasResolvedValue()) {
      int32_t shift_value = static_cast<int32_t>(mleft.right().ResolvedValue());
      if (shift_value > 0 && shift_value <= 31) {
        Emit(kLoong64Alsl_d, g.DefineAsRegister(node),
             g.UseRegister(mleft.left().node()),
             g.UseRegister(m.right().node()), g.TempImmediate(shift_value));
        return;
      }
    }
  }

  VisitBinop(this, node, kLoong64Add_d, true, kLoong64Add_d);
}

void InstructionSelector::VisitInt32Sub(Node* node) {
  VisitBinop(this, node, kLoong64Sub_w);
}

void InstructionSelector::VisitInt64Sub(Node* node) {
  VisitBinop(this, node, kLoong64Sub_d);
}

void InstructionSelector::VisitInt32Mul(Node* node) {
  Loong64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) {
    uint32_t value = static_cast<uint32_t>(m.right().ResolvedValue());
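    // Strength-reduce multiplication by a constant near a power of two:
    //   x * 2^k       => Sll_w(x, k)
    //   x * (2^k + 1) => Alsl_w(x, x, k)         ((x << k) + x)
    //   x * (2^k - 1) => Sub_w(Sll_w(x, k), x)   ((x << k) - x)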
    if (base::bits::IsPowerOfTwo(value)) {
      Emit(kLoong64Sll_w | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(base::bits::WhichPowerOfTwo(value)));
      return;
    }
    if (base::bits::IsPowerOfTwo(value - 1) && value - 1 > 0 &&
        value - 1 <= 31) {
      Emit(kLoong64Alsl_w, g.DefineAsRegister(node),
           g.UseRegister(m.left().node()), g.UseRegister(m.left().node()),
           g.TempImmediate(base::bits::WhichPowerOfTwo(value - 1)));
      return;
    }
    if (base::bits::IsPowerOfTwo(value + 1)) {
      InstructionOperand temp = g.TempRegister();
      Emit(kLoong64Sll_w | AddressingModeField::encode(kMode_None), temp,
           g.UseRegister(m.left().node()),
           g.TempImmediate(base::bits::WhichPowerOfTwo(value + 1)));
      Emit(kLoong64Sub_w | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
      return;
    }
  }
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (CanCover(node, left) && CanCover(node, right)) {
    if (left->opcode() == IrOpcode::kWord64Sar &&
        right->opcode() == IrOpcode::kWord64Sar) {
      Int64BinopMatcher leftInput(left), rightInput(right);
      if (leftInput.right().Is(32) && rightInput.right().Is(32)) {
        // Combine untagging shifts with Mulh_d.
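        // For untagged SMI inputs the payload sits in the upper 32 bits with
        // zeros below, so the high 64 bits of the 128-bit product (Mulh_d)
        // are exactly the product of the untagged values.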
        Emit(kLoong64Mulh_d, g.DefineSameAsFirst(node),
             g.UseRegister(leftInput.left().node()),
             g.UseRegister(rightInput.left().node()));
        return;
      }
    }
  }
  VisitRRR(this, kLoong64Mul_w, node);
}

void InstructionSelector::VisitInt32MulHigh(Node* node) {
  VisitRRR(this, kLoong64Mulh_w, node);
}

void InstructionSelector::VisitUint32MulHigh(Node* node) {
  VisitRRR(this, kLoong64Mulh_wu, node);
}

void InstructionSelector::VisitInt64Mul(Node* node) {
  Loong64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) {
    uint64_t value = static_cast<uint64_t>(m.right().ResolvedValue());
    if (base::bits::IsPowerOfTwo(value)) {
      Emit(kLoong64Sll_d | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
           g.TempImmediate(base::bits::WhichPowerOfTwo(value)));
      return;
    }
    if (base::bits::IsPowerOfTwo(value - 1) && value - 1 > 0) {
      // The Alsl_d macro handles shift amounts that are out of range.
      Emit(kLoong64Alsl_d, g.DefineAsRegister(node),
           g.UseRegister(m.left().node()), g.UseRegister(m.left().node()),
           g.TempImmediate(base::bits::WhichPowerOfTwo(value - 1)));
      return;
    }
    if (base::bits::IsPowerOfTwo(value + 1)) {
      InstructionOperand temp = g.TempRegister();
      Emit(kLoong64Sll_d | AddressingModeField::encode(kMode_None), temp,
           g.UseRegister(m.left().node()),
           g.TempImmediate(base::bits::WhichPowerOfTwo(value + 1)));
      Emit(kLoong64Sub_d | AddressingModeField::encode(kMode_None),
           g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
      return;
    }
  }
  Emit(kLoong64Mul_d, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}

void InstructionSelector::VisitInt32Div(Node* node) {
  Loong64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (CanCover(node, left) && CanCover(node, right)) {
    if (left->opcode() == IrOpcode::kWord64Sar &&
        right->opcode() == IrOpcode::kWord64Sar) {
      Int64BinopMatcher rightInput(right), leftInput(left);
      if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
        // Combine both shifted operands with Div_d.
        Emit(kLoong64Div_d, g.DefineSameAsFirst(node),
             g.UseRegister(leftInput.left().node()),
             g.UseRegister(rightInput.left().node()));
        return;
      }
    }
  }
  Emit(kLoong64Div_w, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}

void InstructionSelector::VisitUint32Div(Node* node) {
  Loong64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  Emit(kLoong64Div_wu, g.DefineSameAsFirst(node),
       g.UseRegister(m.left().node()), g.UseRegister(m.right().node()));
}

void InstructionSelector::VisitInt32Mod(Node* node) {
  Loong64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (CanCover(node, left) && CanCover(node, right)) {
    if (left->opcode() == IrOpcode::kWord64Sar &&
        right->opcode() == IrOpcode::kWord64Sar) {
      Int64BinopMatcher rightInput(right), leftInput(left);
      if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
        // Combine both shifted operands with Mod_d.
        Emit(kLoong64Mod_d, g.DefineSameAsFirst(node),
             g.UseRegister(leftInput.left().node()),
             g.UseRegister(rightInput.left().node()));
        return;
      }
    }
  }
  Emit(kLoong64Mod_w, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}

void InstructionSelector::VisitUint32Mod(Node* node) {
  Loong64OperandGenerator g(this);
  Int32BinopMatcher m(node);
  Emit(kLoong64Mod_wu, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}

void InstructionSelector::VisitInt64Div(Node* node) {
  Loong64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  Emit(kLoong64Div_d, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}

void InstructionSelector::VisitUint64Div(Node* node) {
  Loong64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  Emit(kLoong64Div_du, g.DefineSameAsFirst(node),
       g.UseRegister(m.left().node()), g.UseRegister(m.right().node()));
}

void InstructionSelector::VisitInt64Mod(Node* node) {
  Loong64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  Emit(kLoong64Mod_d, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}

void InstructionSelector::VisitUint64Mod(Node* node) {
  Loong64OperandGenerator g(this);
  Int64BinopMatcher m(node);
  Emit(kLoong64Mod_du, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
       g.UseRegister(m.right().node()));
}

void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  VisitRR(this, kLoong64Float32ToFloat64, node);
}

void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
  VisitRR(this, kLoong64Int32ToFloat32, node);
}

void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
  VisitRR(this, kLoong64Uint32ToFloat32, node);
}

void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
  VisitRR(this, kLoong64Int32ToFloat64, node);
}

void InstructionSelector::VisitChangeInt64ToFloat64(Node* node) {
  VisitRR(this, kLoong64Int64ToFloat64, node);
}

void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
  VisitRR(this, kLoong64Uint32ToFloat64, node);
}

void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
  Loong64OperandGenerator g(this);
  InstructionCode opcode = kLoong64Float32ToInt32;
  TruncateKind kind = OpParameter<TruncateKind>(node->op());
  if (kind == TruncateKind::kSetOverflowToMin) {
    opcode |= MiscField::encode(true);
  }
  Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
  Loong64OperandGenerator g(this);
  InstructionCode opcode = kLoong64Float32ToUint32;
  TruncateKind kind = OpParameter<TruncateKind>(node->op());
  if (kind == TruncateKind::kSetOverflowToMin) {
    opcode |= MiscField::encode(true);
  }
  Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
  Loong64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  // TODO(LOONG_dev): LOONG64 Match ChangeFloat64ToInt32(Float64Round##OP) to
  // corresponding instruction which does rounding and conversion to
  // integer format.
  if (CanCover(node, value)) {
    if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) {
      Node* next = value->InputAt(0);
      if (!CanCover(value, next)) {
        // Match float32 -> float64 -> int32 representation change path.
        Emit(kLoong64Float32ToInt32, g.DefineAsRegister(node),
             g.UseRegister(value->InputAt(0)));
        return;
      }
    }
  }
  VisitRR(this, kLoong64Float64ToInt32, node);
}

void InstructionSelector::VisitChangeFloat64ToInt64(Node* node) {
  VisitRR(this, kLoong64Float64ToInt64, node);
}

void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
  VisitRR(this, kLoong64Float64ToUint32, node);
}

void InstructionSelector::VisitChangeFloat64ToUint64(Node* node) {
  VisitRR(this, kLoong64Float64ToUint64, node);
}

void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
  VisitRR(this, kLoong64Float64ToUint32, node);
}

void InstructionSelector::VisitTruncateFloat64ToInt64(Node* node) {
  Loong64OperandGenerator g(this);
  InstructionCode opcode = kLoong64Float64ToInt64;
  TruncateKind kind = OpParameter<TruncateKind>(node->op());
  if (kind == TruncateKind::kSetOverflowToMin) {
    opcode |= MiscField::encode(true);
  }
  Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
  Loong64OperandGenerator g(this);
  InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
  InstructionOperand outputs[2];
  size_t output_count = 0;
  outputs[output_count++] = g.DefineAsRegister(node);

1300   Node* success_output = NodeProperties::FindProjection(node, 1);
1301   if (success_output) {
1302     outputs[output_count++] = g.DefineAsRegister(success_output);
1303   }
1304 
1305   this->Emit(kLoong64Float32ToInt64, output_count, outputs, 1, inputs);
1306 }
1307 
VisitTryTruncateFloat64ToInt64(Node * node)1308 void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
1309   Loong64OperandGenerator g(this);
1310   InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1311   InstructionOperand outputs[2];
1312   size_t output_count = 0;
1313   outputs[output_count++] = g.DefineAsRegister(node);
1314 
1315   Node* success_output = NodeProperties::FindProjection(node, 1);
1316   if (success_output) {
1317     outputs[output_count++] = g.DefineAsRegister(success_output);
1318   }
1319 
1320   Emit(kLoong64Float64ToInt64, output_count, outputs, 1, inputs);
1321 }
1322 
VisitTryTruncateFloat32ToUint64(Node * node)1323 void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
1324   Loong64OperandGenerator g(this);
1325   InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1326   InstructionOperand outputs[2];
1327   size_t output_count = 0;
1328   outputs[output_count++] = g.DefineAsRegister(node);
1329 
1330   Node* success_output = NodeProperties::FindProjection(node, 1);
1331   if (success_output) {
1332     outputs[output_count++] = g.DefineAsRegister(success_output);
1333   }
1334 
1335   Emit(kLoong64Float32ToUint64, output_count, outputs, 1, inputs);
1336 }
1337 
VisitTryTruncateFloat64ToUint64(Node * node)1338 void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
1339   Loong64OperandGenerator g(this);
1340 
1341   InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
1342   InstructionOperand outputs[2];
1343   size_t output_count = 0;
1344   outputs[output_count++] = g.DefineAsRegister(node);
1345 
1346   Node* success_output = NodeProperties::FindProjection(node, 1);
1347   if (success_output) {
1348     outputs[output_count++] = g.DefineAsRegister(success_output);
1349   }
1350 
1351   Emit(kLoong64Float64ToUint64, output_count, outputs, 1, inputs);
1352 }
1353 
VisitBitcastWord32ToWord64(Node * node)1354 void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
1355   UNIMPLEMENTED();
1356 }
1357 
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  // On LoongArch64, int32 values are always kept sign-extended to 64 bits,
  // so there is no need to sign-extend them here. However, when calling a
  // host function in the simulator, if the function returns an int32 value,
  // the simulator does not sign-extend it to int64, because the simulator
  // cannot know whether the function returns an int32 or an int64.
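  // Illustrative example (an assumption, not from the original source): on
  // LoongArch64, as on MIPS64, 32-bit instructions sign-extend their result
  // into the full 64-bit register, so a shift by zero
  //   slli.w rd, rj, 0   // e.g. 0x00000000'80000000 -> 0xffffffff'80000000
  // is the canonical sign-extension idiom, emitted via kLoong64Sll_w below.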
#ifdef USE_SIMULATOR
  Node* value = node->InputAt(0);
  if (value->opcode() == IrOpcode::kCall) {
    Loong64OperandGenerator g(this);
    Emit(kLoong64Sll_w, g.DefineAsRegister(node), g.UseRegister(value),
         g.TempImmediate(0));
    return;
  }
#endif
  EmitIdentity(node);
}

bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
  DCHECK_NE(node->opcode(), IrOpcode::kPhi);
  switch (node->opcode()) {
    // Comparisons only emit 0/1, so the upper 32 bits must be zero.
    case IrOpcode::kWord32Equal:
    case IrOpcode::kInt32LessThan:
    case IrOpcode::kInt32LessThanOrEqual:
    case IrOpcode::kUint32LessThan:
    case IrOpcode::kUint32LessThanOrEqual:
      return true;
    case IrOpcode::kWord32And: {
      Int32BinopMatcher m(node);
      if (m.right().HasResolvedValue()) {
        uint32_t mask = m.right().ResolvedValue();
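        // Illustrative reasoning (not in the original source): a mask that
        // fits in 31 bits clears bit 31 of the 32-bit result, so the
        // sign-extending 32-bit And leaves the upper 32 bits zero as well;
        // e.g. x & 0x7fffffff is always zero-extended, while x & 0x80000000
        // may sign-extend to ones and is therefore rejected.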
        return is_uint31(mask);
      }
      return false;
    }
    case IrOpcode::kWord32Shr: {
      Int32BinopMatcher m(node);
      if (m.right().HasResolvedValue()) {
        uint8_t sa = m.right().ResolvedValue() & 0x1f;
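        // Illustrative reasoning (not in the original source): a logical
        // right shift by at least one bit clears bit 31 of the 32-bit
        // result, so sign extension leaves the upper 32 bits zero; a shift
        // amount of zero proves nothing, hence the sa > 0 check.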
        return sa > 0;
      }
      return false;
    }
    case IrOpcode::kLoad:
    case IrOpcode::kLoadImmutable: {
      LoadRepresentation load_rep = LoadRepresentationOf(node->op());
      if (load_rep.IsUnsigned()) {
        switch (load_rep.representation()) {
          case MachineRepresentation::kBit:    // Fall through.
          case MachineRepresentation::kWord8:  // Fall through.
          case MachineRepresentation::kWord16:
            return true;
          default:
            return false;
        }
      }
      return false;
    }
    default:
      return false;
  }
}

void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  Loong64OperandGenerator g(this);
  Node* value = node->InputAt(0);

  if (value->opcode() == IrOpcode::kLoad) {
    LoadRepresentation load_rep = LoadRepresentationOf(value->op());
    if (load_rep.IsUnsigned() &&
        load_rep.representation() == MachineRepresentation::kWord32) {
      EmitLoad(this, value, kLoong64Ld_wu, node);
      return;
    }
  }
  if (ZeroExtendsWord32ToWord64(value)) {
    EmitIdentity(node);
    return;
  }
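  // Illustrative note (an assumption about instruction semantics, not from
  // the original source): bstrpick.d extracts a bit field and zeroes the
  // remaining destination bits, so the (0, 32) operands below are taken to
  // select the low 32 bits, i.e. an explicit zero-extension of the input.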
  Emit(kLoong64Bstrpick_d, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.TempImmediate(0),
       g.TempImmediate(32));
}

void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  Loong64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  if (CanCover(node, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord64Sar: {
        if (CanCover(value, value->InputAt(0)) &&
            TryEmitExtendingLoad(this, value, node)) {
          return;
        } else {
          Int64BinopMatcher m(value);
          if (m.right().IsInRange(32, 63)) {
            // After smi untagging, no truncation is needed; combine the
            // shift and the truncation into one sequence.
            Emit(kLoong64Sra_d, g.DefineAsRegister(node),
                 g.UseRegister(m.left().node()),
                 g.UseImmediate(m.right().node()));
            return;
          }
        }
        break;
      }
      default:
        break;
    }
  }
  Emit(kLoong64Sll_w, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
       g.TempImmediate(0));
}

void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
  Loong64OperandGenerator g(this);
  Node* value = node->InputAt(0);
  // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to corresponding
  // instruction.
  if (CanCover(node, value) &&
      value->opcode() == IrOpcode::kChangeInt32ToFloat64) {
    Emit(kLoong64Int32ToFloat32, g.DefineAsRegister(node),
         g.UseRegister(value->InputAt(0)));
    return;
  }
  VisitRR(this, kLoong64Float64ToFloat32, node);
}

void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
  VisitRR(this, kArchTruncateDoubleToI, node);
}

void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
  VisitRR(this, kLoong64Float64ToInt32, node);
}

void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
  VisitRR(this, kLoong64Int64ToFloat32, node);
}

void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
  VisitRR(this, kLoong64Int64ToFloat64, node);
}

void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
  VisitRR(this, kLoong64Uint64ToFloat32, node);
}

void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
  VisitRR(this, kLoong64Uint64ToFloat64, node);
}

void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
  VisitRR(this, kLoong64Float64ExtractLowWord32, node);
}

void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
  VisitRR(this, kLoong64BitcastDL, node);
}

void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
  Loong64OperandGenerator g(this);
  Emit(kLoong64Float64InsertLowWord32, g.DefineAsRegister(node),
       ImmediateOperand(ImmediateOperand::INLINE_INT32, 0),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
  VisitRR(this, kLoong64BitcastLD, node);
}

void InstructionSelector::VisitFloat32Add(Node* node) {
  VisitRRR(this, kLoong64Float32Add, node);
}

void InstructionSelector::VisitFloat64Add(Node* node) {
  VisitRRR(this, kLoong64Float64Add, node);
}

void InstructionSelector::VisitFloat32Sub(Node* node) {
  VisitRRR(this, kLoong64Float32Sub, node);
}

void InstructionSelector::VisitFloat64Sub(Node* node) {
  VisitRRR(this, kLoong64Float64Sub, node);
}

void InstructionSelector::VisitFloat32Mul(Node* node) {
  VisitRRR(this, kLoong64Float32Mul, node);
}

void InstructionSelector::VisitFloat64Mul(Node* node) {
  VisitRRR(this, kLoong64Float64Mul, node);
}

void InstructionSelector::VisitFloat32Div(Node* node) {
  VisitRRR(this, kLoong64Float32Div, node);
}

void InstructionSelector::VisitFloat64Div(Node* node) {
  VisitRRR(this, kLoong64Float64Div, node);
}

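// Illustrative note (an assumption, not from the original source): there is
// no single float64 modulus instruction, so kLoong64Float64Mod is lowered to
// a runtime call; the fixed f0/f1 operands below match the FP argument
// registers of the C calling convention, and MarkAsCall makes the register
// allocator treat caller-saved registers as clobbered.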
void InstructionSelector::VisitFloat64Mod(Node* node) {
  Loong64OperandGenerator g(this);
  Emit(kLoong64Float64Mod, g.DefineAsFixed(node, f0),
       g.UseFixed(node->InputAt(0), f0), g.UseFixed(node->InputAt(1), f1))
      ->MarkAsCall();
}

void InstructionSelector::VisitFloat32Max(Node* node) {
  Loong64OperandGenerator g(this);
  Emit(kLoong64Float32Max, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}

void InstructionSelector::VisitFloat64Max(Node* node) {
  Loong64OperandGenerator g(this);
  Emit(kLoong64Float64Max, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}

void InstructionSelector::VisitFloat32Min(Node* node) {
  Loong64OperandGenerator g(this);
  Emit(kLoong64Float32Min, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}

void InstructionSelector::VisitFloat64Min(Node* node) {
  Loong64OperandGenerator g(this);
  Emit(kLoong64Float64Min, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}

void InstructionSelector::VisitFloat32Abs(Node* node) {
  VisitRR(this, kLoong64Float32Abs, node);
}

void InstructionSelector::VisitFloat64Abs(Node* node) {
  VisitRR(this, kLoong64Float64Abs, node);
}

void InstructionSelector::VisitFloat32Sqrt(Node* node) {
  VisitRR(this, kLoong64Float32Sqrt, node);
}

void InstructionSelector::VisitFloat64Sqrt(Node* node) {
  VisitRR(this, kLoong64Float64Sqrt, node);
}

void InstructionSelector::VisitFloat32RoundDown(Node* node) {
  VisitRR(this, kLoong64Float32RoundDown, node);
}

void InstructionSelector::VisitFloat64RoundDown(Node* node) {
  VisitRR(this, kLoong64Float64RoundDown, node);
}

void InstructionSelector::VisitFloat32RoundUp(Node* node) {
  VisitRR(this, kLoong64Float32RoundUp, node);
}

void InstructionSelector::VisitFloat64RoundUp(Node* node) {
  VisitRR(this, kLoong64Float64RoundUp, node);
}

void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
  VisitRR(this, kLoong64Float32RoundTruncate, node);
}

void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
  VisitRR(this, kLoong64Float64RoundTruncate, node);
}

void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
  UNREACHABLE();
}

void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
  VisitRR(this, kLoong64Float32RoundTiesEven, node);
}

void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
  VisitRR(this, kLoong64Float64RoundTiesEven, node);
}

void InstructionSelector::VisitFloat32Neg(Node* node) {
  VisitRR(this, kLoong64Float32Neg, node);
}

void InstructionSelector::VisitFloat64Neg(Node* node) {
  VisitRR(this, kLoong64Float64Neg, node);
}

void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                   InstructionCode opcode) {
  Loong64OperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f0),
       g.UseFixed(node->InputAt(1), f1))
      ->MarkAsCall();
}

void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
                                                  InstructionCode opcode) {
  Loong64OperandGenerator g(this);
  Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f0))
      ->MarkAsCall();
}

void InstructionSelector::EmitPrepareArguments(
    ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
    Node* node) {
  Loong64OperandGenerator g(this);

  // Prepare for C function call.
  if (call_descriptor->IsCFunctionCall()) {
    Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
                                         call_descriptor->ParameterCount())),
         0, nullptr, 0, nullptr);

    // Poke any stack arguments.
    int slot = 0;
    for (PushParameter input : (*arguments)) {
      Emit(kLoong64Poke, g.NoOutput(), g.UseRegister(input.node),
           g.TempImmediate(slot << kSystemPointerSizeLog2));
      ++slot;
    }
  } else {
    int push_count = static_cast<int>(call_descriptor->ParameterSlotCount());
    if (push_count > 0) {
      // Calculate needed space.
      int stack_size = 0;
      for (PushParameter input : (*arguments)) {
        if (input.node) {
          stack_size += input.location.GetSizeInPointers();
        }
      }
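      // Illustrative arithmetic (not in the original source): stack_size
      // counts pointer-sized slots, so with kSystemPointerSizeLog2 == 3 two
      // single-slot arguments claim 2 << 3 = 16 bytes, which the pokes below
      // then fill at slot offsets 0 and 8.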
      Emit(kLoong64StackClaim, g.NoOutput(),
           g.TempImmediate(stack_size << kSystemPointerSizeLog2));
    }
    for (size_t n = 0; n < arguments->size(); ++n) {
      PushParameter input = (*arguments)[n];
      if (input.node) {
        Emit(kLoong64Poke, g.NoOutput(), g.UseRegister(input.node),
             g.TempImmediate(static_cast<int>(n << kSystemPointerSizeLog2)));
      }
    }
  }
}

void InstructionSelector::EmitPrepareResults(
    ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
    Node* node) {
  Loong64OperandGenerator g(this);

  for (PushParameter output : *results) {
    if (!output.location.IsCallerFrameSlot()) continue;
    // Skip any alignment holes in nodes.
    if (output.node != nullptr) {
      DCHECK(!call_descriptor->IsCFunctionCall());
      if (output.location.GetType() == MachineType::Float32()) {
        MarkAsFloat32(output.node);
      } else if (output.location.GetType() == MachineType::Float64()) {
        MarkAsFloat64(output.node);
      } else if (output.location.GetType() == MachineType::Simd128()) {
        abort();
      }
      int offset = call_descriptor->GetOffsetToReturns();
      int reverse_slot = -output.location.GetLocation() - offset;
      Emit(kLoong64Peek, g.DefineAsRegister(output.node),
           g.UseImmediate(reverse_slot));
    }
  }
}

bool InstructionSelector::IsTailCallAddressImmediate() { return false; }

void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }

void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }

namespace {

// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                         InstructionOperand left, InstructionOperand right,
                         FlagsContinuation* cont) {
  selector->EmitWithContinuation(opcode, left, right, cont);
}

// Shared routine for multiple float32 compare operations.
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  Loong64OperandGenerator g(selector);
  Float32BinopMatcher m(node);
  InstructionOperand lhs, rhs;

  lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
                          : g.UseRegister(m.left().node());
  rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
                           : g.UseRegister(m.right().node());
  VisitCompare(selector, kLoong64Float32Cmp, lhs, rhs, cont);
}

// Shared routine for multiple float64 compare operations.
void VisitFloat64Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
  Loong64OperandGenerator g(selector);
  Float64BinopMatcher m(node);
  InstructionOperand lhs, rhs;

  lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
                          : g.UseRegister(m.left().node());
  rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
                           : g.UseRegister(m.right().node());
  VisitCompare(selector, kLoong64Float64Cmp, lhs, rhs, cont);
}

// Shared routine for multiple word compare operations.
void VisitWordCompare(InstructionSelector* selector, Node* node,
                      InstructionCode opcode, FlagsContinuation* cont,
                      bool commutative) {
  Loong64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // Match immediates on left or right side of comparison.
  if (g.CanBeImmediate(right, opcode)) {
    if (opcode == kLoong64Tst) {
      if (left->opcode() == IrOpcode::kTruncateInt64ToInt32) {
        VisitCompare(selector, opcode, g.UseRegister(left->InputAt(0)),
                     g.UseImmediate(right), cont);
      } else {
        VisitCompare(selector, opcode, g.UseRegister(left),
                     g.UseImmediate(right), cont);
      }
    } else {
      switch (cont->condition()) {
        case kEqual:
        case kNotEqual:
          if (cont->IsSet()) {
            VisitCompare(selector, opcode, g.UseRegister(left),
                         g.UseImmediate(right), cont);
          } else {
            VisitCompare(selector, opcode, g.UseRegister(left),
                         g.UseRegister(right), cont);
          }
          break;
        case kSignedLessThan:
        case kSignedGreaterThanOrEqual:
        case kUnsignedLessThan:
        case kUnsignedGreaterThanOrEqual:
          VisitCompare(selector, opcode, g.UseRegister(left),
                       g.UseImmediate(right), cont);
          break;
        default:
          VisitCompare(selector, opcode, g.UseRegister(left),
                       g.UseRegister(right), cont);
      }
    }
  } else if (g.CanBeImmediate(left, opcode)) {
    if (!commutative) cont->Commute();
    if (opcode == kLoong64Tst) {
      VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
                   cont);
    } else {
      switch (cont->condition()) {
        case kEqual:
        case kNotEqual:
          if (cont->IsSet()) {
            VisitCompare(selector, opcode, g.UseRegister(right),
                         g.UseImmediate(left), cont);
          } else {
            VisitCompare(selector, opcode, g.UseRegister(right),
                         g.UseRegister(left), cont);
          }
          break;
        case kSignedLessThan:
        case kSignedGreaterThanOrEqual:
        case kUnsignedLessThan:
        case kUnsignedGreaterThanOrEqual:
          VisitCompare(selector, opcode, g.UseRegister(right),
                       g.UseImmediate(left), cont);
          break;
        default:
          VisitCompare(selector, opcode, g.UseRegister(right),
                       g.UseRegister(left), cont);
      }
    }
  } else {
    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
                 cont);
  }
}

void VisitOptimizedWord32Compare(InstructionSelector* selector, Node* node,
                                 InstructionCode opcode,
                                 FlagsContinuation* cont) {
  // TODO(LOONG_dev): LOONG64 Add check for debug mode
  VisitWordCompare(selector, node, opcode, cont, false);
}

#ifdef USE_SIMULATOR
// Shared routine for a full word32 compare, used only in the simulator,
// where the upper 32 bits of a host call's result may be stale.
void VisitFullWord32Compare(InstructionSelector* selector, Node* node,
                            InstructionCode opcode, FlagsContinuation* cont) {
  Loong64OperandGenerator g(selector);
  InstructionOperand leftOp = g.TempRegister();
  InstructionOperand rightOp = g.TempRegister();

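  // Illustrative note (not in the original source): shifting both inputs
  // left by 32 moves the 32-bit payloads into the upper word, so any stale
  // upper bits left behind by a simulator host call cannot affect the
  // 64-bit comparison below.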
  selector->Emit(kLoong64Sll_d, leftOp, g.UseRegister(node->InputAt(0)),
                 g.TempImmediate(32));
  selector->Emit(kLoong64Sll_d, rightOp, g.UseRegister(node->InputAt(1)),
                 g.TempImmediate(32));

  VisitCompare(selector, opcode, leftOp, rightOp, cont);
}
#endif

void VisitWord32Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  // LOONG64 doesn't support Word32 compare instructions. Instead it relies
  // on the values in registers being correctly sign-extended, and uses a
  // Word64 comparison instead.
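  // Illustrative example (not in the original source): with both inputs
  // sign-extended, a 64-bit compare preserves 32-bit ordering. For int32
  // -1 < 1 the operands become 0xffffffff'ffffffff and 0x1, which still
  // compare as -1 < 1 signed; for uint32 0xffffffff > 1, the same bit
  // patterns also compare greater as unsigned 64-bit values.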
#ifdef USE_SIMULATOR
  // When calling a host function in the simulator, if the function returns
  // an int32 value, the simulator does not sign-extend it to int64, because
  // the simulator cannot know whether the function returns an int32 or an
  // int64. So we need to do a full word32 compare in this case.
  if (node->InputAt(0)->opcode() == IrOpcode::kCall ||
      node->InputAt(1)->opcode() == IrOpcode::kCall) {
    VisitFullWord32Compare(selector, node, kLoong64Cmp, cont);
    return;
  }
#endif
  VisitOptimizedWord32Compare(selector, node, kLoong64Cmp, cont);
}

void VisitWord64Compare(InstructionSelector* selector, Node* node,
                        FlagsContinuation* cont) {
  VisitWordCompare(selector, node, kLoong64Cmp, cont, false);
}

void EmitWordCompareZero(InstructionSelector* selector, Node* value,
                         FlagsContinuation* cont) {
  Loong64OperandGenerator g(selector);
  selector->EmitWithContinuation(kLoong64Cmp, g.UseRegister(value),
                                 g.TempImmediate(0), cont);
}

void VisitAtomicLoad(InstructionSelector* selector, Node* node,
                     AtomicWidth width) {
  Loong64OperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  // The memory order is ignored.
  AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
  LoadRepresentation load_rep = atomic_load_params.representation();
  InstructionCode code;
  switch (load_rep.representation()) {
    case MachineRepresentation::kWord8:
      DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
      code = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
      break;
    case MachineRepresentation::kWord16:
      DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
      code = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
      break;
    case MachineRepresentation::kWord32:
      code = (width == AtomicWidth::kWord32) ? kAtomicLoadWord32
                                             : kLoong64Word64AtomicLoadUint32;
      break;
    case MachineRepresentation::kWord64:
      code = kLoong64Word64AtomicLoadUint64;
      break;
    case MachineRepresentation::kTaggedSigned:   // Fall through.
    case MachineRepresentation::kTaggedPointer:  // Fall through.
    case MachineRepresentation::kTagged:
      DCHECK_EQ(kTaggedSize, 8);
      code = kLoong64Word64AtomicLoadUint64;
      break;
    default:
      UNREACHABLE();
  }

  if (g.CanBeImmediate(index, code)) {
    selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
                       AtomicWidthField::encode(width),
                   g.DefineAsRegister(node), g.UseRegister(base),
                   g.UseImmediate(index));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    selector->Emit(kLoong64Add_d | AddressingModeField::encode(kMode_None),
                   addr_reg, g.UseRegister(index), g.UseRegister(base));
    // Emit desired load opcode, using temp addr_reg.
    selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
                       AtomicWidthField::encode(width),
                   g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
  }
}

void VisitAtomicStore(InstructionSelector* selector, Node* node,
                      AtomicWidth width) {
  Loong64OperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  // The memory order is ignored.
  AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
  WriteBarrierKind write_barrier_kind = store_params.write_barrier_kind();
  MachineRepresentation rep = store_params.representation();

  if (FLAG_enable_unconditional_write_barriers &&
      CanBeTaggedOrCompressedPointer(rep)) {
    write_barrier_kind = kFullWriteBarrier;
  }

  InstructionCode code;

  if (write_barrier_kind != kNoWriteBarrier && !FLAG_disable_write_barriers) {
    DCHECK(CanBeTaggedPointer(rep));
    DCHECK_EQ(kTaggedSize, 8);

    RecordWriteMode record_write_mode =
        WriteBarrierKindToRecordWriteMode(write_barrier_kind);
    code = kArchAtomicStoreWithWriteBarrier;
    code |= MiscField::encode(static_cast<int>(record_write_mode));
  } else {
    switch (rep) {
      case MachineRepresentation::kWord8:
        code = kAtomicStoreWord8;
        break;
      case MachineRepresentation::kWord16:
        code = kAtomicStoreWord16;
        break;
      case MachineRepresentation::kWord32:
        code = kAtomicStoreWord32;
        break;
      case MachineRepresentation::kWord64:
        DCHECK_EQ(width, AtomicWidth::kWord64);
        code = kLoong64Word64AtomicStoreWord64;
        break;
      case MachineRepresentation::kTaggedSigned:   // Fall through.
      case MachineRepresentation::kTaggedPointer:  // Fall through.
      case MachineRepresentation::kTagged:
        DCHECK_EQ(kTaggedSize, 8);
        code = kLoong64StoreCompressTagged;
        break;
      default:
        UNREACHABLE();
    }
  }

  if (g.CanBeImmediate(index, code)) {
    selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
                       AtomicWidthField::encode(width),
                   g.NoOutput(), g.UseRegister(base), g.UseImmediate(index),
                   g.UseRegisterOrImmediateZero(value));
  } else {
    InstructionOperand addr_reg = g.TempRegister();
    selector->Emit(kLoong64Add_d | AddressingModeField::encode(kMode_None),
                   addr_reg, g.UseRegister(index), g.UseRegister(base));
    // Emit desired store opcode, using temp addr_reg.
    selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
                       AtomicWidthField::encode(width),
                   g.NoOutput(), addr_reg, g.TempImmediate(0),
                   g.UseRegisterOrImmediateZero(value));
  }
}

void VisitAtomicExchange(InstructionSelector* selector, Node* node,
                         ArchOpcode opcode, AtomicWidth width) {
  Loong64OperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  AddressingMode addressing_mode = kMode_MRI;
  InstructionOperand inputs[3];
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(base);
  inputs[input_count++] = g.UseUniqueRegister(index);
  inputs[input_count++] = g.UseUniqueRegister(value);
  InstructionOperand outputs[1];
  outputs[0] = g.UseUniqueRegister(node);
  InstructionOperand temp[3];
  temp[0] = g.TempRegister();
  temp[1] = g.TempRegister();
  temp[2] = g.TempRegister();
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
                         AtomicWidthField::encode(width);
  selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
}

void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
                                ArchOpcode opcode, AtomicWidth width) {
  Loong64OperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* old_value = node->InputAt(2);
  Node* new_value = node->InputAt(3);

  AddressingMode addressing_mode = kMode_MRI;
  InstructionOperand inputs[4];
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(base);
  inputs[input_count++] = g.UseUniqueRegister(index);
  inputs[input_count++] = g.UseUniqueRegister(old_value);
  inputs[input_count++] = g.UseUniqueRegister(new_value);
  InstructionOperand outputs[1];
  outputs[0] = g.UseUniqueRegister(node);
  InstructionOperand temp[3];
  temp[0] = g.TempRegister();
  temp[1] = g.TempRegister();
  temp[2] = g.TempRegister();
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
                         AtomicWidthField::encode(width);
  selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
}

void VisitAtomicBinop(InstructionSelector* selector, Node* node,
                      ArchOpcode opcode, AtomicWidth width) {
  Loong64OperandGenerator g(selector);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  AddressingMode addressing_mode = kMode_MRI;
  InstructionOperand inputs[3];
  size_t input_count = 0;
  inputs[input_count++] = g.UseUniqueRegister(base);
  inputs[input_count++] = g.UseUniqueRegister(index);
  inputs[input_count++] = g.UseUniqueRegister(value);
  InstructionOperand outputs[1];
  outputs[0] = g.UseUniqueRegister(node);
  InstructionOperand temps[4];
  temps[0] = g.TempRegister();
  temps[1] = g.TempRegister();
  temps[2] = g.TempRegister();
  temps[3] = g.TempRegister();
  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
                         AtomicWidthField::encode(width);
  selector->Emit(code, 1, outputs, input_count, inputs, 4, temps);
}

}  // namespace

void InstructionSelector::VisitStackPointerGreaterThan(
    Node* node, FlagsContinuation* cont) {
  StackCheckKind kind = StackCheckKindOf(node->op());
  InstructionCode opcode =
      kArchStackPointerGreaterThan | MiscField::encode(static_cast<int>(kind));

  Loong64OperandGenerator g(this);

  // No outputs.
  InstructionOperand* const outputs = nullptr;
  const int output_count = 0;

  // TempRegister(0) is used to store the comparison result.
  // Applying an offset to this stack check requires a temp register. Offsets
  // are only applied to the first stack check. If applying an offset, we must
  // ensure the input and temp registers do not alias, thus kUniqueRegister.
  InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
  const int temp_count = (kind == StackCheckKind::kJSFunctionEntry ? 2 : 1);
  const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry)
                                 ? OperandGenerator::kUniqueRegister
                                 : OperandGenerator::kRegister;

  Node* const value = node->InputAt(0);
  InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)};
  static constexpr int input_count = arraysize(inputs);

  EmitWithContinuation(opcode, output_count, outputs, input_count, inputs,
                       temp_count, temps, cont);
}

// Shared routine for word comparisons against zero.
void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
                                               FlagsContinuation* cont) {
  // Try to combine with comparisons against 0 by simply inverting the branch.
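  // Illustrative example (not in the original source): a branch on
  // Word32Equal(v, 0) becomes a branch on v itself with the continuation
  // negated; repeating this peels away chains of "== 0" wrappers before
  // trying to match a combinable comparison below.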
  while (CanCover(user, value)) {
    if (value->opcode() == IrOpcode::kWord32Equal) {
      Int32BinopMatcher m(value);
      if (!m.right().Is(0)) break;
      user = value;
      value = m.left().node();
    } else if (value->opcode() == IrOpcode::kWord64Equal) {
      Int64BinopMatcher m(value);
      if (!m.right().Is(0)) break;
      user = value;
      value = m.left().node();
    } else {
      break;
    }

    cont->Negate();
  }

  if (CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kInt32LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kUint32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord32Compare(this, value, cont);
      case IrOpcode::kWord64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kInt64LessThan:
        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kInt64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kUint64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kUint64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord64Compare(this, value, cont);
      case IrOpcode::kFloat32Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat32LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat32LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat32Compare(this, value, cont);
      case IrOpcode::kFloat64Equal:
        cont->OverwriteAndNegateIfEqual(kEqual);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kFloat64LessThan:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitFloat64Compare(this, value, cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (ProjectionIndexOf(value->op()) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation>) is either nullptr, which means there's no use of
          // the actual value, or was already defined, which means it is
          // scheduled *AFTER* this branch.
          Node* const node = value->InputAt(0);
          Node* const result = NodeProperties::FindProjection(node, 0);
          if (result == nullptr || IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kLoong64Add_d, cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kLoong64Sub_d, cont);
              case IrOpcode::kInt32MulWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kLoong64MulOvf_w, cont);
              case IrOpcode::kInt64AddWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kLoong64AddOvf_d, cont);
              case IrOpcode::kInt64SubWithOverflow:
                cont->OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kLoong64SubOvf_d, cont);
              default:
                break;
            }
          }
        }
        break;
      case IrOpcode::kWord32And:
      case IrOpcode::kWord64And:
        return VisitWordCompare(this, value, kLoong64Tst, cont, true);
      case IrOpcode::kStackPointerGreaterThan:
        cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
        return VisitStackPointerGreaterThan(value, cont);
      default:
        break;
    }
  }

  // Continuation could not be combined with a compare, emit compare against 0.
  EmitWordCompareZero(this, value, cont);
}

void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
  Loong64OperandGenerator g(this);
  InstructionOperand value_operand = g.UseRegister(node->InputAt(0));

  // Emit either ArchTableSwitch or ArchBinarySearchSwitch.
  if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
    static const size_t kMaxTableSwitchValueRange = 2 << 16;
    size_t table_space_cost = 10 + 2 * sw.value_range();
    size_t table_time_cost = 3;
    size_t lookup_space_cost = 2 + 2 * sw.case_count();
    size_t lookup_time_cost = sw.case_count();
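    // Illustrative arithmetic (not in the original source): for 32 cases
    // spanning a value range of 64, the table costs (10 + 2 * 64) + 3 * 3 =
    // 147 against (2 + 2 * 32) + 3 * 32 = 162 for the binary search, so the
    // jump table wins; a sparse switch tips the comparison the other way.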
    if (sw.case_count() > 0 &&
        table_space_cost + 3 * table_time_cost <=
            lookup_space_cost + 3 * lookup_time_cost &&
        sw.min_value() > std::numeric_limits<int32_t>::min() &&
        sw.value_range() <= kMaxTableSwitchValueRange) {
      InstructionOperand index_operand = value_operand;
      if (sw.min_value()) {
        index_operand = g.TempRegister();
        Emit(kLoong64Sub_w, index_operand, value_operand,
             g.TempImmediate(sw.min_value()));
      }
      // Generate a table lookup.
      return EmitTableSwitch(sw, index_operand);
    }
  }

  // Generate a tree of conditional jumps.
  return EmitBinarySearchSwitch(sw, value_operand);
}

VisitWord32Equal(Node * const node)2289 void InstructionSelector::VisitWord32Equal(Node* const node) {
2290   FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2291   Int32BinopMatcher m(node);
2292   if (m.right().Is(0)) {
2293     return VisitWordCompareZero(m.node(), m.left().node(), &cont);
2294   }
2295 
2296   VisitWord32Compare(this, node, &cont);
2297 }
2298 
VisitInt32LessThan(Node * node)2299 void InstructionSelector::VisitInt32LessThan(Node* node) {
2300   FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
2301   VisitWord32Compare(this, node, &cont);
2302 }
2303 
VisitInt32LessThanOrEqual(Node * node)2304 void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
2305   FlagsContinuation cont =
2306       FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
2307   VisitWord32Compare(this, node, &cont);
2308 }
2309 
VisitUint32LessThan(Node * node)2310 void InstructionSelector::VisitUint32LessThan(Node* node) {
2311   FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2312   VisitWord32Compare(this, node, &cont);
2313 }
2314 
VisitUint32LessThanOrEqual(Node * node)2315 void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
2316   FlagsContinuation cont =
2317       FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2318   VisitWord32Compare(this, node, &cont);
2319 }
2320 
VisitInt32AddWithOverflow(Node * node)2321 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
2322   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2323     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
2324     return VisitBinop(this, node, kLoong64Add_d, &cont);
2325   }
2326   FlagsContinuation cont;
2327   VisitBinop(this, node, kLoong64Add_d, &cont);
2328 }
2329 
VisitInt32SubWithOverflow(Node * node)2330 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
2331   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2332     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
2333     return VisitBinop(this, node, kLoong64Sub_d, &cont);
2334   }
2335   FlagsContinuation cont;
2336   VisitBinop(this, node, kLoong64Sub_d, &cont);
2337 }
2338 
VisitInt32MulWithOverflow(Node * node)2339 void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
2340   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2341     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
2342     return VisitBinop(this, node, kLoong64MulOvf_w, &cont);
2343   }
2344   FlagsContinuation cont;
2345   VisitBinop(this, node, kLoong64MulOvf_w, &cont);
2346 }
2347 
VisitInt64AddWithOverflow(Node * node)2348 void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
2349   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2350     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
2351     return VisitBinop(this, node, kLoong64AddOvf_d, &cont);
2352   }
2353   FlagsContinuation cont;
2354   VisitBinop(this, node, kLoong64AddOvf_d, &cont);
2355 }
2356 
VisitInt64SubWithOverflow(Node * node)2357 void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
2358   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
2359     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
2360     return VisitBinop(this, node, kLoong64SubOvf_d, &cont);
2361   }
2362   FlagsContinuation cont;
2363   VisitBinop(this, node, kLoong64SubOvf_d, &cont);
2364 }
2365 
VisitWord64Equal(Node * const node)2366 void InstructionSelector::VisitWord64Equal(Node* const node) {
2367   FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2368   Int64BinopMatcher m(node);
2369   if (m.right().Is(0)) {
2370     return VisitWordCompareZero(m.node(), m.left().node(), &cont);
2371   }
2372 
2373   VisitWord64Compare(this, node, &cont);
2374 }
2375 
VisitInt64LessThan(Node * node)2376 void InstructionSelector::VisitInt64LessThan(Node* node) {
2377   FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
2378   VisitWord64Compare(this, node, &cont);
2379 }
2380 
VisitInt64LessThanOrEqual(Node * node)2381 void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
2382   FlagsContinuation cont =
2383       FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
2384   VisitWord64Compare(this, node, &cont);
2385 }
2386 
VisitUint64LessThan(Node * node)2387 void InstructionSelector::VisitUint64LessThan(Node* node) {
2388   FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2389   VisitWord64Compare(this, node, &cont);
2390 }
2391 
VisitUint64LessThanOrEqual(Node * node)2392 void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
2393   FlagsContinuation cont =
2394       FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2395   VisitWord64Compare(this, node, &cont);
2396 }
2397 
VisitFloat32Equal(Node * node)2398 void InstructionSelector::VisitFloat32Equal(Node* node) {
2399   FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2400   VisitFloat32Compare(this, node, &cont);
2401 }
2402 
VisitFloat32LessThan(Node * node)2403 void InstructionSelector::VisitFloat32LessThan(Node* node) {
2404   FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2405   VisitFloat32Compare(this, node, &cont);
2406 }
2407 
VisitFloat32LessThanOrEqual(Node * node)2408 void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
2409   FlagsContinuation cont =
2410       FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2411   VisitFloat32Compare(this, node, &cont);
2412 }
2413 
VisitFloat64Equal(Node * node)2414 void InstructionSelector::VisitFloat64Equal(Node* node) {
2415   FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
2416   VisitFloat64Compare(this, node, &cont);
2417 }
2418 
VisitFloat64LessThan(Node * node)2419 void InstructionSelector::VisitFloat64LessThan(Node* node) {
2420   FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
2421   VisitFloat64Compare(this, node, &cont);
2422 }
2423 
VisitFloat64LessThanOrEqual(Node * node)2424 void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
2425   FlagsContinuation cont =
2426       FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
2427   VisitFloat64Compare(this, node, &cont);
2428 }
2429 
VisitFloat64ExtractLowWord32(Node * node)2430 void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
2431   VisitRR(this, kLoong64Float64ExtractLowWord32, node);
2432 }
2433 
VisitFloat64ExtractHighWord32(Node * node)2434 void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
2435   VisitRR(this, kLoong64Float64ExtractHighWord32, node);
2436 }
2437 
VisitFloat64SilenceNaN(Node * node)2438 void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
2439   VisitRR(this, kLoong64Float64SilenceNaN, node);
2440 }
2441 
VisitFloat64InsertLowWord32(Node * node)2442 void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
2443   Loong64OperandGenerator g(this);
2444   Node* left = node->InputAt(0);
2445   Node* right = node->InputAt(1);
2446   Emit(kLoong64Float64InsertLowWord32, g.DefineSameAsFirst(node),
2447        g.UseRegister(left), g.UseRegister(right));
2448 }
2449 
VisitFloat64InsertHighWord32(Node * node)2450 void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
2451   Loong64OperandGenerator g(this);
2452   Node* left = node->InputAt(0);
2453   Node* right = node->InputAt(1);
2454   Emit(kLoong64Float64InsertHighWord32, g.DefineSameAsFirst(node),
2455        g.UseRegister(left), g.UseRegister(right));
2456 }
2457 
VisitMemoryBarrier(Node * node)2458 void InstructionSelector::VisitMemoryBarrier(Node* node) {
2459   Loong64OperandGenerator g(this);
2460   Emit(kLoong64Dbar, g.NoOutput());
2461 }
2462 
VisitWord32AtomicLoad(Node * node)2463 void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
2464   VisitAtomicLoad(this, node, AtomicWidth::kWord32);
2465 }
2466 
VisitWord32AtomicStore(Node * node)2467 void InstructionSelector::VisitWord32AtomicStore(Node* node) {
2468   VisitAtomicStore(this, node, AtomicWidth::kWord32);
2469 }
2470 
VisitWord64AtomicLoad(Node * node)2471 void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
2472   VisitAtomicLoad(this, node, AtomicWidth::kWord64);
2473 }
2474 
VisitWord64AtomicStore(Node * node)2475 void InstructionSelector::VisitWord64AtomicStore(Node* node) {
2476   VisitAtomicStore(this, node, AtomicWidth::kWord64);
2477 }
2478 
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
  ArchOpcode opcode;
  MachineType type = AtomicOpType(node->op());
  if (type == MachineType::Int8()) {
    opcode = kAtomicExchangeInt8;
  } else if (type == MachineType::Uint8()) {
    opcode = kAtomicExchangeUint8;
  } else if (type == MachineType::Int16()) {
    opcode = kAtomicExchangeInt16;
  } else if (type == MachineType::Uint16()) {
    opcode = kAtomicExchangeUint16;
  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
    opcode = kAtomicExchangeWord32;
  } else {
    UNREACHABLE();
  }

  VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32);
}

void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
  ArchOpcode opcode;
  MachineType type = AtomicOpType(node->op());
  if (type == MachineType::Uint8()) {
    opcode = kAtomicExchangeUint8;
  } else if (type == MachineType::Uint16()) {
    opcode = kAtomicExchangeUint16;
  } else if (type == MachineType::Uint32()) {
    opcode = kAtomicExchangeWord32;
  } else if (type == MachineType::Uint64()) {
    opcode = kLoong64Word64AtomicExchangeUint64;
  } else {
    UNREACHABLE();
  }
  VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord64);
}

void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
  ArchOpcode opcode;
  MachineType type = AtomicOpType(node->op());
  if (type == MachineType::Int8()) {
    opcode = kAtomicCompareExchangeInt8;
  } else if (type == MachineType::Uint8()) {
    opcode = kAtomicCompareExchangeUint8;
  } else if (type == MachineType::Int16()) {
    opcode = kAtomicCompareExchangeInt16;
  } else if (type == MachineType::Uint16()) {
    opcode = kAtomicCompareExchangeUint16;
  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
    opcode = kAtomicCompareExchangeWord32;
  } else {
    UNREACHABLE();
  }

  VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord32);
}

void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
  ArchOpcode opcode;
  MachineType type = AtomicOpType(node->op());
  if (type == MachineType::Uint8()) {
    opcode = kAtomicCompareExchangeUint8;
  } else if (type == MachineType::Uint16()) {
    opcode = kAtomicCompareExchangeUint16;
  } else if (type == MachineType::Uint32()) {
    opcode = kAtomicCompareExchangeWord32;
  } else if (type == MachineType::Uint64()) {
    opcode = kLoong64Word64AtomicCompareExchangeUint64;
  } else {
    UNREACHABLE();
  }
  VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord64);
}

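// Shared dispatch for the read-modify-write atomics (Add/Sub/And/Or/Xor);
// the width-specific opcodes are supplied by the VISIT_ATOMIC_BINOP macros
// below.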
void InstructionSelector::VisitWord32AtomicBinaryOperation(
    Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
    ArchOpcode uint16_op, ArchOpcode word32_op) {
  ArchOpcode opcode;
  MachineType type = AtomicOpType(node->op());
  if (type == MachineType::Int8()) {
    opcode = int8_op;
  } else if (type == MachineType::Uint8()) {
    opcode = uint8_op;
  } else if (type == MachineType::Int16()) {
    opcode = int16_op;
  } else if (type == MachineType::Uint16()) {
    opcode = uint16_op;
  } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
    opcode = word32_op;
  } else {
    UNREACHABLE();
  }

  VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord32);
}

#define VISIT_ATOMIC_BINOP(op)                                           \
  void InstructionSelector::VisitWord32Atomic##op(Node* node) {          \
    VisitWord32AtomicBinaryOperation(                                    \
        node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
        kAtomic##op##Uint16, kAtomic##op##Word32);                       \
  }
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
VISIT_ATOMIC_BINOP(And)
VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP

void InstructionSelector::VisitWord64AtomicBinaryOperation(
    Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op,
    ArchOpcode uint64_op) {
  ArchOpcode opcode;
  MachineType type = AtomicOpType(node->op());
  if (type == MachineType::Uint8()) {
    opcode = uint8_op;
  } else if (type == MachineType::Uint16()) {
    opcode = uint16_op;
  } else if (type == MachineType::Uint32()) {
    opcode = uint32_op;
  } else if (type == MachineType::Uint64()) {
    opcode = uint64_op;
  } else {
    UNREACHABLE();
  }
  VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord64);
}

#define VISIT_ATOMIC_BINOP(op)                                                 \
  void InstructionSelector::VisitWord64Atomic##op(Node* node) {                \
    VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8,                 \
                                     kAtomic##op##Uint16, kAtomic##op##Word32, \
                                     kLoong64Word64Atomic##op##Uint64);        \
  }
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
VISIT_ATOMIC_BINOP(And)
VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP

void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
  UNREACHABLE();
}

void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
  UNREACHABLE();
}

#define SIMD_TYPE_LIST(V) \
  V(F64x2)                \
  V(F32x4)                \
  V(I64x2)                \
  V(I32x4)                \
  V(I16x8)                \
  V(I8x16)

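// The following lists pair machine-level SIMD operators with their loong64
// opcodes; the SIMD_VISIT_* macros further down expand each entry into a
// visitor.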
#define SIMD_UNOP_LIST(V)                                     \
  V(F64x2Abs, kLoong64F64x2Abs)                               \
  V(F64x2Neg, kLoong64F64x2Neg)                               \
  V(F64x2Sqrt, kLoong64F64x2Sqrt)                             \
  V(F64x2Ceil, kLoong64F64x2Ceil)                             \
  V(F64x2Floor, kLoong64F64x2Floor)                           \
  V(F64x2Trunc, kLoong64F64x2Trunc)                           \
  V(F64x2NearestInt, kLoong64F64x2NearestInt)                 \
  V(I64x2Neg, kLoong64I64x2Neg)                               \
  V(I64x2BitMask, kLoong64I64x2BitMask)                       \
  V(F64x2ConvertLowI32x4S, kLoong64F64x2ConvertLowI32x4S)     \
  V(F64x2ConvertLowI32x4U, kLoong64F64x2ConvertLowI32x4U)     \
  V(F64x2PromoteLowF32x4, kLoong64F64x2PromoteLowF32x4)       \
  V(F32x4SConvertI32x4, kLoong64F32x4SConvertI32x4)           \
  V(F32x4UConvertI32x4, kLoong64F32x4UConvertI32x4)           \
  V(F32x4Abs, kLoong64F32x4Abs)                               \
  V(F32x4Neg, kLoong64F32x4Neg)                               \
  V(F32x4Sqrt, kLoong64F32x4Sqrt)                             \
  V(F32x4RecipApprox, kLoong64F32x4RecipApprox)               \
  V(F32x4RecipSqrtApprox, kLoong64F32x4RecipSqrtApprox)       \
  V(F32x4Ceil, kLoong64F32x4Ceil)                             \
  V(F32x4Floor, kLoong64F32x4Floor)                           \
  V(F32x4Trunc, kLoong64F32x4Trunc)                           \
  V(F32x4NearestInt, kLoong64F32x4NearestInt)                 \
  V(F32x4DemoteF64x2Zero, kLoong64F32x4DemoteF64x2Zero)       \
  V(I64x2Abs, kLoong64I64x2Abs)                               \
  V(I64x2SConvertI32x4Low, kLoong64I64x2SConvertI32x4Low)     \
  V(I64x2SConvertI32x4High, kLoong64I64x2SConvertI32x4High)   \
  V(I64x2UConvertI32x4Low, kLoong64I64x2UConvertI32x4Low)     \
  V(I64x2UConvertI32x4High, kLoong64I64x2UConvertI32x4High)   \
  V(I32x4SConvertF32x4, kLoong64I32x4SConvertF32x4)           \
  V(I32x4UConvertF32x4, kLoong64I32x4UConvertF32x4)           \
  V(I32x4Neg, kLoong64I32x4Neg)                               \
  V(I32x4SConvertI16x8Low, kLoong64I32x4SConvertI16x8Low)     \
  V(I32x4SConvertI16x8High, kLoong64I32x4SConvertI16x8High)   \
  V(I32x4UConvertI16x8Low, kLoong64I32x4UConvertI16x8Low)     \
  V(I32x4UConvertI16x8High, kLoong64I32x4UConvertI16x8High)   \
  V(I32x4Abs, kLoong64I32x4Abs)                               \
  V(I32x4BitMask, kLoong64I32x4BitMask)                       \
  V(I32x4TruncSatF64x2SZero, kLoong64I32x4TruncSatF64x2SZero) \
  V(I32x4TruncSatF64x2UZero, kLoong64I32x4TruncSatF64x2UZero) \
  V(I16x8Neg, kLoong64I16x8Neg)                               \
  V(I16x8SConvertI8x16Low, kLoong64I16x8SConvertI8x16Low)     \
  V(I16x8SConvertI8x16High, kLoong64I16x8SConvertI8x16High)   \
  V(I16x8UConvertI8x16Low, kLoong64I16x8UConvertI8x16Low)     \
  V(I16x8UConvertI8x16High, kLoong64I16x8UConvertI8x16High)   \
  V(I16x8Abs, kLoong64I16x8Abs)                               \
  V(I16x8BitMask, kLoong64I16x8BitMask)                       \
  V(I8x16Neg, kLoong64I8x16Neg)                               \
  V(I8x16Abs, kLoong64I8x16Abs)                               \
  V(I8x16Popcnt, kLoong64I8x16Popcnt)                         \
  V(I8x16BitMask, kLoong64I8x16BitMask)                       \
  V(S128Not, kLoong64S128Not)                                 \
  V(I64x2AllTrue, kLoong64I64x2AllTrue)                       \
  V(I32x4AllTrue, kLoong64I32x4AllTrue)                       \
  V(I16x8AllTrue, kLoong64I16x8AllTrue)                       \
  V(I8x16AllTrue, kLoong64I8x16AllTrue)                       \
  V(V128AnyTrue, kLoong64V128AnyTrue)

#define SIMD_SHIFT_OP_LIST(V) \
  V(I64x2Shl)                 \
  V(I64x2ShrS)                \
  V(I64x2ShrU)                \
  V(I32x4Shl)                 \
  V(I32x4ShrS)                \
  V(I32x4ShrU)                \
  V(I16x8Shl)                 \
  V(I16x8ShrS)                \
  V(I16x8ShrU)                \
  V(I8x16Shl)                 \
  V(I8x16ShrS)                \
  V(I8x16ShrU)

#define SIMD_BINOP_LIST(V)                                \
  V(F64x2Add, kLoong64F64x2Add)                           \
  V(F64x2Sub, kLoong64F64x2Sub)                           \
  V(F64x2Mul, kLoong64F64x2Mul)                           \
  V(F64x2Div, kLoong64F64x2Div)                           \
  V(F64x2Min, kLoong64F64x2Min)                           \
  V(F64x2Max, kLoong64F64x2Max)                           \
  V(F64x2Eq, kLoong64F64x2Eq)                             \
  V(F64x2Ne, kLoong64F64x2Ne)                             \
  V(F64x2Lt, kLoong64F64x2Lt)                             \
  V(F64x2Le, kLoong64F64x2Le)                             \
  V(I64x2Eq, kLoong64I64x2Eq)                             \
  V(I64x2Ne, kLoong64I64x2Ne)                             \
  V(I64x2Add, kLoong64I64x2Add)                           \
  V(I64x2Sub, kLoong64I64x2Sub)                           \
  V(I64x2Mul, kLoong64I64x2Mul)                           \
  V(I64x2GtS, kLoong64I64x2GtS)                           \
  V(I64x2GeS, kLoong64I64x2GeS)                           \
  V(F32x4Add, kLoong64F32x4Add)                           \
  V(F32x4Sub, kLoong64F32x4Sub)                           \
  V(F32x4Mul, kLoong64F32x4Mul)                           \
  V(F32x4Div, kLoong64F32x4Div)                           \
  V(F32x4Max, kLoong64F32x4Max)                           \
  V(F32x4Min, kLoong64F32x4Min)                           \
  V(F32x4Eq, kLoong64F32x4Eq)                             \
  V(F32x4Ne, kLoong64F32x4Ne)                             \
  V(F32x4Lt, kLoong64F32x4Lt)                             \
  V(F32x4Le, kLoong64F32x4Le)                             \
  V(I32x4Add, kLoong64I32x4Add)                           \
  V(I32x4Sub, kLoong64I32x4Sub)                           \
  V(I32x4Mul, kLoong64I32x4Mul)                           \
  V(I32x4MaxS, kLoong64I32x4MaxS)                         \
  V(I32x4MinS, kLoong64I32x4MinS)                         \
  V(I32x4MaxU, kLoong64I32x4MaxU)                         \
  V(I32x4MinU, kLoong64I32x4MinU)                         \
  V(I32x4Eq, kLoong64I32x4Eq)                             \
  V(I32x4Ne, kLoong64I32x4Ne)                             \
  V(I32x4GtS, kLoong64I32x4GtS)                           \
  V(I32x4GeS, kLoong64I32x4GeS)                           \
  V(I32x4GtU, kLoong64I32x4GtU)                           \
  V(I32x4GeU, kLoong64I32x4GeU)                           \
  V(I32x4DotI16x8S, kLoong64I32x4DotI16x8S)               \
  V(I16x8Add, kLoong64I16x8Add)                           \
  V(I16x8AddSatS, kLoong64I16x8AddSatS)                   \
  V(I16x8AddSatU, kLoong64I16x8AddSatU)                   \
  V(I16x8Sub, kLoong64I16x8Sub)                           \
  V(I16x8SubSatS, kLoong64I16x8SubSatS)                   \
  V(I16x8SubSatU, kLoong64I16x8SubSatU)                   \
  V(I16x8Mul, kLoong64I16x8Mul)                           \
  V(I16x8MaxS, kLoong64I16x8MaxS)                         \
  V(I16x8MinS, kLoong64I16x8MinS)                         \
  V(I16x8MaxU, kLoong64I16x8MaxU)                         \
  V(I16x8MinU, kLoong64I16x8MinU)                         \
  V(I16x8Eq, kLoong64I16x8Eq)                             \
  V(I16x8Ne, kLoong64I16x8Ne)                             \
  V(I16x8GtS, kLoong64I16x8GtS)                           \
  V(I16x8GeS, kLoong64I16x8GeS)                           \
  V(I16x8GtU, kLoong64I16x8GtU)                           \
  V(I16x8GeU, kLoong64I16x8GeU)                           \
  V(I16x8RoundingAverageU, kLoong64I16x8RoundingAverageU) \
  V(I16x8SConvertI32x4, kLoong64I16x8SConvertI32x4)       \
  V(I16x8UConvertI32x4, kLoong64I16x8UConvertI32x4)       \
  V(I16x8Q15MulRSatS, kLoong64I16x8Q15MulRSatS)           \
  V(I8x16Add, kLoong64I8x16Add)                           \
  V(I8x16AddSatS, kLoong64I8x16AddSatS)                   \
  V(I8x16AddSatU, kLoong64I8x16AddSatU)                   \
  V(I8x16Sub, kLoong64I8x16Sub)                           \
  V(I8x16SubSatS, kLoong64I8x16SubSatS)                   \
  V(I8x16SubSatU, kLoong64I8x16SubSatU)                   \
  V(I8x16MaxS, kLoong64I8x16MaxS)                         \
  V(I8x16MinS, kLoong64I8x16MinS)                         \
  V(I8x16MaxU, kLoong64I8x16MaxU)                         \
  V(I8x16MinU, kLoong64I8x16MinU)                         \
  V(I8x16Eq, kLoong64I8x16Eq)                             \
  V(I8x16Ne, kLoong64I8x16Ne)                             \
  V(I8x16GtS, kLoong64I8x16GtS)                           \
  V(I8x16GeS, kLoong64I8x16GeS)                           \
  V(I8x16GtU, kLoong64I8x16GtU)                           \
  V(I8x16GeU, kLoong64I8x16GeU)                           \
  V(I8x16RoundingAverageU, kLoong64I8x16RoundingAverageU) \
  V(I8x16SConvertI16x8, kLoong64I8x16SConvertI16x8)       \
  V(I8x16UConvertI16x8, kLoong64I8x16UConvertI16x8)       \
  V(S128And, kLoong64S128And)                             \
  V(S128Or, kLoong64S128Or)                               \
  V(S128Xor, kLoong64S128Xor)                             \
  V(S128AndNot, kLoong64S128AndNot)

void InstructionSelector::VisitS128Const(Node* node) {
  Loong64OperandGenerator g(this);
  static const int kUint32Immediates = kSimd128Size / sizeof(uint32_t);
  uint32_t val[kUint32Immediates];
  memcpy(val, S128ImmediateParameterOf(node->op()).data(), kSimd128Size);
  // If all bytes are zeros or ones, avoid emitting code for generic constants.
  bool all_zeros = !(val[0] || val[1] || val[2] || val[3]);
  bool all_ones = val[0] == UINT32_MAX && val[1] == UINT32_MAX &&
                  val[2] == UINT32_MAX && val[3] == UINT32_MAX;
  InstructionOperand dst = g.DefineAsRegister(node);
  if (all_zeros) {
    Emit(kLoong64S128Zero, dst);
  } else if (all_ones) {
    Emit(kLoong64S128AllOnes, dst);
  } else {
    Emit(kLoong64S128Const, dst, g.UseImmediate(val[0]), g.UseImmediate(val[1]),
         g.UseImmediate(val[2]), g.UseImmediate(val[3]));
  }
}

void InstructionSelector::VisitS128Zero(Node* node) {
  Loong64OperandGenerator g(this);
  Emit(kLoong64S128Zero, g.DefineAsRegister(node));
}

#define SIMD_VISIT_SPLAT(Type)                               \
  void InstructionSelector::Visit##Type##Splat(Node* node) { \
    VisitRR(this, kLoong64##Type##Splat, node);              \
  }
SIMD_TYPE_LIST(SIMD_VISIT_SPLAT)
#undef SIMD_VISIT_SPLAT

#define SIMD_VISIT_EXTRACT_LANE(Type, Sign)                              \
  void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \
    VisitRRI(this, kLoong64##Type##ExtractLane##Sign, node);             \
  }
SIMD_VISIT_EXTRACT_LANE(F64x2, )
SIMD_VISIT_EXTRACT_LANE(F32x4, )
SIMD_VISIT_EXTRACT_LANE(I64x2, )
SIMD_VISIT_EXTRACT_LANE(I32x4, )
SIMD_VISIT_EXTRACT_LANE(I16x8, U)
SIMD_VISIT_EXTRACT_LANE(I16x8, S)
SIMD_VISIT_EXTRACT_LANE(I8x16, U)
SIMD_VISIT_EXTRACT_LANE(I8x16, S)
#undef SIMD_VISIT_EXTRACT_LANE

#define SIMD_VISIT_REPLACE_LANE(Type)                              \
  void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
    VisitRRIR(this, kLoong64##Type##ReplaceLane, node);            \
  }
SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
#undef SIMD_VISIT_REPLACE_LANE

#define SIMD_VISIT_UNOP(Name, instruction)            \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRR(this, instruction, node);                 \
  }
SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
#undef SIMD_VISIT_UNOP

#define SIMD_VISIT_SHIFT_OP(Name)                     \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitSimdShift(this, kLoong64##Name, node);       \
  }
SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
#undef SIMD_VISIT_SHIFT_OP

#define SIMD_VISIT_BINOP(Name, instruction)           \
  void InstructionSelector::Visit##Name(Node* node) { \
    VisitRRR(this, instruction, node);                \
  }
SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
#undef SIMD_VISIT_BINOP

void InstructionSelector::VisitS128Select(Node* node) {
  VisitRRRR(this, kLoong64S128Select, node);
}

#if V8_ENABLE_WEBASSEMBLY
namespace {

struct ShuffleEntry {
  uint8_t shuffle[kSimd128Size];
  ArchOpcode opcode;
};

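// Each entry maps a canonical 16-byte shuffle pattern to a dedicated loong64
// shuffle instruction. Indices 0-15 select bytes of the first input and
// 16-31 bytes of the second.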
static const ShuffleEntry arch_shuffles[] = {
    {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
     kLoong64S32x4InterleaveRight},
    {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
     kLoong64S32x4InterleaveLeft},
    {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27},
     kLoong64S32x4PackEven},
    {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31},
     kLoong64S32x4PackOdd},
    {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27},
     kLoong64S32x4InterleaveEven},
    {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31},
     kLoong64S32x4InterleaveOdd},

    {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
     kLoong64S16x8InterleaveRight},
    {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
     kLoong64S16x8InterleaveLeft},
    {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
     kLoong64S16x8PackEven},
    {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
     kLoong64S16x8PackOdd},
    {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29},
     kLoong64S16x8InterleaveEven},
    {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31},
     kLoong64S16x8InterleaveOdd},
    {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9},
     kLoong64S16x4Reverse},
    {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13},
     kLoong64S16x2Reverse},

    {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
     kLoong64S8x16InterleaveRight},
    {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
     kLoong64S8x16InterleaveLeft},
    {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
     kLoong64S8x16PackEven},
    {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
     kLoong64S8x16PackOdd},
    {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
     kLoong64S8x16InterleaveEven},
    {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
     kLoong64S8x16InterleaveOdd},
    {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8},
     kLoong64S8x8Reverse},
    {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12},
     kLoong64S8x4Reverse},
    {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14},
     kLoong64S8x2Reverse}};

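// Returns true and sets *opcode if `shuffle` matches an entry in `table`.
// For swizzles (a single input), indices are compared modulo kSimd128Size so
// that two-input patterns in the table still match.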
bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
                         size_t num_entries, bool is_swizzle,
                         ArchOpcode* opcode) {
  uint8_t mask = is_swizzle ? kSimd128Size - 1 : 2 * kSimd128Size - 1;
  for (size_t i = 0; i < num_entries; ++i) {
    const ShuffleEntry& entry = table[i];
    int j = 0;
    for (; j < kSimd128Size; ++j) {
      if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
        break;
      }
    }
    if (j == kSimd128Size) {
      *opcode = entry.opcode;
      return true;
    }
  }
  return false;
}

}  // namespace

void InstructionSelector::VisitI8x16Shuffle(Node* node) {
  uint8_t shuffle[kSimd128Size];
  bool is_swizzle;
  CanonicalizeShuffle(node, shuffle, &is_swizzle);
  uint8_t shuffle32x4[4];
  ArchOpcode opcode;
  if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
                          is_swizzle, &opcode)) {
    VisitRRR(this, opcode, node);
    return;
  }
  Node* input0 = node->InputAt(0);
  Node* input1 = node->InputAt(1);
  uint8_t offset;
  Loong64OperandGenerator g(this);
  if (wasm::SimdShuffle::TryMatchConcat(shuffle, &offset)) {
    Emit(kLoong64S8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input1),
         g.UseRegister(input0), g.UseImmediate(offset));
    return;
  }
  if (wasm::SimdShuffle::TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
    Emit(kLoong64S32x4Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
         g.UseRegister(input1),
         g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle32x4)));
    return;
  }
  Emit(kLoong64I8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
       g.UseRegister(input1),
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle)),
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 4)),
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 8)),
       g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 12)));
}
#else
void InstructionSelector::VisitI8x16Shuffle(Node* node) { UNREACHABLE(); }
#endif  // V8_ENABLE_WEBASSEMBLY

void InstructionSelector::VisitI8x16Swizzle(Node* node) {
  Loong64OperandGenerator g(this);
  InstructionOperand temps[] = {g.TempSimd128Register()};
  // We don't want input 0 or input 1 to be the same as the output, since we
  // will modify the output before doing the calculation.
  Emit(kLoong64I8x16Swizzle, g.DefineAsRegister(node),
       g.UseUniqueRegister(node->InputAt(0)),
       g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
}

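// Ext_w_b/Ext_w_h sign-extend a byte/halfword into the full 64-bit register;
// since 32-bit values are held sign-extended in registers on loong64, the
// same instructions serve both the ...ToInt32 and ...ToInt64 variants.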
void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
  Loong64OperandGenerator g(this);
  Emit(kLoong64Ext_w_b, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
  Loong64OperandGenerator g(this);
  Emit(kLoong64Ext_w_h, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) {
  Loong64OperandGenerator g(this);
  Emit(kLoong64Ext_w_b, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) {
  Loong64OperandGenerator g(this);
  Emit(kLoong64Ext_w_h, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}

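// A 32-bit shift by zero sign-extends the low word into the whole register,
// implementing Word32->Int64 sign extension in a single instruction.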
void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
  Loong64OperandGenerator g(this);
  Emit(kLoong64Sll_w, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
       g.TempImmediate(0));
}

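// Pmin/Pmax implement the Wasm lane-wise select semantics rather than IEEE
// min/max; unique input registers are requested, presumably because the
// generated sequence may write the output before both inputs are consumed.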
void InstructionSelector::VisitF32x4Pmin(Node* node) {
  VisitUniqueRRR(this, kLoong64F32x4Pmin, node);
}

void InstructionSelector::VisitF32x4Pmax(Node* node) {
  VisitUniqueRRR(this, kLoong64F32x4Pmax, node);
}

void InstructionSelector::VisitF64x2Pmin(Node* node) {
  VisitUniqueRRR(this, kLoong64F64x2Pmin, node);
}

void InstructionSelector::VisitF64x2Pmax(Node* node) {
  VisitUniqueRRR(this, kLoong64F64x2Pmax, node);
}

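// The extended-multiply visitors are left empty: as written, these
// operations are not yet lowered to loong64 instructions here.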
#define VISIT_EXT_MUL(OPCODE1, OPCODE2)                                       \
  void InstructionSelector::Visit##OPCODE1##ExtMulLow##OPCODE2(Node* node) {} \
  void InstructionSelector::Visit##OPCODE1##ExtMulHigh##OPCODE2(Node* node) {}

VISIT_EXT_MUL(I64x2, I32x4S)
VISIT_EXT_MUL(I64x2, I32x4U)
VISIT_EXT_MUL(I32x4, I16x8S)
VISIT_EXT_MUL(I32x4, I16x8U)
VISIT_EXT_MUL(I16x8, I8x16S)
VISIT_EXT_MUL(I16x8, I8x16U)
#undef VISIT_EXT_MUL

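// Note: all four extadd-pairwise variants emit the same
// kLoong64ExtAddPairwise opcode, so the opcode itself carries no width or
// sign information.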
#define VISIT_EXTADD_PAIRWISE(OPCODE)                      \
  void InstructionSelector::Visit##OPCODE(Node* node) {    \
    Loong64OperandGenerator g(this);                       \
    Emit(kLoong64ExtAddPairwise, g.DefineAsRegister(node), \
         g.UseRegister(node->InputAt(0)));                 \
  }
VISIT_EXTADD_PAIRWISE(I16x8ExtAddPairwiseI8x16S)
VISIT_EXTADD_PAIRWISE(I16x8ExtAddPairwiseI8x16U)
VISIT_EXTADD_PAIRWISE(I32x4ExtAddPairwiseI16x8S)
VISIT_EXTADD_PAIRWISE(I32x4ExtAddPairwiseI16x8U)
#undef VISIT_EXTADD_PAIRWISE

void InstructionSelector::AddOutputToSelectContinuation(OperandGenerator* g,
                                                        int first_input_index,
                                                        Node* node) {
  UNREACHABLE();
}

// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags;
  return flags | MachineOperatorBuilder::kWord32ShiftIsSafe |
         MachineOperatorBuilder::kInt32DivIsSafe |
         MachineOperatorBuilder::kUint32DivIsSafe |
         MachineOperatorBuilder::kFloat64RoundDown |
         MachineOperatorBuilder::kFloat32RoundDown |
         MachineOperatorBuilder::kFloat64RoundUp |
         MachineOperatorBuilder::kFloat32RoundUp |
         MachineOperatorBuilder::kFloat64RoundTruncate |
         MachineOperatorBuilder::kFloat32RoundTruncate |
         MachineOperatorBuilder::kFloat64RoundTiesEven |
         MachineOperatorBuilder::kFloat32RoundTiesEven;
}

// static
MachineOperatorBuilder::AlignmentRequirements
InstructionSelector::AlignmentRequirements() {
  return MachineOperatorBuilder::AlignmentRequirements::
      FullUnalignedAccessSupport();
}

#undef SIMD_BINOP_LIST
#undef SIMD_SHIFT_OP_LIST
#undef SIMD_UNOP_LIST
#undef SIMD_TYPE_LIST
#undef TRACE_UNIMPL
#undef TRACE

}  // namespace compiler
}  // namespace internal
}  // namespace v8