// NOTE(review): This chunk is LLVM TableGen (RISC-V 'V' vector extension,
// spec v0.9 per the header below) that has been mangled in transit:
//   (1) the original newlines were collapsed, so '//' line comments now
//       swallow the code that follows them on the same physical line; and
//   (2) every angle-bracket span starting with a letter appears stripped —
//       presumably by an HTML-tag sanitizer — e.g. 'def VTypeIOp : Operand {'
//       has lost its '<...>' type argument, 'class VWholeLoad nf, ...' has
//       lost the opening of its template parameter list, and many defs read
//       'def VLE8_V : VUnitStrideLoad;' with no '<...>' arguments at all.
//       Spans starting with a digit or quote (e.g. <0b000, ...>, <5>,
//       <"vadd", 0b000000>) survived.
// The file will not parse as TableGen in this state. TODO: restore from the
// original sources rather than hand-repairing; the stripped tokens cannot be
// recovered from this view. Code below is kept byte-identical; only review
// comments have been added.
//
// Visible contents of this block: the license/banner header; the include of
// the 'V' instruction formats; assembler operand classes and operands for the
// vsetvli vtype immediate (VTypeIAsmOperand/VTypeIOp), the optional v0.t mask
// operand (VMaskAsmOperand/VMaskOp), and the simm5 / simm5_plus1 immediates;
// then the instruction class templates: unit-stride / strided / indexed /
// whole-register / segment loads and stores, the VALU* arithmetic classes
// (vector-vector, vector-scalar, vector-immediate; masked, unmasked, and
// operand-reversed variants; vs1-encoded unary forms), and the vector AMO
// classes (VAMOWd / VAMONoWd).
//===-- RISCVInstrInfoV.td - RISC-V 'V' instructions -------*- tablegen -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// /// This file describes the RISC-V instructions from the standard 'V' Vector /// extension, version 0.9. /// This version is still experimental as the 'V' extension hasn't been /// ratified yet. /// //===----------------------------------------------------------------------===// include "RISCVInstrFormatsV.td" //===----------------------------------------------------------------------===// // Operand and SDNode transformation definitions. //===----------------------------------------------------------------------===// def VTypeIAsmOperand : AsmOperandClass { let Name = "VTypeI"; let ParserMethod = "parseVTypeI"; let DiagnosticType = "InvalidVTypeI"; } def VTypeIOp : Operand { let ParserMatchClass = VTypeIAsmOperand; let PrintMethod = "printVTypeI"; let DecoderMethod = "decodeUImmOperand<11>"; } def VMaskAsmOperand : AsmOperandClass { let Name = "RVVMaskRegOpOperand"; let RenderMethod = "addRegOperands"; let PredicateMethod = "isV0Reg"; let ParserMethod = "parseMaskReg"; let IsOptional = 1; let DefaultMethod = "defaultMaskRegOp"; let DiagnosticType = "InvalidVMaskRegister"; } def VMaskOp : RegisterOperand { let ParserMatchClass = VMaskAsmOperand; let PrintMethod = "printVMaskReg"; let EncoderMethod = "getVMaskReg"; let DecoderMethod = "decodeVMaskReg"; } def simm5 : Operand, ImmLeaf(Imm);}]> { let ParserMatchClass = SImmAsmOperand<5>; let EncoderMethod = "getImmOpValue"; let DecoderMethod = "decodeSImmOperand<5>"; let MCOperandPredicate = [{ int64_t Imm; if (MCOp.evaluateAsConstantImm(Imm)) return isInt<5>(Imm); return MCOp.isBareSymbolRef(); }]; } def SImm5Plus1AsmOperand : AsmOperandClass { let 
Name = "SImm5Plus1"; let RenderMethod = "addSImm5Plus1Operands"; let DiagnosticType = "InvalidSImm5Plus1"; } def simm5_plus1 : Operand, ImmLeaf(Imm - 1);}]> { let ParserMatchClass = SImm5Plus1AsmOperand; let PrintMethod = "printSImm5Plus1"; let MCOperandPredicate = [{ int64_t Imm; if (MCOp.evaluateAsConstantImm(Imm)) return isInt<5>(Imm - 1); return MCOp.isBareSymbolRef(); }]; } //===----------------------------------------------------------------------===// // Instruction class templates //===----------------------------------------------------------------------===// let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in { // load vd, (rs1), vm class VUnitStrideLoad : RVInstVLU<0b000, width.Value{3}, lumop, width.Value{2-0}, (outs VR:$vd), (ins GPR:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">; // load vd, (rs1), rs2, vm class VStridedLoad : RVInstVLS<0b000, width.Value{3}, width.Value{2-0}, (outs VR:$vd), (ins GPR:$rs1, GPR:$rs2, VMaskOp:$vm), opcodestr, "$vd, (${rs1}), $rs2$vm">; // load vd, (rs1), vs2, vm class VIndexedLoad : RVInstVLX<0b000, width.Value{3}, width.Value{2-0}, (outs VR:$vd), (ins GPR:$rs1, VR:$vs2, VMaskOp:$vm), opcodestr, "$vd, (${rs1}), $vs2$vm">; // vlr.v vd, (rs1) class VWholeLoad nf, string opcodestr> : RVInstVLU { let vm = 1; let Uses = []; } // segment load vd, (rs1), vm class VUnitStrideSegmentLoad nf, RISCVLSUMOP lumop, RISCVWidth width, string opcodestr> : RVInstVLU; // segment load vd, (rs1), rs2, vm class VStridedSegmentLoad nf, RISCVWidth width, string opcodestr> : RVInstVLS; // segment load vd, (rs1), vs2, vm class VIndexedSegmentLoad nf, RISCVWidth width, string opcodestr> : RVInstVLX; } // hasSideEffects = 0, mayLoad = 1, mayStore = 0 let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in { // store vd, vs3, (rs1), vm class VUnitStrideStore : RVInstVSU<0b000, width.Value{3}, sumop, width.Value{2-0}, (outs), (ins VR:$vs3, GPR:$rs1, VMaskOp:$vm), opcodestr, "$vs3, (${rs1})$vm">; // store vd, vs3, (rs1), rs2, vm class VStridedStore 
: RVInstVSS<0b000, width.Value{3}, width.Value{2-0}, (outs), (ins VR:$vs3, GPR:$rs1, GPR:$rs2, VMaskOp:$vm), opcodestr, "$vs3, (${rs1}), $rs2$vm">; // store vd, vs3, (rs1), vs2, vm class VIndexedStore : RVInstVSX<0b000, width.Value{3}, mop, width.Value{2-0}, (outs), (ins VR:$vs3, GPR:$rs1, VR:$vs2, VMaskOp:$vm), opcodestr, "$vs3, (${rs1}), $vs2$vm">; // vsr.v vd, (rs1) class VWholeStore nf, string opcodestr> : RVInstVSU { let vm = 1; let Uses = []; } // segment store vd, vs3, (rs1), vm class VUnitStrideSegmentStore nf, RISCVWidth width, string opcodestr> : RVInstVSU; // segment store vd, vs3, (rs1), rs2, vm class VStridedSegmentStore nf, RISCVWidth width, string opcodestr> : RVInstVSS; // segment store vd, vs3, (rs1), vs2, vm class VIndexedSegmentStore nf, RISCVWidth width, string opcodestr> : RVInstVSX; } // hasSideEffects = 0, mayLoad = 0, mayStore = 1 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in { // op vd, vs2, vs1, vm class VALUVV funct6, RISCVVFormat opv, string opcodestr> : RVInstVV; // op vd, vs2, vs1, v0 (without mask, use v0 as carry input) class VALUmVV funct6, RISCVVFormat opv, string opcodestr> : RVInstVV { let vm = 0; } // op vd, vs1, vs2, vm (reverse the order of vs1 and vs2) class VALUrVV funct6, RISCVVFormat opv, string opcodestr> : RVInstVV; // op vd, vs2, vs1 class VALUVVNoVm funct6, RISCVVFormat opv, string opcodestr> : RVInstVV { let vm = 1; } // op vd, vs2, rs1, vm class VALUVX funct6, RISCVVFormat opv, string opcodestr> : RVInstVX; // op vd, vs2, rs1, v0 (without mask, use v0 as carry input) class VALUmVX funct6, RISCVVFormat opv, string opcodestr> : RVInstVX { let vm = 0; } // op vd, rs1, vs2, vm (reverse the order of rs1 and vs2) class VALUrVX funct6, RISCVVFormat opv, string opcodestr> : RVInstVX; // op vd, vs1, vs2 class VALUVXNoVm funct6, RISCVVFormat opv, string opcodestr> : RVInstVX { let vm = 1; } // op vd, vs2, imm, vm class VALUVI funct6, string opcodestr, Operand optype = simm5> : RVInstIVI; // op vd, vs2, imm, v0 (without 
mask, use v0 as carry input) class VALUmVI funct6, string opcodestr, Operand optype = simm5> : RVInstIVI { let vm = 0; } // op vd, vs2, imm, vm class VALUVINoVm funct6, string opcodestr, Operand optype = simm5> : RVInstIVI { let vm = 1; } // op vd, vs2, rs1, vm (Float) class VALUVF funct6, RISCVVFormat opv, string opcodestr> : RVInstVX; // op vd, rs1, vs2, vm (Float) (with mask, reverse the order of rs1 and vs2) class VALUrVF funct6, RISCVVFormat opv, string opcodestr> : RVInstVX; // op vd, vs2, vm (use vs1 as instruction encoding) class VALUVs2 funct6, bits<5> vs1, RISCVVFormat opv, string opcodestr> : RVInstV; } // hasSideEffects = 0, mayLoad = 0, mayStore = 0 let hasSideEffects = 0, mayLoad = 1, mayStore = 1 in { // vamo vd, (rs1), vs2, vd, vm class VAMOWd : RVInstVAMO { let Constraints = "$vd_wd = $vd"; let wd = 1; bits<5> vd; let Inst{11-7} = vd; } // vamo x0, (rs1), vs2, vs3, vm class VAMONoWd : RVInstVAMO { bits<5> vs3; let Inst{11-7} = vs3; } } // hasSideEffects = 0, mayLoad = 1, mayStore = 1 //===----------------------------------------------------------------------===// // Combination of instruction classes. // Use these multiclasses to define instructions more easily. 
// NOTE(review): same mangling as the rest of the file — collapsed newlines
// (comments fuse with following code) and stripped letter-initial '<...>'
// spans: the multiclass parameter lists read e.g.
// 'multiclass VALU_IV_V_X_I funct6, Operand optype = simm5, ...' with the
// opening of the original parameter list missing, and the def bodies read
// 'def V : VALUVV;' with no template arguments. Not parseable as TableGen;
// TODO: restore from the original sources. Code kept byte-identical below;
// only review comments added.
//
// Visible contents of this block: the multiclasses that stamp out the
// vector-vector / vector-scalar / vector-immediate (and floating-point .vf)
// variants of each vector ALU instruction from the VALU* class templates
// (including masked 'm', unmasked 'NoVm', and operand-reversed 'r' families),
// the reduction '_VS' forms, the vs1-encoded 'VS2' unary forms, and the VAMO
// multiclass producing the write-destination (_WD) and no-write (_UNWD) forms.
//===----------------------------------------------------------------------===// multiclass VALU_IV_V_X_I funct6, Operand optype = simm5, string vw = "v"> { def V : VALUVV; def X : VALUVX; def I : VALUVI; } multiclass VALU_IV_V_X funct6, string vw = "v"> { def V : VALUVV; def X : VALUVX; } multiclass VALUr_IV_V_X funct6, string vw = "v"> { def V : VALUrVV; def X : VALUrVX; } multiclass VALU_IV_X_I funct6, Operand optype = simm5, string vw = "v"> { def X : VALUVX; def I : VALUVI; } multiclass VALU_IV_V funct6> { def _VS : VALUVV; } multiclass VALUr_IV_X funct6, string vw = "v"> { def X : VALUrVX; } multiclass VALU_MV_V_X funct6, string vw = "v"> { def V : VALUVV; def X : VALUVX; } multiclass VALU_MV_V funct6> { def _VS : VALUVV; } multiclass VALU_MV_Mask funct6, string vm = "v"> { def M : VALUVVNoVm; } multiclass VALU_MV_X funct6, string vw = "v"> { def X : VALUVX; } multiclass VALUr_MV_V_X funct6, string vw = "v"> { def V : VALUrVV; def X : VALUrVX; } multiclass VALUr_MV_X funct6, string vw = "v"> { def X : VALUrVX; } multiclass VALU_MV_VS2 funct6, bits<5> vs1> { def "" : VALUVs2; } multiclass VALUm_IV_V_X_I funct6> { def VM : VALUmVV; def XM : VALUmVX; def IM : VALUmVI; } multiclass VALUm_IV_V_X funct6> { def VM : VALUmVV; def XM : VALUmVX; } multiclass VALUNoVm_IV_V_X_I funct6, Operand optype = simm5> { def V : VALUVVNoVm; def X : VALUVXNoVm; def I : VALUVINoVm; } multiclass VALUNoVm_IV_V_X funct6> { def V : VALUVVNoVm; def X : VALUVXNoVm; } multiclass VALU_FV_V_F funct6, string vw = "v"> { def V : VALUVV; def F : VALUVF; } multiclass VALU_FV_F funct6, string vw = "v"> { def F : VALUVF; } multiclass VALUr_FV_V_F funct6, string vw = "v"> { def V : VALUrVV; def F : VALUrVF; } multiclass VALU_FV_V funct6> { def _VS : VALUVV; } multiclass VALU_FV_VS2 funct6, bits<5> vs1> { def "" : VALUVs2; } multiclass VAMO { def _WD : VAMOWd; def _UNWD : VAMONoWd; } //===----------------------------------------------------------------------===// // Instructions 
// NOTE(review): same mangling as above — collapsed newlines and stripped
// letter-initial '<...>' argument lists, e.g. 'def VLE8_V : VUnitStrideLoad;'
// and 'defm VAMOSWAPEI8 : VAMO;' carry none of their original template
// arguments, while quote/digit-initial argument lists such as
// VALU_IV_V_X_I<"vadd", 0b000000> survived. Not parseable as TableGen;
// TODO: restore from the original sources. Code kept byte-identical below;
// only review comments added.
//
// Visible contents of this block: the concrete instruction definitions —
// vsetvli/vsetvl; unit-stride, strided and indexed vector loads and stores;
// whole-register vl1r.v/vs1r.v; the integer, fixed-point and floating-point
// arithmetic defm groups (with "@earlyclobber $vd" constraints and
// RVVConstraint annotations on the widening/narrowing/iota/slide/gather/
// compress forms); the vmsge{u} pseudo-instructions and their aliases;
// integer/FP merge and move instructions; type-convert instructions;
// reductions; mask-register logical instructions and vpopc/vfirst/vid;
// scalar-move instructions; vmv<nf>r.v; then the Zvlsseg segment load/store
// definitions (foreach nf=2-8) and the Zvamo vector-AMO definitions under
// their respective predicates.
//
// NOTE(review): the final 'include' directive at the very end of this chunk
// is truncated mid-statement — its target filename is missing from this view.
//===----------------------------------------------------------------------===// let Predicates = [HasStdExtV] in { let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in { def VSETVLI : RVInstSetVLi<(outs GPR:$rd), (ins GPR:$rs1, VTypeIOp:$vtypei), "vsetvli", "$rd, $rs1, $vtypei">; def VSETVL : RVInstSetVL<(outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2), "vsetvl", "$rd, $rs1, $rs2">; } // hasSideEffects = 1, mayLoad = 0, mayStore = 0 // Vector Unit-Stride Instructions def VLE8_V : VUnitStrideLoad; def VLE16_V : VUnitStrideLoad; def VLE32_V : VUnitStrideLoad; def VLE64_V : VUnitStrideLoad; def VLE128_V : VUnitStrideLoad; def VLE256_V : VUnitStrideLoad; def VLE512_V : VUnitStrideLoad; def VLE1024_V : VUnitStrideLoad; def VLE8FF_V : VUnitStrideLoad; def VLE16FF_V : VUnitStrideLoad; def VLE32FF_V : VUnitStrideLoad; def VLE64FF_V : VUnitStrideLoad; def VLE128FF_V : VUnitStrideLoad; def VLE256FF_V : VUnitStrideLoad; def VLE512FF_V : VUnitStrideLoad; def VLE1024FF_V : VUnitStrideLoad; def VSE8_V : VUnitStrideStore; def VSE16_V : VUnitStrideStore; def VSE32_V : VUnitStrideStore; def VSE64_V : VUnitStrideStore; def VSE128_V : VUnitStrideStore; def VSE256_V : VUnitStrideStore; def VSE512_V : VUnitStrideStore; def VSE1024_V : VUnitStrideStore; // Vector Strided Instructions def VLSE8_V : VStridedLoad; def VLSE16_V : VStridedLoad; def VLSE32_V : VStridedLoad; def VLSE64_V : VStridedLoad; def VLSE128_V : VStridedLoad; def VLSE256_V : VStridedLoad; def VLSE512_V : VStridedLoad; def VLSE1024_V : VStridedLoad; def VSSE8_V : VStridedStore; def VSSE16_V : VStridedStore; def VSSE32_V : VStridedStore; def VSSE64_V : VStridedStore; def VSSE128_V : VStridedStore; def VSSE256_V : VStridedStore; def VSSE512_V : VStridedStore; def VSSE1024_V : VStridedStore; // Vector Indexed Instructions def VLXEI8_V : VIndexedLoad; def VLXEI16_V : VIndexedLoad; def VLXEI32_V : VIndexedLoad; def VLXEI64_V : VIndexedLoad; def VLXEI128_V : VIndexedLoad; def VLXEI256_V : VIndexedLoad; def VLXEI512_V : VIndexedLoad; 
def VLXEI1024_V : VIndexedLoad; def VSXEI8_V : VIndexedStore; def VSXEI16_V : VIndexedStore; def VSXEI32_V : VIndexedStore; def VSXEI64_V : VIndexedStore; def VSXEI128_V : VIndexedStore; def VSXEI256_V : VIndexedStore; def VSXEI512_V : VIndexedStore; def VSXEI1024_V : VIndexedStore; def VSUXEI8_V : VIndexedStore; def VSUXEI16_V : VIndexedStore; def VSUXEI32_V : VIndexedStore; def VSUXEI64_V : VIndexedStore; def VSUXEI128_V : VIndexedStore; def VSUXEI256_V : VIndexedStore; def VSUXEI512_V : VIndexedStore; def VSUXEI1024_V : VIndexedStore; def VL1R_V : VWholeLoad<0, "vl1r.v">; def VS1R_V : VWholeStore<0, "vs1r.v">; // Vector Single-Width Integer Add and Subtract defm VADD_V : VALU_IV_V_X_I<"vadd", 0b000000>; defm VSUB_V : VALU_IV_V_X<"vsub", 0b000010>; defm VRSUB_V : VALU_IV_X_I<"vrsub", 0b000011>; // Vector Widening Integer Add/Subtract // Refer to 11.2 Widening Vector Arithmetic Instructions // The destination vector register group cannot overlap a source vector // register group of a different element width (including the mask register // if masked), otherwise an illegal instruction exception is raised. let Constraints = "@earlyclobber $vd" in { let RVVConstraint = WidenV in { defm VWADDU_V : VALU_MV_V_X<"vwaddu", 0b110000>; defm VWSUBU_V : VALU_MV_V_X<"vwsubu", 0b110010>; defm VWADD_V : VALU_MV_V_X<"vwadd", 0b110001>; defm VWSUB_V : VALU_MV_V_X<"vwsub", 0b110011>; } // RVVConstraint = WidenV // Set earlyclobber for following instructions for second and mask operands. // This has the downside that the earlyclobber constraint is too coarse and // will impose unnecessary restrictions by not allowing the destination to // overlap with the first (wide) operand. 
let RVVConstraint = WidenW in { defm VWADDU_W : VALU_MV_V_X<"vwaddu", 0b110100, "w">; defm VWSUBU_W : VALU_MV_V_X<"vwsubu", 0b110110, "w">; defm VWADD_W : VALU_MV_V_X<"vwadd", 0b110101, "w">; defm VWSUB_W : VALU_MV_V_X<"vwsub", 0b110111, "w">; } // RVVConstraint = WidenW } // Constraints = "@earlyclobber $vd" def : InstAlias<"vwcvt.x.x.v $vd, $vs$vm", (VWADD_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>; def : InstAlias<"vwcvtu.x.x.v $vd, $vs$vm", (VWADDU_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>; // Vector Integer Extension defm VZEXT_VF8 : VALU_MV_VS2<"vzext.vf8", 0b010010, 0b00010>; defm VSEXT_VF8 : VALU_MV_VS2<"vsext.vf8", 0b010010, 0b00011>; defm VZEXT_VF4 : VALU_MV_VS2<"vzext.vf4", 0b010010, 0b00100>; defm VSEXT_VF4 : VALU_MV_VS2<"vsext.vf4", 0b010010, 0b00101>; defm VZEXT_VF2 : VALU_MV_VS2<"vzext.vf2", 0b010010, 0b00110>; defm VSEXT_VF2 : VALU_MV_VS2<"vsext.vf2", 0b010010, 0b00111>; // Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions defm VADC_V : VALUm_IV_V_X_I<"vadc", 0b010000>; let Constraints = "@earlyclobber $vd", RVVConstraint = Vmadc in { defm VMADC_V : VALUm_IV_V_X_I<"vmadc", 0b010001>; defm VMADC_V : VALUNoVm_IV_V_X_I<"vmadc", 0b010001>; } // Constraints = "@earlyclobber $vd", RVVConstraint = Vmadc defm VSBC_V : VALUm_IV_V_X<"vsbc", 0b010010>; let Constraints = "@earlyclobber $vd", RVVConstraint = Vmadc in { defm VMSBC_V : VALUm_IV_V_X<"vmsbc", 0b010011>; defm VMSBC_V : VALUNoVm_IV_V_X<"vmsbc", 0b010011>; } // Constraints = "@earlyclobber $vd", RVVConstraint = Vmadc // Vector Bitwise Logical Instructions defm VAND_V : VALU_IV_V_X_I<"vand", 0b001001>; defm VOR_V : VALU_IV_V_X_I<"vor", 0b001010>; defm VXOR_V : VALU_IV_V_X_I<"vxor", 0b001011>; def : InstAlias<"vnot.v $vd, $vs$vm", (VXOR_VI VR:$vd, VR:$vs, -1, VMaskOp:$vm)>; // Vector Single-Width Bit Shift Instructions defm VSLL_V : VALU_IV_V_X_I<"vsll", 0b100101, uimm5>; defm VSRL_V : VALU_IV_V_X_I<"vsrl", 0b101000, uimm5>; defm VSRA_V : VALU_IV_V_X_I<"vsra", 0b101001, uimm5>; // Vector Narrowing 
Integer Right Shift Instructions // Refer to 11.3. Narrowing Vector Arithmetic Instructions // The destination vector register group cannot overlap the first source // vector register group (specified by vs2). The destination vector register // group cannot overlap the mask register if used, unless LMUL=1. let Constraints = "@earlyclobber $vd", RVVConstraint = Narrow in { defm VNSRL_W : VALU_IV_V_X_I<"vnsrl", 0b101100, uimm5, "w">; defm VNSRA_W : VALU_IV_V_X_I<"vnsra", 0b101101, uimm5, "w">; } // Constraints = "@earlyclobber $vd", RVVConstraint = Narrow // Vector Integer Comparison Instructions let RVVConstraint = NoConstraint in { defm VMSEQ_V : VALU_IV_V_X_I<"vmseq", 0b011000>; defm VMSNE_V : VALU_IV_V_X_I<"vmsne", 0b011001>; defm VMSLTU_V : VALU_IV_V_X<"vmsltu", 0b011010>; defm VMSLT_V : VALU_IV_V_X<"vmslt", 0b011011>; defm VMSLEU_V : VALU_IV_V_X_I<"vmsleu", 0b011100>; defm VMSLE_V : VALU_IV_V_X_I<"vmsle", 0b011101>; defm VMSGTU_V : VALU_IV_X_I<"vmsgtu", 0b011110>; defm VMSGT_V : VALU_IV_X_I<"vmsgt", 0b011111>; } // RVVConstraint = NoConstraint def : InstAlias<"vmsgtu.vv $vd, $va, $vb$vm", (VMSLTU_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>; def : InstAlias<"vmsgt.vv $vd, $va, $vb$vm", (VMSLT_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>; def : InstAlias<"vmsgeu.vv $vd, $va, $vb$vm", (VMSLEU_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>; def : InstAlias<"vmsge.vv $vd, $va, $vb$vm", (VMSLE_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>; def : InstAlias<"vmsltu.vi $vd, $va, $imm$vm", (VMSLEU_VI VR:$vd, VR:$va, simm5_plus1:$imm, VMaskOp:$vm), 0>; def : InstAlias<"vmslt.vi $vd, $va, $imm$vm", (VMSLE_VI VR:$vd, VR:$va, simm5_plus1:$imm, VMaskOp:$vm), 0>; def : InstAlias<"vmsgeu.vi $vd, $va, $imm$vm", (VMSGTU_VI VR:$vd, VR:$va, simm5_plus1:$imm, VMaskOp:$vm), 0>; def : InstAlias<"vmsge.vi $vd, $va, $imm$vm", (VMSGT_VI VR:$vd, VR:$va, simm5_plus1:$imm, VMaskOp:$vm), 0>; let isAsmParserOnly = 1, hasSideEffects = 0, mayLoad = 0, mayStore = 0 in { def PseudoVMSGEU_VX : 
Pseudo<(outs VR:$vd), (ins VR:$vs2, GPR:$rs1), [], "vmsgeu.vx", "$vd, $vs2, $rs1">; def PseudoVMSGE_VX : Pseudo<(outs VR:$vd), (ins VR:$vs2, GPR:$rs1), [], "vmsge.vx", "$vd, $vs2, $rs1">; def PseudoVMSGEU_VX_M : Pseudo<(outs VRNoV0:$vd), (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm), [], "vmsgeu.vx", "$vd, $vs2, $rs1$vm">; def PseudoVMSGE_VX_M : Pseudo<(outs VRNoV0:$vd), (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm), [], "vmsge.vx", "$vd, $vs2, $rs1$vm">; def PseudoVMSGEU_VX_M_T : Pseudo<(outs VMV0:$vd, VR:$scratch), (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm), [], "vmsgeu.vx", "$vd, $vs2, $rs1$vm, $scratch">; def PseudoVMSGE_VX_M_T : Pseudo<(outs VMV0:$vd, VR:$scratch), (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm), [], "vmsge.vx", "$vd, $vs2, $rs1$vm, $scratch">; } // This apparently unnecessary alias prevents matching `vmsge{u}.vx vd, vs2, vs1` as if // it were an unmasked (i.e. $vm = RISCV::NoRegister) PseudoVMSGE{U}_VX_M. def : InstAlias<"vmsgeu.vx $vd, $va, $rs1", (PseudoVMSGEU_VX VR:$vd, VR:$va, GPR:$rs1), 0>; def : InstAlias<"vmsge.vx $vd, $va, $rs1", (PseudoVMSGE_VX VR:$vd, VR:$va, GPR:$rs1), 0>; def : InstAlias<"vmsgeu.vx v0, $va, $rs1, $vm, $vt", (PseudoVMSGEU_VX_M_T V0, VR:$vt, VR:$va, GPR:$rs1, VMaskOp:$vm), 0>; def : InstAlias<"vmsge.vx v0, $va, $rs1, $vm, $vt", (PseudoVMSGE_VX_M_T V0, VR:$vt, VR:$va, GPR:$rs1, VMaskOp:$vm), 0>; def : InstAlias<"vmsgeu.vx $vd, $va, $rs1, $vm", (PseudoVMSGEU_VX_M VRNoV0:$vd, VR:$va, GPR:$rs1, VMaskOp:$vm), 0>; def : InstAlias<"vmsge.vx $vd, $va, $rs1, $vm", (PseudoVMSGE_VX_M VRNoV0:$vd, VR:$va, GPR:$rs1, VMaskOp:$vm), 0>; // Vector Integer Min/Max Instructions defm VMINU_V : VALU_IV_V_X<"vminu", 0b000100>; defm VMIN_V : VALU_IV_V_X<"vmin", 0b000101>; defm VMAXU_V : VALU_IV_V_X<"vmaxu", 0b000110>; defm VMAX_V : VALU_IV_V_X<"vmax", 0b000111>; // Vector Single-Width Integer Multiply Instructions defm VMUL_V : VALU_MV_V_X<"vmul", 0b100101>; defm VMULH_V : VALU_MV_V_X<"vmulh", 0b100111>; defm VMULHU_V : VALU_MV_V_X<"vmulhu", 0b100100>; defm VMULHSU_V : 
VALU_MV_V_X<"vmulhsu", 0b100110>; // Vector Integer Divide Instructions defm VDIVU_V : VALU_MV_V_X<"vdivu", 0b100000>; defm VDIV_V : VALU_MV_V_X<"vdiv", 0b100001>; defm VREMU_V : VALU_MV_V_X<"vremu", 0b100010>; defm VREM_V : VALU_MV_V_X<"vrem", 0b100011>; // Vector Widening Integer Multiply Instructions let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in { defm VWMUL_V : VALU_MV_V_X<"vwmul", 0b111011>; defm VWMULU_V : VALU_MV_V_X<"vwmulu", 0b111000>; defm VWMULSU_V : VALU_MV_V_X<"vwmulsu", 0b111010>; } // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV // Vector Single-Width Integer Multiply-Add Instructions defm VMACC_V : VALUr_MV_V_X<"vmacc", 0b101101>; defm VNMSAC_V : VALUr_MV_V_X<"vnmsac", 0b101111>; defm VMADD_V : VALUr_MV_V_X<"vmadd", 0b101001>; defm VNMSUB_V : VALUr_MV_V_X<"vnmsub", 0b101011>; // Vector Widening Integer Multiply-Add Instructions let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in { defm VWMACCU_V : VALUr_MV_V_X<"vwmaccu", 0b111100>; defm VWMACC_V : VALUr_MV_V_X<"vwmacc", 0b111101>; defm VWMACCSU_V : VALUr_MV_V_X<"vwmaccsu", 0b111111>; defm VWMACCUS_V : VALUr_MV_X<"vwmaccus", 0b111110>; } // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV // Vector Integer Merge Instructions defm VMERGE_V : VALUm_IV_V_X_I<"vmerge", 0b010111>; // Vector Integer Move Instructions let hasSideEffects = 0, mayLoad = 0, mayStore = 0, vs2 = 0, vm = 1 in { // op vd, vs1 def VMV_V_V : RVInstVV<0b010111, OPIVV, (outs VR:$vd), (ins VR:$vs1), "vmv.v.v", "$vd, $vs1">; // op vd, rs1 def VMV_V_X : RVInstVX<0b010111, OPIVX, (outs VR:$vd), (ins GPR:$rs1), "vmv.v.x", "$vd, $rs1">; // op vd, imm def VMV_V_I : RVInstIVI<0b010111, (outs VR:$vd), (ins simm5:$imm), "vmv.v.i", "$vd, $imm">; } // hasSideEffects = 0, mayLoad = 0, mayStore = 0 // Vector Fixed-Point Arithmetic Instructions defm VSADDU_V : VALU_IV_V_X_I<"vsaddu", 0b100000>; defm VSADD_V : VALU_IV_V_X_I<"vsadd", 0b100001>; defm VSSUBU_V : VALU_IV_V_X<"vssubu", 0b100010>; defm 
VSSUB_V : VALU_IV_V_X<"vssub", 0b100011>; // Vector Single-Width Averaging Add and Subtract defm VAADDU_V : VALU_MV_V_X<"vaaddu", 0b001000>; defm VAADD_V : VALU_MV_V_X<"vaadd", 0b001001>; defm VASUBU_V : VALU_MV_V_X<"vasubu", 0b001010>; defm VASUB_V : VALU_MV_V_X<"vasub", 0b001011>; // Vector Single-Width Fractional Multiply with Rounding and Saturation defm VSMUL_V : VALU_IV_V_X<"vsmul", 0b100111>; // Vector Single-Width Scaling Shift Instructions defm VSSRL_V : VALU_IV_V_X_I<"vssrl", 0b101010, uimm5>; defm VSSRA_V : VALU_IV_V_X_I<"vssra", 0b101011, uimm5>; // Vector Narrowing Fixed-Point Clip Instructions let Constraints = "@earlyclobber $vd", RVVConstraint = Narrow in { defm VNCLIPU_W : VALU_IV_V_X_I<"vnclipu", 0b101110, uimm5, "w">; defm VNCLIP_W : VALU_IV_V_X_I<"vnclip", 0b101111, uimm5, "w">; } // Constraints = "@earlyclobber $vd", RVVConstraint = Narrow // Vector Single-Width Floating-Point Add/Subtract Instructions defm VFADD_V : VALU_FV_V_F<"vfadd", 0b000000>; defm VFSUB_V : VALU_FV_V_F<"vfsub", 0b000010>; defm VFRSUB_V : VALU_FV_F<"vfrsub", 0b100111>; // Vector Widening Floating-Point Add/Subtract Instructions let Constraints = "@earlyclobber $vd" in { let RVVConstraint = WidenV in { defm VFWADD_V : VALU_FV_V_F<"vfwadd", 0b110000>; defm VFWSUB_V : VALU_FV_V_F<"vfwsub", 0b110010>; } // RVVConstraint = WidenV // Set earlyclobber for following instructions for second and mask operands. // This has the downside that the earlyclobber constraint is too coarse and // will impose unnecessary restrictions by not allowing the destination to // overlap with the first (wide) operand. 
let RVVConstraint = WidenW in { defm VFWADD_W : VALU_FV_V_F<"vfwadd", 0b110100, "w">; defm VFWSUB_W : VALU_FV_V_F<"vfwsub", 0b110110, "w">; } // RVVConstraint = WidenW } // Constraints = "@earlyclobber $vd" // Vector Single-Width Floating-Point Multiply/Divide Instructions defm VFMUL_V : VALU_FV_V_F<"vfmul", 0b100100>; defm VFDIV_V : VALU_FV_V_F<"vfdiv", 0b100000>; defm VFRDIV_V : VALU_FV_F<"vfrdiv", 0b100001>; // Vector Widening Floating-Point Multiply let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in { defm VFWMUL_V : VALU_FV_V_F<"vfwmul", 0b111000>; } // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV // Vector Single-Width Floating-Point Fused Multiply-Add Instructions defm VFMACC_V : VALUr_FV_V_F<"vfmacc", 0b101100>; defm VFNMACC_V : VALUr_FV_V_F<"vfnmacc", 0b101101>; defm VFMSAC_V : VALUr_FV_V_F<"vfmsac", 0b101110>; defm VFNMSAC_V : VALUr_FV_V_F<"vfnmsac", 0b101111>; defm VFMADD_V : VALUr_FV_V_F<"vfmadd", 0b101000>; defm VFNMADD_V : VALUr_FV_V_F<"vfnmadd", 0b101001>; defm VFMSUB_V : VALUr_FV_V_F<"vfmsub", 0b101010>; defm VFNMSUB_V : VALUr_FV_V_F<"vfnmsub", 0b101011>; // Vector Widening Floating-Point Fused Multiply-Add Instructions let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in { defm VFWMACC_V : VALUr_FV_V_F<"vfwmacc", 0b111100>; defm VFWNMACC_V : VALUr_FV_V_F<"vfwnmacc", 0b111101>; defm VFWMSAC_V : VALUr_FV_V_F<"vfwmsac", 0b111110>; defm VFWNMSAC_V : VALUr_FV_V_F<"vfwnmsac", 0b111111>; } // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV // Vector Floating-Point Square-Root Instruction defm VFSQRT_V : VALU_FV_VS2<"vfsqrt.v", 0b010011, 0b00000>; // Vector Floating-Point MIN/MAX Instructions defm VFMIN_V : VALU_FV_V_F<"vfmin", 0b000100>; defm VFMAX_V : VALU_FV_V_F<"vfmax", 0b000110>; // Vector Floating-Point Sign-Injection Instructions defm VFSGNJ_V : VALU_FV_V_F<"vfsgnj", 0b001000>; defm VFSGNJN_V : VALU_FV_V_F<"vfsgnjn", 0b001001>; defm VFSGNJX_V : VALU_FV_V_F<"vfsgnjx", 0b001010>; // Vector 
Floating-Point Compare Instructions let RVVConstraint = NoConstraint in { defm VMFEQ_V : VALU_FV_V_F<"vmfeq", 0b011000>; defm VMFNE_V : VALU_FV_V_F<"vmfne", 0b011100>; defm VMFLT_V : VALU_FV_V_F<"vmflt", 0b011011>; defm VMFLE_V : VALU_FV_V_F<"vmfle", 0b011001>; defm VMFGT_V : VALU_FV_F<"vmfgt", 0b011101>; defm VMFGE_V : VALU_FV_F<"vmfge", 0b011111>; } // RVVConstraint = NoConstraint def : InstAlias<"vmfgt.vv $vd, $va, $vb$vm", (VMFLT_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>; def : InstAlias<"vmfge.vv $vd, $va, $vb$vm", (VMFLE_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>; // Vector Floating-Point Classify Instruction defm VFCLASS_V : VALU_FV_VS2<"vfclass.v", 0b010011, 0b10000>; let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in { // Vector Floating-Point Merge Instruction def VFMERGE_VFM : RVInstVX<0b010111, OPFVF, (outs VR:$vd), (ins VR:$vs2, FPR32:$rs1, VMV0:$v0), "vfmerge.vfm", "$vd, $vs2, $rs1, v0"> { let vm = 0; } // Vector Floating-Point Move Instruction def VFMV_V_F : RVInstVX<0b010111, OPFVF, (outs VR:$vd), (ins FPR32:$rs1), "vfmv.v.f", "$vd, $rs1"> { let vs2 = 0; let vm = 1; } } // hasSideEffects = 0, mayLoad = 0, mayStore = 0 // Single-Width Floating-Point/Integer Type-Convert Instructions defm VFCVT_XU_F_V : VALU_FV_VS2<"vfcvt.xu.f.v", 0b010010, 0b00000>; defm VFCVT_X_F_V : VALU_FV_VS2<"vfcvt.x.f.v", 0b010010, 0b00001>; defm VFCVT_RTZ_XU_F_V : VALU_FV_VS2<"vfcvt.rtz.xu.f.v", 0b010010, 0b00110>; defm VFCVT_RTZ_X_F_V : VALU_FV_VS2<"vfcvt.rtz.x.f.v", 0b010010, 0b00111>; defm VFCVT_F_XU_V : VALU_FV_VS2<"vfcvt.f.xu.v", 0b010010, 0b00010>; defm VFCVT_F_X_V : VALU_FV_VS2<"vfcvt.f.x.v", 0b010010, 0b00011>; // Widening Floating-Point/Integer Type-Convert Instructions let Constraints = "@earlyclobber $vd", RVVConstraint = WidenCvt in { defm VFWCVT_XU_F_V : VALU_FV_VS2<"vfwcvt.xu.f.v", 0b010010, 0b01000>; defm VFWCVT_X_F_V : VALU_FV_VS2<"vfwcvt.x.f.v", 0b010010, 0b01001>; defm VFWCVT_RTZ_XU_F_V : VALU_FV_VS2<"vfwcvt.rtz.xu.f.v", 0b010010, 0b01110>; defm 
VFWCVT_RTZ_X_F_V : VALU_FV_VS2<"vfwcvt.rtz.x.f.v", 0b010010, 0b01111>; defm VFWCVT_F_XU_V : VALU_FV_VS2<"vfwcvt.f.xu.v", 0b010010, 0b01010>; defm VFWCVT_F_X_V : VALU_FV_VS2<"vfwcvt.f.x.v", 0b010010, 0b01011>; defm VFWCVT_F_F_V : VALU_FV_VS2<"vfwcvt.f.f.v", 0b010010, 0b01100>; } // Constraints = "@earlyclobber $vd", RVVConstraint = WidenCvt // Narrowing Floating-Point/Integer Type-Convert Instructions let Constraints = "@earlyclobber $vd", RVVConstraint = NarrowCvt in { defm VFNCVT_XU_F_W : VALU_FV_VS2<"vfncvt.xu.f.w", 0b010010, 0b10000>; defm VFNCVT_X_F_W : VALU_FV_VS2<"vfncvt.x.f.w", 0b010010, 0b10001>; defm VFNCVT_RTZ_XU_F_W : VALU_FV_VS2<"vfncvt.rtz.xu.f.w", 0b010010, 0b10110>; defm VFNCVT_RTZ_X_F_W : VALU_FV_VS2<"vfncvt.rtz.x.f.w", 0b010010, 0b10111>; defm VFNCVT_F_XU_W : VALU_FV_VS2<"vfncvt.f.xu.w", 0b010010, 0b10010>; defm VFNCVT_F_X_W : VALU_FV_VS2<"vfncvt.f.x.w", 0b010010, 0b10011>; defm VFNCVT_F_F_W : VALU_FV_VS2<"vfncvt.f.f.w", 0b010010, 0b10100>; defm VFNCVT_ROD_F_F_W : VALU_FV_VS2<"vfncvt.rod.f.f.w", 0b010010, 0b10101>; } // Constraints = "@earlyclobber $vd", RVVConstraint = NarrowCvt // Vector Single-Width Integer Reduction Instructions let RVVConstraint = NoConstraint in { defm VREDSUM : VALU_MV_V<"vredsum", 0b000000>; defm VREDMAXU : VALU_MV_V<"vredmaxu", 0b000110>; defm VREDMAX : VALU_MV_V<"vredmax", 0b000111>; defm VREDMINU : VALU_MV_V<"vredminu", 0b000100>; defm VREDMIN : VALU_MV_V<"vredmin", 0b000101>; defm VREDAND : VALU_MV_V<"vredand", 0b000001>; defm VREDOR : VALU_MV_V<"vredor", 0b000010>; defm VREDXOR : VALU_MV_V<"vredxor", 0b000011>; } // RVVConstraint = NoConstraint // Vector Widening Integer Reduction Instructions let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in { // Set earlyclobber for following instructions for second and mask operands. 
// This has the downside that the earlyclobber constraint is too coarse and // will impose unnecessary restrictions by not allowing the destination to // overlap with the first (wide) operand. defm VWREDSUMU : VALU_IV_V<"vwredsumu", 0b110000>; defm VWREDSUM : VALU_IV_V<"vwredsum", 0b110001>; } // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint // Vector Single-Width Floating-Point Reduction Instructions let RVVConstraint = NoConstraint in { defm VFREDOSUM : VALU_FV_V<"vfredosum", 0b000011>; defm VFREDSUM : VALU_FV_V<"vfredsum", 0b000001>; defm VFREDMAX : VALU_FV_V<"vfredmax", 0b000111>; defm VFREDMIN : VALU_FV_V<"vfredmin", 0b000101>; } // RVVConstraint = NoConstraint // Vector Widening Floating-Point Reduction Instructions let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in { // Set earlyclobber for following instructions for second and mask operands. // This has the downside that the earlyclobber constraint is too coarse and // will impose unnecessary restrictions by not allowing the destination to // overlap with the first (wide) operand. 
defm VFWREDOSUM : VALU_FV_V<"vfwredosum", 0b110011>; defm VFWREDSUM : VALU_FV_V<"vfwredsum", 0b110001>; } // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint // Vector Mask-Register Logical Instructions defm VMAND_M : VALU_MV_Mask<"vmand", 0b011001, "m">; defm VMNAND_M : VALU_MV_Mask<"vmnand", 0b011101, "m">; defm VMANDNOT_M : VALU_MV_Mask<"vmandnot", 0b011000, "m">; defm VMXOR_M : VALU_MV_Mask<"vmxor", 0b011011, "m">; defm VMOR_M : VALU_MV_Mask<"vmor", 0b011010, "m">; defm VMNOR_M : VALU_MV_Mask<"vmnor", 0b011110, "m">; defm VMORNOT_M : VALU_MV_Mask<"vmornot", 0b011100, "m">; defm VMXNOR_M : VALU_MV_Mask<"vmxnor", 0b011111, "m">; def : InstAlias<"vmmv.m $vd, $vs", (VMAND_MM VR:$vd, VR:$vs, VR:$vs)>; def : InstAlias<"vmclr.m $vd", (VMXOR_MM VR:$vd, VR:$vd, VR:$vd)>; def : InstAlias<"vmset.m $vd", (VMXNOR_MM VR:$vd, VR:$vd, VR:$vd)>; def : InstAlias<"vmnot.m $vd, $vs", (VMNAND_MM VR:$vd, VR:$vs, VR:$vs)>; let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in { // Vector mask population count vpopc def VPOPC_M : RVInstV<0b010000, 0b10000, OPMVV, (outs GPR:$vd), (ins VR:$vs2, VMaskOp:$vm), "vpopc.m", "$vd, $vs2$vm">; // vfirst find-first-set mask bit def VFIRST_M : RVInstV<0b010000, 0b10001, OPMVV, (outs GPR:$vd), (ins VR:$vs2, VMaskOp:$vm), "vfirst.m", "$vd, $vs2$vm">; } // hasSideEffects = 0, mayLoad = 0, mayStore = 0 // vmsbf.m set-before-first mask bit defm VMSBF_M : VALU_MV_VS2<"vmsbf.m", 0b010100, 0b00001>; // vmsif.m set-including-first mask bit defm VMSIF_M : VALU_MV_VS2<"vmsif.m", 0b010100, 0b00011>; // vmsof.m set-only-first mask bit defm VMSOF_M : VALU_MV_VS2<"vmsof.m", 0b010100, 0b00010>; // Vector Iota Instruction let Constraints = "@earlyclobber $vd", RVVConstraint = Iota in { defm VIOTA_M : VALU_MV_VS2<"viota.m", 0b010100, 0b10000>; } // Constraints = "@earlyclobber $vd", RVVConstraint = Iota // Vector Element Index Instruction let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in { def VID_V : RVInstV<0b010100, 0b10001, OPMVV, (outs 
VR:$vd), (ins VMaskOp:$vm), "vid.v", "$vd$vm"> { let vs2 = 0; } // Integer Scalar Move Instructions let vm = 1 in { def VMV_X_S : RVInstV<0b010000, 0b00000, OPMVV, (outs GPR:$vd), (ins VR:$vs2), "vmv.x.s", "$vd, $vs2">; def VMV_S_X : RVInstV2<0b010000, 0b00000, OPMVX, (outs VR:$vd), (ins GPR:$rs1), "vmv.s.x", "$vd, $rs1">; } } // hasSideEffects = 0, mayLoad = 0, mayStore = 0 let hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1 in { // Floating-Point Scalar Move Instructions def VFMV_F_S : RVInstV<0b010000, 0b00000, OPFVV, (outs FPR32:$vd), (ins VR:$vs2), "vfmv.f.s", "$vd, $vs2">; def VFMV_S_F : RVInstV2<0b010000, 0b00000, OPFVF, (outs VR:$vd), (ins FPR32:$rs1), "vfmv.s.f", "$vd, $rs1">; } // hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1 // Vector Slide Instructions let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in { defm VSLIDEUP_V : VALU_IV_X_I<"vslideup", 0b001110, uimm5>; } // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp defm VSLIDEDOWN_V : VALU_IV_X_I<"vslidedown", 0b001111, uimm5>; let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in { defm VSLIDE1UP_V : VALU_MV_X<"vslide1up", 0b001110>; defm VFSLIDE1UP_V : VALU_FV_F<"vfslide1up", 0b001110>; } // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp defm VSLIDE1DOWN_V : VALU_MV_X<"vslide1down", 0b001111>; defm VFSLIDE1DOWN_V : VALU_FV_F<"vfslide1down", 0b001111>; // Vector Register Gather Instruction let Constraints = "@earlyclobber $vd", RVVConstraint = Vrgather in { defm VRGATHER_V : VALU_IV_V_X_I<"vrgather", 0b001100, uimm5>; } // Constraints = "@earlyclobber $vd", RVVConstraint = Vrgather // Vector Compress Instruction let Constraints = "@earlyclobber $vd", RVVConstraint = Vcompress in { defm VCOMPRESS_V : VALU_MV_Mask<"vcompress", 0b010111>; } // Constraints = "@earlyclobber $vd", RVVConstraint = Vcompress let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in { foreach nf = [1, 2, 4, 8] in { def VMV#nf#R_V : RVInstV<0b100111, !add(nf, -1), OPIVI, 
(outs VR:$vd), (ins VR:$vs2), "vmv" # nf # "r.v", "$vd, $vs2"> { let Uses = []; let vm = 1; } } } // hasSideEffects = 0, mayLoad = 0, mayStore = 0 } // Predicates = [HasStdExtV] let Predicates = [HasStdExtZvlsseg] in { foreach nf=2-8 in { def VLSEG#nf#E8_V : VUnitStrideSegmentLoad; def VLSEG#nf#E16_V : VUnitStrideSegmentLoad; def VLSEG#nf#E32_V : VUnitStrideSegmentLoad; def VLSEG#nf#E64_V : VUnitStrideSegmentLoad; def VLSEG#nf#E128_V : VUnitStrideSegmentLoad; def VLSEG#nf#E256_V : VUnitStrideSegmentLoad; def VLSEG#nf#E512_V : VUnitStrideSegmentLoad; def VLSEG#nf#E1024_V : VUnitStrideSegmentLoad; def VLSEG#nf#E8FF_V : VUnitStrideSegmentLoad; def VLSEG#nf#E16FF_V : VUnitStrideSegmentLoad; def VLSEG#nf#E32FF_V : VUnitStrideSegmentLoad; def VLSEG#nf#E64FF_V : VUnitStrideSegmentLoad; def VLSEG#nf#E128FF_V : VUnitStrideSegmentLoad; def VLSEG#nf#E256FF_V : VUnitStrideSegmentLoad; def VLSEG#nf#E512FF_V : VUnitStrideSegmentLoad; def VLSEG#nf#E1024FF_V : VUnitStrideSegmentLoad; def VSSEG#nf#E8_V : VUnitStrideSegmentStore; def VSSEG#nf#E16_V : VUnitStrideSegmentStore; def VSSEG#nf#E32_V : VUnitStrideSegmentStore; def VSSEG#nf#E64_V : VUnitStrideSegmentStore; def VSSEG#nf#E128_V : VUnitStrideSegmentStore; def VSSEG#nf#E256_V : VUnitStrideSegmentStore; def VSSEG#nf#E512_V : VUnitStrideSegmentStore; def VSSEG#nf#E1024_V : VUnitStrideSegmentStore; // Vector Strided Instructions def VLSSEG#nf#E8_V : VStridedSegmentLoad; def VLSSEG#nf#E16_V : VStridedSegmentLoad; def VLSSEG#nf#E32_V : VStridedSegmentLoad; def VLSSEG#nf#E64_V : VStridedSegmentLoad; def VLSSEG#nf#E128_V : VStridedSegmentLoad; def VLSSEG#nf#E256_V : VStridedSegmentLoad; def VLSSEG#nf#E512_V : VStridedSegmentLoad; def VLSSEG#nf#E1024_V : VStridedSegmentLoad; def VSSSEG#nf#E8_V : VStridedSegmentStore; def VSSSEG#nf#E16_V : VStridedSegmentStore; def VSSSEG#nf#E32_V : VStridedSegmentStore; def VSSSEG#nf#E64_V : VStridedSegmentStore; def VSSSEG#nf#E128_V : VStridedSegmentStore; def VSSSEG#nf#E256_V : VStridedSegmentStore; 
def VSSSEG#nf#E512_V : VStridedSegmentStore; def VSSSEG#nf#E1024_V : VStridedSegmentStore; // Vector Indexed Instructions def VLXSEG#nf#EI8_V : VIndexedSegmentLoad; def VLXSEG#nf#EI16_V : VIndexedSegmentLoad; def VLXSEG#nf#EI32_V : VIndexedSegmentLoad; def VLXSEG#nf#EI64_V : VIndexedSegmentLoad; def VLXSEG#nf#EI128_V : VIndexedSegmentLoad; def VLXSEG#nf#EI256_V : VIndexedSegmentLoad; def VLXSEG#nf#EI512_V : VIndexedSegmentLoad; def VLXSEG#nf#EI1024_V : VIndexedSegmentLoad; def VSXSEG#nf#EI8_V : VIndexedSegmentStore; def VSXSEG#nf#EI16_V : VIndexedSegmentStore; def VSXSEG#nf#EI32_V : VIndexedSegmentStore; def VSXSEG#nf#EI64_V : VIndexedSegmentStore; def VSXSEG#nf#EI128_V : VIndexedSegmentStore; def VSXSEG#nf#EI256_V : VIndexedSegmentStore; def VSXSEG#nf#EI512_V : VIndexedSegmentStore; def VSXSEG#nf#EI1024_V : VIndexedSegmentStore; } } // Predicates = [HasStdExtZvlsseg] let Predicates = [HasStdExtZvamo, HasStdExtA] in { defm VAMOSWAPEI8 : VAMO; defm VAMOSWAPEI16 : VAMO; defm VAMOSWAPEI32 : VAMO; defm VAMOADDEI8 : VAMO; defm VAMOADDEI16 : VAMO; defm VAMOADDEI32 : VAMO; defm VAMOXOREI8 : VAMO; defm VAMOXOREI16 : VAMO; defm VAMOXOREI32 : VAMO; defm VAMOANDEI8 : VAMO; defm VAMOANDEI16 : VAMO; defm VAMOANDEI32 : VAMO; defm VAMOOREI8 : VAMO; defm VAMOOREI16 : VAMO; defm VAMOOREI32 : VAMO; defm VAMOMINEI8 : VAMO; defm VAMOMINEI16 : VAMO; defm VAMOMINEI32 : VAMO; defm VAMOMAXEI8 : VAMO; defm VAMOMAXEI16 : VAMO; defm VAMOMAXEI32 : VAMO; defm VAMOMINUEI8 : VAMO; defm VAMOMINUEI16 : VAMO; defm VAMOMINUEI32 : VAMO; defm VAMOMAXUEI8 : VAMO; defm VAMOMAXUEI16 : VAMO; defm VAMOMAXUEI32 : VAMO; } // Predicates = [HasStdExtZvamo, HasStdExtA] let Predicates = [HasStdExtZvamo, HasStdExtA, IsRV64] in { defm VAMOSWAPEI64 : VAMO; defm VAMOADDEI64 : VAMO; defm VAMOXOREI64 : VAMO; defm VAMOANDEI64 : VAMO; defm VAMOOREI64 : VAMO; defm VAMOMINEI64 : VAMO; defm VAMOMAXEI64 : VAMO; defm VAMOMINUEI64 : VAMO; defm VAMOMAXUEI64 : VAMO; } // Predicates = [HasStdExtZvamo, HasStdExtA, IsRV64] include 
"RISCVInstrInfoVPseudos.td"