/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/

D | subreg-split-live-in-error.mir
    28: # undef %0.sub0:vreg_64 = COPY %123:sreg_32 ; undef point for %0.sub1
    57: undef %3.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec
    58: %3.sub1:vreg_128 = COPY %3.sub0
    59: %3.sub2:vreg_128 = COPY %3.sub0
    68: undef %3.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec
    69: %3.sub1:vreg_128 = COPY %3.sub0
    88: undef %10.sub0:vreg_128 = V_MUL_F32_e32 0, %0, implicit $exec
    89: undef %11.sub0:sreg_256 = S_MOV_B32 0
    90: %11.sub1:sreg_256 = COPY %11.sub0
    91: %11.sub2:sreg_256 = COPY %11.sub0
    [all …]

D | detect-dead-lanes.mir
    9: # CHECK: %3:sreg_128 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, undef %2, %subreg.sub3
    10: # CHECK: S_NOP 0, implicit %3.sub0
    15: # CHECK: S_NOP 0, implicit %4.sub0
    17: # CHECK: S_NOP 0, implicit undef %5.sub0
    31: %3 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub3
    32: S_NOP 0, implicit %3.sub0
    37: S_NOP 0, implicit %4.sub0
    39: S_NOP 0, implicit %5.sub0
    45: # CHECK: %0:sreg_128 = REG_SEQUENCE $sgpr0, %subreg.sub0, $sgpr0, %subreg.sub2
    47: # CHECK: %2:sreg_64 = INSERT_SUBREG %0.sub2_sub3, $sgpr42, %subreg.sub0
    [all …]

D | rename-independent-subregs.mir
    10: # in combination with sub0 and needs to stay with the original vreg.
    12: # CHECK: S_NOP 0, implicit-def undef %0.sub0
    24: S_NOP 0, implicit-def undef %0.sub0
    42: # CHECK-NEXT: S_NOP 0, implicit-def undef %0.sub0
    44: # CHECK-NEXT: S_NOP 0, implicit %0.sub0
    63: S_NOP 0, implicit-def %1.sub0
    65: S_NOP 0, implicit %1.sub0
    73: # (1) %0.sub0 + %0.sub0 and (2) %0.sub1 + %0.sub1
    76: … CHECK: INLINEASM &"", 32, 327690, def undef %0.sub0, 327690, def dead %1.sub1, 2147483657, undef …
    80: undef %0.sub0:vreg_64 = IMPLICIT_DEF
    [all …]

D | rename-independent-subregs-mac-operands.mir
    5: # GCN: undef %18.sub0:vreg_128 = V_MAC_F32_e32 undef %3:vgpr_32, undef %9:vgpr_32, undef %18.sub0, …
    45: undef %12.sub0 = COPY killed %4
    53: undef %13.sub0 = COPY %8
    63: %16 = COPY killed %1.sub0
    64: undef %15.sub0 = COPY killed %16
    77: # GCN: undef %7.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec
    78: # GCN: undef %9.sub2:vreg_128 = COPY %7.sub0
    81: # GCN: undef %7.sub0:vreg_128 = V_ADD_F32_e64 0, 0, 0, 0, 0, 0, implicit $exec
    87: # GCN: BUFFER_STORE_DWORD_OFFEN %7.sub0, %0,
    114: %6.sub0 = V_MOV_B32_e32 0, implicit $exec
    [all …]

D | opt-sgpr-to-vgpr-copy.mir
    8: # GCN-NEXT: %[[SGPR_PAIR:[0-9]+]]:sreg_64 = REG_SEQUENCE killed %[[LO]], %subreg.sub0, killed %[[…
    15: # GCN-NEXT: %[[SGPR_PAIR:[0-9]+]]:sreg_64 = REG_SEQUENCE killed %[[LO]], %subreg.sub0, killed %[[…
    20: # GCN: %[[OP0:[0-9]+]]:vreg_64 = REG_SEQUENCE killed %{{[0-9]+}}, %subreg.sub0, killed %{{[0-…
    21: # GCN-NEXT: V_CMP_LT_U32_e64 killed %[[OP0]].sub0, 12, implicit $exec
    112: %10 = REG_SEQUENCE %2, %subreg.sub0, killed %9, %subreg.sub1
    114: %11 = COPY %10.sub0
    116: %13 = COPY %8.sub0
    120: %17 = REG_SEQUENCE killed %15, %subreg.sub0, killed %16, %subreg.sub1
    123: %20 = REG_SEQUENCE killed %19, %subreg.sub0, killed %18, %subreg.sub1
    136: %27 = REG_SEQUENCE killed %26, %subreg.sub0, killed %25, %subreg.sub1
    [all …]

D | twoaddr-mad.mir
    4: # GCN: V_MADMK_F32 killed %0.sub0, 1078523331, killed %1, implicit $exec
    18: %3 = V_MAC_F32_e32 killed %0.sub0, %2, killed %1, implicit $exec
    23: # GCN: V_MADMK_F32 killed %0.sub0, 1078523331, killed %1, implicit $exec
    37: %3 = V_MAC_F32_e32 %2, killed %0.sub0, killed %1, implicit $exec
    42: # GCN: V_MADAK_F32 killed %0.sub0, %0.sub1, 1078523331, implicit $exec
    54: %2 = V_MAC_F32_e32 killed %0.sub0, %0.sub1, %1, implicit $exec
    59: # GCN: V_MADMK_F16 killed %0.sub0, 1078523331, killed %1, implicit $exec
    73: %3 = V_MAC_F16_e32 killed %0.sub0, %2, killed %1, implicit $exec
    78: # GCN: V_MADMK_F16 killed %0.sub0, 1078523331, killed %1, implicit $exec
    92: %3 = V_MAC_F16_e32 %2, killed %0.sub0, killed %1, implicit $exec
    [all …]

D | sched-crash-dbg-value.mir
    209: %11:sreg_32_xm0 = S_LSHR_B32 %10.sub0, 16, implicit-def dead $scc
    223: undef %27.sub0:sreg_64_xexec = S_LOAD_DWORD_IMM %6, 0, 0
    226: undef %29.sub0:sreg_64 = S_ADD_U32 %5.sub0, %28.sub0, implicit-def $scc
    228: undef %30.sub0:sreg_64_xexec = S_LOAD_DWORD_IMM %6, 4, 0
    229: %27.sub0:sreg_64_xexec = IMPLICIT_DEF
    231: %32:sreg_32_xm0 = S_ADD_U32 0, %31.sub0, implicit-def $scc
    236: undef %38.sub1:vreg_64 = V_ASHRREV_I32_e32 31, %37.sub0, implicit $exec
    237: %38.sub0:vreg_64 = COPY %37.sub0
    239: undef %40.sub0:vreg_64, %41:sreg_64_xexec = V_ADD_I32_e64 0, %39.sub0, implicit $exec
    244: %45.sub0:vreg_64 = COPY %37.sub1
    [all …]

D | flat-load-clustering.mir
    61: undef %12.sub0 = V_ADD_I32_e32 %4.sub0, %7, implicit-def $vcc, implicit $exec
    65: undef %9.sub0 = V_ADD_I32_e32 %3.sub0, %7, implicit-def $vcc, implicit $exec
    68: undef %13.sub0 = V_ADD_I32_e32 16, %12.sub0, implicit-def $vcc, implicit $exec
    71: undef %10.sub0 = V_ADD_I32_e32 16, %9.sub0, implicit-def $vcc, implicit $exec

D | limit-coalesce.mir
    56: undef %4.sub0 = COPY $sgpr0
    57: %4.sub1 = COPY %3.sub0
    58: undef %5.sub0 = COPY %4.sub1
    59: %5.sub1 = COPY %4.sub0
    64: %7.sub2 = COPY %3.sub0
    69: %9.sub3 = COPY %3.sub0

D | splitkit.mir
    21: S_NOP 0, implicit-def undef %0.sub0 : sreg_128
    27: S_NOP 0, implicit %0.sub0
    29: S_NOP 0, implicit %0.sub0
    52: undef %0.sub0 : sreg_128 = COPY $sgpr0
    57: S_NOP 0, implicit %0.sub0
    63: S_NOP 0, implicit %0.sub0
    78: S_NOP 0, implicit-def undef %0.sub0 : sreg_128
    86: S_NOP 0, implicit %0.sub0
    101: S_NOP 0, implicit %0.sub0
    103: S_NOP 0, implicit %0.sub0

D | regcoal-subrange-join.mir
    7: # GCN-DAG: undef %[[REG0:[0-9]+]].sub0:sgpr_64 = COPY $sgpr5
    8: # GCN-DAG: undef %[[REG1:[0-9]+]].sub0:sgpr_64 = COPY $sgpr2
    96: %0.sub0 = COPY killed %12
    98: %21.sub0 = COPY killed %15
    101: undef %24.sub0 = COPY killed %22
    119: %37 = V_ADD_F32_e32 killed %30, killed %1.sub0, implicit $exec
    120: undef %56.sub0 = COPY killed %37
    147: %50 = V_ADD_F32_e32 %43, killed %7.sub0, implicit $exec
    148: undef %57.sub0 = COPY killed %50

D | sad.ll
    39: %sub0 = sub i32 %a, %b
    41: %ret0 = select i1 %icmp0, i32 %sub0, i32 %sub1
    124: %sub0 = sub i32 %a, %b
    125: store volatile i32 %sub0, i32 addrspace(5)*undef
    127: %ret0 = select i1 %icmp0, i32 %sub0, i32 %sub1
    141: %sub0 = sub i32 %a, %b
    143: %ret0 = select i1 %icmp0, i32 %sub0, i32 %sub1
    178: %sub0 = sub <4 x i32> %a, %b
    180: %ret0 = select <4 x i1> %icmp0, <4 x i32> %sub0, <4 x i32> %sub1
    212: %sub0 = sub i16 %a, %b
    [all …]

D | coalescing-with-subregs-in-loop-bug.mir
    22: # undef %32.sub0:vreg_128 = COPY killed %31.sub0
    49: undef %28.sub0:vreg_128 = COPY killed %3
    79: %38:vgpr_32 = V_ADD_I32_e32 1, %36.sub0, implicit-def dead $vcc, implicit $exec
    93: undef %32.sub0:vreg_128 = COPY killed %31.sub0

D | coalescer-extend-pruned-subrange.mir
    15: undef %3.sub0:sreg_128 = COPY %0
    21: %7:sreg_32_xm0 = S_AND_B32 target-flags(amdgpu-gotprel) 1, %2.sub0, implicit-def dead $scc
    28: %11:vgpr_32 = V_OR_B32_e32 %12.sub0, %12.sub1, implicit $exec
    47: undef %22.sub0:sreg_128 = COPY %8
    98: undef %40.sub0:vreg_128 = COPY %39

/external/llvm/test/CodeGen/AMDGPU/

D | detect-dead-lanes.mir
    20: # CHECK: S_NOP 0, implicit %3:sub0
    25: # CHECK: S_NOP 0, implicit %4:sub0
    27: # CHECK: S_NOP 0, implicit undef %5:sub0
    42: %3 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1, %2, %subreg.sub3
    43: S_NOP 0, implicit %3:sub0
    48: S_NOP 0, implicit %4:sub0
    50: S_NOP 0, implicit %5:sub0
    59: # CHECK: S_NOP 0, implicit %1:sub0
    63: # CHECK: S_NOP 0, implicit %2:sub0
    68: # CHECK: S_NOP 0, implicit undef %4:sub0
    [all …]

D | rename-independent-subregs.mir
    8: # in combination with sub0 and needs to stay with the original vreg.
    10: # CHECK: S_NOP 0, implicit-def undef %0:sub0
    23: S_NOP 0, implicit-def undef %0:sub0

/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/GlobalISel/

D | inst-select-load-smrd.mir
    47: # SIVI: [[K:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[K_LO]], %subreg.sub0, [[K_HI]], %subreg.sub1
    48: # SIVI-DAG: [[K_SUB0:%[0-9]+]]:sgpr_32 = COPY [[K]].sub0
    49: # SIVI-DAG: [[PTR_LO:%[0-9]+]]:sgpr_32 = COPY [[PTR]].sub0
    54: # SIVI: [[ADD_PTR:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[ADD_PTR_LO]], %subreg.sub0, [[ADD_PTR_HI]], %s…
    61: # GCN: [[K:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[K_LO]], %subreg.sub0, [[K_HI]], %subreg.sub1
    62: # GCN-DAG: [[K_SUB0:%[0-9]+]]:sgpr_32 = COPY [[K]].sub0
    63: # GCN-DAG: [[PTR_LO:%[0-9]+]]:sgpr_32 = COPY [[PTR]].sub0
    68: # GCN: [[ADD_PTR:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[ADD_PTR_LO]], %subreg.sub0, [[ADD_PTR_HI]], %su…
    79: # SIVI: [[K:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[K_LO]], %subreg.sub0, [[K_HI]], %subreg.sub1
    80: # SIVI-DAG: [[K_SUB0:%[0-9]+]]:sgpr_32 = COPY [[K]].sub0
    [all …]

D | inst-select-constant.mir
    25: ; GCN: %{{[0-9]+}}:sreg_64_xexec = REG_SEQUENCE [[LO0]], %subreg.sub0, [[HI0]], %subreg.sub1
    33: ; GCN: %{{[0-9]+}}:sreg_64_xexec = REG_SEQUENCE [[LO1]], %subreg.sub0, [[HI1]], %subreg.sub1
    41: ; GCN: %{{[0-9]+}}:vreg_64 = REG_SEQUENCE [[LO2]], %subreg.sub0, [[HI2]], %subreg.sub1
    49: ; GCN: %{{[0-9]+}}:vreg_64 = REG_SEQUENCE [[LO3]], %subreg.sub0, [[HI3]], %subreg.sub1
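
The GCN lines above all check the same construct: a 64-bit value assembled from separately materialized low and high halves that a REG_SEQUENCE joins on sub0/sub1. A minimal C++ sketch of how such a pair is typically glued together with MachineInstrBuilder follows; the helper name, LoReg/HiReg, and the choice of SReg_64RegClass are illustrative assumptions, not taken from the test, and the snippet assumes it sits inside the AMDGPU target directory so the AMDGPU:: register enums are visible.

    // Illustrative sketch (not from the test): join two 32-bit registers into
    // a 64-bit register with REG_SEQUENCE, as the CHECK lines above expect.
    #include "SIRegisterInfo.h"
    #include "llvm/CodeGen/MachineInstrBuilder.h"
    #include "llvm/CodeGen/MachineRegisterInfo.h"
    #include "llvm/CodeGen/TargetInstrInfo.h"
    #include "llvm/CodeGen/TargetOpcodes.h"

    using namespace llvm;

    static unsigned joinPair64(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I, const DebugLoc &DL,
                               const TargetInstrInfo &TII,
                               MachineRegisterInfo &MRI,
                               unsigned LoReg, unsigned HiReg) {
      unsigned Dst = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
      // REG_SEQUENCE takes (register, subregister-index) pairs: LoReg becomes
      // the sub0 lane and HiReg the sub1 lane of the 64-bit result.
      BuildMI(MBB, I, DL, TII.get(TargetOpcode::REG_SEQUENCE), Dst)
          .addReg(LoReg)
          .addImm(AMDGPU::sub0)
          .addReg(HiReg)
          .addImm(AMDGPU::sub1);
      return Dst;
    }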

/external/llvm/test/Transforms/InstCombine/

D | phi-preserve-ir-flags.ll
    11: %sub0 = fsub fast float %a, %b
    23: %e = phi float [ %sub0, %cond.true ], [ %sub1, %cond.false ]
    33: %sub0 = fsub fast float %a, %b
    45: %e = phi float [ %sub0, %cond.true ], [ %sub1, %cond.false ]
    55: %sub0 = fsub fast float %a, 2.0
    66: %e = phi float [ %sub0, %cond.true ], [ %sub1, %cond.false ]
    76: %sub0 = fsub fast float %a, 2.0
    87: %e = phi float [ %sub0, %cond.true ], [ %sub1, %cond.false ]

/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/

D | phi-preserve-ir-flags.ll
    11: %sub0 = fsub fast float %a, %b
    23: %e = phi float [ %sub0, %cond.true ], [ %sub1, %cond.false ]
    33: %sub0 = fsub fast float %a, %b
    45: %e = phi float [ %sub0, %cond.true ], [ %sub1, %cond.false ]
    55: %sub0 = fsub fast float %a, 2.0
    66: %e = phi float [ %sub0, %cond.true ], [ %sub1, %cond.false ]
    76: %sub0 = fsub fast float %a, 2.0
    87: %e = phi float [ %sub0, %cond.true ], [ %sub1, %cond.false ]

/external/llvm/lib/Target/AMDGPU/

D | SIRegisterInfo.td
    30: let SubRegIndices = [sub0, sub1];
    40: let SubRegIndices = [sub0, sub1];
    54: let SubRegIndices = [sub0, sub1];
    64: let SubRegIndices = [sub0, sub1];
    91: let SubRegIndices = [sub0, sub1];
    132: def SGPR_64Regs : RegisterTuples<[sub0, sub1],
    137: def SGPR_128Regs : RegisterTuples<[sub0, sub1, sub2, sub3],
    144: def SGPR_256 : RegisterTuples<[sub0, sub1, sub2, sub3, sub4, sub5, sub6, sub7],
    155: def SGPR_512 : RegisterTuples<[sub0, sub1, sub2, sub3, sub4, sub5, sub6, sub7,
    181: def TTMP_64Regs : RegisterTuples<[sub0, sub1],
    [all …]

D | SIMachineFunctionInfo.cpp
    151: getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_128RegClass);  in addPrivateSegmentBuffer()
    158: getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass);  in addDispatchPtr()
    165: getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass);  in addQueuePtr()
    172: getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass);  in addKernargSegmentPtr()
    179: getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass);  in addFlatScratchInit()

/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/SLPVectorizer/AArch64/

D | ext-trunc.ll
    37: %sub0 = sub <4 x i32> %z0, %z1
    38: %e0 = extractelement <4 x i32> %sub0, i32 0
    42: %e1 = extractelement <4 x i32> %sub0, i32 1
    46: %e2 = extractelement <4 x i32> %sub0, i32 2
    50: %e3 = extractelement <4 x i32> %sub0, i32 3
    88: %sub0 = sub <4 x i32> %z0, %z1
    89: %e0 = extractelement <4 x i32> %sub0, i32 0
    94: %e1 = extractelement <4 x i32> %sub0, i32 1
    99: %e2 = extractelement <4 x i32> %sub0, i32 2
    104: %e3 = extractelement <4 x i32> %sub0, i32 3

/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/AMDGPU/

D | SIMachineFunctionInfo.cpp
    190: getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_128RegClass));  in addPrivateSegmentBuffer()
    197: getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));  in addDispatchPtr()
    204: getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));  in addQueuePtr()
    212: getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));  in addKernargSegmentPtr()
    219: getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));  in addDispatchID()
    226: getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));  in addFlatScratchInit()
    233: getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));  in addImplicitBufferPtr()
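
Each hit above is the tail of the same idiom: the next unallocated user SGPR is taken as the sub0 lane, and the register info returns the aligned 64-bit (or, for the segment buffer, 128-bit) super-register that covers it, which is then recorded as the argument register. A hedged C++ sketch of one such function follows; only the argument list `getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass` comes from the hits, while the enclosing call to getMatchingSuperReg and the bookkeeping are a reconstruction of the LLVM-7-era code, not a verified copy.

    // Hedged reconstruction of the idiom behind the matched fragments:
    // reserve a user-SGPR pair whose low lane is AMDGPU::sub0.
    #include "SIMachineFunctionInfo.h"
    #include "SIRegisterInfo.h"

    using namespace llvm;

    unsigned SIMachineFunctionInfo::addDispatchPtr(const SIRegisterInfo &TRI) {
      // getNextUserSGPR() is the first still-unassigned user SGPR; asking for
      // the SReg_64 super-register whose sub0 lane it is yields the aligned
      // SGPR pair that will carry the 64-bit dispatch pointer.
      ArgInfo.DispatchPtr = ArgDescriptor::createRegister(TRI.getMatchingSuperReg(
          getNextUserSGPR(), AMDGPU::sub0, &AMDGPU::SReg_64RegClass));
      NumUserSGPRs += 2; // the pointer occupies two consecutive user SGPRs
      return ArgInfo.DispatchPtr.getRegister();
    }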

/external/swiftshader/third_party/llvm-7.0/llvm/test/TableGen/

D | ConcatenatedSubregs.td
    17: // D0_D1 -- D0 (sub0) -- S0 (ssub0)
    22: def sub0 : SubRegIndex<32>;
    62: def Dtup2regs : RegisterTuples<[sub0, sub1],
    91: // CHECK-LABEL: SubRegIndex sub0:
    113: // CHECK-NEXT: SubReg sub0 = S9_S10
    126: // CHECK-NEXT: SubReg sub0 = D5