/external/llvm-project/llvm/test/CodeGen/AMDGPU/GlobalISel/ |
D | legalize-mul.mir |
    45  ; GFX6: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV3]]
    46  ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[MUL]](s32), [[MUL1]](s32)
    54  ; GFX8: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV3]]
    55  ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[MUL]](s32), [[MUL1]](s32)
    63  ; GFX9: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV3]]
    64  ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[MUL]](s32), [[MUL1]](s32)
    84  ; GFX6: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV2]]
    87  ; GFX6: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[MUL1]], [[MUL2]]
    97  ; GFX8: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV2]]
    100 ; GFX8: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[MUL1]], [[MUL2]]
    [all …]
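These checks cover two G_MUL legalizations: a <2 x s32> multiply is unmerged into scalar G_MULs and reassembled with G_BUILD_VECTOR, and an s64 multiply is built from 32-bit partial products whose cross terms are summed with G_ADD. A minimal C sketch of the 64-from-32 decomposition (the helper name is illustrative, not from the test):

    #include <stdint.h>

    /* Low 64 bits of a 64x64 product from 32-bit pieces:
     * lo(a*b) = lo(a)*lo(b) + ((lo(a)*hi(b) + hi(a)*lo(b)) << 32).
     * The G_MUL/G_ADD pairs matched above compute the cross terms. */
    static uint64_t mul64_from_32(uint64_t a, uint64_t b) {
        uint32_t alo = (uint32_t)a, ahi = (uint32_t)(a >> 32);
        uint32_t blo = (uint32_t)b, bhi = (uint32_t)(b >> 32);
        uint64_t lo = (uint64_t)alo * blo;      /* full 32x32->64 product */
        uint32_t cross = alo * bhi + ahi * blo; /* wraps harmlessly: shifted high */
        return lo + ((uint64_t)cross << 32);
    }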
|
D | legalize-umulh.mir |
    69  ; GFX8: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV]], [[UV3]]
    71  ; GFX8: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[MUL]], [[MUL1]]
    97  ; GFX9: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV]], [[UV3]]
    99  ; GFX9: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[MUL]], [[MUL1]]
    235 ; GFX8: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND2]], [[AND3]]
    236 ; GFX8: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[MUL1]], [[C1]](s32)
    269 ; GFX9: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND2]], [[AND3]]
    270 ; GFX9: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[MUL1]], [[C1]](s32)
    315 ; GFX8: [[MUL1:%[0-9]+]]:_(s16) = G_MUL [[AND2]], [[AND3]]
    316 ; GFX8: [[LSHR1:%[0-9]+]]:_(s16) = G_LSHR [[MUL1]], [[C1]](s16)
    [all …]
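Two G_UMULH expansions show up here: the s64 case is assembled from 32-bit multiplies chained with G_UADDO carries, and narrow cases are done by masking the inputs (the G_AND values), multiplying in a wider type, and shifting the product down (the G_LSHR). A sketch of the narrow case, assuming the widened multiply is legal:

    #include <stdint.h>

    /* High half of a 16x16 unsigned multiply via 32-bit arithmetic:
     * mask/zero-extend the operands, multiply, shift the product
     * right by 16 (the G_MUL / G_LSHR pair in the checks). */
    static uint16_t umulh16(uint16_t a, uint16_t b) {
        uint32_t wide = (uint32_t)a * (uint32_t)b;
        return (uint16_t)(wide >> 16);
    }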
|
D | legalize-smulh.mir |
    164 ; GFX8: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[SEXT_INREG2]], [[SEXT_INREG3]]
    165 ; GFX8: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[MUL1]], [[C]](s32)
    198 ; GFX9: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[SEXT_INREG2]], [[SEXT_INREG3]]
    199 ; GFX9: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[MUL1]], [[C]](s32)
    245 ; GFX8: [[MUL1:%[0-9]+]]:_(s16) = G_MUL [[ASHR3]], [[ASHR4]]
    246 ; GFX8: [[ASHR5:%[0-9]+]]:_(s16) = G_ASHR [[MUL1]], [[C]](s16)
    345 ; GFX8: [[MUL1:%[0-9]+]]:_(s16) = G_MUL [[ASHR3]], [[ASHR4]]
    346 ; GFX8: [[ASHR5:%[0-9]+]]:_(s16) = G_ASHR [[MUL1]], [[C3]](s16)
    424 ; GFX9: [[MUL1:%[0-9]+]]:_(<2 x s16>) = G_MUL [[BUILD_VECTOR_TRUNC3]], [[BUILD_VECTOR_TRUNC4]]
    428 ; GFX9: [[ASHR1:%[0-9]+]]:_(<2 x s16>) = G_ASHR [[MUL1]], [[BUILD_VECTOR_TRUNC5]](<2 x s16>)
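G_SMULH is the signed twin of the above: operands reach the wide type by sign extension (the SEXT_INREG and shift-pair checks) and the product is shifted down arithmetically (G_ASHR) rather than logically. A matching sketch; it relies on >> of a signed value being an arithmetic shift, which mainstream compilers guarantee:

    #include <stdint.h>

    /* High half of a 16x16 signed multiply: sign-extend to 32 bits,
     * multiply, arithmetic-shift the product down by 16. */
    static int16_t smulh16(int16_t a, int16_t b) {
        int32_t wide = (int32_t)a * (int32_t)b;
        return (int16_t)(wide >> 16);  /* arithmetic shift assumed */
    }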
|
D | legalize-udiv.mir |
    26  ; GFX6: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UMULH1]], [[COPY1]]
    27  ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[MUL1]]
    52  ; GFX8: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UMULH1]], [[COPY1]]
    53  ; GFX8: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[MUL1]]
    78  ; GFX9: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UMULH1]], [[COPY1]]
    79  ; GFX9: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[MUL1]]
    118 ; GFX6: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UMULH1]], [[UV2]]
    119 ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[UV]], [[MUL1]]
    167 ; GFX8: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UMULH1]], [[UV2]]
    168 ; GFX8: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[UV]], [[MUL1]]
    [all …]
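The matched G_MUL/G_SUB pairs are the remainder step of the G_UDIV expansion: an approximate quotient q, derived from a reciprocal via G_UMULH, is checked by recomputing r = n - q*d and then corrected. legalize-urem below keeps the same pair because r itself is the result. A skeleton of the correction, assuming the estimate never overshoots (which the real lowering arranges; it also needs only a bounded number of fixups, the loop here is just for clarity):

    #include <stdint.h>

    /* Correct a low quotient estimate: recompute the remainder
     * (the G_MUL / G_SUB pair in the checks) and bump the quotient
     * until the remainder fits under the divisor. */
    static uint32_t udiv32_refine(uint32_t n, uint32_t d, uint32_t q_est) {
        uint32_t r = n - q_est * d;   /* MUL1 / SUB1 above */
        while (r >= d) {              /* estimate too low: adjust */
            q_est += 1;
            r -= d;
        }
        return q_est;
    }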
|
D | legalize-urem.mir |
    26  ; GFX6: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UMULH1]], [[COPY1]]
    27  ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[MUL1]]
    49  ; GFX8: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UMULH1]], [[COPY1]]
    50  ; GFX8: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[MUL1]]
    72  ; GFX9: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UMULH1]], [[COPY1]]
    73  ; GFX9: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[MUL1]]
    109 ; GFX6: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UMULH1]], [[UV2]]
    110 ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[UV]], [[MUL1]]
    153 ; GFX8: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UMULH1]], [[UV2]]
    154 ; GFX8: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[UV]], [[MUL1]]
    [all …]
|
D | legalize-sdiv.mir |
    33  ; GFX6: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UMULH1]], [[XOR1]]
    34  ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[XOR]], [[MUL1]]
    69  ; GFX8: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UMULH1]], [[XOR1]]
    70  ; GFX8: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[XOR]], [[MUL1]]
    105 ; GFX9: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UMULH1]], [[XOR1]]
    106 ; GFX9: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[XOR]], [[MUL1]]
    155 ; GFX6: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UMULH1]], [[XOR1]]
    156 ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[XOR]], [[MUL1]]
    223 ; GFX8: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UMULH1]], [[XOR1]]
    224 ; GFX8: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[XOR]], [[MUL1]]
    [all …]
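For G_SDIV (and G_SREM below) the unsigned core is wrapped in branchless sign handling: each operand is xored with its sign mask and corrected to get its magnitude (the XOR values in the checks), divided unsigned, and the quotient's sign restored at the end. A C sketch; it assumes arithmetic >> for signed values and leaves the INT_MIN / -1 overflow case aside:

    #include <stdint.h>

    /* abs(x) branchlessly as (x ^ m) - m with m = x >> 31 (all-ones
     * when x < 0), unsigned divide, then negate if the signs differed. */
    static int32_t sdiv32_via_udiv(int32_t n, int32_t d) {
        uint32_t mn = (uint32_t)(n >> 31);
        uint32_t md = (uint32_t)(d >> 31);
        uint32_t un = ((uint32_t)n ^ mn) - mn;   /* |n| */
        uint32_t ud = ((uint32_t)d ^ md) - md;   /* |d| */
        uint32_t q  = un / ud;                   /* unsigned core, expanded as above */
        uint32_t s  = mn ^ md;                   /* quotient sign mask */
        return (int32_t)((q ^ s) - s);
    }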
|
D | legalize-srem.mir |
    33  ; GFX6: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UMULH1]], [[XOR1]]
    34  ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[XOR]], [[MUL1]]
    65  ; GFX8: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UMULH1]], [[XOR1]]
    66  ; GFX8: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[XOR]], [[MUL1]]
    97  ; GFX9: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UMULH1]], [[XOR1]]
    98  ; GFX9: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[XOR]], [[MUL1]]
    143 ; GFX6: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UMULH1]], [[XOR1]]
    144 ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[XOR]], [[MUL1]]
    204 ; GFX8: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UMULH1]], [[XOR1]]
    205 ; GFX8: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[XOR]], [[MUL1]]
    [all …]
|
/external/llvm-project/llvm/test/Transforms/Reassociate/ |
D | canonicalize-neg-const.ll |
    191 ; CHECK-NEXT: [[MUL1:%.*]] = fmul double [[MUL0]], [[C:%.*]]
    192 ; CHECK-NEXT: [[TMP1:%.*]] = fsub double [[A:%.*]], [[MUL1]]
    204 ; CHECK-NEXT: [[MUL1:%.*]] = fmul double [[MUL0]], [[B:%.*]]
    205 ; CHECK-NEXT: [[MUL2:%.*]] = fmul double [[MUL1]], 4.000000e+00
    219 ; CHECK-NEXT: [[MUL1:%.*]] = fmul double [[MUL0]], [[B:%.*]]
    220 ; CHECK-NEXT: [[MUL2:%.*]] = fmul double [[MUL1]], 4.000000e+00
    236 ; CHECK-NEXT: [[MUL1:%.*]] = fmul double [[MUL0]], [[C:%.*]]
    237 ; CHECK-NEXT: [[TMP1:%.*]] = fadd double [[A:%.*]], [[MUL1]]
    249 ; CHECK-NEXT: [[MUL1:%.*]] = fmul double [[MUL0]], [[B:%.*]]
    250 ; CHECK-NEXT: [[MUL2:%.*]] = fmul double [[MUL1]], 4.000000e+00
    [all …]
|
D | mixed-fast-nonfast-fp.ll |
    25 ; CHECK-NEXT: [[MUL1:%.*]] = fmul reassoc float [[A:%.*]], [[C:%.*]]
    29 ; CHECK-NEXT: [[ADD1:%.*]] = fadd fast float [[MUL1]], [[MUL3]]
|
/external/llvm-project/polly/test/Isl/CodeGen/MemAccess/ |
D | codegen_simple_md.ll |
    59 ; WITHCONST: %[[MUL1:[._a-zA-Z0-9]+]] = mul nsw i64 16, %[[IVOut]]
    61 ; WITHCONST: %[[SUM1:[._a-zA-Z0-9]+]] = add nsw i64 %[[MUL1]], %[[MUL2]]
    68 ; WITHOUTCONST: %[[MUL1:[._a-zA-Z0-9]+]] = mul nsw i64 16, %[[IVOut]]
    70 ; WITHOUTCONST: %[[SUM1:[._a-zA-Z0-9]+]] = add nsw i64 %[[MUL1]], %[[MUL2]]
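The matched multiply-by-16 and add compute a row-major index 16*i + j for the remapped access (plus a constant offset in the WITHCONST run). That is the same address arithmetic a C front end emits for a 16-column array; a small sketch with hypothetical names:

    /* A[i][j] lowers to base + (16*i + j) * sizeof(element):
     * the "mul nsw i64 16, %IVOut" and "add nsw" lines above. */
    static float load_2d(const float A[][16], long i, long j) {
        return A[i][j];
    }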
|
D | codegen_simple_md_float.ll |
    56 ; WITHCONST: %[[MUL1:[._a-zA-Z0-9]+]] = mul nsw i64 16, %[[IVOut]]
    58 ; WITHCONST: %[[SUM1:[._a-zA-Z0-9]+]] = add nsw i64 %[[MUL1]], %[[MUL2]]
    65 ; WITHOUTCONST: %[[MUL1:[._a-zA-Z0-9]+]] = mul nsw i64 16, %[[IVOut]]
    67 ; WITHOUTCONST: %[[SUM1:[._a-zA-Z0-9]+]] = add nsw i64 %[[MUL1]], %[[MUL2]]
|
/external/llvm-project/llvm/test/CodeGen/AMDGPU/ |
D | fexp.ll |
    126 ; VI-NEXT: v_mul_f16_sdwa [[MUL1:v[0-9]+]], v{{[0-9]+}}, [[VREG]] dst_sel:DWORD dst_unused:UNUSE…
    128 ; VI-NEXT: v_exp_f16_sdwa [[MUL1]], [[MUL1]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
    130 ; VI-NEXT: v_or_b32_e32 v{{[0-9]+}}, [[MUL2]], [[MUL1]]
    180 ; VI-NEXT: v_mul_f16_e32 [[MUL1:v[0-9]+]], [[SREG]], v1
    184 ; VI-NEXT: v_exp_f16_e32 [[EXP1:v[0-9]+]], [[MUL1]]
    196 ; GFX9-NEXT: v_mul_f16_e32 [[MUL1:v[0-9]+]], [[SREG]], v1
    200 ; GFX9-NEXT: v_exp_f16_e32 [[EXP1:v[0-9]+]], [[MUL1]]
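The v_mul/v_exp pairs reflect how exp is lowered on AMDGPU: the hardware exponential is base-2, so exp(x) becomes v_exp(x * log2 e), with the constant (about 1.4427) materialized into the VREG/SREG operand of the multiply. The identity, sketched in C:

    #include <math.h>

    /* exp(x) == 2^(x * log2(e)); the multiply feeds a base-2
     * exponential, mirroring the v_mul_f16 -> v_exp_f16 sequence. */
    static const float LOG2E_F = 1.442695041f;  /* log2(e) */

    static float exp_via_exp2(float x) {
        return exp2f(x * LOG2E_F);
    }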
|
/external/llvm-project/llvm/test/Transforms/SLPVectorizer/X86/ |
D | PR36280.ll |
    10 ; CHECK-NEXT: [[MUL1:%.*]] = fmul float [[P1]], [[X:%.*]]
    12 ; CHECK-NEXT: [[ADD1:%.*]] = fadd float [[MUL1]], [[Z:%.*]]
|
D | crash_binaryop.ll |
    22 ; CHECK-NEXT: [[MUL1:%.*]] = fmul double [[ADD1]], 0.000000e+00
    24 ; CHECK-NEXT: [[BINARY_V:%.*]] = fadd double [[MUL1]], [[BINARYOP_B]]
|
/external/tensorflow/tensorflow/compiler/mlir/tensorflow/tests/ |
D | sink_constant.mlir |
    25 // CHECK: %[[MUL1:.*]] = "tf.Mul"(%arg0, %[[CST2]])
    26 // CHECK-NEXT: %[[MUL2:.*]] = "tf.Mul"(%[[MUL1]], %[[CST2]])
|
/external/llvm-project/llvm/test/Transforms/InstCombine/ |
D | reassociate-nuw.ll |
    28 ; CHECK-NEXT: [[MUL1:%.*]] = mul nuw i32 [[X:%.*]], 260
    29 ; CHECK-NEXT: ret i32 [[MUL1]]
    70 ; CHECK-NEXT: [[MUL1:%.*]] = mul i32 [[X:%.*]], [[Y:%.*]]
    71 ; CHECK-NEXT: [[MUL2:%.*]] = mul nuw i32 [[MUL1]], 45
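The first match shows InstCombine's result after folding a chain of constant multiplies into one: (x * 13) * 20 becomes x * 260, and since both original muls carried nuw the combined one may too (if neither step wrapped, the single multiply computes the same value without wrapping). Source-level picture:

    #include <stdint.h>

    /* (x * 13) * 20 == x * 260; a compiler emits a single multiply. */
    static uint32_t mul_chain(uint32_t x) {
        return (x * 13u) * 20u;
    }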
|
/external/webp/src/dsp/ |
D | dec.c |
    40 #define MUL1(a) ((((a) * 20091) >> 16) + (a)) macro
    51 const int c = MUL2(in[4]) - MUL1(in[12]); // [-3783, 3783] in TransformOne_C()
    52 const int d = MUL1(in[4]) + MUL2(in[12]); // [-3785, 3781] in TransformOne_C()
    72 const int c = MUL2(tmp[4]) - MUL1(tmp[12]); in TransformOne_C()
    73 const int d = MUL1(tmp[4]) + MUL2(tmp[12]); in TransformOne_C()
    87 const int d4 = MUL1(in[4]); in TransformAC3_C()
    89 const int d1 = MUL1(in[1]); in TransformAC3_C()
    95 #undef MUL1
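Here MUL1 is a Q16 fixed-point multiply in the VP8 inverse transform: 20091/65536 is about 0.30656, so ((a*20091)>>16)+a approximates a*1.30656, i.e. a*sqrt(2)*cos(pi/8); the companion MUL2 (defined alongside, not among these matches) uses 35468 for roughly a*sqrt(2)*sin(pi/8). Folding the "+a" into the macro keeps the constant within 16 bits, which suits the 16-bit multiplies in the SIMD versions. A small demonstration (assumes arithmetic >> for negative products, as libwebp's targets provide):

    #include <stdio.h>

    #define MUL1(a) ((((a) * 20091) >> 16) + (a))

    int main(void) {
        /* Compare the fixed-point result against the real constant. */
        for (int a = -4000; a <= 4000; a += 1000)
            printf("%6d -> %6d (float: %9.2f)\n", a, MUL1(a), a * 1.3065630);
        return 0;
    }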
|
/external/llvm-project/llvm/test/CodeGen/AArch64/GlobalISel/ |
D | arm64-irtranslator-gep.ll |
    19 ; O0: [[MUL1:%[0-9]+]]:_(s64) = G_MUL [[SEXT]], [[C]]
    20 ; O0: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[MUL1]](s64)
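At -O0 the IRTranslator lowers a getelementptr into explicit arithmetic: sign-extend the index, G_MUL by the element-size constant, then G_PTR_ADD onto the base pointer. In C terms:

    #include <stdint.h>

    /* p[i] as the translator sees it: base + sext(i) * sizeof(*p).
     * The G_MUL by [[C]] is the element-size scaling. */
    static int load_elem(const int *p, int32_t i) {
        return *(const int *)((const char *)p + (int64_t)i * sizeof(int));
    }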
|
D | legalizer-combiner.mir |
    108 ; CHECK: [[MUL1:%[0-9]+]]:_(s64) = G_MUL [[C]], [[UV]]
    111 ; CHECK: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[MUL1]], [[MUL2]]
|
D | legalize-mul.mir |
    84 ; CHECK: [[MUL1:%[0-9]+]]:_(s64) = G_MUL [[UV1]], [[UV3]]
    85 ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MUL]](s64), [[MUL1]](s64)
|
/external/llvm-project/llvm/test/Transforms/LoopVectorize/ |
D | runtime-check-needed-but-empty.ll |
    21 ; CHECK-NEXT: [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 1, i32 undef)
    22 ; CHECK-NEXT: [[MUL_RESULT2:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
    23 ; CHECK-NEXT: [[MUL_OVERFLOW3:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
|
/external/llvm-project/llvm/test/CodeGen/Mips/GlobalISel/legalizer/ |
D | mul.mir |
    233 ; MIPS32: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[COPY3]], [[COPY]]
    236 ; MIPS32: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[MUL1]], [[MUL2]]
    282 ; MIPS32: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[LOAD1]], [[COPY]]
    285 ; MIPS32: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[MUL1]], [[MUL2]]
    381 ; MIPS32: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[COPY2]], [[COPY1]]
    383 ; MIPS32: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[MUL]], [[MUL1]]
    384 ; MIPS32: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[ADD]](s32), [[MUL1]]
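Lines 381-384 show the carry trick used when assembling wide multiplies without a flags register: after ADD = MUL + MUL1, the carry out of the add is recovered with an unsigned compare, because the sum wrapped if and only if it is smaller than one of the addends. Sketch:

    #include <stdint.h>

    /* Carry out of an unsigned add, as in the G_ICMP ult check above. */
    static uint32_t add_with_carry(uint32_t a, uint32_t b, uint32_t *carry) {
        uint32_t sum = a + b;
        *carry = (sum < b);  /* 1 iff the add overflowed */
        return sum;
    }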
|
D | ctpop.mir |
    84 ; MIPS32: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[AND7]], [[C6]]
    85 ; MIPS32: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[MUL1]], [[C7]](s32)
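These two lines are the tail of the SWAR expansion of G_CTPOP: once the word holds per-byte counts, a multiply by 0x01010101 accumulates all four bytes into the top byte, and the G_LSHR by 24 extracts it. The whole expansion in C:

    #include <stdint.h>

    static uint32_t popcount32(uint32_t x) {
        x = x - ((x >> 1) & 0x55555555u);                 /* 2-bit counts  */
        x = (x & 0x33333333u) + ((x >> 2) & 0x33333333u); /* 4-bit counts  */
        x = (x + (x >> 4)) & 0x0F0F0F0Fu;                 /* byte counts   */
        return (x * 0x01010101u) >> 24;                   /* G_MUL/G_LSHR  */
    }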
|
/external/llvm-project/llvm/test/Transforms/SimplifyCFG/PowerPC/ |
D | prefer-fma.ll |
    21 ; CHECK-NEXT: [[MUL1:%.*]] = fmul fast double [[TMP1]], [[TMP2]]
    22 ; CHECK-NEXT: [[SUB1:%.*]] = fsub fast double [[MUL1]], [[TMP0]]
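The test pins down a fast-math fmul/fsub pair that the PowerPC backend can later fuse into a single fused multiply-subtract. In C the fused form is spelled explicitly with fma() from math.h, which evaluates the product and addend with one rounding:

    #include <math.h>

    /* a*b - c with a single rounding: the operation the matched
     * fmul/fsub pair is being kept in shape for. */
    static double fused_msub(double a, double b, double c) {
        return fma(a, b, -c);
    }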
|
/external/llvm-project/llvm/test/Transforms/LoopDistribute/ |
D | scev-inserted-runtime-check.ll |
    17 ; CHECK-NEXT: [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP1]])
    18 ; CHECK-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
    19 ; CHECK-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
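Here (and in runtime-check-needed-but-empty.ll above) the SCEV expander guards its runtime checks with an overflow-aware multiply: llvm.umul.with.overflow returns the truncated product plus an overflow bit, which the two extractvalues pull apart. GCC and Clang expose the same operation at source level as a builtin:

    #include <stdbool.h>
    #include <stdint.h>

    /* Source-level twin of @llvm.umul.with.overflow.i32: stores the
     * truncated product (extractvalue 0) and returns the overflow
     * bit (extractvalue 1). */
    static bool checked_mul_u32(uint32_t a, uint32_t b, uint32_t *out) {
        return __builtin_mul_overflow(a, b, out);
    }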
|