/external/llvm-project/llvm/test/CodeGen/AMDGPU/GlobalISel/ |
D | legalize-fexp.mir |
    15  ; GFX6: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[C]]
    16  ; GFX6: [[FEXP2_:%[0-9]+]]:_(s32) = G_FEXP2 [[FMUL]]
    21  ; GFX8: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[C]]
    22  ; GFX8: [[FEXP2_:%[0-9]+]]:_(s32) = G_FEXP2 [[FMUL]]
    27  ; GFX9: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[C]]
    28  ; GFX9: [[FEXP2_:%[0-9]+]]:_(s32) = G_FEXP2 [[FMUL]]
    44  ; GFX6: [[FMUL:%[0-9]+]]:_(s32) = nnan G_FMUL [[COPY]], [[C]]
    45  ; GFX6: [[FEXP2_:%[0-9]+]]:_(s32) = nnan G_FEXP2 [[FMUL]]
    50  ; GFX8: [[FMUL:%[0-9]+]]:_(s32) = nnan G_FMUL [[COPY]], [[C]]
    51  ; GFX8: [[FEXP2_:%[0-9]+]]:_(s32) = nnan G_FEXP2 [[FMUL]]
    [all …]
|
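A worked identity for the fexp expansion above (my annotation, not part of the test): each pair of checks matches the lowering

    \exp(x) = 2^{\,x \cdot \log_2 e}

so G_FEXP becomes a G_FMUL of the input by the constant [[C]] (presumably \log_2 e \approx 1.442695) followed by G_FEXP2; the nnan variants simply carry the fast-math flag through both instructions.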
D | legalize-flog10.mir |
    14  ; CHECK: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FLOG2_]], [[C]]
    15  ; CHECK: $vgpr0 = COPY [[FMUL]](s32)
    31  ; CHECK: [[FMUL:%[0-9]+]]:_(s32) = nnan G_FMUL [[FLOG2_]], [[C]]
    32  ; CHECK: $vgpr0 = COPY [[FMUL]](s32)
    49  ; CHECK: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FLOG2_]], [[C]]
    52  ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[FMUL]](s32), [[FMUL1]](s32)
    70  ; CHECK: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FLOG2_]], [[C]]
    75  …; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[FMUL]](s32), [[FMUL1]](s32), [[F…
    94  ; CHECK: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FLOG2_]], [[C]]
    95  ; CHECK: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMUL]](s32)
    [all …]
|
D | legalize-flog.mir |
    14  ; CHECK: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FLOG2_]], [[C]]
    15  ; CHECK: $vgpr0 = COPY [[FMUL]](s32)
    31  ; CHECK: [[FMUL:%[0-9]+]]:_(s32) = nnan G_FMUL [[FLOG2_]], [[C]]
    32  ; CHECK: $vgpr0 = COPY [[FMUL]](s32)
    49  ; CHECK: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FLOG2_]], [[C]]
    52  ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[FMUL]](s32), [[FMUL1]](s32)
    70  ; CHECK: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FLOG2_]], [[C]]
    75  …; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[FMUL]](s32), [[FMUL1]](s32), [[F…
    94  ; CHECK: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FLOG2_]], [[C]]
    95  ; CHECK: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMUL]](s32)
    [all …]
|
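The flog and flog10 tests above are the mirror image of fexp: compute G_FLOG2, then rescale the result with a G_FMUL. As a note of mine (the value of [[C]] is not visible in the snippets, so the constants are assumed):

    \ln(x) = \log_2(x)\cdot \ln 2, \qquad \log_{10}(x) = \log_2(x)\cdot \log_{10} 2

The vector cases apply the same scalar expansion per element before the G_BUILD_VECTOR, and the f16 case adds a G_FPTRUNC of the f32 result.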
D | legalize-fmul.mir |
    15  ; SI: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[COPY1]]
    16  ; SI: $vgpr0 = COPY [[FMUL]](s32)
    20  ; VI: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[COPY1]]
    21  ; VI: $vgpr0 = COPY [[FMUL]](s32)
    25  ; GFX9: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[COPY1]]
    26  ; GFX9: $vgpr0 = COPY [[FMUL]](s32)
    41  ; SI: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[COPY]], [[COPY1]]
    42  ; SI: $vgpr0_vgpr1 = COPY [[FMUL]](s64)
    46  ; VI: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[COPY]], [[COPY1]]
    47  ; VI: $vgpr0_vgpr1 = COPY [[FMUL]](s64)
    [all …]
|
D | legalize-fcos.mir |
    15  ; SI: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[C]]
    16  ; SI: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.fract), [[FMUL]](s32)
    22  ; VI: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[C]]
    23  ; VI: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.fract), [[FMUL]](s32)
    29  ; GFX9: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[C]]
    30  ; GFX9: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.cos), [[FMUL]](s32)
    46  ; SI: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[COPY]], [[C]]
    47  ; SI: [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.fract), [[FMUL]](s64)
    53  ; VI: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[COPY]], [[C]]
    54  ; VI: [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.fract), [[FMUL]](s64)
    [all …]
|
D | legalize-fsin.mir |
    15  ; SI: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[C]]
    16  ; SI: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.fract), [[FMUL]](s32)
    22  ; VI: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[C]]
    23  ; VI: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.fract), [[FMUL]](s32)
    29  ; GFX9: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[C]]
    30  ; GFX9: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.sin), [[FMUL]](s32)
    46  ; SI: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[COPY]], [[C]]
    47  ; SI: [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.fract), [[FMUL]](s64)
    53  ; VI: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[COPY]], [[C]]
    54  ; VI: [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.fract), [[FMUL]](s64)
    [all …]
|
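For fcos/fsin the constant feeding the G_FMUL is presumably 1/(2\pi): the AMDGPU hardware sin/cos take their operand in fractions of a full turn rather than radians, so the lowering sketched by these checks is roughly

    \sin(x) \;\rightarrow\; \mathrm{sin_{hw}}\!\left(\tfrac{x}{2\pi}\right)

with an extra @llvm.amdgcn.fract of the scaled value on SI/VI, whose sin/cos need a pre-reduced argument, while GFX9 feeds the product straight into @llvm.amdgcn.sin / @llvm.amdgcn.cos.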
D | legalize-fmad.s32.mir |
    41   ; GFX103: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[COPY1]]
    42   ; GFX103: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[FMUL]], [[COPY2]]
    84   ; GFX103: [[FMUL:%[0-9]+]]:_(s32) = nnan G_FMUL [[COPY]], [[COPY1]]
    85   ; GFX103: [[FADD:%[0-9]+]]:_(s32) = nnan G_FADD [[FMUL]], [[COPY2]]
    145  ; GFX103: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[UV]], [[UV2]]
    146  ; GFX103: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[FMUL]], [[UV4]]
    212  ; GFX103: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[UV]], [[UV3]]
    213  ; GFX103: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[FMUL]], [[UV6]]
    284  ; GFX103: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[UV]], [[UV4]]
    285  ; GFX103: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[FMUL]], [[UV8]]
    [all …]
|
D | legalize-fmad.s64.mir |
    21   ; CHECK: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[COPY]], [[COPY1]]
    22   ; CHECK: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[FMUL]], [[COPY2]]
    49   ; CHECK: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[UV]], [[UV2]]
    50   ; CHECK: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[FMUL]], [[UV4]]
    77   ; CHECK: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[COPY]], [[COPY1]]
    78   ; CHECK: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[FMUL]], [[COPY2]]
    105  ; CHECK: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[UV]], [[UV2]]
    106  ; CHECK: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[FMUL]], [[UV4]]
|
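The fmad tests above (and the s16 variant further down) expand G_FMAD into a separate G_FMUL and G_FADD whenever a fused multiply-add is not available for the type. A minimal scalar model of that expansion, a sketch of mine rather than code from the tests (the function name is invented):

    #include <stdio.h>

    /* G_FMAD -> G_FMUL + G_FADD: unlike fma(a, b, c), the product is
     * rounded before the add, which is what the separate instructions in
     * the checks imply. */
    static float fmad_expanded(float a, float b, float c) {
        float mul = a * b;   /* G_FMUL: intermediate rounding happens here */
        return mul + c;      /* G_FADD */
    }

    int main(void) {
        printf("%f\n", fmad_expanded(1.5f, 2.0f, 0.25f));   /* prints 3.250000 */
        return 0;
    }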
D | legalize-fdiv.mir |
    35  ; SI: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[INT2]], [[FMA1]]
    36  ; SI: [[FMA2:%[0-9]+]]:_(s32) = G_FMA [[FNEG]], [[FMUL]], [[INT2]]
    37  ; SI: [[FMA3:%[0-9]+]]:_(s32) = G_FMA [[FMA2]], [[FMA1]], [[FMUL]]
    52  ; VI: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
    53  ; VI: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMUL]](s32)
    65  ; GFX9: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
    66  ; GFX9: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMUL]](s32)
    76  ; GFX9-UNSAFE: [[FMUL:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC]], [[INT]]
    77  ; GFX9-UNSAFE: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMUL]](s16)
    87  ; GFX10: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[INT]]
    [all …]
|
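The three SI checks above read like the quotient-refinement tail of a Newton-Raphson division. As a hedged reconstruction, with n the (scaled) numerator, d the (scaled) denominator and r an approximate reciprocal of d:

    q_0 = n \cdot r                        (the G_FMUL)
    e   = \mathrm{fma}(-d,\, q_0,\, n)     (first G_FMA, via the G_FNEG of d)
    q_1 = \mathrm{fma}(e,\, r,\, q_0)      (second G_FMA)

i.e. one correction of the initial quotient against its residual. The f16 lines instead promote to f32, multiply by a reciprocal obtained from an intrinsic, and truncate back; GFX9-UNSAFE keeps everything in s16.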
D | regbankselect-fmul.mir |
    17  ; CHECK: [[FMUL:%[0-9]+]]:vgpr(s32) = G_FMUL [[COPY2]], [[COPY3]]
    34  ; CHECK: [[FMUL:%[0-9]+]]:vgpr(s32) = G_FMUL [[COPY2]], [[COPY1]]
    51  ; CHECK: [[FMUL:%[0-9]+]]:vgpr(s32) = G_FMUL [[COPY]], [[COPY2]]
    67  ; CHECK: [[FMUL:%[0-9]+]]:vgpr(s32) = G_FMUL [[COPY]], [[COPY1]]
|
D | legalize-intrinsic-amdgcn-fdiv-fast.mir |
    19  ; CHECK: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY1]], [[SELECT]]
    20  ; CHECK: [[INT:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FMUL]](s32)
    45  ; CHECK: [[FMUL:%[0-9]+]]:_(s32) = nsz G_FMUL [[COPY1]], [[SELECT]]
    46  ; CHECK: [[INT:%[0-9]+]]:_(s32) = nsz G_INTRINSIC intrinsic(@llvm.amdgcn.rcp), [[FMUL]](s32)
|
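The fdiv-fast expansion above looks like the usual overflow-avoiding reciprocal trick. A hedged sketch of what the G_SELECT / G_FMUL / @llvm.amdgcn.rcp triple is computing:

    \frac{a}{b} \;\approx\; (a \cdot s)\cdot \mathrm{rcp}(b \cdot s)

where s is the value chosen by the G_SELECT (1.0, or a power of two when |b| sits near the end of the exponent range) so that b \cdot s stays where the hardware reciprocal approximation is accurate; the nsz run is the same sequence with the flag attached.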
D | legalize-fmad.s16.mir |
    26   ; GFX6: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[FPEXT1]]
    27   ; GFX6: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMUL]](s32)
    51   ; GFX10: [[FMUL:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC]], [[TRUNC1]]
    52   ; GFX10: [[FADD:%[0-9]+]]:_(s16) = G_FADD [[FMUL]], [[TRUNC2]]
    96   ; GFX6: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[FPEXT1]]
    97   ; GFX6: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMUL]](s32)
    158  ; GFX10: [[FMUL:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC]], [[TRUNC2]]
    159  ; GFX10: [[FADD:%[0-9]+]]:_(s16) = G_FADD [[FMUL]], [[TRUNC4]]
    218  ; GFX6: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[FPEXT]], [[FPEXT1]]
    219  ; GFX6: [[FPTRUNC:%[0-9]+]]:_(s16) = G_FPTRUNC [[FMUL]](s32)
    [all …]
|
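The GFX6 checks in the fmad.s16 test handle f16 by promotion: G_FPEXT both operands, multiply in f32, G_FPTRUNC back. A small model of the multiply half of that pattern (my sketch, not LLVM code; _Float16 support depends on the compiler and target):

    /* FPEXT / G_FMUL / FPTRUNC: the product of two 11-bit f16 significands
     * fits exactly in f32's 24-bit significand, so the f32 multiply is
     * exact and only the final conversion rounds; the result matches a
     * native half-precision multiply. */
    _Float16 fmul_f16_via_f32(_Float16 a, _Float16 b) {
        return (_Float16)((float)a * (float)b);
    }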
/external/apache-commons-bcel/src/main/java/org/apache/bcel/generic/ |
D | FMUL.java |
    26  public class FMUL extends ArithmeticInstruction { class
    30  public FMUL() { in FMUL() method in FMUL
    31  super(org.apache.bcel.Const.FMUL); in FMUL()
|
/external/XNNPACK/src/f16-gemm/gen-inc/ |
D | 6x16inc-minmax-aarch64-neonfp16arith-ld32.S |
    160  FMUL v20.8h, v20.8h, v6.8h
    163  FMUL v21.8h, v21.8h, v6.8h
    164  FMUL v22.8h, v22.8h, v6.8h
    165  FMUL v23.8h, v23.8h, v6.8h
    166  FMUL v24.8h, v24.8h, v6.8h
    167  FMUL v25.8h, v25.8h, v6.8h
    168  FMUL v26.8h, v26.8h, v6.8h
    169  FMUL v27.8h, v27.8h, v6.8h
    170  FMUL v28.8h, v28.8h, v6.8h
    171  FMUL v29.8h, v29.8h, v6.8h
    [all …]
|
D | 4x16inc-minmax-aarch64-neonfp16arith-ld32.S |
    115  FMUL v16.8h, v16.8h, v4.8h
    117  FMUL v17.8h, v17.8h, v4.8h
    118  FMUL v18.8h, v18.8h, v4.8h
    119  FMUL v19.8h, v19.8h, v4.8h
    120  FMUL v28.8h, v28.8h, v4.8h
    121  FMUL v29.8h, v29.8h, v4.8h
    122  FMUL v30.8h, v30.8h, v4.8h
    123  FMUL v31.8h, v31.8h, v4.8h
|
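The FMUL v*.8h runs above (and the identical blocks in the gen/ copies and the .S.in templates further down) multiply eight half-precision accumulator lanes by a broadcast vector, presumably the scale loaded from the microkernel's params block before the min/max clamping. A rough intrinsic-level equivalent of one such instruction, as a sketch only (assumes an ARMv8.2-A FP16 toolchain; the function name is invented, this is not XNNPACK code):

    #include <arm_neon.h>

    /* One "FMUL v20.8h, v20.8h, v6.8h" in C intrinsics: lane-wise fp16
     * multiply of an accumulator by a pre-broadcast scale vector.
     * Requires __ARM_FEATURE_FP16_VECTOR_ARITHMETIC. */
    static inline float16x8_t scale_acc_f16(float16x8_t acc, float16x8_t scale) {
        return vmulq_f16(acc, scale);
    }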
/external/llvm-project/llvm/test/CodeGen/AArch64/GlobalISel/ |
D | legalize-fp-arith.mir |
    68   ; CHECK: [[FMUL:%[0-9]+]]:_(<4 x s32>) = G_FMUL [[COPY]], [[COPY1]]
    69   ; CHECK: $q0 = COPY [[FMUL]](<4 x s32>)
    82   ; CHECK: [[FMUL:%[0-9]+]]:_(<2 x s64>) = G_FMUL [[DEF]], [[DEF]]
    84   ; CHECK: $q0 = COPY [[FMUL]](<2 x s64>)
    104  ; CHECK: [[FMUL:%[0-9]+]]:_(<4 x s32>) = G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR2]]
    106  ; CHECK: $q0 = COPY [[FMUL]](<4 x s32>)
|
/external/XNNPACK/src/f16-gemm/gen/ |
D | 6x16-minmax-aarch64-neonfp16arith-ld32.S |
    164  FMUL v20.8h, v20.8h, v6.8h
    167  FMUL v21.8h, v21.8h, v6.8h
    168  FMUL v22.8h, v22.8h, v6.8h
    169  FMUL v23.8h, v23.8h, v6.8h
    170  FMUL v24.8h, v24.8h, v6.8h
    171  FMUL v25.8h, v25.8h, v6.8h
    172  FMUL v26.8h, v26.8h, v6.8h
    173  FMUL v27.8h, v27.8h, v6.8h
    174  FMUL v28.8h, v28.8h, v6.8h
    175  FMUL v29.8h, v29.8h, v6.8h
    [all …]
|
D | 4x16-minmax-aarch64-neonfp16arith-ld32.S |
    115  FMUL v16.8h, v16.8h, v4.8h
    117  FMUL v17.8h, v17.8h, v4.8h
    118  FMUL v18.8h, v18.8h, v4.8h
    119  FMUL v19.8h, v19.8h, v4.8h
    120  FMUL v28.8h, v28.8h, v4.8h
    121  FMUL v29.8h, v29.8h, v4.8h
    122  FMUL v30.8h, v30.8h, v4.8h
    123  FMUL v31.8h, v31.8h, v4.8h
|
/external/llvm-project/llvm/test/CodeGen/X86/GlobalISel/ |
D | legalize-fmul-scalar.mir |
    41  ; CHECK: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[TRUNC]], [[TRUNC1]]
    42  ; CHECK: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[FMUL]](s32)
    80  ; CHECK: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[TRUNC]], [[TRUNC1]]
    81  ; CHECK: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[FMUL]](s64)
|
/external/llvm-project/llvm/test/CodeGen/Mips/GlobalISel/legalizer/ |
D | float_arithmetic_operations.mir |
    87   ; FP32: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[COPY1]]
    88   ; FP32: $f0 = COPY [[FMUL]](s32)
    94   ; FP64: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[COPY1]]
    95   ; FP64: $f0 = COPY [[FMUL]](s32)
    203  ; FP32: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[COPY]], [[COPY1]]
    204  ; FP32: $d0 = COPY [[FMUL]](s64)
    210  ; FP64: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[COPY]], [[COPY1]]
    211  ; FP64: $d0 = COPY [[FMUL]](s64)
|
/external/llvm-project/llvm/test/CodeGen/Mips/GlobalISel/regbankselect/ |
D | float_arithmetic_operations.mir |
    90   ; FP32: [[FMUL:%[0-9]+]]:fprb(s32) = G_FMUL [[COPY]], [[COPY1]]
    91   ; FP32: $f0 = COPY [[FMUL]](s32)
    97   ; FP64: [[FMUL:%[0-9]+]]:fprb(s32) = G_FMUL [[COPY]], [[COPY1]]
    98   ; FP64: $f0 = COPY [[FMUL]](s32)
    210  ; FP32: [[FMUL:%[0-9]+]]:fprb(s64) = G_FMUL [[COPY]], [[COPY1]]
    211  ; FP32: $d0 = COPY [[FMUL]](s64)
    217  ; FP64: [[FMUL:%[0-9]+]]:fprb(s64) = G_FMUL [[COPY]], [[COPY1]]
    218  ; FP64: $d0 = COPY [[FMUL]](s64)
|
/external/XNNPACK/src/f16-gemm/ |
D | 6x16-aarch64-neonfp16arith-ld32.S.in |
    177  FMUL v20.8h, v20.8h, v6.8h
    180  FMUL v21.8h, v21.8h, v6.8h
    181  FMUL v22.8h, v22.8h, v6.8h
    182  FMUL v23.8h, v23.8h, v6.8h
    183  FMUL v24.8h, v24.8h, v6.8h
    184  FMUL v25.8h, v25.8h, v6.8h
    185  FMUL v26.8h, v26.8h, v6.8h
    186  FMUL v27.8h, v27.8h, v6.8h
    187  FMUL v28.8h, v28.8h, v6.8h
    188  FMUL v29.8h, v29.8h, v6.8h
    [all …]
|
D | 4x16-aarch64-neonfp16arith-ld32.S.in |
    128  FMUL v16.8h, v16.8h, v4.8h
    130  FMUL v17.8h, v17.8h, v4.8h
    131  FMUL v18.8h, v18.8h, v4.8h
    132  FMUL v19.8h, v19.8h, v4.8h
    133  FMUL v28.8h, v28.8h, v4.8h
    134  FMUL v29.8h, v29.8h, v4.8h
    135  FMUL v30.8h, v30.8h, v4.8h
    136  FMUL v31.8h, v31.8h, v4.8h
|
/external/mesa3d/src/gallium/drivers/swr/rasterizer/jitter/ |
D | blend_jit.cpp |
    237  src[swizComp] = FADD(FMUL(src[swizComp], VIMMED1(factor)), VIMMED1(0.5f)); in Quantize()
    239  src[swizComp] = FMUL(src[swizComp], VIMMED1(1.0f / factor)); in Quantize()
    261  srcBlend[i] = FMUL(src[i], srcFactor[i]); in BlendFunc()
    262  dstBlend[i] = FMUL(dst[i], dstFactor[i]); in BlendFunc()
    472  Value* pAlphaU8 = FMUL(pAlpha, VIMMED1(256.0f)); in AlphaTest()
    636  currentSampleMask = FMUL(pClampedSrc, VBROADCAST(C((float)bits))); in Create()
    782  src[i] = FP_TO_SI(FMUL(src[i], VIMMED1(scale[i])), mSimdInt32Ty); in Create()
    783  dst[i] = FP_TO_SI(FMUL(dst[i], VIMMED1(scale[i])), mSimdInt32Ty); in Create()
    786  src[i] = FP_TO_UI(FMUL(src[i], VIMMED1(scale[i])), mSimdInt32Ty); in Create()
    787  dst[i] = FP_TO_UI(FMUL(dst[i], VIMMED1(scale[i])), mSimdInt32Ty); in Create()
    [all …]
|
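The BlendFunc() hits above are the two products of the standard blend equation, and the Quantize()/Create() hits are float-to-fixed-point conversions. As a hedged summary of what those FMULs feed into:

    \mathrm{out} = \mathrm{src}\cdot F_{\mathrm{src}} + \mathrm{dst}\cdot F_{\mathrm{dst}}

for blending (the corresponding FADD is elsewhere in the file), and roughly q = x \cdot \mathrm{scale} followed by a float-to-int conversion for quantization, where the VIMMED1(0.5f) added in Quantize() provides round-to-nearest before the truncating conversion.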
/external/llvm-project/llvm/test/MachineVerifier/ |
D | test_vector_reductions.mir |
    28  ; CHECK: Bad machine code: Sequential FADD/FMUL vector reduction requires a scalar 1st operand
    31  ; CHECK: Bad machine code: Sequential FADD/FMUL vector reduction must have a vector 2nd operand
|