Searched refs:fmad (Results 1 – 25 of 40) sorted by relevance
10 fmad z0.h, p7/m, z1.h, z31.h
16 fmad z0.s, p7/m, z1.s, z31.s
22 fmad z0.d, p7/m, z1.d, z31.d
38 fmad z0.d, p7/m, z1.d, z31.d
50 fmad z0.d, p7/m, z1.d, z31.d
7 fmad z0.h, p8/m, z1.h, z2.h
16 fmad z0.s, p7/m, z1.h, z2.h
21 fmad z0.b, p7/m, z1.b, z2.b
30 fmad z0.h, p7/m, z1.h, z2.h[0]
27 %3:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.fmad.ftz), %0, %1, %2
51 %3:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.fmad.ftz), %0, %1, %2
75 %3:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.fmad.ftz), %0, %1, %2
100 %3:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.fmad.ftz), %0, %1, %2
124 %2:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.fmad.ftz), %0, %0, %1
147 %2:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.fmad.ftz), %0, %1, %0
170 %2:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.fmad.ftz), %1, %0, %0
191 %1:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.fmad.ftz), %0, %0, %0
210 # %3:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.fmad.ftz), %0, %1, %2
235 %4:vgpr(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.fmad.ftz), %0, %1, %3
6 declare float @llvm.amdgcn.fmad.ftz.f32(float %a, float %b, float %c)
18 %r.val = call float @llvm.amdgcn.fmad.ftz.f32(float %a.val, float %b.val, float %c.val)
31 %r.val = call float @llvm.amdgcn.fmad.ftz.f32(float 8.0, float %b.val, float %c.val)
45 %r.val = call float @llvm.amdgcn.fmad.ftz.f32(float %a.val, float 8.0, float %c.val)
62 %r.val = call float @llvm.amdgcn.fmad.ftz.f32(float %a.val, float %b.val, float 8.0)
78 %r.val = call float @llvm.amdgcn.fmad.ftz.f32(float %a.val, float %neg.b, float %c.val)
94 %r.val = call float @llvm.amdgcn.fmad.ftz.f32(float %a.val, float %abs.b, float %c.val)
111 %r.val = call float @llvm.amdgcn.fmad.ftz.f32(float %a.val, float %neg.abs.b, float %c.val)
5 declare half @llvm.amdgcn.fmad.ftz.f16(half %a, half %b, half %c)
17 %r.val = call half @llvm.amdgcn.fmad.ftz.f16(half %a.val, half %b.val, half %c.val)
30 %r.val = call half @llvm.amdgcn.fmad.ftz.f16(half 8.0, half %b.val, half %c.val)
43 %r.val = call half @llvm.amdgcn.fmad.ftz.f16(half %a.val, half 8.0, half %c.val)
56 %r.val = call half @llvm.amdgcn.fmad.ftz.f16(half %a.val, half %b.val, half 8.0)
73 %r.val = call half @llvm.amdgcn.fmad.ftz.f16(half %a.val, half %neg.b, half %c.val)
90 %r.val = call half @llvm.amdgcn.fmad.ftz.f16(half %a.val, half %abs.b, half %c.val)
108 %r.val = call half @llvm.amdgcn.fmad.ftz.f16(half %a.val, half %neg.abs.b, half %c.val)
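The two files above exercise the same AMDGPU intrinsic at f32 and f16. As a reading aid, here is a minimal, self-contained sketch of a call to the f32 form; only the intrinsic name and signature are taken from the matches above, while the wrapper function and value names are my own.

; Sketch only: roughly a * b + c using the flush-to-zero multiply-add
; intrinsic; the wrapper @fmad_ftz_example is hypothetical.
declare float @llvm.amdgcn.fmad.ftz.f32(float, float, float)

define float @fmad_ftz_example(float %a, float %b, float %c) {
  %r = call float @llvm.amdgcn.fmad.ftz.f32(float %a, float %b, float %c)
  ret float %r
}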
131 %fmad = fadd contract float %mul1, %mul.ext
132 %add = fadd float %fmad, %z
296 ; fold (fsub (fmad x, y, (fpext (fmul u, v))), z)
297 ; -> (fmad x, y (fmad (fpext u), (fpext v), (fneg z)))
319 ; fold (fsub (fpext (fmad x, y, (fmul u, v))), z)
320 ; -> (fmad (fpext x), (fpext y),
321 ; (fmad (fpext u), (fpext v), (fneg z)))
338 ; fold (fsub x, (fmad y, z, (fpext (fmul u, v))))
339 ; -> (fmad (fneg y), z, (fmad (fneg (fpext u)), (fpext v), x))
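The commented fold rules above describe SelectionDAG rewrites. As a rough illustration only, the IR below has the shape targeted by the first rule, fsub (fmad x, y, (fpext (fmul u, v))), z; the function name, value names, and fast-math flags are my own assumptions, and the actual fold runs on DAG nodes rather than on IR.

; Sketch of the input shape: an fsub of a fused multiply-add whose addend is
; an extended half-precision product. Names and flags are illustrative.
define float @fmad_fold_shape(float %x, float %y, half %u, half %v, float %z) {
  %uv = fmul fast half %u, %v
  %uv.ext = fpext half %uv to float
  %mad = call fast float @llvm.fmuladd.f32(float %x, float %y, float %uv.ext)
  %sub = fsub fast float %mad, %z
  ret float %sub
}

declare float @llvm.fmuladd.f32(float, float, float)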
16 ; CHECK-NEXT: [[TMP6:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP5]], float [[T…
311 ; CHECK-NEXT: [[TMP9:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP8]], float [[T…
358 ; CHECK-NEXT: [[TMP9:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP8]], float [[T…
412 ; CHECK-NEXT: [[TMP12:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP11]], float […
468 ; CHECK-NEXT: [[TMP12:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP11]], float […
525 ; CHECK-NEXT: [[TMP9:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP8]], float [[T…
570 ; CHECK-NEXT: [[TMP9:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP8]], float [[T…
623 ; CHECK-NEXT: [[TMP12:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP11]], float […
679 ; CHECK-NEXT: [[TMP12:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP11]], float […
1739 ; CHECK-NEXT: [[TMP11:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP10]], float […
1759 ; CHECK-NEXT: [[TMP31:%.*]] = call fast float @llvm.amdgcn.fmad.ftz.f32(float [[TMP30]], float […
[all …]
14 ; Check for incorrect fmad formation when distributing
788 %fmad = call float @llvm.fmuladd.f32(float %x, float 4.0, float %z)
789 %fneg = fsub float -0.0, %fmad
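The matched lines negate the result of llvm.fmuladd. For context, a self-contained version of that two-line pattern might look like the sketch below; the enclosing function is hypothetical.

; Sketch: fneg of a fused multiply-add result, written the way the matched
; test writes it (fsub from -0.0).
define float @fneg_fmad_example(float %x, float %z) {
  %fmad = call float @llvm.fmuladd.f32(float %x, float 4.0, float %z)
  %fneg = fsub float -0.0, %fmad
  ret float %fneg
}

declare float @llvm.fmuladd.f32(float, float, float)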
148 %v = call float @llvm.amdgcn.fmad.ftz.f32(float %load, float 15.0, float 15.0)
884 declare float @llvm.amdgcn.fmad.ftz.f32(float, float, float) #0
70 - gk110/ir: add dnz flag emission for fmul/fmad
81 def : GINodeEquiv<G_FMAD, fmad>;
97 def : GINodeEquiv<G_FMAD, fmad>;
316 ; CHECK: fmad z0.h, p0/m, z1.h, z2.h
318 %out = call <vscale x 8 x half> @llvm.aarch64.sve.fmad.nxv8f16(<vscale x 8 x i1> %pg,
327 ; CHECK: fmad z0.s, p0/m, z1.s, z2.s
329 %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmad.nxv4f32(<vscale x 4 x i1> %pg,
338 ; CHECK: fmad z0.d, p0/m, z1.d, z2.d
340 %out = call <vscale x 2 x double> @llvm.aarch64.sve.fmad.nxv2f64(<vscale x 2 x i1> %pg,
1547 declare <vscale x 8 x half> @llvm.aarch64.sve.fmad.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>, …
1548 declare <vscale x 4 x float> @llvm.aarch64.sve.fmad.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>…
1549 declare <vscale x 2 x double> @llvm.aarch64.sve.fmad.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x doubl…
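The three declare lines above are truncated in this listing. Assuming the usual predicated three-vector form (per-lane zn = za + zn * zm), a complete call might look like the sketch below; treat the full operand list and the wrapper function as my reconstruction, not as text from the matched file.

; Sketch: predicated SVE fused multiply-add; operand order (pg, zn, zm, za)
; is an assumption, as is the wrapper @sve_fmad_example.
declare <vscale x 4 x float> @llvm.aarch64.sve.fmad.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)

define <vscale x 4 x float> @sve_fmad_example(<vscale x 4 x i1> %pg, <vscale x 4 x float> %zn, <vscale x 4 x float> %zm, <vscale x 4 x float> %za) {
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.fmad.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %zn, <vscale x 4 x float> %zm, <vscale x 4 x float> %za)
  ret <vscale x 4 x float> %out
}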
127 by passing the "--fmad=false" option.
387 __ fmad(z14.VnD(), p6.Merging(), z14.VnD(), z3.VnD());
390 __ fmad(z2.VnS(), p5.Merging(), z14.VnS(), z2.VnS());
897 __ fmad(z22.VnH(), p0.Merging(), z27.VnH(), z15.VnH());
1698 __ fmad(z31.VnS(), p5.Merging(), z23.VnS(), z11.VnS());
672 [(fmad node:$src0, node:$src1, node:$src2),
293 def V_MAD_F32 : VOP3Inst <"v_mad_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, fmad>;
488 def V_MAD_F16 : VOP3Inst <"v_mad_f16", VOP3_Profile<VOP_F16_F16_F16_F16>, fmad>;
184 defm : MadFmaMixPats<fmad, V_MAD_MIX_F32, V_MAD_MIXLO_F16, V_MAD_MIXHI_F16>;
879 def : FMADPat <f32, V_MAC_F32_e64, fmad>;
907 def : FMADPat <f16, V_MAC_F16_e64, fmad>;
290 def V_MAD_F32 : VOP3Inst <"v_mad_f32", VOP3_Profile<VOP_F32_F32_F32_F32>, fmad>;
453 def V_MAD_F16 : VOP3Inst <"v_mad_f16", VOP3_Profile<VOP_F16_F16_F16_F16>, fmad>;
161 defm : MadFmaMixPats<fmad, V_MAD_MIX_F32, V_MAD_MIXLO_F16, V_MAD_MIXHI_F16>;
1837 &Assembler::fmad,
431 def fmad : SDNode<"ISD::FMAD" , SDTFPTernaryOp>;