Lines Matching refs:v8f16
106 %aa = tail call fast <8 x half> @llvm.fabs.v8f16(<8 x half> %a)
107 %bb = tail call fast <8 x half> @llvm.fabs.v8f16(<8 x half> %b)
108 %c = tail call fast <8 x half> @llvm.maxnum.v8f16(<8 x half> %aa, <8 x half> %bb)
117 %aa = tail call fast <8 x half> @llvm.fabs.v8f16(<8 x half> %a)
118 %bb = tail call fast <8 x half> @llvm.fabs.v8f16(<8 x half> %b)
119 %c = tail call fast <8 x half> @llvm.maxnum.v8f16(<8 x half> %bb, <8 x half> %aa)
128 %aa = tail call fast <8 x half> @llvm.fabs.v8f16(<8 x half> %a)
129 %bb = tail call fast <8 x half> @llvm.fabs.v8f16(<8 x half> %b)
130 %c = tail call fast <8 x half> @llvm.minnum.v8f16(<8 x half> %aa, <8 x half> %bb)
139 %aa = tail call fast <8 x half> @llvm.fabs.v8f16(<8 x half> %a)
140 %bb = tail call fast <8 x half> @llvm.fabs.v8f16(<8 x half> %b)
141 %c = tail call fast <8 x half> @llvm.minnum.v8f16(<8 x half> %bb, <8 x half> %aa)
152 …%s = tail call fast <8 x half> @llvm.arm.mve.vmaxnma.predicated.v8f16.v8i1(<8 x half> %a, <8 x hal…
164 …%s = tail call fast <8 x half> @llvm.arm.mve.vmaxnma.predicated.v8f16.v8i1(<8 x half> %b, <8 x hal…
175 …%s = tail call fast <8 x half> @llvm.arm.mve.vminnma.predicated.v8f16.v8i1(<8 x half> %a, <8 x hal…
187 …%s = tail call fast <8 x half> @llvm.arm.mve.vminnma.predicated.v8f16.v8i1(<8 x half> %b, <8 x hal…
422 %13 = tail call fast <8 x half> @llvm.fabs.v8f16(<8 x half> %11)
423 %14 = tail call fast <8 x half> @llvm.fabs.v8f16(<8 x half> %8)
424 %15 = tail call fast <8 x half> @llvm.maxnum.v8f16(<8 x half> %14, <8 x half> %13)
431 %20 = tail call fast half @llvm.arm.mve.maxnmav.f16.v8f16(half 0.000000e+00, <8 x half> %19)
472 %13 = tail call fast <8 x half> @llvm.fabs.v8f16(<8 x half> %11)
473 %14 = tail call fast <8 x half> @llvm.fabs.v8f16(<8 x half> %8)
474 %15 = tail call fast <8 x half> @llvm.maxnum.v8f16(<8 x half> %13, <8 x half> %14)
481 %20 = tail call fast half @llvm.arm.mve.maxnmav.f16.v8f16(half 0.000000e+00, <8 x half> %19)
516 …%10 = tail call fast <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %9, i32 4, <8 x i1> %8…
518 …%12 = tail call fast <8 x half> @llvm.arm.mve.vmaxnma.predicated.v8f16.v8i1(<8 x half> %5, <8 x ha…
524 %16 = tail call fast half @llvm.arm.mve.maxnmav.f16.v8f16(half 0.000000e+00, <8 x half> %12)
560 …%10 = tail call fast <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %9, i32 4, <8 x i1> %8…
562 …%12 = tail call fast <8 x half> @llvm.arm.mve.vmaxnma.predicated.v8f16.v8i1(<8 x half> %10, <8 x h…
568 %16 = tail call fast half @llvm.arm.mve.maxnmav.f16.v8f16(half 0.000000e+00, <8 x half> %12)
587 declare <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>*, i32 immarg, <8 x i1>, <8 x half>)
588 declare <8 x half> @llvm.arm.mve.vminnma.predicated.v8f16.v8i1(<8 x half>, <8 x half>, <8 x i1>)
589 declare <8 x half> @llvm.arm.mve.vmaxnma.predicated.v8f16.v8i1(<8 x half>, <8 x half>, <8 x i1>)
590 declare half @llvm.arm.mve.maxnmav.f16.v8f16(half, <8 x half>)
591 declare <8 x half> @llvm.fabs.v8f16(<8 x half>)
592 declare <8 x half> @llvm.maxnum.v8f16(<8 x half>, <8 x half>)
593 declare <8 x half> @llvm.minnum.v8f16(<8 x half>, <8 x half>)
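Below is a minimal sketch (not part of the matched lines above) showing how these fragments fit into complete functions, using the declarations listed above. The function names, the arm_aapcs_vfpcc calling convention, and the %p predicate argument are assumptions for illustration; only the instruction shapes are taken from the listing.

define arm_aapcs_vfpcc <8 x half> @sketch_vmaxnma(<8 x half> %a, <8 x half> %b) {
entry:
  ; fabs of both operands feeding maxnum, as in the fragments at lines 106-108:
  ; the shape the MVE backend can select to VMAXNMA.
  %aa = tail call fast <8 x half> @llvm.fabs.v8f16(<8 x half> %a)
  %bb = tail call fast <8 x half> @llvm.fabs.v8f16(<8 x half> %b)
  %c = tail call fast <8 x half> @llvm.maxnum.v8f16(<8 x half> %aa, <8 x half> %bb)
  ret <8 x half> %c
}

define arm_aapcs_vfpcc <8 x half> @sketch_vmaxnma_pred(<8 x half> %a, <8 x half> %b, <8 x i1> %p) {
entry:
  ; Full (untruncated) call shape of the predicated intrinsic, per its declaration at line 589.
  %s = tail call fast <8 x half> @llvm.arm.mve.vmaxnma.predicated.v8f16.v8i1(<8 x half> %a, <8 x half> %b, <8 x i1> %p)
  ret <8 x half> %s
}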