
Lines Matching refs:vec0

18 %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4
25 …%result = tail call <2 x half> @llvm.fma.v2f16(<2 x half> %vec0, <2 x half> %vec1, <2 x half> %sca…
46 %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4
54 …%result = tail call <2 x half> @llvm.fma.v2f16(<2 x half> %vec0, <2 x half> %vec1, <2 x half> %neg…
75 %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4
83 …%result = tail call <2 x half> @llvm.fma.v2f16(<2 x half> %vec0, <2 x half> %vec1, <2 x half> %neg…
104 %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4
113 …%result = tail call <2 x half> @llvm.fma.v2f16(<2 x half> %vec0, <2 x half> %vec1, <2 x half> %neg…
134 %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4
141 …%result = tail call <2 x half> @llvm.fma.v2f16(<2 x half> %vec0, <2 x half> %vec1, <2 x half> %neg…
162 %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4
169 …%result = tail call <2 x half> @llvm.fma.v2f16(<2 x half> %vec0, <2 x half> %vec1, <2 x half> %sca…
187 %vec0 = load volatile <2 x i16>, <2 x i16> addrspace(3)* %lds, align 4
195 %result = add <2 x i16> %vec0, %neg.scalar0.broadcast
217 %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4
226 …%result = tail call <2 x half> @llvm.fma.v2f16(<2 x half> %vec0, <2 x half> %vec1, <2 x half> %vec…
245 %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4
255 …%result = tail call <2 x half> @llvm.fma.v2f16(<2 x half> %vec0, <2 x half> %vec1, <2 x half> %neg…
276 %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4
283 …%result = tail call <2 x half> @llvm.fma.v2f16(<2 x half> %vec0, <2 x half> %vec1, <2 x half> %vec…
304 %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4
312 …%result = tail call <2 x half> @llvm.fma.v2f16(<2 x half> %vec0, <2 x half> %vec1, <2 x half> %neg…
331 %vec0 = load volatile <2 x i16>, <2 x i16> addrspace(3)* %lds, align 4
335 %result = add <2 x i16> %vec0, %vec1.elt1.broadcast
357 %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4
363 …%result = tail call <2 x half> @llvm.fma.v2f16(<2 x half> %vec0, <2 x half> %vec1, <2 x half> %vec…
385 %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4
394 …%result = tail call <2 x half> @llvm.fma.v2f16(<2 x half> %vec0, <2 x half> %vec1, <2 x half> %neg…
415 %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4
420 …%result = tail call <2 x half> @llvm.fma.v2f16(<2 x half> %vec0, <2 x half> %vec1, <2 x half> %vec…
443 %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4
449 …%result = tail call <2 x half> @llvm.fma.v2f16(<2 x half> %vec0, <2 x half> %vec1, <2 x half> %neg…
472 %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4
477 …%result = tail call <2 x half> @llvm.fma.v2f16(<2 x half> %vec0, <2 x half> %vec1, <2 x half> %com…
500 %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4
505 …%result = tail call <2 x half> @llvm.fma.v2f16(<2 x half> %vec0, <2 x half> %vec1, <2 x half> %com…
528 %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4
533 …%result = tail call <2 x half> @llvm.fma.v2f16(<2 x half> %vec0, <2 x half> %vec1, <2 x half> %com…
556 %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4
561 …%result = tail call <2 x half> @llvm.fma.v2f16(<2 x half> %vec0, <2 x half> %vec1, <2 x half> %com…
571 %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4
575 %result = fadd <2 x half> %vec0, %bc
585 %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4
591 %result = fadd <2 x half> %vec0, %shuf
601 %vec0 = load volatile <2 x i16>, <2 x i16> addrspace(3)* %lds, align 4
610 %result = add <2 x i16> %vec0, %ins1
634 %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4
645 …%result = tail call <2 x half> @llvm.fma.v2f16(<2 x half> %vec0, <2 x half> %vec1, <2 x half> %shu…
669 %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4
683 …%result = tail call <2 x half> @llvm.fma.v2f16(<2 x half> %vec0, <2 x half> %vec1, <2 x half> %ins…
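Taken together, the matches above all follow one recurring shape: %vec0 is a volatile load of a packed <2 x half> (or <2 x i16>) vector from LDS (addrspace(3)), and it then feeds either an @llvm.fma.v2f16 call or a vector add whose other operand is a broadcast, shuffled, or negated scalar. The listing truncates those other operands (%sca…, %neg…, %com…, and so on), so below is a minimal self-contained sketch of the pattern. It is an illustration only: the function names, the %scalar0 parameter, and the %bcast names are hypothetical and are not reconstructions of the truncated identifiers.

  ; Hedged sketch of the recurring pattern in the matched lines.
  ; All names other than %vec0/%lds are illustrative, not from the file.
  declare <2 x half> @llvm.fma.v2f16(<2 x half>, <2 x half>, <2 x half>)

  define <2 x half> @fma_vec_vec_broadcast(<2 x half> addrspace(3)* %lds, half %scalar0) {
    ; the volatile LDS loads that every match starts from
    %vec0 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4
    %vec1 = load volatile <2 x half>, <2 x half> addrspace(3)* %lds, align 4
    ; broadcast the scalar into both lanes of a packed vector
    %ins = insertelement <2 x half> undef, half %scalar0, i32 0
    %bcast = shufflevector <2 x half> %ins, <2 x half> undef, <2 x i32> zeroinitializer
    ; the packed-FMA form seen in most matches
    %result = tail call <2 x half> @llvm.fma.v2f16(<2 x half> %vec0, <2 x half> %vec1, <2 x half> %bcast)
    ret <2 x half> %result
  }

  define <2 x i16> @add_vec_broadcast(<2 x i16> addrspace(3)* %lds, i16 %scalar0) {
    ; the integer variant seen at lines 195, 335, and 610
    %vec0 = load volatile <2 x i16>, <2 x i16> addrspace(3)* %lds, align 4
    %ins = insertelement <2 x i16> undef, i16 %scalar0, i32 0
    %bcast = shufflevector <2 x i16> %ins, <2 x i16> undef, <2 x i32> zeroinitializer
    %result = add <2 x i16> %vec0, %bcast
    ret <2 x i16> %result
  }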