Lines Matching refs:vi
11 vector int vi = { -1, 2, -3, 4 }; variable
53 vi = vec_abs(vi); // CHECK: sub <4 x i32> zeroinitializer in test1()
65 vi = vec_abss(vi); // CHECK: @llvm.ppc.altivec.vsubsws in test1()
81 res_vi = vec_add(vi, vi); // CHECK: add <4 x i32> in test1()
82 res_vi = vec_add(vbi, vi); // CHECK: add <4 x i32> in test1()
83 res_vi = vec_add(vi, vbi); // CHECK: add <4 x i32> in test1()
100 res_vi = vec_vadduwm(vi, vi); // CHECK: add <4 x i32> in test1()
101 res_vi = vec_vadduwm(vbi, vi); // CHECK: add <4 x i32> in test1()
102 res_vi = vec_vadduwm(vi, vbi); // CHECK: add <4 x i32> in test1()
125 res_vi = vec_adds(vi, vi); // CHECK: @llvm.ppc.altivec.vaddsws in test1()
126 res_vi = vec_adds(vbi, vi); // CHECK: @llvm.ppc.altivec.vaddsws in test1()
127 res_vi = vec_adds(vi, vbi); // CHECK: @llvm.ppc.altivec.vaddsws in test1()
143 res_vi = vec_vaddsws(vi, vi); // CHECK: @llvm.ppc.altivec.vaddsws in test1()
144 res_vi = vec_vaddsws(vbi, vi); // CHECK: @llvm.ppc.altivec.vaddsws in test1()
145 res_vi = vec_vaddsws(vi, vbi); // CHECK: @llvm.ppc.altivec.vaddsws in test1()
165 res_vi = vec_and(vi, vi); // CHECK: and <4 x i32> in test1()
166 res_vi = vec_and(vbi, vi); // CHECK: and <4 x i32> in test1()
167 res_vi = vec_and(vi, vbi); // CHECK: and <4 x i32> in test1()
186 res_vi = vec_vand(vi, vi); // CHECK: and <4 x i32> in test1()
187 res_vi = vec_vand(vbi, vi); // CHECK: and <4 x i32> in test1()
188 res_vi = vec_vand(vi, vbi); // CHECK: and <4 x i32> in test1()
237 res_vi = vec_andc(vi, vi); // CHECK: xor <4 x i32> in test1()
240 res_vi = vec_andc(vbi, vi); // CHECK: xor <4 x i32> in test1()
243 res_vi = vec_andc(vi, vbi); // CHECK: xor <4 x i32> in test1()
306 res_vi = vec_vandc(vi, vi); // CHECK: xor <4 x i32> in test1()
309 res_vi = vec_vandc(vbi, vi); // CHECK: xor <4 x i32> in test1()
312 res_vi = vec_vandc(vi, vbi); // CHECK: xor <4 x i32> in test1()
342 res_vi = vec_avg(vi, vi); // CHECK: @llvm.ppc.altivec.vavgsw in test2()
348 res_vi = vec_vavgsw(vi, vi); // CHECK: @llvm.ppc.altivec.vavgsw in test2()
364 res_vbi = vec_cmpeq(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw in test2()
381 res_vbi = vec_cmpgt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw in test5()
388 res_vbi = vec_vcmpgtsw(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw in test5()
403 res_vbi = vec_cmplt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw in test6()
408 res_vf = vec_ctf(vi, param_i); // CHECK: @llvm.ppc.altivec.vcfsx in test6()
410 res_vf = vec_vcfsx(vi, 0); // CHECK: @llvm.ppc.altivec.vcfsx in test6()
459 res_vi = vec_ld(0, &vi); // CHECK: @llvm.ppc.altivec.lvx in test6()
477 res_vi = vec_lvx(0, &vi); // CHECK: @llvm.ppc.altivec.lvx in test6()
490 res_vi = vec_lde(0, &vi); // CHECK: @llvm.ppc.altivec.lvewx in test6()
497 res_vi = vec_lvewx(0, &vi); // CHECK: @llvm.ppc.altivec.lvewx in test6()
513 res_vi = vec_ldl(0, &vi); // CHECK: @llvm.ppc.altivec.lvxl in test6()
531 res_vi = vec_lvxl(0, &vi); // CHECK: @llvm.ppc.altivec.lvxl in test6()
570 res_vi = vec_max(vi, vi); // CHECK: @llvm.ppc.altivec.vmaxsw in test6()
571 res_vi = vec_max(vbi, vi); // CHECK: @llvm.ppc.altivec.vmaxsw in test6()
572 res_vi = vec_max(vi, vbi); // CHECK: @llvm.ppc.altivec.vmaxsw in test6()
589 res_vi = vec_vmaxsw(vi, vi); // CHECK: @llvm.ppc.altivec.vmaxsw in test6()
590 res_vi = vec_vmaxsw(vbi, vi); // CHECK: @llvm.ppc.altivec.vmaxsw in test6()
591 res_vi = vec_vmaxsw(vi, vbi); // CHECK: @llvm.ppc.altivec.vmaxsw in test6()
605 res_vi = vec_mergeh(vi, vi); // CHECK: @llvm.ppc.altivec.vperm in test6()
616 res_vi = vec_vmrghw(vi, vi); // CHECK: @llvm.ppc.altivec.vperm in test6()
629 res_vi = vec_mergel(vi, vi); // CHECK: @llvm.ppc.altivec.vperm in test6()
640 res_vi = vec_vmrglw(vi, vi); // CHECK: @llvm.ppc.altivec.vperm in test6()
661 res_vi = vec_min(vi, vi); // CHECK: @llvm.ppc.altivec.vminsw in test6()
662 res_vi = vec_min(vbi, vi); // CHECK: @llvm.ppc.altivec.vminsw in test6()
663 res_vi = vec_min(vi, vbi); // CHECK: @llvm.ppc.altivec.vminsw in test6()
680 res_vi = vec_vminsw(vi, vi); // CHECK: @llvm.ppc.altivec.vminsw in test6()
681 res_vi = vec_vminsw(vbi, vi); // CHECK: @llvm.ppc.altivec.vminsw in test6()
682 res_vi = vec_vminsw(vi, vbi); // CHECK: @llvm.ppc.altivec.vminsw in test6()
706 res_vi = vec_msum(vsc, vuc, vi); // CHECK: @llvm.ppc.altivec.vmsummbm in test6()
708 res_vi = vec_msum(vs, vs, vi); // CHECK: @llvm.ppc.altivec.vmsumshm in test6()
710 res_vi = vec_vmsummbm(vsc, vuc, vi); // CHECK: @llvm.ppc.altivec.vmsummbm in test6()
712 res_vi = vec_vmsumshm(vs, vs, vi); // CHECK: @llvm.ppc.altivec.vmsumshm in test6()
716 res_vi = vec_msums(vs, vs, vi); // CHECK: @llvm.ppc.altivec.vmsumshs in test6()
718 res_vi = vec_vmsumshs(vs, vs, vi); // CHECK: @llvm.ppc.altivec.vmsumshs in test6()
729 vec_mtvscr(vi); // CHECK: @llvm.ppc.altivec.mtvscr in test6()
776 res_vi = vec_nor(vi, vi); // CHECK: or <4 x i32> in test6()
806 res_vi = vec_vnor(vi, vi); // CHECK: or <4 x i32> in test6()
833 res_vi = vec_or(vi, vi); // CHECK: or <4 x i32> in test6()
834 res_vi = vec_or(vbi, vi); // CHECK: or <4 x i32> in test6()
835 res_vi = vec_or(vi, vbi); // CHECK: or <4 x i32> in test6()
857 res_vi = vec_vor(vi, vi); // CHECK: or <4 x i32> in test6()
858 res_vi = vec_vor(vbi, vi); // CHECK: or <4 x i32> in test6()
859 res_vi = vec_vor(vi, vbi); // CHECK: or <4 x i32> in test6()
872 res_vs = vec_pack(vi, vi); // CHECK: @llvm.ppc.altivec.vperm in test6()
878 res_vs = vec_vpkuwum(vi, vi); // CHECK: @llvm.ppc.altivec.vperm in test6()
889 res_vs = vec_packs(vi, vi); // CHECK: @llvm.ppc.altivec.vpkswss in test6()
893 res_vs = vec_vpkswss(vi, vi); // CHECK: @llvm.ppc.altivec.vpkswss in test6()
899 res_vus = vec_packsu(vi, vi); // CHECK: @llvm.ppc.altivec.vpkswus in test6()
903 res_vus = vec_vpkswus(vi, vi); // CHECK: @llvm.ppc.altivec.vpkswus in test6()
914 res_vi = vec_perm(vi, vi, vuc); // CHECK: @llvm.ppc.altivec.vperm in test6()
925 res_vi = vec_vperm(vi, vi, vuc); // CHECK: @llvm.ppc.altivec.vperm in test6()
939 res_vi = vec_rl(vi, vui); // CHECK: @llvm.ppc.altivec.vrlw in test6()
945 res_vi = vec_vrlw(vi, vui); // CHECK: @llvm.ppc.altivec.vrlw in test6()
1017 res_vi = vec_sel(vi, vi, vui); // CHECK: xor <4 x i32> in test6()
1022 res_vi = vec_sel(vi, vi, vbi); // CHECK: xor <4 x i32> in test6()
1117 res_vi = vec_vsel(vi, vi, vui); // CHECK: xor <4 x i32> in test6()
1122 res_vi = vec_vsel(vi, vi, vbi); // CHECK: xor <4 x i32> in test6()
1162 res_vi = vec_sl(vi, vui); // CHECK: shl <4 x i32> in test6()
1168 res_vi = vec_vslw(vi, vui); // CHECK: shl <4 x i32> in test6()
1177 res_vi = vec_sld(vi, vi, 0); // CHECK: @llvm.ppc.altivec.vperm in test6()
1185 res_vi = vec_vsldoi(vi, vi, 0); // CHECK: @llvm.ppc.altivec.vperm in test6()
1211 res_vi = vec_sll(vi, vuc); // CHECK: @llvm.ppc.altivec.vsl in test6()
1212 res_vi = vec_sll(vi, vus); // CHECK: @llvm.ppc.altivec.vsl in test6()
1213 res_vi = vec_sll(vi, vui); // CHECK: @llvm.ppc.altivec.vsl in test6()
1241 res_vi = vec_vsl(vi, vuc); // CHECK: @llvm.ppc.altivec.vsl in test6()
1242 res_vi = vec_vsl(vi, vus); // CHECK: @llvm.ppc.altivec.vsl in test6()
1243 res_vi = vec_vsl(vi, vui); // CHECK: @llvm.ppc.altivec.vsl in test6()
1262 res_vi = vec_slo(vi, vsc); // CHECK: @llvm.ppc.altivec.vslo in test6()
1263 res_vi = vec_slo(vi, vuc); // CHECK: @llvm.ppc.altivec.vslo in test6()
1278 res_vi = vec_vslo(vi, vsc); // CHECK: @llvm.ppc.altivec.vslo in test6()
1279 res_vi = vec_vslo(vi, vuc); // CHECK: @llvm.ppc.altivec.vslo in test6()
1293 res_vi = vec_splat(vi, 0); // CHECK: @llvm.ppc.altivec.vperm in test6()
1304 res_vi = vec_vspltw(vi, 0); // CHECK: @llvm.ppc.altivec.vperm in test6()
1335 res_vi = vec_sr(vi, vui); // CHECK: shr <4 x i32> in test6()
1341 res_vi = vec_vsrw(vi, vui); // CHECK: shr <4 x i32> in test6()
1349 res_vi = vec_sra(vi, vui); // CHECK: @llvm.ppc.altivec.vsraw in test6()
1355 res_vi = vec_vsraw(vi, vui); // CHECK: @llvm.ppc.altivec.vsraw in test6()
1380 res_vi = vec_srl(vi, vuc); // CHECK: @llvm.ppc.altivec.vsr in test6()
1381 res_vi = vec_srl(vi, vus); // CHECK: @llvm.ppc.altivec.vsr in test6()
1382 res_vi = vec_srl(vi, vui); // CHECK: @llvm.ppc.altivec.vsr in test6()
1410 res_vi = vec_vsr(vi, vuc); // CHECK: @llvm.ppc.altivec.vsr in test6()
1411 res_vi = vec_vsr(vi, vus); // CHECK: @llvm.ppc.altivec.vsr in test6()
1412 res_vi = vec_vsr(vi, vui); // CHECK: @llvm.ppc.altivec.vsr in test6()
1431 res_vi = vec_sro(vi, vsc); // CHECK: @llvm.ppc.altivec.vsro in test6()
1432 res_vi = vec_sro(vi, vuc); // CHECK: @llvm.ppc.altivec.vsro in test6()
1447 res_vi = vec_vsro(vi, vsc); // CHECK: @llvm.ppc.altivec.vsro in test6()
1448 res_vi = vec_vsro(vi, vuc); // CHECK: @llvm.ppc.altivec.vsro in test6()
1472 vec_st(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.stvx in test6()
1473 vec_st(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvx in test6()
1498 vec_stvx(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.stvx in test6()
1499 vec_stvx(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvx in test6()
1519 vec_ste(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvewx in test6()
1534 vec_stvewx(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvewx in test6()
1558 vec_stl(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.stvxl in test6()
1559 vec_stl(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvxl in test6()
1584 vec_stvxl(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.stvxl in test6()
1585 vec_stvxl(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvxl in test6()
1607 res_vi = vec_sub(vi, vi); // CHECK: sub <4 x i32> in test6()
1608 res_vi = vec_sub(vbi, vi); // CHECK: sub <4 x i32> in test6()
1609 res_vi = vec_sub(vi, vbi); // CHECK: sub <4 x i32> in test6()
1626 res_vi = vec_vsubuwm(vi, vi); // CHECK: sub <4 x i32> in test6()
1627 res_vi = vec_vsubuwm(vbi, vi); // CHECK: sub <4 x i32> in test6()
1628 res_vi = vec_vsubuwm(vi, vbi); // CHECK: sub <4 x i32> in test6()
1651 res_vi = vec_subs(vi, vi); // CHECK: @llvm.ppc.altivec.vsubsws in test6()
1652 res_vi = vec_subs(vbi, vi); // CHECK: @llvm.ppc.altivec.vsubsws in test6()
1653 res_vi = vec_subs(vi, vbi); // CHECK: @llvm.ppc.altivec.vsubsws in test6()
1669 res_vi = vec_vsubsws(vi, vi); // CHECK: @llvm.ppc.altivec.vsubsws in test6()
1670 res_vi = vec_vsubsws(vbi, vi); // CHECK: @llvm.ppc.altivec.vsubsws in test6()
1671 res_vi = vec_vsubsws(vi, vbi); // CHECK: @llvm.ppc.altivec.vsubsws in test6()
1677 res_vi = vec_sum4s(vsc, vi); // CHECK: @llvm.ppc.altivec.vsum4sbs in test6()
1679 res_vi = vec_sum4s(vs, vi); // CHECK: @llvm.ppc.altivec.vsum4shs in test6()
1680 res_vi = vec_vsum4sbs(vsc, vi); // CHECK: @llvm.ppc.altivec.vsum4sbs in test6()
1682 res_vi = vec_vsum4shs(vs, vi); // CHECK: @llvm.ppc.altivec.vsum4shs in test6()
1685 res_vi = vec_sum2s(vi, vi); // CHECK: @llvm.ppc.altivec.vsum2sws in test6()
1686 res_vi = vec_vsum2sws(vi, vi); // CHECK: @llvm.ppc.altivec.vsum2sws in test6()
1689 res_vi = vec_sums(vi, vi); // CHECK: @llvm.ppc.altivec.vsumsws in test6()
1690 res_vi = vec_vsumsws(vi, vi); // CHECK: @llvm.ppc.altivec.vsumsws in test6()
1735 res_vi = vec_xor(vi, vi); // CHECK: xor <4 x i32> in test6()
1736 res_vi = vec_xor(vbi, vi); // CHECK: xor <4 x i32> in test6()
1737 res_vi = vec_xor(vi, vbi); // CHECK: xor <4 x i32> in test6()
1759 res_vi = vec_vxor(vi, vi); // CHECK: xor <4 x i32> in test6()
1760 res_vi = vec_vxor(vbi, vi); // CHECK: xor <4 x i32> in test6()
1761 res_vi = vec_vxor(vi, vbi); // CHECK: xor <4 x i32> in test6()
1777 res_i = vec_extract(vi, param_i); // CHECK: extractelement <4 x i32> in test6()
1786 res_vi = vec_insert(param_i, vi, param_i); // CHECK: insertelement <4 x i32> in test6()
1851 res_vi = vec_lvlx(0, &vi); // CHECK: @llvm.ppc.altivec.lvx in test6()
1937 res_vi = vec_lvlxl(0, &vi); // CHECK: @llvm.ppc.altivec.lvxl in test6()
2023 res_vi = vec_lvrx(0, &vi); // CHECK: @llvm.ppc.altivec.lvx in test6()
2109 res_vi = vec_lvrxl(0, &vi); // CHECK: @llvm.ppc.altivec.lvxl in test6()
2223 vec_stvlx(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.lvx in test6()
2231 vec_stvlx(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.lvx in test6()
2360 vec_stvlxl(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.lvx in test6()
2368 vec_stvlxl(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.lvx in test6()
2497 vec_stvrx(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.lvx in test6()
2505 vec_stvrx(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.lvx in test6()
2634 vec_stvrxl(vi, 0, &param_i); // CHECK: @llvm.ppc.altivec.lvx in test6()
2642 vec_stvrxl(vi, 0, &vi); // CHECK: @llvm.ppc.altivec.lvx in test6()
2737 res_i = vec_all_eq(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p in test6()
2738 res_i = vec_all_eq(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p in test6()
2741 res_i = vec_all_eq(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p in test6()
2761 res_i = vec_all_ge(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p in test6()
2762 res_i = vec_all_ge(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p in test6()
2765 res_i = vec_all_ge(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
2785 res_i = vec_all_gt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p in test6()
2786 res_i = vec_all_gt(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p in test6()
2789 res_i = vec_all_gt(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
2812 res_i = vec_all_le(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p in test6()
2813 res_i = vec_all_le(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p in test6()
2816 res_i = vec_all_le(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
2836 res_i = vec_all_lt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p in test6()
2837 res_i = vec_all_lt(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p in test6()
2840 res_i = vec_all_lt(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
2864 res_i = vec_all_ne(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p in test6()
2865 res_i = vec_all_ne(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p in test6()
2868 res_i = vec_all_ne(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p in test6()
2904 res_i = vec_any_eq(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p in test6()
2905 res_i = vec_any_eq(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p in test6()
2908 res_i = vec_any_eq(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p in test6()
2928 res_i = vec_any_ge(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p in test6()
2929 res_i = vec_any_ge(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p in test6()
2932 res_i = vec_any_ge(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
2952 res_i = vec_any_gt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p in test6()
2953 res_i = vec_any_gt(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p in test6()
2956 res_i = vec_any_gt(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
2976 res_i = vec_any_le(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p in test6()
2977 res_i = vec_any_le(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p in test6()
2980 res_i = vec_any_le(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
3000 res_i = vec_any_lt(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p in test6()
3001 res_i = vec_any_lt(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p in test6()
3004 res_i = vec_any_lt(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
3028 res_i = vec_any_ne(vi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p in test6()
3029 res_i = vec_any_ne(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p in test6()
3032 res_i = vec_any_ne(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p in test6()
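All of the hits above appear to come from a single Clang CodeGen test for the PowerPC AltiVec builtins, where each intrinsic call carries a FileCheck CHECK comment pinning down the LLVM IR it should lower to: element-wise operations check for plain IR instructions (add, sub, and, xor), while saturating and permute-style operations check for @llvm.ppc.altivec.* target intrinsics. A minimal self-contained sketch of that structure follows; the RUN line and function name are illustrative assumptions, not copied from the file.

// Minimal sketch of a FileCheck-style AltiVec codegen test, assuming a
// Clang-like setup; RUN line and names are illustrative, not the file's own.
// RUN: %clang_cc1 -triple powerpc-unknown-unknown -target-feature +altivec \
// RUN:   -emit-llvm %s -o - | FileCheck %s

#include <altivec.h>

vector int vi = { -1, 2, -3, 4 };  // the variable the refs:vi query matches
vector int res_vi;

void test_vi(void) {
  // Modular add of two signed word vectors lowers to a plain IR add.
  res_vi = vec_add(vi, vi);   // CHECK: add <4 x i32>
  // The saturating form maps to a target intrinsic instead.
  res_vi = vec_adds(vi, vi);  // CHECK: @llvm.ppc.altivec.vaddsws
}

Because FileCheck matches substrings, loose patterns such as "shr <4 x i32>" in the listing are deliberate: they match both lshr and ashr in the emitted IR.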