Lines Matching refs:vbi

10 vector bool int vbi = { 1, 0, 1, 0 };  variable
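
The matches below also reference harness variables (vi, vui, vf, vuc, vus, res_*, param_i, param_ui) whose declarations fall outside this listing. A minimal sketch of plausible declarations, inferred from the calls rather than copied from the test file (requires -maltivec):

    vector bool int vbi = { 1, 0, 1, 0 };
    vector int vi;              /* signed operand for vec_add, vec_max, ... */
    vector unsigned int vui;    /* unsigned operand */
    vector float vf;            /* float operand for vec_andc, vec_or, ... */
    vector unsigned char vuc;   /* permute mask / shift counts */
    vector unsigned short vus;  /* shift counts for vec_sll / vec_srl */
    vector int res_vi;          /* result slots written by each call */
    vector unsigned int res_vui;
    vector bool int res_vbi;
    vector bool short res_vbs;
    vector float res_vf;
    int res_i, param_i;
    unsigned int param_ui;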
82 res_vi = vec_add(vbi, vi); // CHECK: add <4 x i32> in test1()
83 res_vi = vec_add(vi, vbi); // CHECK: add <4 x i32> in test1()
85 res_vui = vec_add(vbi, vui); // CHECK: add <4 x i32> in test1()
86 res_vui = vec_add(vui, vbi); // CHECK: add <4 x i32> in test1()
101 res_vi = vec_vadduwm(vbi, vi); // CHECK: add <4 x i32> in test1()
102 res_vi = vec_vadduwm(vi, vbi); // CHECK: add <4 x i32> in test1()
104 res_vui = vec_vadduwm(vbi, vui); // CHECK: add <4 x i32> in test1()
105 res_vui = vec_vadduwm(vui, vbi); // CHECK: add <4 x i32> in test1()
126 res_vi = vec_adds(vbi, vi); // CHECK: @llvm.ppc.altivec.vaddsws in test1()
127 res_vi = vec_adds(vi, vbi); // CHECK: @llvm.ppc.altivec.vaddsws in test1()
129 res_vui = vec_adds(vbi, vui); // CHECK: @llvm.ppc.altivec.vadduws in test1()
130 res_vui = vec_adds(vui, vbi); // CHECK: @llvm.ppc.altivec.vadduws in test1()
144 res_vi = vec_vaddsws(vbi, vi); // CHECK: @llvm.ppc.altivec.vaddsws in test1()
145 res_vi = vec_vaddsws(vi, vbi); // CHECK: @llvm.ppc.altivec.vaddsws in test1()
147 res_vui = vec_vadduws(vbi, vui); // CHECK: @llvm.ppc.altivec.vadduws in test1()
148 res_vui = vec_vadduws(vui, vbi); // CHECK: @llvm.ppc.altivec.vadduws in test1()
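
Note the split above: vec_add and vec_vadduwm lower to a plain modular add <4 x i32>, while the saturating vec_adds / vec_vaddsws / vec_vadduws forms map to dedicated AltiVec intrinsics. A hedged scalar analogue of the signed saturating behavior, illustrative only and not taken from the test:

    #include <limits.h>

    /* Saturating 32-bit add: clamp instead of wrapping on overflow. */
    int adds_s32(int a, int b) {
      long long s = (long long)a + b;
      if (s > INT_MAX) return INT_MAX;
      if (s < INT_MIN) return INT_MIN;
      return (int)s;
    }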
166 res_vi = vec_and(vbi, vi); // CHECK: and <4 x i32> in test1()
167 res_vi = vec_and(vi, vbi); // CHECK: and <4 x i32> in test1()
169 res_vui = vec_and(vbi, vui); // CHECK: and <4 x i32> in test1()
170 res_vui = vec_and(vui, vbi); // CHECK: and <4 x i32> in test1()
171 res_vbi = vec_and(vbi, vbi); // CHECK: and <4 x i32> in test1()
187 res_vi = vec_vand(vbi, vi); // CHECK: and <4 x i32> in test1()
188 res_vi = vec_vand(vi, vbi); // CHECK: and <4 x i32> in test1()
190 res_vui = vec_vand(vbi, vui); // CHECK: and <4 x i32> in test1()
191 res_vui = vec_vand(vui, vbi); // CHECK: and <4 x i32> in test1()
192 res_vbi = vec_vand(vbi, vbi); // CHECK: and <4 x i32> in test1()
240 res_vi = vec_andc(vbi, vi); // CHECK: xor <4 x i32> in test1()
243 res_vi = vec_andc(vi, vbi); // CHECK: xor <4 x i32> in test1()
249 res_vui = vec_andc(vbi, vui); // CHECK: xor <4 x i32> in test1()
252 res_vui = vec_andc(vui, vbi); // CHECK: xor <4 x i32> in test1()
258 res_vf = vec_andc(vbi, vf); // CHECK: xor <4 x i32> in test1()
261 res_vf = vec_andc(vf, vbi); // CHECK: xor <4 x i32> in test1()
309 res_vi = vec_vandc(vbi, vi); // CHECK: xor <4 x i32> in test1()
312 res_vi = vec_vandc(vi, vbi); // CHECK: xor <4 x i32> in test1()
318 res_vui = vec_vandc(vbi, vui); // CHECK: xor <4 x i32> in test1()
321 res_vui = vec_vandc(vui, vbi); // CHECK: xor <4 x i32> in test1()
327 res_vf = vec_vandc(vbi, vf); // CHECK: xor <4 x i32> in test1()
330 res_vf = vec_vandc(vf, vbi); // CHECK: xor <4 x i32> in test1()
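
The vec_andc / vec_vandc matches above all CHECK for xor rather than and: andc computes a & ~b, and the complement is plausibly emitted as an xor against an all-ones vector, so the xor is the first instruction FileCheck sees. A scalar sketch of that identity:

    /* Hedged scalar analogue of the likely vec_andc lowering. */
    unsigned andc_u32(unsigned a, unsigned b) {
      return a & (b ^ ~0u);   /* a & ~b, with ~b realized as an xor */
    }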
463 res_vbi = vec_ld(0, &vbi); // CHECK: @llvm.ppc.altivec.lvx in test6()
481 res_vbi = vec_lvx(0, &vbi); // CHECK: @llvm.ppc.altivec.lvx in test6()
517 res_vbi = vec_ldl(0, &vbi); // CHECK: @llvm.ppc.altivec.lvxl in test6()
535 res_vbi = vec_lvxl(0, &vbi); // CHECK: @llvm.ppc.altivec.lvxl in test6()
571 res_vi = vec_max(vbi, vi); // CHECK: @llvm.ppc.altivec.vmaxsw in test6()
572 res_vi = vec_max(vi, vbi); // CHECK: @llvm.ppc.altivec.vmaxsw in test6()
574 res_vui = vec_max(vbi, vui); // CHECK: @llvm.ppc.altivec.vmaxuw in test6()
575 res_vui = vec_max(vui, vbi); // CHECK: @llvm.ppc.altivec.vmaxuw in test6()
590 res_vi = vec_vmaxsw(vbi, vi); // CHECK: @llvm.ppc.altivec.vmaxsw in test6()
591 res_vi = vec_vmaxsw(vi, vbi); // CHECK: @llvm.ppc.altivec.vmaxsw in test6()
593 res_vui = vec_vmaxuw(vbi, vui); // CHECK: @llvm.ppc.altivec.vmaxuw in test6()
594 res_vui = vec_vmaxuw(vui, vbi); // CHECK: @llvm.ppc.altivec.vmaxuw in test6()
607 res_vbi = vec_mergeh(vbi, vbi); // CHECK: @llvm.ppc.altivec.vperm in test6()
618 res_vbi = vec_vmrghw(vbi, vbi); // CHECK: @llvm.ppc.altivec.vperm in test6()
631 res_vbi = vec_mergel(vbi, vbi); // CHECK: @llvm.ppc.altivec.vperm in test6()
642 res_vbi = vec_vmrglw(vbi, vbi); // CHECK: @llvm.ppc.altivec.vperm in test6()
662 res_vi = vec_min(vbi, vi); // CHECK: @llvm.ppc.altivec.vminsw in test6()
663 res_vi = vec_min(vi, vbi); // CHECK: @llvm.ppc.altivec.vminsw in test6()
665 res_vui = vec_min(vbi, vui); // CHECK: @llvm.ppc.altivec.vminuw in test6()
666 res_vui = vec_min(vui, vbi); // CHECK: @llvm.ppc.altivec.vminuw in test6()
681 res_vi = vec_vminsw(vbi, vi); // CHECK: @llvm.ppc.altivec.vminsw in test6()
682 res_vi = vec_vminsw(vi, vbi); // CHECK: @llvm.ppc.altivec.vminsw in test6()
684 res_vui = vec_vminuw(vbi, vui); // CHECK: @llvm.ppc.altivec.vminuw in test6()
685 res_vui = vec_vminuw(vui, vbi); // CHECK: @llvm.ppc.altivec.vminuw in test6()
731 vec_mtvscr(vbi); // CHECK: @llvm.ppc.altivec.mtvscr in test6()
782 res_vui = vec_nor(vbi, vbi); // CHECK: or <4 x i32> in test6()
812 res_vui = vec_vnor(vbi, vbi); // CHECK: or <4 x i32> in test6()
834 res_vi = vec_or(vbi, vi); // CHECK: or <4 x i32> in test6()
835 res_vi = vec_or(vi, vbi); // CHECK: or <4 x i32> in test6()
837 res_vui = vec_or(vbi, vui); // CHECK: or <4 x i32> in test6()
838 res_vui = vec_or(vui, vbi); // CHECK: or <4 x i32> in test6()
839 res_vbi = vec_or(vbi, vbi); // CHECK: or <4 x i32> in test6()
841 res_vf = vec_or(vbi, vf); // CHECK: or <4 x i32> in test6()
842 res_vf = vec_or(vf, vbi); // CHECK: or <4 x i32> in test6()
858 res_vi = vec_vor(vbi, vi); // CHECK: or <4 x i32> in test6()
859 res_vi = vec_vor(vi, vbi); // CHECK: or <4 x i32> in test6()
861 res_vui = vec_vor(vbi, vui); // CHECK: or <4 x i32> in test6()
862 res_vui = vec_vor(vui, vbi); // CHECK: or <4 x i32> in test6()
863 res_vbi = vec_vor(vbi, vbi); // CHECK: or <4 x i32> in test6()
865 res_vf = vec_vor(vbi, vf); // CHECK: or <4 x i32> in test6()
866 res_vf = vec_vor(vf, vbi); // CHECK: or <4 x i32> in test6()
874 res_vbs = vec_pack(vbi, vbi); // CHECK: @llvm.ppc.altivec.vperm in test6()
880 res_vbs = vec_vpkuwum(vbi, vbi); // CHECK: @llvm.ppc.altivec.vperm in test6()
916 res_vbi = vec_perm(vbi, vbi, vuc); // CHECK: @llvm.ppc.altivec.vperm in test6()
927 res_vbi = vec_vperm(vbi, vbi, vuc); // CHECK: @llvm.ppc.altivec.vperm in test6()
1022 res_vi = vec_sel(vi, vi, vbi); // CHECK: xor <4 x i32> in test6()
1032 res_vui = vec_sel(vui, vui, vbi); // CHECK: xor <4 x i32> in test6()
1037 res_vbi = vec_sel(vbi, vbi, vui); // CHECK: xor <4 x i32> in test6()
1042 res_vbi = vec_sel(vbi, vbi, vbi); // CHECK: xor <4 x i32> in test6()
1052 res_vf = vec_sel(vf, vf, vbi); // CHECK: xor <4 x i32> in test6()
1122 res_vi = vec_vsel(vi, vi, vbi); // CHECK: xor <4 x i32> in test6()
1132 res_vui = vec_vsel(vui, vui, vbi); // CHECK: xor <4 x i32> in test6()
1137 res_vbi = vec_vsel(vbi, vbi, vui); // CHECK: xor <4 x i32> in test6()
1142 res_vbi = vec_vsel(vbi, vbi, vbi); // CHECK: xor <4 x i32> in test6()
1152 res_vf = vec_vsel(vf, vf, vbi); // CHECK: xor <4 x i32> in test6()
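
Similarly, the vec_sel / vec_vsel matches CHECK for xor because the usual bitwise-select lowering is (a & ~c) | (b & c), with ~c realized as an xor against all-ones. Scalar sketch, illustrative only:

    /* Bitwise select: take bits of b where c is set, bits of a elsewhere. */
    unsigned sel_u32(unsigned a, unsigned b, unsigned c) {
      return (a & (c ^ ~0u)) | (b & c);
    }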
1217 res_vbi = vec_sll(vbi, vuc); // CHECK: @llvm.ppc.altivec.vsl in test6()
1218 res_vbi = vec_sll(vbi, vus); // CHECK: @llvm.ppc.altivec.vsl in test6()
1219 res_vbi = vec_sll(vbi, vui); // CHECK: @llvm.ppc.altivec.vsl in test6()
1247 res_vbi = vec_vsl(vbi, vuc); // CHECK: @llvm.ppc.altivec.vsl in test6()
1248 res_vbi = vec_vsl(vbi, vus); // CHECK: @llvm.ppc.altivec.vsl in test6()
1249 res_vbi = vec_vsl(vbi, vui); // CHECK: @llvm.ppc.altivec.vsl in test6()
1295 res_vbi = vec_splat(vbi, 0); // CHECK: @llvm.ppc.altivec.vperm in test6()
1306 res_vbi = vec_vspltw(vbi, 0); // CHECK: @llvm.ppc.altivec.vperm in test6()
1386 res_vbi = vec_srl(vbi, vuc); // CHECK: @llvm.ppc.altivec.vsr in test6()
1387 res_vbi = vec_srl(vbi, vus); // CHECK: @llvm.ppc.altivec.vsr in test6()
1388 res_vbi = vec_srl(vbi, vui); // CHECK: @llvm.ppc.altivec.vsr in test6()
1416 res_vbi = vec_vsr(vbi, vuc); // CHECK: @llvm.ppc.altivec.vsr in test6()
1417 res_vbi = vec_vsr(vbi, vus); // CHECK: @llvm.ppc.altivec.vsr in test6()
1418 res_vbi = vec_vsr(vbi, vui); // CHECK: @llvm.ppc.altivec.vsr in test6()
1476 vec_st(vbi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvx in test6()
1477 vec_st(vbi, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvx in test6()
1478 vec_st(vbi, 0, &vbi); // CHECK: @llvm.ppc.altivec.stvx in test6()
1502 vec_stvx(vbi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvx in test6()
1503 vec_stvx(vbi, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvx in test6()
1504 vec_stvx(vbi, 0, &vbi); // CHECK: @llvm.ppc.altivec.stvx in test6()
1521 vec_ste(vbi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvewx in test6()
1522 vec_ste(vbi, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvewx in test6()
1536 vec_stvewx(vbi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvewx in test6()
1537 vec_stvewx(vbi, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvewx in test6()
1562 vec_stl(vbi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvxl in test6()
1563 vec_stl(vbi, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvxl in test6()
1564 vec_stl(vbi, 0, &vbi); // CHECK: @llvm.ppc.altivec.stvxl in test6()
1588 vec_stvxl(vbi, 0, &param_i); // CHECK: @llvm.ppc.altivec.stvxl in test6()
1589 vec_stvxl(vbi, 0, &param_ui); // CHECK: @llvm.ppc.altivec.stvxl in test6()
1590 vec_stvxl(vbi, 0, &vbi); // CHECK: @llvm.ppc.altivec.stvxl in test6()
1608 res_vi = vec_sub(vbi, vi); // CHECK: sub <4 x i32> in test6()
1609 res_vi = vec_sub(vi, vbi); // CHECK: sub <4 x i32> in test6()
1611 res_vui = vec_sub(vbi, vui); // CHECK: sub <4 x i32> in test6()
1612 res_vui = vec_sub(vui, vbi); // CHECK: sub <4 x i32> in test6()
1627 res_vi = vec_vsubuwm(vbi, vi); // CHECK: sub <4 x i32> in test6()
1628 res_vi = vec_vsubuwm(vi, vbi); // CHECK: sub <4 x i32> in test6()
1630 res_vui = vec_vsubuwm(vbi, vui); // CHECK: sub <4 x i32> in test6()
1631 res_vui = vec_vsubuwm(vui, vbi); // CHECK: sub <4 x i32> in test6()
1652 res_vi = vec_subs(vbi, vi); // CHECK: @llvm.ppc.altivec.vsubsws in test6()
1653 res_vi = vec_subs(vi, vbi); // CHECK: @llvm.ppc.altivec.vsubsws in test6()
1655 res_vui = vec_subs(vbi, vui); // CHECK: @llvm.ppc.altivec.vsubuws in test6()
1656 res_vui = vec_subs(vui, vbi); // CHECK: @llvm.ppc.altivec.vsubuws in test6()
1670 res_vi = vec_vsubsws(vbi, vi); // CHECK: @llvm.ppc.altivec.vsubsws in test6()
1671 res_vi = vec_vsubsws(vi, vbi); // CHECK: @llvm.ppc.altivec.vsubsws in test6()
1673 res_vui = vec_vsubuws(vbi, vui); // CHECK: @llvm.ppc.altivec.vsubuws in test6()
1674 res_vui = vec_vsubuws(vui, vbi); // CHECK: @llvm.ppc.altivec.vsubuws in test6()
1736 res_vi = vec_xor(vbi, vi); // CHECK: xor <4 x i32> in test6()
1737 res_vi = vec_xor(vi, vbi); // CHECK: xor <4 x i32> in test6()
1739 res_vui = vec_xor(vbi, vui); // CHECK: xor <4 x i32> in test6()
1740 res_vui = vec_xor(vui, vbi); // CHECK: xor <4 x i32> in test6()
1741 res_vbi = vec_xor(vbi, vbi); // CHECK: xor <4 x i32> in test6()
1743 res_vf = vec_xor(vbi, vf); // CHECK: xor <4 x i32> in test6()
1744 res_vf = vec_xor(vf, vbi); // CHECK: xor <4 x i32> in test6()
1760 res_vi = vec_vxor(vbi, vi); // CHECK: xor <4 x i32> in test6()
1761 res_vi = vec_vxor(vi, vbi); // CHECK: xor <4 x i32> in test6()
1763 res_vui = vec_vxor(vbi, vui); // CHECK: xor <4 x i32> in test6()
1764 res_vui = vec_vxor(vui, vbi); // CHECK: xor <4 x i32> in test6()
1765 res_vbi = vec_vxor(vbi, vbi); // CHECK: xor <4 x i32> in test6()
1767 res_vf = vec_vxor(vbi, vf); // CHECK: xor <4 x i32> in test6()
1768 res_vf = vec_vxor(vf, vbi); // CHECK: xor <4 x i32> in test6()
1866 res_vbi = vec_lvlx(0, &vbi); // CHECK: @llvm.ppc.altivec.lvx in test6()
1952 res_vbi = vec_lvlxl(0, &vbi); // CHECK: @llvm.ppc.altivec.lvxl in test6()
2038 res_vbi = vec_lvrx(0, &vbi); // CHECK: store <4 x i32> zeroinitializer in test6()
2124 res_vbi = vec_lvrxl(0, &vbi); // CHECK: store <4 x i32> zeroinitializer in test6()
2255 vec_stvlx(vbi, 0, &vbi); // CHECK: store <4 x i32> zeroinitializer in test6()
2392 vec_stvlxl(vbi, 0, &vbi); // CHECK: store <4 x i32> zeroinitializer in test6()
2529 vec_stvrx(vbi, 0, &vbi); // CHECK: @llvm.ppc.altivec.lvx in test6()
2666 vec_stvrxl(vbi, 0, &vbi); // CHECK: @llvm.ppc.altivec.lvx in test6()
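
The CHECK patterns for the vec_lvlx / vec_lvrx / vec_stvlx / vec_stvrx family above look odd at first (a load matching "store <4 x i32> zeroinitializer", a store matching lvx). These helpers are typically composed in altivec.h from simpler primitives: an aligned lvx load, an lvsl-generated alignment mask, and a permute against a zero vector, so the stored zeroinitializer and the extra lvx calls fall out of the composition. A hedged sketch of that shape, with names of my own rather than the header's:

    #include <altivec.h>

    /* Illustrative composition only; the real helpers live in altivec.h. */
    static vector bool int my_lvlx(int off, const vector bool int *p) {
      return (vector bool int)vec_perm(
          vec_ld(off, (const vector unsigned char *)p),  /* aligned lvx load */
          (vector unsigned char)(0),                     /* zero vector */
          vec_lvsl(off, (const unsigned char *)p));      /* alignment mask */
    }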
2738 res_i = vec_all_eq(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p in test6()
2740 res_i = vec_all_eq(vui, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p in test6()
2741 res_i = vec_all_eq(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p in test6()
2742 res_i = vec_all_eq(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpequw.p in test6()
2743 res_i = vec_all_eq(vbi, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p in test6()
2762 res_i = vec_all_ge(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p in test6()
2764 res_i = vec_all_ge(vui, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
2765 res_i = vec_all_ge(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
2766 res_i = vec_all_ge(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
2767 res_i = vec_all_ge(vbi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
2786 res_i = vec_all_gt(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p in test6()
2788 res_i = vec_all_gt(vui, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
2789 res_i = vec_all_gt(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
2790 res_i = vec_all_gt(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
2791 res_i = vec_all_gt(vbi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
2813 res_i = vec_all_le(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p in test6()
2815 res_i = vec_all_le(vui, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
2816 res_i = vec_all_le(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
2817 res_i = vec_all_le(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
2818 res_i = vec_all_le(vbi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
2837 res_i = vec_all_lt(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p in test6()
2839 res_i = vec_all_lt(vui, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
2840 res_i = vec_all_lt(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
2841 res_i = vec_all_lt(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
2842 res_i = vec_all_lt(vbi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
2865 res_i = vec_all_ne(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p in test6()
2867 res_i = vec_all_ne(vui, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p in test6()
2868 res_i = vec_all_ne(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p in test6()
2869 res_i = vec_all_ne(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpequw.p in test6()
2870 res_i = vec_all_ne(vbi, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p in test6()
2905 res_i = vec_any_eq(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p in test6()
2907 res_i = vec_any_eq(vui, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p in test6()
2908 res_i = vec_any_eq(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p in test6()
2909 res_i = vec_any_eq(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpequw.p in test6()
2910 res_i = vec_any_eq(vbi, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p in test6()
2929 res_i = vec_any_ge(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p in test6()
2931 res_i = vec_any_ge(vui, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
2932 res_i = vec_any_ge(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
2933 res_i = vec_any_ge(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
2934 res_i = vec_any_ge(vbi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
2953 res_i = vec_any_gt(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p in test6()
2955 res_i = vec_any_gt(vui, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
2956 res_i = vec_any_gt(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
2957 res_i = vec_any_gt(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
2958 res_i = vec_any_gt(vbi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
2977 res_i = vec_any_le(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p in test6()
2979 res_i = vec_any_le(vui, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
2980 res_i = vec_any_le(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
2981 res_i = vec_any_le(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
2982 res_i = vec_any_le(vbi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
3001 res_i = vec_any_lt(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtsw.p in test6()
3003 res_i = vec_any_lt(vui, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
3004 res_i = vec_any_lt(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
3005 res_i = vec_any_lt(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
3006 res_i = vec_any_lt(vbi, vbi); // CHECK: @llvm.ppc.altivec.vcmpgtuw.p in test6()
3029 res_i = vec_any_ne(vi, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p in test6()
3031 res_i = vec_any_ne(vui, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p in test6()
3032 res_i = vec_any_ne(vbi, vi); // CHECK: @llvm.ppc.altivec.vcmpequw.p in test6()
3033 res_i = vec_any_ne(vbi, vui); // CHECK: @llvm.ppc.altivec.vcmpequw.p in test6()
3034 res_i = vec_any_ne(vbi, vbi); // CHECK: @llvm.ppc.altivec.vcmpequw.p in test6()
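
All of these CHECK patterns are verified by piping Clang's LLVM IR output through FileCheck against the same source file. The authoritative RUN line lives at the top of the test file and may use a different triple or extra flags; a plausible driver-level invocation, assuming the usual test name, would be:

    clang -S -emit-llvm -maltivec -target powerpc-unknown-linux-gnu \
        builtins-ppc-altivec.c -o - | FileCheck builtins-ppc-altivec.c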