Lines Matching refs:vs
8 vector short vs = { -1, 2, -3, 4, -5, 6, -7, 8 }; variable
51 vs = vec_abs(vs); // CHECK: sub <8 x i16> zeroinitializer in test1()
63 vs = vec_abss(vs); // CHECK: @llvm.ppc.altivec.vsubshs in test1()
76 res_vs = vec_add(vs, vs); // CHECK: add <8 x i16> in test1()
77 res_vs = vec_add(vbs, vs); // CHECK: add <8 x i16> in test1()
78 res_vs = vec_add(vs, vbs); // CHECK: add <8 x i16> in test1()
95 res_vs = vec_vadduhm(vs, vs); // CHECK: add <8 x i16> in test1()
96 res_vs = vec_vadduhm(vbs, vs); // CHECK: add <8 x i16> in test1()
97 res_vs = vec_vadduhm(vs, vbs); // CHECK: add <8 x i16> in test1()
120 res_vs = vec_adds(vs, vs); // CHECK: @llvm.ppc.altivec.vaddshs in test1()
121 res_vs = vec_adds(vbs, vs); // CHECK: @llvm.ppc.altivec.vaddshs in test1()
122 res_vs = vec_adds(vs, vbs); // CHECK: @llvm.ppc.altivec.vaddshs in test1()
138 res_vs = vec_vaddshs(vs, vs); // CHECK: @llvm.ppc.altivec.vaddshs in test1()
139 res_vs = vec_vaddshs(vbs, vs); // CHECK: @llvm.ppc.altivec.vaddshs in test1()
140 res_vs = vec_vaddshs(vs, vbs); // CHECK: @llvm.ppc.altivec.vaddshs in test1()
159 res_vs = vec_and(vs, vs); // CHECK: and <8 x i16> in test1()
160 res_vs = vec_and(vbs, vs); // CHECK: and <8 x i16> in test1()
161 res_vs = vec_and(vs, vbs); // CHECK: and <8 x i16> in test1()
180 res_vs = vec_vand(vs, vs); // CHECK: and <8 x i16> in test1()
181 res_vs = vec_vand(vbs, vs); // CHECK: and <8 x i16> in test1()
182 res_vs = vec_vand(vs, vbs); // CHECK: and <8 x i16> in test1()
217 res_vs = vec_andc(vs, vs); // CHECK: xor <8 x i16> in test1()
220 res_vs = vec_andc(vbs, vs); // CHECK: xor <8 x i16> in test1()
223 res_vs = vec_andc(vs, vbs); // CHECK: xor <8 x i16> in test1()
286 res_vs = vec_vandc(vs, vs); // CHECK: xor <8 x i16> in test1()
289 res_vs = vec_vandc(vbs, vs); // CHECK: xor <8 x i16> in test1()
292 res_vs = vec_vandc(vs, vbs); // CHECK: xor <8 x i16> in test1()
341 res_vs = vec_avg(vs, vs); // CHECK: @llvm.ppc.altivec.vavgsh in test2()
347 res_vs = vec_vavgsh(vs, vs); // CHECK: @llvm.ppc.altivec.vavgsh in test2()
363 res_vbs = vec_cmpeq(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh in test2()
380 res_vbs = vec_cmpgt(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh in test5()
387 res_vbs = vec_vcmpgtsh(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh in test5()
402 res_vbs = vec_cmplt(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh in test6()
432 vec_dstst(&vs, 0, 0); // CHECK: @llvm.ppc.altivec.dstst in test6()
454 res_vs = vec_ld(0, &vs); // CHECK: @llvm.ppc.altivec.lvx in test6()
472 res_vs = vec_lvx(0, &vs); // CHECK: @llvm.ppc.altivec.lvx in test6()
508 res_vs = vec_ldl(0, &vs); // CHECK: @llvm.ppc.altivec.lvxl in test6()
526 res_vs = vec_lvxl(0, &vs); // CHECK: @llvm.ppc.altivec.lvxl in test6()
555 res_vs = vec_madds(vs, vs, vs); // CHECK: @llvm.ppc.altivec.vmhaddshs in test6()
556 res_vs = vec_vmhaddshs(vs, vs, vs); // CHECK: @llvm.ppc.altivec.vmhaddshs in test6()
565 res_vs = vec_max(vs, vs); // CHECK: @llvm.ppc.altivec.vmaxsh in test6()
566 res_vs = vec_max(vbs, vs); // CHECK: @llvm.ppc.altivec.vmaxsh in test6()
567 res_vs = vec_max(vs, vbs); // CHECK: @llvm.ppc.altivec.vmaxsh in test6()
584 res_vs = vec_vmaxsh(vs, vs); // CHECK: @llvm.ppc.altivec.vmaxsh in test6()
585 res_vs = vec_vmaxsh(vbs, vs); // CHECK: @llvm.ppc.altivec.vmaxsh in test6()
586 res_vs = vec_vmaxsh(vs, vbs); // CHECK: @llvm.ppc.altivec.vmaxsh in test6()
602 res_vs = vec_mergeh(vs, vs); // CHECK: @llvm.ppc.altivec.vperm in test6()
613 res_vs = vec_vmrghh(vs, vs); // CHECK: @llvm.ppc.altivec.vperm in test6()
626 res_vs = vec_mergel(vs, vs); // CHECK: @llvm.ppc.altivec.vperm in test6()
637 res_vs = vec_vmrglh(vs, vs); // CHECK: @llvm.ppc.altivec.vperm in test6()
656 res_vs = vec_min(vs, vs); // CHECK: @llvm.ppc.altivec.vminsh in test6()
657 res_vs = vec_min(vbs, vs); // CHECK: @llvm.ppc.altivec.vminsh in test6()
658 res_vs = vec_min(vs, vbs); // CHECK: @llvm.ppc.altivec.vminsh in test6()
675 res_vs = vec_vminsh(vs, vs); // CHECK: @llvm.ppc.altivec.vminsh in test6()
676 res_vs = vec_vminsh(vbs, vs); // CHECK: @llvm.ppc.altivec.vminsh in test6()
677 res_vs = vec_vminsh(vs, vbs); // CHECK: @llvm.ppc.altivec.vminsh in test6()
693 res_vs = vec_mladd(vus, vs, vs); // CHECK: mul <8 x i16> in test6()
696 res_vs = vec_mladd(vs, vus, vus); // CHECK: mul <8 x i16> in test6()
699 res_vs = vec_mladd(vs, vs, vs); // CHECK: mul <8 x i16> in test6()
703 res_vs = vec_mradds(vs, vs, vs); // CHECK: @llvm.ppc.altivec.vmhraddshs in test6()
704 res_vs = vec_vmhraddshs(vs, vs, vs); // CHECK: @llvm.ppc.altivec.vmhraddshs in test6()
709 res_vi = vec_msum(vs, vs, vi); // CHECK: @llvm.ppc.altivec.vmsumshm in test6()
713 res_vi = vec_vmsumshm(vs, vs, vi); // CHECK: @llvm.ppc.altivec.vmsumshm in test6()
717 res_vi = vec_msums(vs, vs, vi); // CHECK: @llvm.ppc.altivec.vmsumshs in test6()
719 res_vi = vec_vmsumshs(vs, vs, vi); // CHECK: @llvm.ppc.altivec.vmsumshs in test6()
726 vec_mtvscr(vs); // CHECK: @llvm.ppc.altivec.mtvscr in test6()
737 res_vi = vec_mule(vs, vs); // CHECK: @llvm.ppc.altivec.vmulesh in test6()
741 res_vi = vec_vmulesh(vs, vs); // CHECK: @llvm.ppc.altivec.vmulesh in test6()
747 res_vi = vec_mulo(vs, vs); // CHECK: @llvm.ppc.altivec.vmulosh in test6()
751 res_vi = vec_vmulosh(vs, vs); // CHECK: @llvm.ppc.altivec.vmulosh in test6()
768 res_vs = vec_nor(vs, vs); // CHECK: or <8 x i16> in test6()
798 res_vs = vec_vnor(vs, vs); // CHECK: or <8 x i16> in test6()
827 res_vs = vec_or(vs, vs); // CHECK: or <8 x i16> in test6()
828 res_vs = vec_or(vbs, vs); // CHECK: or <8 x i16> in test6()
829 res_vs = vec_or(vs, vbs); // CHECK: or <8 x i16> in test6()
851 res_vs = vec_vor(vs, vs); // CHECK: or <8 x i16> in test6()
852 res_vs = vec_vor(vbs, vs); // CHECK: or <8 x i16> in test6()
853 res_vs = vec_vor(vs, vbs); // CHECK: or <8 x i16> in test6()
870 res_vsc = vec_pack(vs, vs); // CHECK: @llvm.ppc.altivec.vperm in test6()
876 res_vsc = vec_vpkuhum(vs, vs); // CHECK: @llvm.ppc.altivec.vperm in test6()
888 res_vsc = vec_packs(vs, vs); // CHECK: @llvm.ppc.altivec.vpkshss in test6()
892 res_vsc = vec_vpkshss(vs, vs); // CHECK: @llvm.ppc.altivec.vpkshss in test6()
898 res_vuc = vec_packsu(vs, vs); // CHECK: @llvm.ppc.altivec.vpkshus in test6()
902 res_vuc = vec_vpkshus(vs, vs); // CHECK: @llvm.ppc.altivec.vpkshus in test6()
911 res_vs = vec_perm(vs, vs, vuc); // CHECK: @llvm.ppc.altivec.vperm in test6()
922 res_vs = vec_vperm(vs, vs, vuc); // CHECK: @llvm.ppc.altivec.vperm in test6()
938 res_vs = vec_rl(vs, vus); // CHECK: @llvm.ppc.altivec.vrlh in test6()
944 res_vs = vec_vrlh(vs, vus); // CHECK: @llvm.ppc.altivec.vrlh in test6()
988 res_vs = vec_sel(vs, vs, vus); // CHECK: xor <8 x i16> in test6()
993 res_vs = vec_sel(vs, vs, vbs); // CHECK: xor <8 x i16> in test6()
1088 res_vs = vec_vsel(vs, vs, vus); // CHECK: xor <8 x i16> in test6()
1093 res_vs = vec_vsel(vs, vs, vbs); // CHECK: xor <8 x i16> in test6()
1161 res_vs = vec_sl(vs, vus); // CHECK: shl <8 x i16> in test6()
1167 res_vs = vec_vslh(vs, vus); // CHECK: shl <8 x i16> in test6()
1175 res_vs = vec_sld(vs, vs, 0); // CHECK: @llvm.ppc.altivec.vperm in test6()
1183 res_vs = vec_vsldoi(vs, vs, 0); // CHECK: @llvm.ppc.altivec.vperm in test6()
1200 res_vs = vec_sll(vs, vuc); // CHECK: @llvm.ppc.altivec.vsl in test6()
1201 res_vs = vec_sll(vs, vus); // CHECK: @llvm.ppc.altivec.vsl in test6()
1202 res_vs = vec_sll(vs, vui); // CHECK: @llvm.ppc.altivec.vsl in test6()
1230 res_vs = vec_vsl(vs, vuc); // CHECK: @llvm.ppc.altivec.vsl in test6()
1231 res_vs = vec_vsl(vs, vus); // CHECK: @llvm.ppc.altivec.vsl in test6()
1232 res_vs = vec_vsl(vs, vui); // CHECK: @llvm.ppc.altivec.vsl in test6()
1257 res_vs = vec_slo(vs, vsc); // CHECK: @llvm.ppc.altivec.vslo in test6()
1258 res_vs = vec_slo(vs, vuc); // CHECK: @llvm.ppc.altivec.vslo in test6()
1273 res_vs = vec_vslo(vs, vsc); // CHECK: @llvm.ppc.altivec.vslo in test6()
1274 res_vs = vec_vslo(vs, vuc); // CHECK: @llvm.ppc.altivec.vslo in test6()
1290 res_vs = vec_splat(vs, 0); // CHECK: @llvm.ppc.altivec.vperm in test6()
1301 res_vs = vec_vsplth(vs, 0); // CHECK: @llvm.ppc.altivec.vperm in test6()
1334 res_vs = vec_sr(vs, vus); // CHECK: shr <8 x i16> in test6()
1340 res_vs = vec_vsrh(vs, vus); // CHECK: shr <8 x i16> in test6()
1348 res_vs = vec_sra(vs, vus); // CHECK: @llvm.ppc.altivec.vsrah in test6()
1354 res_vs = vec_vsrah(vs, vus); // CHECK: @llvm.ppc.altivec.vsrah in test6()
1369 res_vs = vec_srl(vs, vuc); // CHECK: @llvm.ppc.altivec.vsr in test6()
1370 res_vs = vec_srl(vs, vus); // CHECK: @llvm.ppc.altivec.vsr in test6()
1371 res_vs = vec_srl(vs, vui); // CHECK: @llvm.ppc.altivec.vsr in test6()
1399 res_vs = vec_vsr(vs, vuc); // CHECK: @llvm.ppc.altivec.vsr in test6()
1400 res_vs = vec_vsr(vs, vus); // CHECK: @llvm.ppc.altivec.vsr in test6()
1401 res_vs = vec_vsr(vs, vui); // CHECK: @llvm.ppc.altivec.vsr in test6()
1426 res_vs = vec_sro(vs, vsc); // CHECK: @llvm.ppc.altivec.vsro in test6()
1427 res_vs = vec_sro(vs, vuc); // CHECK: @llvm.ppc.altivec.vsro in test6()
1442 res_vs = vec_vsro(vs, vsc); // CHECK: @llvm.ppc.altivec.vsro in test6()
1443 res_vs = vec_vsro(vs, vuc); // CHECK: @llvm.ppc.altivec.vsro in test6()
1463 vec_st(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.stvx in test6()
1464 vec_st(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvx in test6()
1489 vec_stvx(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.stvx in test6()
1490 vec_stvx(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvx in test6()
1514 vec_ste(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvehx in test6()
1529 vec_stvehx(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvehx in test6()
1549 vec_stl(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.stvxl in test6()
1550 vec_stl(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvxl in test6()
1575 vec_stvxl(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.stvxl in test6()
1576 vec_stvxl(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.stvxl in test6()
1602 res_vs = vec_sub(vs, vs); // CHECK: sub <8 x i16> in test6()
1603 res_vs = vec_sub(vbs, vs); // CHECK: sub <8 x i16> in test6()
1604 res_vs = vec_sub(vs, vbs); // CHECK: sub <8 x i16> in test6()
1621 res_vs = vec_vsubuhm(vs, vs); // CHECK: sub <8 x i16> in test6()
1646 res_vs = vec_subs(vs, vs); // CHECK: @llvm.ppc.altivec.vsubshs in test6()
1647 res_vs = vec_subs(vbs, vs); // CHECK: @llvm.ppc.altivec.vsubshs in test6()
1648 res_vs = vec_subs(vs, vbs); // CHECK: @llvm.ppc.altivec.vsubshs in test6()
1664 res_vs = vec_vsubshs(vs, vs); // CHECK: @llvm.ppc.altivec.vsubshs in test6()
1665 res_vs = vec_vsubshs(vbs, vs); // CHECK: @llvm.ppc.altivec.vsubshs in test6()
1666 res_vs = vec_vsubshs(vs, vbs); // CHECK: @llvm.ppc.altivec.vsubshs in test6()
1680 res_vi = vec_sum4s(vs, vi); // CHECK: @llvm.ppc.altivec.vsum4shs in test6()
1683 res_vi = vec_vsum4shs(vs, vi); // CHECK: @llvm.ppc.altivec.vsum4shs in test6()
1700 res_vi = vec_unpackh(vs); // CHECK: @llvm.ppc.altivec.vupkhsh in test6()
1705 res_vi = vec_vupkhsh(vs); // CHECK: @llvm.ppc.altivec.vupkhsh in test6()
1712 res_vi = vec_unpackl(vs); // CHECK: @llvm.ppc.altivec.vupklsh in test6()
1717 res_vi = vec_vupklsh(vs); // CHECK: @llvm.ppc.altivec.vupklsh in test6()
1729 res_vs = vec_xor(vs, vs); // CHECK: xor <8 x i16> in test6()
1730 res_vs = vec_xor(vbs, vs); // CHECK: xor <8 x i16> in test6()
1731 res_vs = vec_xor(vs, vbs); // CHECK: xor <8 x i16> in test6()
1753 res_vs = vec_vxor(vs, vs); // CHECK: xor <8 x i16> in test6()
1754 res_vs = vec_vxor(vbs, vs); // CHECK: xor <8 x i16> in test6()
1755 res_vs = vec_vxor(vs, vbs); // CHECK: xor <8 x i16> in test6()
1776 res_s = vec_extract(vs, param_i); // CHECK: extractelement <8 x i16> in test6()
1785 res_vs = vec_insert(param_s, vs, param_i); // CHECK: insertelement <8 x i16> in test6()
1822 res_vs = vec_lvlx(0, &vs); // CHECK: @llvm.ppc.altivec.lvx in test6()
1908 res_vs = vec_lvlxl(0, &vs); // CHECK: @llvm.ppc.altivec.lvxl in test6()
1994 res_vs = vec_lvrx(0, &vs); // CHECK: @llvm.ppc.altivec.lvx in test6()
2080 res_vs = vec_lvrxl(0, &vs); // CHECK: @llvm.ppc.altivec.lvxl in test6()
2176 vec_stvlx(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.lvx in test6()
2184 vec_stvlx(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.lvx in test6()
2313 vec_stvlxl(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.lvx in test6()
2321 vec_stvlxl(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.lvx in test6()
2450 vec_stvrx(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.lvx in test6()
2458 vec_stvrx(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.lvx in test6()
2587 vec_stvrxl(vs, 0, &param_s); // CHECK: @llvm.ppc.altivec.lvx in test6()
2595 vec_stvrxl(vs, 0, &vs); // CHECK: @llvm.ppc.altivec.lvx in test6()
2730 res_i = vec_all_eq(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p in test6()
2731 res_i = vec_all_eq(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p in test6()
2734 res_i = vec_all_eq(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p in test6()
2755 res_i = vec_all_ge(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p in test6()
2756 res_i = vec_all_ge(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p in test6()
2759 res_i = vec_all_ge(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p in test6()
2779 res_i = vec_all_gt(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p in test6()
2780 res_i = vec_all_gt(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p in test6()
2783 res_i = vec_all_gt(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p in test6()
2806 res_i = vec_all_le(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p in test6()
2807 res_i = vec_all_le(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p in test6()
2810 res_i = vec_all_le(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p in test6()
2830 res_i = vec_all_lt(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p in test6()
2831 res_i = vec_all_lt(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p in test6()
2834 res_i = vec_all_lt(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p in test6()
2857 res_i = vec_all_ne(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p in test6()
2858 res_i = vec_all_ne(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p in test6()
2861 res_i = vec_all_ne(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p in test6()
2897 res_i = vec_any_eq(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p in test6()
2898 res_i = vec_any_eq(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p in test6()
2901 res_i = vec_any_eq(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p in test6()
2922 res_i = vec_any_ge(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p in test6()
2923 res_i = vec_any_ge(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p in test6()
2926 res_i = vec_any_ge(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p in test6()
2946 res_i = vec_any_gt(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p in test6()
2947 res_i = vec_any_gt(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p in test6()
2950 res_i = vec_any_gt(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p in test6()
2970 res_i = vec_any_le(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p in test6()
2971 res_i = vec_any_le(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p in test6()
2974 res_i = vec_any_le(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p in test6()
2994 res_i = vec_any_lt(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p in test6()
2995 res_i = vec_any_lt(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpgtsh.p in test6()
2998 res_i = vec_any_lt(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpgtuh.p in test6()
3021 res_i = vec_any_ne(vs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p in test6()
3022 res_i = vec_any_ne(vs, vbs); // CHECK: @llvm.ppc.altivec.vcmpequh.p in test6()
3025 res_i = vec_any_ne(vbs, vs); // CHECK: @llvm.ppc.altivec.vcmpequh.p in test6()
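
Each entry above pairs an AltiVec intrinsic call from the test file with the FileCheck pattern its generated LLVM IR must contain. As a minimal, self-contained sketch of how one such check is driven (the RUN line, triple, and flags below are assumptions typical of a Clang CodeGen lit test, not copied from the file being searched):

// RUN: %clang_cc1 -faltivec -triple powerpc-unknown-unknown -emit-llvm %s -o - | FileCheck %s
// NOTE: RUN line and flags are an assumed example of the usual test harness setup.
#include <altivec.h>

vector short vs = { -1, 2, -3, 4, -5, 6, -7, 8 };
vector short res_vs;

void test1() {
  // vec_add on two vector shorts should lower to a plain IR add.
  res_vs = vec_add(vs, vs); // CHECK: add <8 x i16>
}

FileCheck scans the IR that clang emits for the file and fails the test if a quoted pattern (for example "add <8 x i16>" or "@llvm.ppc.altivec.vaddshs") never appears; that is how every line in the listing above is verified.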