Searched refs:v8i16 (Results 1 – 25 of 241) sorted by relevance

/external/libvpx/libvpx/vp8/encoder/mips/msa/
denoising_msa.c
36 v8i16 diff0, diff1, abs_diff0, abs_diff1, abs_diff_neg0, abs_diff_neg1; in vp8_denoiser_filter_msa()
37 v8i16 adjust0, adjust1, adjust2, adjust3; in vp8_denoiser_filter_msa()
38 v8i16 shift_inc1_vec = { 0 }; in vp8_denoiser_filter_msa()
39 v8i16 col_sum0 = { 0 }; in vp8_denoiser_filter_msa()
40 v8i16 col_sum1 = { 0 }; in vp8_denoiser_filter_msa()
41 v8i16 col_sum2 = { 0 }; in vp8_denoiser_filter_msa()
42 v8i16 col_sum3 = { 0 }; in vp8_denoiser_filter_msa()
43 v8i16 temp0_h, temp1_h, temp2_h, temp3_h, cmp, delta_vec; in vp8_denoiser_filter_msa()
46 v8i16 zero = { 0 }; in vp8_denoiser_filter_msa()
47 v8i16 one = __msa_ldi_h(1); in vp8_denoiser_filter_msa()
[all …]
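
In the denoiser hits above, v8i16 is the GCC/Clang MIPS MSA vector type holding eight signed 16-bit lanes, and most of the matches are local declarations that zero- or splat-initialize such vectors. Below is a minimal sketch of that pattern, assuming a MIPS toolchain with MSA enabled (-mmsa) and the <msa.h> intrinsics header; the function name is illustrative, not part of the library.

/* Hypothetical sketch of the declaration/initialization pattern seen in the
 * denoising_msa.c hits; not code from that file. */
#include <msa.h>

static v8i16 v8i16_decl_sketch(void) {
  v8i16 col_sum0 = { 0 };      /* brace init: all eight 16-bit lanes zero */
  v8i16 one = __msa_ldi_h(1);  /* ldi.h: splat the immediate 1            */
  return col_sum0 + one;       /* GCC vector extensions: lane-wise add    */
}
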
quantize_msa.c
23 v8i16 round0, round1; in fast_quantize_b_msa()
24 v8i16 sign_z0, sign_z1; in fast_quantize_b_msa()
25 v8i16 q_coeff0, q_coeff1; in fast_quantize_b_msa()
26 v8i16 x0, x1, de_quant0, de_quant1; in fast_quantize_b_msa()
27 v8i16 coeff0, coeff1, z0, z1; in fast_quantize_b_msa()
28 v8i16 quant0, quant1, quant2, quant3; in fast_quantize_b_msa()
29 v8i16 zero = { 0 }; in fast_quantize_b_msa()
30 v8i16 inv_zig_zag0, inv_zig_zag1; in fast_quantize_b_msa()
31 v8i16 zigzag_mask0 = { 0, 1, 4, 8, 5, 2, 3, 6 }; in fast_quantize_b_msa()
32 v8i16 zigzag_mask1 = { 9, 12, 13, 10, 7, 11, 14, 15 }; in fast_quantize_b_msa()
[all …]
dct_msa.c
16 v8i16 s0_m, s1_m, tp0_m, tp1_m, tp2_m, tp3_m; \
28 v8i16 tmp0_m; \
36 v8i16 tmp0_m; \
37 v8i16 one_m = __msa_ldi_h(1); \
71 v8i16 in0, in1, in2, in3; in vp8_short_fdct4x4_msa()
72 v8i16 temp0, temp1; in vp8_short_fdct4x4_msa()
73 v8i16 const0, const1; in vp8_short_fdct4x4_msa()
74 v8i16 coeff = { 2217, 5352, -5352, 14500, 7500, 12000, 25000, 26000 }; in vp8_short_fdct4x4_msa()
76 v8i16 zero = { 0 }; in vp8_short_fdct4x4_msa()
118 v8i16 in0, in1, in2, in3; in vp8_short_fdct8x4_msa()
[all …]
/external/llvm/test/CodeGen/PowerPC/
vaddsplat.ll
9 %v8i16 = type <8 x i16>
34 define void @test_v8i16_pos_even(%v8i16* %P, %v8i16* %S) {
35 %p = load %v8i16, %v8i16* %P
36 %r = add %v8i16 %p, < i16 30, i16 30, i16 30, i16 30, i16 30, i16 30, i16 30, i16 30 >
37 store %v8i16 %r, %v8i16* %S
45 define void @test_v8i16_neg_even(%v8i16* %P, %v8i16* %S) {
46 %p = load %v8i16, %v8i16* %P
47 … %r = add %v8i16 %p, < i16 -32, i16 -32, i16 -32, i16 -32, i16 -32, i16 -32, i16 -32, i16 -32 >
48 store %v8i16 %r, %v8i16* %S
102 define void @test_v8i16_pos_odd(%v8i16* %P, %v8i16* %S) {
[all …]
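
The PowerPC test above loads an <8 x i16> value, adds a splatted constant, and stores the result; the checks verify that the splat is materialized cheaply. A rough C equivalent follows, written with GCC/Clang generic vector extensions (the typedef and function name are illustrative, not taken from the test).

/* Hypothetical C counterpart of test_v8i16_pos_even in vaddsplat.ll: add a
 * constant that is broadcast to every 16-bit lane. */
typedef short v8i16_t __attribute__((vector_size(16)));

void add_splat_30(const v8i16_t *P, v8i16_t *S) {
  v8i16_t splat = { 30, 30, 30, 30, 30, 30, 30, 30 };
  *S = *P + splat;  /* lane-wise i16 add, like the IR's add %v8i16 */
}
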
/external/libvpx/libvpx/vp8/common/mips/msa/
vp8_macros_msa.h
25 #define LD_SH(...) LD_H(v8i16, __VA_ARGS__)
37 #define ST_SH(...) ST_H(v8i16, __VA_ARGS__)
338 #define LD_SH2(...) LD_H2(v8i16, __VA_ARGS__)
345 #define LD_SH4(...) LD_H4(v8i16, __VA_ARGS__)
396 #define ST_SH2(...) ST_H2(v8i16, __VA_ARGS__)
425 out0_m = __msa_copy_u_h((v8i16)in, (stidx)); \
426 out1_m = __msa_copy_u_h((v8i16)in, (stidx + 1)); \
427 out2_m = __msa_copy_u_h((v8i16)in, (stidx + 2)); \
428 out3_m = __msa_copy_u_h((v8i16)in, (stidx + 3)); \
594 out0 = (RTYPE)__msa_vshf_h((v8i16)mask0, (v8i16)in1, (v8i16)in0); \
[all …]
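
The LD_SH/ST_SH hits above instantiate generic halfword load/store macros with the v8i16 element type, so callers can move eight signed 16-bit values at a time. A hypothetical illustration of that wrapper pattern follows; the real LD_H/ST_H bodies live elsewhere in the header and may differ from this sketch.

/* Illustration only: typed 128-bit halfword load/store in the style of the
 * LD_SH/ST_SH wrappers above. Assumes 16-byte-aligned pointers and a MIPS
 * build with MSA enabled. */
#include <stdint.h>
#include <msa.h>

#define LD_H(RTYPE, psrc)      (*(const RTYPE *)(psrc))
#define ST_H(RTYPE, in, pdst)  (*(RTYPE *)(pdst) = (in))

#define LD_SH(...) LD_H(v8i16, __VA_ARGS__)  /* load 8 signed halfwords  */
#define ST_SH(...) ST_H(v8i16, __VA_ARGS__)  /* store 8 signed halfwords */

static void copy_eight_halfwords(const int16_t *src, int16_t *dst) {
  v8i16 v = LD_SH(src);
  ST_SH(v, dst);
}
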
idct_msa.c
20 v8i16 s4_m, s5_m, s6_m, s7_m; \
24 out1 = (v8i16)__msa_ilvl_d((v2i64)s6_m, (v2i64)s4_m); \
25 out3 = (v8i16)__msa_ilvl_d((v2i64)s7_m, (v2i64)s5_m); \
30 v8i16 out_m; \
31 v8i16 zero_m = { 0 }; \
40 out_m = __msa_pckev_h((v8i16)tmp2_m, (v8i16)tmp1_m); \
47 v8i16 a1_m, b1_m, c1_m, d1_m; \
48 v8i16 c_tmp1_m, c_tmp2_m, d_tmp1_m, d_tmp2_m; \
49 v8i16 const_cospi8sqrt2minus1_m; \
90 v8i16 input0, input1; in idct4x4_addblk_msa()
[all …]
sixtap_filter_msa.c
41 v8i16 hz_out_m; \
92 v8i16 tmp0; \
103 v8i16 hz_out_m; \
148 v8i16 filt, out0, out1; in common_hz_6t_4x4_msa()
175 v8i16 filt, out0, out1, out2, out3; in common_hz_6t_4x8_msa()
225 v8i16 filt, out0, out1, out2, out3; in common_hz_6t_8w_msa()
271 v8i16 filt, out0, out1, out2, out3, out4, out5, out6, out7; in common_hz_6t_16w_msa()
321 v8i16 filt, out10, out32; in common_vt_6t_4w_msa()
368 v8i16 filt, out0_r, out1_r, out2_r, out3_r; in common_vt_6t_8w_msa()
419 v8i16 out0_r, out1_r, out2_r, out3_r, out0_l, out1_l, out2_l, out3_l, filt; in common_vt_6t_16w_msa()
[all …]
/external/libvpx/libvpx/vpx_dsp/mips/
loopfilter_16_msa.c
24 v8i16 p2_filt8_r, p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r; in vpx_hz_lpf_t4_and_t8_16w()
25 v8i16 p2_filt8_l, p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l; in vpx_hz_lpf_t4_and_t8_16w()
91 v8i16 l_out, r_out; in vpx_hz_lpf_t16_16w()
127 r_out = __msa_srari_h((v8i16)tmp1_r, 4); in vpx_hz_lpf_t16_16w()
146 l_out = __msa_srari_h((v8i16)tmp1_l, 4); in vpx_hz_lpf_t16_16w()
148 r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out); in vpx_hz_lpf_t16_16w()
159 r_out = __msa_srari_h((v8i16)tmp1_r, 4); in vpx_hz_lpf_t16_16w()
166 l_out = __msa_srari_h((v8i16)tmp1_l, 4); in vpx_hz_lpf_t16_16w()
168 r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out); in vpx_hz_lpf_t16_16w()
179 r_out = (v8i16)__msa_srari_h((v8i16)tmp1_r, 4); in vpx_hz_lpf_t16_16w()
[all …]
macros_msa.h
25 #define LD_SH(...) LD_H(v8i16, __VA_ARGS__)
35 #define ST_SH(...) ST_H(v8i16, __VA_ARGS__)
348 #define LD_SH2(...) LD_H2(v8i16, __VA_ARGS__)
354 #define LD_SH4(...) LD_H4(v8i16, __VA_ARGS__)
361 #define LD_SH8(...) LD_H8(v8i16, __VA_ARGS__)
371 #define LD_SH16(...) LD_H16(v8i16, __VA_ARGS__)
381 out1 = (v8i16)__msa_ilvl_d((v2i64)out0, (v2i64)out0); \
382 out3 = (v8i16)__msa_ilvl_d((v2i64)out2, (v2i64)out2); \
428 #define ST_SH2(...) ST_H2(v8i16, __VA_ARGS__)
434 #define ST_SH4(...) ST_H4(v8i16, __VA_ARGS__)
[all …]
loopfilter_msa.h
20 v8i16 q0_sub_p0_r, filt_r, cnst3h; \
33 q0_sub_p0_r = (v8i16)__msa_ilvr_b(q0_sub_p0, q0_sub_p0); \
35 filt_r = (v8i16)__msa_ilvr_b(filt_sign, filt); \
70 v8i16 q0_sub_p0_r, q0_sub_p0_l, filt_l, filt_r, cnst3h; \
85 q0_sub_p0_r = (v8i16)__msa_ilvr_b(q0_sub_p0, q0_sub_p0); \
87 filt_r = (v8i16)__msa_ilvr_b(filt_sign, filt); \
91 q0_sub_p0_l = (v8i16)__msa_ilvl_b(q0_sub_p0, q0_sub_p0); \
93 filt_l = (v8i16)__msa_ilvl_b(filt_sign, filt); \
183 p2_filt8_out = (v8i16)__msa_srari_h((v8i16)tmp1, 3); \
186 p1_filt8_out = (v8i16)__msa_srari_h((v8i16)tmp1, 3); \
[all …]
txfm_macros_msa.h
17 v8i16 k0_m = __msa_fill_h(cnst0); \
21 k0_m = __msa_ilvev_h((v8i16)s0_m, k0_m); \
27 out0 = __msa_pckev_h((v8i16)s0_m, (v8i16)s1_m); \
31 out1 = __msa_pckev_h((v8i16)s0_m, (v8i16)s1_m); \
52 v8i16 dst_m; \
57 dst_m = __msa_pckev_h((v8i16)tp1_m, (v8i16)tp0_m); \
64 v8i16 madd_s0_m, madd_s1_m; \
75 v8i16 madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m; \
fwd_txfm_msa.h
18 v8i16 in0_m, in1_m, in2_m, in3_m, in4_m, in5_m, in6_m, in7_m; \
33 v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m; \
34 v8i16 vec0_m, vec1_m, vec2_m, vec3_m; \
36 v8i16 coeff_m = { cospi_16_64, -cospi_16_64, cospi_8_64, \
60 v8i16 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m; \
72 v8i16 s0_m, s1_m, s2_m, s3_m, s4_m, s5_m, s6_m; \
73 v8i16 s7_m, x0_m, x1_m, x2_m, x3_m; \
74 v8i16 coeff_m = { cospi_16_64, -cospi_16_64, cospi_8_64, \
132 v8i16 s0_m, s1_m, s2_m, s3_m, s4_m, s5_m, s6_m, s7_m; \
133 v8i16 x0_m, x1_m, x2_m, x3_m; \
[all …]
inv_txfm_msa.h
20 v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst4_m; \
21 v8i16 vec0_m, vec1_m, vec2_m, vec3_m, s0_m, s1_m; \
22 v8i16 coeff0_m = { cospi_2_64, cospi_6_64, cospi_10_64, cospi_14_64, \
24 v8i16 coeff1_m = { cospi_8_64, -cospi_8_64, cospi_16_64, \
86 v8i16 out0_m, r0_m, r1_m; \
100 v8i16 res0_m, res1_m, res2_m, res3_m; \
113 v8i16 c0_m, c1_m, c2_m, c3_m; \
114 v8i16 step0_m, step1_m; \
130 BUTTERFLY_4((v8i16)tmp0_m, (v8i16)tmp1_m, \
131 (v8i16)tmp2_m, (v8i16)tmp3_m, \
[all …]
fwd_txfm_msa.c
15 v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; in fdct8x16_1d_column()
16 v8i16 in0, in1, in2, in3, in4, in5, in6, in7; in fdct8x16_1d_column()
17 v8i16 in8, in9, in10, in11, in12, in13, in14, in15; in fdct8x16_1d_column()
18 v8i16 stp21, stp22, stp23, stp24, stp25, stp26, stp30; in fdct8x16_1d_column()
19 v8i16 stp31, stp32, stp33, stp34, stp35, stp36, stp37; in fdct8x16_1d_column()
20 v8i16 vec0, vec1, vec2, vec3, vec4, vec5, cnst0, cnst1, cnst4, cnst5; in fdct8x16_1d_column()
21 v8i16 coeff = { cospi_16_64, -cospi_16_64, cospi_8_64, cospi_24_64, in fdct8x16_1d_column()
23 v8i16 coeff1 = { cospi_2_64, cospi_30_64, cospi_14_64, cospi_18_64, in fdct8x16_1d_column()
25 v8i16 coeff2 = { -cospi_2_64, -cospi_10_64, -cospi_18_64, -cospi_26_64, in fdct8x16_1d_column()
134 v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; in fdct16x8_1d_row()
[all …]
vpx_convolve8_msa.c
32 v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6; in common_hv_8ht_8vt_4w_msa()
33 v8i16 hz_out7, hz_out8, hz_out9, tmp0, tmp1, out0, out1, out2, out3, out4; in common_hv_8ht_8vt_4w_msa()
34 v8i16 filt, filt_vt0, filt_vt1, filt_vt2, filt_vt3; in common_hv_8ht_8vt_4w_msa()
65 out2 = (v8i16)__msa_ilvev_b((v16i8)hz_out5, (v16i8)hz_out4); in common_hv_8ht_8vt_4w_msa()
74 hz_out6 = (v8i16)__msa_sldi_b((v16i8)hz_out7, (v16i8)hz_out5, 8); in common_hv_8ht_8vt_4w_msa()
75 out3 = (v8i16)__msa_ilvev_b((v16i8)hz_out7, (v16i8)hz_out6); in common_hv_8ht_8vt_4w_msa()
81 hz_out8 = (v8i16)__msa_sldi_b((v16i8)hz_out9, (v16i8)hz_out7, 8); in common_hv_8ht_8vt_4w_msa()
82 out4 = (v8i16)__msa_ilvev_b((v16i8)hz_out9, (v16i8)hz_out8); in common_hv_8ht_8vt_4w_msa()
106 v8i16 filt, filt_vt0, filt_vt1, filt_vt2, filt_vt3; in common_hv_8ht_8vt_8w_msa()
107 v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6; in common_hv_8ht_8vt_8w_msa()
[all …]
vpx_convolve8_avg_msa.c
26 v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6; in common_hv_8ht_8vt_and_aver_dst_4w_msa()
27 v8i16 hz_out7, hz_out8, hz_out9, res0, res1, vec0, vec1, vec2, vec3, vec4; in common_hv_8ht_8vt_and_aver_dst_4w_msa()
28 v8i16 filt, filt_vt0, filt_vt1, filt_vt2, filt_vt3; in common_hv_8ht_8vt_and_aver_dst_4w_msa()
59 vec2 = (v8i16)__msa_ilvev_b((v16i8)hz_out5, (v16i8)hz_out4); in common_hv_8ht_8vt_and_aver_dst_4w_msa()
69 hz_out6 = (v8i16)__msa_sldi_b((v16i8)hz_out7, (v16i8)hz_out5, 8); in common_hv_8ht_8vt_and_aver_dst_4w_msa()
70 vec3 = (v8i16)__msa_ilvev_b((v16i8)hz_out7, (v16i8)hz_out6); in common_hv_8ht_8vt_and_aver_dst_4w_msa()
76 hz_out8 = (v8i16)__msa_sldi_b((v16i8)hz_out9, (v16i8)hz_out7, 8); in common_hv_8ht_8vt_and_aver_dst_4w_msa()
77 vec4 = (v8i16)__msa_ilvev_b((v16i8)hz_out9, (v16i8)hz_out8); in common_hv_8ht_8vt_and_aver_dst_4w_msa()
107 v8i16 filt, filt_vt0, filt_vt1, filt_vt2, filt_vt3; in common_hv_8ht_8vt_and_aver_dst_8w_msa()
109 v8i16 hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6; in common_hv_8ht_8vt_and_aver_dst_8w_msa()
[all …]
/external/llvm/test/Analysis/BasicAA/
intrinsics.ll
10 ; CHECK-NEXT: %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) [[ATTR:#[0-9]+]]
11 ; CHECK-NEXT: call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %q, <8 x i16> %y, i32 16)
15 %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) nounwind
16 call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %q, <8 x i16> %y, i32 16)
17 %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) nounwind
25 ; CHECK-NEXT: %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) [[ATTR]]
26 ; CHECK-NEXT: call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %q, <8 x i16> %y, i32 16)
31 %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) nounwind
32 call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %q, <8 x i16> %y, i32 16)
33 %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) nounwind
[all …]
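
The llvm.arm.neon.vld1.v8i16 / llvm.arm.neon.vst1.p0i8.v8i16 calls above are the IR-level form of NEON 128-bit loads and stores of eight 16-bit lanes, which is what the alias-analysis test is probing. The closest C-level counterpart uses the <arm_neon.h> intrinsics below (a sketch for an ARM target with NEON; depending on compiler version these may be emitted as plain IR loads and stores rather than the named intrinsics).

/* Hypothetical C-level analogue of the IR above: load eight 16-bit lanes,
 * store them elsewhere. Function name is illustrative. */
#include <arm_neon.h>

void copy_v8i16(const int16_t *p, int16_t *q) {
  int16x8_t a = vld1q_s16(p);  /* <8 x i16> load  */
  vst1q_s16(q, a);             /* <8 x i16> store */
}
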
/external/llvm/lib/Target/PowerPC/
PPCInstrAltivec.td
404 [(set v8i16:$vD, (int_ppc_altivec_mfvscr))]>;
415 [(set v8i16:$vD, (int_ppc_altivec_lvehx xoaddr:$src))]>;
442 [(int_ppc_altivec_stvehx v8i16:$rS, xoaddr:$dst)]>;
468 def VMHADDSHS : VA1a_Int_Ty<32, "vmhaddshs", int_ppc_altivec_vmhaddshs, v8i16>;
470 v8i16>;
471 def VMLADDUHM : VA1a_Int_Ty<34, "vmladduhm", int_ppc_altivec_vmladduhm, v8i16>;
495 [(set v8i16:$vD, (add v8i16:$vA, v8i16:$vB))]>;
502 def VADDSHS : VX1_Int_Ty<832, "vaddshs", int_ppc_altivec_vaddshs, v8i16>;
505 def VADDUHS : VX1_Int_Ty<576, "vadduhs", int_ppc_altivec_vadduhs, v8i16>;
561 def VAVGSH : VX1_Int_Ty<1346, "vavgsh", int_ppc_altivec_vavgsh, v8i16>;
[all …]
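
The TableGen patterns above tie generic DAG operations and AltiVec intrinsics on v8i16 to concrete instructions; for example, the (add v8i16:$vA, v8i16:$vB) pattern selects vadduhm. From C, the same operation is reachable through <altivec.h>, as in this sketch (assumes a PowerPC compiler with -maltivec; the function name is illustrative).

/* Hypothetical illustration of the v8i16 add that the VADDUHM pattern above
 * selects. */
#include <altivec.h>

vector signed short add_halfwords(vector signed short a, vector signed short b) {
  return vec_add(a, b);  /* eight 16-bit lanes added in one instruction */
}
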
/external/llvm/test/Analysis/TypeBasedAliasAnalysis/
intrinsics.ll
10 ; CHECK-NEXT: %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) [[NUW:#[0-9]+]]
11 ; CHECK-NEXT: call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %q, <8 x i16> %y, i32 16)
15 %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) nounwind, !tbaa !2
16 call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %q, <8 x i16> %y, i32 16), !tbaa !1
17 %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) nounwind, !tbaa !2
22 declare <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8*, i32) nounwind readonly
23 declare void @llvm.arm.neon.vst1.p0i8.v8i16(i8*, <8 x i16>, i32) nounwind
/external/llvm/lib/Target/X86/
X86TargetTransformInfo.cpp
181 { ISD::SHL, MVT::v8i16, 1 }, in getArithmeticInstrCost()
182 { ISD::SRL, MVT::v8i16, 2 }, in getArithmeticInstrCost()
183 { ISD::SRA, MVT::v8i16, 2 }, in getArithmeticInstrCost()
248 { ISD::SHL, MVT::v8i16, 1 }, // psllw. in getArithmeticInstrCost()
257 { ISD::SRL, MVT::v8i16, 1 }, // psrlw. in getArithmeticInstrCost()
266 { ISD::SRA, MVT::v8i16, 1 }, // psraw. in getArithmeticInstrCost()
273 { ISD::SDIV, MVT::v8i16, 6 }, // pmulhw sequence in getArithmeticInstrCost()
274 { ISD::UDIV, MVT::v8i16, 6 }, // pmulhuw sequence in getArithmeticInstrCost()
295 if ((VT == MVT::v8i16 && ST->hasSSE2()) || in getArithmeticInstrCost()
323 { ISD::SHL, MVT::v8i16, 32 }, // cmpgtb sequence. in getArithmeticInstrCost()
[all …]
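
The cost-table rows above record that, with SSE2, shifting a v8i16 vector by a uniform amount costs a single instruction (psllw, psrlw, or psraw, as the comments note). Here is a small illustration of the operation being costed, using the SSE2 intrinsics from <emmintrin.h>; the function names are illustrative.

/* The three v8i16 shifts those cost-table entries describe; with an immediate
 * count each compiles to one psllw/psrlw/psraw. */
#include <emmintrin.h>

__m128i shl_v8i16(__m128i x)  { return _mm_slli_epi16(x, 3); }  /* psllw */
__m128i lshr_v8i16(__m128i x) { return _mm_srli_epi16(x, 3); }  /* psrlw */
__m128i ashr_v8i16(__m128i x) { return _mm_srai_epi16(x, 3); }  /* psraw */
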
/external/llvm/test/CodeGen/ARM/
2012-08-27-CopyPhysRegCrash.ll
43 %28 = tail call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> %26) nounwind
46 %31 = tail call <8 x i16> @llvm.arm.neon.vmaxu.v8i16(<8 x i16> undef, <8 x i16> %30) nounwind
57 %42 = tail call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> %36) nounwind
58 %43 = tail call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> %41) nounwind
62 %47 = tail call <8 x i16> @llvm.arm.neon.vmaxu.v8i16(<8 x i16> %31, <8 x i16> %46) nounwind
80 %65 = tail call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> %60) nounwind
87 %72 = tail call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> undef) nounwind
88 %73 = tail call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> %71) nounwind
96 %81 = tail call <8 x i16> @llvm.arm.neon.vabs.v8i16(<8 x i16> %80) nounwind
99 %84 = tail call <8 x i16> @llvm.arm.neon.vmaxu.v8i16(<8 x i16> %76, <8 x i16> %83) nounwind
[all …]
/external/clang/test/CodeGen/
ppc64-vector.c
7 typedef short v8i16 __attribute__((vector_size (16))); typedef
37 v8i16 test_v8i16(v8i16 x) in test_v8i16()
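
The clang test above defines v8i16 with the generic vector_size attribute and checks how such a vector is passed and returned by value on ppc64. A self-contained sketch of that usage follows (the function body is illustrative; the hit only shows the declaration line).

/* Sketch of the ppc64-vector.c pattern: a 16-byte vector of shorts defined via
 * vector_size, passed and returned by value. On ppc64 with AltiVec this maps
 * onto the v8i16 register type. */
typedef short v8i16 __attribute__((vector_size(16)));

v8i16 test_v8i16(v8i16 x) {
  return x + x;  /* lane-wise 16-bit add */
}
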
/external/libvpx/libvpx/vp9/encoder/mips/msa/
vp9_avg_msa.c
28 sum0 = (v8u16)__msa_pckev_h((v8i16)sum, (v8i16)sum); in vp9_avg_8x8_msa()
49 sum0 = (v8u16)__msa_pckev_h((v8i16)sum1, (v8i16)sum1); in vp9_avg_4x4_msa()
vp9_fdct4x4_msa.c
18 v8i16 in0, in1, in2, in3, in4; in vp9_fwht4x4_msa()
50 v8i16 in0, in1, in2, in3; in vp9_fht4x4_msa()
56 v8i16 temp, mask; in vp9_fht4x4_msa()
60 mask = (v8i16)__msa_sldi_b(zero, one, 15); in vp9_fht4x4_msa()
63 temp = (v8i16)__msa_xori_b((v16u8)temp, 255); in vp9_fht4x4_msa()
/external/llvm/test/CodeGen/SystemZ/
vec-const-02.ll
1 ; Test vector byte masks, v8i16 version.
49 ; Test an all-zeros v2i16 that gets promoted to v8i16.
57 ; Test a mixed v2i16 that gets promoted to v8i16 (mask 0xc000).
65 ; Test an all-zeros v4i16 that gets promoted to v8i16.
73 ; Test a mixed v4i16 that gets promoted to v8i16 (mask 0x7200).
