
Searched refs:v8i16 (Results 1 – 25 of 643) sorted by relevance


/external/gemmlowp/fixedpoint/
fixedpoint_msa.h
32 struct FixedPointRawTypeTraits<v8i16> {
44 inline v8i16 BitAnd(v8i16 a, v8i16 b) {
45 return reinterpret_cast<v8i16>(__builtin_msa_and_v(reinterpret_cast<v16u8>(a),
56 inline v8i16 BitOr(v8i16 a, v8i16 b) {
57 return reinterpret_cast<v8i16>(__builtin_msa_or_v(reinterpret_cast<v16u8>(a),
68 inline v8i16 BitXor(v8i16 a, v8i16 b) {
69 return reinterpret_cast<v8i16>(__builtin_msa_xor_v(reinterpret_cast<v16u8>(a),
80 inline v8i16 BitNot(v8i16 a) {
81 return reinterpret_cast<v8i16>(__builtin_msa_nor_v(reinterpret_cast<v16u8>(a),
91 inline v8i16 Add(v8i16 a, v8i16 b) {
[all …]
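
The casts above reflect how MSA exposes bitwise operations: __builtin_msa_and_v, or_v, xor_v, and nor_v are untyped and take v16u8 operands, so gemmlowp reinterprets its v8i16 values on the way in and out. A minimal C sketch of the same pattern (helper names are illustrative, not gemmlowp's; assumes a MIPS toolchain with <msa.h> and -mmsa):

    #include <msa.h>

    /* Bitwise AND on eight 16-bit lanes via the untyped v16u8 builtin. */
    static inline v8i16 bit_and_h(v8i16 a, v8i16 b) {
      return (v8i16)__builtin_msa_and_v((v16u8)a, (v16u8)b);
    }

    /* NOR of a value with itself yields bitwise NOT. */
    static inline v8i16 bit_not_h(v8i16 a) {
      return (v8i16)__builtin_msa_nor_v((v16u8)a, (v16u8)a);
    }
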
/external/libvpx/libvpx/third_party/libyuv/source/
row_msa.cc
75 v8i16 vec0_m, vec1_m; \
80 vec0_m = (v8i16)__msa_ilvr_b((v16i8)in_y, (v16i8)in_y); \
81 vec1_m = (v8i16)__msa_ilvr_b((v16i8)zero_m, (v16i8)in_uv); \
82 reg0_m = (v4i32)__msa_ilvr_h((v8i16)zero_m, (v8i16)vec0_m); \
83 reg1_m = (v4i32)__msa_ilvl_h((v8i16)zero_m, (v8i16)vec0_m); \
84 reg2_m = (v4i32)__msa_ilvr_h((v8i16)zero_m, (v8i16)vec1_m); \
85 reg3_m = (v4i32)__msa_ilvl_h((v8i16)zero_m, (v8i16)vec1_m); \
92 reg4_m = __msa_dotp_s_w((v8i16)vec1_m, (v8i16)ugvg); \
118 out_b = __msa_pckev_h((v8i16)reg6_m, (v8i16)reg5_m); \
119 out_g = __msa_pckev_h((v8i16)reg4_m, (v8i16)reg7_m); \
[all …]
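
Lines 80-85 above use a standard MSA widening idiom: interleaving with a zero vector zero-extends unsigned 8-bit lanes to 16 bits, then 16-bit lanes to 32 bits, before the signed dot product at line 92. A hedged sketch of that idiom alone (hypothetical helper name; assumes <msa.h>/-mmsa and little-endian lane order):

    #include <msa.h>

    /* Zero-extend the low eight bytes of `in` to eight 16-bit lanes,
     * then to two vectors of four 32-bit lanes each. */
    static inline void widen_u8_lo_to_s32(v16u8 in, v4i32 *lo, v4i32 *hi) {
      v16i8 zero = { 0 };
      v8i16 halves = (v8i16)__msa_ilvr_b(zero, (v16i8)in);
      *lo = (v4i32)__msa_ilvr_h((v8i16)zero, halves);
      *hi = (v4i32)__msa_ilvl_h((v8i16)zero, halves);
    }
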
scale_msa.cc
96 reg0 = (v8u16)__msa_srari_h((v8i16)reg0, 2); in ScaleARGBRowDown2Box_MSA()
97 reg1 = (v8u16)__msa_srari_h((v8i16)reg1, 2); in ScaleARGBRowDown2Box_MSA()
175 reg4 = (v8u16)__msa_srari_h((v8i16)reg4, 2); in ScaleARGBRowDownEvenBox_MSA()
176 reg5 = (v8u16)__msa_srari_h((v8i16)reg5, 2); in ScaleARGBRowDownEvenBox_MSA()
258 vec0 = (v8u16)__msa_srari_h((v8i16)vec0, 2); in ScaleRowDown2Box_MSA()
259 vec1 = (v8u16)__msa_srari_h((v8i16)vec1, 2); in ScaleRowDown2Box_MSA()
260 vec2 = (v8u16)__msa_srari_h((v8i16)vec2, 2); in ScaleRowDown2Box_MSA()
261 vec3 = (v8u16)__msa_srari_h((v8i16)vec3, 2); in ScaleRowDown2Box_MSA()
347 vec0 = (v8u16)__msa_pckev_h((v8i16)reg1, (v8i16)reg0); in ScaleRowDown4Box_MSA()
348 vec1 = (v8u16)__msa_pckev_h((v8i16)reg3, (v8i16)reg2); in ScaleRowDown4Box_MSA()
[all …]
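
__msa_srari_h, which every scaler above leans on, is a rounding arithmetic right shift: it adds half of the shifted-out weight before shifting, so a 2x2 box sum followed by a shift of 2 is a round-to-nearest average. A small sketch (hypothetical helper; assumes <msa.h>/-mmsa):

    #include <msa.h>

    /* Rounded average of a 2x2 box: (sum + 2) >> 2 in each 16-bit lane. */
    static inline v8u16 box2x2_avg(v8u16 sum_of_four) {
      return (v8u16)__msa_srari_h((v8i16)sum_of_four, 2);
    }
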
/external/libvpx/libvpx/vp8/encoder/mips/msa/
denoising_msa.c
34 v8i16 diff0, diff1, abs_diff0, abs_diff1, abs_diff_neg0, abs_diff_neg1; in vp8_denoiser_filter_msa()
35 v8i16 adjust0, adjust1, adjust2, adjust3; in vp8_denoiser_filter_msa()
36 v8i16 shift_inc1_vec = { 0 }; in vp8_denoiser_filter_msa()
37 v8i16 col_sum0 = { 0 }; in vp8_denoiser_filter_msa()
38 v8i16 col_sum1 = { 0 }; in vp8_denoiser_filter_msa()
39 v8i16 col_sum2 = { 0 }; in vp8_denoiser_filter_msa()
40 v8i16 col_sum3 = { 0 }; in vp8_denoiser_filter_msa()
41 v8i16 temp0_h, temp1_h, temp2_h, temp3_h, cmp, delta_vec; in vp8_denoiser_filter_msa()
44 v8i16 zero = { 0 }; in vp8_denoiser_filter_msa()
45 v8i16 one = __msa_ldi_h(1); in vp8_denoiser_filter_msa()
[all …]
quantize_msa.c
20 v8i16 round0, round1; in fast_quantize_b_msa()
21 v8i16 sign_z0, sign_z1; in fast_quantize_b_msa()
22 v8i16 q_coeff0, q_coeff1; in fast_quantize_b_msa()
23 v8i16 x0, x1, de_quant0, de_quant1; in fast_quantize_b_msa()
24 v8i16 coeff0, coeff1, z0, z1; in fast_quantize_b_msa()
25 v8i16 quant0, quant1, quant2, quant3; in fast_quantize_b_msa()
26 v8i16 zero = { 0 }; in fast_quantize_b_msa()
27 v8i16 inv_zig_zag0, inv_zig_zag1; in fast_quantize_b_msa()
28 v8i16 zigzag_mask0 = { 0, 1, 4, 8, 5, 2, 3, 6 }; in fast_quantize_b_msa()
29 v8i16 zigzag_mask1 = { 9, 12, 13, 10, 7, 11, 14, 15 }; in fast_quantize_b_msa()
[all …]
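
The zigzag_mask vectors above are lane indices for __msa_vshf_h, which gathers halfwords from the concatenation of its two source operands (indices 0-7 address the third operand, 8-15 the second). A hedged sketch of just the reorder step, not the full quantizer:

    #include <msa.h>

    /* Gather the first eight zigzag-ordered coefficients from a
     * 16-coefficient block held in coeff0 (lanes 0-7) and coeff1 (8-15). */
    static inline v8i16 zigzag_lo(v8i16 coeff0, v8i16 coeff1) {
      v8i16 mask = { 0, 1, 4, 8, 5, 2, 3, 6 };
      return __msa_vshf_h(mask, coeff1, coeff0);
    }
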
dct_msa.c
16 v8i16 s0_m, s1_m, tp0_m, tp1_m, tp2_m, tp3_m; \
28 v8i16 tmp0_m; \
36 v8i16 tmp0_m; \
37 v8i16 one_m = __msa_ldi_h(1); \
70 v8i16 in0, in1, in2, in3; in vp8_short_fdct4x4_msa()
71 v8i16 temp0, temp1; in vp8_short_fdct4x4_msa()
72 v8i16 const0, const1; in vp8_short_fdct4x4_msa()
73 v8i16 coeff = { 2217, 5352, -5352, 14500, 7500, 12000, 25000, 26000 }; in vp8_short_fdct4x4_msa()
75 v8i16 zero = { 0 }; in vp8_short_fdct4x4_msa()
116 v8i16 in0, in1, in2, in3; in vp8_short_fdct8x4_msa()
[all …]
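
The coeff vector at line 73 packs the VP8 forward-DCT constants into one v8i16 so that individual constants can be broadcast per butterfly stage. A minimal sketch of that splat step (illustrative only; the real kernel feeds these into multiply-accumulate intrinsics):

    #include <msa.h>

    /* Broadcast one packed DCT constant to all eight lanes. */
    static inline v8i16 splat_dct_const0(void) {
      v8i16 coeff = { 2217, 5352, -5352, 14500, 7500, 12000, 25000, 26000 };
      return __msa_splati_h(coeff, 0); /* every lane = 2217 */
    }
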
/external/libyuv/files/source/
row_msa.cc
75 v8i16 vec0_m, vec1_m; \
80 vec0_m = (v8i16)__msa_ilvr_b((v16i8)in_y, (v16i8)in_y); \
81 vec1_m = (v8i16)__msa_ilvr_b((v16i8)zero_m, (v16i8)in_uv); \
82 reg0_m = (v4i32)__msa_ilvr_h((v8i16)zero_m, (v8i16)vec0_m); \
83 reg1_m = (v4i32)__msa_ilvl_h((v8i16)zero_m, (v8i16)vec0_m); \
84 reg2_m = (v4i32)__msa_ilvr_h((v8i16)zero_m, (v8i16)vec1_m); \
85 reg3_m = (v4i32)__msa_ilvl_h((v8i16)zero_m, (v8i16)vec1_m); \
92 reg4_m = __msa_dotp_s_w((v8i16)vec1_m, (v8i16)ugvg); \
118 out_b = __msa_pckev_h((v8i16)reg6_m, (v8i16)reg5_m); \
119 out_g = __msa_pckev_h((v8i16)reg4_m, (v8i16)reg7_m); \
[all …]
scale_msa.cc
88 reg0 = (v8u16)__msa_srari_h((v8i16)reg0, 2); in ScaleARGBRowDown2Box_MSA()
89 reg1 = (v8u16)__msa_srari_h((v8i16)reg1, 2); in ScaleARGBRowDown2Box_MSA()
167 reg4 = (v8u16)__msa_srari_h((v8i16)reg4, 2); in ScaleARGBRowDownEvenBox_MSA()
168 reg5 = (v8u16)__msa_srari_h((v8i16)reg5, 2); in ScaleARGBRowDownEvenBox_MSA()
250 vec0 = (v8u16)__msa_srari_h((v8i16)vec0, 2); in ScaleRowDown2Box_MSA()
251 vec1 = (v8u16)__msa_srari_h((v8i16)vec1, 2); in ScaleRowDown2Box_MSA()
252 vec2 = (v8u16)__msa_srari_h((v8i16)vec2, 2); in ScaleRowDown2Box_MSA()
253 vec3 = (v8u16)__msa_srari_h((v8i16)vec3, 2); in ScaleRowDown2Box_MSA()
339 vec0 = (v8u16)__msa_pckev_h((v8i16)reg1, (v8i16)reg0); in ScaleRowDown4Box_MSA()
340 vec1 = (v8u16)__msa_pckev_h((v8i16)reg3, (v8i16)reg2); in ScaleRowDown4Box_MSA()
[all …]
/external/llvm/test/CodeGen/PowerPC/
vaddsplat.ll
9 %v8i16 = type <8 x i16>
34 define void @test_v8i16_pos_even(%v8i16* %P, %v8i16* %S) {
35 %p = load %v8i16, %v8i16* %P
36 %r = add %v8i16 %p, < i16 30, i16 30, i16 30, i16 30, i16 30, i16 30, i16 30, i16 30 >
37 store %v8i16 %r, %v8i16* %S
45 define void @test_v8i16_neg_even(%v8i16* %P, %v8i16* %S) {
46 %p = load %v8i16, %v8i16* %P
47 … %r = add %v8i16 %p, < i16 -32, i16 -32, i16 -32, i16 -32, i16 -32, i16 -32, i16 -32, i16 -32 >
48 store %v8i16 %r, %v8i16* %S
102 define void @test_v8i16_pos_odd(%v8i16* %P, %v8i16* %S) {
[all …]
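
vaddsplat.ll checks how the PowerPC backend materializes a splatted i16 constant for a v8i16 add, e.g. even constants just outside the vspltish immediate range. A hedged C analogue of test_v8i16_pos_even using GCC/Clang vector extensions (type and function names are illustrative):

    #include <stdint.h>

    typedef int16_t v8i16_t __attribute__((vector_size(16)));

    /* Add the constant 30, broadcast across all eight 16-bit lanes. */
    void add_splat_30(const v8i16_t *P, v8i16_t *S) {
      *S = *P + 30;
    }
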
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/PowerPC/
vaddsplat.ll
9 %v8i16 = type <8 x i16>
34 define void @test_v8i16_pos_even(%v8i16* %P, %v8i16* %S) {
35 %p = load %v8i16, %v8i16* %P
36 %r = add %v8i16 %p, < i16 30, i16 30, i16 30, i16 30, i16 30, i16 30, i16 30, i16 30 >
37 store %v8i16 %r, %v8i16* %S
45 define void @test_v8i16_neg_even(%v8i16* %P, %v8i16* %S) {
46 %p = load %v8i16, %v8i16* %P
47 … %r = add %v8i16 %p, < i16 -32, i16 -32, i16 -32, i16 -32, i16 -32, i16 -32, i16 -32, i16 -32 >
48 store %v8i16 %r, %v8i16* %S
102 define void @test_v8i16_pos_odd(%v8i16* %P, %v8i16* %S) {
[all …]
/external/libvpx/libvpx/vp8/common/mips/msa/
vp8_macros_msa.h
25 #define LD_SH(...) LD_H(v8i16, __VA_ARGS__)
37 #define ST_SH(...) ST_H(v8i16, __VA_ARGS__)
320 #define LD_SH2(...) LD_H2(v8i16, __VA_ARGS__)
327 #define LD_SH4(...) LD_H4(v8i16, __VA_ARGS__)
377 #define ST_SH2(...) ST_H2(v8i16, __VA_ARGS__)
406 out0_m = __msa_copy_u_h((v8i16)in, (stidx)); \
407 out1_m = __msa_copy_u_h((v8i16)in, (stidx + 1)); \
408 out2_m = __msa_copy_u_h((v8i16)in, (stidx + 2)); \
409 out3_m = __msa_copy_u_h((v8i16)in, (stidx + 3)); \
575 out0 = (RTYPE)__msa_vshf_h((v8i16)mask0, (v8i16)in1, (v8i16)in0); \
[all …]
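
LD_SH, ST_SH, and the numbered variants above are thin type instantiations of generic load/store macros over v8i16. A stand-in sketch of the family's shape (the real LD_H/ST_H in this header also handle unaligned pointers; assumes <msa.h>):

    #include <msa.h>

    /* Generic typed vector load/store, instantiated for v8i16. */
    #define LD_H(RTYPE, psrc) (*(const RTYPE *)(psrc))
    #define ST_H(RTYPE, in, pdst) (*(RTYPE *)(pdst) = (in))
    #define LD_SH(...) LD_H(v8i16, __VA_ARGS__)
    #define ST_SH(...) ST_H(v8i16, __VA_ARGS__)
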
/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/BasicAA/
cs-cs-arm.ll
7 declare <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8*, i32) nounwind readonly
8 declare void @llvm.arm.neon.vst1.p0i8.v8i16(i8*, <8 x i16>, i32) nounwind
13 %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) nounwind
14 call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %q, <8 x i16> %y, i32 16)
15 %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) nounwind
22 …tAlias): Ptr: i8* %p <-> %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 1…
23 ; CHECK: NoModRef: Ptr: i8* %q <-> %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* …
24 ; CHECK: NoModRef: Ptr: i8* %p <-> call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %q, <8 x i…
25 ; CHECK: Both ModRef (MustAlias): Ptr: i8* %q <-> call void @llvm.arm.neon.vst1.p0i8.v8i16(i8…
26 …tAlias): Ptr: i8* %p <-> %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 1…
[all …]
/external/libvpx/libvpx/vpx_dsp/mips/
macros_msa.h
23 #define LD_SH(...) LD_V(v8i16, __VA_ARGS__)
29 #define ST_SH(...) ST_V(v8i16, __VA_ARGS__)
242 #define LD_SH2(...) LD_V2(v8i16, __VA_ARGS__)
259 #define LD_SH4(...) LD_V4(v8i16, __VA_ARGS__)
284 #define LD_SH8(...) LD_V8(v8i16, __VA_ARGS__)
294 #define LD_SH16(...) LD_V16(v8i16, __VA_ARGS__)
305 out1 = (v8i16)__msa_ilvl_d((v2i64)out0, (v2i64)out0); \
306 out3 = (v8i16)__msa_ilvl_d((v2i64)out2, (v2i64)out2); \
320 #define ST_SH2(...) ST_V2(v8i16, __VA_ARGS__)
329 #define ST_SH4(...) ST_V4(v8i16, __VA_ARGS__)
[all …]
loopfilter_16_msa.c
25 v8i16 p2_filt8_r, p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r; in hz_lpf_t4_and_t8_16w()
26 v8i16 p2_filt8_l, p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l; in hz_lpf_t4_and_t8_16w()
91 v8i16 l_out, r_out; in hz_lpf_t16_16w()
127 r_out = __msa_srari_h((v8i16)tmp1_r, 4); in hz_lpf_t16_16w()
146 l_out = __msa_srari_h((v8i16)tmp1_l, 4); in hz_lpf_t16_16w()
148 r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out); in hz_lpf_t16_16w()
159 r_out = __msa_srari_h((v8i16)tmp1_r, 4); in hz_lpf_t16_16w()
166 l_out = __msa_srari_h((v8i16)tmp1_l, 4); in hz_lpf_t16_16w()
168 r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out); in hz_lpf_t16_16w()
179 r_out = (v8i16)__msa_srari_h((v8i16)tmp1_r, 4); in hz_lpf_t16_16w()
[all …]
txfm_macros_msa.h
19 v8i16 k0_m, k1_m, k2_m, zero = { 0 }; \
23 k2_m = __msa_ilvev_h((v8i16)k1_m, k0_m); \
24 k0_m = __msa_ilvev_h((v8i16)zero, k0_m); \
25 k1_m = __msa_ilvev_h(k1_m, (v8i16)zero); \
30 s1_m = __msa_dpsub_s_w(s1_m, (v8i16)s5_m, k1_m); \
31 s0_m = __msa_dpsub_s_w(s0_m, (v8i16)s4_m, k1_m); \
33 out0 = __msa_pckev_h((v8i16)s0_m, (v8i16)s1_m); \
37 out1 = __msa_pckev_h((v8i16)s0_m, (v8i16)s1_m); \
60 v8i16 dst_m; \
65 dst_m = __msa_pckev_h((v8i16)tp1_m, (v8i16)tp0_m); \
[all …]
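
The transform butterflies above are built on MSA's pairwise dot products: __msa_dotp_s_w multiplies adjacent signed halfword pairs and sums each pair into a 32-bit lane, and __msa_dpsub_s_w is the subtract-accumulate form seen at lines 30-31. A one-line sketch of the former (assumes <msa.h>/-mmsa):

    #include <msa.h>

    /* out[i] = a[2i]*b[2i] + a[2i+1]*b[2i+1], for i = 0..3. */
    static inline v4i32 pairwise_dot(v8i16 a, v8i16 b) {
      return __msa_dotp_s_w(a, b);
    }
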
fwd_txfm_msa.h
19 v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m; \
20 v8i16 vec0_m, vec1_m, vec2_m, vec3_m; \
22 v8i16 coeff_m = { \
48 v8i16 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m; \
61 v8i16 s0_m, s1_m, s2_m, s3_m, s4_m, s5_m, s6_m; \
62 v8i16 s7_m, x0_m, x1_m, x2_m, x3_m; \
63 v8i16 coeff_m = { cospi_16_64, -cospi_16_64, cospi_8_64, cospi_24_64, \
121 v8i16 s0_m, s1_m, s2_m, s3_m, s4_m, s5_m, s6_m, s7_m; \
122 v8i16 x0_m, x1_m, x2_m, x3_m; \
123 v8i16 coeff_m = { cospi_16_64, -cospi_16_64, cospi_8_64, cospi_24_64, \
[all …]
/external/libaom/libaom/aom_dsp/mips/
loopfilter_16_msa.c
24 v8i16 p2_filt8_r, p1_filt8_r, p0_filt8_r, q0_filt8_r, q1_filt8_r, q2_filt8_r; in aom_hz_lpf_t4_and_t8_16w()
25 v8i16 p2_filt8_l, p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l; in aom_hz_lpf_t4_and_t8_16w()
90 v8i16 l_out, r_out; in aom_hz_lpf_t16_16w()
126 r_out = __msa_srari_h((v8i16)tmp1_r, 4); in aom_hz_lpf_t16_16w()
145 l_out = __msa_srari_h((v8i16)tmp1_l, 4); in aom_hz_lpf_t16_16w()
147 r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out); in aom_hz_lpf_t16_16w()
158 r_out = __msa_srari_h((v8i16)tmp1_r, 4); in aom_hz_lpf_t16_16w()
165 l_out = __msa_srari_h((v8i16)tmp1_l, 4); in aom_hz_lpf_t16_16w()
167 r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out); in aom_hz_lpf_t16_16w()
178 r_out = (v8i16)__msa_srari_h((v8i16)tmp1_r, 4); in aom_hz_lpf_t16_16w()
[all …]
macros_msa.h
27 #define LD_SH(...) LD_H(v8i16, __VA_ARGS__)
37 #define ST_SH(...) ST_H(v8i16, __VA_ARGS__)
353 #define LD_SH2(...) LD_H2(v8i16, __VA_ARGS__)
360 #define LD_SH4(...) LD_H4(v8i16, __VA_ARGS__)
368 #define LD_SH8(...) LD_H8(v8i16, __VA_ARGS__)
378 #define LD_SH16(...) LD_H16(v8i16, __VA_ARGS__)
389 out1 = (v8i16)__msa_ilvl_d((v2i64)out0, (v2i64)out0); \
390 out3 = (v8i16)__msa_ilvl_d((v2i64)out2, (v2i64)out2); \
440 #define ST_SH2(...) ST_H2(v8i16, __VA_ARGS__)
447 #define ST_SH4(...) ST_H4(v8i16, __VA_ARGS__)
[all …]
loopfilter_msa.h
22 v8i16 q0_sub_p0_r, filt_r, cnst3h; \
35 q0_sub_p0_r = (v8i16)__msa_ilvr_b(q0_sub_p0, q0_sub_p0); \
37 filt_r = (v8i16)__msa_ilvr_b(filt_sign, filt); \
73 v8i16 q0_sub_p0_r, q0_sub_p0_l, filt_l, filt_r, cnst3h; \
88 q0_sub_p0_r = (v8i16)__msa_ilvr_b(q0_sub_p0, q0_sub_p0); \
90 filt_r = (v8i16)__msa_ilvr_b(filt_sign, filt); \
94 q0_sub_p0_l = (v8i16)__msa_ilvl_b(q0_sub_p0, q0_sub_p0); \
96 filt_l = (v8i16)__msa_ilvl_b(filt_sign, filt); \
188 p2_filt8_out = (v8i16)__msa_srari_h((v8i16)tmp_filt8_1, 3); \
191 p1_filt8_out = (v8i16)__msa_srari_h((v8i16)tmp_filt8_1, 3); \
[all …]
/external/webp/src/dsp/
msa_macro.h
26 #define ADDVI_H(a, b) __msa_addvi_h((v8i16)a, b)
29 #define SRAI_H(a, b) __msa_srai_h((v8i16)a, b)
31 #define SRLI_H(a, b) __msa_srli_h((v8i16)a, b)
54 #define LD_SH(...) LD_H(v8i16, __VA_ARGS__)
66 #define ST_SH(...) ST_H(v8i16, __VA_ARGS__)
260 #define LD_SH2(...) LD_H2(v8i16, __VA_ARGS__)
355 #define ST_SH2(...) ST_H2(v8i16, __VA_ARGS__)
370 const uint16_t out0_m = __msa_copy_s_h((v8i16)in, stidx); \
371 const uint16_t out1_m = __msa_copy_s_h((v8i16)in, stidx + 1); \
372 const uint16_t out2_m = __msa_copy_s_h((v8i16)in, stidx + 2); \
[all …]
enc_msa.c
46 v8i16 input0, input1; in ITransformOne()
85 v8i16 t0, t1, t2, t3; in FTransform_MSA()
87 const v8i16 mask0 = { 0, 4, 8, 12, 1, 5, 9, 13 }; in FTransform_MSA()
88 const v8i16 mask1 = { 3, 7, 11, 15, 2, 6, 10, 14 }; in FTransform_MSA()
89 const v8i16 mask2 = { 4, 0, 5, 1, 6, 2, 7, 3 }; in FTransform_MSA()
90 const v8i16 mask3 = { 0, 4, 1, 5, 2, 6, 3, 7 }; in FTransform_MSA()
91 const v8i16 cnst0 = { 2217, -5352, 2217, -5352, 2217, -5352, 2217, -5352 }; in FTransform_MSA()
92 const v8i16 cnst1 = { 5352, 2217, 5352, 2217, 5352, 2217, 5352, 2217 }; in FTransform_MSA()
135 v8i16 in0 = { 0 }; in FTransformWHT_MSA()
136 v8i16 in1 = { 0 }; in FTransformWHT_MSA()
[all …]
upsampling_msa.c
25 const v8i16 t0 = (v8i16)__msa_ilvr_b((v16i8)zero, (v16i8)in); \
26 out0 = (v4u32)__msa_ilvr_h((v8i16)zero, t0); \
27 out1 = (v4u32)__msa_ilvl_h((v8i16)zero, t0); \
49 out0 = (v8u16)__msa_pckod_h((v8i16)temp1, (v8i16)temp0); \
53 const v8i16 const_a = (v8i16)__msa_fill_h(14234); \
54 const v8i16 a0 = __msa_adds_s_h((v8i16)y0, (v8i16)v0); \
55 const v8i16 a1 = __msa_adds_s_h((v8i16)y1, (v8i16)v1); \
56 v8i16 b0 = __msa_subs_s_h(a0, const_a); \
57 v8i16 b1 = __msa_subs_s_h(a1, const_a); \
64 const v8i16 const_a = (v8i16)__msa_fill_h(14234); \
[all …]
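
The adds_s_h/subs_s_h calls above saturate: results clamp to [-32768, 32767] instead of wrapping, which keeps the YUV-to-RGB bias arithmetic safe near the range limits. A small sketch of the pattern (bias constant taken from the snippet; helper name hypothetical):

    #include <msa.h>

    /* y + v, then subtract the bias, all with signed 16-bit saturation. */
    static inline v8i16 bias_sub_sat(v8i16 y, v8i16 v) {
      const v8i16 bias = __msa_fill_h(14234);
      return __msa_subs_s_h(__msa_adds_s_h(y, v), bias);
    }
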
/external/swiftshader/third_party/LLVM/test/Analysis/BasicAA/
intrinsics.ll
10 ; CHECK-NEXT: %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
11 ; CHECK-NEXT: call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
15 %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
16 call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
17 %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
25 ; CHECK-NEXT: %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
26 ; CHECK-NEXT: call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
31 %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
32 call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
33 %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
[all …]
/external/llvm/test/Analysis/BasicAA/
intrinsics.ll
10 ; CHECK-NEXT: %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) [[ATTR:#[0-9]+]]
11 ; CHECK-NEXT: call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %q, <8 x i16> %y, i32 16)
15 %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) nounwind
16 call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %q, <8 x i16> %y, i32 16)
17 %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) nounwind
25 ; CHECK-NEXT: %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) [[ATTR]]
26 ; CHECK-NEXT: call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %q, <8 x i16> %y, i32 16)
31 %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) nounwind
32 call void @llvm.arm.neon.vst1.p0i8.v8i16(i8* %q, <8 x i16> %y, i32 16)
33 %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 16) nounwind
[all …]
/external/swiftshader/third_party/LLVM/lib/Target/CellSPU/
CellSDKIntrinsics.td
43 [(set (v4i32 VECREG:$rT), (int_spu_si_mpy (v8i16 VECREG:$rA),
44 (v8i16 VECREG:$rB)))]>;
49 [(set (v4i32 VECREG:$rT), (int_spu_si_mpyu (v8i16 VECREG:$rA),
50 (v8i16 VECREG:$rB)))] >;
55 [(set (v4i32 VECREG:$rT), (int_spu_si_mpyi (v8i16 VECREG:$rA),
61 [(set (v4i32 VECREG:$rT), (int_spu_si_mpyui (v8i16 VECREG:$rA),
67 [(set (v4i32 VECREG:$rT), (int_spu_si_mpya (v8i16 VECREG:$rA),
68 (v8i16 VECREG:$rB),
69 (v8i16 VECREG:$rC)))]>;
75 (v8i16 VECREG:$rB)))]>;
[all …]
