/external/llvm/test/CodeGen/PowerPC/
D | vaddsplat.ll
     10  %v16i8 = type <16 x i8>
     56  define void @test_v16i8_pos_even(%v16i8* %P, %v16i8* %S) {
     57  %p = load %v16i8, %v16i8* %P
     58  …%r = add %v16i8 %p, < i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16, i8 16,…
     59  store %v16i8 %r, %v16i8* %S
     67  define void @test_v16i8_neg_even(%v16i8* %P, %v16i8* %S) {
     68  %p = load %v16i8, %v16i8* %P
     69  …%r = add %v16i8 %p, < i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -18, i8 -…
     70  store %v16i8 %r, %v16i8* %S
    126  define void @test_v16i8_pos_odd(%v16i8* %P, %v16i8* %S) {
    [all …]
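These tests add a splatted immediate to every byte of a `v16i8`; the pos/neg, even/odd variants check how the PowerPC backend materializes splat constants that fall outside the single-instruction `vspltisb` range (16, for instance, can be built as 8+8). A minimal C restatement of the IR's semantics, assuming GCC/Clang generic vector extensions; illustrative only, not the test itself:

```c
typedef signed char v16i8 __attribute__((vector_size(16)));

/* Same computation as @test_v16i8_pos_even: load, add splat(16), store. */
void test_v16i8_pos_even(const v16i8 *P, v16i8 *S) {
    const v16i8 splat16 = { 16, 16, 16, 16, 16, 16, 16, 16,
                            16, 16, 16, 16, 16, 16, 16, 16 };
    *S = *P + splat16;   /* element-wise, like `add <16 x i8>` */
}
```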

/external/libvpx/libvpx/vpx_dsp/mips/
D | loopfilter_16_msa.c
     84  v16i8 zero = { 0 }; in vpx_hz_lpf_t16_16w()
    114  q0_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q0); in vpx_hz_lpf_t16_16w()
    133  q0_l_in = (v8u16)__msa_ilvl_b(zero, (v16i8)q0); in vpx_hz_lpf_t16_16w()
    148  r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out); in vpx_hz_lpf_t16_16w()
    154  q1_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q1); in vpx_hz_lpf_t16_16w()
    161  q1_l_in = (v8u16)__msa_ilvl_b(zero, (v16i8)q1); in vpx_hz_lpf_t16_16w()
    168  r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out); in vpx_hz_lpf_t16_16w()
    174  q2_r_in = (v8u16)__msa_ilvr_b(zero, (v16i8)q2); in vpx_hz_lpf_t16_16w()
    181  q2_l_in = (v8u16)__msa_ilvl_b(zero, (v16i8)q2); in vpx_hz_lpf_t16_16w()
    188  r_out = (v8i16)__msa_pckev_b((v16i8)l_out, (v16i8)r_out); in vpx_hz_lpf_t16_16w()
    [all …]
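The hits show the loop filter's widen/compute/narrow idiom: interleaving a byte vector with zero zero-extends each byte to a halfword, the 16-bit math then runs without overflow, and `pckev_b` packs the even (low, little-endian) bytes back. A sketch of the idiom under those assumed semantics (MSA, compile with `-mmsa`):

```c
#include <msa.h>

v16u8 widen_add_narrow(v16u8 pix, v8u16 bias) {
    v16i8 zero = { 0 };
    v8u16 lo = (v8u16)__msa_ilvr_b(zero, (v16i8)pix); /* low 8 bytes  -> u16 */
    v8u16 hi = (v8u16)__msa_ilvl_b(zero, (v16i8)pix); /* high 8 bytes -> u16 */
    lo += bias;                     /* overflow-safe 16-bit arithmetic */
    hi += bias;
    /* Take the low byte of each halfword; first operand fills the top half. */
    return (v16u8)__msa_pckev_b((v16i8)hi, (v16i8)lo);
}
```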
D | loopfilter_msa.h
     18  v16i8 p1_m, p0_m, q0_m, q1_m, q0_sub_p0, filt_sign; \
     19  v16i8 filt, filt1, filt2, cnst4b, cnst3b; \
     22  p1_m = (v16i8)__msa_xori_b(p1_in, 0x80); \
     23  p0_m = (v16i8)__msa_xori_b(p0_in, 0x80); \
     24  q0_m = (v16i8)__msa_xori_b(q0_in, 0x80); \
     25  q1_m = (v16i8)__msa_xori_b(q1_in, 0x80); \
     28  filt = filt & (v16i8)hev_in; \
     34  q0_sub_p0_r = __msa_dotp_s_h((v16i8)q0_sub_p0_r, (v16i8)cnst3h); \
     40  filt = __msa_pckev_b((v16i8)filt_r, (v16i8)filt_r); \
     42  filt = filt & (v16i8)mask_in; \
    [all …]
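The `xori 0x80` lines implement the classic bias trick: XOR with 0x80 maps unsigned pixels 0..255 onto signed -128..127, so the filter value can be computed with signed saturating byte arithmetic and mapped back the same way. A sketch under those assumptions (`subs_s_b` chosen for illustration; the real macro computes a more involved clamp):

```c
#include <msa.h>

v16u8 signed_domain_diff(v16u8 q0, v16u8 p0) {
    v16i8 q0_m = (v16i8)__msa_xori_b(q0, 0x80);  /* to signed domain   */
    v16i8 p0_m = (v16i8)__msa_xori_b(p0, 0x80);
    v16i8 d = __msa_subs_s_b(q0_m, p0_m);        /* saturating q0 - p0 */
    return __msa_xori_b((v16u8)d, 0x80);         /* back to unsigned   */
}
```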
D | vpx_convolve8_msa.c
     29  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; in common_hv_8ht_8vt_4w_msa()
     30  v16i8 filt_hz0, filt_hz1, filt_hz2, filt_hz3; in common_hv_8ht_8vt_4w_msa()
     65  out2 = (v8i16)__msa_ilvev_b((v16i8)hz_out5, (v16i8)hz_out4); in common_hv_8ht_8vt_4w_msa()
     74  hz_out6 = (v8i16)__msa_sldi_b((v16i8)hz_out7, (v16i8)hz_out5, 8); in common_hv_8ht_8vt_4w_msa()
     75  out3 = (v8i16)__msa_ilvev_b((v16i8)hz_out7, (v16i8)hz_out6); in common_hv_8ht_8vt_4w_msa()
     81  hz_out8 = (v8i16)__msa_sldi_b((v16i8)hz_out9, (v16i8)hz_out7, 8); in common_hv_8ht_8vt_4w_msa()
     82  out4 = (v8i16)__msa_ilvev_b((v16i8)hz_out9, (v16i8)hz_out8); in common_hv_8ht_8vt_4w_msa()
    103  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; in common_hv_8ht_8vt_8w_msa()
    104  v16i8 filt_hz0, filt_hz1, filt_hz2, filt_hz3; in common_hv_8ht_8vt_8w_msa()
    156  out3 = (v8i16)__msa_ilvev_b((v16i8)hz_out7, (v16i8)hz_out6); in common_hv_8ht_8vt_8w_msa()
    [all …]
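Line 74's `sldi_b` rebuilds the horizontal output for an in-between row out of its two neighbors: the instruction extracts a 16-byte window from the concatenation of its operands, here offset by 8 bytes. A sketch that mirrors that one call (operand order copied from the source; the window description is my reading of the intrinsic, not a spec quote):

```c
#include <msa.h>

/* hz_out6: 16-byte window spanning hz_out5:hz_out7, offset by 8 bytes. */
v8i16 middle_row(v8i16 hz_out7, v8i16 hz_out5) {
    return (v8i16)__msa_sldi_b((v16i8)hz_out7, (v16i8)hz_out5, 8);
}
```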
D | macros_msa.h
     21  #define LD_SB(...) LD_B(v16i8, __VA_ARGS__)
     32  #define ST_SB(...) ST_B(v16i8, __VA_ARGS__)
    301  #define LD_SB2(...) LD_B2(v16i8, __VA_ARGS__)
    314  #define LD_SB4(...) LD_B4(v16i8, __VA_ARGS__)
    321  #define LD_SB5(...) LD_B5(v16i8, __VA_ARGS__)
    328  #define LD_SB7(...) LD_B7(v16i8, __VA_ARGS__)
    336  #define LD_SB8(...) LD_B8(v16i8, __VA_ARGS__)
    608  v16i8 zero_m = { 0 }; \
    609  out0 = (RTYPE)__msa_sldi_b((v16i8)zero_m, (v16i8)in0, slide_val); \
    610  out1 = (RTYPE)__msa_sldi_b((v16i8)zero_m, (v16i8)in1, slide_val); \
    [all …]
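`LD_SB`/`ST_SB` pin the generic byte load/store macros to the signed `v16i8` type; the numbered variants batch strided accesses. A plausible minimal layering (an assumption: the real header has more variants and compiler workarounds; `__msa_ld_b`/`__msa_st_b` tolerate unaligned addresses):

```c
#include <msa.h>

#define LD_B(RTYPE, psrc) ((RTYPE)__msa_ld_b((const void *)(psrc), 0))
#define ST_B(RTYPE, in, pdst) __msa_st_b((v16i8)(in), (void *)(pdst), 0)
#define LD_SB(...) LD_B(v16i8, __VA_ARGS__)
#define ST_SB(...) ST_B(v16i8, __VA_ARGS__)

/* The numbered variants just batch strided loads: */
#define LD_SB2(psrc, stride, out0, out1) \
  {                                      \
    out0 = LD_SB(psrc);                  \
    out1 = LD_SB((psrc) + (stride));     \
  }
```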
D | vpx_convolve8_avg_msa.c
     23  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; in common_hv_8ht_8vt_and_aver_dst_4w_msa()
     25  v16i8 filt_hz0, filt_hz1, filt_hz2, filt_hz3; in common_hv_8ht_8vt_and_aver_dst_4w_msa()
     59  vec2 = (v8i16)__msa_ilvev_b((v16i8)hz_out5, (v16i8)hz_out4); in common_hv_8ht_8vt_and_aver_dst_4w_msa()
     69  hz_out6 = (v8i16)__msa_sldi_b((v16i8)hz_out7, (v16i8)hz_out5, 8); in common_hv_8ht_8vt_and_aver_dst_4w_msa()
     70  vec3 = (v8i16)__msa_ilvev_b((v16i8)hz_out7, (v16i8)hz_out6); in common_hv_8ht_8vt_and_aver_dst_4w_msa()
     76  hz_out8 = (v8i16)__msa_sldi_b((v16i8)hz_out9, (v16i8)hz_out7, 8); in common_hv_8ht_8vt_and_aver_dst_4w_msa()
     77  vec4 = (v8i16)__msa_ilvev_b((v16i8)hz_out9, (v16i8)hz_out8); in common_hv_8ht_8vt_and_aver_dst_4w_msa()
    105  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; in common_hv_8ht_8vt_and_aver_dst_8w_msa()
    106  v16i8 filt_hz0, filt_hz1, filt_hz2, filt_hz3; in common_hv_8ht_8vt_and_aver_dst_8w_msa()
    159  out3 = (v8i16)__msa_ilvev_b((v16i8)hz_out7, (v16i8)hz_out6); in common_hv_8ht_8vt_and_aver_dst_8w_msa()
    [all …]
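Same horizontal+vertical 8-tap pipeline as `vpx_convolve8_msa.c`, with one extra output stage: the `_and_aver_dst_` functions round-average the filtered bytes into what the destination already holds, as `vpx_convolve8_avg` requires. Presumably via the byte-averaging instruction; a sketch:

```c
#include <msa.h>

v16u8 aver_dst(v16u8 filtered, v16u8 dst) {
    return __msa_aver_u_b(filtered, dst);  /* per byte: (a + b + 1) >> 1 */
}
```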
D | vpx_convolve_msa.h
     23  tmp0 = __msa_dotp_s_h((v16i8)vec0, (v16i8)filt0); \
     24  tmp0 = __msa_dpadd_s_h(tmp0, (v16i8)vec1, (v16i8)filt1); \
     25  tmp1 = __msa_dotp_s_h((v16i8)vec2, (v16i8)filt2); \
     26  tmp1 = __msa_dpadd_s_h(tmp1, (v16i8)vec3, (v16i8)filt3); \
     34  v16i8 vec0_m, vec1_m, vec2_m, vec3_m; \
     52  v16i8 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m; \
     70  v16i8 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m; \
    104  tmp_m = (v16u8)__msa_pckev_b((v16i8)in0, (v16i8)in1); \
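This macro is the 8-tap kernel itself: each `vecN` carries byte pairs for two adjacent taps, `dotp_s_h` multiplies a pair and sums it into a halfword lane, and `dpadd_s_h` accumulates the next pair on top. A function-form sketch of the same dataflow (my names; the header's macro combines the partial sums per variant):

```c
#include <msa.h>

v8i16 filt_8tap(v16i8 vec0, v16i8 vec1, v16i8 vec2, v16i8 vec3,
                v16i8 filt0, v16i8 filt1, v16i8 filt2, v16i8 filt3) {
    v8i16 tmp0 = __msa_dotp_s_h(vec0, filt0);   /* taps 0,1 */
    tmp0 = __msa_dpadd_s_h(tmp0, vec1, filt1);  /* + taps 2,3 */
    v8i16 tmp1 = __msa_dotp_s_h(vec2, filt2);   /* taps 4,5 */
    tmp1 = __msa_dpadd_s_h(tmp1, vec3, filt3);  /* + taps 6,7 */
    return __msa_adds_s_h(tmp0, tmp1);          /* saturating sum */
}
```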
D | sub_pixel_variance_msa.c
    406  v16i8 src0, src1, src2, src3; in sub_pixel_sse_diff_4width_h_msa()
    407  v16i8 mask = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 }; in sub_pixel_sse_diff_4width_h_msa()
    429  src0 = (v16i8)__msa_ilvev_d((v2i64)src2, (v2i64)src0); in sub_pixel_sse_diff_4width_h_msa()
    449  v16i8 src0, src1, src2, src3; in sub_pixel_sse_diff_8width_h_msa()
    450  v16i8 mask = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 }; in sub_pixel_sse_diff_8width_h_msa()
    493  v16i8 src0, src1, src2, src3, src4, src5, src6, src7; in sub_pixel_sse_diff_16width_h_msa()
    494  v16i8 mask = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 }; in sub_pixel_sse_diff_16width_h_msa()
    616  out = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0); in sub_pixel_sse_diff_4width_v_msa()
    708  out0 = (v16u8)__msa_pckev_b((v16i8)tmp1, (v16i8)tmp0); in sub_pixel_sse_diff_16width_v_msa()
    714  out1 = (v16u8)__msa_pckev_b((v16i8)tmp3, (v16i8)tmp2); in sub_pixel_sse_diff_16width_v_msa()
    [all …]
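The `{ 0, 1, 1, 2, ... }` mask gathers the overlapping pixel pairs (x, x+1) that a 2-tap horizontal filter needs, so a single byte dot product evaluates the whole row. A sketch of that use (the `vshf_b` + `dotp_u_h` pairing is assumed from context; passing the same source twice sidesteps the two-operand index convention):

```c
#include <msa.h>

v8u16 bilinear_h(v16u8 row, v16u8 filt /* {w0, w1} repeated 8x */) {
    v16i8 mask = { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8 };
    v16u8 pairs = (v16u8)__msa_vshf_b(mask, (v16i8)row, (v16i8)row);
    return __msa_dotp_u_h(pairs, filt);  /* w0*p[x] + w1*p[x+1] per lane */
}
```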
D | vpx_convolve8_vert_msa.c
     19  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; in common_vt_8t_4w_msa()
     20  v16i8 src10_r, src32_r, src54_r, src76_r, src98_r, src21_r, src43_r; in common_vt_8t_4w_msa()
     21  v16i8 src65_r, src87_r, src109_r, src2110, src4332, src6554, src8776; in common_vt_8t_4w_msa()
     22  v16i8 src10998, filt0, filt1, filt2, filt3; in common_vt_8t_4w_msa()
     70  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; in common_vt_8t_8w_msa()
     71  v16i8 src10_r, src32_r, src54_r, src76_r, src98_r, src21_r, src43_r; in common_vt_8t_8w_msa()
     72  v16i8 src65_r, src87_r, src109_r, filt0, filt1, filt2, filt3; in common_vt_8t_8w_msa()
    124  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10; in common_vt_8t_16w_msa()
    125  v16i8 filt0, filt1, filt2, filt3; in common_vt_8t_16w_msa()
    126  v16i8 src10_r, src32_r, src54_r, src76_r, src98_r, src21_r, src43_r; in common_vt_8t_16w_msa()
    [all …]
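The `srcNM_r` names encode the vertical filter's row pairing: `ilvr_b` interleaves two consecutive rows so each halfword lane holds the (row N, row M) byte pair consumed by one tap-pair dot product. Sketch (operand order copied from the `src10_r` convention above; my function, not the file's):

```c
#include <msa.h>

v8i16 vt_tap_pair(v16i8 row0, v16i8 row1, v16i8 filt01 /* {f0,f1} pairs */) {
    v16i8 src10_r = __msa_ilvr_b(row1, row0);  /* interleave right halves */
    return __msa_dotp_s_h(src10_r, filt01);    /* f0*row0 + f1*row1 per lane */
}
```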
D | subtract_msa.c
     19  v16i8 src = { 0 }; in sub_blk_4x4_msa()
     20  v16i8 pred = { 0 }; in sub_blk_4x4_msa()
     38  v16i8 src = { 0 }; in sub_blk_8x8_msa()
     39  v16i8 pred = { 0 }; in sub_blk_8x8_msa()
     62  v16i8 src0, src1, src2, src3, src4, src5, src6, src7; in sub_blk_16x16_msa()
     63  v16i8 pred0, pred1, pred2, pred3, pred4, pred5, pred6, pred7; in sub_blk_16x16_msa()
    121  v16i8 src0, src1, src2, src3, src4, src5, src6, src7; in sub_blk_32x32_msa()
    122  v16i8 pred0, pred1, pred2, pred3, pred4, pred5, pred6, pred7; in sub_blk_32x32_msa()
    183  v16i8 src0, src1, src2, src3, src4, src5, src6, src7; in sub_blk_64x64_msa()
    184  v16i8 pred0, pred1, pred2, pred3, pred4, pred5, pred6, pred7; in sub_blk_64x64_msa()
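These functions compute the prediction residual `src - pred` for whole blocks. A sketch of the likely per-16-pixel step: interleave the two byte vectors, then let the horizontal subtract produce widened signed differences directly (assumed `hsub_u_h` semantics: odd-position byte minus even-position byte, per halfword):

```c
#include <msa.h>

void residual_16(v16u8 src, v16u8 pred, v8i16 *diff_lo, v8i16 *diff_hi) {
    v16u8 lo = (v16u8)__msa_ilvr_b((v16i8)src, (v16i8)pred); /* pred,src pairs */
    v16u8 hi = (v16u8)__msa_ilvl_b((v16i8)src, (v16i8)pred);
    *diff_lo = __msa_hsub_u_h(lo, lo);  /* src - pred, widened to i16 */
    *diff_hi = __msa_hsub_u_h(hi, hi);
}
```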
D | intrapred_msa.c
    156  v16i8 store, src = { 0 }; in intra_predict_dc_4x4_msa()
    168  store = __msa_splati_b((v16i8)sum_w, 0); in intra_predict_dc_4x4_msa()
    177  v16i8 store, data = { 0 }; in intra_predict_dc_tl_4x4_msa()
    182  data = (v16i8)__msa_insert_w((v4i32)data, 0, val0); in intra_predict_dc_tl_4x4_msa()
    186  store = __msa_splati_b((v16i8)sum_w, 0); in intra_predict_dc_tl_4x4_msa()
    194  const v16i8 store = __msa_ldi_b(128); in intra_predict_128dc_4x4_msa()
    205  v16i8 store; in intra_predict_dc_8x8_msa()
    220  store = __msa_splati_b((v16i8)sum_w, 0); in intra_predict_dc_8x8_msa()
    231  v16i8 store; in intra_predict_dc_tl_8x8_msa()
    243  store = __msa_splati_b((v16i8)sum_w, 0); in intra_predict_dc_tl_8x8_msa()
    [all …]
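DC intra prediction reduces the border pixels to one rounded average and paints the block with it; `splati_b` does the painting by broadcasting byte lane 0, and the fixed 128-DC case skips the sum entirely with `ldi_b`. A sketch of those two endings:

```c
#include <msa.h>

v16i8 dc_store(v4i32 sum_w /* lane 0 holds the rounded DC value */) {
    return __msa_splati_b((v16i8)sum_w, 0);  /* broadcast byte 0 to all lanes */
}

v16i8 dc_128_store(void) {
    return __msa_ldi_b(128);                 /* every byte = 0x80 */
}
```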

/external/llvm/lib/Target/PowerPC/
D | PPCInstrAltivec.td
     33  // Since we canonicalize buildvectors to v16i8, all vnots "-1" operands will be
     36  (xor node:$in, (bitconvert (v16i8 immAllOnesV)))>;
     79  (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
     83  (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
     87  (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
     91  (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
     95  (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
     99  (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
    105  (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
    133  (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
    [all …]
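The `vnot` fragment works because an all-ones vector has the same bits whatever the lane type, so canonicalizing the constant to `v16i8` lets one pattern cover every element width. The same identity at the C level (generic vector extensions; on AltiVec the xor-with-ones form can lower to a single `vnor`):

```c
typedef signed char v16i8 __attribute__((vector_size(16)));

v16i8 vnot(v16i8 in) {
    const v16i8 ones = { -1, -1, -1, -1, -1, -1, -1, -1,
                         -1, -1, -1, -1, -1, -1, -1, -1 };
    return in ^ ones;   /* bitwise NOT, lane-type agnostic */
}
```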

/external/libvpx/libvpx/vp8/common/mips/msa/
D | vp8_macros_msa.h
     21  #define LD_SB(...) LD_B(v16i8, __VA_ARGS__)
     33  #define ST_SB(...) ST_B(v16i8, __VA_ARGS__)
    292  #define LD_SB2(...) LD_B2(v16i8, __VA_ARGS__)
    300  #define LD_SB3(...) LD_B3(v16i8, __VA_ARGS__)
    308  #define LD_SB4(...) LD_B4(v16i8, __VA_ARGS__)
    316  #define LD_SB5(...) LD_B5(v16i8, __VA_ARGS__)
    325  #define LD_SB8(...) LD_B8(v16i8, __VA_ARGS__)
    376  #define ST_SB4(...) ST_B4(v16i8, __VA_ARGS__)
    533  v16i8 zero_m = { 0 }; \
    535  out0 = (RTYPE)__msa_sldi_b((v16i8)zero_m, (v16i8)in0, slide_val); \
    [all …]
D | sixtap_filter_msa.c
     40  v16i8 vec0_m, vec1_m, vec2_m; \
     59  v16i8 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m; \
     74  v16i8 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m; \
     94  tmp0 = __msa_dotp_s_h((v16i8)vec0, (v16i8)filt0); \
     95  tmp0 = __msa_dpadd_s_h(tmp0, (v16i8)vec1, (v16i8)filt1); \
    102  v16i8 vec0_m, vec1_m; \
    118  v16i8 vec0_m, vec1_m, vec2_m, vec3_m; \
    130  v16i8 vec0_m, vec1_m, vec2_m, vec3_m; \
    146  v16i8 src0, src1, src2, src3, filt0, filt1, filt2; in common_hz_6t_4x4_msa()
    173  v16i8 src0, src1, src2, src3, filt0, filt1, filt2; in common_hz_6t_4x8_msa()
    [all …]
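VP8's filters are 6-tap rather than 8, so the kernel needs only three byte-pair vectors: one `dotp_s_h` plus two `dpadd_s_h` accumulations. A sketch in the same style as the 8-tap one above (my function, not the file's macro):

```c
#include <msa.h>

v8i16 filt_6tap(v16i8 vec0, v16i8 vec1, v16i8 vec2,
                v16i8 filt0, v16i8 filt1, v16i8 filt2) {
    v8i16 acc = __msa_dotp_s_h(vec0, filt0);   /* taps 0,1 */
    acc = __msa_dpadd_s_h(acc, vec1, filt1);   /* + taps 2,3 */
    return __msa_dpadd_s_h(acc, vec2, filt2);  /* + taps 4,5 */
}
```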
D | mfqe_msa.c
     22  v16i8 src0 = { 0 }; in filter_by_weight8x8_msa()
     23  v16i8 src1 = { 0 }; in filter_by_weight8x8_msa()
     24  v16i8 dst0 = { 0 }; in filter_by_weight8x8_msa()
     25  v16i8 dst1 = { 0 }; in filter_by_weight8x8_msa()
     52  dst0 = (v16i8)__msa_pckev_b((v16i8)res_h_l, (v16i8)res_h_r); in filter_by_weight8x8_msa()
     63  dst1 = (v16i8)__msa_pckev_b((v16i8)res_h_l, (v16i8)res_h_r); in filter_by_weight8x8_msa()
     75  v16i8 src0, src1, src2, src3; in filter_by_weight16x16_msa()
     76  v16i8 dst0, dst1, dst2, dst3; in filter_by_weight16x16_msa()
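MFQE blends the current block toward the co-located block with complementary weights, in 16-bit precision, then `pckev_b` truncates the halfword results back to pixels. A sketch under the assumption that the weights sum to 16 (VP8's MFQE precision is 4 bits):

```c
#include <msa.h>

v16i8 filter_by_weight(v8i16 src_r, v8i16 src_l,  /* src widened to i16 */
                       v8i16 dst_r, v8i16 dst_l,  /* dst widened to i16 */
                       int src_weight) {
    const v8i16 src_wt = __msa_fill_h(src_weight);
    const v8i16 dst_wt = __msa_fill_h(16 - src_weight);
    v8i16 res_h_r = __msa_srai_h(src_r * src_wt + dst_r * dst_wt, 4);
    v8i16 res_h_l = __msa_srai_h(src_l * src_wt + dst_l * dst_wt, 4);
    return __msa_pckev_b((v16i8)res_h_l, (v16i8)res_h_r);  /* back to bytes */
}
```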
D | loopfilter_filters_msa.c
     21  p1_a_sub_q1 = (v16u8)__msa_srli_b((v16i8)p1_a_sub_q1, 1); \
     30  v16i8 p1_m, p0_m, q0_m, q1_m, q0_sub_p0, filt_sign; \
     31  v16i8 filt, filt1, filt2, cnst4b, cnst3b; \
     34  p1_m = (v16i8)__msa_xori_b(p1_in_out, 0x80); \
     35  p0_m = (v16i8)__msa_xori_b(p0_in_out, 0x80); \
     36  q0_m = (v16i8)__msa_xori_b(q0_in_out, 0x80); \
     37  q1_m = (v16i8)__msa_xori_b(q1_in_out, 0x80); \
     41  filt = filt & (v16i8)hev_in; \
     48  q0_sub_p0_r = __msa_dotp_s_h((v16i8)q0_sub_p0_r, (v16i8)cnst3h); \
     54  q0_sub_p0_l = __msa_dotp_s_h((v16i8)q0_sub_p0_l, (v16i8)cnst3h); \
    [all …]
D | bilinear_filter_msa.c
     41  v16i8 src0, src1, src2, src3, mask; in common_hz_2t_4x4_msa()
     63  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask; in common_hz_2t_4x8_msa()
     64  v16i8 res0, res1, res2, res3; in common_hz_2t_4x8_msa()
    104  v16i8 src0, src1, src2, src3, mask; in common_hz_2t_8x4_msa()
    127  v16i8 src0, src1, src2, src3, mask, out0, out1; in common_hz_2t_8x8mult_msa()
    205  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask; in common_hz_2t_16w_msa()
    270  v16i8 src0, src1, src2, src3, src4; in common_vt_2t_4x4_msa()
    271  v16i8 src10_r, src32_r, src21_r, src43_r, src2110, src4332; in common_vt_2t_4x4_msa()
    287  src2110 = __msa_pckev_b((v16i8)tmp1, (v16i8)tmp0); in common_vt_2t_4x4_msa()
    295  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8; in common_vt_2t_4x8_msa()
    [all …]
D | postproc_msa.c
    294  ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 0); in postproc_down_across_chroma_msa()
    298  ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 1); in postproc_down_across_chroma_msa()
    302  ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 2); in postproc_down_across_chroma_msa()
    306  ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 3); in postproc_down_across_chroma_msa()
    310  ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 4); in postproc_down_across_chroma_msa()
    314  ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 5); in postproc_down_across_chroma_msa()
    325  ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 6); in postproc_down_across_chroma_msa()
    336  ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 7); in postproc_down_across_chroma_msa()
    450  ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 0); in postproc_down_across_luma_msa()
    454  ref_temp = (v16u8)__msa_splati_b((v16i8)ref, 1); in postproc_down_across_luma_msa()
    [all …]
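The lane index of `splati_b` is an immediate, so it must be a compile-time constant; that is why the post-processor unrolls lanes 0..7 by hand instead of looping. One instance:

```c
#include <msa.h>

v16u8 broadcast_lane3(v16u8 ref) {
    return (v16u8)__msa_splati_b((v16i8)ref, 3);  /* all 16 bytes = ref[3] */
}
```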

/external/libvpx/libvpx/vp8/encoder/mips/msa/
D | denoising_msa.c
    123  temp2_h = (v8i16)__msa_pckev_b((v16i8)temp3_h, (v16i8)temp2_h); in vp8_denoiser_filter_msa()
    124  running_avg_y = (v16u8)__msa_pckev_b((v16i8)temp1_h, (v16i8)temp0_h); in vp8_denoiser_filter_msa()
    173  temp2_h = (v8i16)__msa_pckev_b((v16i8)temp3_h, (v16i8)temp2_h); in vp8_denoiser_filter_msa()
    174  running_avg_y = (v16u8)__msa_pckev_b((v16i8)temp1_h, (v16i8)temp0_h); in vp8_denoiser_filter_msa()
    256  running_avg_y = (v16u8)__msa_pckev_b((v16i8)adjust3, in vp8_denoiser_filter_msa()
    257  (v16i8)adjust2); in vp8_denoiser_filter_msa()
    296  running_avg_y = (v16u8)__msa_pckev_b((v16i8)adjust3, in vp8_denoiser_filter_msa()
    297  (v16i8)adjust2); in vp8_denoiser_filter_msa()
    365  v16i8 zero = { 0 }; in vp8_denoiser_filter_uv_msa()
    373  temp0_h = (v8i16)__msa_ilvr_b(zero, (v16i8)sig0); in vp8_denoiser_filter_uv_msa()
    [all …]

/external/libvpx/libvpx/vp9/common/mips/msa/
D | vp9_mfqe_msa.c
     21  v16i8 src0 = { 0 }; in filter_by_weight8x8_msa()
     22  v16i8 src1 = { 0 }; in filter_by_weight8x8_msa()
     23  v16i8 dst0 = { 0 }; in filter_by_weight8x8_msa()
     24  v16i8 dst1 = { 0 }; in filter_by_weight8x8_msa()
     50  dst0 = (v16i8)__msa_pckev_b((v16i8)res_h_l, (v16i8)res_h_r); in filter_by_weight8x8_msa()
     61  dst1 = (v16i8)__msa_pckev_b((v16i8)res_h_l, (v16i8)res_h_r); in filter_by_weight8x8_msa()
     74  v16i8 src0, src1, src2, src3, dst0, dst1, dst2, dst3; in filter_by_weight16x16_msa()

/external/llvm/test/CodeGen/AArch64/
D | arm64-tbl.ll
     13  %tmp3 = call <16 x i8> @llvm.aarch64.neon.tbl1.v16i8(<16 x i8> %A, <16 x i8> %B)
     27  %tmp3 = call <16 x i8> @llvm.aarch64.neon.tbl2.v16i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C)
     41  …%tmp3 = call <16 x i8> @llvm.aarch64.neon.tbl3.v16i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16…
     55  …%tmp3 = call <16 x i8> @llvm.aarch64.neon.tbl4.v16i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16…
     60  declare <16 x i8> @llvm.aarch64.neon.tbl1.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
     62  declare <16 x i8> @llvm.aarch64.neon.tbl2.v16i8(<16 x i8>, <16 x i8>, <16 x i8>) nounwind readnone
     64  declare <16 x i8> @llvm.aarch64.neon.tbl3.v16i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>) nounwin…
     66  declare <16 x i8> @llvm.aarch64.neon.tbl4.v16i8(<16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i…
     78  %tmp3 = call <16 x i8> @llvm.aarch64.neon.tbx1.v16i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C)
     92  …%tmp3 = call <16 x i8> @llvm.aarch64.neon.tbx2.v16i8(<16 x i8> %A, <16 x i8> %B, <16 x i8> %C, <16…
    [all …]
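From C, these intrinsics are normally reached through `<arm_neon.h>`; clang lowers `vqtbl1q_u8` to the same `llvm.aarch64.neon.tbl1` call the test writes by hand. A sketch of the one-table case (for `tbl`, out-of-range indices produce 0; the `tbx` forms keep the destination byte instead):

```c
#include <arm_neon.h>

uint8x16_t byte_shuffle(uint8x16_t table, uint8x16_t idx) {
    return vqtbl1q_u8(table, idx);  /* result[i] = table[idx[i]], or 0 */
}
```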

/external/clang/test/CodeGen/
D | systemz-abi-vector.c
     27  typedef __attribute__((vector_size(16))) char v16i8; typedef
     38  unsigned int align = __alignof__ (v16i8);
     58  v16i8 pass_v16i8(v16i8 arg) { return arg; } in pass_v16i8()
    153  struct agg_v16i8 { v16i8 a; };
    317  v16i8 va_v16i8(__builtin_va_list l) { return __builtin_va_arg(l, v16i8); } in va_v16i8()
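A self-contained version of what the ABI test probes, minus the FileCheck lines the hits elide: with `vector_size(16)` the generic vector maps onto the target's 16-byte SIMD register, and the test pins down how SystemZ passes, aligns, and `va_arg`s it. The vararg caller below is a hypothetical addition for illustration:

```c
#include <stdarg.h>

typedef __attribute__((vector_size(16))) char v16i8;

v16i8 pass_v16i8(v16i8 arg) { return arg; }   /* passed per the vector ABI */

v16i8 first_vector_vararg(int n, ...) {
    va_list l;
    va_start(l, n);
    v16i8 v = va_arg(l, v16i8);               /* the path va_v16i8 checks */
    va_end(l);
    return v;
}
```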

/external/llvm/test/CodeGen/SystemZ/
D | vec-const-01.ll
      1  ; Test vector byte masks, v16i8 version.
     57  ; Test an all-zeros v2i8 that gets promoted to v16i8.
     65  ; Test a mixed v2i8 that gets promoted to v16i8 (mask 0x8000).
     73  ; Test an all-zeros v4i8 that gets promoted to v16i8.
     81  ; Test a mixed v4i8 that gets promoted to v16i8 (mask 0x9000).
     89  ; Test an all-zeros v8i8 that gets promoted to v16i8.
     97  ; Test a mixed v8i8 that gets promoted to v16i8 (mask 0xE500).
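The masks name a `vgbm` immediate: each of its 16 bits selects 0x00 or 0xff for one result byte, so a v2i8/v4i8/v8i8 constant promoted to `v16i8` only populates the leading bits. Assuming bit 0 of the immediate maps to byte 0 (my reconstruction; SystemZ is big-endian), mask 0x8000 is:

```c
typedef unsigned char v16u8 __attribute__((vector_size(16)));

/* Byte mask 0x8000: only byte 0 set -- a mixed v2i8 <0xff, 0x00>
 * after promotion to v16i8. */
v16u8 mask_8000(void) {
    return (v16u8){ 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
}
```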

/external/llvm/lib/Target/X86/
D | X86TargetTransformInfo.cpp
    178  { ISD::SHL, MVT::v16i8, 1 }, in getArithmeticInstrCost()
    179  { ISD::SRL, MVT::v16i8, 2 }, in getArithmeticInstrCost()
    180  { ISD::SRA, MVT::v16i8, 2 }, in getArithmeticInstrCost()
    246  { ISD::SHL, MVT::v16i8, 1 }, // psllw. in getArithmeticInstrCost()
    255  { ISD::SRL, MVT::v16i8, 1 }, // psrlw. in getArithmeticInstrCost()
    264  { ISD::SRA, MVT::v16i8, 4 }, // psrlw, pand, pxor, psubb. in getArithmeticInstrCost()
    321  { ISD::SHL, MVT::v16i8, 26 }, // cmpgtb sequence. in getArithmeticInstrCost()
    330  { ISD::SRL, MVT::v16i8, 26 }, // cmpgtb sequence. in getArithmeticInstrCost()
    339  { ISD::SRA, MVT::v16i8, 54 }, // unpacked cmpgtb sequence. in getArithmeticInstrCost()
    354  { ISD::SDIV, MVT::v16i8, 16*20 }, in getArithmeticInstrCost()
    [all …]
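The braces are rows of `(opcode, value type) -> cost` tables, one per ISA level; the comments record the instruction sequence each cost models (byte shifts via `psllw` plus masking, or a long `cmpgtb` sequence where SSE2 lacks byte-granular shifts). A simplified sketch of how such a table is consumed (LLVM's real helper is `CostTableLookup` in `llvm/CodeGen/CostTable.h`):

```c
typedef struct { int opcode; int vt; int cost; } CostTblEntry;

/* Linear scan; the tables are small and scanned once per query. */
int cost_table_lookup(const CostTblEntry *tbl, int n, int opcode, int vt) {
    for (int i = 0; i < n; i++)
        if (tbl[i].opcode == opcode && tbl[i].vt == vt)
            return tbl[i].cost;
    return -1;  /* miss: caller falls back to the default cost model */
}
```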

/external/llvm/test/CodeGen/X86/
D | vshift-6.ll
      8  ; This happens for example when lowering a shift left of a MVT::v16i8 vector.
     12  ; B = BITCAST MVT::v16i8, A
     16  ; D = BITCAST MVT::v16i8, C
     22  ; Where 'r' is a vector of type MVT::v16i8, and
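The comment block describes the standard x86 workaround for the missing byte-granular shift: bitcast to `v8i16`, shift, bitcast back, and mask off the bits that leaked across byte boundaries. The same trick at the source level (generic vector extensions; a uniform shift by 3 as the example):

```c
typedef unsigned char  v16u8 __attribute__((vector_size(16)));
typedef unsigned short v8u16 __attribute__((vector_size(16)));

v16u8 shl3_bytes(v16u8 x) {
    v8u16 wide = (v8u16)x;        /* B = BITCAST v8i16, A   */
    wide = wide << 3;             /* 16-bit shift (psllw)   */
    v16u8 r = (v16u8)wide;        /* D = BITCAST v16i8, C   */
    const v16u8 keep = { 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8,
                         0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8, 0xf8 };
    return r & keep;              /* clear bits shifted in from the neighbor byte */
}
```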