
Searched refs:vec0 (Results 1 – 25 of 38) sorted by relevance

/external/libvpx/libvpx/vpx_dsp/mips/
idct32x32_msa.c
43 v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3; in idct32x8_row_even_process_store() local
52 BUTTERFLY_4(reg1, reg7, reg3, reg5, vec1, vec3, vec2, vec0); in idct32x8_row_even_process_store()
53 DOTP_CONST_PAIR(vec2, vec0, cospi_16_64, cospi_16_64, loc2, loc3); in idct32x8_row_even_process_store()
60 BUTTERFLY_4(reg4, reg0, reg2, reg6, vec1, vec3, vec2, vec0); in idct32x8_row_even_process_store()
61 BUTTERFLY_4(vec0, vec1, loc1, loc0, stp3, stp0, stp7, stp4); in idct32x8_row_even_process_store()
71 vec0 = reg0 + reg4; in idct32x8_row_even_process_store()
79 reg3 = vec0; in idct32x8_row_even_process_store()
90 vec0 = reg0 - reg6; in idct32x8_row_even_process_store()
95 DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, reg6, reg1); in idct32x8_row_even_process_store()
127 v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3; in idct32x8_row_odd_process_store() local
[all …]
vpx_convolve8_avg_horiz_msa.c
58 v8i16 filt, vec0, vec1, vec2, vec3; in common_hz_8t_and_aver_dst_4x8_msa() local
76 filt0, filt1, filt2, filt3, vec0, vec1); in common_hz_8t_and_aver_dst_4x8_msa()
81 SRARI_H4_SH(vec0, vec1, vec2, vec3, FILTER_BITS); in common_hz_8t_and_aver_dst_4x8_msa()
82 SAT_SH4_SH(vec0, vec1, vec2, vec3, 7); in common_hz_8t_and_aver_dst_4x8_msa()
83 PCKEV_B4_UB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3, res0, res1, res2, in common_hz_8t_and_aver_dst_4x8_msa()
155 v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7; in common_hz_8t_and_aver_dst_16w_msa() local
175 VSHF_B4_SH(src0, src0, mask0, mask1, mask2, mask3, vec0, vec4, vec8, vec12); in common_hz_8t_and_aver_dst_16w_msa()
181 DOTP_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1, in common_hz_8t_and_aver_dst_16w_msa()
185 DPADD_SB4_SH(vec4, vec5, vec6, vec7, filt1, filt1, filt1, filt1, vec0, vec1, in common_hz_8t_and_aver_dst_16w_msa()
189 ADDS_SH4_SH(vec0, vec8, vec1, vec9, vec2, vec10, vec3, vec11, out0, out1, in common_hz_8t_and_aver_dst_16w_msa()
[all …]
vpx_convolve8_horiz_msa.c
321 v16u8 filt0, vec0, vec1, res0, res1; in common_hz_2t_4x4_msa() local
331 VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1); in common_hz_2t_4x4_msa()
332 DOTP_UB2_UH(vec0, vec1, filt0, filt0, vec2, vec3); in common_hz_2t_4x4_msa()
341 v16u8 vec0, vec1, vec2, vec3, filt0; in common_hz_2t_4x8_msa() local
353 VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1); in common_hz_2t_4x8_msa()
355 DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec4, vec5, in common_hz_2t_4x8_msa()
380 v8u16 vec0, vec1, vec2, vec3, filt; in common_hz_2t_8x4_msa() local
389 VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1); in common_hz_2t_8x4_msa()
391 DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, vec0, vec1, in common_hz_2t_8x4_msa()
393 SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS); in common_hz_2t_8x4_msa()
[all …]
vpx_convolve8_msa.c
105 v16u8 mask0, mask1, mask2, mask3, vec0, vec1; in common_hv_8ht_8vt_8w_msa() local
179 vec0 = PCKEV_XORI128_UB(tmp0, tmp1); in common_hv_8ht_8vt_8w_msa()
181 ST8x4_UB(vec0, vec1, dst, dst_stride); in common_hv_8ht_8vt_8w_msa()
238 v16u8 filt_vt, filt_hz, vec0, vec1, res0, res1; in common_hv_2ht_2vt_4x4_msa() local
257 ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1); in common_hv_2ht_2vt_4x4_msa()
258 DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp0, tmp1); in common_hv_2ht_2vt_4x4_msa()
270 v16u8 filt_hz, filt_vt, vec0, vec1, vec2, vec3; in common_hv_2ht_2vt_4x8_msa() local
296 ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1); in common_hv_2ht_2vt_4x8_msa()
298 DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt_vt, filt_vt, filt_vt, filt_vt, in common_hv_2ht_2vt_4x8_msa()
326 v16u8 filt_hz, filt_vt, vec0, vec1, vec2, vec3; in common_hv_2ht_2vt_8x4_msa() local
[all …]
sub_pixel_variance_msa.c
408 v8u16 vec0, vec1, vec2, vec3; in sub_pixel_sse_diff_4width_h_msa() local
421 VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1); in sub_pixel_sse_diff_4width_h_msa()
423 DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, in sub_pixel_sse_diff_4width_h_msa()
424 vec0, vec1, vec2, vec3); in sub_pixel_sse_diff_4width_h_msa()
425 SRARI_H4_UH(vec0, vec1, vec2, vec3, FILTER_BITS); in sub_pixel_sse_diff_4width_h_msa()
426 PCKEV_B4_SB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3, in sub_pixel_sse_diff_4width_h_msa()
451 v8u16 vec0, vec1, vec2, vec3; in sub_pixel_sse_diff_8width_h_msa() local
465 VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1); in sub_pixel_sse_diff_8width_h_msa()
467 DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, in sub_pixel_sse_diff_8width_h_msa()
468 vec0, vec1, vec2, vec3); in sub_pixel_sse_diff_8width_h_msa()
[all …]
vpx_convolve8_avg_msa.c
27 v8i16 hz_out7, hz_out8, hz_out9, res0, res1, vec0, vec1, vec2, vec3, vec4; in common_hv_8ht_8vt_and_aver_dst_4w_msa() local
58 ILVEV_B2_SH(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1); in common_hv_8ht_8vt_and_aver_dst_4w_msa()
71 res0 = FILT_8TAP_DPADD_S_H(vec0, vec1, vec2, vec3, filt_vt0, filt_vt1, in common_hv_8ht_8vt_and_aver_dst_4w_msa()
91 vec0 = vec2; in common_hv_8ht_8vt_and_aver_dst_4w_msa()
252 v16u8 filt_hz, filt_vt, vec0, vec1; in common_hv_2ht_2vt_and_aver_dst_4x4_msa() local
272 ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1); in common_hv_2ht_2vt_and_aver_dst_4x4_msa()
276 DOTP_UB2_UH(vec0, vec1, filt_vt, filt_vt, tmp0, tmp1); in common_hv_2ht_2vt_and_aver_dst_4x4_msa()
290 v16u8 filt_hz, filt_vt, vec0, vec1, vec2, vec3, res0, res1, res2, res3; in common_hv_2ht_2vt_and_aver_dst_4x8_msa() local
321 ILVEV_B2_UB(hz_out0, hz_out1, hz_out2, hz_out3, vec0, vec1); in common_hv_2ht_2vt_and_aver_dst_4x8_msa()
323 DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt_vt, filt_vt, filt_vt, filt_vt, in common_hv_2ht_2vt_and_aver_dst_4x8_msa()
[all …]
loopfilter_8_msa.c
172 v8i16 vec0, vec1, vec2, vec3, vec4; in vpx_lpf_vertical_8_msa() local
198 ILVR_B2_SH(p0_out, p1_out, q1_out, q0_out, vec0, vec1); in vpx_lpf_vertical_8_msa()
199 ILVRL_H2_SH(vec1, vec0, vec2, vec3); in vpx_lpf_vertical_8_msa()
227 ILVR_B2_SH(p1, p2, q0, p0, vec0, vec1); in vpx_lpf_vertical_8_msa()
228 ILVRL_H2_SH(vec1, vec0, vec2, vec3); in vpx_lpf_vertical_8_msa()
257 v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7; in vpx_lpf_vertical_8_dual_msa() local
271 vec0 = (v8i16)__msa_fill_b(*thresh1); in vpx_lpf_vertical_8_dual_msa()
272 thresh = (v16u8)__msa_ilvr_d((v2i64)vec0, (v2i64)thresh); in vpx_lpf_vertical_8_dual_msa()
275 vec0 = (v8i16)__msa_fill_b(*b_limit1); in vpx_lpf_vertical_8_dual_msa()
276 b_limit = (v16u8)__msa_ilvr_d((v2i64)vec0, (v2i64)b_limit); in vpx_lpf_vertical_8_dual_msa()
[all …]
fwd_txfm_msa.c
20 v8i16 vec0, vec1, vec2, vec3, vec4, vec5, cnst0, cnst1, cnst4, cnst5; in fdct8x16_1d_column() local
72 ILVRL_H2_SH(in15, in8, vec1, vec0); in fdct8x16_1d_column()
76 in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0); in fdct8x16_1d_column()
81 in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0); in fdct8x16_1d_column()
84 ILVRL_H2_SH(in14, in9, vec1, vec0); in fdct8x16_1d_column()
88 in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst1); in fdct8x16_1d_column()
93 in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0); in fdct8x16_1d_column()
108 ILVRL_H2_SH(in13, in10, vec1, vec0); in fdct8x16_1d_column()
111 in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0); in fdct8x16_1d_column()
116 in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0); in fdct8x16_1d_column()
[all …]
fwd_dct32x32_msa.c
61 v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7; in fdct8x32_1d_column_even_store() local
68 vec0, vec1, vec2, vec3, in12, in13, in14, in15); in fdct8x32_1d_column_even_store()
75 ADD4(vec0, vec7, vec1, vec6, vec2, vec5, vec3, vec4, in0, in1, in2, in3); in fdct8x32_1d_column_even_store()
87 SUB4(vec0, vec7, vec1, vec6, vec2, vec5, vec3, vec4, vec7, vec6, vec5, vec4); in fdct8x32_1d_column_even_store()
89 ADD2(vec4, vec5, vec7, vec6, vec0, vec1); in fdct8x32_1d_column_even_store()
90 DOTP_CONST_PAIR(vec1, vec0, cospi_28_64, cospi_4_64, temp1, temp0); in fdct8x32_1d_column_even_store()
105 ADD2(in0, in1, in2, in3, vec0, vec7); in fdct8x32_1d_column_even_store()
106 DOTP_CONST_PAIR(vec7, vec0, cospi_30_64, cospi_2_64, temp1, temp0); in fdct8x32_1d_column_even_store()
292 v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7; in fdct8x32_1d_row_even_4x() local
304 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, in fdct8x32_1d_row_even_4x()
[all …]
vpx_convolve8_vert_msa.c
370 v16u8 src0, src1, src2, src3, src4, vec0, vec1, vec2, vec3, filt0; in common_vt_2t_8x4_msa() local
380 ILVR_B2_UB(src1, src0, src2, src1, vec0, vec1); in common_vt_2t_8x4_msa()
382 DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, tmp0, tmp1, in common_vt_2t_8x4_msa()
394 v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0; in common_vt_2t_8x8mult_msa() local
410 ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, vec0, vec1, in common_vt_2t_8x8mult_msa()
414 DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, tmp0, tmp1, in common_vt_2t_8x8mult_msa()
447 v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0; in common_vt_2t_16w_msa() local
462 ILVR_B2_UB(src1, src0, src2, src1, vec0, vec2); in common_vt_2t_16w_msa()
464 DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1); in common_vt_2t_16w_msa()
495 v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0; in common_vt_2t_32w_msa() local
[all …]
vpx_convolve8_avg_vert_msa.c
352 v16u8 dst0, dst1, dst2, dst3, vec0, vec1, vec2, vec3, filt0; in common_vt_2t_and_aver_dst_8x4_msa() local
362 ILVR_B2_UB(src1, src0, src2, src1, vec0, vec1); in common_vt_2t_and_aver_dst_8x4_msa()
364 DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, tmp0, tmp1, in common_vt_2t_and_aver_dst_8x4_msa()
380 v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0; in common_vt_2t_and_aver_dst_8x8mult_msa() local
396 ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, vec0, vec1, in common_vt_2t_and_aver_dst_8x8mult_msa()
400 DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, tmp0, tmp1, in common_vt_2t_and_aver_dst_8x8mult_msa()
440 v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7; in common_vt_2t_and_aver_dst_16w_msa() local
455 ILVR_B2_UB(src1, src0, src2, src1, vec0, vec2); in common_vt_2t_and_aver_dst_16w_msa()
457 DOTP_UB2_UH(vec0, vec1, filt0, filt0, tmp0, tmp1); in common_vt_2t_and_aver_dst_16w_msa()
492 v16u8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, filt0; in common_vt_2t_and_aver_dst_32w_msa() local
[all …]
fwd_txfm_msa.h
291 #define FDCT_POSTPROC_2V_NEG_H(vec0, vec1) { \ argument
295 tp0_m = __msa_clti_s_h(vec0, 0); \
297 vec0 += 1; \
301 vec0 += tp0_m; \
303 vec0 >>= 2; \
318 #define FDCT32_POSTPROC_2V_POS_H(vec0, vec1) { \ argument
322 tp0_m = __msa_clei_s_h(vec0, 0); \
326 vec0 += 1; \
330 vec0 += tp0_m; \
332 vec0 >>= 2; \
intrapred_msa.c
390 v8u16 src_top_left, vec0, vec1, vec2, vec3; in intra_predict_tm_4x4_msa() local
403 HADD_UB4_UH(src0, src1, src2, src3, vec0, vec1, vec2, vec3); in intra_predict_tm_4x4_msa()
404 IPRED_SUBS_UH2_UH(src_top_left, src_top_left, vec0, vec1); in intra_predict_tm_4x4_msa()
406 SAT_UH4_UH(vec0, vec1, vec2, vec3, 7); in intra_predict_tm_4x4_msa()
407 PCKEV_B2_SB(vec1, vec0, vec3, vec2, tmp0, tmp1); in intra_predict_tm_4x4_msa()
418 v8u16 src_top_left, vec0, vec1, vec2, vec3; in intra_predict_tm_8x8_msa() local
434 HADD_UB4_UH(src0, src1, src2, src3, vec0, vec1, vec2, vec3); in intra_predict_tm_8x8_msa()
435 IPRED_SUBS_UH2_UH(src_top_left, src_top_left, vec0, vec1); in intra_predict_tm_8x8_msa()
437 SAT_UH4_UH(vec0, vec1, vec2, vec3, 7); in intra_predict_tm_8x8_msa()
438 PCKEV_B2_SB(vec1, vec0, vec3, vec2, tmp0, tmp1); in intra_predict_tm_8x8_msa()
loopfilter_4_msa.c
81 v8i16 vec0, vec1, vec2, vec3; in vpx_lpf_vertical_4_msa() local
96 ILVR_B2_SH(p0, p1, q1, q0, vec0, vec1); in vpx_lpf_vertical_4_msa()
97 ILVRL_H2_SH(vec1, vec0, vec2, vec3); in vpx_lpf_vertical_4_msa()
/external/libvpx/libvpx/vp8/common/mips/msa/
bilinear_filter_msa.c
42 v16u8 filt0, vec0, vec1, res0, res1; in common_hz_2t_4x4_msa() local
51 VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1); in common_hz_2t_4x4_msa()
52 DOTP_UB2_UH(vec0, vec1, filt0, filt0, vec2, vec3); in common_hz_2t_4x4_msa()
62 v16u8 vec0, vec1, vec2, vec3, filt0; in common_hz_2t_4x8_msa() local
73 VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1); in common_hz_2t_4x8_msa()
75 DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, in common_hz_2t_4x8_msa()
105 v8u16 vec0, vec1, vec2, vec3, filt; in common_hz_2t_8x4_msa() local
113 VSHF_B2_UH(src0, src0, src1, src1, mask, mask, vec0, vec1); in common_hz_2t_8x4_msa()
115 DOTP_UB4_UH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, in common_hz_2t_8x4_msa()
116 vec0, vec1, vec2, vec3); in common_hz_2t_8x4_msa()
[all …]
sixtap_filter_msa.c
90 #define FILT_4TAP_DPADD_S_H(vec0, vec1, filt0, filt1) \ argument
94 tmp0 = __msa_dotp_s_h((v16i8)vec0, (v16i8)filt0); \
558 v16u8 mask0, mask1, mask2, vec0, vec1; in common_hv_6ht_6vt_8w_msa() local
622 vec0 = PCKEV_XORI128_UB(tmp0, tmp1); in common_hv_6ht_6vt_8w_msa()
624 ST8x4_UB(vec0, vec1, dst, dst_stride); in common_hv_6ht_6vt_8w_msa()
969 v8i16 filt, filt_vt0, filt_vt1, tmp0, tmp1, vec0, vec1, vec2; in common_hv_4ht_4vt_4w_msa() local
986 vec0 = (v8i16)__msa_ilvev_b((v16i8)hz_out1, (v16i8)hz_out0); in common_hv_4ht_4vt_4w_msa()
1000 tmp0 = FILT_4TAP_DPADD_S_H(vec0, vec1, filt_vt0, filt_vt1); in common_hv_4ht_4vt_4w_msa()
1015 vec0 = vec2; in common_hv_4ht_4vt_4w_msa()
1030 v8i16 vec0, vec1, vec2, vec3, vec4; in common_hv_4ht_4vt_8w_msa() local
[all …]
/external/llvm/test/CodeGen/AArch64/
arm64-copy-tuple.ll
17 %vec0 = extractvalue { <8 x i8>, <8 x i8> } %vec, 0
20 tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
23 tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
34 %vec0 = extractvalue { <8 x i8>, <8 x i8> } %vec, 0
37 tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
40 tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
51 %vec0 = extractvalue { <8 x i8>, <8 x i8> } %vec, 0
54 tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
57 tail call void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec0, <8 x i8> %vec1, i8* %addr)
68 %vec0 = extractvalue { <8 x i8>, <8 x i8> } %vec, 0
[all …]
/external/llvm/test/CodeGen/X86/
scalar_sse_minmax.ll
23 %vec0 = insertelement <4 x float> undef, float %x, i32 0
25 %retval = tail call <4 x float> @llvm.x86.sse.min.ss(<4 x float> %vec0, <4 x float> %vec1)
49 %vec0 = insertelement <4 x float> undef, float %x, i32 0
51 %retval = tail call <4 x float> @llvm.x86.sse.max.ss(<4 x float> %vec0, <4 x float> %vec1)
/external/llvm/test/CodeGen/AMDGPU/
operand-folding.ll
70 %vec0 = insertelement <4 x i32> undef, i32 %tmp0, i32 0
71 %vec1 = insertelement <4 x i32> %vec0, i32 %tmp1, i32 1
103 %vec0 = insertelement <4 x i32> undef, i32 %tmp0, i32 0
104 %vec1 = insertelement <4 x i32> %vec0, i32 %tmp1, i32 1
sgpr-copy-duplicate-operand.ll
14 %vec0 = insertelement <2 x i32> undef, i32 %add, i32 0
15 %vec1 = insertelement <2 x i32> %vec0, i32 %hi, i32 1
/external/llvm/test/CodeGen/SystemZ/
vec-shift-07.ll
151 %vec0 = insertelement <2 x i64> undef, i64 %ext0, i32 0
152 %vec1 = insertelement <2 x i64> %vec0, i64 %ext1, i32 1
165 %vec0 = insertelement <2 x i64> undef, i64 %ext0, i32 0
166 %vec1 = insertelement <2 x i64> %vec0, i64 %ext1, i32 1
179 %vec0 = insertelement <2 x i64> undef, i64 %ext0, i32 0
180 %vec1 = insertelement <2 x i64> %vec0, i64 %ext1, i32 1
vec-perm-12.ll
37 %vec0 = insertelement <4 x i32> undef, i32 %elt0, i32 0
38 %vec1 = insertelement <4 x i32> %vec0, i32 %elt1, i32 1
/external/valgrind/memcheck/tests/x86/
fxsave.c
8 const unsigned int vec0[4] variable
62 asm __volatile__("movups " VG_SYM(vec0) ", %xmm0"); in do_setup_then_fxsave()
/external/mesa3d/src/gallium/state_trackers/vega/
arc.c
228 double vec0[2], vec1[2]; in find_angles() local
259 vec0[0] = arc->x1 - arc->cx; in find_angles()
260 vec0[1] = arc->y1 - arc->cy; in find_angles()
266 vec0[0], vec0[1], vec1[0], vec1[1], arc->cx, arc->cy); in find_angles()
269 lambda1 = vector_orientation(vec0); in find_angles()
276 angle = vector_angles(vec0, vec1); in find_angles()
279 angle = 2*M_PI - vector_angles(vec0, vec1); in find_angles()
/external/valgrind/memcheck/tests/amd64/
fxsave-amd64.c
8 const unsigned int vec0[4] variable
108 asm __volatile__("movups (%0), %%xmm0" : : "r"(&vec0[0]) : "xmm0" ); in do_setup_then_fxsave()
