/external/libhevc/common/x86/ |
D | ihevc_itrans_recon_32x32_ssse3_intr.c |
      260  m_temp_reg_70 = _mm_load_si128((__m128i *)pi2_tmp_src);  in ihevc_itrans_recon_32x32_ssse3()
      262  m_temp_reg_71 = _mm_load_si128((__m128i *)pi2_tmp_src);  in ihevc_itrans_recon_32x32_ssse3()
      264  m_temp_reg_72 = _mm_load_si128((__m128i *)pi2_tmp_src);  in ihevc_itrans_recon_32x32_ssse3()
      266  m_temp_reg_73 = _mm_load_si128((__m128i *)pi2_tmp_src);  in ihevc_itrans_recon_32x32_ssse3()
      268  m_temp_reg_74 = _mm_load_si128((__m128i *)pi2_tmp_src);  in ihevc_itrans_recon_32x32_ssse3()
      270  m_temp_reg_75 = _mm_load_si128((__m128i *)pi2_tmp_src);  in ihevc_itrans_recon_32x32_ssse3()
      272  m_temp_reg_76 = _mm_load_si128((__m128i *)pi2_tmp_src);  in ihevc_itrans_recon_32x32_ssse3()
      274  m_temp_reg_77 = _mm_load_si128((__m128i *)pi2_tmp_src);  in ihevc_itrans_recon_32x32_ssse3()
      277  m_temp_reg_80 = _mm_load_si128((__m128i *)pi2_tmp_src);  in ihevc_itrans_recon_32x32_ssse3()
      279  m_temp_reg_81 = _mm_load_si128((__m128i *)pi2_tmp_src);  in ihevc_itrans_recon_32x32_ssse3()
      [all …]
|
D | ihevc_itrans_recon_16x16_ssse3_intr.c |
      210  m_temp_reg_70 = _mm_load_si128((__m128i *)pi2_tmp_src);  in ihevc_itrans_recon_16x16_ssse3()
      212  m_temp_reg_71 = _mm_load_si128((__m128i *)pi2_tmp_src);  in ihevc_itrans_recon_16x16_ssse3()
      214  m_temp_reg_72 = _mm_load_si128((__m128i *)pi2_tmp_src);  in ihevc_itrans_recon_16x16_ssse3()
      216  m_temp_reg_73 = _mm_load_si128((__m128i *)pi2_tmp_src);  in ihevc_itrans_recon_16x16_ssse3()
      218  m_temp_reg_74 = _mm_load_si128((__m128i *)pi2_tmp_src);  in ihevc_itrans_recon_16x16_ssse3()
      220  m_temp_reg_75 = _mm_load_si128((__m128i *)pi2_tmp_src);  in ihevc_itrans_recon_16x16_ssse3()
      222  m_temp_reg_76 = _mm_load_si128((__m128i *)pi2_tmp_src);  in ihevc_itrans_recon_16x16_ssse3()
      224  m_temp_reg_77 = _mm_load_si128((__m128i *)pi2_tmp_src);  in ihevc_itrans_recon_16x16_ssse3()
      249  m_coeff1 = _mm_load_si128((__m128i *)&g_ai2_ihevc_trans_16_even[2][0]); //89 75  in ihevc_itrans_recon_16x16_ssse3()
      295  m_coeff3 = _mm_load_si128((__m128i *)&g_ai2_ihevc_trans_16_even[3][0]); //75 -18  in ihevc_itrans_recon_16x16_ssse3()
      [all …]
|
D | ihevc_itrans_recon_ssse3_intr.c |
      1035  m_temp_reg_70 = _mm_load_si128((__m128i *)pi2_src);  in ihevc_itrans_recon_8x8_ssse3()
      1037  m_temp_reg_71 = _mm_load_si128((__m128i *)pi2_src);  in ihevc_itrans_recon_8x8_ssse3()
      1039  m_temp_reg_72 = _mm_load_si128((__m128i *)pi2_src);  in ihevc_itrans_recon_8x8_ssse3()
      1041  m_temp_reg_73 = _mm_load_si128((__m128i *)pi2_src);  in ihevc_itrans_recon_8x8_ssse3()
      1044  m_temp_reg_74 = _mm_load_si128((__m128i *)pi2_src);  in ihevc_itrans_recon_8x8_ssse3()
      1046  m_temp_reg_75 = _mm_load_si128((__m128i *)pi2_src);  in ihevc_itrans_recon_8x8_ssse3()
      1048  m_temp_reg_76 = _mm_load_si128((__m128i *)pi2_src);  in ihevc_itrans_recon_8x8_ssse3()
      1050  m_temp_reg_77 = _mm_load_si128((__m128i *)pi2_src);  in ihevc_itrans_recon_8x8_ssse3()
      1061  m_coeff2 = _mm_load_si128((__m128i *)&g_ai2_ihevc_trans_intr_even_8[3][0]);  in ihevc_itrans_recon_8x8_ssse3()
      1062  m_coeff1 = _mm_load_si128((__m128i *)&g_ai2_ihevc_trans_intr_even_8[0][0]);  in ihevc_itrans_recon_8x8_ssse3()
      [all …]
|
D | ihevc_deblk_ssse3_intr.c |
      147  coef_8x16b = _mm_load_si128((__m128i *)(coef_d));  in ihevc_deblk_luma_vert_ssse3()
      148  mask_16x8b = _mm_load_si128((__m128i *)(shuffle_d));  in ihevc_deblk_luma_vert_ssse3()
      409  coefdelta_0_8x16b = _mm_load_si128((__m128i *)coef_de1);  in ihevc_deblk_luma_vert_ssse3()
      431  coefdelta_0_8x16b = _mm_load_si128((__m128i *)coef_dep1);  in ihevc_deblk_luma_vert_ssse3()
      474  tmp3_const_8x16b = _mm_load_si128((__m128i *)(shuffle1));  in ihevc_deblk_luma_vert_ssse3()
      508  tmp0_const_8x16b = _mm_load_si128((__m128i *)shuffle2);  in ihevc_deblk_luma_vert_ssse3()
      509  tmp1_const_8x16b = _mm_load_si128((__m128i *)shuffle3);  in ihevc_deblk_luma_vert_ssse3()
      606  coef_8x16b = _mm_load_si128((__m128i *)(coef_d));  in ihevc_deblk_luma_horz_ssse3()
      607  mask_16x8b = _mm_load_si128((__m128i *)(shuffle_d));  in ihevc_deblk_luma_horz_ssse3()
      843  coefdelta_0_8x16b = _mm_load_si128((__m128i *)coef_de1);  in ihevc_deblk_luma_horz_ssse3()
      [all …]
|
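All of the libhevc loads above fetch 16-byte-aligned rows of 16-bit residual data or rows of a cosine table; `_mm_load_si128` is used rather than the slower unaligned `_mm_loadu_si128` precisely because those buffers are declared 16-byte aligned. Below is a minimal sketch of the load-and-`pmaddwd` pattern these inverse-transform kernels rely on, with invented names and a placeholder coefficient table (this is not the libhevc code):

```c
#include <emmintrin.h>   /* SSE2 */
#include <stdalign.h>
#include <stdint.h>

/* One load-and-multiply step: two aligned rows of eight int16 samples are
 * interleaved into {a, b} pairs and multiplied against a coefficient pair
 * with _mm_madd_epi16, which yields c0*a + c1*b in each 32-bit lane. */
void even_stage_sketch(const int16_t *src,  /* 16 values, 16-byte aligned */
                       int32_t *dst)        /* 8 values, 16-byte aligned  */
{
    /* Placeholder coefficient pair {64, 64}; the real kernels index rows of
     * the g_ai2_ihevc_trans_* tables instead. */
    alignas(16) static const int16_t cos_pair[8] = { 64, 64, 64, 64, 64, 64, 64, 64 };

    __m128i row0  = _mm_load_si128((const __m128i *)src);
    __m128i row1  = _mm_load_si128((const __m128i *)(src + 8));
    __m128i coeff = _mm_load_si128((const __m128i *)cos_pair);

    __m128i lo = _mm_unpacklo_epi16(row0, row1);   /* {a0,b0, a1,b1, ...} */
    __m128i hi = _mm_unpackhi_epi16(row0, row1);

    _mm_store_si128((__m128i *)dst,       _mm_madd_epi16(lo, coeff));
    _mm_store_si128((__m128i *)(dst + 4), _mm_madd_epi16(hi, coeff));
}
```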
/external/chromium_org/third_party/libvpx/source/libvpx/vp8/encoder/x86/ |
D | quantize_sse2.c |
      44  __m128i quant_shift0 = _mm_load_si128((__m128i *)(b->quant_shift));  in vp8_regular_quantize_b_sse2()
      45  __m128i quant_shift1 = _mm_load_si128((__m128i *)(b->quant_shift + 8));  in vp8_regular_quantize_b_sse2()
      46  __m128i z0 = _mm_load_si128((__m128i *)(b->coeff));  in vp8_regular_quantize_b_sse2()
      47  __m128i z1 = _mm_load_si128((__m128i *)(b->coeff+8));  in vp8_regular_quantize_b_sse2()
      49  __m128i zbin0 = _mm_load_si128((__m128i *)(b->zbin));  in vp8_regular_quantize_b_sse2()
      50  __m128i zbin1 = _mm_load_si128((__m128i *)(b->zbin + 8));  in vp8_regular_quantize_b_sse2()
      51  __m128i round0 = _mm_load_si128((__m128i *)(b->round));  in vp8_regular_quantize_b_sse2()
      52  __m128i round1 = _mm_load_si128((__m128i *)(b->round + 8));  in vp8_regular_quantize_b_sse2()
      53  __m128i quant0 = _mm_load_si128((__m128i *)(b->quant));  in vp8_regular_quantize_b_sse2()
      54  __m128i quant1 = _mm_load_si128((__m128i *)(b->quant + 8));  in vp8_regular_quantize_b_sse2()
      [all …]
|
D | quantize_sse4.c |
      38  __m128i quant_shift0 = _mm_load_si128((__m128i *)(b->quant_shift));  in vp8_regular_quantize_b_sse4_1()
      39  __m128i quant_shift1 = _mm_load_si128((__m128i *)(b->quant_shift + 8));  in vp8_regular_quantize_b_sse4_1()
      40  __m128i z0 = _mm_load_si128((__m128i *)(b->coeff));  in vp8_regular_quantize_b_sse4_1()
      41  __m128i z1 = _mm_load_si128((__m128i *)(b->coeff+8));  in vp8_regular_quantize_b_sse4_1()
      43  __m128i zbin0 = _mm_load_si128((__m128i *)(b->zbin));  in vp8_regular_quantize_b_sse4_1()
      44  __m128i zbin1 = _mm_load_si128((__m128i *)(b->zbin + 8));  in vp8_regular_quantize_b_sse4_1()
      45  __m128i round0 = _mm_load_si128((__m128i *)(b->round));  in vp8_regular_quantize_b_sse4_1()
      46  __m128i round1 = _mm_load_si128((__m128i *)(b->round + 8));  in vp8_regular_quantize_b_sse4_1()
      47  __m128i quant0 = _mm_load_si128((__m128i *)(b->quant));  in vp8_regular_quantize_b_sse4_1()
      48  __m128i quant1 = _mm_load_si128((__m128i *)(b->quant + 8));  in vp8_regular_quantize_b_sse4_1()
      [all …]
|
D | quantize_ssse3.c |
      45  __m128i z0 = _mm_load_si128((__m128i *)(b->coeff));  in vp8_fast_quantize_b_ssse3()
      46  __m128i z1 = _mm_load_si128((__m128i *)(b->coeff + 8));  in vp8_fast_quantize_b_ssse3()
      47  __m128i round0 = _mm_load_si128((__m128i *)(b->round));  in vp8_fast_quantize_b_ssse3()
      48  __m128i round1 = _mm_load_si128((__m128i *)(b->round + 8));  in vp8_fast_quantize_b_ssse3()
      49  __m128i quant_fast0 = _mm_load_si128((__m128i *)(b->quant_fast));  in vp8_fast_quantize_b_ssse3()
      50  __m128i quant_fast1 = _mm_load_si128((__m128i *)(b->quant_fast + 8));  in vp8_fast_quantize_b_ssse3()
      51  __m128i dequant0 = _mm_load_si128((__m128i *)(d->dequant));  in vp8_fast_quantize_b_ssse3()
      52  __m128i dequant1 = _mm_load_si128((__m128i *)(d->dequant + 8));  in vp8_fast_quantize_b_ssse3()
      58  __m128i zig_zag = _mm_load_si128((const __m128i *)pshufb_zig_zag_mask);  in vp8_fast_quantize_b_ssse3()
|
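The VP8 quantizers above pull each 16-entry, 16-byte-aligned table (zbin, round, quant, quant_shift, dequant) from the block descriptor with two aligned loads and then quantize the whole 16-coefficient block in registers. A minimal sketch of a fast-quantize-style round-and-multiply for one 8-lane half follows (simplified arithmetic, not VP8's exact rules):

```c
#include <emmintrin.h>
#include <stdint.h>

/* Quantize eight coefficients: |z| + round, multiply by the quantizer with
 * a high-half multiply, then restore the sign.  All pointers are assumed
 * 16-byte aligned, as the per-block tables above are. */
void quantize8_sketch(const int16_t *coeff, const int16_t *round,
                      const int16_t *quant, int16_t *qcoeff)
{
    __m128i z = _mm_load_si128((const __m128i *)coeff);
    __m128i r = _mm_load_si128((const __m128i *)round);
    __m128i q = _mm_load_si128((const __m128i *)quant);

    __m128i sign = _mm_srai_epi16(z, 15);                        /* 0 or -1 */
    __m128i x    = _mm_sub_epi16(_mm_xor_si128(z, sign), sign);  /* |z|     */

    x = _mm_adds_epi16(x, r);                      /* rounding bias         */
    __m128i y = _mm_mulhi_epi16(x, q);             /* (x * q) >> 16         */

    y = _mm_sub_epi16(_mm_xor_si128(y, sign), sign);  /* reapply the sign   */
    _mm_store_si128((__m128i *)qcoeff, y);
}
```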
/external/libvpx/libvpx/vp8/encoder/x86/ |
D | quantize_sse2.c |
      45  __m128i quant_shift0 = _mm_load_si128((__m128i *)(b->quant_shift));  in vp8_regular_quantize_b_sse2()
      46  __m128i quant_shift1 = _mm_load_si128((__m128i *)(b->quant_shift + 8));  in vp8_regular_quantize_b_sse2()
      47  __m128i z0 = _mm_load_si128((__m128i *)(b->coeff));  in vp8_regular_quantize_b_sse2()
      48  __m128i z1 = _mm_load_si128((__m128i *)(b->coeff+8));  in vp8_regular_quantize_b_sse2()
      50  __m128i zbin0 = _mm_load_si128((__m128i *)(b->zbin));  in vp8_regular_quantize_b_sse2()
      51  __m128i zbin1 = _mm_load_si128((__m128i *)(b->zbin + 8));  in vp8_regular_quantize_b_sse2()
      52  __m128i round0 = _mm_load_si128((__m128i *)(b->round));  in vp8_regular_quantize_b_sse2()
      53  __m128i round1 = _mm_load_si128((__m128i *)(b->round + 8));  in vp8_regular_quantize_b_sse2()
      54  __m128i quant0 = _mm_load_si128((__m128i *)(b->quant));  in vp8_regular_quantize_b_sse2()
      55  __m128i quant1 = _mm_load_si128((__m128i *)(b->quant + 8));  in vp8_regular_quantize_b_sse2()
      [all …]
|
/external/chromium_org/third_party/libvpx/source/libvpx/vp9/common/x86/ |
D | vp9_idct_intrin_sse2.h |
      92  in[0] = _mm_load_si128((const __m128i *)(input + 0 * 16));  in load_buffer_8x16()
      93  in[1] = _mm_load_si128((const __m128i *)(input + 1 * 16));  in load_buffer_8x16()
      94  in[2] = _mm_load_si128((const __m128i *)(input + 2 * 16));  in load_buffer_8x16()
      95  in[3] = _mm_load_si128((const __m128i *)(input + 3 * 16));  in load_buffer_8x16()
      96  in[4] = _mm_load_si128((const __m128i *)(input + 4 * 16));  in load_buffer_8x16()
      97  in[5] = _mm_load_si128((const __m128i *)(input + 5 * 16));  in load_buffer_8x16()
      98  in[6] = _mm_load_si128((const __m128i *)(input + 6 * 16));  in load_buffer_8x16()
      99  in[7] = _mm_load_si128((const __m128i *)(input + 7 * 16));  in load_buffer_8x16()
      101  in[8] = _mm_load_si128((const __m128i *)(input + 8 * 16));  in load_buffer_8x16()
      102  in[9] = _mm_load_si128((const __m128i *)(input + 9 * 16));  in load_buffer_8x16()
      [all …]
|
D | vp9_subpixel_8t_intrin_ssse3.c |
      71  shuffle1 =_mm_load_si128((__m128i const *)filt1_4_h8);  in vp9_filter_block1d4_h8_intrin_ssse3()
      72  shuffle2 = _mm_load_si128((__m128i const *)filt2_4_h8);  in vp9_filter_block1d4_h8_intrin_ssse3()
      144  filt1Reg = _mm_load_si128((__m128i const *)filt1_global);  in vp9_filter_block1d8_h8_intrin_ssse3()
      145  filt2Reg = _mm_load_si128((__m128i const *)filt2_global);  in vp9_filter_block1d8_h8_intrin_ssse3()
      146  filt3Reg = _mm_load_si128((__m128i const *)filt3_global);  in vp9_filter_block1d8_h8_intrin_ssse3()
      147  filt4Reg = _mm_load_si128((__m128i const *)filt4_global);  in vp9_filter_block1d8_h8_intrin_ssse3()
      224  filt1Reg = _mm_load_si128((__m128i const *)filt1_global);  in vp9_filter_block1d16_h8_intrin_ssse3()
      225  filt2Reg = _mm_load_si128((__m128i const *)filt2_global);  in vp9_filter_block1d16_h8_intrin_ssse3()
      226  filt3Reg = _mm_load_si128((__m128i const *)filt3_global);  in vp9_filter_block1d16_h8_intrin_ssse3()
      227  filt4Reg = _mm_load_si128((__m128i const *)filt4_global);  in vp9_filter_block1d16_h8_intrin_ssse3()
|
D | vp9_loopfilter_intrin_sse2.c |
      26  const __m128i blimit = _mm_load_si128((const __m128i *)_blimit);  in mb_lpf_horizontal_edge_w_sse2_8()
      27  const __m128i limit = _mm_load_si128((const __m128i *)_limit);  in mb_lpf_horizontal_edge_w_sse2_8()
      28  const __m128i thresh = _mm_load_si128((const __m128i *)_thresh);  in mb_lpf_horizontal_edge_w_sse2_8()
      391  const __m128i blimit = _mm_load_si128((const __m128i *)_blimit);  in mb_lpf_horizontal_edge_w_sse2_16()
      392  const __m128i limit = _mm_load_si128((const __m128i *)_limit);  in mb_lpf_horizontal_edge_w_sse2_16()
      393  const __m128i thresh = _mm_load_si128((const __m128i *)_thresh);  in mb_lpf_horizontal_edge_w_sse2_16()
      739  const __m128i blimit = _mm_load_si128((const __m128i *)_blimit);  in vp9_lpf_horizontal_8_sse2()
      740  const __m128i limit = _mm_load_si128((const __m128i *)_limit);  in vp9_lpf_horizontal_8_sse2()
      741  const __m128i thresh = _mm_load_si128((const __m128i *)_thresh);  in vp9_lpf_horizontal_8_sse2()
      959  _mm_unpacklo_epi64(_mm_load_si128((const __m128i *)_blimit0),  in vp9_lpf_horizontal_8_dual_sse2()
      [all …]
|
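In the subpixel filter above, the filtN_global shuffle masks and the filter taps sit in 16-byte-aligned tables, so each is fetched once per call with an aligned load, while the pixel rows themselves are read with unaligned loads. Below is a minimal 2-tap slice of that shuffle-then-maddubs pattern (the mask and tap handling here are illustrative, not VP9's tables):

```c
#include <tmmintrin.h>   /* SSSE3: _mm_shuffle_epi8, _mm_maddubs_epi16 */
#include <stdalign.h>
#include <stdint.h>

/* Gathers adjacent pixel pairs {p[i], p[i+1]} for eight output positions. */
alignas(16) static const uint8_t pair_mask[16] = {
    0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8
};

void hfilter2_sketch(const uint8_t *src, const int8_t taps[2], int16_t *dst)
{
    __m128i mask   = _mm_load_si128((const __m128i *)pair_mask);   /* aligned   */
    __m128i pixels = _mm_loadu_si128((const __m128i *)src);        /* unaligned */
    __m128i filt   = _mm_set1_epi16((int16_t)(((uint8_t)taps[1] << 8) |
                                              (uint8_t)taps[0]));

    /* Gather the pairs, then compute tap0*p[i] + tap1*p[i+1] per 16-bit lane. */
    __m128i pairs = _mm_shuffle_epi8(pixels, mask);
    __m128i sums  = _mm_maddubs_epi16(pairs, filt);
    _mm_storeu_si128((__m128i *)dst, sums);
}
```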
/external/jpeg/ |
D | jidctintelsse.c |
      171  x3 = _mm_load_si128(( __m128i*)(wsptr+24));\  in jpeg_idct_intelsse()
      172  x1 = _mm_load_si128(( __m128i*)(wsptr+8));\  in jpeg_idct_intelsse()
      200  x0 = _mm_load_si128(( __m128i*)(wsptr));\  in jpeg_idct_intelsse()
      201  x4 = _mm_load_si128(( __m128i*)(wsptr+32));\  in jpeg_idct_intelsse()
      202  x2 = _mm_load_si128(( __m128i*)(wsptr+16));\  in jpeg_idct_intelsse()
      203  x6 = _mm_load_si128(( __m128i*)(wsptr+48));\  in jpeg_idct_intelsse()
      281  row0 = _mm_load_si128((__m128i const*)(coef_blockSSE));  in jpeg_idct_intelsse()
      282  row2 = _mm_load_si128((__m128i const*)(coef_blockSSE+8*2));  in jpeg_idct_intelsse()
      292  row0 = _mm_load_si128((__m128i const*)(coef_blockSSE+8*4));  in jpeg_idct_intelsse()
      293  row2 = _mm_load_si128((__m128i const*)(coef_blockSSE+8*6));  in jpeg_idct_intelsse()
      [all …]
|
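jidctintelsse.c keeps the 8x8 IDCT workspace as 64 contiguous 16-bit values, so each row is exactly one aligned 128-bit load (wsptr, wsptr+8, wsptr+16, and so on). A minimal sketch of such a row load feeding an even-part add/sub butterfly follows (not libjpeg's code; the workspace and output are assumed 16-byte aligned):

```c
#include <emmintrin.h>
#include <stdint.h>

/* Load rows 0 and 4 of an 8x8 int16 workspace and form their sum and
 * difference, the first even-part step of a row-column IDCT. */
void idct_row_butterfly_sketch(const int16_t *wsptr, /* 64 values, aligned */
                               int16_t *out)         /* 16 values, aligned */
{
    __m128i row0 = _mm_load_si128((const __m128i *)(wsptr));      /* row 0 */
    __m128i row4 = _mm_load_si128((const __m128i *)(wsptr + 32)); /* row 4 */

    _mm_store_si128((__m128i *)out,       _mm_add_epi16(row0, row4));
    _mm_store_si128((__m128i *)(out + 8), _mm_sub_epi16(row0, row4));
}
```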
/external/qemu/distrib/jpeg-6b/ |
D | jidctintelsse.c |
      171  x3 = _mm_load_si128(( __m128i*)(wsptr+24));\  in jpeg_idct_intelsse()
      172  x1 = _mm_load_si128(( __m128i*)(wsptr+8));\  in jpeg_idct_intelsse()
      200  x0 = _mm_load_si128(( __m128i*)(wsptr));\  in jpeg_idct_intelsse()
      201  x4 = _mm_load_si128(( __m128i*)(wsptr+32));\  in jpeg_idct_intelsse()
      202  x2 = _mm_load_si128(( __m128i*)(wsptr+16));\  in jpeg_idct_intelsse()
      203  x6 = _mm_load_si128(( __m128i*)(wsptr+48));\  in jpeg_idct_intelsse()
      281  row0 = _mm_load_si128((__m128i const*)(coef_blockSSE));  in jpeg_idct_intelsse()
      282  row2 = _mm_load_si128((__m128i const*)(coef_blockSSE+8*2));  in jpeg_idct_intelsse()
      292  row0 = _mm_load_si128((__m128i const*)(coef_blockSSE+8*4));  in jpeg_idct_intelsse()
      293  row2 = _mm_load_si128((__m128i const*)(coef_blockSSE+8*6));  in jpeg_idct_intelsse()
      [all …]
|
/external/jemalloc/test/include/test/ |
D | SFMT-sse2.h |
      67  x = _mm_load_si128(a);  in mm_recursion()
      89  r1 = _mm_load_si128(&ctx->sfmt[N - 2].si);  in gen_rand_all()
      90  r2 = _mm_load_si128(&ctx->sfmt[N - 1].si);  in gen_rand_all()
      119  r1 = _mm_load_si128(&ctx->sfmt[N - 2].si);  in gen_rand_array()
      120  r2 = _mm_load_si128(&ctx->sfmt[N - 1].si);  in gen_rand_array()
      144  r = _mm_load_si128(&array[j + size - N].si);  in gen_rand_array()
|
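SFMT keeps its generator state as an array of 128-bit words, each read with an aligned load and mixed with shifts, ANDs and XORs. A minimal sketch in the spirit of mm_recursion() follows (the shift counts and the mask here are illustrative placeholders, not the SFMT-19937 parameters):

```c
#include <emmintrin.h>

/* Mix two aligned state words and a carry value with per-lane and
 * whole-register shifts, then combine with XOR/AND. */
__m128i sfmt_style_mix_sketch(const __m128i *a, const __m128i *b, __m128i c)
{
    const __m128i mask = _mm_set1_epi32(0x7fffffff);   /* placeholder mask */
    __m128i x = _mm_load_si128(a);                     /* state word i     */
    __m128i y = _mm_srli_epi32(_mm_load_si128(b), 11); /* per-lane shift   */
    __m128i z = _mm_srli_si128(c, 1);                  /* byte shift       */

    z = _mm_xor_si128(z, x);
    z = _mm_xor_si128(z, _mm_slli_si128(x, 1));
    y = _mm_and_si128(y, mask);
    return _mm_xor_si128(z, y);
}
```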
/external/chromium_org/third_party/libvpx/source/libvpx/vp9/encoder/x86/ |
D | vp9_dct_sse2.c |
      412  __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride));  in vp9_fdct8x8_1_sse2()
      413  __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride));  in vp9_fdct8x8_1_sse2()
      414  __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride));  in vp9_fdct8x8_1_sse2()
      415  __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride));  in vp9_fdct8x8_1_sse2()
      421  in0 = _mm_load_si128((const __m128i *)(input + 4 * stride));  in vp9_fdct8x8_1_sse2()
      422  in1 = _mm_load_si128((const __m128i *)(input + 5 * stride));  in vp9_fdct8x8_1_sse2()
      423  in2 = _mm_load_si128((const __m128i *)(input + 6 * stride));  in vp9_fdct8x8_1_sse2()
      424  in3 = _mm_load_si128((const __m128i *)(input + 7 * stride));  in vp9_fdct8x8_1_sse2()
      467  __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride));  in vp9_fdct8x8_sse2()
      468  __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride));  in vp9_fdct8x8_sse2()
      [all …]
|
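The fdct8x8 routines above read one row of eight 16-bit residuals per load, stepping by `stride` elements between rows. A minimal sketch of that strided register-file fill (names are illustrative; the aligned load is only valid when `input` is 16-byte aligned and `stride` is a multiple of 8 int16 elements):

```c
#include <emmintrin.h>
#include <stdint.h>

/* Pull eight rows of eight int16 residuals into eight registers, as the
 * vp9_fdct8x8_* entry points do before the transform and transpose. */
void load_8x8_rows_sketch(const int16_t *input, int stride, __m128i in[8])
{
    for (int i = 0; i < 8; ++i)
        in[i] = _mm_load_si128((const __m128i *)(input + i * stride));
}
```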
/external/libvpx/libvpx/vp9/common/x86/ |
D | vp9_loopfilter_intrin_sse2.c |
      22  const __m128i blimit = _mm_load_si128((const __m128i *)_blimit);  in mb_lpf_horizontal_edge_w_sse2_8()
      23  const __m128i limit = _mm_load_si128((const __m128i *)_limit);  in mb_lpf_horizontal_edge_w_sse2_8()
      24  const __m128i thresh = _mm_load_si128((const __m128i *)_thresh);  in mb_lpf_horizontal_edge_w_sse2_8()
      383  const __m128i blimit = _mm_load_si128((const __m128i *)_blimit);  in mb_lpf_horizontal_edge_w_sse2_16()
      384  const __m128i limit = _mm_load_si128((const __m128i *)_limit);  in mb_lpf_horizontal_edge_w_sse2_16()
      385  const __m128i thresh = _mm_load_si128((const __m128i *)_thresh);  in mb_lpf_horizontal_edge_w_sse2_16()
      736  work_a = _mm_load_si128((__m128i *)&ap[2 * 16]);  in mb_lpf_horizontal_edge_w_sse2_16()
      737  p2 = _mm_load_si128((__m128i *)&flat_op[2 * 16]);  in mb_lpf_horizontal_edge_w_sse2_16()
      743  p1 = _mm_load_si128((__m128i *)&flat_op[1 * 16]);  in mb_lpf_horizontal_edge_w_sse2_16()
      749  p0 = _mm_load_si128((__m128i *)&flat_op[0]);  in mb_lpf_horizontal_edge_w_sse2_16()
      [all …]
|
D | vp9_subpixel_8t_intrin_ssse3.c |
      69  thirdFilters =_mm_load_si128((__m128i const *)filt1_4_h8);  in vp9_filter_block1d4_h8_intrin_ssse3()
      70  forthFilters = _mm_load_si128((__m128i const *)filt2_4_h8);  in vp9_filter_block1d4_h8_intrin_ssse3()
      142  filt1Reg = _mm_load_si128((__m128i const *)filt1_global);  in vp9_filter_block1d8_h8_intrin_ssse3()
      143  filt2Reg = _mm_load_si128((__m128i const *)filt2_global);  in vp9_filter_block1d8_h8_intrin_ssse3()
      144  filt3Reg = _mm_load_si128((__m128i const *)filt3_global);  in vp9_filter_block1d8_h8_intrin_ssse3()
      145  filt4Reg = _mm_load_si128((__m128i const *)filt4_global);  in vp9_filter_block1d8_h8_intrin_ssse3()
      222  filt1Reg = _mm_load_si128((__m128i const *)filt1_global);  in vp9_filter_block1d16_h8_intrin_ssse3()
      223  filt2Reg = _mm_load_si128((__m128i const *)filt2_global);  in vp9_filter_block1d16_h8_intrin_ssse3()
      224  filt3Reg = _mm_load_si128((__m128i const *)filt3_global);  in vp9_filter_block1d16_h8_intrin_ssse3()
      225  filt4Reg = _mm_load_si128((__m128i const *)filt4_global);  in vp9_filter_block1d16_h8_intrin_ssse3()
|
D | vp9_idct_intrin_sse2.c |
      39  input0 = _mm_load_si128((const __m128i *)input);  in vp9_idct4x4_16_add_sse2()
      40  input2 = _mm_load_si128((const __m128i *)(input + 8));  in vp9_idct4x4_16_add_sse2()
      560  in0 = _mm_load_si128((const __m128i *)input);  in vp9_idct8x8_64_add_sse2()
      561  in1 = _mm_load_si128((const __m128i *)(input + 8 * 1));  in vp9_idct8x8_64_add_sse2()
      562  in2 = _mm_load_si128((const __m128i *)(input + 8 * 2));  in vp9_idct8x8_64_add_sse2()
      563  in3 = _mm_load_si128((const __m128i *)(input + 8 * 3));  in vp9_idct8x8_64_add_sse2()
      564  in4 = _mm_load_si128((const __m128i *)(input + 8 * 4));  in vp9_idct8x8_64_add_sse2()
      565  in5 = _mm_load_si128((const __m128i *)(input + 8 * 5));  in vp9_idct8x8_64_add_sse2()
      566  in6 = _mm_load_si128((const __m128i *)(input + 8 * 6));  in vp9_idct8x8_64_add_sse2()
      567  in7 = _mm_load_si128((const __m128i *)(input + 8 * 7));  in vp9_idct8x8_64_add_sse2()
      [all …]
|
/external/neven/Embedded/common/src/b_BasicEm/ |
D | MathSSE2.c |
      165  m_XMM0 = _mm_load_si128( (__m128i *)&0[vec1L] );  in bbs_dotProduct_128SSE2()
      168  m_XMM2 = _mm_load_si128( (__m128i *)&0[vec2L] );  in bbs_dotProduct_128SSE2()
      170  m_XMM6 = _mm_load_si128( (__m128i *)&8[vec1L] );  in bbs_dotProduct_128SSE2()
      176  m_XMM3 = _mm_load_si128( (__m128i *)&8[vec2L] );  in bbs_dotProduct_128SSE2()
      188  m_XMM0 = _mm_load_si128( (__m128i *)&m_XMM5 );  in bbs_dotProduct_128SSE2()
|
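bbs_dotProduct_128SSE2() streams two 16-bit vectors through aligned loads (note the `&0[vec1L]` spelling of `&vec1L[0]`) and accumulates products in XMM registers. Below is a minimal sketch of an SSE2 int16 dot product built on the same aligned-load plus `_mm_madd_epi16` idea (loop structure and names are illustrative; both inputs are assumed 16-byte aligned with a length that is a multiple of 8):

```c
#include <emmintrin.h>
#include <stdint.h>

int32_t dot16_sketch(const int16_t *a, const int16_t *b, int n)
{
    __m128i acc = _mm_setzero_si128();
    for (int i = 0; i < n; i += 8) {
        __m128i va = _mm_load_si128((const __m128i *)(a + i));
        __m128i vb = _mm_load_si128((const __m128i *)(b + i));
        acc = _mm_add_epi32(acc, _mm_madd_epi16(va, vb)); /* 4 partial sums */
    }
    /* Horizontal sum of the four 32-bit lanes. */
    acc = _mm_add_epi32(acc, _mm_srli_si128(acc, 8));
    acc = _mm_add_epi32(acc, _mm_srli_si128(acc, 4));
    return _mm_cvtsi128_si32(acc);
}
```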
/external/libvpx/libvpx/vp9/encoder/x86/ |
D | vp9_dct_avx2.c |
      295  __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride));  in vp9_fdct8x8_avx2()
      296  __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride));  in vp9_fdct8x8_avx2()
      297  __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride));  in vp9_fdct8x8_avx2()
      298  __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride));  in vp9_fdct8x8_avx2()
      299  __m128i in4 = _mm_load_si128((const __m128i *)(input + 4 * stride));  in vp9_fdct8x8_avx2()
      300  __m128i in5 = _mm_load_si128((const __m128i *)(input + 5 * stride));  in vp9_fdct8x8_avx2()
      301  __m128i in6 = _mm_load_si128((const __m128i *)(input + 6 * stride));  in vp9_fdct8x8_avx2()
      302  __m128i in7 = _mm_load_si128((const __m128i *)(input + 7 * stride));  in vp9_fdct8x8_avx2()
      538  in[0] = _mm_load_si128((const __m128i *)(input + 0 * stride));  in load_buffer_8x8_avx2()
      539  in[1] = _mm_load_si128((const __m128i *)(input + 1 * stride));  in load_buffer_8x8_avx2()
      [all …]
|
D | vp9_dct_sse2.c |
      396  __m128i in0 = _mm_load_si128((const __m128i *)(input + 0 * stride));  in vp9_fdct8x8_sse2()
      397  __m128i in1 = _mm_load_si128((const __m128i *)(input + 1 * stride));  in vp9_fdct8x8_sse2()
      398  __m128i in2 = _mm_load_si128((const __m128i *)(input + 2 * stride));  in vp9_fdct8x8_sse2()
      399  __m128i in3 = _mm_load_si128((const __m128i *)(input + 3 * stride));  in vp9_fdct8x8_sse2()
      400  __m128i in4 = _mm_load_si128((const __m128i *)(input + 4 * stride));  in vp9_fdct8x8_sse2()
      401  __m128i in5 = _mm_load_si128((const __m128i *)(input + 5 * stride));  in vp9_fdct8x8_sse2()
      402  __m128i in6 = _mm_load_si128((const __m128i *)(input + 6 * stride));  in vp9_fdct8x8_sse2()
      403  __m128i in7 = _mm_load_si128((const __m128i *)(input + 7 * stride));  in vp9_fdct8x8_sse2()
      639  in[0] = _mm_load_si128((const __m128i *)(input + 0 * stride));  in load_buffer_8x8()
      640  in[1] = _mm_load_si128((const __m128i *)(input + 1 * stride));  in load_buffer_8x8()
      [all …]
|
/external/chromium_org/third_party/mesa/src/src/gallium/drivers/llvmpipe/ |
D | lp_rast_tri.c |
      264  __m128i p0 = _mm_load_si128((__m128i *)&plane[0]); /* c, dcdx, dcdy, eo */  in lp_rast_triangle_3_16()
      265  __m128i p1 = _mm_load_si128((__m128i *)&plane[1]); /* c, dcdx, dcdy, eo */  in lp_rast_triangle_3_16()
      266  __m128i p2 = _mm_load_si128((__m128i *)&plane[2]); /* c, dcdx, dcdy, eo */  in lp_rast_triangle_3_16()
      374  __m128i p0 = _mm_load_si128((__m128i *)&plane[0]); /* c, dcdx, dcdy, eo */  in lp_rast_triangle_3_4()
      375  __m128i p1 = _mm_load_si128((__m128i *)&plane[1]); /* c, dcdx, dcdy, eo */  in lp_rast_triangle_3_4()
      376  __m128i p2 = _mm_load_si128((__m128i *)&plane[2]); /* c, dcdx, dcdy, eo */  in lp_rast_triangle_3_4()
|
/external/mesa3d/src/gallium/drivers/llvmpipe/ |
D | lp_rast_tri.c |
      264  __m128i p0 = _mm_load_si128((__m128i *)&plane[0]); /* c, dcdx, dcdy, eo */  in lp_rast_triangle_3_16()
      265  __m128i p1 = _mm_load_si128((__m128i *)&plane[1]); /* c, dcdx, dcdy, eo */  in lp_rast_triangle_3_16()
      266  __m128i p2 = _mm_load_si128((__m128i *)&plane[2]); /* c, dcdx, dcdy, eo */  in lp_rast_triangle_3_16()
      374  __m128i p0 = _mm_load_si128((__m128i *)&plane[0]); /* c, dcdx, dcdy, eo */  in lp_rast_triangle_3_4()
      375  __m128i p1 = _mm_load_si128((__m128i *)&plane[1]); /* c, dcdx, dcdy, eo */  in lp_rast_triangle_3_4()
      376  __m128i p2 = _mm_load_si128((__m128i *)&plane[2]); /* c, dcdx, dcdy, eo */  in lp_rast_triangle_3_4()
|
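In both llvmpipe copies above, one aligned load grabs a whole per-edge record (the c, dcdx, dcdy, eo fields noted in the comments), after which individual fields are broadcast with shuffles. A minimal sketch follows, under the assumption that the record is four consecutive 32-bit values in a 16-byte-aligned struct (these are not Mesa's actual types):

```c
#include <emmintrin.h>
#include <stdint.h>

struct plane_sketch {            /* assumed layout: c, dcdx, dcdy, eo */
    int32_t c, dcdx, dcdy, eo;
};

/* Load one edge-equation record with a single aligned load, broadcast the
 * fields of interest, and emit the edge value at x = 0..3. */
void edge_row_sketch(const struct plane_sketch *plane, /* 16-byte aligned */
                     int32_t out[4])
{
    __m128i p    = _mm_load_si128((const __m128i *)plane);
    __m128i c    = _mm_shuffle_epi32(p, _MM_SHUFFLE(0, 0, 0, 0)); /* broadcast c    */
    __m128i dcdx = _mm_shuffle_epi32(p, _MM_SHUFFLE(1, 1, 1, 1)); /* broadcast dcdx */

    /* Build dcdx * {0,1,2,3} with byte shifts and adds (no 32-bit multiply). */
    __m128i ramp = _mm_add_epi32(_mm_add_epi32(_mm_slli_si128(dcdx, 4),
                                               _mm_slli_si128(dcdx, 8)),
                                 _mm_slli_si128(dcdx, 12));
    _mm_storeu_si128((__m128i *)out, _mm_add_epi32(c, ramp));
}
```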
/external/chromium_org/media/base/simd/ |
D | convert_rgb_to_yuv_sse2.cc |
      179  __m128i y_table = _mm_load_si128(  in ConvertRGB32ToYUVRow_SSE2()
      205  __m128i y_offset = _mm_load_si128(  in ConvertRGB32ToYUVRow_SSE2()
      262  _mm_load_si128(  in ConvertRGB32ToYUVRow_SSE2()
      277  _mm_load_si128(  in ConvertRGB32ToYUVRow_SSE2()
|
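ConvertRGB32ToYUVRow_SSE2 loads its conversion tables (y_table, y_offset, and so on) from 16-byte-aligned static data once per row. A minimal sketch of that table-load-then-pmaddwd pattern for four luma values follows (the coefficients and offset here are generic BT.601-style integers, not Chromium's tables, and the caller is assumed to have already deinterleaved the pixels into 16-bit channel pairs):

```c
#include <emmintrin.h>
#include <stdalign.h>
#include <stdint.h>

alignas(16) static const int16_t kRGCoeff[8] = { 66, 129, 66, 129, 66, 129, 66, 129 };
alignas(16) static const int16_t kBCoeff[8]  = { 25,   0, 25,   0, 25,   0, 25,   0 };
alignas(16) static const int32_t kYOffset[4] = { 4224, 4224, 4224, 4224 }; /* 16*256 + 128 */

/* rg holds {R,G} int16 pairs and b0 holds {B,0} pairs for 4 pixels;
 * returns four 32-bit luma values in the studio range 16..235. */
__m128i rgb_to_y4_sketch(__m128i rg, __m128i b0)
{
    __m128i y = _mm_add_epi32(
        _mm_madd_epi16(rg, _mm_load_si128((const __m128i *)kRGCoeff)),
        _mm_madd_epi16(b0, _mm_load_si128((const __m128i *)kBCoeff)));
    y = _mm_add_epi32(y, _mm_load_si128((const __m128i *)kYOffset));
    return _mm_srai_epi32(y, 8);
}
```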
/external/chromium_org/third_party/WebKit/Source/wtf/text/ |
D | ASCIIFastPath.h |
      126  __m128i first8UChars = _mm_load_si128(reinterpret_cast<const __m128i*>(&source[i]));
      127  __m128i second8UChars = _mm_load_si128(reinterpret_cast<const __m128i*>(&source[i+8]));
|
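The two aligned loads in ASCIIFastPath.h pull sixteen UTF-16 code units at a time so they can be tested for ASCII-ness in one pass; a code unit is ASCII iff (c & 0xFF80) == 0. A minimal sketch of that check (the helper name and structure are illustrative, not WTF's code):

```c
#include <emmintrin.h>
#include <stdbool.h>
#include <stdint.h>

bool all_ascii16_sketch(const uint16_t *source /* 16 chars, 16-byte aligned */)
{
    __m128i first8  = _mm_load_si128((const __m128i *)source);
    __m128i second8 = _mm_load_si128((const __m128i *)(source + 8));
    __m128i merged  = _mm_or_si128(first8, second8);
    __m128i high    = _mm_and_si128(merged, _mm_set1_epi16((int16_t)0xFF80));
    /* movemask is 0xFFFF only if every byte of `high` compared equal to zero. */
    return _mm_movemask_epi8(_mm_cmpeq_epi8(high, _mm_setzero_si128())) == 0xFFFF;
}
```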