/external/libvpx/libvpx/vpx_dsp/x86/ |
D | inv_txfm_sse2.h |
      245  out[bound - i] = _mm_sub_epi16(in[i], in[bound - i]);  in add_sub_butterfly()
      263  step2[5] = _mm_sub_epi16(step1[4], step1[5]);  in idct8()
      264  step2[6] = _mm_sub_epi16(step1[7], step1[6]);  in idct8()
      270  step1[2] = _mm_sub_epi16(step2[1], step2[2]);  in idct8()
      271  step1[3] = _mm_sub_epi16(step2[0], step2[3]);  in idct8()
      279  out[4] = _mm_sub_epi16(step1[3], step2[4]);  in idct8()
      280  out[5] = _mm_sub_epi16(step1[2], step1[5]);  in idct8()
      281  out[6] = _mm_sub_epi16(step1[1], step1[6]);  in idct8()
      282  out[7] = _mm_sub_epi16(step1[0], step2[7]);  in idct8()
      317  step2[5] = _mm_sub_epi16(step1[4], step1[5]);  // step2 5&6  in idct8x8_12_add_kernel_sse2()
      [all …]
|
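The add_sub_butterfly/idct8 matches above all follow one pattern: each IDCT stage pairs an _mm_add_epi16 with an _mm_sub_epi16 to form a radix-2 butterfly over eight 16-bit lanes at once. A minimal compilable sketch (helper name is illustrative, not the libvpx one):

    #include <emmintrin.h>

    /* Radix-2 butterfly: (a, b) -> (a + b, a - b), eight int16 lanes.
     * Lane arithmetic wraps mod 2^16; the transform kernels keep their
     * intermediate coefficients small enough that this never overflows. */
    static void butterfly_epi16(__m128i a, __m128i b,
                                __m128i *sum, __m128i *diff) {
      *sum  = _mm_add_epi16(a, b);
      *diff = _mm_sub_epi16(a, b);
    }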
D | inv_txfm_ssse3.c |
      83  out[4] = _mm_sub_epi16(step1[3], step1[4]);  in idct32_34_8x32_quarter_1()
      84  out[5] = _mm_sub_epi16(step1[2], step1[5]);  in idct32_34_8x32_quarter_1()
      85  out[6] = _mm_sub_epi16(step1[1], step1[6]);  in idct32_34_8x32_quarter_1()
      86  out[7] = _mm_sub_epi16(step1[0], step1[7]);  in idct32_34_8x32_quarter_1()
      203  step2[5] = _mm_sub_epi16(step1[4], step1[5]);  in idct32_135_8x32_quarter_1()
      204  step2[6] = _mm_sub_epi16(step1[7], step1[6]);  in idct32_135_8x32_quarter_1()
      210  step1[2] = _mm_sub_epi16(step2[0], step2[2]);  in idct32_135_8x32_quarter_1()
      211  step1[3] = _mm_sub_epi16(step2[0], step2[3]);  in idct32_135_8x32_quarter_1()
      221  out[4] = _mm_sub_epi16(step1[3], step1[4]);  in idct32_135_8x32_quarter_1()
      222  out[5] = _mm_sub_epi16(step1[2], step1[5]);  in idct32_135_8x32_quarter_1()
      [all …]
|
D | inv_txfm_ssse3.h |
      54  step2[5] = _mm_sub_epi16(step1[4], step1[5]);  // step2 5&6  in idct8x8_12_add_kernel_ssse3()
      61  tmp[1] = _mm_sub_epi16(step2[0], step2[2]);  // step1 3&2  in idct8x8_12_add_kernel_ssse3()
      68  tmp[2] = _mm_sub_epi16(step1[3], step2[4]);  // output 4&7  in idct8x8_12_add_kernel_ssse3()
      69  tmp[3] = _mm_sub_epi16(step1[2], step1[5]);  // output 5&6  in idct8x8_12_add_kernel_ssse3()
      86  step2[5] = _mm_sub_epi16(step1[4], step1[5]);  in idct8x8_12_add_kernel_ssse3()
      87  step2[6] = _mm_sub_epi16(step1[7], step1[6]);  in idct8x8_12_add_kernel_ssse3()
      93  step1[2] = _mm_sub_epi16(step2[0], step2[2]);  in idct8x8_12_add_kernel_ssse3()
      94  step1[3] = _mm_sub_epi16(step2[0], step2[3]);  in idct8x8_12_add_kernel_ssse3()
      102  io[4] = _mm_sub_epi16(step1[3], step2[4]);  in idct8x8_12_add_kernel_ssse3()
      103  io[5] = _mm_sub_epi16(step1[2], step1[5]);  in idct8x8_12_add_kernel_ssse3()
      [all …]
|
D | avg_intrin_sse2.c |
      202  __m128i b1 = _mm_sub_epi16(a0, a1);  in hadamard_col8_sse2()
      204  __m128i b3 = _mm_sub_epi16(a2, a3);  in hadamard_col8_sse2()
      206  __m128i b5 = _mm_sub_epi16(a4, a5);  in hadamard_col8_sse2()
      208  __m128i b7 = _mm_sub_epi16(a6, a7);  in hadamard_col8_sse2()
      212  a2 = _mm_sub_epi16(b0, b2);  in hadamard_col8_sse2()
      213  a3 = _mm_sub_epi16(b1, b3);  in hadamard_col8_sse2()
      216  a6 = _mm_sub_epi16(b4, b6);  in hadamard_col8_sse2()
      217  a7 = _mm_sub_epi16(b5, b7);  in hadamard_col8_sse2()
      224  b2 = _mm_sub_epi16(a0, a4);  in hadamard_col8_sse2()
      225  b6 = _mm_sub_epi16(a1, a5);  in hadamard_col8_sse2()
      [all …]
|
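hadamard_col8_sse2 chains three layers of that butterfly (pair distance 1, then 2, then 4) to get an 8-point Walsh-Hadamard transform per column. A hedged 4-point version showing the layering (hypothetical helper, not the libvpx function):

    #include <emmintrin.h>

    /* 4-point Walsh-Hadamard across four vectors: two butterfly layers,
     * so every output lane is a +/- combination of all four inputs. */
    static void hadamard4_epi16(__m128i v[4]) {
      const __m128i b0 = _mm_add_epi16(v[0], v[1]);
      const __m128i b1 = _mm_sub_epi16(v[0], v[1]);
      const __m128i b2 = _mm_add_epi16(v[2], v[3]);
      const __m128i b3 = _mm_sub_epi16(v[2], v[3]);
      v[0] = _mm_add_epi16(b0, b2);  /* v0+v1+v2+v3 */
      v[1] = _mm_add_epi16(b1, b3);  /* v0-v1+v2-v3 */
      v[2] = _mm_sub_epi16(b0, b2);  /* v0+v1-v2-v3 */
      v[3] = _mm_sub_epi16(b1, b3);  /* v0-v1-v2+v3 */
    }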
D | inv_txfm_sse2.c |
      98  in[1] = _mm_sub_epi16(u[0], u[1]);  in idct4_sse2()
      126  in[0] = _mm_sub_epi16(in[0], in[1]);  // x0 - x2  in iadst4_sse2()
      330  s[2] = _mm_sub_epi16(in[0], in[2]);  in iadst8_sse2()
      331  s[3] = _mm_sub_epi16(in[1], in[3]);  in iadst8_sse2()
      382  in[1] = _mm_sub_epi16(kZero, s[4]);  in iadst8_sse2()
      384  in[3] = _mm_sub_epi16(kZero, s[2]);  in iadst8_sse2()
      386  in[5] = _mm_sub_epi16(kZero, s[7]);  in iadst8_sse2()
      388  in[7] = _mm_sub_epi16(kZero, s[1]);  in iadst8_sse2()
      749  x[4] = _mm_sub_epi16(s[0], s[4]);  in vpx_iadst16_8col_sse2()
      750  x[5] = _mm_sub_epi16(s[1], s[5]);  in vpx_iadst16_8col_sse2()
      [all …]
|
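The iadst8_sse2 matches at lines 382-388 use _mm_sub_epi16 against a zero vector purely as negation, since SSE2 has no 16-bit negate instruction. Sketch:

    #include <emmintrin.h>

    /* Lane-wise negate: 0 - x. Note -(-32768) wraps back to -32768. */
    static __m128i neg_epi16(__m128i x) {
      return _mm_sub_epi16(_mm_setzero_si128(), x);
    }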
D | quantize_sse2.h |
      27  *zbin = _mm_sub_epi16(*zbin, _mm_set1_epi16(1));  in load_b_values()
      35  return _mm_sub_epi16(a, sign);  in invert_sign_sse2()
      78  scan0 = _mm_sub_epi16(scan0, zbin_mask0);  in scan_for_eob()
      79  scan1 = _mm_sub_epi16(scan1, zbin_mask1);  in scan_for_eob()
|
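invert_sign_sse2 at line 35 is half of the classic xor/sub conditional negate: with a lane mask s that is 0 or -1, (x ^ s) - s negates exactly the lanes where s is set, because -x == (x ^ -1) + 1 in two's complement. (The zbin decrement at line 27 turns a later greater-than compare into greater-or-equal.) A sketch of the full trick:

    #include <emmintrin.h>

    /* Negate the lanes of x where sign_mask is -1 (0xFFFF); leave others. */
    static __m128i cond_negate_epi16(__m128i x, __m128i sign_mask) {
      return _mm_sub_epi16(_mm_xor_si128(x, sign_mask), sign_mask);
    }

    /* SSE2 abs: the arithmetic shift broadcasts the sign bit to a 0/-1 mask. */
    static __m128i abs_epi16_sse2(__m128i x) {
      const __m128i s = _mm_srai_epi16(x, 15);
      return cond_negate_epi16(x, s);
    }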
/external/libaom/libaom/aom_dsp/x86/ |
D | highbd_subtract_sse2.c |
      42  x0 = _mm_sub_epi16(u0, v0);  in subtract_4x4()
      43  x1 = _mm_sub_epi16(u1, v1);  in subtract_4x4()
      44  x2 = _mm_sub_epi16(u2, v2);  in subtract_4x4()
      45  x3 = _mm_sub_epi16(u3, v3);  in subtract_4x4()
      82  x0 = _mm_sub_epi16(u0, v0);  in subtract_4x8()
      83  x1 = _mm_sub_epi16(u1, v1);  in subtract_4x8()
      84  x2 = _mm_sub_epi16(u2, v2);  in subtract_4x8()
      85  x3 = _mm_sub_epi16(u3, v3);  in subtract_4x8()
      86  x4 = _mm_sub_epi16(u4, v4);  in subtract_4x8()
      87  x5 = _mm_sub_epi16(u5, v5);  in subtract_4x8()
      [all …]
|
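subtract_4x4/subtract_4x8 are plain residual computation: diff = src - pred on high-bit-depth samples, eight lanes per psubw. Since the samples are at most 12-bit, the signed 16-bit difference cannot overflow. A sketch of one row (hypothetical helper):

    #include <emmintrin.h>
    #include <stdint.h>

    /* One row of residual: diff[i] = src[i] - pred[i] for 8 uint16 samples.
     * Safe for inputs up to 15 bits; 12-bit video fits easily. */
    static void subtract_row(int16_t *diff, const uint16_t *src,
                             const uint16_t *pred) {
      const __m128i s = _mm_loadu_si128((const __m128i *)src);
      const __m128i p = _mm_loadu_si128((const __m128i *)pred);
      _mm_storeu_si128((__m128i *)diff, _mm_sub_epi16(s, p));
    }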
D | avg_intrin_sse2.c |
      153  __m128i b1 = _mm_sub_epi16(a0, a1);  in hadamard_col8_sse2()
      155  __m128i b3 = _mm_sub_epi16(a2, a3);  in hadamard_col8_sse2()
      157  __m128i b5 = _mm_sub_epi16(a4, a5);  in hadamard_col8_sse2()
      159  __m128i b7 = _mm_sub_epi16(a6, a7);  in hadamard_col8_sse2()
      163  a2 = _mm_sub_epi16(b0, b2);  in hadamard_col8_sse2()
      164  a3 = _mm_sub_epi16(b1, b3);  in hadamard_col8_sse2()
      167  a6 = _mm_sub_epi16(b4, b6);  in hadamard_col8_sse2()
      168  a7 = _mm_sub_epi16(b5, b7);  in hadamard_col8_sse2()
      175  b2 = _mm_sub_epi16(a0, a4);  in hadamard_col8_sse2()
      176  b6 = _mm_sub_epi16(a1, a5);  in hadamard_col8_sse2()
      [all …]
|
D | highbd_loopfilter_sse2.c |
      387  sum_q = _mm_sub_epi16(sum_p_0, pq[5]);  in highbd_lpf_internal_14_sse2()
      388  sum_p = _mm_sub_epi16(sum_p_0, q[5]);  in highbd_lpf_internal_14_sse2()
      394  sum_lq = _mm_sub_epi16(sum_lp, pq[2]);  in highbd_lpf_internal_14_sse2()
      395  sum_lp = _mm_sub_epi16(sum_lp, q[2]);  in highbd_lpf_internal_14_sse2()
      404  sum_lp = _mm_sub_epi16(sum_lp, q[1]);  in highbd_lpf_internal_14_sse2()
      405  sum_lq = _mm_sub_epi16(sum_lq, pq[1]);  in highbd_lpf_internal_14_sse2()
      429  sum_p = _mm_sub_epi16(sum_p, q[4]);  in highbd_lpf_internal_14_sse2()
      430  sum_q = _mm_sub_epi16(sum_q, pq[4]);  in highbd_lpf_internal_14_sse2()
      441  sum_p = _mm_sub_epi16(sum_p, q[3]);  in highbd_lpf_internal_14_sse2()
      442  sum_q = _mm_sub_epi16(sum_q, pq[3]);  in highbd_lpf_internal_14_sse2()
      [all …]
|
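The highbd_lpf_internal_14_sse2 matches are a sliding-window sum: as the wide loopfilter moves from one output tap to the next, it subtracts the sample leaving the averaging window and adds the one entering it, instead of re-summing all taps. Sketch of the update step (hypothetical helper):

    #include <emmintrin.h>

    /* Advance a windowed sum by one tap: drop `outgoing`, take in `incoming`. */
    static __m128i slide_sum_epi16(__m128i sum, __m128i outgoing,
                                   __m128i incoming) {
      return _mm_add_epi16(_mm_sub_epi16(sum, outgoing), incoming);
    }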
/external/libvpx/libvpx/vp9/encoder/x86/ |
D | vp9_quantize_sse2.c |
      66  qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign);  in vp9_quantize_fp_sse2()
      67  qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);  in vp9_quantize_fp_sse2()
      79  qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign);  in vp9_quantize_fp_sse2()
      80  qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);  in vp9_quantize_fp_sse2()
      106  iscan0 = _mm_sub_epi16(iscan0, nzero_coeff0);  in vp9_quantize_fp_sse2()
      107  iscan1 = _mm_sub_epi16(iscan1, nzero_coeff1);  in vp9_quantize_fp_sse2()
      133  qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign);  in vp9_quantize_fp_sse2()
      134  qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);  in vp9_quantize_fp_sse2()
      148  qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign);  in vp9_quantize_fp_sse2()
      149  qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);  in vp9_quantize_fp_sse2()
      [all …]
|
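Besides the xor/sub sign restore on qcoeff, vp9_quantize_fp_sse2 (lines 106-107) uses subtraction of a compare mask as a branch-free "+1": _mm_cmpeq_epi16 yields 0 or -1 per lane, so iscan - mask increments exactly the lanes holding nonzero coefficients, turning 0-based scan positions into a 1-based end-of-block count. A sketch of that step:

    #include <emmintrin.h>

    /* iscan + 1 on lanes where qcoeff != 0; a horizontal max of the result
     * over the block then yields the end-of-block (eob) position. */
    static __m128i bump_nonzero_scan(__m128i iscan, __m128i qcoeff) {
      const __m128i zero = _mm_setzero_si128();
      const __m128i is_zero = _mm_cmpeq_epi16(qcoeff, zero);  /* -1 where q == 0 */
      const __m128i nonzero = _mm_cmpeq_epi16(is_zero, zero); /* -1 where q != 0 */
      return _mm_sub_epi16(iscan, nonzero);                   /* minus -1 adds 1 */
    }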
D | vp9_dct_intrin_sse2.c |
      87  v[1] = _mm_sub_epi16(u[0], u[1]);  in fdct4_sse2()
      229  res[0] = _mm_sub_epi16(res[0], sign0);  in right_shift_8x8()
      230  res[1] = _mm_sub_epi16(res[1], sign1);  in right_shift_8x8()
      231  res[2] = _mm_sub_epi16(res[2], sign2);  in right_shift_8x8()
      232  res[3] = _mm_sub_epi16(res[3], sign3);  in right_shift_8x8()
      233  res[4] = _mm_sub_epi16(res[4], sign4);  in right_shift_8x8()
      234  res[5] = _mm_sub_epi16(res[5], sign5);  in right_shift_8x8()
      235  res[6] = _mm_sub_epi16(res[6], sign6);  in right_shift_8x8()
      236  res[7] = _mm_sub_epi16(res[7], sign7);  in right_shift_8x8()
      292  s4 = _mm_sub_epi16(in[3], in[4]);  in fdct8_sse2()
      [all …]
|
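right_shift_8x8 (lines 229-236) subtracts the 0/-1 sign mask before an arithmetic shift so that negative lanes get an extra +1; combined with the usual +2^(bit-1) rounding constant, this keeps the descaling symmetric instead of always rounding toward minus infinity. A sketch of the pattern for bit = 2 (hypothetical helper; the exact constant and ordering in the file may differ):

    #include <emmintrin.h>

    /* Computes (x + 2 + (x < 0)) >> 2 per lane: a sign-corrected rounding
     * shift, the usual way fDCT outputs are descaled. */
    static __m128i round_shift2_epi16(__m128i x) {
      const __m128i sign = _mm_srai_epi16(x, 15);    /* 0 or -1 per lane */
      x = _mm_add_epi16(x, _mm_set1_epi16(1 << 1));  /* + 2^(bit-1) */
      x = _mm_sub_epi16(x, sign);                    /* +1 on negative lanes */
      return _mm_srai_epi16(x, 2);
    }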
/external/libgav1/libgav1/src/dsp/x86/ |
D | intrapred_smooth_sse4.cc |
      164  weight_h[1] = _mm_sub_epi16(scale, weight_h[0]);  in LoadSmoothWeights4()
      170  weight_h[1] = _mm_sub_epi16(scale, weight_h[0]);  in LoadSmoothWeights4()
      175  weight_h[1] = _mm_sub_epi16(scale, weight_h[0]);  in LoadSmoothWeights4()
      177  weight_h[3] = _mm_sub_epi16(scale, weight_h[2]);  in LoadSmoothWeights4()
      326  weight_h[1] = _mm_sub_epi16(inverter, weight_h[0]);  in LoadSmoothWeights8()
      331  __m128i inverted_weights_x = _mm_sub_epi16(inverter, weights_x);  in LoadSmoothWeights8()
      343  weight_h[1] = _mm_sub_epi16(inverter, weight_h[0]);  in LoadSmoothWeights8()
      345  weight_h[3] = _mm_sub_epi16(inverter, weight_h[2]);  in LoadSmoothWeights8()
      350  weight_h[1] = _mm_sub_epi16(inverter, weight_h[0]);  in LoadSmoothWeights8()
      352  weight_h[3] = _mm_sub_epi16(inverter, weight_h[2]);  in LoadSmoothWeights8()
      [all …]
|
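Every LoadSmoothWeights match computes a complementary weight, w' = scale - w, so each pair sums to the smoothing scale and two samples can then be mixed with a single pmaddwd per four pixels. A hedged sketch of such a mix, assuming a scale of 256 and 8-bit samples (helper name and lane layout are illustrative, not libgav1's):

    #include <emmintrin.h>

    /* Four-pixel blend: result32[i] = (w*a + (256 - w)*b) >> 8.
     * With w in [0, 256] and 8-bit a/b, each pmaddwd term fits in 32 bits. */
    static __m128i smooth_mix4(__m128i w, __m128i a, __m128i b) {
      const __m128i scale = _mm_set1_epi16(256);
      const __m128i w_inv = _mm_sub_epi16(scale, w);       /* complementary weight */
      const __m128i wpair = _mm_unpacklo_epi16(w, w_inv);  /* w0, 256-w0, w1, ... */
      const __m128i spair = _mm_unpacklo_epi16(a, b);      /* a0, b0, a1, b1, ... */
      return _mm_srai_epi32(_mm_madd_epi16(wpair, spair), 8);
    }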
D | mask_blend_sse4.cc |
      155  __m128i pred_mask_1 = _mm_sub_epi16(mask_inverter, pred_mask_0);  in MaskBlending4x4_SSE4()
      164  pred_mask_1 = _mm_sub_epi16(mask_inverter, pred_mask_0);  in MaskBlending4x4_SSE4()
      185  __m128i pred_mask_1 = _mm_sub_epi16(mask_inverter, pred_mask_0);  in MaskBlending4xH_SSE4()
      195  pred_mask_1 = _mm_sub_epi16(mask_inverter, pred_mask_0);  in MaskBlending4xH_SSE4()
      204  pred_mask_1 = _mm_sub_epi16(mask_inverter, pred_mask_0);  in MaskBlending4xH_SSE4()
      213  pred_mask_1 = _mm_sub_epi16(mask_inverter, pred_mask_0);  in MaskBlending4xH_SSE4()
      250  const __m128i pred_mask_1 = _mm_sub_epi16(mask_inverter, pred_mask_0);  in MaskBlend_SSE4()
      560  __m128i pred_mask_1 = _mm_sub_epi16(mask_inverter, pred_mask_0);  in MaskBlend10bpp4x4_SSE4_1()
      571  pred_mask_1 = _mm_sub_epi16(mask_inverter, pred_mask_0);  in MaskBlend10bpp4x4_SSE4_1()
      604  __m128i pred_mask_1 = _mm_sub_epi16(mask_inverter, pred_mask_0);  in MaskBlend10bpp4xH_SSE4_1()
      [all …]
|
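mask_blend_sse4.cc applies the same complementary-weight idea with the AV1 blend scale: pred_mask_1 = 64 - pred_mask_0, then the two predictions are weighted and the sum descaled by 6 bits. A sketch for 8-bit predictions (the 10bpp paths in the file need wider arithmetic):

    #include <emmintrin.h>

    /* result = (m*p0 + (64 - m)*p1 + 32) >> 6, eight lanes. With m <= 64
     * and 8-bit p0/p1, every intermediate fits in a signed 16-bit lane. */
    static __m128i blend_a64_epi16(__m128i m, __m128i p0, __m128i p1) {
      const __m128i m_inv = _mm_sub_epi16(_mm_set1_epi16(64), m);
      __m128i acc = _mm_add_epi16(_mm_mullo_epi16(m, p0),
                                  _mm_mullo_epi16(m_inv, p1));
      acc = _mm_add_epi16(acc, _mm_set1_epi16(32));  /* rounding */
      return _mm_srli_epi16(acc, 6);
    }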
/external/libvpx/libvpx/vp8/encoder/x86/ |
D | vp8_quantize_sse2.c |
      68  x0 = _mm_sub_epi16(x0, sz0);  in vp8_regular_quantize_b_sse2()
      69  x1 = _mm_sub_epi16(x1, sz1);  in vp8_regular_quantize_b_sse2()
      78  x_minus_zbin0 = _mm_sub_epi16(x0, zbin0);  in vp8_regular_quantize_b_sse2()
      79  x_minus_zbin1 = _mm_sub_epi16(x1, zbin1);  in vp8_regular_quantize_b_sse2()
      103  y0 = _mm_sub_epi16(y0, sz0);  in vp8_regular_quantize_b_sse2()
      104  y1 = _mm_sub_epi16(y1, sz1);  in vp8_regular_quantize_b_sse2()
      165  x0 = _mm_sub_epi16(x0, sz0);  in vp8_fast_quantize_b_sse2()
      166  x1 = _mm_sub_epi16(x1, sz1);  in vp8_fast_quantize_b_sse2()
      179  x0 = _mm_sub_epi16(y0, sz0);  in vp8_fast_quantize_b_sse2()
      180  x1 = _mm_sub_epi16(y1, sz1);  in vp8_fast_quantize_b_sse2()
|
/external/libavc/common/x86/ |
D | ih264_resi_trans_quant_sse42.c |
      149  src_r0 = _mm_sub_epi16(src_r0, pred_r0);  in ih264_resi_trans_quant_4x4_sse42()
      150  src_r1 = _mm_sub_epi16(src_r1, pred_r1);  in ih264_resi_trans_quant_4x4_sse42()
      151  src_r2 = _mm_sub_epi16(src_r2, pred_r2);  in ih264_resi_trans_quant_4x4_sse42()
      152  src_r3 = _mm_sub_epi16(src_r3, pred_r3);  in ih264_resi_trans_quant_4x4_sse42()
      181  temp2 = _mm_sub_epi16(src_r1, src_r2);  in ih264_resi_trans_quant_4x4_sse42()
      183  temp3 = _mm_sub_epi16(src_r0, src_r3);  in ih264_resi_trans_quant_4x4_sse42()
      191  src_r2 = _mm_sub_epi16(temp0, temp1);  in ih264_resi_trans_quant_4x4_sse42()
      194  src_r3 = _mm_sub_epi16(temp3, src_r3);  in ih264_resi_trans_quant_4x4_sse42()
      219  temp2 = _mm_sub_epi16(src_r1, src_r2);  in ih264_resi_trans_quant_4x4_sse42()
      221  temp3 = _mm_sub_epi16(src_r0, src_r3);  in ih264_resi_trans_quant_4x4_sse42()
      [all …]
|
/external/libaom/libaom/av1/common/x86/ |
D | cfl_sse2.c |
      70  _mm_storel_epi64(dst, _mm_sub_epi16(_mm_loadl_epi64(src), avg_epi16));  in subtract_average_sse2()
      72  _mm_storeu_si128(dst, _mm_sub_epi16(_mm_loadu_si128(src), avg_epi16));  in subtract_average_sse2()
      75  _mm_sub_epi16(_mm_loadu_si128(src + 1), avg_epi16));  in subtract_average_sse2()
      78  _mm_sub_epi16(_mm_loadu_si128(src + 2), avg_epi16));  in subtract_average_sse2()
      80  _mm_sub_epi16(_mm_loadu_si128(src + 3), avg_epi16));  in subtract_average_sse2()
|
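subtract_average_sse2 recenters the luma buffer for CfL prediction: the block average is broadcast once into every lane, then a single psubw per load removes it from each group of samples. Sketch of the per-vector step (hypothetical helper):

    #include <emmintrin.h>
    #include <stdint.h>

    /* buf[0..7] -= avg, as one vector operation. */
    static void subtract_avg8(int16_t *buf, int16_t avg) {
      const __m128i v = _mm_loadu_si128((const __m128i *)buf);
      _mm_storeu_si128((__m128i *)buf,
                       _mm_sub_epi16(v, _mm_set1_epi16(avg)));
    }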
D | reconinter_ssse3.c |
      45  __m128i diff = _mm_srai_epi16(_mm_abs_epi16(_mm_sub_epi16(s0, s1)),  in av1_build_compound_diffwtd_mask_highbd_ssse3()
      50  m = _mm_sub_epi16(xAOM_BLEND_A64_MAX_ALPHA, m);  in av1_build_compound_diffwtd_mask_highbd_ssse3()
      63  __m128i diff = _mm_srai_epi16(_mm_abs_epi16(_mm_sub_epi16(s0, s1)),  in av1_build_compound_diffwtd_mask_highbd_ssse3()
      84  _mm_sra_epi16(_mm_abs_epi16(_mm_sub_epi16(s0, s1)), xshift);  in av1_build_compound_diffwtd_mask_highbd_ssse3()
      88  m = _mm_sub_epi16(xAOM_BLEND_A64_MAX_ALPHA, m);  in av1_build_compound_diffwtd_mask_highbd_ssse3()
      102  _mm_sra_epi16(_mm_abs_epi16(_mm_sub_epi16(s0, s1)), xshift);  in av1_build_compound_diffwtd_mask_highbd_ssse3()
|
/external/XNNPACK/src/qu8-dwconv/ |
D | up8x9-minmax-sse2.c |
      78  const __m128i vxk0 = _mm_sub_epi16(_mm_unpacklo_epi8(vk0, vzero), vkernel_zero_point);  in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
      87  const __m128i vxk1 = _mm_sub_epi16(_mm_unpacklo_epi8(vk1, vzero), vkernel_zero_point);  in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
      96  const __m128i vxk2 = _mm_sub_epi16(_mm_unpacklo_epi8(vk2, vzero), vkernel_zero_point);  in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
      105  const __m128i vxk3 = _mm_sub_epi16(_mm_unpacklo_epi8(vk3, vzero), vkernel_zero_point);  in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
      114  const __m128i vxk4 = _mm_sub_epi16(_mm_unpacklo_epi8(vk4, vzero), vkernel_zero_point);  in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
      123  const __m128i vxk5 = _mm_sub_epi16(_mm_unpacklo_epi8(vk5, vzero), vkernel_zero_point);  in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
      132  const __m128i vxk6 = _mm_sub_epi16(_mm_unpacklo_epi8(vk6, vzero), vkernel_zero_point);  in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
      141  const __m128i vxk7 = _mm_sub_epi16(_mm_unpacklo_epi8(vk7, vzero), vkernel_zero_point);  in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
      150  const __m128i vxk8 = _mm_sub_epi16(_mm_unpacklo_epi8(vk8, vzero), vkernel_zero_point);  in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
      230  const __m128i vxk0 = _mm_sub_epi16(_mm_unpacklo_epi8(vk0, vzero), vkernel_zero_point);  in xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2()
      [all …]
|
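The qu8 dwconv matches all perform the same widen-and-recenter step: _mm_unpacklo_epi8 against a zero vector zero-extends eight uint8 weights to 16 bits, and subtracting the kernel zero point turns them into the signed values the following 16-bit multiplies expect. Sketch (hypothetical loader):

    #include <emmintrin.h>
    #include <stdint.h>

    /* Load 8 quantized weights and recenter them around the zero point. */
    static __m128i load_weights_q8(const uint8_t *k,
                                   __m128i vkernel_zero_point) {
      const __m128i vk = _mm_loadl_epi64((const __m128i *)k);  /* 8 bytes */
      const __m128i vzero = _mm_setzero_si128();
      return _mm_sub_epi16(_mm_unpacklo_epi8(vk, vzero),       /* u8 -> u16 */
                           vkernel_zero_point);
    }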
/external/webp/src/dsp/ |
D | enc_sse2.c |
      86  const __m128i b = _mm_sub_epi16(in0, in2);  in ITransform_SSE2()
      90  const __m128i c3 = _mm_sub_epi16(in1, in3);  in ITransform_SSE2()
      91  const __m128i c4 = _mm_sub_epi16(c1, c2);  in ITransform_SSE2()
      103  const __m128i tmp2 = _mm_sub_epi16(b, c);  in ITransform_SSE2()
      104  const __m128i tmp3 = _mm_sub_epi16(a, d);  in ITransform_SSE2()
      117  const __m128i b = _mm_sub_epi16(dc, T2);  in ITransform_SSE2()
      121  const __m128i c3 = _mm_sub_epi16(T1, T3);  in ITransform_SSE2()
      122  const __m128i c4 = _mm_sub_epi16(c1, c2);  in ITransform_SSE2()
      134  const __m128i tmp2 = _mm_sub_epi16(b, c);  in ITransform_SSE2()
      135  const __m128i tmp3 = _mm_sub_epi16(a, d);  in ITransform_SSE2()
      [all …]
|
D | enc_sse41.c |
      114  const __m128i a2 = _mm_sub_epi16(tmp_1, tmp_3);  in TTransform_SSE41()
      115  const __m128i a3 = _mm_sub_epi16(tmp_0, tmp_2);  in TTransform_SSE41()
      118  const __m128i b2 = _mm_sub_epi16(a3, a2);  in TTransform_SSE41()
      119  const __m128i b3 = _mm_sub_epi16(a0, a1);  in TTransform_SSE41()
      138  const __m128i a2 = _mm_sub_epi16(tmp_1, tmp_3);  in TTransform_SSE41()
      139  const __m128i a3 = _mm_sub_epi16(tmp_0, tmp_2);  in TTransform_SSE41()
      142  const __m128i b2 = _mm_sub_epi16(a3, a2);  in TTransform_SSE41()
      143  const __m128i b3 = _mm_sub_epi16(a0, a1);  in TTransform_SSE41()
|
/external/XNNPACK/src/qs8-vadd/gen/ |
D | minmax-sse41-mul16-ld64-x32.c |
      75  …vxprod01234567hi = _mm_sub_epi16(vxprod01234567hi, _mm_and_si128(_mm_srai_epi16(vx01234567, 15), v…  in xnn_qs8_vadd_minmax_ukernel__sse41_mul16_ld64_x32()
      76  …vyprod01234567hi = _mm_sub_epi16(vyprod01234567hi, _mm_and_si128(_mm_srai_epi16(vy01234567, 15), v…  in xnn_qs8_vadd_minmax_ukernel__sse41_mul16_ld64_x32()
      77  …vxprod89ABCDEFhi = _mm_sub_epi16(vxprod89ABCDEFhi, _mm_and_si128(_mm_srai_epi16(vx89ABCDEF, 15), v…  in xnn_qs8_vadd_minmax_ukernel__sse41_mul16_ld64_x32()
      78  …vyprod89ABCDEFhi = _mm_sub_epi16(vyprod89ABCDEFhi, _mm_and_si128(_mm_srai_epi16(vy89ABCDEF, 15), v…  in xnn_qs8_vadd_minmax_ukernel__sse41_mul16_ld64_x32()
      79  …vxprodGHIJKLMNhi = _mm_sub_epi16(vxprodGHIJKLMNhi, _mm_and_si128(_mm_srai_epi16(vxGHIJKLMN, 15), v…  in xnn_qs8_vadd_minmax_ukernel__sse41_mul16_ld64_x32()
      80  …vyprodGHIJKLMNhi = _mm_sub_epi16(vyprodGHIJKLMNhi, _mm_and_si128(_mm_srai_epi16(vyGHIJKLMN, 15), v…  in xnn_qs8_vadd_minmax_ukernel__sse41_mul16_ld64_x32()
      81  …vxprodOPQRSTUVhi = _mm_sub_epi16(vxprodOPQRSTUVhi, _mm_and_si128(_mm_srai_epi16(vxOPQRSTUV, 15), v…  in xnn_qs8_vadd_minmax_ukernel__sse41_mul16_ld64_x32()
      82  …vyprodOPQRSTUVhi = _mm_sub_epi16(vyprodOPQRSTUVhi, _mm_and_si128(_mm_srai_epi16(vyOPQRSTUV, 15), v…  in xnn_qs8_vadd_minmax_ukernel__sse41_mul16_ld64_x32()
      158  …vxprod01234567hi = _mm_sub_epi16(vxprod01234567hi, _mm_and_si128(_mm_srai_epi16(vx01234567, 15), v…  in xnn_qs8_vadd_minmax_ukernel__sse41_mul16_ld64_x32()
      159  …vyprod01234567hi = _mm_sub_epi16(vyprod01234567hi, _mm_and_si128(_mm_srai_epi16(vy01234567, 15), v…  in xnn_qs8_vadd_minmax_ukernel__sse41_mul16_ld64_x32()
|
D | minmax-sse2-mul16-ld64-x32.c |
      83  …vxprod01234567hi = _mm_sub_epi16(vxprod01234567hi, _mm_and_si128(_mm_srai_epi16(vx01234567, 15), v…  in xnn_qs8_vadd_minmax_ukernel__sse2_mul16_ld64_x32()
      84  …vyprod01234567hi = _mm_sub_epi16(vyprod01234567hi, _mm_and_si128(_mm_srai_epi16(vy01234567, 15), v…  in xnn_qs8_vadd_minmax_ukernel__sse2_mul16_ld64_x32()
      85  …vxprod89ABCDEFhi = _mm_sub_epi16(vxprod89ABCDEFhi, _mm_and_si128(_mm_srai_epi16(vx89ABCDEF, 15), v…  in xnn_qs8_vadd_minmax_ukernel__sse2_mul16_ld64_x32()
      86  …vyprod89ABCDEFhi = _mm_sub_epi16(vyprod89ABCDEFhi, _mm_and_si128(_mm_srai_epi16(vy89ABCDEF, 15), v…  in xnn_qs8_vadd_minmax_ukernel__sse2_mul16_ld64_x32()
      87  …vxprodGHIJKLMNhi = _mm_sub_epi16(vxprodGHIJKLMNhi, _mm_and_si128(_mm_srai_epi16(vxGHIJKLMN, 15), v…  in xnn_qs8_vadd_minmax_ukernel__sse2_mul16_ld64_x32()
      88  …vyprodGHIJKLMNhi = _mm_sub_epi16(vyprodGHIJKLMNhi, _mm_and_si128(_mm_srai_epi16(vyGHIJKLMN, 15), v…  in xnn_qs8_vadd_minmax_ukernel__sse2_mul16_ld64_x32()
      89  …vxprodOPQRSTUVhi = _mm_sub_epi16(vxprodOPQRSTUVhi, _mm_and_si128(_mm_srai_epi16(vxOPQRSTUV, 15), v…  in xnn_qs8_vadd_minmax_ukernel__sse2_mul16_ld64_x32()
      90  …vyprodOPQRSTUVhi = _mm_sub_epi16(vyprodOPQRSTUVhi, _mm_and_si128(_mm_srai_epi16(vyOPQRSTUV, 15), v…  in xnn_qs8_vadd_minmax_ukernel__sse2_mul16_ld64_x32()
      168  …vxprod01234567hi = _mm_sub_epi16(vxprod01234567hi, _mm_and_si128(_mm_srai_epi16(vx01234567, 15), v…  in xnn_qs8_vadd_minmax_ukernel__sse2_mul16_ld64_x32()
      169  …vyprod01234567hi = _mm_sub_epi16(vyprod01234567hi, _mm_and_si128(_mm_srai_epi16(vy01234567, 15), v…  in xnn_qs8_vadd_minmax_ukernel__sse2_mul16_ld64_x32()
|
D | minmax-sse41-mul16-ld64-x24.c |
      67  …vxprod01234567hi = _mm_sub_epi16(vxprod01234567hi, _mm_and_si128(_mm_srai_epi16(vx01234567, 15), v…  in xnn_qs8_vadd_minmax_ukernel__sse41_mul16_ld64_x24()
      68  …vyprod01234567hi = _mm_sub_epi16(vyprod01234567hi, _mm_and_si128(_mm_srai_epi16(vy01234567, 15), v…  in xnn_qs8_vadd_minmax_ukernel__sse41_mul16_ld64_x24()
      69  …vxprod89ABCDEFhi = _mm_sub_epi16(vxprod89ABCDEFhi, _mm_and_si128(_mm_srai_epi16(vx89ABCDEF, 15), v…  in xnn_qs8_vadd_minmax_ukernel__sse41_mul16_ld64_x24()
      70  …vyprod89ABCDEFhi = _mm_sub_epi16(vyprod89ABCDEFhi, _mm_and_si128(_mm_srai_epi16(vy89ABCDEF, 15), v…  in xnn_qs8_vadd_minmax_ukernel__sse41_mul16_ld64_x24()
      71  …vxprodGHIJKLMNhi = _mm_sub_epi16(vxprodGHIJKLMNhi, _mm_and_si128(_mm_srai_epi16(vxGHIJKLMN, 15), v…  in xnn_qs8_vadd_minmax_ukernel__sse41_mul16_ld64_x24()
      72  …vyprodGHIJKLMNhi = _mm_sub_epi16(vyprodGHIJKLMNhi, _mm_and_si128(_mm_srai_epi16(vyGHIJKLMN, 15), v…  in xnn_qs8_vadd_minmax_ukernel__sse41_mul16_ld64_x24()
      137  …vxprod01234567hi = _mm_sub_epi16(vxprod01234567hi, _mm_and_si128(_mm_srai_epi16(vx01234567, 15), v…  in xnn_qs8_vadd_minmax_ukernel__sse41_mul16_ld64_x24()
      138  …vyprod01234567hi = _mm_sub_epi16(vyprod01234567hi, _mm_and_si128(_mm_srai_epi16(vy01234567, 15), v…  in xnn_qs8_vadd_minmax_ukernel__sse41_mul16_ld64_x24()
|
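These qs8-vadd kernels (match lines truncated above) correct an unsigned high multiply into a signed one: pmulhuw treats a negative x as x + 2^16, which inflates the high 16 bits of x*m by exactly m, so ANDing the multiplier with x's 0/-1 sign mask and subtracting it restores the signed high half. A sketch under the assumption that the multiplier m is non-negative (the kernels apply the same fixup for each signed operand):

    #include <emmintrin.h>

    /* Signed high 16 bits of x*m via an unsigned high multiply: if x < 0,
     * pmulhuw sees x + 2^16 and the high half carries an unwanted +m;
     * mask-and-subtract removes it. */
    static __m128i mulhi_signed_x(__m128i x, __m128i m) {
      const __m128i hi = _mm_mulhi_epu16(x, m);
      return _mm_sub_epi16(hi, _mm_and_si128(_mm_srai_epi16(x, 15), m));
    }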
/external/pdfium/third_party/libpng16/intel/ |
D | filter_sse2_intrinsics.c |
      215  x = _mm_sub_epi16(x, is_negative);  in abs_i16()
      263  pa = _mm_sub_epi16(b,c);  in png_read_filter_row_paeth3_sse2()
      266  pb = _mm_sub_epi16(a,c);  in png_read_filter_row_paeth3_sse2()
      299  pa = _mm_sub_epi16(b,c);  in png_read_filter_row_paeth3_sse2()
      302  pb = _mm_sub_epi16(a,c);  in png_read_filter_row_paeth3_sse2()
      361  pa = _mm_sub_epi16(b,c);  in png_read_filter_row_paeth4_sse2()
      364  pb = _mm_sub_epi16(a,c);  in png_read_filter_row_paeth4_sse2()
|
/external/libpng/intel/ |
D | filter_sse2_intrinsics.c |
      215  x = _mm_sub_epi16(x, is_negative);  in abs_i16()
      263  pa = _mm_sub_epi16(b,c);  in png_read_filter_row_paeth3_sse2()
      266  pb = _mm_sub_epi16(a,c);  in png_read_filter_row_paeth3_sse2()
      299  pa = _mm_sub_epi16(b,c);  in png_read_filter_row_paeth3_sse2()
      302  pb = _mm_sub_epi16(a,c);  in png_read_filter_row_paeth3_sse2()
      361  pa = _mm_sub_epi16(b,c);  in png_read_filter_row_paeth4_sse2()
      364  pb = _mm_sub_epi16(a,c);  in png_read_filter_row_paeth4_sse2()
|
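Both copies of filter_sse2_intrinsics.c (pdfium's bundled libpng16 and libpng proper) use the standard Paeth shortcut: with the predictor p = a + b - c, the three distances collapse to pa = |b - c|, pb = |a - c|, and pc = |(b - c) + (a - c)|, so p itself is never materialized; abs_i16 at line 215 supplies the absolute value. A hedged sketch of the distance step (helper names are illustrative):

    #include <emmintrin.h>

    /* SSE2 absolute value; there is no pabsw before SSSE3. */
    static __m128i abs16(__m128i x) {
      const __m128i s = _mm_srai_epi16(x, 15);
      return _mm_sub_epi16(_mm_xor_si128(x, s), s);
    }

    /* Paeth distances without forming p = a + b - c explicitly. */
    static void paeth_distances(__m128i a, __m128i b, __m128i c,
                                __m128i *pa, __m128i *pb, __m128i *pc) {
      const __m128i db = _mm_sub_epi16(b, c);  /* == p - a */
      const __m128i da = _mm_sub_epi16(a, c);  /* == p - b */
      *pa = abs16(db);
      *pb = abs16(da);
      *pc = abs16(_mm_add_epi16(da, db));      /* == |p - c| */
    }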