/external/libvpx/libvpx/vpx_dsp/x86/ |
D | highbd_idct32x32_add_sse4.c | matches in highbd_idct32_4x32_quarter_2_stage_4_to_6(); step1 is an argument:
      22  __m128i *const step1 /*step1[16]*/, __m128i *const out /*out[16]*/) {
      26  step2[8] = step1[8];
      27  step2[15] = step1[15];
      28  highbd_butterfly_sse4_1(step1[14], step1[9], cospi_24_64, cospi_8_64,
      30  highbd_butterfly_sse4_1(step1[13], step1[10], -cospi_8_64, cospi_24_64,
      32  step2[11] = step1[11];
      33  step2[12] = step1[12];
      36  step1[8] = _mm_add_epi32(step2[8], step2[11]);
      37  step1[9] = _mm_add_epi32(step2[9], step2[10]);
      38  step1[10] = _mm_sub_epi32(step2[9], step2[10]);
      [all …]
|
D | highbd_idct32x32_add_sse2.c | matches in highbd_idct32_4x32_quarter_2_stage_4_to_6(); step1 is an argument:
      18  __m128i *const step1 /*step1[16]*/, __m128i *const out /*out[16]*/) {
      22  step2[8] = step1[8];
      23  step2[15] = step1[15];
      24  highbd_butterfly_sse2(step1[14], step1[9], cospi_24_64, cospi_8_64, &step2[9],
      26  highbd_butterfly_sse2(step1[10], step1[13], cospi_8_64, cospi_24_64,
      28  step2[11] = step1[11];
      29  step2[12] = step1[12];
      32  step1[8] = _mm_add_epi32(step2[8], step2[11]);
      33  step1[9] = _mm_add_epi32(step2[9], step2[10]);
      34  step1[10] = _mm_sub_epi32(step2[9], step2[10]);
      [all …]
|
D | inv_txfm_ssse3.c | matches in idct32_34_8x32_quarter_1(); step1 is a local:
      57  __m128i step1[8], step2[8];
      60  partial_butterfly_ssse3(in[4], cospi_28_64, cospi_4_64, &step1[4], &step1[7]);
      64  step2[4] = step1[4];
      65  step2[5] = step1[4];
      66  step2[6] = step1[7];
      67  step2[7] = step1[7];
      70  step1[0] = step2[0];
      71  step1[1] = step2[0];
      72  step1[2] = step2[0];
      73  step1[3] = step2[0];
      [all …]
|
D | inv_txfm_sse2.h | matches in idct8(); step1 is a local:
     252  __m128i step1[8], step2[8];
     255  butterfly(in[1], in[7], cospi_28_64, cospi_4_64, &step1[4], &step1[7]);
     256  butterfly(in[5], in[3], cospi_12_64, cospi_20_64, &step1[5], &step1[6]);
     262  step2[4] = _mm_add_epi16(step1[4], step1[5]);
     263  step2[5] = _mm_sub_epi16(step1[4], step1[5]);
     264  step2[6] = _mm_sub_epi16(step1[7], step1[6]);
     265  step2[7] = _mm_add_epi16(step1[7], step1[6]);
     268  step1[0] = _mm_add_epi16(step2[0], step2[3]);
     269  step1[1] = _mm_add_epi16(step2[1], step2[2]);
     270  step1[2] = _mm_sub_epi16(step2[1], step2[2]);
     [all …]
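Note: the butterfly() calls matched above pair two 16-bit vectors with a pair of cosine constants and write the rotated results into step1[]. Below is a minimal sketch of that common SSE2 idiom, assuming pairwise _mm_madd_epi16 followed by a rounded 14-bit shift; the helper name, constant layout, and rounding are illustrative assumptions, not the header's actual code.

#include <emmintrin.h>  /* SSE2 */
#include <stdint.h>

/* Sketch only: out0 = round((in0*c0 - in1*c1) >> 14),
 *              out1 = round((in0*c1 + in1*c0) >> 14), per 16-bit lane. */
static void butterfly_sketch(__m128i in0, __m128i in1, int c0, int c1,
                             __m128i *out0, __m128i *out1) {
  const __m128i pair0 = _mm_setr_epi16((int16_t)c0, (int16_t)-c1, (int16_t)c0, (int16_t)-c1,
                                       (int16_t)c0, (int16_t)-c1, (int16_t)c0, (int16_t)-c1);
  const __m128i pair1 = _mm_setr_epi16((int16_t)c1, (int16_t)c0, (int16_t)c1, (int16_t)c0,
                                       (int16_t)c1, (int16_t)c0, (int16_t)c1, (int16_t)c0);
  const __m128i rounding = _mm_set1_epi32(1 << 13);  /* 0.5 in Q14 */
  const __m128i lo = _mm_unpacklo_epi16(in0, in1);   /* interleave 16-bit lanes */
  const __m128i hi = _mm_unpackhi_epi16(in0, in1);
  __m128i u0 = _mm_madd_epi16(lo, pair0);            /* in0*c0 - in1*c1, 32-bit */
  __m128i u1 = _mm_madd_epi16(hi, pair0);
  __m128i v0 = _mm_madd_epi16(lo, pair1);            /* in0*c1 + in1*c0, 32-bit */
  __m128i v1 = _mm_madd_epi16(hi, pair1);
  u0 = _mm_srai_epi32(_mm_add_epi32(u0, rounding), 14);
  u1 = _mm_srai_epi32(_mm_add_epi32(u1, rounding), 14);
  v0 = _mm_srai_epi32(_mm_add_epi32(v0, rounding), 14);
  v1 = _mm_srai_epi32(_mm_add_epi32(v1, rounding), 14);
  *out0 = _mm_packs_epi32(u0, u1);                   /* saturate back to 16 bits */
  *out1 = _mm_packs_epi32(v0, v1);
}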
|
D | inv_txfm_ssse3.h | matches in idct8x8_12_add_kernel_ssse3(); step1 is a local:
      34  __m128i step1[8], step2[8], tmp[4];
      47  step1[4] = _mm_mulhrs_epi16(tmp[1], cp_28d_4d);    // step1 4&7
      48  step1[5] = _mm_mulhrs_epi16(tmp[3], cp_n20d_12d);  // step1 5&6
      53  step2[4] = _mm_add_epi16(step1[4], step1[5]);      // step2 4&7
      54  step2[5] = _mm_sub_epi16(step1[4], step1[5]);      // step2 5&6
      59  step1[5] = idct_calc_wraplow_sse2(cp_16_n16, cp_16_16, tmp[0]);  // step1 5&6
      62  step1[2] = _mm_unpackhi_epi64(tmp[1], tmp[0]);     // step1 2&1
      63  step1[3] = _mm_unpacklo_epi64(tmp[1], tmp[0]);     // step1 3&0
      66  tmp[0] = _mm_add_epi16(step1[3], step2[4]);        // output 3&0
      67  tmp[1] = _mm_add_epi16(step1[2], step1[5]);        // output 2&1
      [all …]
|
D | highbd_idct16x16_add_sse4.c | matches in vpx_highbd_idct16_4col_sse4_1(); step1 is a local:
      57  __m128i step1[16], step2[16];
      70  highbd_butterfly_sse4_1(io[2], io[14], cospi_28_64, cospi_4_64, &step1[4],
      71  &step1[7]);
      72  highbd_butterfly_sse4_1(io[10], io[6], cospi_12_64, cospi_20_64, &step1[5],
      73  &step1[6]);
      74  step1[8] = _mm_add_epi32(step2[8], step2[9]);
      75  step1[9] = _mm_sub_epi32(step2[8], step2[9]);
      76  step1[10] = _mm_sub_epi32(step2[11], step2[10]);
      77  step1[11] = _mm_add_epi32(step2[11], step2[10]);
      78  step1[12] = _mm_add_epi32(step2[12], step2[13]);
      [all …]
|
D | highbd_idct16x16_add_sse2.c | matches in highbd_idct16_4col(); step1 is a local:
      56  __m128i step1[16], step2[16];
      69  highbd_butterfly_sse2(io[2], io[14], cospi_28_64, cospi_4_64, &step1[4],
      70  &step1[7]);
      71  highbd_butterfly_sse2(io[10], io[6], cospi_12_64, cospi_20_64, &step1[5],
      72  &step1[6]);
      73  step1[8] = _mm_add_epi32(step2[8], step2[9]);
      74  step1[9] = _mm_sub_epi32(step2[8], step2[9]);
      75  step1[10] = _mm_sub_epi32(step2[10], step2[11]);  // step1[10] = -step1[10]
      76  step1[11] = _mm_add_epi32(step2[10], step2[11]);
      77  step1[12] = _mm_add_epi32(step2[13], step2[12]);
      [all …]
|
D | highbd_idct8x8_add_sse4.c | matches in vpx_highbd_idct8x8_half1d_sse4_1(); step1 is a local:
      21  __m128i step1[8], step2[8];
      26  step1[0] = io[0];
      27  step1[2] = io[4];
      28  step1[1] = io[2];
      29  step1[3] = io[6];
      30  highbd_butterfly_sse4_1(io[1], io[7], cospi_28_64, cospi_4_64, &step1[4],
      31  &step1[7]);
      32  highbd_butterfly_sse4_1(io[5], io[3], cospi_12_64, cospi_20_64, &step1[5],
      33  &step1[6]);
      36  highbd_butterfly_cospi16_sse4_1(step1[0], step1[2], &step2[0], &step2[1]);
      [all …]
|
D | highbd_idct8x8_add_sse2.c | matches in highbd_idct8x8_half1d(); step1 is a local:
      19  __m128i step1[8], step2[8];
      24  step1[0] = io[0];
      25  step1[2] = io[4];
      26  step1[1] = io[2];
      27  step1[3] = io[6];
      28  highbd_butterfly_sse2(io[1], io[7], cospi_28_64, cospi_4_64, &step1[4],
      29  &step1[7]);
      30  highbd_butterfly_sse2(io[5], io[3], cospi_12_64, cospi_20_64, &step1[5],
      31  &step1[6]);
      34  highbd_butterfly_cospi16_sse2(step1[0], step1[2], &step2[0], &step2[1]);
      [all …]
|
D | inv_txfm_sse2.c | matches in idct32_34_8x32_quarter_1(); step1 is a local:
     893  __m128i step1[8], step2[8];
     896  butterfly(in[4], zero, cospi_28_64, cospi_4_64, &step1[4], &step1[7]);
     900  step2[4] = step1[4];
     901  step2[5] = step1[4];
     902  step2[6] = step1[7];
     903  step2[7] = step1[7];
     906  step1[0] = step2[0];
     907  step1[1] = step2[0];
     908  step1[2] = step2[0];
     909  step1[3] = step2[0];
     [all …]
|
D | fwd_dct32x32_impl_sse2.h | matches in FDCT32x32_2D(); step1 is a local:
     115  __m128i step1[32];
     130  __m128i *step1a = &step1[0];
     131  __m128i *step1b = &step1[31];
     160  __m128i *step1a = &step1[4];
     161  __m128i *step1b = &step1[27];
     190  __m128i *step1a = &step1[8];
     191  __m128i *step1b = &step1[23];
     220  __m128i *step1a = &step1[12];
     221  __m128i *step1b = &step1[19];
     264  step1[0] = ADD_EPI16(in00, in31);
     [all …]
|
D | fwd_dct32x32_impl_avx2.h | matches in FDCT32x32_2D_AVX2(); step1 is a local:
     100  __m256i step1[32];
     115  __m256i *step1a = &step1[0];
     116  __m256i *step1b = &step1[31];
     151  __m256i *step1a = &step1[4];
     152  __m256i *step1b = &step1[27];
     187  __m256i *step1a = &step1[8];
     188  __m256i *step1b = &step1[23];
     223  __m256i *step1a = &step1[12];
     224  __m256i *step1b = &step1[19];
     273  step1[0] = _mm256_add_epi16(in00, in31);
     [all …]
|
/external/libvpx/libvpx/vpx_dsp/arm/ |
D | idct16x16_add_neon.c | matches in vpx_idct16x16_256_add_half1d(); step1 is a local:
      75  int16x8_t in[16], step1[16], step2[16], out[16];
     153  step1[0] = in[0 / 2];
     154  step1[1] = in[16 / 2];
     155  step1[2] = in[8 / 2];
     156  step1[3] = in[24 / 2];
     157  step1[4] = in[4 / 2];
     158  step1[5] = in[20 / 2];
     159  step1[6] = in[12 / 2];
     160  step1[7] = in[28 / 2];
     161  step1[8] = in[2 / 2];
     [all …]
|
D | highbd_idct16x16_add_neon.c | matches in vpx_highbd_idct16x16_256_add_half1d(); step1 is a local:
     526  int32x4x2_t in[16], step1[16], step2[16], out[16];
     584  step1[0] = in[0 / 2];
     585  step1[1] = in[16 / 2];
     586  step1[2] = in[8 / 2];
     587  step1[3] = in[24 / 2];
     588  step1[4] = in[4 / 2];
     589  step1[5] = in[20 / 2];
     590  step1[6] = in[12 / 2];
     591  step1[7] = in[28 / 2];
     592  step1[8] = in[2 / 2];
     [all …]
|
D | highbd_idct8x8_add_neon.c | matches in idct8x8_12_half1d_bd10(); step1 is a local:
      76  int32x4_t step1[8], step2[8];
      81  step1[4] = vmulq_lane_s32(*io1, vget_high_s32(cospis1), 1);
      82  step1[5] = vmulq_lane_s32(*io3, vget_high_s32(cospis1), 0);
      83  step1[6] = vmulq_lane_s32(*io3, vget_low_s32(cospis1), 1);
      84  step1[7] = vmulq_lane_s32(*io1, vget_low_s32(cospis1), 0);
      85  step1[4] = vrshrq_n_s32(step1[4], DCT_CONST_BITS);
      86  step1[5] = vrshrq_n_s32(step1[5], DCT_CONST_BITS);
      87  step1[6] = vrshrq_n_s32(step1[6], DCT_CONST_BITS);
      88  step1[7] = vrshrq_n_s32(step1[7], DCT_CONST_BITS);
      98  step2[4] = vaddq_s32(step1[4], step1[5]);
      [all …]
|
D | highbd_idct_neon.h | matches in idct8x8_64_half1d_bd10(); step1 is a local:
     171  int32x4_t step1[8], step2[8];
     176  step1[4] = vmulq_lane_s32(*io1, vget_high_s32(cospis1), 1);
     177  step1[5] = vmulq_lane_s32(*io3, vget_high_s32(cospis1), 0);
     178  step1[6] = vmulq_lane_s32(*io3, vget_low_s32(cospis1), 1);
     179  step1[7] = vmulq_lane_s32(*io1, vget_low_s32(cospis1), 0);
     181  step1[4] = vmlsq_lane_s32(step1[4], *io7, vget_low_s32(cospis1), 0);
     182  step1[5] = vmlaq_lane_s32(step1[5], *io5, vget_low_s32(cospis1), 1);
     183  step1[6] = vmlsq_lane_s32(step1[6], *io5, vget_high_s32(cospis1), 0);
     184  step1[7] = vmlaq_lane_s32(step1[7], *io7, vget_high_s32(cospis1), 1);
     186  step1[4] = vrshrq_n_s32(step1[4], DCT_CONST_BITS);
     [all …]
|
D | idct_neon.h | matches in idct8x8_12_pass1_bd8(); step1 is a local:
     323  int16x4_t step1[8], step2[8];
     329  step1[4] = vqrdmulh_lane_s16(io[1], cospisd1, 3);
     330  step1[5] = vqrdmulh_lane_s16(io[3], cospisd1, 2);
     331  step1[6] = vqrdmulh_lane_s16(io[3], cospisd1, 1);
     332  step1[7] = vqrdmulh_lane_s16(io[1], cospisd1, 0);
     339  step2[4] = vadd_s16(step1[4], step1[5]);
     340  step2[5] = vsub_s16(step1[4], step1[5]);
     341  step2[6] = vsub_s16(step1[7], step1[6]);
     342  step2[7] = vadd_s16(step1[7], step1[6]);
     345  step1[0] = vadd_s16(step2[1], step2[3]);
     [all …]
|
/external/libaom/libaom/av1/common/arm/ |
D | av1_inv_txfm_neon.c | matches in idct8_neon(); step1 is a local:
     392  int16x8_t step1[8], step2[8];
     399  btf_16_lane_0_1_neon(in[1], in[7], c0, &step1[7], &step1[4]);
     400  btf_16_lane_2_3_neon(in[5], in[3], c0, &step1[6], &step1[5]);
     405  step2[4] = vqaddq_s16(step1[4], step1[5]);
     406  step2[5] = vqsubq_s16(step1[4], step1[5]);
     407  step2[6] = vqsubq_s16(step1[7], step1[6]);
     408  step2[7] = vqaddq_s16(step1[7], step1[6]);
     411  step1[0] = vqaddq_s16(step2[0], step2[3]);
     412  step1[1] = vqaddq_s16(step2[1], step2[2]);
     413  step1[2] = vqsubq_s16(step2[1], step2[2]);
     [all …]
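Note: btf_16_lane_0_1_neon() above performs the same cosine rotation that feeds step1[], with constants taken from lanes of a vector. The sketch below uses the scalar-constant NEON multiplies to make that math explicit; the name, argument order, and rounding shift are assumptions based on the usual widening-multiply pattern, not libaom's helper.

#include <arm_neon.h>
#include <stdint.h>

/* Sketch only: out0 = round((a*c0 - b*c1) >> 14),
 *              out1 = round((a*c1 + b*c0) >> 14), per 16-bit lane. */
static void btf16_sketch(int16x8_t a, int16x8_t b, int16_t c0, int16_t c1,
                         int16x8_t *out0, int16x8_t *out1) {
  int32x4_t t0_lo = vmull_n_s16(vget_low_s16(a), c0);   /* widen to 32 bits */
  int32x4_t t0_hi = vmull_n_s16(vget_high_s16(a), c0);
  int32x4_t t1_lo = vmull_n_s16(vget_low_s16(a), c1);
  int32x4_t t1_hi = vmull_n_s16(vget_high_s16(a), c1);
  t0_lo = vmlsl_n_s16(t0_lo, vget_low_s16(b), c1);      /* a*c0 - b*c1 */
  t0_hi = vmlsl_n_s16(t0_hi, vget_high_s16(b), c1);
  t1_lo = vmlal_n_s16(t1_lo, vget_low_s16(b), c0);      /* a*c1 + b*c0 */
  t1_hi = vmlal_n_s16(t1_hi, vget_high_s16(b), c0);
  /* Saturating rounded narrow back to 16 bits after the Q14 shift. */
  *out0 = vcombine_s16(vqrshrn_n_s32(t0_lo, 14), vqrshrn_n_s32(t0_hi, 14));
  *out1 = vcombine_s16(vqrshrn_n_s32(t1_lo, 14), vqrshrn_n_s32(t1_hi, 14));
}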
|
/external/libvpx/libvpx/vpx_dsp/ |
D | inv_txfm.c | matches in idct8_c(); step1 is a local:
     272  int16_t step1[8], step2[8];
     276  step1[0] = (int16_t)input[0];
     277  step1[2] = (int16_t)input[4];
     278  step1[1] = (int16_t)input[2];
     279  step1[3] = (int16_t)input[6];
     282  step1[4] = WRAPLOW(dct_const_round_shift(temp1));
     283  step1[7] = WRAPLOW(dct_const_round_shift(temp2));
     286  step1[5] = WRAPLOW(dct_const_round_shift(temp1));
     287  step1[6] = WRAPLOW(dct_const_round_shift(temp2));
     290  temp1 = (step1[0] + step1[2]) * cospi_16_64;
     [all …]
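Note: idct8_c() is the plain-C reference that the SSE2, SSSE3, and NEON kernels in this listing mirror. Below is a compact, self-contained paraphrase of its step1/step2 staging; the cosine constants and the rounding helper are recomputed here for illustration and no WRAPLOW-style wrapping is applied, so treat the names and values as assumptions rather than the library's definitions.

#include <stdint.h>

#define DCT_CONST_BITS 14

/* Illustrative Q14 constants, cospi_k ~= round(cos(k*pi/64) * 2^14).
 * Recomputed here; the library defines its own table. */
enum {
  kCospi4 = 16069, kCospi8 = 15137, kCospi12 = 13623, kCospi16 = 11585,
  kCospi20 = 9102, kCospi24 = 6270, kCospi28 = 3196
};

static int32_t round_shift(int64_t x) {  /* stand-in for dct_const_round_shift() */
  return (int32_t)((x + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS);
}

/* Sketch of one 8-point inverse DCT column: two butterfly stages fill
 * step1/step2, then adds and subs fold the halves into the outputs. */
static void idct8_sketch(const int16_t in[8], int16_t out[8]) {
  int32_t step1[8], step2[8];
  /* Stage 1: reorder the even inputs, rotate the odd ones. */
  step1[0] = in[0]; step1[2] = in[4]; step1[1] = in[2]; step1[3] = in[6];
  step1[4] = round_shift((int64_t)in[1] * kCospi28 - (int64_t)in[7] * kCospi4);
  step1[7] = round_shift((int64_t)in[1] * kCospi4 + (int64_t)in[7] * kCospi28);
  step1[5] = round_shift((int64_t)in[5] * kCospi12 - (int64_t)in[3] * kCospi20);
  step1[6] = round_shift((int64_t)in[5] * kCospi20 + (int64_t)in[3] * kCospi12);
  /* Stage 2: rotate the even half, add/sub the odd half. */
  step2[0] = round_shift((int64_t)(step1[0] + step1[2]) * kCospi16);
  step2[1] = round_shift((int64_t)(step1[0] - step1[2]) * kCospi16);
  step2[2] = round_shift((int64_t)step1[1] * kCospi24 - (int64_t)step1[3] * kCospi8);
  step2[3] = round_shift((int64_t)step1[1] * kCospi8 + (int64_t)step1[3] * kCospi24);
  step2[4] = step1[4] + step1[5];
  step2[5] = step1[4] - step1[5];
  step2[6] = step1[7] - step1[6];
  step2[7] = step1[7] + step1[6];
  /* Stage 3: fold the even half, rotate the middle odd pair by cos(pi/4). */
  step1[0] = step2[0] + step2[3];
  step1[1] = step2[1] + step2[2];
  step1[2] = step2[1] - step2[2];
  step1[3] = step2[0] - step2[3];
  step1[4] = step2[4];
  step1[5] = round_shift((int64_t)(step2[6] - step2[5]) * kCospi16);
  step1[6] = round_shift((int64_t)(step2[6] + step2[5]) * kCospi16);
  step1[7] = step2[7];
  /* Stage 4: final mirrored add/sub produces the eight outputs. */
  for (int i = 0; i < 4; ++i) {
    out[i] = (int16_t)(step1[i] + step1[7 - i]);
    out[7 - i] = (int16_t)(step1[i] - step1[7 - i]);
  }
}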
|
D | fwd_txfm.c | matches in vpx_fdct16x16_c(); step1 is a local:
     197  tran_high_t step1[8];  // canbe16
     215  step1[0] = (input[7 * stride] - input[8 * stride]) * 4;
     216  step1[1] = (input[6 * stride] - input[9 * stride]) * 4;
     217  step1[2] = (input[5 * stride] - input[10 * stride]) * 4;
     218  step1[3] = (input[4 * stride] - input[11 * stride]) * 4;
     219  step1[4] = (input[3 * stride] - input[12 * stride]) * 4;
     220  step1[5] = (input[2 * stride] - input[13 * stride]) * 4;
     221  step1[6] = (input[1 * stride] - input[14 * stride]) * 4;
     222  step1[7] = (input[0 * stride] - input[15 * stride]) * 4;
     235  step1[0] = ((in_low[7 * 16] + 1) >> 2) - ((in_low[8 * 16] + 1) >> 2);
     [all …]
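Note: the matched lines show the recurring first-pass pattern in the 16-point forward transform: step1[] holds the cross differences input[(7 - i) * stride] - input[(8 + i) * stride] that feed the odd half, scaled by 4 in pass 1 and re-rounded in pass 2. A tiny illustrative sketch of that loop follows; the function name and the tran_high_t typedef here are assumptions, not the library's declarations.

#include <stddef.h>
#include <stdint.h>

typedef int64_t tran_high_t;  /* wide intermediate; the library's own typedef may differ */

/* Sketch only: first-pass cross differences of a 16-point forward DCT column,
 * with the *4 scaling shown in the lines above. */
static void fdct16_cross_diff_sketch(const int16_t *input, ptrdiff_t stride,
                                     tran_high_t step1[8]) {
  for (int i = 0; i < 8; ++i)
    step1[i] = (input[(7 - i) * stride] - input[(8 + i) * stride]) * 4;
}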
|
/external/libvpx/libvpx/vp9/encoder/ |
D | vp9_dct.c | matches in fdct16(); step1 is a local:
      95  tran_high_t step1[8];  // canbe16
     111  step1[0] = in[7] - in[8];
     112  step1[1] = in[6] - in[9];
     113  step1[2] = in[5] - in[10];
     114  step1[3] = in[4] - in[11];
     115  step1[4] = in[3] - in[12];
     116  step1[5] = in[2] - in[13];
     117  step1[6] = in[1] - in[14];
     118  step1[7] = in[0] - in[15];
     174  temp1 = (step1[5] - step1[2]) * cospi_16_64;
     [all …]
|
/external/autotest/client/tests/error_skip_step/ |
D | control | matches:
       7  DOC = """Raise TestNAError during step1. step0, 2 and 3 should run."""
      16  job.next_step('step1')
      24  def step1():
      34  print 'at least I can run. unlike step1.'
|
/external/llvm/test/Transforms/IndVarSimplify/ |
D | no-iv-rewrite.ll | matches:
     282  ; Two increments should remain, one by %step and one by %step1.
     303  %step1 = add i32 %step, 1
     304  %init1 = add i32 %init, %step1
     305  %l.0 = sub i32 %init1, %step1
     314  %ii.next = add i32 %ii, %step1
     315  %j.next = add i32 %j, %step1
     316  %k.next = add i32 %k, %step1
|
/external/testng/src/test/java/test/dependent/ |
D | SampleDependentMethods5.java | match: step1() is a method of SampleDependentMethods5:
      13  public void step1() {
|
D | SampleDependentMethods6.java | match: step1() is a method of SampleDependentMethods6:
      12  public void step1() {
|