/external/libvpx/vpx_dsp/arm/ |
D | idct32x32_34_add_neon.c |
    98   s2[9] = multiply_accumulate_shift_and_narrow_s16(s2[8], -cospi_8_64, s2[15],  in vpx_idct32_6_neon()
    101  cospi_8_64);  in vpx_idct32_6_neon()
    116  s1[18] = multiply_accumulate_shift_and_narrow_s16(s1[17], -cospi_8_64, s1[30],  in vpx_idct32_6_neon()
    119  cospi_8_64);  in vpx_idct32_6_neon()
    121  s1[19] = multiply_accumulate_shift_and_narrow_s16(s1[16], -cospi_8_64, s1[31],  in vpx_idct32_6_neon()
    124  cospi_8_64);  in vpx_idct32_6_neon()
    127  s2[27], -cospi_8_64);  in vpx_idct32_6_neon()
    128  s1[27] = multiply_accumulate_shift_and_narrow_s16(s2[20], -cospi_8_64, s2[27],  in vpx_idct32_6_neon()
    132  s2[26], -cospi_8_64);  in vpx_idct32_6_neon()
    133  s1[26] = multiply_accumulate_shift_and_narrow_s16(s2[21], -cospi_8_64, s2[26],  in vpx_idct32_6_neon()
    [all …]
|
D | idct32x32_135_add_neon.c |
    175  s4[3] = multiply_shift_and_narrow_s16(in[8], cospi_8_64);  in vpx_idct32_12_neon()
    177  s4[9] = multiply_accumulate_shift_and_narrow_s16(s2[8], -cospi_8_64, s2[15],  in vpx_idct32_12_neon()
    180  cospi_8_64);  in vpx_idct32_12_neon()
    183  s3[13], -cospi_8_64);  in vpx_idct32_12_neon()
    184  s4[13] = multiply_accumulate_shift_and_narrow_s16(s3[10], -cospi_8_64, s3[13],  in vpx_idct32_12_neon()
    222  s5[18] = multiply_accumulate_shift_and_narrow_s16(s4[18], -cospi_8_64, s4[29],  in vpx_idct32_12_neon()
    225  cospi_8_64);  in vpx_idct32_12_neon()
    227  s5[19] = multiply_accumulate_shift_and_narrow_s16(s4[19], -cospi_8_64, s4[28],  in vpx_idct32_12_neon()
    230  cospi_8_64);  in vpx_idct32_12_neon()
    233  s4[27], -cospi_8_64);  in vpx_idct32_12_neon()
    [all …]
|
D | highbd_idct32x32_34_add_neon.c |
    106  s2[9] = multiply_accumulate_shift_and_narrow_s32_dual(s2[8], -cospi_8_64,  in vpx_highbd_idct32_6_neon()
    109  s2[15], cospi_8_64);  in vpx_highbd_idct32_6_neon()
    124  s1[18] = multiply_accumulate_shift_and_narrow_s32_dual(s1[17], -cospi_8_64,  in vpx_highbd_idct32_6_neon()
    127  s1[30], cospi_8_64);  in vpx_highbd_idct32_6_neon()
    129  s1[19] = multiply_accumulate_shift_and_narrow_s32_dual(s1[16], -cospi_8_64,  in vpx_highbd_idct32_6_neon()
    132  s1[31], cospi_8_64);  in vpx_highbd_idct32_6_neon()
    135  s2[27], -cospi_8_64);  in vpx_highbd_idct32_6_neon()
    136  s1[27] = multiply_accumulate_shift_and_narrow_s32_dual(s2[20], -cospi_8_64,  in vpx_highbd_idct32_6_neon()
    140  s2[26], -cospi_8_64);  in vpx_highbd_idct32_6_neon()
    141  s1[26] = multiply_accumulate_shift_and_narrow_s32_dual(s2[21], -cospi_8_64,  in vpx_highbd_idct32_6_neon()
    [all …]
|
D | highbd_idct32x32_135_add_neon.c |
    186  s4[3] = multiply_shift_and_narrow_s32_dual(in[8], cospi_8_64);  in vpx_highbd_idct32_12_neon()
    188  s4[9] = multiply_accumulate_shift_and_narrow_s32_dual(s2[8], -cospi_8_64,  in vpx_highbd_idct32_12_neon()
    191  s2[15], cospi_8_64);  in vpx_highbd_idct32_12_neon()
    194  s3[13], -cospi_8_64);  in vpx_highbd_idct32_12_neon()
    195  s4[13] = multiply_accumulate_shift_and_narrow_s32_dual(s3[10], -cospi_8_64,  in vpx_highbd_idct32_12_neon()
    233  s5[18] = multiply_accumulate_shift_and_narrow_s32_dual(s4[18], -cospi_8_64,  in vpx_highbd_idct32_12_neon()
    236  s4[29], cospi_8_64);  in vpx_highbd_idct32_12_neon()
    238  s5[19] = multiply_accumulate_shift_and_narrow_s32_dual(s4[19], -cospi_8_64,  in vpx_highbd_idct32_12_neon()
    241  s4[28], cospi_8_64);  in vpx_highbd_idct32_12_neon()
    244  s4[27], -cospi_8_64);  in vpx_highbd_idct32_12_neon()
    [all …]
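The four NEON listings above all route through the multiply(_accumulate)_shift_and_narrow helpers. As a rough scalar model of what one lane computes — an assumption based on the library's 14-bit fixed-point rounding convention, not a copy of the NEON code — each call is a one- or two-term dot product with cosine constants followed by a rounding shift back down by 14 bits:

    /* Hypothetical scalar model of the shift-and-narrow helpers above; the
     * real versions vectorize this with vmull/vmlal plus rounding narrows. */
    #include <stdint.h>

    #define DCT_CONST_BITS 14
    #define DCT_CONST_ROUNDING (1 << (DCT_CONST_BITS - 1))

    static int16_t multiply_shift_and_narrow(int16_t a, int16_t a_const) {
      return (int16_t)(((int32_t)a * a_const + DCT_CONST_ROUNDING) >> DCT_CONST_BITS);
    }

    static int16_t multiply_accumulate_shift_and_narrow(int16_t a, int16_t a_const,
                                                        int16_t b, int16_t b_const) {
      const int32_t sum = (int32_t)a * a_const + (int32_t)b * b_const;
      return (int16_t)((sum + DCT_CONST_ROUNDING) >> DCT_CONST_BITS);
    }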
|
D | idct4x4_add_neon.asm |
    41   ; cospi_8_64 = 15137
    57   vdup.16 d20, r0              ; replicate cospi_8_64
    76   vmull.s16 q1, d17, d20       ; input[1] * cospi_8_64
    85   ; input[1] * cospi_24_64 - input[3] * cospi_8_64;
    86   ; input[1] * cospi_8_64 + input[3] * cospi_24_64;
    129  vmull.s16 q1, d17, d20       ; input[1] * cospi_8_64
    136  ; input[1] * cospi_24_64 - input[3] * cospi_8_64;
    137  ; input[1] * cospi_8_64 + input[3] * cospi_24_64;
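The comments at lines 85–86 (and 136–137) spell out the 2-point rotation behind this constant: the cospi_k_64 values are cos(k·π/64) scaled by 2^14, so cospi_8_64 = round(16384 · cos(8π/64)) = 15137 as the comment at line 41 states; cospi_24_64 = 6270 follows the same convention. A minimal scalar sketch of the rotation those comments describe (the rounding step and cospi_24_64 value are assumptions based on that convention, not taken from this file):

    /* Scalar sketch of the odd-output rotation described by the comments at
     * lines 85-86; not the NEON implementation itself. */
    #include <stdint.h>

    #define cospi_8_64 15137   /* round(16384 * cos(8 * pi / 64))  */
    #define cospi_24_64 6270   /* round(16384 * cos(24 * pi / 64)) */
    #define DCT_CONST_BITS 14

    static void idct4_odd_rotation(const int16_t *input, int16_t *out2, int16_t *out3) {
      const int32_t a = input[1] * cospi_24_64 - input[3] * cospi_8_64;
      const int32_t b = input[1] * cospi_8_64 + input[3] * cospi_24_64;
      /* Round and drop the 14 fractional bits reintroduced by the constants. */
      *out2 = (int16_t)((a + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS);
      *out3 = (int16_t)((b + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS);
    }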
|
D | fdct4x4_neon.h |
    37   butterfly_two_coeff_half(s_3, s_2, cospi_8_64, cospi_24_64, &out[1], &out[3]);  in vpx_fdct4x4_pass1_neon()
    69   butterfly_two_coeff_half(s_3, s_2, cospi_8_64, cospi_24_64, &out[1], &out[3]);  in vpx_fdct4x4_pass2_neon()
    93   butterfly_two_coeff_s32_s64_narrow_half(s_3, s_2, cospi_8_64, cospi_24_64,  in vpx_highbd_fdct4x4_pass1_neon()
|
D | fdct32x32_neon.h |
    255  butterfly_two_coeff(b[29], b[18], cospi_8_64, cospi_24_64, &a[29], &a[18]);  in dct_body_first_pass()
    256  butterfly_two_coeff(b[28], b[19], cospi_8_64, cospi_24_64, &a[28], &a[19]);  in dct_body_first_pass()
    257  butterfly_two_coeff(b[27], b[20], cospi_24_64, -cospi_8_64, &a[27], &a[20]);  in dct_body_first_pass()
    258  butterfly_two_coeff(b[26], b[21], cospi_24_64, -cospi_8_64, &a[26], &a[21]);  in dct_body_first_pass()
    270  butterfly_two_coeff(a[3], a[2], cospi_8_64, cospi_24_64, &b[2], &b[3]);  in dct_body_first_pass()
    279  butterfly_two_coeff(a[14], a[9], cospi_8_64, cospi_24_64, &b[14], &b[9]);  in dct_body_first_pass()
    280  butterfly_two_coeff(a[13], a[10], cospi_24_64, -cospi_8_64, &b[13], &b[10]);  in dct_body_first_pass()
    635  BUTTERFLY_TWO_S32(c, 29, 18, cospi_8_64, cospi_24_64, d, 29, 18);  in dct_body_second_pass()
    636  BUTTERFLY_TWO_S32(c, 28, 19, cospi_8_64, cospi_24_64, d, 28, 19);  in dct_body_second_pass()
    637  BUTTERFLY_TWO_S32(c, 27, 20, cospi_24_64, -cospi_8_64, d, 27, 20);  in dct_body_second_pass()
    [all …]
|
/external/libvpx/vpx_dsp/mips/ |
D | itrans16_dspr2.c |
    75    [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64),  in idct16_rows_dspr2()
    137   [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64));  in idct16_rows_dspr2()
    198   [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64));  in idct16_rows_dspr2()
    460   [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64),  in idct16_cols_add_blk_dspr2()
    522   [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64));  in idct16_cols_add_blk_dspr2()
    583   [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64));  in idct16_cols_add_blk_dspr2()
    1163  s4 = x4 * cospi_8_64 + x5 * cospi_24_64;  in iadst16_dspr2()
    1164  s5 = x4 * cospi_24_64 - x5 * cospi_8_64;  in iadst16_dspr2()
    1165  s6 = -x6 * cospi_24_64 + x7 * cospi_8_64;  in iadst16_dspr2()
    1166  s7 = x6 * cospi_8_64 + x7 * cospi_24_64;  in iadst16_dspr2()
    [all …]
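The bracketed fragments in the dspr2 listings are GCC extended-asm operand bindings: each [cospi_8_64] "r"(cospi_8_64) entry hands the constant to the hand-written DSPr2 block in a general-purpose register under that symbolic name. A minimal, hypothetical illustration of the binding pattern (MIPS-only, and not code from libvpx):

    /* Hypothetical sketch of the named-operand binding style used by the
     * dspr2 files: a cosine constant is passed into inline asm by register. */
    static inline int scale_by_cospi_8_64(int x) {
      int out;
      __asm__("mul %[out], %[in], %[c]"        /* MIPS32 three-operand multiply */
              : [out] "=r"(out)
              : [in] "r"(x), [c] "r"(15137));  /* cospi_8_64 = 15137 */
      return out;
    }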
|
D | itrans32_cols_dspr2.c |
    343  [cospi_8_64] "r"(cospi_8_64), [cospi_24_64] "r"(cospi_24_64));  in vpx_idct32_cols_add_blk_dspr2()
    399  [cospi_8_64] "r"(cospi_8_64), [cospi_24_64] "r"(cospi_24_64));  in vpx_idct32_cols_add_blk_dspr2()
    470  [cospi_8_64] "r"(cospi_8_64));  in vpx_idct32_cols_add_blk_dspr2()
    495  [cospi_8_64] "r"(cospi_8_64));  in vpx_idct32_cols_add_blk_dspr2()
    520  [cospi_8_64] "r"(cospi_8_64));  in vpx_idct32_cols_add_blk_dspr2()
    545  [cospi_8_64] "r"(cospi_8_64));  in vpx_idct32_cols_add_blk_dspr2()
    624  [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64),  in vpx_idct32_cols_add_blk_dspr2()
|
D | itrans8_dspr2.c |
    186  [cospi_20_64] "r"(cospi_20_64), [cospi_8_64] "r"(cospi_8_64),  in idct8_rows_dspr2()
    446  [cospi_20_64] "r"(cospi_20_64), [cospi_8_64] "r"(cospi_8_64),  in idct8_columns_add_blk_dspr2()
    656  s4 = cospi_8_64 * x4 + cospi_24_64 * x5;  in iadst8_dspr2()
    657  s5 = cospi_24_64 * x4 - cospi_8_64 * x5;  in iadst8_dspr2()
    658  s6 = -cospi_24_64 * x6 + cospi_8_64 * x7;  in iadst8_dspr2()
    659  s7 = cospi_8_64 * x6 + cospi_24_64 * x7;  in iadst8_dspr2()
|
D | itrans32_dspr2.c |
    387  [cospi_8_64] "r"(cospi_8_64), [cospi_24_64] "r"(cospi_24_64));  in idct32_rows_dspr2()
    443  [cospi_8_64] "r"(cospi_8_64), [cospi_24_64] "r"(cospi_24_64));  in idct32_rows_dspr2()
    514  [cospi_8_64] "r"(cospi_8_64));  in idct32_rows_dspr2()
    539  [cospi_8_64] "r"(cospi_8_64));  in idct32_rows_dspr2()
    564  [cospi_8_64] "r"(cospi_8_64));  in idct32_rows_dspr2()
    589  [cospi_8_64] "r"(cospi_8_64));  in idct32_rows_dspr2()
    668  [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64),  in idct32_rows_dspr2()
|
D | itrans4_dspr2.c |
    90   [cospi_8_64] "r"(cospi_8_64), [cospi_16_64] "r"(cospi_16_64),  in vpx_idct4_rows_dspr2()
    212  [cospi_8_64] "r"(cospi_8_64), [cospi_16_64] "r"(cospi_16_64),  in vpx_idct4_columns_add_blk_dspr2()
|
D | inv_txfm_msa.h |
    25   v8i16 coeff1_m = { cospi_8_64, -cospi_8_64, cospi_16_64, -cospi_16_64, \
    122  c2_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64); \
    123  c3_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64); \
    239  k2_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64); \
    240  k3_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64); \
    256  -cospi_26_64, cospi_8_64, cospi_24_64, -cospi_8_64 }; \
    258  -cospi_24_64, cospi_8_64, cospi_16_64, -cospi_16_64, 0, 0, 0, 0 \
    386  k0_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64); \
    387  k1_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64); \
    388  k2_m = VP9_SET_COSPI_PAIR(-cospi_24_64, cospi_8_64); \
|
D | fwd_dct32x32_msa.c |
    83   DOTP_CONST_PAIR(in0, in1, cospi_24_64, cospi_8_64, temp1, temp0);  in fdct8x32_1d_column_even_store()
    105  DOTP_CONST_PAIR(vec6, vec1, cospi_24_64, cospi_8_64, in1, in3);  in fdct8x32_1d_column_even_store()
    119  DOTP_CONST_PAIR((-vec2), vec5, cospi_24_64, cospi_8_64, in2, in1);  in fdct8x32_1d_column_even_store()
    183  DOTP_CONST_PAIR(in26, in21, cospi_24_64, cospi_8_64, in18, in29);  in fdct8x32_1d_column_odd_store()
    184  DOTP_CONST_PAIR(in27, in20, cospi_24_64, cospi_8_64, in19, in28);  in fdct8x32_1d_column_odd_store()
    217  DOTP_CONST_PAIR(-in16, in27, cospi_24_64, cospi_8_64, in20, in27);  in fdct8x32_1d_column_odd_store()
    218  DOTP_CONST_PAIR(-in17, in26, cospi_24_64, cospi_8_64, in21, in26);  in fdct8x32_1d_column_odd_store()
    336  DOTP_CONST_PAIR_W(vec5_r, vec7_r, vec0_r, vec1_r, cospi_24_64, cospi_8_64,  in fdct8x32_1d_row_even_4x()
    364  DOTP_CONST_PAIR(vec6, vec1, cospi_24_64, cospi_8_64, in1, in3);  in fdct8x32_1d_row_even_4x()
    378  DOTP_CONST_PAIR((-vec2), vec5, cospi_24_64, cospi_8_64, in2, in1);  in fdct8x32_1d_row_even_4x()
    [all …]
|
/external/libaom/aom_dsp/x86/ |
D | fwd_txfm_impl_sse2.h |
    46   octa_set_epi16(cospi_8_64, cospi_24_64, cospi_8_64, cospi_24_64,  in FDCT4x4_2D_HELPER()
    47   cospi_24_64, -cospi_8_64, cospi_24_64, -cospi_8_64);  in FDCT4x4_2D_HELPER()
    49   octa_set_epi16(cospi_24_64, -cospi_8_64, cospi_24_64, -cospi_8_64,  in FDCT4x4_2D_HELPER()
    50   cospi_8_64, cospi_24_64, cospi_8_64, cospi_24_64);  in FDCT4x4_2D_HELPER()
    58   octa_set_epi16(cospi_8_64, cospi_24_64, cospi_8_64, cospi_24_64,  in FDCT4x4_2D_HELPER()
    59   -cospi_8_64, -cospi_24_64, -cospi_8_64, -cospi_24_64);  in FDCT4x4_2D_HELPER()
    61   octa_set_epi16(cospi_24_64, -cospi_8_64, cospi_24_64, -cospi_8_64,  in FDCT4x4_2D_HELPER()
    62   -cospi_24_64, cospi_8_64, -cospi_24_64, cospi_8_64);  in FDCT4x4_2D_HELPER()
    241  const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64);  in FDCT8x8_2D()
    242  const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);  in FDCT8x8_2D()
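The octa_set_epi16/pair_set_epi16 constants above interleave two cosine coefficients across a 128-bit register so that a single pmaddwd (_mm_madd_epi16) yields a·c1 + b·c2 in every 32-bit lane. A minimal sketch of that pattern using raw intrinsics (the function name and the 14-bit rounding are illustrative assumptions, not the library's helpers):

    /* Sketch of the pmaddwd butterfly pattern behind the constants above
     * (illustrative; the library wraps this in pair_set_epi16 and helpers). */
    #include <emmintrin.h>

    #define DCT_CONST_BITS 14
    #define DCT_CONST_ROUNDING (1 << (DCT_CONST_BITS - 1))

    /* Low four lanes: out[i] = (a[i] * 6270 + b[i] * 15137 + rounding) >> 14,
     * where 6270/15137 stand in for cospi_24_64/cospi_8_64. */
    static __m128i butterfly_lo_p24_p08(__m128i a, __m128i b) {
      const __m128i k_p24_p08 = _mm_set_epi16(15137, 6270, 15137, 6270,
                                              15137, 6270, 15137, 6270);
      const __m128i rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
      const __m128i ab = _mm_unpacklo_epi16(a, b);        /* a0 b0 a1 b1 ... */
      const __m128i dot = _mm_madd_epi16(ab, k_p24_p08);  /* a*c24 + b*c8 per lane */
      return _mm_srai_epi32(_mm_add_epi32(dot, rounding), DCT_CONST_BITS);
    }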
|
/external/libvpx/vp9/encoder/ |
D | vp9_dct.c |
    36   temp1 = step[2] * cospi_24_64 + step[3] * cospi_8_64;  in fdct4()
    37   temp2 = -step[2] * cospi_8_64 + step[3] * cospi_24_64;  in fdct4()
    64   t2 = x2 * cospi_24_64 + x3 * cospi_8_64;  in fdct8()
    65   t3 = -x2 * cospi_8_64 + x3 * cospi_24_64;  in fdct8()
    143  t2 = x3 * cospi_8_64 + x2 * cospi_24_64;  in fdct16()
    144  t3 = x3 * cospi_24_64 - x2 * cospi_8_64;  in fdct16()
    194  temp1 = step3[1] * -cospi_8_64 + step3[6] * cospi_24_64;  in fdct16()
    195  temp2 = step3[2] * cospi_24_64 + step3[5] * cospi_8_64;  in fdct16()
    198  temp1 = step3[2] * cospi_8_64 - step3[5] * cospi_24_64;  in fdct16()
    199  temp2 = step3[1] * cospi_24_64 + step3[6] * cospi_8_64;  in fdct16()
    [all …]
|
/external/libvpx/vpx_dsp/ |
D | fwd_txfm.c |
    60   temp1 = step[2] * cospi_24_64 + step[3] * cospi_8_64;  in vpx_fdct4x4_c()
    61   temp2 = -step[2] * cospi_8_64 + step[3] * cospi_24_64;  in vpx_fdct4x4_c()
    134  t2 = x2 * cospi_24_64 + x3 * cospi_8_64;  in vpx_fdct8x8_c()
    135  t3 = -x2 * cospi_8_64 + x3 * cospi_24_64;  in vpx_fdct8x8_c()
    268  t2 = x3 * cospi_8_64 + x2 * cospi_24_64;  in vpx_fdct16x16_c()
    269  t3 = x3 * cospi_24_64 - x2 * cospi_8_64;  in vpx_fdct16x16_c()
    318  temp1 = step3[1] * -cospi_8_64 + step3[6] * cospi_24_64;  in vpx_fdct16x16_c()
    319  temp2 = step3[2] * cospi_24_64 + step3[5] * cospi_8_64;  in vpx_fdct16x16_c()
    322  temp1 = step3[2] * cospi_8_64 - step3[5] * cospi_24_64;  in vpx_fdct16x16_c()
    323  temp2 = step3[1] * cospi_24_64 + step3[6] * cospi_8_64;  in vpx_fdct16x16_c()
    [all …]
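These scalar reference lines perform the same (cospi_24_64, cospi_8_64) rotation that the SIMD paths above specialize. A small self-contained sketch of one such step; fdct_round_shift here is modeled from the rounding convention used throughout these transforms, not copied from the library:

    /* Scalar sketch of the forward rotation used at call sites like lines
     * 60-61 above (illustrative model, not the libvpx source). */
    #include <stdint.h>

    #define DCT_CONST_BITS 14
    #define DCT_CONST_ROUNDING (1 << (DCT_CONST_BITS - 1))
    #define cospi_8_64 15137
    #define cospi_24_64 6270

    static int32_t fdct_round_shift(int64_t x) {
      return (int32_t)((x + DCT_CONST_ROUNDING) >> DCT_CONST_BITS);
    }

    static void rotate_p24_p08(int32_t in2, int32_t in3, int32_t *out1, int32_t *out3) {
      *out1 = fdct_round_shift((int64_t)in2 * cospi_24_64 + (int64_t)in3 * cospi_8_64);
      *out3 = fdct_round_shift((int64_t)-in2 * cospi_8_64 + (int64_t)in3 * cospi_24_64);
    }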
|
D | inv_txfm.c |
    142  temp1 = (int16_t)input[1] * cospi_24_64 - (int16_t)input[3] * cospi_8_64;  in idct4_c()
    143  temp2 = (int16_t)input[1] * cospi_8_64 + (int16_t)input[3] * cospi_24_64;  in idct4_c()
    236  s4 = (int)(cospi_8_64 * x4 + cospi_24_64 * x5);  in iadst8_c()
    237  s5 = (int)(cospi_24_64 * x4 - cospi_8_64 * x5);  in iadst8_c()
    238  s6 = (int)(-cospi_24_64 * x6 + cospi_8_64 * x7);  in iadst8_c()
    239  s7 = (int)(cospi_8_64 * x6 + cospi_24_64 * x7);  in iadst8_c()
    294  temp1 = step1[1] * cospi_24_64 - step1[3] * cospi_8_64;  in idct8_c()
    295  temp2 = step1[1] * cospi_8_64 + step1[3] * cospi_24_64;  in idct8_c()
    490  s4 = x4 * cospi_8_64 + x5 * cospi_24_64;  in iadst16_c()
    491  s5 = x4 * cospi_24_64 - x5 * cospi_8_64;  in iadst16_c()
    [all …]
|
/external/libvpx/config/arm-neon/vpx_dsp/arm/ |
D | idct4x4_add_neon.asm.S |
    47   @ cospi_8_64 = 15137
    63   vdup.16 d20, r0              @ replicate cospi_8_64
    82   vmull.s16 q1, d17, d20       @ input[1] * cospi_8_64
    91   @ input[1] * cospi_24_64 - input[3] * cospi_8_64;
    92   @ input[1] * cospi_8_64 + input[3] * cospi_24_64;
    135  vmull.s16 q1, d17, d20       @ input[1] * cospi_8_64
    142  @ input[1] * cospi_24_64 - input[3] * cospi_8_64;
    143  @ input[1] * cospi_8_64 + input[3] * cospi_24_64;
|
/external/libaom/aom_dsp/ |
D | fwd_txfm.c |
    59   temp1 = step[2] * cospi_24_64 + step[3] * cospi_8_64;  in aom_fdct4x4_c()
    60   temp2 = -step[2] * cospi_8_64 + step[3] * cospi_24_64;  in aom_fdct4x4_c()
    121  temp1 = step[2] * (int32_t)cospi_24_64 + step[3] * (int32_t)cospi_8_64;  in aom_fdct4x4_lp_c()
    122  temp2 = -step[2] * (int32_t)cospi_8_64 + step[3] * (int32_t)cospi_24_64;  in aom_fdct4x4_lp_c()
    184  t2 = x2 * cospi_24_64 + x3 * cospi_8_64;  in aom_fdct8x8_c()
    185  t3 = -x2 * cospi_8_64 + x3 * cospi_24_64;  in aom_fdct8x8_c()
|
/external/libvpx/vpx_dsp/x86/ |
D | highbd_idct16x16_add_sse4.c |
    85   highbd_butterfly_sse4_1(io[4], io[12], cospi_24_64, cospi_8_64, &step2[2],  in vpx_highbd_idct16_4col_sse4_1()
    87   highbd_butterfly_sse4_1(step1[14], step1[9], cospi_24_64, cospi_8_64,  in vpx_highbd_idct16_4col_sse4_1()
    89   highbd_butterfly_sse4_1(step1[10], step1[13], -cospi_8_64, -cospi_24_64,  in vpx_highbd_idct16_4col_sse4_1()
    137  highbd_partial_butterfly_sse4_1(io[4], cospi_24_64, cospi_8_64, &step2[2],  in highbd_idct16x16_38_4col()
    139  highbd_butterfly_sse4_1(step1[14], step1[9], cospi_24_64, cospi_8_64,  in highbd_idct16x16_38_4col()
    141  highbd_butterfly_sse4_1(step1[10], step1[13], -cospi_8_64, -cospi_24_64,  in highbd_idct16x16_38_4col()
    185  highbd_butterfly_sse4_1(step1[14], step1[9], cospi_24_64, cospi_8_64,  in highbd_idct16x16_10_4col()
    187  highbd_butterfly_sse4_1(step1[10], step1[13], -cospi_8_64, -cospi_24_64,  in highbd_idct16x16_10_4col()
|
D | highbd_idct16x16_add_sse2.c |
    84   highbd_butterfly_sse2(io[4], io[12], cospi_24_64, cospi_8_64, &step2[2],  in highbd_idct16_4col()
    86   highbd_butterfly_sse2(step1[14], step1[9], cospi_24_64, cospi_8_64, &step2[9],  in highbd_idct16_4col()
    88   highbd_butterfly_sse2(step1[10], step1[13], cospi_8_64, cospi_24_64,  in highbd_idct16_4col()
    136  highbd_partial_butterfly_sse2(io[4], cospi_24_64, cospi_8_64, &step2[2],  in highbd_idct16x16_38_4col()
    138  highbd_butterfly_sse2(step1[14], step1[9], cospi_24_64, cospi_8_64, &step2[9],  in highbd_idct16x16_38_4col()
    140  highbd_butterfly_sse2(step1[10], step1[13], cospi_8_64, cospi_24_64,  in highbd_idct16x16_38_4col()
    186  highbd_butterfly_sse2(step1[14], step1[9], cospi_24_64, cospi_8_64, &step2[9],  in highbd_idct16x16_10_4col()
    188  highbd_butterfly_sse2(step1[10], step1[13], cospi_8_64, cospi_24_64,  in highbd_idct16x16_10_4col()
|
D | inv_txfm_sse2.h |
    260  butterfly(in[2], in[6], cospi_24_64, cospi_8_64, &step2[2], &step2[3]);  in idct8()
    309  const __m128i cp_24_n8 = pair_set_epi16(cospi_24_64, -cospi_8_64);  in idct8x8_12_add_kernel_sse2()
    310  const __m128i cp_8_24 = pair_set_epi16(cospi_8_64, cospi_24_64);  in idct8x8_12_add_kernel_sse2()
    367  butterfly(in[4], in[12], cospi_24_64, cospi_8_64, &step2[2], &step2[3]);  in idct16_8col()
    368  butterfly(step1[14], step1[9], cospi_24_64, cospi_8_64, &step2[9],  in idct16_8col()
    370  butterfly(step1[10], step1[13], -cospi_8_64, -cospi_24_64, &step2[13],  in idct16_8col()
    465  const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);  in idct16x16_10_pass1()
    466  const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64);  in idct16x16_10_pass1()
    467  const __m128i k__cospi_m24_m08 = pair_set_epi16(-cospi_24_64, -cospi_8_64);  in idct16x16_10_pass1()
    550  butterfly(step2[15], step2[8], cospi_24_64, cospi_8_64, &step2[9],  in idct16x16_10_pass2()
    [all …]
|
D | fwd_txfm_impl_sse2.h |
    53   octa_set_epi16(cospi_8_64, cospi_24_64, cospi_8_64, cospi_24_64,  in FDCT4x4_2D()
    54   cospi_24_64, -cospi_8_64, cospi_24_64, -cospi_8_64);  in FDCT4x4_2D()
    56   octa_set_epi16(cospi_24_64, -cospi_8_64, cospi_24_64, -cospi_8_64,  in FDCT4x4_2D()
    57   cospi_8_64, cospi_24_64, cospi_8_64, cospi_24_64);  in FDCT4x4_2D()
    65   octa_set_epi16(cospi_8_64, cospi_24_64, cospi_8_64, cospi_24_64,  in FDCT4x4_2D()
    66   -cospi_8_64, -cospi_24_64, -cospi_8_64, -cospi_24_64);  in FDCT4x4_2D()
    68   octa_set_epi16(cospi_24_64, -cospi_8_64, cospi_24_64, -cospi_8_64,  in FDCT4x4_2D()
    69   -cospi_24_64, cospi_8_64, -cospi_24_64, cospi_8_64);  in FDCT4x4_2D()
    266  const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64);  in FDCT8x8_2D()
    267  const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);  in FDCT8x8_2D()
    [all …]
|
/external/libvpx/vpx_dsp/loongarch/ |
D | fwd_dct32x32_lsx.c |
    168  DOTP_CONST_PAIR(in0, in1, cospi_24_64, cospi_8_64, temp1, temp0);  in fdct8x32_1d_column_even_store()
    192  DOTP_CONST_PAIR(vec6, vec1, cospi_24_64, cospi_8_64, in1, in3);  in fdct8x32_1d_column_even_store()
    207  DOTP_CONST_PAIR(temp0, vec5, cospi_24_64, cospi_8_64, in2, in1);  in fdct8x32_1d_column_even_store()
    269  DOTP_CONST_PAIR(in26, in21, cospi_24_64, cospi_8_64, in18, in29);  in fdct8x32_1d_column_odd_store()
    270  DOTP_CONST_PAIR(in27, in20, cospi_24_64, cospi_8_64, in19, in28);  in fdct8x32_1d_column_odd_store()
    309  DOTP_CONST_PAIR(tmp0, in27, cospi_24_64, cospi_8_64, in20, in27);  in fdct8x32_1d_column_odd_store()
    310  DOTP_CONST_PAIR(tmp1, in26, cospi_24_64, cospi_8_64, in21, in26);  in fdct8x32_1d_column_odd_store()
    502  DOTP_CONST_PAIR_W(vec5_r, vec7_r, vec0_r, vec1_r, cospi_24_64, cospi_8_64,  in fdct8x32_1d_row_even_4x()
    539  DOTP_CONST_PAIR(vec6, vec1, cospi_24_64, cospi_8_64, in1, in3);  in fdct8x32_1d_row_even_4x()
    554  DOTP_CONST_PAIR(tmp0_w, vec5, cospi_24_64, cospi_8_64, in2, in1);  in fdct8x32_1d_row_even_4x()
    [all …]
|