/external/libvpx/libvpx/vpx_dsp/mips/ |
D | itrans8_dspr2.c |
      186  [cospi_4_64] "r" (cospi_4_64), [cospi_12_64] "r" (cospi_12_64),  in idct8_rows_dspr2()
      435  [cospi_4_64] "r" (cospi_4_64), [cospi_12_64] "r" (cospi_12_64),  in idct8_columns_add_blk_dspr2()
|
D | itrans16_dspr2.c |
      263  [cospi_20_64] "r" (cospi_20_64), [cospi_12_64] "r" (cospi_12_64),  in idct16_rows_dspr2()
      660  [cospi_20_64] "r" (cospi_20_64), [cospi_12_64] "r" (cospi_12_64),  in idct16_cols_add_blk_dspr2()
     1130  s10 = x10 * cospi_20_64 + x11 * cospi_12_64;  in iadst16_dspr2()
     1131  s11 = x10 * cospi_12_64 - x11 * cospi_20_64;  in iadst16_dspr2()
     1134  s14 = - x14 * cospi_12_64 + x15 * cospi_20_64;  in iadst16_dspr2()
     1135  s15 = x14 * cospi_20_64 + x15 * cospi_12_64;  in iadst16_dspr2()
|
D | itrans32_cols_dspr2.c |
      234  [cospi_12_64] "r" (cospi_12_64), [cospi_20_64] "r" (cospi_20_64)  in vpx_idct32_cols_add_blk_dspr2()
      291  [cospi_12_64] "r" (cospi_12_64), [cospi_20_64] "r" (cospi_20_64)  in vpx_idct32_cols_add_blk_dspr2()
      659  [cospi_20_64] "r" (cospi_20_64), [cospi_12_64] "r" (cospi_12_64),  in vpx_idct32_cols_add_blk_dspr2()
|
D | itrans32_dspr2.c |
      282  [cospi_12_64] "r" (cospi_12_64), [cospi_20_64] "r" (cospi_20_64)  in idct32_rows_dspr2()
      344  [cospi_12_64] "r" (cospi_12_64), [cospi_20_64] "r" (cospi_20_64)  in idct32_rows_dspr2()
      734  [cospi_20_64] "r" (cospi_20_64), [cospi_12_64] "r" (cospi_12_64),  in idct32_rows_dspr2()
|
D | idct8x8_msa.c |
       56  k2 = VP9_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);  in vpx_idct8x8_12_add_msa()
       57  k3 = VP9_SET_COSPI_PAIR(cospi_12_64, cospi_20_64);  in vpx_idct8x8_12_add_msa()
|
D | idct16x16_msa.c |
       28  DOTP_CONST_PAIR(reg10, reg6, cospi_12_64, cospi_20_64, reg10, reg6);  in vpx_idct16_1d_rows_msa()
      120  DOTP_CONST_PAIR(reg10, reg6, cospi_12_64, cospi_20_64, reg10, reg6);  in vpx_idct16_1d_columns_addblk_msa()
      396  k0 = VP9_SET_COSPI_PAIR(cospi_12_64, cospi_20_64);  in vpx_iadst16_1d_columns_addblk_msa()
      397  k1 = VP9_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);  in vpx_iadst16_1d_columns_addblk_msa()
      398  k2 = VP9_SET_COSPI_PAIR(cospi_20_64, -cospi_12_64);  in vpx_iadst16_1d_columns_addblk_msa()
|
D | fwd_dct32x32_msa.c |
       96  DOTP_CONST_PAIR(vec7, vec4, cospi_12_64, cospi_20_64, temp1, temp0);  in fdct8x32_1d_column_even_store()
      219  DOTP_CONST_PAIR(in18, in17, cospi_12_64, cospi_20_64, in29, in30);  in fdct8x32_1d_column_odd_store()
      232  DOTP_CONST_PAIR(-in16, in19, cospi_12_64, cospi_20_64, in28, in31);  in fdct8x32_1d_column_odd_store()
      357  DOTP_CONST_PAIR(vec7, vec4, cospi_12_64, cospi_20_64, in5, in4);  in fdct8x32_1d_row_even_4x()
      431  DOTP_CONST_PAIR(vec7, vec4, cospi_12_64, cospi_20_64, temp1, temp0);  in fdct8x32_1d_row_even()
      569  DOTP_CONST_PAIR(in18, in17, cospi_12_64, cospi_20_64, in29, in30);  in fdct8x32_1d_row_odd()
      583  DOTP_CONST_PAIR(-in16, in19, cospi_12_64, cospi_20_64, in28, in31);  in fdct8x32_1d_row_odd()
      746  DOTP_CONST_PAIR(vec7, vec4, cospi_12_64, cospi_20_64, temp1, temp0);  in fdct8x32_1d_row_even_rd()
      883  DOTP_CONST_PAIR(in18, in17, cospi_12_64, cospi_20_64, in29, in30);  in fdct8x32_1d_row_odd_rd()
      896  DOTP_CONST_PAIR(-in16, in19, cospi_12_64, cospi_20_64, in28, in31);  in fdct8x32_1d_row_odd_rd()
|
D | fwd_txfm_msa.h |
       76  cospi_12_64, cospi_20_64 }; \
      135  cospi_4_64, cospi_28_64, cospi_12_64, cospi_20_64 }; \
      200  cospi_12_64, cospi_20_64 }; \
|
D | idct32x32_msa.c |
       51  DOTP_CONST_PAIR(reg5, reg3, cospi_12_64, cospi_20_64, reg5, reg3);  in idct32x8_row_even_process_store()
      191  DOTP_CONST_PAIR(vec1, vec0, cospi_12_64, cospi_20_64, loc0, loc1);  in idct32x8_row_odd_process_store()
      192  DOTP_CONST_PAIR(vec3, vec2, -cospi_20_64, cospi_12_64, loc2, loc3);  in idct32x8_row_odd_process_store()
      367  DOTP_CONST_PAIR(reg5, reg3, cospi_12_64, cospi_20_64, reg5, reg3);  in idct8x32_column_even_process_store()
      499  DOTP_CONST_PAIR(vec1, vec0, cospi_12_64, cospi_20_64, loc0, loc1);  in idct8x32_column_odd_process_store()
      500  DOTP_CONST_PAIR(vec3, vec2, -cospi_20_64, cospi_12_64, loc2, loc3);  in idct8x32_column_odd_process_store()
|
D | inv_txfm_msa.h |
      218  v8i16 mask_m = { cospi_28_64, cospi_4_64, cospi_20_64, cospi_12_64, \
      374  k0_m = VP9_SET_COSPI_PAIR(cospi_12_64, cospi_20_64); \
      375  k1_m = VP9_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64); \
      376  k2_m = VP9_SET_COSPI_PAIR(cospi_20_64, -cospi_12_64); \
|
/external/libvpx/libvpx/vpx_dsp/ |
D | fwd_txfm.c |
      157  t1 = x1 * cospi_12_64 + x2 * cospi_20_64;  in vpx_fdct8x8_c()
      158  t2 = x2 * cospi_12_64 + x1 * -cospi_20_64;  in vpx_fdct8x8_c()
      293  t1 = x1 * cospi_12_64 + x2 * cospi_20_64;  in vpx_fdct16x16_c()
      294  t2 = x2 * cospi_12_64 + x1 * -cospi_20_64;  in vpx_fdct16x16_c()
      615  output[5] = dct_32_round(step[5] * cospi_12_64 + step[6] * cospi_20_64);  in vpx_fdct32()
      616  output[6] = dct_32_round(step[6] * cospi_12_64 + step[5] * -cospi_20_64);  in vpx_fdct32()
      632  output[21] = dct_32_round(step[21] * -cospi_20_64 + step[26] * cospi_12_64);  in vpx_fdct32()
      633  output[22] = dct_32_round(step[22] * -cospi_12_64 + step[25] * -cospi_20_64);  in vpx_fdct32()
      636  output[25] = dct_32_round(step[25] * cospi_12_64 + step[22] * -cospi_20_64);  in vpx_fdct32()
      637  output[26] = dct_32_round(step[26] * cospi_20_64 + step[21] * cospi_12_64);  in vpx_fdct32()
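The t1/t2 pair quoted above is the recurring fixed-point plane rotation of the forward DCT: two products per output, then a round-and-shift by DCT_CONST_BITS (14), which is what fdct_round_shift()/dct_32_round() apply. A minimal scalar sketch, assuming the Q14 constant values from txfm_common.h; rotate_12_20() is a hypothetical helper name, not a libvpx function:

/* Sketch of the scalar rotation quoted above (hypothetical helper, not
 * libvpx API). The round-and-shift matches ROUND_POWER_OF_TWO(x, 14). */
#include <stdint.h>

#define DCT_CONST_BITS 14
static const int32_t cospi_12_64 = 13623; /* cos(12*pi/64) in Q14 */
static const int32_t cospi_20_64 = 9102;  /* cos(20*pi/64) in Q14 */

static int32_t round_shift(int64_t x) {
  return (int32_t)((x + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS);
}

/* t1 = x1*c12 + x2*c20, t2 = x2*c12 - x1*c20, as at lines 157-158 above. */
static void rotate_12_20(int32_t x1, int32_t x2, int32_t *t1, int32_t *t2) {
  *t1 = round_shift((int64_t)x1 * cospi_12_64 + (int64_t)x2 * cospi_20_64);
  *t2 = round_shift((int64_t)x2 * cospi_12_64 - (int64_t)x1 * cospi_20_64);
}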
|
D | inv_txfm.c |
      168  temp1 = input[5] * cospi_12_64 - input[3] * cospi_20_64;  in idct8_c()
      169  temp2 = input[5] * cospi_20_64 + input[3] * cospi_12_64;  in idct8_c()
      450  temp1 = step2[5] * cospi_12_64 - step2[6] * cospi_20_64;  in idct16_c()
      451  temp2 = step2[5] * cospi_20_64 + step2[6] * cospi_12_64;  in idct16_c()
      655  s10 = x10 * cospi_20_64 + x11 * cospi_12_64;  in iadst16_c()
      656  s11 = x10 * cospi_12_64 - x11 * cospi_20_64;  in iadst16_c()
      659  s14 = - x14 * cospi_12_64 + x15 * cospi_20_64;  in iadst16_c()
      660  s15 = x14 * cospi_20_64 + x15 * cospi_12_64;  in iadst16_c()
      910  temp1 = step2[5] * cospi_12_64 - step2[6] * cospi_20_64;  in idct32_c()
      911  temp2 = step2[5] * cospi_20_64 + step2[6] * cospi_12_64;  in idct32_c()
      [all …]
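The temp1/temp2 pair in idct8_c()/idct16_c()/idct32_c() above is the transpose of the forward rotation listed for fwd_txfm.c, so up to the two 14-bit round-shifts the stages undo each other. A standalone sketch of that round trip, assuming the txfm_common.h constants; the helper rs() and the sample inputs are illustrative only:

/* Standalone round-trip check: apply the forward pair from fwd_txfm.c,
 * then the inverse pair from idct8_c(); not libvpx code. */
#include <stdint.h>
#include <stdio.h>

static const int64_t c12 = 13623; /* cospi_12_64 */
static const int64_t c20 = 9102;  /* cospi_20_64 */

static int64_t rs(int64_t x) { return (x + 8192) >> 14; } /* DCT_CONST_BITS = 14 */

int main(void) {
  int64_t x1 = 1000, x2 = -300;
  int64_t t1 = rs(x1 * c12 + x2 * c20);  /* forward rotation */
  int64_t t2 = rs(x2 * c12 - x1 * c20);
  int64_t y1 = rs(t1 * c12 - t2 * c20);  /* inverse rotation (transpose) */
  int64_t y2 = rs(t1 * c20 + t2 * c12);
  /* prints the original pair back, up to rounding: 1000 -300 */
  printf("%lld %lld\n", (long long)y1, (long long)y2);
  return 0;
}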
|
D | txfm_common.h | 39 static const tran_high_t cospi_12_64 = 13623; variable
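The value 13623 is cos(12*pi/64) in Q14 fixed point, i.e. round(cos(12*pi/64) * 2^14), matching the DCT_CONST_BITS = 14 scaling used throughout these files. A standalone check (not libvpx code):

/* Standalone check of the constant quoted above:
 * cospi_12_64 == round(cos(12 * pi / 64) * 2^14) == 13623. */
#include <math.h>
#include <stdio.h>

int main(void) {
  const double pi = 3.14159265358979323846;
  const double c = cos(12.0 * pi / 64.0);            /* ~0.831470 */
  printf("%d\n", (int)floor(c * (1 << 14) + 0.5));   /* prints 13623 */
  return 0;
}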
|
/external/libvpx/libvpx/vp9/encoder/ |
D | vp9_dct.c |
       85  t1 = x1 * cospi_12_64 + x2 * cospi_20_64;  in fdct8()
       86  t2 = x2 * cospi_12_64 + x1 * -cospi_20_64;  in fdct8()
      164  t1 = x1 * cospi_12_64 + x2 * cospi_20_64;  in fdct16()
      165  t2 = x2 * cospi_12_64 + x1 * -cospi_20_64;  in fdct16()
      413  s10 = x10 * cospi_20_64 + x11 * cospi_12_64;  in fadst16()
      414  s11 = x10 * cospi_12_64 - x11 * cospi_20_64;  in fadst16()
      417  s14 = - x14 * cospi_12_64 + x15 * cospi_20_64;  in fadst16()
      418  s15 = x14 * cospi_20_64 + x15 * cospi_12_64;  in fadst16()
      624  t1 = x1 * cospi_12_64 + x2 * cospi_20_64;  in vp9_fdct8x8_quant_c()
      625  t2 = x2 * cospi_12_64 + x1 * -cospi_20_64;  in vp9_fdct8x8_quant_c()
|
/external/libvpx/libvpx/vpx_dsp/arm/ |
D | fwd_txfm_neon.c |
       98  v_t1_lo = vmull_n_s16(vget_low_s16(v_x1), (int16_t)cospi_12_64);  in vpx_fdct8x8_neon()
       99  v_t1_hi = vmull_n_s16(vget_high_s16(v_x1), (int16_t)cospi_12_64);  in vpx_fdct8x8_neon()
      102  v_t2_lo = vmull_n_s16(vget_low_s16(v_x2), (int16_t)cospi_12_64);  in vpx_fdct8x8_neon()
      103  v_t2_hi = vmull_n_s16(vget_high_s16(v_x2), (int16_t)cospi_12_64);  in vpx_fdct8x8_neon()
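The vmull_n_s16() lines above widen each half of an int16x8_t row to 32 bits while multiplying by cospi_12_64; in the file the accumulation then continues with the cospi_20_64 term and a rounding narrow back to 16 bits. A hedged NEON sketch of the full pair, assuming the Q14 constants; neon_rotate_12_20() is a hypothetical helper name, not the libvpx function:

/* Hedged NEON sketch of the rotation that the vmull_n_s16() lines begin. */
#include <arm_neon.h>
#include <stdint.h>

static const int16_t cospi_12_64 = 13623; /* Q14 */
static const int16_t cospi_20_64 = 9102;  /* Q14 */

static void neon_rotate_12_20(int16x8_t x1, int16x8_t x2,
                              int16x8_t *t1, int16x8_t *t2) {
  /* widen and multiply by cospi_12_64, as in the quoted lines */
  int32x4_t t1_lo = vmull_n_s16(vget_low_s16(x1), cospi_12_64);
  int32x4_t t1_hi = vmull_n_s16(vget_high_s16(x1), cospi_12_64);
  int32x4_t t2_lo = vmull_n_s16(vget_low_s16(x2), cospi_12_64);
  int32x4_t t2_hi = vmull_n_s16(vget_high_s16(x2), cospi_12_64);
  /* accumulate the second product of each pair */
  t1_lo = vmlal_n_s16(t1_lo, vget_low_s16(x2), cospi_20_64);
  t1_hi = vmlal_n_s16(t1_hi, vget_high_s16(x2), cospi_20_64);
  t2_lo = vmlsl_n_s16(t2_lo, vget_low_s16(x1), cospi_20_64);
  t2_hi = vmlsl_n_s16(t2_hi, vget_high_s16(x1), cospi_20_64);
  /* round-shift by DCT_CONST_BITS (14) and narrow back to 16 bits */
  *t1 = vcombine_s16(vrshrn_n_s32(t1_lo, 14), vrshrn_n_s32(t1_hi, 14));
  *t2 = vcombine_s16(vrshrn_n_s32(t2_lo, 14), vrshrn_n_s32(t2_hi, 14));
}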
|
D | idct8x8_add_neon.asm |
       28  vdup.16 d2, r5 ; duplicate cospi_12_64
       35  ; input[5] * cospi_12_64
       43  ; input[5] * cospi_12_64 - input[3] * cospi_20_64
       67  ; input[5] * cospi_20_64 + input[3] * cospi_12_64
      226  ; generate cospi_12_64 = 13623
      338  ; generate cospi_12_64 = 13623
      380  vdup.16 q1, r12 ; duplicate cospi_12_64*2
      388  ; dct_const_round_shift(input[3] * cospi_12_64)
|
D | idct32x32_add_neon.asm |
       24  cospi_12_64 EQU 13623  define
      602  ;temp1 = step1b[26][i] * cospi_12_64 - step1b[21][i] * cospi_20_64;
      603  ;temp2 = step1b[26][i] * cospi_20_64 + step1b[21][i] * cospi_12_64;
      606  DO_BUTTERFLY_STD cospi_12_64, cospi_20_64, d2, d3, d6, d7
      637  ;temp1 = step1b[22][i] * (-cospi_20_64) - step1b[25][i] * (-cospi_12_64);
      638  ;temp2 = step1b[22][i] * (-cospi_12_64) + step1b[25][i] * (-cospi_20_64);
      641  DO_BUTTERFLY_STD (-cospi_20_64), (-cospi_12_64), d8, d9, d14, d15
      899  ;temp1 = input[20 * 32] * cospi_12_64 - input[12 * 32] * cospi_20_64;
      900  ;temp2 = input[20 * 32] * cospi_20_64 + input[12 * 32] * cospi_12_64;
      904  DO_BUTTERFLY_STD cospi_12_64, cospi_20_64, d2, d3, d6, d7
|
D | idct32x32_add_neon.c |
      543  DO_BUTTERFLY_STD(cospi_12_64, cospi_20_64, &q1s16, &q3s16)  in vpx_idct32x32_1024_add_neon()
      557  DO_BUTTERFLY_STD(-cospi_20_64, -cospi_12_64, &q4s16, &q7s16)  in vpx_idct32x32_1024_add_neon()
      665  DO_BUTTERFLY_STD(cospi_12_64, cospi_20_64, &q1s16, &q3s16)  in vpx_idct32x32_1024_add_neon()
|
D | idct8x8_add_neon.c |
      104  d2s16 = vdup_n_s16(cospi_12_64);  in IDCT8x8_1D()
      391  q1s16 = vdupq_n_s16(cospi_12_64 * 2);  in vpx_idct8x8_12_add_neon()
|
D | idct16x16_add_neon.asm |
       79  ; generate cospi_12_64 = 13623
      103  vdup.16 d2, r3 ; duplicate cospi_12_64
      123  ; step2[5] * cospi_12_64
      131  ; temp1 = input[5] * cospi_12_64 - input[3] * cospi_20_64
      135  ; temp2 = step2[5] * cospi_20_64 + step2[6] * cospi_12_64
|
/external/libvpx/libvpx/vpx_dsp/x86/ |
D | inv_txfm_sse2.c |
      461  const __m128i stg1_2 = pair_set_epi16(-cospi_20_64, cospi_12_64);  in vpx_idct8x8_64_add_sse2()
      462  const __m128i stg1_3 = pair_set_epi16(cospi_12_64, cospi_20_64);  in vpx_idct8x8_64_add_sse2()
      550  const __m128i stg1_2 = pair_set_epi16(-cospi_20_64, cospi_12_64);  in idct8_sse2()
      551  const __m128i stg1_3 = pair_set_epi16(cospi_12_64, cospi_20_64);  in idct8_sse2()
      806  const __m128i stg1_2 = pair_set_epi16(-cospi_20_64, cospi_12_64);  in vpx_idct8x8_12_add_sse2()
      807  const __m128i stg1_3 = pair_set_epi16(cospi_12_64, cospi_20_64);  in vpx_idct8x8_12_add_sse2()
     1194  const __m128i stg3_2 = pair_set_epi16(cospi_12_64, -cospi_20_64);  in vpx_idct16x16_256_add_sse2()
     1195  const __m128i stg3_3 = pair_set_epi16(cospi_20_64, cospi_12_64);  in vpx_idct16x16_256_add_sse2()
     1356  const __m128i k__cospi_p20_p12 = pair_set_epi16(cospi_20_64, cospi_12_64);  in iadst16_8col()
     1357  const __m128i k__cospi_p12_m20 = pair_set_epi16(cospi_12_64, -cospi_20_64);  in iadst16_8col()
      [all …]
|
D | fwd_txfm_impl_sse2.h |
      279  const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64);  in FDCT8x8_2D()
      280  const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);  in FDCT8x8_2D()
      601  const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64);  in FDCT16x16_2D()
      602  const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);  in FDCT16x16_2D()
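pair_set_epi16() above packs the two cosines into alternating 16-bit lanes so that one _mm_madd_epi16 over interleaved (x1, x2) data yields x1 * cospi_12_64 + x2 * cospi_20_64 in each 32-bit lane. A sketch under that assumption; pair_epi16() and sse2_rotate_half() are hypothetical local helpers, not the libvpx macros:

/* Sketch of the madd-based rotation fed by the constants quoted above. */
#include <emmintrin.h>
#include <stdint.h>

#define DCT_CONST_BITS 14

static __m128i pair_epi16(int16_t a, int16_t b) {
  /* each 32-bit element holds (a, b); as 16-bit lanes: a, b, a, b, ... */
  return _mm_set1_epi32((int32_t)((uint32_t)(uint16_t)a |
                                  ((uint32_t)(uint16_t)b << 16)));
}

/* returns eight lanes of round_shift(x1*cospi_12_64 + x2*cospi_20_64) */
static __m128i sse2_rotate_half(__m128i x1, __m128i x2) {
  const __m128i k_p12_p20 = pair_epi16(13623, 9102);
  const __m128i k_round = _mm_set1_epi32(1 << (DCT_CONST_BITS - 1));
  const __m128i lo = _mm_unpacklo_epi16(x1, x2); /* x1[0],x2[0],x1[1],... */
  const __m128i hi = _mm_unpackhi_epi16(x1, x2);
  __m128i t_lo = _mm_madd_epi16(lo, k_p12_p20);  /* x1*c12 + x2*c20 */
  __m128i t_hi = _mm_madd_epi16(hi, k_p12_p20);
  t_lo = _mm_srai_epi32(_mm_add_epi32(t_lo, k_round), DCT_CONST_BITS);
  t_hi = _mm_srai_epi32(_mm_add_epi32(t_hi, k_round), DCT_CONST_BITS);
  return _mm_packs_epi32(t_lo, t_hi);            /* back to 16-bit lanes */
}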
|
/external/libvpx/libvpx/vp9/encoder/x86/ |
D | vp9_dct_ssse3.c |
       46  const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64);  in vp9_fdct8x8_quant_ssse3()
       47  const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);  in vp9_fdct8x8_quant_ssse3()
|
D | vp9_dct_sse2.c |
      207  const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64);  in vp9_fdct8x8_quant_sse2()
      208  const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);  in vp9_fdct8x8_quant_sse2()
      776  const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64);  in fdct8_sse2()
      777  const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);  in fdct8_sse2()
     1232  const __m128i k__cospi_p12_p20 = pair_set_epi16(cospi_12_64, cospi_20_64);  in fdct16_8col()
     1233  const __m128i k__cospi_m20_p12 = pair_set_epi16(-cospi_20_64, cospi_12_64);  in fdct16_8col()
     1564  const __m128i k__cospi_p20_p12 = pair_set_epi16(cospi_20_64, cospi_12_64);  in fadst16_8col()
     1565  const __m128i k__cospi_p12_m20 = pair_set_epi16(cospi_12_64, -cospi_20_64);  in fadst16_8col()
     1567  const __m128i k__cospi_m12_p20 = pair_set_epi16(-cospi_12_64, cospi_20_64);  in fadst16_8col()
|
/external/libvpx/libvpx/vp9/common/arm/neon/ |
D | vp9_iht8x8_add_neon.c |
       23  static int16_t cospi_12_64 = 13623;  variable
      122  d2s16 = vdup_n_s16(cospi_12_64);  in IDCT8x8_1D()
|