References to cospi_4_64 across the libvpx tree, grouped by directory and file:

/external/libvpx/libvpx/vpx_dsp/arm/
idct32x32_34_add_neon.c
  in vpx_idct32_6_neon():
     78  s1[7] = multiply_shift_and_narrow_s16(in[4], cospi_4_64);
     80  s1[17] = multiply_accumulate_shift_and_narrow_s16(s1[16], -cospi_4_64, s1[31],
     83      cospi_4_64);
  in vpx_idct32_8_neon():
    299  s1[7] = multiply_shift_and_narrow_s16(in[4], cospi_4_64);
    301  s1[17] = multiply_accumulate_shift_and_narrow_s16(s1[16], -cospi_4_64, s1[31],
    304      cospi_4_64);
    308      s1[28], -cospi_4_64);
    309  s1[29] = multiply_accumulate_shift_and_narrow_s16(s1[19], -cospi_4_64, s1[28],
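Both helpers above live in vpx_dsp/arm/idct_neon.h. A minimal sketch of the idiom they wrap, assuming libvpx's usual DCT_CONST_BITS = 14 (the shape is reconstructed here, not quoted from the source): widen each half of the vector to 32 bits, multiply by the cosine constant, then round-shift and narrow back to 16 bits.

#include <arm_neon.h>

#define DCT_CONST_BITS 14

/* a * a_const, rounded back down to 16 bits. */
static int16x8_t multiply_shift_and_narrow_s16(const int16x8_t a,
                                               const int16_t a_const) {
  const int32x4_t lo = vmull_n_s16(vget_low_s16(a), a_const);
  const int32x4_t hi = vmull_n_s16(vget_high_s16(a), a_const);
  return vcombine_s16(vrshrn_n_s32(lo, DCT_CONST_BITS),
                      vrshrn_n_s32(hi, DCT_CONST_BITS));
}

/* a * a_const + b * b_const, with the same rounding and narrowing. */
static int16x8_t multiply_accumulate_shift_and_narrow_s16(
    const int16x8_t a, const int16_t a_const, const int16x8_t b,
    const int16_t b_const) {
  int32x4_t lo = vmull_n_s16(vget_low_s16(a), a_const);
  int32x4_t hi = vmull_n_s16(vget_high_s16(a), a_const);
  lo = vmlal_n_s16(lo, vget_low_s16(b), b_const);
  hi = vmlal_n_s16(hi, vget_high_s16(b), b_const);
  return vcombine_s16(vrshrn_n_s32(lo, DCT_CONST_BITS),
                      vrshrn_n_s32(hi, DCT_CONST_BITS));
}

Because the constant is a plain scalar argument, the call sites above can pass -cospi_4_64 directly to flip the sign of one term.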
idct32x32_135_add_neon.c
  in vpx_idct32_12_neon():
    145  s3[7] = multiply_shift_and_narrow_s16(in[4], cospi_4_64);
    152  s3[17] = multiply_accumulate_shift_and_narrow_s16(s1[16], -cospi_4_64, s1[31],
    155      cospi_4_64);
    158      s2[29], -cospi_4_64);
    159  s3[29] = multiply_accumulate_shift_and_narrow_s16(s2[18], -cospi_4_64, s2[29],
  in vpx_idct32_16_neon():
    443  s3[7] = multiply_shift_and_narrow_s16(in[4], cospi_4_64);
    457  s3[17] = multiply_accumulate_shift_and_narrow_s16(s2[17], -cospi_4_64, s2[30],
    460      cospi_4_64);
    463      s2[29], -cospi_4_64);
    464  s3[29] = multiply_accumulate_shift_and_narrow_s16(s2[18], -cospi_4_64, s2[29],
highbd_idct32x32_34_add_neon.c
  in vpx_highbd_idct32_6_neon():
     85  s1[7] = multiply_shift_and_narrow_s32_dual(in[4], cospi_4_64);
     87  s1[17] = multiply_accumulate_shift_and_narrow_s32_dual(s1[16], -cospi_4_64,
     90      s1[31], cospi_4_64);
  in vpx_highbd_idct32_8_neon():
    403  s1[7] = multiply_shift_and_narrow_s32_dual(in[4], cospi_4_64);
    405  s1[17] = multiply_accumulate_shift_and_narrow_s32_dual(s1[16], -cospi_4_64,
    408      s1[31], cospi_4_64);
    412      s1[28], -cospi_4_64);
    413  s1[29] = multiply_accumulate_shift_and_narrow_s32_dual(s1[19], -cospi_4_64,
highbd_idct32x32_135_add_neon.c
  in vpx_highbd_idct32_12_neon():
    155  s3[7] = multiply_shift_and_narrow_s32_dual(in[4], cospi_4_64);
    162  s3[17] = multiply_accumulate_shift_and_narrow_s32_dual(s1[16], -cospi_4_64,
    165      s1[31], cospi_4_64);
    168      s2[29], -cospi_4_64);
    169  s3[29] = multiply_accumulate_shift_and_narrow_s32_dual(s2[18], -cospi_4_64,
  in vpx_highbd_idct32_16_neon():
    519  s3[7] = multiply_shift_and_narrow_s32_dual(in[4], cospi_4_64);
    533  s3[17] = multiply_accumulate_shift_and_narrow_s32_dual(s2[17], -cospi_4_64,
    536      s2[30], cospi_4_64);
    539      s2[29], -cospi_4_64);
    540  s3[29] = multiply_accumulate_shift_and_narrow_s32_dual(s2[18], -cospi_4_64,
fwd_txfm_neon.c
  in vpx_fdct8x8_neon():
     98  v_t0_lo = vmull_n_s16(vget_low_s16(v_x3), (int16_t)cospi_4_64);
     99  v_t0_hi = vmull_n_s16(vget_high_s16(v_x3), (int16_t)cospi_4_64);
    112  v_t3_lo = vmlsl_n_s16(v_t3_lo, vget_low_s16(v_x0), (int16_t)cospi_4_64);
    113  v_t3_hi = vmlsl_n_s16(v_t3_hi, vget_high_s16(v_x0), (int16_t)cospi_4_64);
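The forward-transform call sites above build the same rotation inline: vmull_n_s16 and vmlsl_n_s16 accumulate x0 * cospi_28_64 ± x3 * cospi_4_64 in 32 bits before the narrowing shift. A self-contained sketch of the step (the helper name and argument layout are mine for illustration, not libvpx's):

#include <arm_neon.h>

/* Rotation feeding the odd outputs of the 8-point forward DCT:
   t0 = x0 * cospi_28_64 + x3 * cospi_4_64
   t3 = x3 * cospi_28_64 - x0 * cospi_4_64 */
static void fdct8_odd_rotation_sketch(const int16x8_t v_x0,
                                      const int16x8_t v_x3,
                                      int16x8_t *t0, int16x8_t *t3) {
  const int16_t c4 = 16069; /* cospi_4_64 */
  const int16_t c28 = 3196; /* cospi_28_64 */
  int32x4_t t0_lo = vmull_n_s16(vget_low_s16(v_x0), c28);
  int32x4_t t0_hi = vmull_n_s16(vget_high_s16(v_x0), c28);
  int32x4_t t3_lo = vmull_n_s16(vget_low_s16(v_x3), c28);
  int32x4_t t3_hi = vmull_n_s16(vget_high_s16(v_x3), c28);
  t0_lo = vmlal_n_s16(t0_lo, vget_low_s16(v_x3), c4);
  t0_hi = vmlal_n_s16(t0_hi, vget_high_s16(v_x3), c4);
  t3_lo = vmlsl_n_s16(t3_lo, vget_low_s16(v_x0), c4);
  t3_hi = vmlsl_n_s16(t3_hi, vget_high_s16(v_x0), c4);
  /* vrshrn_n_s32(x, 14) performs fdct_round_shift: add 2^13, shift by 14. */
  *t0 = vcombine_s16(vrshrn_n_s32(t0_lo, 14), vrshrn_n_s32(t0_hi, 14));
  *t3 = vcombine_s16(vrshrn_n_s32(t3_lo, 14), vrshrn_n_s32(t3_hi, 14));
}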
highbd_idct32x32_1024_add_neon.c
  in vpx_highbd_idct32_32_neon():
    419  do_butterfly(q[14], q[13], cospi_28_64, cospi_4_64, &q[5], &q[7]);
    433  do_butterfly(q[14], q[13], -cospi_4_64, -cospi_28_64, &q[1], &q[0]);
    585  do_butterfly(q[14], q[13], cospi_28_64, cospi_4_64, &q[0], &q[2]);
/external/libvpx/libvpx/vpx_dsp/mips/
itrans16_dspr2.c
  in idct16_rows_dspr2():
    261  [cospi_4_64] "r"(cospi_4_64), [cospi_28_64] "r"(cospi_28_64),
  in idct16_cols_add_blk_dspr2():
    647  [cospi_4_64] "r"(cospi_4_64), [cospi_28_64] "r"(cospi_28_64),
  in iadst16_dspr2():
   1132  s8 = x8 * cospi_4_64 + x9 * cospi_28_64;
   1133  s9 = x8 * cospi_28_64 - x9 * cospi_4_64;
   1136  s12 = -x12 * cospi_28_64 + x13 * cospi_4_64;
   1137  s13 = x12 * cospi_4_64 + x13 * cospi_28_64;
itrans8_dspr2.c
  in idct8_rows_dspr2():
    185  [cospi_4_64] "r"(cospi_4_64), [cospi_12_64] "r"(cospi_12_64),
  in idct8_columns_add_blk_dspr2():
    445  [cospi_4_64] "r"(cospi_4_64), [cospi_12_64] "r"(cospi_12_64),
itrans32_cols_dspr2.c
  in vpx_idct32_cols_add_blk_dspr2():
    110  [cospi_4_64] "r"(cospi_4_64), [cospi_17_64] "r"(cospi_17_64),
    170  [cospi_4_64] "r"(cospi_4_64), [cospi_7_64] "r"(cospi_7_64),
    683  [cospi_4_64] "r"(cospi_4_64), [cospi_28_64] "r"(cospi_28_64),
itrans32_dspr2.c
  in idct32_rows_dspr2():
    154  [cospi_4_64] "r"(cospi_4_64), [cospi_17_64] "r"(cospi_17_64),
    214  [cospi_4_64] "r"(cospi_4_64), [cospi_7_64] "r"(cospi_7_64),
    727  [cospi_4_64] "r"(cospi_4_64), [cospi_28_64] "r"(cospi_28_64),
idct8x8_msa.c
  in vpx_idct8x8_12_add_msa():
     54  k0 = VP9_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);
     55  k1 = VP9_SET_COSPI_PAIR(cospi_4_64, cospi_28_64);
inv_txfm_msa.h
    220  v8i16 mask_m = { cospi_28_64, cospi_4_64, cospi_20_64, cospi_12_64, \
    221      cospi_16_64, -cospi_4_64, -cospi_20_64, -cospi_16_64 }; \
    370  k0_m = VP9_SET_COSPI_PAIR(cospi_4_64, cospi_28_64); \
    371  k1_m = VP9_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64); \
    372  k2_m = VP9_SET_COSPI_PAIR(-cospi_28_64, cospi_4_64); \
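VP9_SET_COSPI_PAIR packs two cosine constants into alternating 16-bit lanes so a single MSA dot-product step (the DOTP_CONST_PAIR uses below) can evaluate a whole plane rotation at once. A scalar model of the rotation these pairs implement — my paraphrase of the macros' effect, with assumed sign conventions:

#include <stdint.h>

#define DCT_CONST_BITS 14

/* Round a widened product sum back to the transform's working precision. */
static int32_t dct_const_round_shift(int64_t input) {
  return (int32_t)((input + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS);
}

/* Rotate (x, y) by the angle encoded by the pair (c0, c1), e.g.
   (cospi_28_64, cospi_4_64) in the stages listed here. */
static void cospi_pair_rotate(int32_t x, int32_t y, int32_t c0, int32_t c1,
                              int32_t *out0, int32_t *out1) {
  *out0 = dct_const_round_shift((int64_t)x * c0 - (int64_t)y * c1);
  *out1 = dct_const_round_shift((int64_t)x * c1 + (int64_t)y * c0);
}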
fwd_dct32x32_msa.c
  in fdct8x32_1d_column_even_store():
     90  DOTP_CONST_PAIR(vec1, vec0, cospi_28_64, cospi_4_64, temp1, temp0);
  in fdct8x32_1d_column_odd_store():
    185  DOTP_CONST_PAIR(in21, in22, cospi_28_64, cospi_4_64, in26, in24);
    199  DOTP_CONST_PAIR((-in23), in20, cospi_28_64, cospi_4_64, in27, in25);
  in fdct8x32_1d_row_even_4x():
    348  DOTP_CONST_PAIR(vec1, vec0, cospi_28_64, cospi_4_64, in5, in4);
  in fdct8x32_1d_row_even():
    421  DOTP_CONST_PAIR(vec1, vec0, cospi_28_64, cospi_4_64, temp1, temp0);
  in fdct8x32_1d_row_odd():
    519  DOTP_CONST_PAIR(in21, in22, cospi_28_64, cospi_4_64, in26, in24);
    535  DOTP_CONST_PAIR((-in23), in20, cospi_28_64, cospi_4_64, in27, in25);
  in fdct8x32_1d_row_even_rd():
    735  DOTP_CONST_PAIR(vec1, vec0, cospi_28_64, cospi_4_64, temp1, temp0);
  in fdct8x32_1d_row_odd_rd():
    838  DOTP_CONST_PAIR(in21, in22, cospi_28_64, cospi_4_64, in26, in24);
    850  DOTP_CONST_PAIR((-in23), in20, cospi_28_64, cospi_4_64, in27, in25);
idct16x16_msa.c
  in vpx_idct16_1d_rows_msa():
     27  DOTP_CONST_PAIR(reg2, reg14, cospi_28_64, cospi_4_64, reg2, reg14);
  in vpx_idct16_1d_columns_addblk_msa():
    119  DOTP_CONST_PAIR(reg2, reg14, cospi_28_64, cospi_4_64, reg2, reg14);
  in vpx_iadst16_1d_columns_addblk_msa():
    357  k0 = VP9_SET_COSPI_PAIR(cospi_4_64, cospi_28_64);
    358  k1 = VP9_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);
    359  k2 = VP9_SET_COSPI_PAIR(-cospi_28_64, cospi_4_64);
idct32x32_msa.c
  in idct32x8_row_even_process_store():
     50  DOTP_CONST_PAIR(reg1, reg7, cospi_28_64, cospi_4_64, reg1, reg7);
  in idct32x8_row_odd_process_store():
    164  DOTP_CONST_PAIR(reg7, reg0, cospi_28_64, cospi_4_64, reg0, reg7);
    165  DOTP_CONST_PAIR(reg6, reg1, -cospi_4_64, cospi_28_64, reg1, reg6);
  in idct8x32_column_even_process_store():
    361  DOTP_CONST_PAIR(reg1, reg7, cospi_28_64, cospi_4_64, reg1, reg7);
  in idct8x32_column_odd_process_store():
    469  DOTP_CONST_PAIR(reg7, reg0, cospi_28_64, cospi_4_64, reg0, reg7);
    470  DOTP_CONST_PAIR(reg6, reg1, -cospi_4_64, cospi_28_64, reg1, reg6);
fwd_txfm_msa.h
     64      cospi_4_64, cospi_28_64, cospi_12_64, cospi_20_64 }; \
    124      cospi_4_64, cospi_28_64, cospi_12_64, cospi_20_64 }; \
/external/libvpx/libvpx/vpx_dsp/x86/
inv_txfm_ssse3.c
  in vpx_idct8x8_64_add_ssse3():
     22  const __m128i stg1_0 = pair_set_epi16(cospi_28_64, -cospi_4_64);
     23  const __m128i stg1_1 = pair_set_epi16(cospi_4_64, cospi_28_64);
  in vpx_idct8x8_12_add_ssse3():
    225  const __m128i stg1_1 = pair_set_epi16(2 * cospi_4_64, 2 * cospi_4_64);
  in idct32_34_first_half():
    442  const __m128i stk3_1 = pair_set_epi16(2 * cospi_4_64, 2 * cospi_4_64);
  in idct32_34_second_half():
    531  const __m128i stg3_4 = pair_set_epi16(-cospi_4_64, cospi_28_64);
    532  const __m128i stg3_5 = pair_set_epi16(cospi_28_64, cospi_4_64);
    533  const __m128i stg3_6 = pair_set_epi16(-cospi_28_64, -cospi_4_64);
  in idct32_8x32_135_quarter_1():
    710  const __m128i stk3_1 = pair_set_epi16(2 * cospi_4_64, 2 * cospi_4_64);
  in idct32_8x32_quarter_3_4():
    878  const __m128i stg3_4 = pair_set_epi16(-cospi_4_64, cospi_28_64);
    879  const __m128i stg3_5 = pair_set_epi16(cospi_28_64, cospi_4_64);
  [all …]
inv_txfm_sse2.c
  in vpx_idct8x8_64_add_sse2():
    253  const __m128i stg1_0 = pair_set_epi16(cospi_28_64, -cospi_4_64);
    254  const __m128i stg1_1 = pair_set_epi16(cospi_4_64, cospi_28_64);
  in idct8_sse2():
    342  const __m128i stg1_0 = pair_set_epi16(cospi_28_64, -cospi_4_64);
    343  const __m128i stg1_1 = pair_set_epi16(cospi_4_64, cospi_28_64);
  in vpx_idct8x8_12_add_sse2():
    598  const __m128i stg1_0 = pair_set_epi16(cospi_28_64, -cospi_4_64);
    599  const __m128i stg1_1 = pair_set_epi16(cospi_4_64, cospi_28_64);
  in vpx_idct16x16_256_add_sse2():
    978  const __m128i stg3_0 = pair_set_epi16(cospi_28_64, -cospi_4_64);
    979  const __m128i stg3_1 = pair_set_epi16(cospi_4_64, cospi_28_64);
  in iadst16_8col():
   1126  const __m128i k__cospi_p04_p28 = pair_set_epi16(cospi_4_64, cospi_28_64);
   1127  const __m128i k__cospi_p28_m04 = pair_set_epi16(cospi_28_64, -cospi_4_64);
  [all …]
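pair_set_epi16(a, b), defined in vpx_dsp/x86/txfm_common_sse2.h, replicates the 16-bit pair (a, b) across all eight lanes of an __m128i; after interleaving two input rows, _mm_madd_epi16 then yields a*x + b*y in each 32-bit lane. A sketch of the resulting butterfly step (butterfly_lo is my name for illustration, not a libvpx symbol):

#include <emmintrin.h>
#include <stdint.h>

#define DCT_CONST_BITS 14

/* Lanes alternate (a, b), matching the interleave order used below. */
static __m128i pair_set_epi16(int a, int b) {
  return _mm_set_epi16((int16_t)b, (int16_t)a, (int16_t)b, (int16_t)a,
                       (int16_t)b, (int16_t)a, (int16_t)b, (int16_t)a);
}

/* round((x*c0 + y*c1) >> 14) for the low four (x, y) pairs. stg1_0 above
   is pair_set_epi16(cospi_28_64, -cospi_4_64), so c1's sign is baked in. */
static __m128i butterfly_lo(__m128i x, __m128i y, __m128i c_pair) {
  const __m128i rounding = _mm_set1_epi32(1 << (DCT_CONST_BITS - 1));
  const __m128i xy = _mm_unpacklo_epi16(x, y); /* x0,y0,x1,y1,... */
  __m128i t = _mm_madd_epi16(xy, c_pair);      /* x*c0 + y*c1 per lane */
  t = _mm_add_epi32(t, rounding);
  return _mm_srai_epi32(t, DCT_CONST_BITS);
}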
fwd_txfm_impl_sse2.h
  in FDCT8x8_2D():
    268  const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64);
    269  const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64);
  in FDCT16x16_2D():
    590  const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64);
    591  const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64);
/external/libvpx/libvpx/vp9/encoder/
vp9_dct.c
  in fdct8():
     84  t0 = x0 * cospi_28_64 + x3 * cospi_4_64;
     87  t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;
  in fdct16():
    163  t0 = x0 * cospi_28_64 + x3 * cospi_4_64;
    166  t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;
  in fadst16():
    411  s8 = x8 * cospi_4_64 + x9 * cospi_28_64;
    412  s9 = x8 * cospi_28_64 - x9 * cospi_4_64;
    415  s12 = -x12 * cospi_28_64 + x13 * cospi_4_64;
    416  s13 = x12 * cospi_4_64 + x13 * cospi_28_64;
  in vp9_fdct8x8_quant_c():
    617  t0 = x0 * cospi_28_64 + x3 * cospi_4_64;
    620  t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;
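The scalar encoder code makes the arithmetic explicit: each pair of lines rotates (x0, x3) through the 4π/64 stage and rounds back with fdct_round_shift (ROUND_POWER_OF_TWO by 14). A standalone version of that step, with tran_high_t assumed 64-bit here to keep the products safe:

#include <stdint.h>

typedef int64_t tran_high_t; /* assumption; wide enough for the products */

static const tran_high_t cospi_4_64 = 16069; /* round(2^14 * cos(4*pi/64)) */
static const tran_high_t cospi_28_64 = 3196; /* round(2^14 * cos(28*pi/64)) */

static tran_high_t fdct_round_shift(tran_high_t input) {
  return (input + (1 << 13)) >> 14; /* ROUND_POWER_OF_TWO(input, 14) */
}

/* The rotation shown at lines 84 and 87 of fdct8() above. */
static void fdct8_odd_rotation(tran_high_t x0, tran_high_t x3,
                               tran_high_t *t0, tran_high_t *t3) {
  *t0 = fdct_round_shift(x0 * cospi_28_64 + x3 * cospi_4_64);
  *t3 = fdct_round_shift(x3 * cospi_28_64 + x0 * -cospi_4_64);
}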
/external/libvpx/libvpx/vpx_dsp/
fwd_txfm.c
  in vpx_fdct8x8_c():
    154  t0 = x0 * cospi_28_64 + x3 * cospi_4_64;
    157  t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;
  in vpx_fdct16x16_c():
    288  t0 = x0 * cospi_28_64 + x3 * cospi_4_64;
    291  t3 = x3 * cospi_28_64 + x0 * -cospi_4_64;
  in vpx_fdct32():
    607  output[4] = dct_32_round(step[4] * cospi_28_64 + step[7] * cospi_4_64);
    610  output[7] = dct_32_round(step[7] * cospi_28_64 + step[4] * -cospi_4_64);
    621  output[17] = dct_32_round(step[17] * -cospi_4_64 + step[30] * cospi_28_64);
    622  output[18] = dct_32_round(step[18] * -cospi_28_64 + step[29] * -cospi_4_64);
    633  output[29] = dct_32_round(step[29] * cospi_28_64 + step[18] * -cospi_4_64);
    634  output[30] = dct_32_round(step[30] * cospi_4_64 + step[17] * cospi_28_64);
inv_txfm.c
  in idct8_c():
    278  temp1 = input[1] * cospi_28_64 - input[7] * cospi_4_64;
    279  temp2 = input[1] * cospi_4_64 + input[7] * cospi_28_64;
  in iadst16_c():
    456  s8 = x8 * cospi_4_64 + x9 * cospi_28_64;
    457  s9 = x8 * cospi_28_64 - x9 * cospi_4_64;
    460  s12 = -x12 * cospi_28_64 + x13 * cospi_4_64;
    461  s13 = x12 * cospi_4_64 + x13 * cospi_28_64;
  in idct16_c():
    612  temp1 = step2[4] * cospi_28_64 - step2[7] * cospi_4_64;
    613  temp2 = step2[4] * cospi_4_64 + step2[7] * cospi_28_64;
  in idct32_c():
    924  temp1 = step2[4] * cospi_28_64 - step2[7] * cospi_4_64;
    925  temp2 = step2[4] * cospi_4_64 + step2[7] * cospi_28_64;
  [all …]
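The decoder applies the transposed rotation, rounding with dct_const_round_shift; libvpx additionally wraps each result in WRAPLOW() to keep it in the representable range. A minimal model of the idct8_c() stage at lines 278-279, with that clamp omitted:

#include <stdint.h>

typedef int64_t tran_high_t; /* assumption, as in the sketch above */

static tran_high_t dct_const_round_shift(tran_high_t input) {
  return (input + (1 << 13)) >> 14;
}

/* input[1] and input[7] feed step1[4] and step1[7] of the 8-point IDCT. */
static void idct8_stage1_odd(tran_high_t in1, tran_high_t in7,
                             tran_high_t *step1_4, tran_high_t *step1_7) {
  const tran_high_t cospi_4_64 = 16069, cospi_28_64 = 3196;
  *step1_4 = dct_const_round_shift(in1 * cospi_28_64 - in7 * cospi_4_64);
  *step1_7 = dct_const_round_shift(in1 * cospi_4_64 + in7 * cospi_28_64);
}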
txfm_common.h
     31  static const tran_high_t cospi_4_64 = 16069;  [variable declaration]
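This declaration pins down the encoding used at every call site above: cospi_N_64 is cos(N·π/64) in Q14 fixed point, hence the shift by DCT_CONST_BITS = 14 after every multiply. A quick sanity check of the value and of its stage partner cospi_28_64 (assumes POSIX M_PI; link with -lm):

#include <math.h>
#include <stdio.h>

int main(void) {
  /* cospi_N_64 = round(2^14 * cos(N * pi / 64)) */
  printf("cospi_4_64  = %d\n", (int)round(16384 * cos(4 * M_PI / 64)));  /* 16069 */
  printf("cospi_28_64 = %d\n", (int)round(16384 * cos(28 * M_PI / 64))); /* 3196 */
  return 0;
}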
/external/libvpx/libvpx/vp9/encoder/x86/
vp9_dct_ssse3.c
  in vp9_fdct8x8_quant_ssse3():
     38  const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64);
     39  const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64);
vp9_dct_intrin_sse2.c
  in vp9_fdct8x8_quant_sse2():
    200  const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64);
    201  const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64);
  in fdct8_sse2():
    767  const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64);
    768  const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64);
  in fdct16_8col():
   1219  const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64);
   1220  const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64);
  in fadst16_8col():
   1551  const __m128i k__cospi_p04_p28 = pair_set_epi16(cospi_4_64, cospi_28_64);
   1552  const __m128i k__cospi_p28_m04 = pair_set_epi16(cospi_28_64, -cospi_4_64);
   1555  const __m128i k__cospi_m28_p04 = pair_set_epi16(-cospi_28_64, cospi_4_64);