/external/libvpx/libvpx/vpx_dsp/arm/
idct8x8_add_neon.asm
   47: ; dct_const_round_shift(input_dc * cospi_16_64)
   51: ; dct_const_round_shift(input_dc * cospi_16_64)
   71: ; dct_const_round_shift(input_dc * cospi_16_64)
   76: vdup.16 d0, r7  ; duplicate cospi_16_64
   78: ; dct_const_round_shift(input_dc * cospi_16_64)
   82: ; input[0] * cospi_16_64
   86: ; input[0] * cospi_16_64
   90: ; (input[0] + input[2]) * cospi_16_64
   94: ; (input[0] - input[2]) * cospi_16_64
  101: ; dct_const_round_shift(input_dc * cospi_16_64)
  [all …]

idct4x4_1_add_neon.asm
  28: ; generate cospi_16_64 = 11585
  32: ; out = dct_const_round_shift(input[0] * cospi_16_64)
  33: mul r0, r0, r12  ; input[0] * cospi_16_64
  37: ; out = dct_const_round_shift(out * cospi_16_64)
  38: mul r0, r0, r12  ; out * cospi_16_64

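Everywhere these files "generate cospi_16_64 = 11585", the value is libvpx's Q14 fixed-point encoding of cos(16*pi/64) = 1/sqrt(2). A standalone check (illustrative program, not part of libvpx; compile with -lm):

#include <math.h>
#include <stdio.h>

int main(void) {
  const double kPi = 3.14159265358979323846;
  /* cospi_16_64 = round(2^14 * cos(16 * pi / 64)) = round(16384 / sqrt(2)) */
  printf("%ld\n", lrint(16384.0 * cos(16.0 * kPi / 64.0))); /* prints 11585 */
  return 0;
}
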
idct16x16_add_neon.asm
  115: ; generate cospi_16_64 = 11585
  148: vdup.16 d30, r3  ; cospi_16_64
  150: ; step1[0] * cospi_16_64
  154: ; step1[1] * cospi_16_64
  165: ; temp1 = (step1[0] + step1[1]) * cospi_16_64
  169: ; temp2 = (step1[0] - step1[1]) * cospi_16_64
  211: ; generate cospi_16_64 = 11585
  221: vdup.16 d16, r3  ; duplicate cospi_16_64
  223: ; step2[5] * cospi_16_64
  227: ; step2[6] * cospi_16_64
  [all …]

idct4x4_1_add_neon.c
  25: int16_t i, a1, cospi_16_64 = 11585;  in vpx_idct4x4_1_add_neon() local
  26: int16_t out = dct_const_round_shift(input[0] * cospi_16_64);  in vpx_idct4x4_1_add_neon()
  27: out = dct_const_round_shift(out * cospi_16_64);  in vpx_idct4x4_1_add_neon()

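A scalar sketch of the DC-only shortcut that lines 26-27 implement, assuming DCT_CONST_BITS == 14 as in vpx_dsp/txfm_common.h (the helper below is a simplified stand-in for libvpx's dct_const_round_shift):

#include <stdint.h>

#define DCT_CONST_BITS 14
static const int16_t cospi_16_64 = 11585; /* round(2^14 * cos(pi/4)) */

/* Simplified stand-in for libvpx's dct_const_round_shift(). */
static int32_t dct_const_round_shift(int32_t input) {
  return (input + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS;
}

/* A DC-only block is scaled by cos(pi/4) once per transform dimension,
 * so the whole 2-D inverse transform collapses to two multiplies. */
static int16_t dc_only_idct_out(int16_t dc) {
  int16_t out = (int16_t)dct_const_round_shift(dc * cospi_16_64);
  return (int16_t)dct_const_round_shift(out * cospi_16_64);
}

In the listed function this out then feeds the final per-pixel rounding (a shift of 4 for the 4x4 case) to produce the a1 value added to every pixel of the block.
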
idct8x8_1_add_neon.asm
  28: ; generate cospi_16_64 = 11585
  32: ; out = dct_const_round_shift(input[0] * cospi_16_64)
  33: mul r0, r0, r12  ; input[0] * cospi_16_64
  37: ; out = dct_const_round_shift(out * cospi_16_64)
  38: mul r0, r0, r12  ; out * cospi_16_64

fwd_txfm_neon.c
  55: v_t0_lo = vmulq_n_s32(v_t0_lo, cospi_16_64);  in vpx_fdct8x8_neon()
  56: v_t0_hi = vmulq_n_s32(v_t0_hi, cospi_16_64);  in vpx_fdct8x8_neon()
  57: v_t1_lo = vmulq_n_s32(v_t1_lo, cospi_16_64);  in vpx_fdct8x8_neon()
  58: v_t1_hi = vmulq_n_s32(v_t1_hi, cospi_16_64);  in vpx_fdct8x8_neon()
  76: v_t0_lo = vmull_n_s16(vget_low_s16(v_x0), (int16_t)cospi_16_64);  in vpx_fdct8x8_neon()
  77: v_t0_hi = vmull_n_s16(vget_high_s16(v_x0), (int16_t)cospi_16_64);  in vpx_fdct8x8_neon()
  78: v_t1_lo = vmull_n_s16(vget_low_s16(v_x1), (int16_t)cospi_16_64);  in vpx_fdct8x8_neon()
  79: v_t1_hi = vmull_n_s16(vget_high_s16(v_x1), (int16_t)cospi_16_64);  in vpx_fdct8x8_neon()

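The first four matches scale values that are already 32-bit (vmulq_n_s32); the last four widen 16-bit halves while multiplying (vmull_n_s16). A minimal sketch of the widening pattern with the rounding shift folded in (the function name is illustrative, and libvpx stages these steps differently):

#include <arm_neon.h>
#include <stdint.h>

static const int16_t cospi_16_64 = 11585;

/* Multiply eight int16 lanes by cospi_16_64 into 32-bit products, then
 * apply dct_const_round_shift as a rounding narrowing shift by 14. */
static int16x8_t scale_by_cospi16(int16x8_t v) {
  int32x4_t lo = vmull_n_s16(vget_low_s16(v), cospi_16_64);
  int32x4_t hi = vmull_n_s16(vget_high_s16(v), cospi_16_64);
  return vcombine_s16(vrshrn_n_s32(lo, 14), vrshrn_n_s32(hi, 14));
}
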
idct16x16_1_add_neon.c
  25: int16_t i, j, a1, cospi_16_64 = 11585;  in vpx_idct16x16_1_add_neon() local
  26: int16_t out = dct_const_round_shift(input[0] * cospi_16_64);  in vpx_idct16x16_1_add_neon()
  27: out = dct_const_round_shift(out * cospi_16_64);  in vpx_idct16x16_1_add_neon()

idct8x8_1_add_neon.c
  25: int16_t i, a1, cospi_16_64 = 11585;  in vpx_idct8x8_1_add_neon() local
  26: int16_t out = dct_const_round_shift(input[0] * cospi_16_64);  in vpx_idct8x8_1_add_neon()
  27: out = dct_const_round_shift(out * cospi_16_64);  in vpx_idct8x8_1_add_neon()

idct32x32_1_add_neon.asm
  80: ; generate cospi_16_64 = 11585
  84: ; out = dct_const_round_shift(input[0] * cospi_16_64)
  85: mul r0, r0, r12  ; input[0] * cospi_16_64
  89: ; out = dct_const_round_shift(out * cospi_16_64)
  90: mul r0, r0, r12  ; out * cospi_16_64

idct32x32_add_neon.c
  577: DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q13s16, &q14s16)  in vpx_idct32x32_1024_add_neon()
  581: DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q13s16, &q14s16)  in vpx_idct32x32_1024_add_neon()
  604: DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q13s16, &q14s16)  in vpx_idct32x32_1024_add_neon()
  606: DO_BUTTERFLY(q0s16, q1s16, cospi_16_64, cospi_16_64,  in vpx_idct32x32_1024_add_neon()
  650: DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q1s16, &q3s16)  in vpx_idct32x32_1024_add_neon()
  654: DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q1s16, &q3s16)  in vpx_idct32x32_1024_add_neon()
  672: DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q1s16, &q3s16)  in vpx_idct32x32_1024_add_neon()
  677: DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q5s16, &q7s16)  in vpx_idct32x32_1024_add_neon()

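DO_BUTTERFLY/DO_BUTTERFLY_STD implement the planar rotation that the scalar idct writes out longhand; with both constants equal to cospi_16_64 it degenerates into scaled sum and difference. A scalar model of the macro's arithmetic (a sketch inferred from the matching C reference code, not the NEON macro body):

#include <stdint.h>

#define DCT_CONST_BITS 14
static const int32_t cospi_16_64 = 11585;

static int32_t dct_const_round_shift(int32_t input) {
  return (input + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS;
}

/* Rotation by the constant pair (c1, c2). With c1 == c2 == cospi_16_64
 * this yields (a - b) and (a + b), each scaled by cos(pi/4) and rounded. */
static void do_butterfly_scalar(int32_t a, int32_t b, int32_t c1, int32_t c2,
                                int32_t *out1, int32_t *out2) {
  *out1 = dct_const_round_shift(a * c1 - b * c2);
  *out2 = dct_const_round_shift(a * c2 + b * c1);
}
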
idct4x4_add_neon.asm
   42: ; cospi_16_64 = 11585 = 0x2d41
   59: vdup.16 d21, r3  ; replicate cospi_16_64
   82: ; (input[0] + input[2]) * cospi_16_64;
   83: ; (input[0] - input[2]) * cospi_16_64;
  133: ; (input[0] + input[2]) * cospi_16_64;
  134: ; (input[0] - input[2]) * cospi_16_64;

/external/libvpx/libvpx/vpx_dsp/mips/
itrans32_cols_dspr2.c
  452: [step2_15] "r" (step2_15), [cospi_16_64] "r" (cospi_16_64)  in vpx_idct32_cols_add_blk_dspr2()
  600: [cospi_16_64] "r" (cospi_16_64)  in vpx_idct32_cols_add_blk_dspr2()
  661: [cospi_16_64] "r" (cospi_16_64)  in vpx_idct32_cols_add_blk_dspr2()
  700: [step2_27] "r" (step2_27), [cospi_16_64] "r" (cospi_16_64)  in vpx_idct32_cols_add_blk_dspr2()
  703: temp21 = (step2_20 + step2_27) * cospi_16_64;  in vpx_idct32_cols_add_blk_dspr2()
  715: [step2_21] "r" (step2_21), [cospi_16_64] "r" (cospi_16_64)  in vpx_idct32_cols_add_blk_dspr2()
  718: temp21 = (step2_21 + step2_26) * cospi_16_64;  in vpx_idct32_cols_add_blk_dspr2()
  730: [step2_22] "r" (step2_22), [cospi_16_64] "r" (cospi_16_64)  in vpx_idct32_cols_add_blk_dspr2()
  733: temp21 = (step2_22 + step2_25) * cospi_16_64;  in vpx_idct32_cols_add_blk_dspr2()
  745: [step2_23] "r" (step2_23), [cospi_16_64] "r" (cospi_16_64)  in vpx_idct32_cols_add_blk_dspr2()
  [all …]

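The dspr2 files mix inline-asm operand bindings (the `[cospi_16_64] "r" (cospi_16_64)` lines) with plain C for the remaining butterflies. A sketch of the C half, using DCT_CONST_ROUNDING and DCT_CONST_BITS as defined in vpx_dsp/txfm_common.h (the inputs are named after one pair from the listing; the wrapper function is illustrative):

#include <stdint.h>

#define DCT_CONST_BITS 14
#define DCT_CONST_ROUNDING (1 << (DCT_CONST_BITS - 1))
static const int32_t cospi_16_64 = 11585;

/* e.g. rotating the step2_20/step2_27 pair in the 32-point idct:
 * scale the sum by cos(pi/4), then round back down to Q0. */
static int32_t rotate_pair_sum(int32_t step2_20, int32_t step2_27) {
  int32_t temp21 = (step2_20 + step2_27) * cospi_16_64;
  return (temp21 + DCT_CONST_ROUNDING) >> DCT_CONST_BITS;
}
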
itrans32_dspr2.c
  520: [cospi_16_64] "r" (cospi_16_64)  in idct32_rows_dspr2()
  669: [cospi_16_64] "r" (cospi_16_64),  in idct32_rows_dspr2()
  736: [cospi_16_64] "r" (cospi_16_64)  in idct32_rows_dspr2()
  775: [cospi_16_64] "r" (cospi_16_64)  in idct32_rows_dspr2()
  778: temp21 = (step2_20 + step2_27) * cospi_16_64;  in idct32_rows_dspr2()
  791: [cospi_16_64] "r" (cospi_16_64)  in idct32_rows_dspr2()
  794: temp21 = (step2_21 + step2_26) * cospi_16_64;  in idct32_rows_dspr2()
  807: [cospi_16_64] "r" (cospi_16_64)  in idct32_rows_dspr2()
  810: temp21 = (step2_22 + step2_25) * cospi_16_64;  in idct32_rows_dspr2()
  823: [cospi_16_64] "r" (cospi_16_64)  in idct32_rows_dspr2()
  [all …]

fwd_dct32x32_msa.c
   77: DOTP_CONST_PAIR(temp0, in4, cospi_16_64, cospi_16_64, temp1, temp0);  in fdct8x32_1d_column_even_store()
   88: DOTP_CONST_PAIR(vec6, vec5, cospi_16_64, cospi_16_64, vec5, vec6);  in fdct8x32_1d_column_even_store()
  101: DOTP_CONST_PAIR(in13, in10, cospi_16_64, cospi_16_64, vec2, vec5);  in fdct8x32_1d_column_even_store()
  102: DOTP_CONST_PAIR(in12, in11, cospi_16_64, cospi_16_64, vec3, vec4);  in fdct8x32_1d_column_even_store()
  141: DOTP_CONST_PAIR(in27, in20, cospi_16_64, cospi_16_64, in20, in27);  in fdct8x32_1d_column_odd_store()
  142: DOTP_CONST_PAIR(in26, in21, cospi_16_64, cospi_16_64, in21, in26);  in fdct8x32_1d_column_odd_store()
  164: DOTP_CONST_PAIR(in25, in22, cospi_16_64, cospi_16_64, in22, in25);  in fdct8x32_1d_column_odd_store()
  165: DOTP_CONST_PAIR(in24, in23, cospi_16_64, cospi_16_64, in23, in24);  in fdct8x32_1d_column_odd_store()
  329: DOTP_CONST_PAIR_W(vec4_r, vec6_r, tmp3_w, vec3_r, cospi_16_64,  in fdct8x32_1d_row_even_4x()
  330:                   cospi_16_64, vec4_r, tmp3_w, vec6_r, vec3_r);  in fdct8x32_1d_row_even_4x()
  [all …]

itrans16_dspr2.c
    76: [cospi_16_64] "r" (cospi_16_64)  in idct16_rows_dspr2()
   265: [cospi_16_64] "r" (cospi_16_64)  in idct16_rows_dspr2()
   316: [cospi_16_64] "r" (cospi_16_64)  in idct16_rows_dspr2()
   472: [cospi_16_64] "r" (cospi_16_64)  in idct16_cols_add_blk_dspr2()
   662: [cospi_16_64] "r" (cospi_16_64)  in idct16_cols_add_blk_dspr2()
   713: [cospi_16_64] "r" (cospi_16_64)  in idct16_cols_add_blk_dspr2()
  1190: s2 = (- cospi_16_64) * (x2 + x3);  in iadst16_dspr2()
  1191: s3 = cospi_16_64 * (x2 - x3);  in iadst16_dspr2()
  1192: s6 = cospi_16_64 * (x6 + x7);  in iadst16_dspr2()
  1193: s7 = cospi_16_64 * (- x6 + x7);  in iadst16_dspr2()
  [all …]

idct16x16_msa.c
   30: DOTP_CONST_PAIR(reg14, reg2, cospi_16_64, cospi_16_64, loc2, loc3);  in vpx_idct16_1d_rows_msa()
   31: DOTP_CONST_PAIR(reg0, reg8, cospi_16_64, cospi_16_64, reg0, reg8);  in vpx_idct16_1d_rows_msa()
   79: DOTP_CONST_PAIR(reg5, reg11, cospi_16_64, cospi_16_64, reg5, reg11);  in vpx_idct16_1d_rows_msa()
   85: DOTP_CONST_PAIR(reg3, reg13, cospi_16_64, cospi_16_64, reg3, reg13);  in vpx_idct16_1d_rows_msa()
  122: DOTP_CONST_PAIR(reg14, reg2, cospi_16_64, cospi_16_64, loc2, loc3);  in vpx_idct16_1d_columns_addblk_msa()
  123: DOTP_CONST_PAIR(reg0, reg8, cospi_16_64, cospi_16_64, reg0, reg8);  in vpx_idct16_1d_columns_addblk_msa()
  176: DOTP_CONST_PAIR(reg5, reg11, cospi_16_64, cospi_16_64, reg5, reg11);  in vpx_idct16_1d_columns_addblk_msa()
  182: DOTP_CONST_PAIR(reg3, reg13, cospi_16_64, cospi_16_64, reg3, reg13);  in vpx_idct16_1d_columns_addblk_msa()
  270: out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), DCT_CONST_BITS);  in vpx_idct16x16_1_add_msa()
  271: out = ROUND_POWER_OF_TWO((out * cospi_16_64), DCT_CONST_BITS);  in vpx_idct16x16_1_add_msa()
  [all …]

idct32x32_msa.c
   53: DOTP_CONST_PAIR(vec2, vec0, cospi_16_64, cospi_16_64, loc2, loc3);  in idct32x8_row_even_process_store()
   58: DOTP_CONST_PAIR(reg0, reg4, cospi_16_64, cospi_16_64, reg0, reg4);  in idct32x8_row_even_process_store()
   95: DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, reg6, reg1);  in idct32x8_row_even_process_store()
   96: DOTP_CONST_PAIR(reg4, reg3, cospi_16_64, cospi_16_64, reg3, reg4);  in idct32x8_row_even_process_store()
  221: DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);  in idct32x8_row_odd_process_store()
  224: DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);  in idct32x8_row_odd_process_store()
  236: DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);  in idct32x8_row_odd_process_store()
  239: DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);  in idct32x8_row_odd_process_store()
  369: DOTP_CONST_PAIR(vec2, vec0, cospi_16_64, cospi_16_64, loc2, loc3);  in idct8x32_column_even_process_store()
  374: DOTP_CONST_PAIR(reg0, reg4, cospi_16_64, cospi_16_64, reg0, reg4);  in idct8x32_column_even_process_store()
  [all …]

idct8x8_msa.c
   66: k0 = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);  in vpx_idct8x8_12_add_msa()
   67: k1 = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);  in vpx_idct8x8_12_add_msa()
   79: k1 = VP9_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);  in vpx_idct8x8_12_add_msa()
  108: out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), DCT_CONST_BITS);  in vpx_idct8x8_1_add_msa()
  109: out = ROUND_POWER_OF_TWO((out * cospi_16_64), DCT_CONST_BITS);  in vpx_idct8x8_1_add_msa()

itrans8_dspr2.c
  185: [cospi_16_64] "r" (cospi_16_64), [cospi_28_64] "r" (cospi_28_64),  in idct8_rows_dspr2()
  434: [cospi_16_64] "r" (cospi_16_64), [cospi_28_64] "r" (cospi_28_64),  in idct8_columns_add_blk_dspr2()
  649: s2 = cospi_16_64 * (x2 + x3);  in iadst8_dspr2()
  650: s3 = cospi_16_64 * (x2 - x3);  in iadst8_dspr2()
  651: s6 = cospi_16_64 * (x6 + x7);  in iadst8_dspr2()
  652: s7 = cospi_16_64 * (x6 - x7);  in iadst8_dspr2()

inv_txfm_msa.h
   24: v8i16 coeff1_m = { cospi_8_64, -cospi_8_64, cospi_16_64, \
   25:                    -cospi_16_64, cospi_24_64, -cospi_24_64, 0, 0 }; \
  117: c0_m = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64); \
  118: c1_m = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64); \
  219: cospi_16_64, -cospi_4_64, -cospi_20_64, -cospi_16_64 }; \
  255: v8i16 mask3_m = { -cospi_24_64, cospi_8_64, cospi_16_64, \
  256:                   -cospi_16_64, 0, 0, 0, 0 }; \
  394: k0_m = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64); \
  395: k1_m = VP9_SET_COSPI_PAIR(-cospi_16_64, -cospi_16_64); \
  396: k2_m = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64); \
  [all …]

itrans4_dspr2.c
   92: [cospi_8_64] "r" (cospi_8_64), [cospi_16_64] "r" (cospi_16_64),  in vpx_idct4_rows_dspr2()
  215: [cospi_8_64] "r" (cospi_8_64), [cospi_16_64] "r" (cospi_16_64),  in vpx_idct4_columns_add_blk_dspr2()

/external/libvpx/libvpx/vpx_dsp/x86/
fwd_txfm_impl_sse2.h
  46: const __m128i k__cospi_A = octa_set_epi16(cospi_16_64, cospi_16_64,  in FDCT4x4_2D()
  47:                                           cospi_16_64, cospi_16_64,  in FDCT4x4_2D()
  48:                                           cospi_16_64, -cospi_16_64,  in FDCT4x4_2D()
  49:                                           cospi_16_64, -cospi_16_64);  in FDCT4x4_2D()
  50: const __m128i k__cospi_B = octa_set_epi16(cospi_16_64, -cospi_16_64,  in FDCT4x4_2D()
  51:                                           cospi_16_64, -cospi_16_64,  in FDCT4x4_2D()
  52:                                           cospi_16_64, cospi_16_64,  in FDCT4x4_2D()
  53:                                           cospi_16_64, cospi_16_64);  in FDCT4x4_2D()
  62: const __m128i k__cospi_E = octa_set_epi16(cospi_16_64, cospi_16_64,  in FDCT4x4_2D()
  63:                                           cospi_16_64, cospi_16_64,  in FDCT4x4_2D()
  [all …]

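octa_set_epi16 packs eight 16-bit constants into one __m128i so that _mm_madd_epi16 can form the paired butterfly products of the 4x4 forward transform. A sketch of the idea with raw intrinsics (the function names here are illustrative; the sign pattern copies k__cospi_A above):

#include <emmintrin.h>
#include <stdint.h>

static const int16_t cospi_16_64 = 11585;

/* (+c,+c, +c,+c, +c,-c, +c,-c): the k__cospi_A lane pattern above. */
static __m128i make_cospi_a(void) {
  return _mm_setr_epi16(cospi_16_64, cospi_16_64, cospi_16_64, cospi_16_64,
                        cospi_16_64, -cospi_16_64, cospi_16_64, -cospi_16_64);
}

/* _mm_madd_epi16 computes in[2i]*k[2i] + in[2i+1]*k[2i+1] per 32-bit
 * lane, i.e. (x0 + x1) * c or (x0 - x1) * c for interleaved inputs,
 * before the rounding shift by DCT_CONST_BITS. */
static __m128i butterfly_products(__m128i interleaved_in) {
  return _mm_madd_epi16(interleaved_in, make_cospi_a());
}
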
/external/libvpx/libvpx/vpx_dsp/
fwd_txfm.c
   53: temp1 = (step[0] + step[1]) * cospi_16_64;  in vpx_fdct4x4_c()
   54: temp2 = (step[0] - step[1]) * cospi_16_64;  in vpx_fdct4x4_c()
  134: t0 = (x0 + x1) * cospi_16_64;  in vpx_fdct8x8_c()
  135: t1 = (x0 - x1) * cospi_16_64;  in vpx_fdct8x8_c()
  144: t0 = (s6 - s5) * cospi_16_64;  in vpx_fdct8x8_c()
  145: t1 = (s6 + s5) * cospi_16_64;  in vpx_fdct8x8_c()
  270: t0 = (x0 + x1) * cospi_16_64;  in vpx_fdct16x16_c()
  271: t1 = (x0 - x1) * cospi_16_64;  in vpx_fdct16x16_c()
  280: t0 = (s6 - s5) * cospi_16_64;  in vpx_fdct16x16_c()
  281: t1 = (s6 + s5) * cospi_16_64;  in vpx_fdct16x16_c()
  [all …]

inv_txfm.c
   98: temp1 = (input[0] + input[2]) * cospi_16_64;  in idct4_c()
   99: temp2 = (input[0] - input[2]) * cospi_16_64;  in idct4_c()
  143: tran_low_t out = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), 8);  in vpx_idct4x4_1_add_c()
  144: out = WRAPLOW(dct_const_round_shift(out * cospi_16_64), 8);  in vpx_idct4x4_1_add_c()
  174: temp1 = (step1[0] + step1[2]) * cospi_16_64;  in idct8_c()
  175: temp2 = (step1[0] - step1[2]) * cospi_16_64;  in idct8_c()
  193: temp1 = (step2[6] - step2[5]) * cospi_16_64;  in idct8_c()
  194: temp2 = (step2[5] + step2[6]) * cospi_16_64;  in idct8_c()
  238: tran_low_t out = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), 8);  in vpx_idct8x8_1_add_c()
  239: out = WRAPLOW(dct_const_round_shift(out * cospi_16_64), 8);  in vpx_idct8x8_1_add_c()
  [all …]

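Lines 98-99 and 174-175 above are the even-frequency butterfly that all the SIMD versions in this listing specialize. In isolation (types simplified to int32_t, the WRAPLOW saturation wrapper omitted, helper as in the earlier sketches):

#include <stdint.h>

#define DCT_CONST_BITS 14
static const int32_t cospi_16_64 = 11585;

static int32_t dct_const_round_shift(int32_t input) {
  return (input + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS;
}

/* Even half of idct4: the DC and second coefficients become a
 * sum/difference pair, each scaled by cos(pi/4) and rounded. */
static void idct_even_butterfly(int32_t in0, int32_t in2,
                                int32_t *step0, int32_t *step1) {
  *step0 = dct_const_round_shift((in0 + in2) * cospi_16_64);
  *step1 = dct_const_round_shift((in0 - in2) * cospi_16_64);
}
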
/external/libvpx/libvpx/vp9/encoder/
vp9_dct.c
   32: temp1 = (step[0] + step[1]) * cospi_16_64;  in fdct4()
   33: temp2 = (step[0] - step[1]) * cospi_16_64;  in fdct4()
   62: t0 = (x0 + x1) * cospi_16_64;  in fdct8()
   63: t1 = (x0 - x1) * cospi_16_64;  in fdct8()
   72: t0 = (s6 - s5) * cospi_16_64;  in fdct8()
   73: t1 = (s6 + s5) * cospi_16_64;  in fdct8()
  141: t0 = (x0 + x1) * cospi_16_64;  in fdct16()
  142: t1 = (x0 - x1) * cospi_16_64;  in fdct16()
  151: t0 = (s6 - s5) * cospi_16_64;  in fdct16()
  152: t1 = (s6 + s5) * cospi_16_64;  in fdct16()
  [all …]