/external/libaom/libaom/aom_dsp/x86/ |
D | fwd_txfm_impl_sse2.h |
     40  octa_set_epi16(cospi_16_64, cospi_16_64, cospi_16_64, cospi_16_64,  in FDCT4x4_2D_HELPER()
     41  cospi_16_64, -cospi_16_64, cospi_16_64, -cospi_16_64);  in FDCT4x4_2D_HELPER()
     43  octa_set_epi16(cospi_16_64, -cospi_16_64, cospi_16_64, -cospi_16_64,  in FDCT4x4_2D_HELPER()
     44  cospi_16_64, cospi_16_64, cospi_16_64, cospi_16_64);  in FDCT4x4_2D_HELPER()
     52  octa_set_epi16(cospi_16_64, cospi_16_64, cospi_16_64, cospi_16_64,  in FDCT4x4_2D_HELPER()
     53  cospi_16_64, cospi_16_64, cospi_16_64, cospi_16_64);  in FDCT4x4_2D_HELPER()
     55  octa_set_epi16(cospi_16_64, -cospi_16_64, cospi_16_64, -cospi_16_64,  in FDCT4x4_2D_HELPER()
     56  cospi_16_64, -cospi_16_64, cospi_16_64, -cospi_16_64);  in FDCT4x4_2D_HELPER()
    239  const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64);  in FDCT8x8_2D()
    240  const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);  in FDCT8x8_2D()
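Note on the constant all of these hits reference: cospi_16_64 is the 14-bit fixed-point encoding of cos(16·π/64) = cos(π/4) shared by libaom and libvpx (DCT_CONST_BITS = 14). A minimal standalone sketch of where the value 11585 comes from; the program is illustrative only:

    #include <math.h>
    #include <stdio.h>

    int main(void) {
      /* cospi_16_64 = round(cos(16 * pi / 64) * 2^14); 16/64 of pi is pi/4 */
      const double c = cos(16.0 * 3.14159265358979323846 / 64.0);
      printf("%d\n", (int)lround(c * (1 << 14))); /* prints 11585 */
      return 0;
    }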
|
/external/libvpx/libvpx/vpx_dsp/arm/ |
D | idct32x32_34_add_neon.c |
     96  s1[0] = multiply_shift_and_narrow_s16(in[0], cospi_16_64);  in vpx_idct32_6_neon()
    113  s1[5] = sub_multiply_shift_and_narrow_s16(s1[7], s1[4], cospi_16_64);  in vpx_idct32_6_neon()
    114  s1[6] = add_multiply_shift_and_narrow_s16(s1[4], s1[7], cospi_16_64);  in vpx_idct32_6_neon()
    146  s2[10] = sub_multiply_shift_and_narrow_s16(s2[14], s2[9], cospi_16_64);  in vpx_idct32_6_neon()
    147  s2[13] = add_multiply_shift_and_narrow_s16(s2[9], s2[14], cospi_16_64);  in vpx_idct32_6_neon()
    149  s2[11] = sub_multiply_shift_and_narrow_s16(s2[15], s2[8], cospi_16_64);  in vpx_idct32_6_neon()
    150  s2[12] = add_multiply_shift_and_narrow_s16(s2[8], s2[15], cospi_16_64);  in vpx_idct32_6_neon()
    188  s1[20] = sub_multiply_shift_and_narrow_s16(s3[27], s2[20], cospi_16_64);  in vpx_idct32_6_neon()
    189  s1[27] = add_multiply_shift_and_narrow_s16(s2[20], s3[27], cospi_16_64);  in vpx_idct32_6_neon()
    191  s1[21] = sub_multiply_shift_and_narrow_s16(s3[26], s2[21], cospi_16_64);  in vpx_idct32_6_neon()
  [all …]
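The idct32 hits above (and the highbd/135-coefficient variants listed next) all funnel through three helpers with one shared idiom: widen to 32 bits, multiply by the constant, round-shift by DCT_CONST_BITS, narrow back to 16 bits. Scalar equivalents, per my reading of the helper names (the real routines operate on whole int16x8_t vectors, and the argument order of the sub_ variant is an assumption):

    #include <stdint.h>

    #define DCT_CONST_BITS 14
    #define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

    static int16_t multiply_shift_and_narrow(int16_t a, int16_t c) {
      return (int16_t)ROUND_POWER_OF_TWO((int32_t)a * c, DCT_CONST_BITS);
    }
    static int16_t add_multiply_shift_and_narrow(int16_t a, int16_t b, int16_t c) {
      return (int16_t)ROUND_POWER_OF_TWO(((int32_t)a + b) * c, DCT_CONST_BITS);
    }
    static int16_t sub_multiply_shift_and_narrow(int16_t a, int16_t b, int16_t c) {
      return (int16_t)ROUND_POWER_OF_TWO(((int32_t)a - b) * c, DCT_CONST_BITS);
    }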
|
D | highbd_idct32x32_34_add_neon.c |
    104  s1[0] = multiply_shift_and_narrow_s32_dual(in[0], cospi_16_64);  in vpx_highbd_idct32_6_neon()
    121  s1[5] = sub_multiply_shift_and_narrow_s32_dual(s1[7], s1[4], cospi_16_64);  in vpx_highbd_idct32_6_neon()
    122  s1[6] = add_multiply_shift_and_narrow_s32_dual(s1[4], s1[7], cospi_16_64);  in vpx_highbd_idct32_6_neon()
    154  s2[10] = sub_multiply_shift_and_narrow_s32_dual(s2[14], s2[9], cospi_16_64);  in vpx_highbd_idct32_6_neon()
    155  s2[13] = add_multiply_shift_and_narrow_s32_dual(s2[9], s2[14], cospi_16_64);  in vpx_highbd_idct32_6_neon()
    157  s2[11] = sub_multiply_shift_and_narrow_s32_dual(s2[15], s2[8], cospi_16_64);  in vpx_highbd_idct32_6_neon()
    158  s2[12] = add_multiply_shift_and_narrow_s32_dual(s2[8], s2[15], cospi_16_64);  in vpx_highbd_idct32_6_neon()
    196  s1[20] = sub_multiply_shift_and_narrow_s32_dual(s3[27], s2[20], cospi_16_64);  in vpx_highbd_idct32_6_neon()
    197  s1[27] = add_multiply_shift_and_narrow_s32_dual(s2[20], s3[27], cospi_16_64);  in vpx_highbd_idct32_6_neon()
    199  s1[21] = sub_multiply_shift_and_narrow_s32_dual(s3[26], s2[21], cospi_16_64);  in vpx_highbd_idct32_6_neon()
  [all …]
|
D | idct32x32_135_add_neon.c |
    173  s4[0] = multiply_shift_and_narrow_s16(in[0], cospi_16_64);  in vpx_idct32_12_neon()
    210  s5[5] = sub_multiply_shift_and_narrow_s16(s3[7], s3[4], cospi_16_64);  in vpx_idct32_12_neon()
    211  s5[6] = add_multiply_shift_and_narrow_s16(s3[4], s3[7], cospi_16_64);  in vpx_idct32_12_neon()
    252  s6[10] = sub_multiply_shift_and_narrow_s16(s5[13], s5[10], cospi_16_64);  in vpx_idct32_12_neon()
    253  s6[13] = add_multiply_shift_and_narrow_s16(s5[10], s5[13], cospi_16_64);  in vpx_idct32_12_neon()
    255  s6[11] = sub_multiply_shift_and_narrow_s16(s5[12], s5[11], cospi_16_64);  in vpx_idct32_12_neon()
    256  s6[12] = add_multiply_shift_and_narrow_s16(s5[11], s5[12], cospi_16_64);  in vpx_idct32_12_neon()
    294  s7[20] = sub_multiply_shift_and_narrow_s16(s6[27], s6[20], cospi_16_64);  in vpx_idct32_12_neon()
    295  s7[27] = add_multiply_shift_and_narrow_s16(s6[20], s6[27], cospi_16_64);  in vpx_idct32_12_neon()
    297  s7[21] = sub_multiply_shift_and_narrow_s16(s6[26], s6[21], cospi_16_64);  in vpx_idct32_12_neon()
  [all …]
|
D | highbd_idct32x32_135_add_neon.c |
    184  s4[0] = multiply_shift_and_narrow_s32_dual(in[0], cospi_16_64);  in vpx_highbd_idct32_12_neon()
    221  s5[5] = sub_multiply_shift_and_narrow_s32_dual(s3[7], s3[4], cospi_16_64);  in vpx_highbd_idct32_12_neon()
    222  s5[6] = add_multiply_shift_and_narrow_s32_dual(s3[4], s3[7], cospi_16_64);  in vpx_highbd_idct32_12_neon()
    263  s6[10] = sub_multiply_shift_and_narrow_s32_dual(s5[13], s5[10], cospi_16_64);  in vpx_highbd_idct32_12_neon()
    264  s6[13] = add_multiply_shift_and_narrow_s32_dual(s5[10], s5[13], cospi_16_64);  in vpx_highbd_idct32_12_neon()
    266  s6[11] = sub_multiply_shift_and_narrow_s32_dual(s5[12], s5[11], cospi_16_64);  in vpx_highbd_idct32_12_neon()
    267  s6[12] = add_multiply_shift_and_narrow_s32_dual(s5[11], s5[12], cospi_16_64);  in vpx_highbd_idct32_12_neon()
    305  s7[20] = sub_multiply_shift_and_narrow_s32_dual(s6[27], s6[20], cospi_16_64);  in vpx_highbd_idct32_12_neon()
    306  s7[27] = add_multiply_shift_and_narrow_s32_dual(s6[20], s6[27], cospi_16_64);  in vpx_highbd_idct32_12_neon()
    308  s7[21] = sub_multiply_shift_and_narrow_s32_dual(s6[26], s6[21], cospi_16_64);  in vpx_highbd_idct32_12_neon()
  [all …]
|
D | idct4x4_1_add_neon.asm |
     27  ; cospi_16_64 = 11585
     30  ; out = dct_const_round_shift(input[0] * cospi_16_64)
     31  mul r0, r0, r12  ; input[0] * cospi_16_64
     35  ; out = dct_const_round_shift(out * cospi_16_64)
     36  mul r0, r0, r12  ; out * cospi_16_64
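The assembly above is the DC-only 4x4 inverse: a single coefficient is multiplied by cos(π/4) twice (a net ×1/2), rounded down by four more bits, and added to every destination pixel. A C sketch of the same path, modeled on vpx_idct4x4_1_add_c; clip_pixel and the final shift amount reflect my understanding of the 4x4 output scaling:

    #include <stdint.h>

    #define DCT_CONST_BITS 14
    #define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

    static const int cospi_16_64 = 11585; /* round(cos(pi/4) * 2^14) */

    static uint8_t clip_pixel(int v) { return (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v); }

    static void idct4x4_dc_only_add(int16_t dc, uint8_t *dest, int stride) {
      int out = ROUND_POWER_OF_TWO(dc * cospi_16_64, DCT_CONST_BITS); /* dc * cos(pi/4) */
      out = ROUND_POWER_OF_TWO(out * cospi_16_64, DCT_CONST_BITS);    /* dc * cos^2(pi/4) = dc/2 */
      const int a1 = ROUND_POWER_OF_TWO(out, 4);                     /* 4x4 output shift */
      for (int r = 0; r < 4; ++r, dest += stride)
        for (int c = 0; c < 4; ++c) dest[c] = clip_pixel(dest[c] + a1);
    }

Worked through for dc = 64: 64·11585 rounds to 45 after the first shift, 45·11585 rounds to 32 after the second, and (32 + 8) >> 4 = 2, so every pixel in the block gains 2.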
|
D | fwd_txfm_neon.c |
     60  v_t0_lo = vmulq_n_s32(v_t0_lo, cospi_16_64);  in vpx_fdct8x8_neon()
     61  v_t0_hi = vmulq_n_s32(v_t0_hi, cospi_16_64);  in vpx_fdct8x8_neon()
     62  v_t1_lo = vmulq_n_s32(v_t1_lo, cospi_16_64);  in vpx_fdct8x8_neon()
     63  v_t1_hi = vmulq_n_s32(v_t1_hi, cospi_16_64);  in vpx_fdct8x8_neon()
     81  v_t0_lo = vmull_n_s16(vget_low_s16(v_x0), cospi_16_64);  in vpx_fdct8x8_neon()
     82  v_t0_hi = vmull_n_s16(vget_high_s16(v_x0), cospi_16_64);  in vpx_fdct8x8_neon()
     83  v_t1_lo = vmull_n_s16(vget_low_s16(v_x1), cospi_16_64);  in vpx_fdct8x8_neon()
     84  v_t1_hi = vmull_n_s16(vget_high_s16(v_x1), cospi_16_64);  in vpx_fdct8x8_neon()
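Lines 60-63 and 81-84 are the same multiply at two points in the pipeline: vmulq_n_s32 scales lanes that were already widened, while vmull_n_s16 widens and multiplies in one step. A self-contained sketch of the full widen, multiply, rounding-narrow pattern (the helper name is mine; libvpx typically does the rounding shift with vrshrn_n_s32):

    #include <arm_neon.h>

    #define DCT_CONST_BITS 14
    static const int16_t cospi_16_64 = 11585; /* round(cos(pi/4) * 2^14) */

    /* round_shift((a + b) * cospi_16_64) across 8 lanes; the (a - b) form is
     * identical with vsubl_s16. */
    static int16x8_t add_mul_round_narrow(int16x8_t a, int16x8_t b) {
      int32x4_t lo = vaddl_s16(vget_low_s16(a), vget_low_s16(b)); /* widen-add */
      int32x4_t hi = vaddl_s16(vget_high_s16(a), vget_high_s16(b));
      lo = vmulq_n_s32(lo, cospi_16_64);
      hi = vmulq_n_s32(hi, cospi_16_64);
      return vcombine_s16(vrshrn_n_s32(lo, DCT_CONST_BITS), /* rounding >>14, narrow */
                          vrshrn_n_s32(hi, DCT_CONST_BITS));
    }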
|
/external/libvpx/libvpx/vpx_dsp/x86/ |
D | inv_txfm_sse2.h |
     93  const __m128i cst = pair_set_epi16(cospi_16_64, cospi_16_64);  in butterfly_cospi16()
    259  butterfly(in[0], in[4], cospi_16_64, cospi_16_64, &step2[1], &step2[0]);  in idct8()
    272  butterfly(step2[6], step2[5], cospi_16_64, cospi_16_64, &step1[5], &step1[6]);  in idct8()
    287  const __m128i cp_16_16 = pair_set_epi16(cospi_16_64, cospi_16_64);  in idct8x8_12_add_kernel_sse2()
    288  const __m128i cp_16_n16 = pair_set_epi16(cospi_16_64, -cospi_16_64);  in idct8x8_12_add_kernel_sse2()
    366  butterfly(in[0], in[8], cospi_16_64, cospi_16_64, &step2[1], &step2[0]);  in idct16_8col()
    386  butterfly(step2[6], step2[5], cospi_16_64, cospi_16_64, &step1[5], &step1[6]);  in idct16_8col()
    405  butterfly(step1[13], step1[10], cospi_16_64, cospi_16_64, &step2[10],  in idct16_8col()
    407  butterfly(step1[12], step1[11], cospi_16_64, cospi_16_64, &step2[11],  in idct16_8col()
    432  const __m128i k__cospi_p16_p16 = pair_set_epi16(cospi_16_64, cospi_16_64);  in idct16x16_10_pass1()
  [all …]
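pair_set_epi16 plus butterfly() is the SSE2 version of the rotation: interleave the two input vectors with punpck so that _mm_madd_epi16 against a repeated (c0, c1) pair yields a·c0 + b·c1 in each 32-bit lane, then round, shift, and re-pack. A sketch under assumed helper names (not the library's exact signatures):

    #include <emmintrin.h>
    #include <stdint.h>

    #define DCT_CONST_BITS 14

    static __m128i pair_set_epi16_sketch(int16_t a, int16_t b) {
      return _mm_setr_epi16(a, b, a, b, a, b, a, b);
    }

    /* out[i] = round_shift(x[i] * c0 + y[i] * c1) for 8 int16 lanes */
    static __m128i butterfly_half(__m128i x, __m128i y, __m128i c) {
      const __m128i rounding = _mm_set1_epi32(1 << (DCT_CONST_BITS - 1));
      __m128i lo = _mm_madd_epi16(_mm_unpacklo_epi16(x, y), c); /* x*c0 + y*c1 */
      __m128i hi = _mm_madd_epi16(_mm_unpackhi_epi16(x, y), c);
      lo = _mm_srai_epi32(_mm_add_epi32(lo, rounding), DCT_CONST_BITS);
      hi = _mm_srai_epi32(_mm_add_epi32(hi, rounding), DCT_CONST_BITS);
      return _mm_packs_epi32(lo, hi); /* saturating narrow back to int16 */
    }

With c = pair_set_epi16_sketch(cospi_16_64, cospi_16_64) the result is round_shift((x + y) * cospi_16_64), the even butterfly applied by the idct8/idct16 hits above.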
|
D | fwd_txfm_impl_sse2.h |
     47  octa_set_epi16(cospi_16_64, cospi_16_64, cospi_16_64, cospi_16_64,  in FDCT4x4_2D()
     48  cospi_16_64, -cospi_16_64, cospi_16_64, -cospi_16_64);  in FDCT4x4_2D()
     50  octa_set_epi16(cospi_16_64, -cospi_16_64, cospi_16_64, -cospi_16_64,  in FDCT4x4_2D()
     51  cospi_16_64, cospi_16_64, cospi_16_64, cospi_16_64);  in FDCT4x4_2D()
     59  octa_set_epi16(cospi_16_64, cospi_16_64, cospi_16_64, cospi_16_64,  in FDCT4x4_2D()
     60  cospi_16_64, cospi_16_64, cospi_16_64, cospi_16_64);  in FDCT4x4_2D()
     62  octa_set_epi16(cospi_16_64, -cospi_16_64, cospi_16_64, -cospi_16_64,  in FDCT4x4_2D()
     63  cospi_16_64, -cospi_16_64, cospi_16_64, -cospi_16_64);  in FDCT4x4_2D()
    264  const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64);  in FDCT8x8_2D()
    265  const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64);  in FDCT8x8_2D()
  [all …]
|
D | highbd_idct32x32_add_sse4.c |
     48  highbd_butterfly_sse4_1(step1[13], step1[10], cospi_16_64, cospi_16_64,  in highbd_idct32_4x32_quarter_2_stage_4_to_6()
     50  highbd_butterfly_sse4_1(step1[12], step1[11], cospi_16_64, cospi_16_64,  in highbd_idct32_4x32_quarter_2_stage_4_to_6()
    121  highbd_butterfly_sse4_1(step2[27], step2[20], cospi_16_64, cospi_16_64,  in highbd_idct32_4x32_quarter_3_4_stage_4_to_7()
    123  highbd_butterfly_sse4_1(step2[26], step2[21], cospi_16_64, cospi_16_64,  in highbd_idct32_4x32_quarter_3_4_stage_4_to_7()
    125  highbd_butterfly_sse4_1(step2[25], step2[22], cospi_16_64, cospi_16_64,  in highbd_idct32_4x32_quarter_3_4_stage_4_to_7()
    127  highbd_butterfly_sse4_1(step2[24], step2[23], cospi_16_64, cospi_16_64,  in highbd_idct32_4x32_quarter_3_4_stage_4_to_7()
    155  highbd_butterfly_sse4_1(in[0], in[16], cospi_16_64, cospi_16_64, &step2[1],  in highbd_idct32_1024_4x32_quarter_1()
    170  highbd_butterfly_sse4_1(step2[6], step2[5], cospi_16_64, cospi_16_64,  in highbd_idct32_1024_4x32_quarter_1()
    379  highbd_partial_butterfly_sse4_1(in[0], cospi_16_64, cospi_16_64, &step2[1],  in highbd_idct32_135_4x32_quarter_1()
    394  highbd_butterfly_sse4_1(step2[6], step2[5], cospi_16_64, cospi_16_64,  in highbd_idct32_135_4x32_quarter_1()
  [all …]
|
D | highbd_idct32x32_add_sse2.c |
     44  highbd_butterfly_sse2(step1[13], step1[10], cospi_16_64, cospi_16_64,  in highbd_idct32_4x32_quarter_2_stage_4_to_6()
     46  highbd_butterfly_sse2(step1[12], step1[11], cospi_16_64, cospi_16_64,  in highbd_idct32_4x32_quarter_2_stage_4_to_6()
    117  highbd_butterfly_sse2(step2[27], step2[20], cospi_16_64, cospi_16_64,  in highbd_idct32_4x32_quarter_3_4_stage_4_to_7()
    119  highbd_butterfly_sse2(step2[26], step2[21], cospi_16_64, cospi_16_64,  in highbd_idct32_4x32_quarter_3_4_stage_4_to_7()
    121  highbd_butterfly_sse2(step2[25], step2[22], cospi_16_64, cospi_16_64,  in highbd_idct32_4x32_quarter_3_4_stage_4_to_7()
    123  highbd_butterfly_sse2(step2[24], step2[23], cospi_16_64, cospi_16_64,  in highbd_idct32_4x32_quarter_3_4_stage_4_to_7()
    151  highbd_butterfly_sse2(in[0], in[16], cospi_16_64, cospi_16_64, &step2[1],  in highbd_idct32_1024_4x32_quarter_1()
    166  highbd_butterfly_sse2(step2[6], step2[5], cospi_16_64, cospi_16_64, &step1[5],  in highbd_idct32_1024_4x32_quarter_1()
    375  highbd_partial_butterfly_sse2(in[0], cospi_16_64, cospi_16_64, &step2[1],  in highbd_idct32_135_4x32_quarter_1()
    390  highbd_butterfly_sse2(step2[6], step2[5], cospi_16_64, cospi_16_64, &step1[5],  in highbd_idct32_135_4x32_quarter_1()
  [all …]
|
D | inv_txfm_ssse3.h |
     25  const __m128i cp_16_16 = _mm_set1_epi16(cospi_16_64);  in idct8x8_12_add_kernel_ssse3()
     26  const __m128i cp_16_n16 = pair_set_epi16(cospi_16_64, -cospi_16_64);  in idct8x8_12_add_kernel_ssse3()
     27  const __m128i cospi_16_64d = _mm_set1_epi16((int16_t)(2 * cospi_16_64));  in idct8x8_12_add_kernel_ssse3()
     95  butterfly(step2[6], step2[5], cospi_16_64, cospi_16_64, &step1[5], &step1[6]);  in idct8x8_12_add_kernel_ssse3()
|
D | highbd_inv_txfm_sse4.h |
     70  *out0 = multiplication_round_shift_sse4_1(temp1, cospi_16_64);  in highbd_butterfly_cospi16_sse4_1()
     73  *out1 = multiplication_round_shift_sse4_1(temp1, cospi_16_64);  in highbd_butterfly_cospi16_sse4_1()
     95  step[0] = multiplication_round_shift_sse4_1(temp, cospi_16_64);  in highbd_idct4_sse4_1()
     98  step[1] = multiplication_round_shift_sse4_1(temp, cospi_16_64);  in highbd_idct4_sse4_1()
|
/external/libvpx/libvpx/vpx_dsp/mips/ |
D | fwd_dct32x32_msa.c |
     78  DOTP_CONST_PAIR(temp0, in4, cospi_16_64, cospi_16_64, temp1, temp0);  in fdct8x32_1d_column_even_store()
     89  DOTP_CONST_PAIR(vec6, vec5, cospi_16_64, cospi_16_64, vec5, vec6);  in fdct8x32_1d_column_even_store()
    102  DOTP_CONST_PAIR(in13, in10, cospi_16_64, cospi_16_64, vec2, vec5);  in fdct8x32_1d_column_even_store()
    103  DOTP_CONST_PAIR(in12, in11, cospi_16_64, cospi_16_64, vec3, vec4);  in fdct8x32_1d_column_even_store()
    142  DOTP_CONST_PAIR(in27, in20, cospi_16_64, cospi_16_64, in20, in27);  in fdct8x32_1d_column_odd_store()
    143  DOTP_CONST_PAIR(in26, in21, cospi_16_64, cospi_16_64, in21, in26);  in fdct8x32_1d_column_odd_store()
    165  DOTP_CONST_PAIR(in25, in22, cospi_16_64, cospi_16_64, in22, in25);  in fdct8x32_1d_column_odd_store()
    166  DOTP_CONST_PAIR(in24, in23, cospi_16_64, cospi_16_64, in23, in24);  in fdct8x32_1d_column_odd_store()
    327  DOTP_CONST_PAIR_W(vec4_r, vec6_r, tmp3_w, vec3_r, cospi_16_64, cospi_16_64,  in fdct8x32_1d_row_even_4x()
    347  DOTP_CONST_PAIR(vec6, vec5, cospi_16_64, cospi_16_64, vec5, vec6);  in fdct8x32_1d_row_even_4x()
  [all …]
|
D | itrans16_dspr2.c |
     76  [cospi_16_64] "r"(cospi_16_64));  in idct16_rows_dspr2()
    262  [cospi_16_64] "r"(cospi_16_64));  in idct16_rows_dspr2()
    311  [step2_11] "r"(step2_11), [cospi_16_64] "r"(cospi_16_64));  in idct16_rows_dspr2()
    461  [cospi_16_64] "r"(cospi_16_64));  in idct16_cols_add_blk_dspr2()
    648  [cospi_16_64] "r"(cospi_16_64));  in idct16_cols_add_blk_dspr2()
    697  [step2_11] "r"(step2_11), [cospi_16_64] "r"(cospi_16_64));  in idct16_cols_add_blk_dspr2()
   1194  s2 = (-cospi_16_64) * (x2 + x3);  in iadst16_dspr2()
   1195  s3 = cospi_16_64 * (x2 - x3);  in iadst16_dspr2()
   1196  s6 = cospi_16_64 * (x6 + x7);  in iadst16_dspr2()
   1197  s7 = cospi_16_64 * (-x6 + x7);  in iadst16_dspr2()
  [all …]
|
D | idct16x16_msa.c |
     31  DOTP_CONST_PAIR(reg14, reg2, cospi_16_64, cospi_16_64, loc2, loc3);  in vpx_idct16_1d_rows_msa()
     32  DOTP_CONST_PAIR(reg0, reg8, cospi_16_64, cospi_16_64, reg0, reg8);  in vpx_idct16_1d_rows_msa()
     80  DOTP_CONST_PAIR(reg5, reg11, cospi_16_64, cospi_16_64, reg5, reg11);  in vpx_idct16_1d_rows_msa()
     86  DOTP_CONST_PAIR(reg3, reg13, cospi_16_64, cospi_16_64, reg3, reg13);  in vpx_idct16_1d_rows_msa()
    123  DOTP_CONST_PAIR(reg14, reg2, cospi_16_64, cospi_16_64, loc2, loc3);  in vpx_idct16_1d_columns_addblk_msa()
    124  DOTP_CONST_PAIR(reg0, reg8, cospi_16_64, cospi_16_64, reg0, reg8);  in vpx_idct16_1d_columns_addblk_msa()
    177  DOTP_CONST_PAIR(reg5, reg11, cospi_16_64, cospi_16_64, reg5, reg11);  in vpx_idct16_1d_columns_addblk_msa()
    183  DOTP_CONST_PAIR(reg3, reg13, cospi_16_64, cospi_16_64, reg3, reg13);  in vpx_idct16_1d_columns_addblk_msa()
    270  out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), DCT_CONST_BITS);  in vpx_idct16x16_1_add_msa()
    271  out = ROUND_POWER_OF_TWO((out * cospi_16_64), DCT_CONST_BITS);  in vpx_idct16x16_1_add_msa()
  [all …]
|
D | idct32x32_msa.c |
     54  DOTP_CONST_PAIR(vec2, vec0, cospi_16_64, cospi_16_64, loc2, loc3);  in idct32x8_row_even_process_store()
     59  DOTP_CONST_PAIR(reg0, reg4, cospi_16_64, cospi_16_64, reg0, reg4);  in idct32x8_row_even_process_store()
     96  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, reg6, reg1);  in idct32x8_row_even_process_store()
     97  DOTP_CONST_PAIR(reg4, reg3, cospi_16_64, cospi_16_64, reg3, reg4);  in idct32x8_row_even_process_store()
    219  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);  in idct32x8_row_odd_process_store()
    222  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);  in idct32x8_row_odd_process_store()
    233  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);  in idct32x8_row_odd_process_store()
    236  DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);  in idct32x8_row_odd_process_store()
    365  DOTP_CONST_PAIR(vec2, vec0, cospi_16_64, cospi_16_64, loc2, loc3);  in idct8x32_column_even_process_store()
    370  DOTP_CONST_PAIR(reg0, reg4, cospi_16_64, cospi_16_64, reg0, reg4);  in idct8x32_column_even_process_store()
  [all …]
|
D | idct8x8_msa.c |
     67  k0 = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);  in vpx_idct8x8_12_add_msa()
     68  k1 = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);  in vpx_idct8x8_12_add_msa()
     80  k1 = VP9_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);  in vpx_idct8x8_12_add_msa()
    109  out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), DCT_CONST_BITS);  in vpx_idct8x8_1_add_msa()
    110  out = ROUND_POWER_OF_TWO((out * cospi_16_64), DCT_CONST_BITS);  in vpx_idct8x8_1_add_msa()
|
D | itrans8_dspr2.c |
    184  [cospi_16_64] "r"(cospi_16_64), [cospi_28_64] "r"(cospi_28_64),  in idct8_rows_dspr2()
    444  [cospi_16_64] "r"(cospi_16_64), [cospi_28_64] "r"(cospi_28_64),  in idct8_columns_add_blk_dspr2()
    671  s2 = cospi_16_64 * (x2 + x3);  in iadst8_dspr2()
    672  s3 = cospi_16_64 * (x2 - x3);  in iadst8_dspr2()
    673  s6 = cospi_16_64 * (x6 + x7);  in iadst8_dspr2()
    674  s7 = cospi_16_64 * (x6 - x7);  in iadst8_dspr2()
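Lines 671-674 above are two exact plane rotations by π/4: since cospi_16_64/2^14 ≈ cos(π/4) = 1/√2, after the usual rounding shift they compute, in exact arithmetic,

    s2 = (x2 + x3) / sqrt(2),   s3 = (x2 - x3) / sqrt(2)
    s6 = (x6 + x7) / sqrt(2),   s7 = (x6 - x7) / sqrt(2)

i.e. each pair is multiplied by the orthogonal matrix (1/√2)·[[1, 1], [1, -1]], which is why this iadst8 stage preserves energy apart from rounding.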
|
D | inv_txfm_msa.h |
     25  v8i16 coeff1_m = { cospi_8_64, -cospi_8_64, cospi_16_64, -cospi_16_64, \
    117  c0_m = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64); \
    118  c1_m = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64); \
    221  cospi_16_64, -cospi_4_64, -cospi_20_64, -cospi_16_64 }; \
    258  -cospi_24_64, cospi_8_64, cospi_16_64, -cospi_16_64, 0, 0, 0, 0 \
    395  k0_m = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64); \
    396  k1_m = VP9_SET_COSPI_PAIR(-cospi_16_64, -cospi_16_64); \
    397  k2_m = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64); \
    398  k3_m = VP9_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64); \
|
/external/libvpx/libvpx/vpx_dsp/ |
D | fwd_txfm.c |
     56  temp1 = (step[0] + step[1]) * cospi_16_64;  in vpx_fdct4x4_c()
     57  temp2 = (step[0] - step[1]) * cospi_16_64;  in vpx_fdct4x4_c()
    132  t0 = (x0 + x1) * cospi_16_64;  in vpx_fdct8x8_c()
    133  t1 = (x0 - x1) * cospi_16_64;  in vpx_fdct8x8_c()
    142  t0 = (s6 - s5) * cospi_16_64;  in vpx_fdct8x8_c()
    143  t1 = (s6 + s5) * cospi_16_64;  in vpx_fdct8x8_c()
    266  t0 = (x0 + x1) * cospi_16_64;  in vpx_fdct16x16_c()
    267  t1 = (x0 - x1) * cospi_16_64;  in vpx_fdct16x16_c()
    276  t0 = (s6 - s5) * cospi_16_64;  in vpx_fdct16x16_c()
    277  t1 = (s6 + s5) * cospi_16_64;  in vpx_fdct16x16_c()
  [all …]
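Every fdct hit above runs coefficients through the same even-stage butterfly: a 2-point DCT scaled into 14-bit fixed point and renormalized by the rounding shift. A scalar sketch (fdct_round_shift in libvpx is, to my understanding, ROUND_POWER_OF_TWO(x, DCT_CONST_BITS); the function name here is local):

    #include <stdint.h>

    #define DCT_CONST_BITS 14
    #define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

    static const int cospi_16_64 = 11585; /* round(cos(pi/4) * 2^14) */

    /* out0 ~ (x0 + x1)/sqrt(2), out1 ~ (x0 - x1)/sqrt(2): the 2-point DCT that
     * the fdct4/fdct8/fdct16 even paths reduce to. */
    static void fdct_even_butterfly(int32_t x0, int32_t x1,
                                    int32_t *out0, int32_t *out1) {
      *out0 = ROUND_POWER_OF_TWO((x0 + x1) * cospi_16_64, DCT_CONST_BITS);
      *out1 = ROUND_POWER_OF_TWO((x0 - x1) * cospi_16_64, DCT_CONST_BITS);
    }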
|
D | inv_txfm.c |
    138  temp1 = ((int16_t)input[0] + (int16_t)input[2]) * cospi_16_64;  in idct4_c()
    139  temp2 = ((int16_t)input[0] - (int16_t)input[2]) * cospi_16_64;  in idct4_c()
    182  WRAPLOW(dct_const_round_shift((int16_t)input[0] * cospi_16_64));  in vpx_idct4x4_1_add_c()
    184  out = WRAPLOW(dct_const_round_shift(out * cospi_16_64));  in vpx_idct4x4_1_add_c()
    251  s2 = (int)(cospi_16_64 * (x2 + x3));  in iadst8_c()
    252  s3 = (int)(cospi_16_64 * (x2 - x3));  in iadst8_c()
    253  s6 = (int)(cospi_16_64 * (x6 + x7));  in iadst8_c()
    254  s7 = (int)(cospi_16_64 * (x6 - x7));  in iadst8_c()
    290  temp1 = (step1[0] + step1[2]) * cospi_16_64;  in idct8_c()
    291  temp2 = (step1[0] - step1[2]) * cospi_16_64;  in idct8_c()
  [all …]
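idct4_c above inverts that rotation with the same constant; dct_const_round_shift removes the 14-bit scaling and WRAPLOW keeps intermediates in the 16-bit domain. A sketch of the even stage with local stand-ins for those macros (treating WRAPLOW as a plain int16 truncation, which is my reading of the non-high-bitdepth build):

    #include <stdint.h>

    #define DCT_CONST_BITS 14
    #define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))

    static const int cospi_16_64 = 11585;

    static int16_t wraplow(int32_t x) { return (int16_t)x; } /* stand-in for WRAPLOW */

    static void idct4_even_stage(const int16_t *input, int16_t *step) {
      const int32_t temp1 = (input[0] + input[2]) * cospi_16_64;
      const int32_t temp2 = (input[0] - input[2]) * cospi_16_64;
      step[0] = wraplow(ROUND_POWER_OF_TWO(temp1, DCT_CONST_BITS));
      step[1] = wraplow(ROUND_POWER_OF_TWO(temp2, DCT_CONST_BITS));
    }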
|
/external/libvpx/libvpx/vp9/encoder/ |
D | vp9_dct.c |
     32  temp1 = (step[0] + step[1]) * cospi_16_64;  in fdct4()
     33  temp2 = (step[0] - step[1]) * cospi_16_64;  in fdct4()
     62  t0 = (x0 + x1) * cospi_16_64;  in fdct8()
     63  t1 = (x0 - x1) * cospi_16_64;  in fdct8()
     72  t0 = (s6 - s5) * cospi_16_64;  in fdct8()
     73  t1 = (s6 + s5) * cospi_16_64;  in fdct8()
    141  t0 = (x0 + x1) * cospi_16_64;  in fdct16()
    142  t1 = (x0 - x1) * cospi_16_64;  in fdct16()
    151  t0 = (s6 - s5) * cospi_16_64;  in fdct16()
    152  t1 = (s6 + s5) * cospi_16_64;  in fdct16()
  [all …]
|
/external/libaom/libaom/aom_dsp/ |
D | fwd_txfm.c |
     55  temp1 = (step[0] + step[1]) * cospi_16_64;  in aom_fdct4x4_c()
     56  temp2 = (step[0] - step[1]) * cospi_16_64;  in aom_fdct4x4_c()
    117  temp1 = (step[0] + step[1]) * (int32_t)cospi_16_64;  in aom_fdct4x4_lp_c()
    118  temp2 = (step[0] - step[1]) * (int32_t)cospi_16_64;  in aom_fdct4x4_lp_c()
    182  t0 = (x0 + x1) * cospi_16_64;  in aom_fdct8x8_c()
    183  t1 = (x0 - x1) * cospi_16_64;  in aom_fdct8x8_c()
    192  t0 = (s6 - s5) * cospi_16_64;  in aom_fdct8x8_c()
    193  t1 = (s6 + s5) * cospi_16_64;  in aom_fdct8x8_c()
|
/external/libvpx/config/arm-neon/vpx_dsp/arm/ |
D | idct4x4_1_add_neon.asm.S |
     33  @ cospi_16_64 = 11585
     36  @ out = dct_const_round_shift(input[0] * cospi_16_64)
     37  mul r0, r0, r12  @ input[0] * cospi_16_64
     41  @ out = dct_const_round_shift(out * cospi_16_64)
     42  mul r0, r0, r12  @ out * cospi_16_64
|