
Searched refs: cospi_16_64 (Results 1 – 25 of 48), sorted by relevance


/external/libvpx/libvpx/vpx_dsp/arm/
idct32x32_34_add_neon.c
96 s1[0] = multiply_shift_and_narrow_s16(in[0], cospi_16_64); in vpx_idct32_6_neon()
113 s1[5] = sub_multiply_shift_and_narrow_s16(s1[7], s1[4], cospi_16_64); in vpx_idct32_6_neon()
114 s1[6] = add_multiply_shift_and_narrow_s16(s1[4], s1[7], cospi_16_64); in vpx_idct32_6_neon()
146 s2[10] = sub_multiply_shift_and_narrow_s16(s2[14], s2[9], cospi_16_64); in vpx_idct32_6_neon()
147 s2[13] = add_multiply_shift_and_narrow_s16(s2[9], s2[14], cospi_16_64); in vpx_idct32_6_neon()
149 s2[11] = sub_multiply_shift_and_narrow_s16(s2[15], s2[8], cospi_16_64); in vpx_idct32_6_neon()
150 s2[12] = add_multiply_shift_and_narrow_s16(s2[8], s2[15], cospi_16_64); in vpx_idct32_6_neon()
188 s1[20] = sub_multiply_shift_and_narrow_s16(s3[27], s2[20], cospi_16_64); in vpx_idct32_6_neon()
189 s1[27] = add_multiply_shift_and_narrow_s16(s2[20], s3[27], cospi_16_64); in vpx_idct32_6_neon()
191 s1[21] = sub_multiply_shift_and_narrow_s16(s3[26], s2[21], cospi_16_64); in vpx_idct32_6_neon()
[all …]
highbd_idct32x32_34_add_neon.c
103 s1[0] = multiply_shift_and_narrow_s32_dual(in[0], cospi_16_64); in vpx_highbd_idct32_6_neon()
120 s1[5] = sub_multiply_shift_and_narrow_s32_dual(s1[7], s1[4], cospi_16_64); in vpx_highbd_idct32_6_neon()
121 s1[6] = add_multiply_shift_and_narrow_s32_dual(s1[4], s1[7], cospi_16_64); in vpx_highbd_idct32_6_neon()
153 s2[10] = sub_multiply_shift_and_narrow_s32_dual(s2[14], s2[9], cospi_16_64); in vpx_highbd_idct32_6_neon()
154 s2[13] = add_multiply_shift_and_narrow_s32_dual(s2[9], s2[14], cospi_16_64); in vpx_highbd_idct32_6_neon()
156 s2[11] = sub_multiply_shift_and_narrow_s32_dual(s2[15], s2[8], cospi_16_64); in vpx_highbd_idct32_6_neon()
157 s2[12] = add_multiply_shift_and_narrow_s32_dual(s2[8], s2[15], cospi_16_64); in vpx_highbd_idct32_6_neon()
195 s1[20] = sub_multiply_shift_and_narrow_s32_dual(s3[27], s2[20], cospi_16_64); in vpx_highbd_idct32_6_neon()
196 s1[27] = add_multiply_shift_and_narrow_s32_dual(s2[20], s3[27], cospi_16_64); in vpx_highbd_idct32_6_neon()
198 s1[21] = sub_multiply_shift_and_narrow_s32_dual(s3[26], s2[21], cospi_16_64); in vpx_highbd_idct32_6_neon()
[all …]
idct32x32_135_add_neon.c
173 s4[0] = multiply_shift_and_narrow_s16(in[0], cospi_16_64); in vpx_idct32_12_neon()
210 s5[5] = sub_multiply_shift_and_narrow_s16(s3[7], s3[4], cospi_16_64); in vpx_idct32_12_neon()
211 s5[6] = add_multiply_shift_and_narrow_s16(s3[4], s3[7], cospi_16_64); in vpx_idct32_12_neon()
252 s6[10] = sub_multiply_shift_and_narrow_s16(s5[13], s5[10], cospi_16_64); in vpx_idct32_12_neon()
253 s6[13] = add_multiply_shift_and_narrow_s16(s5[10], s5[13], cospi_16_64); in vpx_idct32_12_neon()
255 s6[11] = sub_multiply_shift_and_narrow_s16(s5[12], s5[11], cospi_16_64); in vpx_idct32_12_neon()
256 s6[12] = add_multiply_shift_and_narrow_s16(s5[11], s5[12], cospi_16_64); in vpx_idct32_12_neon()
294 s7[20] = sub_multiply_shift_and_narrow_s16(s6[27], s6[20], cospi_16_64); in vpx_idct32_12_neon()
295 s7[27] = add_multiply_shift_and_narrow_s16(s6[20], s6[27], cospi_16_64); in vpx_idct32_12_neon()
297 s7[21] = sub_multiply_shift_and_narrow_s16(s6[26], s6[21], cospi_16_64); in vpx_idct32_12_neon()
[all …]
highbd_idct32x32_135_add_neon.c
183 s4[0] = multiply_shift_and_narrow_s32_dual(in[0], cospi_16_64); in vpx_highbd_idct32_12_neon()
220 s5[5] = sub_multiply_shift_and_narrow_s32_dual(s3[7], s3[4], cospi_16_64); in vpx_highbd_idct32_12_neon()
221 s5[6] = add_multiply_shift_and_narrow_s32_dual(s3[4], s3[7], cospi_16_64); in vpx_highbd_idct32_12_neon()
262 s6[10] = sub_multiply_shift_and_narrow_s32_dual(s5[13], s5[10], cospi_16_64); in vpx_highbd_idct32_12_neon()
263 s6[13] = add_multiply_shift_and_narrow_s32_dual(s5[10], s5[13], cospi_16_64); in vpx_highbd_idct32_12_neon()
265 s6[11] = sub_multiply_shift_and_narrow_s32_dual(s5[12], s5[11], cospi_16_64); in vpx_highbd_idct32_12_neon()
266 s6[12] = add_multiply_shift_and_narrow_s32_dual(s5[11], s5[12], cospi_16_64); in vpx_highbd_idct32_12_neon()
304 s7[20] = sub_multiply_shift_and_narrow_s32_dual(s6[27], s6[20], cospi_16_64); in vpx_highbd_idct32_12_neon()
305 s7[27] = add_multiply_shift_and_narrow_s32_dual(s6[20], s6[27], cospi_16_64); in vpx_highbd_idct32_12_neon()
307 s7[21] = sub_multiply_shift_and_narrow_s32_dual(s6[26], s6[21], cospi_16_64); in vpx_highbd_idct32_12_neon()
[all …]
idct4x4_1_add_neon.asm
27 ; cospi_16_64 = 11585
30 ; out = dct_const_round_shift(input[0] * cospi_16_64)
31 mul r0, r0, r12 ; input[0] * cospi_16_64
35 ; out = dct_const_round_shift(out * cospi_16_64)
36 mul r0, r0, r12 ; out * cospi_16_64
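Note for readers tracing these hits: cospi_16_64 is round(cos(16 * pi / 64) * 2^14) = 11585, and dct_const_round_shift() divides by 2^14 with rounding (DCT_CONST_BITS is 14 in vpx_dsp/txfm_common.h). A minimal plain-C sketch of the DC-only path the asm comments above describe; the function name is invented for illustration and this is not the shipped code:

    #include <stdint.h>

    #define DCT_CONST_BITS 14
    #define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n)-1))) >> (n))
    static const int64_t cospi_16_64 = 11585; /* round(cos(pi/4) * 2^14) */

    /* invented name; mirrors the two dct_const_round_shift steps above */
    static int32_t dc_only_value(int32_t input0) {
      const int64_t out = ROUND_POWER_OF_TWO(input0 * cospi_16_64, DCT_CONST_BITS);
      /* one multiply per 1-D pass, so the 2-D transform applies it twice */
      return (int32_t)ROUND_POWER_OF_TWO(out * cospi_16_64, DCT_CONST_BITS);
    }

Since 11585 / 2^14 is cos(pi/4), applying it twice scales the DC term by roughly 1/2.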
fwd_txfm_neon.c
59 v_t0_lo = vmulq_n_s32(v_t0_lo, (int32_t)cospi_16_64); in vpx_fdct8x8_neon()
60 v_t0_hi = vmulq_n_s32(v_t0_hi, (int32_t)cospi_16_64); in vpx_fdct8x8_neon()
61 v_t1_lo = vmulq_n_s32(v_t1_lo, (int32_t)cospi_16_64); in vpx_fdct8x8_neon()
62 v_t1_hi = vmulq_n_s32(v_t1_hi, (int32_t)cospi_16_64); in vpx_fdct8x8_neon()
80 v_t0_lo = vmull_n_s16(vget_low_s16(v_x0), (int16_t)cospi_16_64); in vpx_fdct8x8_neon()
81 v_t0_hi = vmull_n_s16(vget_high_s16(v_x0), (int16_t)cospi_16_64); in vpx_fdct8x8_neon()
82 v_t1_lo = vmull_n_s16(vget_low_s16(v_x1), (int16_t)cospi_16_64); in vpx_fdct8x8_neon()
83 v_t1_hi = vmull_n_s16(vget_high_s16(v_x1), (int16_t)cospi_16_64); in vpx_fdct8x8_neon()
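Every NEON hit above follows the same widen-multiply-round-narrow pattern. A sketch under assumptions (the helper name is invented; the constant and the shift of 14 follow vpx_dsp/txfm_common.h):

    #include <arm_neon.h>

    /* invented helper; mirrors the vmull_n_s16 pattern in the hits above */
    static int16x8_t mul_cospi16_round_narrow(const int16x8_t x) {
      /* widen to 32 bits so the 16x16-bit products cannot overflow */
      const int32x4_t lo = vmull_n_s16(vget_low_s16(x), 11585 /* cospi_16_64 */);
      const int32x4_t hi = vmull_n_s16(vget_high_s16(x), 11585);
      /* vrshrn_n_s32 rounds and narrows by DCT_CONST_BITS (14) in one step */
      return vcombine_s16(vrshrn_n_s32(lo, 14), vrshrn_n_s32(hi, 14));
    }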
highbd_idct32x32_1024_add_neon.c
502 do_butterfly(q[14], q[13], cospi_16_64, cospi_16_64, &q[13], &q[14]); in vpx_highbd_idct32_32_neon()
504 do_butterfly(q[8], q[11], cospi_16_64, cospi_16_64, &q[13], &q[14]); in vpx_highbd_idct32_32_neon()
527 do_butterfly(q[14], q[13], cospi_16_64, cospi_16_64, &q[13], &q[14]); in vpx_highbd_idct32_32_neon()
529 do_butterfly(q[0], q[1], cospi_16_64, cospi_16_64, &q[1], &q[0]); in vpx_highbd_idct32_32_neon()
572 do_butterfly(q[14], q[13], cospi_16_64, cospi_16_64, &q[1], &q[3]); in vpx_highbd_idct32_32_neon()
576 do_butterfly(q[14], q[13], cospi_16_64, cospi_16_64, &q[1], &q[3]); in vpx_highbd_idct32_32_neon()
594 do_butterfly(q[14], q[13], cospi_16_64, cospi_16_64, &q[1], &q[3]); in vpx_highbd_idct32_32_neon()
599 do_butterfly(q[14], q[13], cospi_16_64, cospi_16_64, &q[5], &q[7]); in vpx_highbd_idct32_32_neon()
idct4x4_add_neon.asm
43 ; cospi_16_64 = 11585
58 vdup.16 d21, r3 ; replicate cospi_16_64
78 ; (input[0] + input[2]) * cospi_16_64;
79 ; (input[0] - input[2]) * cospi_16_64;
131 ; (input[0] + input[2]) * cospi_16_64;
132 ; (input[0] - input[2]) * cospi_16_64;
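The two comment lines repeated above are the even half of the 4-point IDCT. One constant serves both the sum and the difference because cos(pi/4) = sin(pi/4), so the stage reduces to a scaled 45-degree rotation:

    \begin{pmatrix} e_0 \\ e_1 \end{pmatrix}
      = c \begin{pmatrix} 1 & 1 \\ 1 & -1 \end{pmatrix}
          \begin{pmatrix} x_0 \\ x_2 \end{pmatrix},
    \qquad c = \cos(\pi/4) \approx 11585 / 2^{14}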
idct32x32_add_neon.c
632 do_butterfly(q[14], q[13], cospi_16_64, cospi_16_64, &q[13], &q[14]); in vpx_idct32_32_neon()
634 do_butterfly(q[8], q[11], cospi_16_64, cospi_16_64, &q[13], &q[14]); in vpx_idct32_32_neon()
657 do_butterfly(q[14], q[13], cospi_16_64, cospi_16_64, &q[13], &q[14]); in vpx_idct32_32_neon()
659 do_butterfly(q[0], q[1], cospi_16_64, cospi_16_64, &q[1], &q[0]); in vpx_idct32_32_neon()
702 do_butterfly(q[14], q[13], cospi_16_64, cospi_16_64, &q[1], &q[3]); in vpx_idct32_32_neon()
706 do_butterfly(q[14], q[13], cospi_16_64, cospi_16_64, &q[1], &q[3]); in vpx_idct32_32_neon()
724 do_butterfly(q[14], q[13], cospi_16_64, cospi_16_64, &q[1], &q[3]); in vpx_idct32_32_neon()
729 do_butterfly(q[14], q[13], cospi_16_64, cospi_16_64, &q[5], &q[7]); in vpx_idct32_32_neon()
/external/libvpx/libvpx/vpx_dsp/mips/
fwd_dct32x32_msa.c
77 DOTP_CONST_PAIR(temp0, in4, cospi_16_64, cospi_16_64, temp1, temp0); in fdct8x32_1d_column_even_store()
88 DOTP_CONST_PAIR(vec6, vec5, cospi_16_64, cospi_16_64, vec5, vec6); in fdct8x32_1d_column_even_store()
101 DOTP_CONST_PAIR(in13, in10, cospi_16_64, cospi_16_64, vec2, vec5); in fdct8x32_1d_column_even_store()
102 DOTP_CONST_PAIR(in12, in11, cospi_16_64, cospi_16_64, vec3, vec4); in fdct8x32_1d_column_even_store()
141 DOTP_CONST_PAIR(in27, in20, cospi_16_64, cospi_16_64, in20, in27); in fdct8x32_1d_column_odd_store()
142 DOTP_CONST_PAIR(in26, in21, cospi_16_64, cospi_16_64, in21, in26); in fdct8x32_1d_column_odd_store()
164 DOTP_CONST_PAIR(in25, in22, cospi_16_64, cospi_16_64, in22, in25); in fdct8x32_1d_column_odd_store()
165 DOTP_CONST_PAIR(in24, in23, cospi_16_64, cospi_16_64, in23, in24); in fdct8x32_1d_column_odd_store()
326 DOTP_CONST_PAIR_W(vec4_r, vec6_r, tmp3_w, vec3_r, cospi_16_64, cospi_16_64, in fdct8x32_1d_row_even_4x()
346 DOTP_CONST_PAIR(vec6, vec5, cospi_16_64, cospi_16_64, vec5, vec6); in fdct8x32_1d_row_even_4x()
[all …]
itrans16_dspr2.c
76 [cospi_16_64] "r"(cospi_16_64)); in idct16_rows_dspr2()
262 [cospi_16_64] "r"(cospi_16_64)); in idct16_rows_dspr2()
311 [step2_11] "r"(step2_11), [cospi_16_64] "r"(cospi_16_64)); in idct16_rows_dspr2()
461 [cospi_16_64] "r"(cospi_16_64)); in idct16_cols_add_blk_dspr2()
648 [cospi_16_64] "r"(cospi_16_64)); in idct16_cols_add_blk_dspr2()
697 [step2_11] "r"(step2_11), [cospi_16_64] "r"(cospi_16_64)); in idct16_cols_add_blk_dspr2()
1194 s2 = (-cospi_16_64) * (x2 + x3); in iadst16_dspr2()
1195 s3 = cospi_16_64 * (x2 - x3); in iadst16_dspr2()
1196 s6 = cospi_16_64 * (x6 + x7); in iadst16_dspr2()
1197 s7 = cospi_16_64 * (-x6 + x7); in iadst16_dspr2()
[all …]
idct16x16_msa.c
30 DOTP_CONST_PAIR(reg14, reg2, cospi_16_64, cospi_16_64, loc2, loc3); in vpx_idct16_1d_rows_msa()
31 DOTP_CONST_PAIR(reg0, reg8, cospi_16_64, cospi_16_64, reg0, reg8); in vpx_idct16_1d_rows_msa()
79 DOTP_CONST_PAIR(reg5, reg11, cospi_16_64, cospi_16_64, reg5, reg11); in vpx_idct16_1d_rows_msa()
85 DOTP_CONST_PAIR(reg3, reg13, cospi_16_64, cospi_16_64, reg3, reg13); in vpx_idct16_1d_rows_msa()
122 DOTP_CONST_PAIR(reg14, reg2, cospi_16_64, cospi_16_64, loc2, loc3); in vpx_idct16_1d_columns_addblk_msa()
123 DOTP_CONST_PAIR(reg0, reg8, cospi_16_64, cospi_16_64, reg0, reg8); in vpx_idct16_1d_columns_addblk_msa()
176 DOTP_CONST_PAIR(reg5, reg11, cospi_16_64, cospi_16_64, reg5, reg11); in vpx_idct16_1d_columns_addblk_msa()
182 DOTP_CONST_PAIR(reg3, reg13, cospi_16_64, cospi_16_64, reg3, reg13); in vpx_idct16_1d_columns_addblk_msa()
269 out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), DCT_CONST_BITS); in vpx_idct16x16_1_add_msa()
270 out = ROUND_POWER_OF_TWO((out * cospi_16_64), DCT_CONST_BITS); in vpx_idct16x16_1_add_msa()
[all …]
idct32x32_msa.c
53 DOTP_CONST_PAIR(vec2, vec0, cospi_16_64, cospi_16_64, loc2, loc3); in idct32x8_row_even_process_store()
58 DOTP_CONST_PAIR(reg0, reg4, cospi_16_64, cospi_16_64, reg0, reg4); in idct32x8_row_even_process_store()
95 DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, reg6, reg1); in idct32x8_row_even_process_store()
96 DOTP_CONST_PAIR(reg4, reg3, cospi_16_64, cospi_16_64, reg3, reg4); in idct32x8_row_even_process_store()
218 DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1); in idct32x8_row_odd_process_store()
221 DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3); in idct32x8_row_odd_process_store()
232 DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1); in idct32x8_row_odd_process_store()
235 DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3); in idct32x8_row_odd_process_store()
364 DOTP_CONST_PAIR(vec2, vec0, cospi_16_64, cospi_16_64, loc2, loc3); in idct8x32_column_even_process_store()
369 DOTP_CONST_PAIR(reg0, reg4, cospi_16_64, cospi_16_64, reg0, reg4); in idct8x32_column_even_process_store()
[all …]
idct8x8_msa.c
66 k0 = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64); in vpx_idct8x8_12_add_msa()
67 k1 = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64); in vpx_idct8x8_12_add_msa()
79 k1 = VP9_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64); in vpx_idct8x8_12_add_msa()
108 out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), DCT_CONST_BITS); in vpx_idct8x8_1_add_msa()
109 out = ROUND_POWER_OF_TWO((out * cospi_16_64), DCT_CONST_BITS); in vpx_idct8x8_1_add_msa()
itrans8_dspr2.c
184 [cospi_16_64] "r"(cospi_16_64), [cospi_28_64] "r"(cospi_28_64), in idct8_rows_dspr2()
444 [cospi_16_64] "r"(cospi_16_64), [cospi_28_64] "r"(cospi_28_64), in idct8_columns_add_blk_dspr2()
671 s2 = cospi_16_64 * (x2 + x3); in iadst8_dspr2()
672 s3 = cospi_16_64 * (x2 - x3); in iadst8_dspr2()
673 s6 = cospi_16_64 * (x6 + x7); in iadst8_dspr2()
674 s7 = cospi_16_64 * (x6 - x7); in iadst8_dspr2()
inv_txfm_msa.h
25 v8i16 coeff1_m = { cospi_8_64, -cospi_8_64, cospi_16_64, -cospi_16_64, \
117 c0_m = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64); \
118 c1_m = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64); \
221 cospi_16_64, -cospi_4_64, -cospi_20_64, -cospi_16_64 }; \
258 -cospi_24_64, cospi_8_64, cospi_16_64, -cospi_16_64, 0, 0, 0, 0 \
395 k0_m = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64); \
396 k1_m = VP9_SET_COSPI_PAIR(-cospi_16_64, -cospi_16_64); \
397 k2_m = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64); \
398 k3_m = VP9_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64); \
itrans32_cols_dspr2.c
445 [step2_15] "r"(step2_15), [cospi_16_64] "r"(cospi_16_64)); in vpx_idct32_cols_add_blk_dspr2()
625 [cospi_16_64] "r"(cospi_16_64)); in vpx_idct32_cols_add_blk_dspr2()
684 [cospi_16_64] "r"(cospi_16_64)); in vpx_idct32_cols_add_blk_dspr2()
775 [step2_26] "r"(step2_26), [cospi_16_64] "r"(cospi_16_64)); in vpx_idct32_cols_add_blk_dspr2()
808 [step2_24] "r"(step2_24), [cospi_16_64] "r"(cospi_16_64)); in vpx_idct32_cols_add_blk_dspr2()
itrans4_dspr2.c
90 [cospi_8_64] "r"(cospi_8_64), [cospi_16_64] "r"(cospi_16_64), in vpx_idct4_rows_dspr2()
212 [cospi_8_64] "r"(cospi_8_64), [cospi_16_64] "r"(cospi_16_64), in vpx_idct4_columns_add_blk_dspr2()
itrans32_dspr2.c
489 [step2_15] "r"(step2_15), [cospi_16_64] "r"(cospi_16_64)); in idct32_rows_dspr2()
669 [cospi_16_64] "r"(cospi_16_64)); in idct32_rows_dspr2()
728 [cospi_16_64] "r"(cospi_16_64)); in idct32_rows_dspr2()
819 [step2_26] "r"(step2_26), [cospi_16_64] "r"(cospi_16_64)); in idct32_rows_dspr2()
852 [step2_24] "r"(step2_24), [cospi_16_64] "r"(cospi_16_64)); in idct32_rows_dspr2()
inv_txfm_dspr2.h
56 [cospi_16_64] "r"(cospi_16_64)); \
/external/libvpx/libvpx/vpx_dsp/x86/
fwd_txfm_impl_sse2.h
47 octa_set_epi16(cospi_16_64, cospi_16_64, cospi_16_64, cospi_16_64, in FDCT4x4_2D()
48 cospi_16_64, -cospi_16_64, cospi_16_64, -cospi_16_64); in FDCT4x4_2D()
50 octa_set_epi16(cospi_16_64, -cospi_16_64, cospi_16_64, -cospi_16_64, in FDCT4x4_2D()
51 cospi_16_64, cospi_16_64, cospi_16_64, cospi_16_64); in FDCT4x4_2D()
59 octa_set_epi16(cospi_16_64, cospi_16_64, cospi_16_64, cospi_16_64, in FDCT4x4_2D()
60 cospi_16_64, cospi_16_64, cospi_16_64, cospi_16_64); in FDCT4x4_2D()
62 octa_set_epi16(cospi_16_64, -cospi_16_64, cospi_16_64, -cospi_16_64, in FDCT4x4_2D()
63 cospi_16_64, -cospi_16_64, cospi_16_64, -cospi_16_64); in FDCT4x4_2D()
264 const __m128i k__cospi_p16_p16 = _mm_set1_epi16((int16_t)cospi_16_64); in FDCT8x8_2D()
265 const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); in FDCT8x8_2D()
[all …]
inv_txfm_ssse3.c
26 const __m128i stk2_0 = pair_set_epi16(cospi_16_64, cospi_16_64); in vpx_idct8x8_64_add_ssse3()
27 const __m128i stk2_1 = pair_set_epi16(cospi_16_64, -cospi_16_64); in vpx_idct8x8_64_add_ssse3()
228 const __m128i stg2_0 = pair_set_epi16(2 * cospi_16_64, 2 * cospi_16_64); in vpx_idct8x8_12_add_ssse3()
229 const __m128i stk2_0 = pair_set_epi16(cospi_16_64, cospi_16_64); in vpx_idct8x8_12_add_ssse3()
230 const __m128i stk2_1 = pair_set_epi16(cospi_16_64, -cospi_16_64); in vpx_idct8x8_12_add_ssse3()
233 const __m128i stg3_0 = pair_set_epi16(-cospi_16_64, cospi_16_64); in vpx_idct8x8_12_add_ssse3()
444 const __m128i stg4_0 = pair_set_epi16(cospi_16_64, cospi_16_64); in idct32_34_first_half()
445 const __m128i stk4_0 = pair_set_epi16(2 * cospi_16_64, 2 * cospi_16_64); in idct32_34_first_half()
446 const __m128i stg4_1 = pair_set_epi16(cospi_16_64, -cospi_16_64); in idct32_34_first_half()
451 const __m128i stg6_0 = pair_set_epi16(-cospi_16_64, cospi_16_64); in idct32_34_first_half()
[all …]
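The pair_set_epi16(cospi_16_64, ±cospi_16_64) constants in these x86 hits exist to feed pmaddwd, which multiplies packed 16-bit lanes and sums adjacent pairs: with two inputs interleaved, one instruction yields (a + b) * c or (a - b) * c per 32-bit lane. A hedged sketch; pair_set_epi16 is re-derived here for illustration rather than copied from the library:

    #include <emmintrin.h>
    #include <stdint.h>

    /* re-derived for illustration; libvpx ships its own pair_set_epi16 */
    static __m128i pair_set_epi16(int16_t a, int16_t b) {
      return _mm_set1_epi32((int)((uint16_t)a | ((uint32_t)(uint16_t)b << 16)));
    }

    /* (a, b) interleaved as 16-bit lanes: madd against (c, c) gives
     * (a + b) * c per 32-bit lane; (c, -c) would give (a - b) * c */
    static __m128i butterfly_sum_cospi16(const __m128i ab_interleaved) {
      const __m128i k_p16_p16 = pair_set_epi16(11585, 11585);
      const __m128i rounding = _mm_set1_epi32(1 << 13); /* half of 2^14 */
      const __m128i prod = _mm_madd_epi16(ab_interleaved, k_p16_p16);
      return _mm_srai_epi32(_mm_add_epi32(prod, rounding), 14);
    }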
/external/libvpx/libvpx/vp9/encoder/
vp9_dct.c
32 temp1 = (step[0] + step[1]) * cospi_16_64; in fdct4()
33 temp2 = (step[0] - step[1]) * cospi_16_64; in fdct4()
62 t0 = (x0 + x1) * cospi_16_64; in fdct8()
63 t1 = (x0 - x1) * cospi_16_64; in fdct8()
72 t0 = (s6 - s5) * cospi_16_64; in fdct8()
73 t1 = (s6 + s5) * cospi_16_64; in fdct8()
141 t0 = (x0 + x1) * cospi_16_64; in fdct16()
142 t1 = (x0 - x1) * cospi_16_64; in fdct16()
151 t0 = (s6 - s5) * cospi_16_64; in fdct16()
152 t1 = (s6 + s5) * cospi_16_64; in fdct16()
[all …]
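On the encoder side the same constant drives the forward butterfly: the fdct4/fdct8 hits above compute (x0 + x1) * cospi_16_64 and (x0 - x1) * cospi_16_64 and then round-shift by 2^14. A standalone plain-C sketch with an invented name:

    #include <stdint.h>

    /* invented standalone version of the fdct butterfly hit above */
    static void fdct_even_butterfly(int32_t x0, int32_t x1, int16_t *out_sum,
                                    int16_t *out_diff) {
      const int64_t cospi_16_64 = 11585;
      const int64_t t0 = (int64_t)(x0 + x1) * cospi_16_64;
      const int64_t t1 = (int64_t)(x0 - x1) * cospi_16_64;
      /* fdct_round_shift: add half of 2^14, then shift right by 14 */
      *out_sum = (int16_t)((t0 + (1 << 13)) >> 14);
      *out_diff = (int16_t)((t1 + (1 << 13)) >> 14);
    }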
/external/libvpx/libvpx/vpx_dsp/
fwd_txfm.c
56 temp1 = (step[0] + step[1]) * cospi_16_64; in vpx_fdct4x4_c()
57 temp2 = (step[0] - step[1]) * cospi_16_64; in vpx_fdct4x4_c()
132 t0 = (x0 + x1) * cospi_16_64; in vpx_fdct8x8_c()
133 t1 = (x0 - x1) * cospi_16_64; in vpx_fdct8x8_c()
142 t0 = (s6 - s5) * cospi_16_64; in vpx_fdct8x8_c()
143 t1 = (s6 + s5) * cospi_16_64; in vpx_fdct8x8_c()
266 t0 = (x0 + x1) * cospi_16_64; in vpx_fdct16x16_c()
267 t1 = (x0 - x1) * cospi_16_64; in vpx_fdct16x16_c()
276 t0 = (s6 - s5) * cospi_16_64; in vpx_fdct16x16_c()
277 t1 = (s6 + s5) * cospi_16_64; in vpx_fdct16x16_c()
[all …]
inv_txfm.c
137 temp1 = (input[0] + input[2]) * cospi_16_64; in idct4_c()
138 temp2 = (input[0] - input[2]) * cospi_16_64; in idct4_c()
180 tran_low_t out = WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64)); in vpx_idct4x4_1_add_c()
182 out = WRAPLOW(dct_const_round_shift(out * cospi_16_64)); in vpx_idct4x4_1_add_c()
249 s2 = (int)(cospi_16_64 * (x2 + x3)); in iadst8_c()
250 s3 = (int)(cospi_16_64 * (x2 - x3)); in iadst8_c()
251 s6 = (int)(cospi_16_64 * (x6 + x7)); in iadst8_c()
252 s7 = (int)(cospi_16_64 * (x6 - x7)); in iadst8_c()
288 temp1 = (step1[0] + step1[2]) * cospi_16_64; in idct8_c()
289 temp2 = (step1[0] - step1[2]) * cospi_16_64; in idct8_c()
[all …]
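The idct4_c and iadst8_c hits above are the scalar references that every SIMD file in these results mirrors. A condensed sketch of the full 4-point IDCT stage; the cospi_8_64/cospi_24_64 values are the standard txfm_common.h constants, and the WRAPLOW clamping is omitted for brevity, so this is illustrative rather than bit-exact:

    #include <stdint.h>

    static int32_t round14(int64_t x) { return (int32_t)((x + (1 << 13)) >> 14); }

    /* condensed from the idct4_c pattern above */
    static void idct4_sketch(const int16_t *in, int16_t *out) {
      const int64_t cospi_16_64 = 11585, cospi_8_64 = 15137, cospi_24_64 = 6270;
      const int32_t s0 = round14((in[0] + in[2]) * cospi_16_64); /* even sum */
      const int32_t s1 = round14((in[0] - in[2]) * cospi_16_64); /* even diff */
      const int32_t s2 = round14(in[1] * cospi_24_64 - in[3] * cospi_8_64);
      const int32_t s3 = round14(in[1] * cospi_8_64 + in[3] * cospi_24_64);
      out[0] = (int16_t)(s0 + s3);
      out[1] = (int16_t)(s1 + s2);
      out[2] = (int16_t)(s1 - s2);
      out[3] = (int16_t)(s0 - s3);
    }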
