
Searched refs:cospi_8_64 (Results 1 – 25 of 35) sorted by relevance
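All of these hits use the same fixed-point cosine constant: in this tree, cospi_N_64 is cos(N·pi/64) scaled by 2^14 and rounded to an integer (the idct4x4_add_neon.asm comment below confirms cospi_8_64 = 15137). A minimal derivation sketch, assuming the 2^14 (DCT_CONST_BITS) scale used throughout the transform code:

```c
#include <math.h>
#include <stdio.h>

/* Sketch: derive the cospi_N_64 constants, assuming the 2^14 scale
 * (DCT_CONST_BITS = 14) used by the libvpx transform code. */
int main(void) {
  const double scale = 1 << 14;
  const long cospi_8_64 = lround(cos(8 * M_PI / 64) * scale);   /* 15137 */
  const long cospi_24_64 = lround(cos(24 * M_PI / 64) * scale); /* 6270 */
  printf("cospi_8_64 = %ld, cospi_24_64 = %ld\n", cospi_8_64, cospi_24_64);
  return 0;
}
```

cospi_24_64 appears alongside cospi_8_64 in nearly every hit because 24π/64 = π/2 − 8π/64, making the two constants a (cos, sin) pair for a single rotation angle (see the note after the inv_txfm.c results).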


/external/libvpx/libvpx/vpx_dsp/arm/
idct32x32_34_add_neon.c 98 s2[9] = multiply_accumulate_shift_and_narrow_s16(s2[8], -cospi_8_64, s2[15], in vpx_idct32_6_neon()
101 cospi_8_64); in vpx_idct32_6_neon()
116 s1[18] = multiply_accumulate_shift_and_narrow_s16(s1[17], -cospi_8_64, s1[30], in vpx_idct32_6_neon()
119 cospi_8_64); in vpx_idct32_6_neon()
121 s1[19] = multiply_accumulate_shift_and_narrow_s16(s1[16], -cospi_8_64, s1[31], in vpx_idct32_6_neon()
124 cospi_8_64); in vpx_idct32_6_neon()
127 s2[27], -cospi_8_64); in vpx_idct32_6_neon()
128 s1[27] = multiply_accumulate_shift_and_narrow_s16(s2[20], -cospi_8_64, s2[27], in vpx_idct32_6_neon()
132 s2[26], -cospi_8_64); in vpx_idct32_6_neon()
133 s1[26] = multiply_accumulate_shift_and_narrow_s16(s2[21], -cospi_8_64, s2[26], in vpx_idct32_6_neon()
[all …]
idct32x32_135_add_neon.c 175 s4[3] = multiply_shift_and_narrow_s16(in[8], cospi_8_64); in vpx_idct32_12_neon()
177 s4[9] = multiply_accumulate_shift_and_narrow_s16(s2[8], -cospi_8_64, s2[15], in vpx_idct32_12_neon()
180 cospi_8_64); in vpx_idct32_12_neon()
183 s3[13], -cospi_8_64); in vpx_idct32_12_neon()
184 s4[13] = multiply_accumulate_shift_and_narrow_s16(s3[10], -cospi_8_64, s3[13], in vpx_idct32_12_neon()
222 s5[18] = multiply_accumulate_shift_and_narrow_s16(s4[18], -cospi_8_64, s4[29], in vpx_idct32_12_neon()
225 cospi_8_64); in vpx_idct32_12_neon()
227 s5[19] = multiply_accumulate_shift_and_narrow_s16(s4[19], -cospi_8_64, s4[28], in vpx_idct32_12_neon()
230 cospi_8_64); in vpx_idct32_12_neon()
233 s4[27], -cospi_8_64); in vpx_idct32_12_neon()
[all …]
highbd_idct32x32_34_add_neon.c 105 s2[9] = multiply_accumulate_shift_and_narrow_s32_dual(s2[8], -cospi_8_64, in vpx_highbd_idct32_6_neon()
108 s2[15], cospi_8_64); in vpx_highbd_idct32_6_neon()
123 s1[18] = multiply_accumulate_shift_and_narrow_s32_dual(s1[17], -cospi_8_64, in vpx_highbd_idct32_6_neon()
126 s1[30], cospi_8_64); in vpx_highbd_idct32_6_neon()
128 s1[19] = multiply_accumulate_shift_and_narrow_s32_dual(s1[16], -cospi_8_64, in vpx_highbd_idct32_6_neon()
131 s1[31], cospi_8_64); in vpx_highbd_idct32_6_neon()
134 s2[27], -cospi_8_64); in vpx_highbd_idct32_6_neon()
135 s1[27] = multiply_accumulate_shift_and_narrow_s32_dual(s2[20], -cospi_8_64, in vpx_highbd_idct32_6_neon()
139 s2[26], -cospi_8_64); in vpx_highbd_idct32_6_neon()
140 s1[26] = multiply_accumulate_shift_and_narrow_s32_dual(s2[21], -cospi_8_64, in vpx_highbd_idct32_6_neon()
[all …]
highbd_idct32x32_135_add_neon.c 185 s4[3] = multiply_shift_and_narrow_s32_dual(in[8], cospi_8_64); in vpx_highbd_idct32_12_neon()
187 s4[9] = multiply_accumulate_shift_and_narrow_s32_dual(s2[8], -cospi_8_64, in vpx_highbd_idct32_12_neon()
190 s2[15], cospi_8_64); in vpx_highbd_idct32_12_neon()
193 s3[13], -cospi_8_64); in vpx_highbd_idct32_12_neon()
194 s4[13] = multiply_accumulate_shift_and_narrow_s32_dual(s3[10], -cospi_8_64, in vpx_highbd_idct32_12_neon()
232 s5[18] = multiply_accumulate_shift_and_narrow_s32_dual(s4[18], -cospi_8_64, in vpx_highbd_idct32_12_neon()
235 s4[29], cospi_8_64); in vpx_highbd_idct32_12_neon()
237 s5[19] = multiply_accumulate_shift_and_narrow_s32_dual(s4[19], -cospi_8_64, in vpx_highbd_idct32_12_neon()
240 s4[28], cospi_8_64); in vpx_highbd_idct32_12_neon()
243 s4[27], -cospi_8_64); in vpx_highbd_idct32_12_neon()
[all …]
idct4x4_add_neon.asm 41 ; cospi_8_64 = 15137
57 vdup.16 d20, r0 ; replicate cospi_8_64
76 vmull.s16 q1, d17, d20 ; input[1] * cospi_8_64
85 ; input[1] * cospi_24_64 - input[3] * cospi_8_64;
86 ; input[1] * cospi_8_64 + input[3] * cospi_24_64;
129 vmull.s16 q1, d17, d20 ; input[1] * cospi_8_64
136 ; input[1] * cospi_24_64 - input[3] * cospi_8_64;
137 ; input[1] * cospi_8_64 + input[3] * cospi_24_64;
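The asm comments above (lines 85–86 and 136–137 of the file) spell out the scalar math the NEON registers carry. A scalar reference sketch of that 4-point IDCT stage, including the 14-bit rounding shift the vrshrn instructions perform; this is a model for reading the asm, not the shipped vpx_dsp code path, and the even half with cospi_16_64 = 11585 is carried over from the C reference:

```c
#include <stdint.h>

#define DCT_CONST_BITS 14

/* Round a 32-bit product back to 16 bits, as the asm's vrshrn does. */
static int16_t dct_const_round_shift(int32_t x) {
  return (int16_t)((x + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS);
}

/* Scalar model of the idct4 butterflies shown above; 15137/6270 are
 * cospi_8_64/cospi_24_64 from the tree. */
static void idct4_model(const int16_t *input, int16_t *output) {
  const int32_t cospi_8_64 = 15137, cospi_16_64 = 11585, cospi_24_64 = 6270;
  int16_t step[4];
  step[0] = dct_const_round_shift((input[0] + input[2]) * cospi_16_64);
  step[1] = dct_const_round_shift((input[0] - input[2]) * cospi_16_64);
  /* input[1] * cospi_24_64 - input[3] * cospi_8_64  (asm comment, line 136) */
  step[2] = dct_const_round_shift(input[1] * cospi_24_64 -
                                  input[3] * cospi_8_64);
  /* input[1] * cospi_8_64 + input[3] * cospi_24_64  (asm comment, line 137) */
  step[3] = dct_const_round_shift(input[1] * cospi_8_64 +
                                  input[3] * cospi_24_64);
  /* The real code wraps/clamps these sums (WRAPLOW); omitted here. */
  output[0] = step[0] + step[3];
  output[1] = step[1] + step[2];
  output[2] = step[1] - step[2];
  output[3] = step[0] - step[3];
}
```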
fdct_neon.c 62 const int32x4_t s_3_cospi_8_64 = vmull_n_s16(s_3, (int16_t)cospi_8_64); in vpx_fdct4x4_neon()
68 vmlsl_n_s16(s_3_cospi_24_64, s_2, (int16_t)cospi_8_64); in vpx_fdct4x4_neon()
fwd_txfm_neon.c 55 v_t2_lo = vmlal_n_s16(v_t2_lo, vget_low_s16(v_x3), (int16_t)cospi_8_64); in vpx_fdct8x8_neon()
56 v_t2_hi = vmlal_n_s16(v_t2_hi, vget_high_s16(v_x3), (int16_t)cospi_8_64); in vpx_fdct8x8_neon()
57 v_t3_lo = vmlsl_n_s16(v_t3_lo, vget_low_s16(v_x2), (int16_t)cospi_8_64); in vpx_fdct8x8_neon()
58 v_t3_hi = vmlsl_n_s16(v_t3_hi, vget_high_s16(v_x2), (int16_t)cospi_8_64); in vpx_fdct8x8_neon()
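The fdct_neon.c and fwd_txfm_neon.c hits do the forward version of the same rotation with widening multiply-accumulates: vmull_n_s16 starts a 32-bit product, vmlal_n_s16/vmlsl_n_s16 add or subtract the second product, and vrshrn_n_s32 rounds back down to 16 bits. A self-contained sketch of that pattern (butterfly_rotate_4 is a hypothetical helper name, not a libvpx function):

```c
#include <arm_neon.h>

#define DCT_CONST_BITS 14

/* Sketch of the vmlal/vmlsl rotation from vpx_fdct8x8_neon, scalar form:
 *   t2 =  x2 * cospi_24_64 + x3 * cospi_8_64
 *   t3 = -x2 * cospi_8_64  + x3 * cospi_24_64
 */
static void butterfly_rotate_4(int16x4_t x2, int16x4_t x3,
                               int16x4_t *t2, int16x4_t *t3) {
  const int16_t cospi_8_64 = 15137, cospi_24_64 = 6270;
  int32x4_t lo2 = vmull_n_s16(x2, cospi_24_64);   /* x2 * cospi_24_64 */
  lo2 = vmlal_n_s16(lo2, x3, cospi_8_64);         /* + x3 * cospi_8_64 */
  int32x4_t lo3 = vmull_n_s16(x3, cospi_24_64);   /* x3 * cospi_24_64 */
  lo3 = vmlsl_n_s16(lo3, x2, cospi_8_64);         /* - x2 * cospi_8_64 */
  *t2 = vrshrn_n_s32(lo2, DCT_CONST_BITS);        /* round, narrow to s16 */
  *t3 = vrshrn_n_s32(lo3, DCT_CONST_BITS);
}
```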
highbd_idct32x32_1024_add_neon.c 444 do_butterfly(q[14], q[13], cospi_24_64, cospi_8_64, &q[0], &q[1]); in vpx_highbd_idct32_32_neon()
450 do_butterfly(q[14], q[13], cospi_24_64, cospi_8_64, &q[4], &q[6]); in vpx_highbd_idct32_32_neon()
509 do_butterfly(q[14], q[13], -cospi_8_64, -cospi_24_64, &q[5], &q[6]); in vpx_highbd_idct32_32_neon()
512 do_butterfly(q[14], q[13], -cospi_8_64, -cospi_24_64, &q[0], &q[1]); in vpx_highbd_idct32_32_neon()
547 do_butterfly(q[14], q[13], cospi_24_64, cospi_8_64, &q[1], &q[3]); in vpx_highbd_idct32_32_neon()
561 do_butterfly(q[14], q[13], -cospi_8_64, -cospi_24_64, &q[4], &q[7]); in vpx_highbd_idct32_32_neon()
601 do_butterfly(q[14], q[13], cospi_24_64, cospi_8_64, &q[14], &q[6]); in vpx_highbd_idct32_32_neon()
/external/libvpx/libvpx/vpx_dsp/mips/
itrans16_dspr2.c 75 [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64), in idct16_rows_dspr2()
137 [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64)); in idct16_rows_dspr2()
198 [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64)); in idct16_rows_dspr2()
460 [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64), in idct16_cols_add_blk_dspr2()
522 [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64)); in idct16_cols_add_blk_dspr2()
583 [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64)); in idct16_cols_add_blk_dspr2()
1163 s4 = x4 * cospi_8_64 + x5 * cospi_24_64; in iadst16_dspr2()
1164 s5 = x4 * cospi_24_64 - x5 * cospi_8_64; in iadst16_dspr2()
1165 s6 = -x6 * cospi_24_64 + x7 * cospi_8_64; in iadst16_dspr2()
1166 s7 = x6 * cospi_8_64 + x7 * cospi_24_64; in iadst16_dspr2()
[all …]
itrans32_cols_dspr2.c 343 [cospi_8_64] "r"(cospi_8_64), [cospi_24_64] "r"(cospi_24_64)); in vpx_idct32_cols_add_blk_dspr2()
399 [cospi_8_64] "r"(cospi_8_64), [cospi_24_64] "r"(cospi_24_64)); in vpx_idct32_cols_add_blk_dspr2()
470 [cospi_8_64] "r"(cospi_8_64)); in vpx_idct32_cols_add_blk_dspr2()
495 [cospi_8_64] "r"(cospi_8_64)); in vpx_idct32_cols_add_blk_dspr2()
520 [cospi_8_64] "r"(cospi_8_64)); in vpx_idct32_cols_add_blk_dspr2()
545 [cospi_8_64] "r"(cospi_8_64)); in vpx_idct32_cols_add_blk_dspr2()
624 [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64), in vpx_idct32_cols_add_blk_dspr2()
itrans8_dspr2.c 186 [cospi_20_64] "r"(cospi_20_64), [cospi_8_64] "r"(cospi_8_64), in idct8_rows_dspr2()
446 [cospi_20_64] "r"(cospi_20_64), [cospi_8_64] "r"(cospi_8_64), in idct8_columns_add_blk_dspr2()
656 s4 = cospi_8_64 * x4 + cospi_24_64 * x5; in iadst8_dspr2()
657 s5 = cospi_24_64 * x4 - cospi_8_64 * x5; in iadst8_dspr2()
658 s6 = -cospi_24_64 * x6 + cospi_8_64 * x7; in iadst8_dspr2()
659 s7 = cospi_8_64 * x6 + cospi_24_64 * x7; in iadst8_dspr2()
itrans32_dspr2.c 387 [cospi_8_64] "r"(cospi_8_64), [cospi_24_64] "r"(cospi_24_64)); in idct32_rows_dspr2()
443 [cospi_8_64] "r"(cospi_8_64), [cospi_24_64] "r"(cospi_24_64)); in idct32_rows_dspr2()
514 [cospi_8_64] "r"(cospi_8_64)); in idct32_rows_dspr2()
539 [cospi_8_64] "r"(cospi_8_64)); in idct32_rows_dspr2()
564 [cospi_8_64] "r"(cospi_8_64)); in idct32_rows_dspr2()
589 [cospi_8_64] "r"(cospi_8_64)); in idct32_rows_dspr2()
668 [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64), in idct32_rows_dspr2()
itrans4_dspr2.c 90 [cospi_8_64] "r"(cospi_8_64), [cospi_16_64] "r"(cospi_16_64), in vpx_idct4_rows_dspr2()
212 [cospi_8_64] "r"(cospi_8_64), [cospi_16_64] "r"(cospi_16_64), in vpx_idct4_columns_add_blk_dspr2()
inv_txfm_msa.h 25 v8i16 coeff1_m = { cospi_8_64, -cospi_8_64, cospi_16_64, -cospi_16_64, \
122 c2_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64); \
123 c3_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64); \
239 k2_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64); \
240 k3_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64); \
256 -cospi_26_64, cospi_8_64, cospi_24_64, -cospi_8_64 }; \
258 -cospi_24_64, cospi_8_64, cospi_16_64, -cospi_16_64, 0, 0, 0, 0 \
386 k0_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64); \
387 k1_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64); \
388 k2_m = VP9_SET_COSPI_PAIR(-cospi_24_64, cospi_8_64); \
fwd_dct32x32_msa.c 82 DOTP_CONST_PAIR(in0, in1, cospi_24_64, cospi_8_64, temp1, temp0); in fdct8x32_1d_column_even_store()
104 DOTP_CONST_PAIR(vec6, vec1, cospi_24_64, cospi_8_64, in1, in3); in fdct8x32_1d_column_even_store()
118 DOTP_CONST_PAIR((-vec2), vec5, cospi_24_64, cospi_8_64, in2, in1); in fdct8x32_1d_column_even_store()
182 DOTP_CONST_PAIR(in26, in21, cospi_24_64, cospi_8_64, in18, in29); in fdct8x32_1d_column_odd_store()
183 DOTP_CONST_PAIR(in27, in20, cospi_24_64, cospi_8_64, in19, in28); in fdct8x32_1d_column_odd_store()
216 DOTP_CONST_PAIR(-in16, in27, cospi_24_64, cospi_8_64, in20, in27); in fdct8x32_1d_column_odd_store()
217 DOTP_CONST_PAIR(-in17, in26, cospi_24_64, cospi_8_64, in21, in26); in fdct8x32_1d_column_odd_store()
335 DOTP_CONST_PAIR_W(vec5_r, vec7_r, vec0_r, vec1_r, cospi_24_64, cospi_8_64, in fdct8x32_1d_row_even_4x()
363 DOTP_CONST_PAIR(vec6, vec1, cospi_24_64, cospi_8_64, in1, in3); in fdct8x32_1d_row_even_4x()
377 DOTP_CONST_PAIR((-vec2), vec5, cospi_24_64, cospi_8_64, in2, in1); in fdct8x32_1d_row_even_4x()
[all …]
idct32x32_msa.c 59 DOTP_CONST_PAIR(reg2, reg6, cospi_24_64, cospi_8_64, reg2, reg6); in idct32x8_row_even_process_store()
87 DOTP_CONST_PAIR(reg7, reg0, cospi_24_64, cospi_8_64, reg0, reg7); in idct32x8_row_even_process_store()
88 DOTP_CONST_PAIR((-reg6), reg1, cospi_24_64, cospi_8_64, reg6, reg1); in idct32x8_row_even_process_store()
160 DOTP_CONST_PAIR(vec1, vec0, cospi_24_64, cospi_8_64, vec0, vec1); in idct32x8_row_odd_process_store()
169 DOTP_CONST_PAIR(vec2, vec3, cospi_24_64, cospi_8_64, vec2, vec3); in idct32x8_row_odd_process_store()
196 DOTP_CONST_PAIR(vec3, vec2, -cospi_8_64, cospi_24_64, vec0, vec1); in idct32x8_row_odd_process_store()
205 DOTP_CONST_PAIR(reg3, reg2, -cospi_8_64, cospi_24_64, reg0, reg1); in idct32x8_row_odd_process_store()
370 DOTP_CONST_PAIR(reg2, reg6, cospi_24_64, cospi_8_64, reg2, reg6); in idct8x32_column_even_process_store()
400 DOTP_CONST_PAIR(reg7, reg0, cospi_24_64, cospi_8_64, reg0, reg7); in idct8x32_column_even_process_store()
401 DOTP_CONST_PAIR((-reg6), reg1, cospi_24_64, cospi_8_64, reg6, reg1); in idct8x32_column_even_process_store()
[all …]
idct16x16_msa.c 32 DOTP_CONST_PAIR(reg4, reg12, cospi_24_64, cospi_8_64, reg4, reg12); in vpx_idct16_1d_rows_msa()
65 DOTP_CONST_PAIR(reg7, reg9, cospi_24_64, cospi_8_64, reg7, reg9); in vpx_idct16_1d_rows_msa()
66 DOTP_CONST_PAIR((-reg5), (-reg11), cospi_8_64, cospi_24_64, reg5, reg11); in vpx_idct16_1d_rows_msa()
124 DOTP_CONST_PAIR(reg4, reg12, cospi_24_64, cospi_8_64, reg4, reg12); in vpx_idct16_1d_columns_addblk_msa()
162 DOTP_CONST_PAIR(reg7, reg9, cospi_24_64, cospi_8_64, reg7, reg9); in vpx_idct16_1d_columns_addblk_msa()
163 DOTP_CONST_PAIR((-reg5), (-reg11), cospi_8_64, cospi_24_64, reg5, reg11); in vpx_idct16_1d_columns_addblk_msa()
411 k0 = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64); in vpx_iadst16_1d_columns_addblk_msa()
412 k1 = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64); in vpx_iadst16_1d_columns_addblk_msa()
413 k2 = VP9_SET_COSPI_PAIR(-cospi_24_64, cospi_8_64); in vpx_iadst16_1d_columns_addblk_msa()
fwd_txfm_msa.h 23 cospi_16_64, -cospi_16_64, cospi_8_64, cospi_24_64, -cospi_8_64, 0, 0, 0 \
63 v8i16 coeff_m = { cospi_16_64, -cospi_16_64, cospi_8_64, cospi_24_64, \
123 v8i16 coeff_m = { cospi_16_64, -cospi_16_64, cospi_8_64, cospi_24_64, \
187 v8i16 coeff_m = { cospi_16_64, -cospi_16_64, cospi_8_64, cospi_24_64, \
188 -cospi_8_64, -cospi_24_64, cospi_12_64, cospi_20_64 }; \
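Nearly every MSA hit goes through DOTP_CONST_PAIR, which rotates two registers by one (cospi_a, cospi_b) pair per call, with coeff vectors like the ones in fwd_txfm_msa.h pre-packing the constants for a whole stage. A scalar model of one lane of such a call; the sign layout is inferred from the matching C reference lines further down (see the inv_txfm.c results) and is an assumption, not the macro text — vpx_dsp/mips/inv_txfm_msa.h has the authoritative definition:

```c
#include <stdint.h>

#define DCT_CONST_BITS 14

static int16_t round_shift(int32_t x) {
  return (int16_t)((x + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS);
}

/* Assumed per-lane model of DOTP_CONST_PAIR(reg0, reg1, cnst0, cnst1,
 * out0, out1), matching the C reference's butterfly:
 *   out0 = reg0*cnst0 - reg1*cnst1
 *   out1 = reg0*cnst1 + reg1*cnst0
 */
static void dotp_const_pair_model(int16_t reg0, int16_t reg1,
                                  int16_t cnst0, int16_t cnst1,
                                  int16_t *out0, int16_t *out1) {
  *out0 = round_shift(reg0 * cnst0 - reg1 * cnst1);
  *out1 = round_shift(reg0 * cnst1 + reg1 * cnst0);
}
```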
/external/libvpx/libvpx/vp9/encoder/
vp9_dct.c 36 temp1 = step[2] * cospi_24_64 + step[3] * cospi_8_64; in fdct4()
37 temp2 = -step[2] * cospi_8_64 + step[3] * cospi_24_64; in fdct4()
64 t2 = x2 * cospi_24_64 + x3 * cospi_8_64; in fdct8()
65 t3 = -x2 * cospi_8_64 + x3 * cospi_24_64; in fdct8()
143 t2 = x3 * cospi_8_64 + x2 * cospi_24_64; in fdct16()
144 t3 = x3 * cospi_24_64 - x2 * cospi_8_64; in fdct16()
194 temp1 = step3[1] * -cospi_8_64 + step3[6] * cospi_24_64; in fdct16()
195 temp2 = step3[2] * cospi_24_64 + step3[5] * cospi_8_64; in fdct16()
198 temp1 = step3[2] * cospi_8_64 - step3[5] * cospi_24_64; in fdct16()
199 temp2 = step3[1] * cospi_24_64 + step3[6] * cospi_8_64; in fdct16()
[all …]
/external/libvpx/libvpx/vpx_dsp/
fwd_txfm.c 60 temp1 = step[2] * cospi_24_64 + step[3] * cospi_8_64; in vpx_fdct4x4_c()
61 temp2 = -step[2] * cospi_8_64 + step[3] * cospi_24_64; in vpx_fdct4x4_c()
134 t2 = x2 * cospi_24_64 + x3 * cospi_8_64; in vpx_fdct8x8_c()
135 t3 = -x2 * cospi_8_64 + x3 * cospi_24_64; in vpx_fdct8x8_c()
268 t2 = x3 * cospi_8_64 + x2 * cospi_24_64; in vpx_fdct16x16_c()
269 t3 = x3 * cospi_24_64 - x2 * cospi_8_64; in vpx_fdct16x16_c()
318 temp1 = step3[1] * -cospi_8_64 + step3[6] * cospi_24_64; in vpx_fdct16x16_c()
319 temp2 = step3[2] * cospi_24_64 + step3[5] * cospi_8_64; in vpx_fdct16x16_c()
322 temp1 = step3[2] * cospi_8_64 - step3[5] * cospi_24_64; in vpx_fdct16x16_c()
323 temp2 = step3[1] * cospi_24_64 + step3[6] * cospi_8_64; in vpx_fdct16x16_c()
[all …]
inv_txfm.c 141 temp1 = input[1] * cospi_24_64 - input[3] * cospi_8_64; in idct4_c()
142 temp2 = input[1] * cospi_8_64 + input[3] * cospi_24_64; in idct4_c()
234 s4 = (int)(cospi_8_64 * x4 + cospi_24_64 * x5); in iadst8_c()
235 s5 = (int)(cospi_24_64 * x4 - cospi_8_64 * x5); in iadst8_c()
236 s6 = (int)(-cospi_24_64 * x6 + cospi_8_64 * x7); in iadst8_c()
237 s7 = (int)(cospi_8_64 * x6 + cospi_24_64 * x7); in iadst8_c()
292 temp1 = step1[1] * cospi_24_64 - step1[3] * cospi_8_64; in idct8_c()
293 temp2 = step1[1] * cospi_8_64 + step1[3] * cospi_24_64; in idct8_c()
487 s4 = x4 * cospi_8_64 + x5 * cospi_24_64; in iadst16_c()
488 s5 = x4 * cospi_24_64 - x5 * cospi_8_64; in iadst16_c()
[all …]
/external/libvpx/libvpx/vpx_dsp/x86/
fwd_txfm_impl_sse2.h 53 octa_set_epi16(cospi_8_64, cospi_24_64, cospi_8_64, cospi_24_64, in FDCT4x4_2D()
54 cospi_24_64, -cospi_8_64, cospi_24_64, -cospi_8_64); in FDCT4x4_2D()
56 octa_set_epi16(cospi_24_64, -cospi_8_64, cospi_24_64, -cospi_8_64, in FDCT4x4_2D()
57 cospi_8_64, cospi_24_64, cospi_8_64, cospi_24_64); in FDCT4x4_2D()
65 octa_set_epi16(cospi_8_64, cospi_24_64, cospi_8_64, cospi_24_64, in FDCT4x4_2D()
66 -cospi_8_64, -cospi_24_64, -cospi_8_64, -cospi_24_64); in FDCT4x4_2D()
68 octa_set_epi16(cospi_24_64, -cospi_8_64, cospi_24_64, -cospi_8_64, in FDCT4x4_2D()
69 -cospi_24_64, cospi_8_64, -cospi_24_64, cospi_8_64); in FDCT4x4_2D()
266 const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64); in FDCT8x8_2D()
267 const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64); in FDCT8x8_2D()
[all …]
inv_txfm_ssse3.c 28 const __m128i stg2_2 = pair_set_epi16(cospi_24_64, -cospi_8_64); in vpx_idct8x8_64_add_ssse3()
29 const __m128i stg2_3 = pair_set_epi16(cospi_8_64, cospi_24_64); in vpx_idct8x8_64_add_ssse3()
232 const __m128i stg2_3 = pair_set_epi16(2 * cospi_8_64, 2 * cospi_8_64); in vpx_idct8x8_12_add_ssse3()
447 const __m128i stg4_4 = pair_set_epi16(-cospi_8_64, cospi_24_64); in idct32_34_first_half()
448 const __m128i stg4_5 = pair_set_epi16(cospi_24_64, cospi_8_64); in idct32_34_first_half()
449 const __m128i stg4_6 = pair_set_epi16(-cospi_24_64, -cospi_8_64); in idct32_34_first_half()
539 const __m128i stg4_4 = pair_set_epi16(-cospi_8_64, cospi_24_64); in idct32_34_second_half()
540 const __m128i stg4_5 = pair_set_epi16(cospi_24_64, cospi_8_64); in idct32_34_second_half()
541 const __m128i stg4_6 = pair_set_epi16(-cospi_24_64, -cospi_8_64); in idct32_34_second_half()
696 const __m128i stk4_3 = pair_set_epi16(2 * cospi_8_64, 2 * cospi_8_64); in idct32_8x32_135_quarter_1()
[all …]
inv_txfm_sse2.c 75 const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64); in idct4_sse2()
76 const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64); in idct4_sse2()
259 const __m128i stg2_2 = pair_set_epi16(cospi_24_64, -cospi_8_64); in vpx_idct8x8_64_add_sse2()
260 const __m128i stg2_3 = pair_set_epi16(cospi_8_64, cospi_24_64); in vpx_idct8x8_64_add_sse2()
348 const __m128i stg2_2 = pair_set_epi16(cospi_24_64, -cospi_8_64); in idct8_sse2()
349 const __m128i stg2_3 = pair_set_epi16(cospi_8_64, cospi_24_64); in idct8_sse2()
374 const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64); in iadst8_sse2()
375 const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64); in iadst8_sse2()
376 const __m128i k__cospi_m24_p08 = pair_set_epi16(-cospi_24_64, cospi_8_64); in iadst8_sse2()
604 const __m128i stg2_2 = pair_set_epi16(cospi_24_64, -cospi_8_64); in vpx_idct8x8_12_add_sse2()
[all …]
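In the SSE2/SSSE3 hits, pair_set_epi16 packs a constant pair into alternating 16-bit lanes so that a single _mm_madd_epi16 (pmaddwd) computes in1*c0 + in3*c1 for four pixels at once, given inputs interleaved with _mm_unpacklo_epi16/_mm_unpackhi_epi16. A standalone sketch of the idiom (pair_set_epi16 is re-declared locally so the snippet compiles on its own; the 14-bit rounding shift is the same assumption as in the scalar sketches above):

```c
#include <emmintrin.h>  /* SSE2 */
#include <stdint.h>

#define DCT_CONST_BITS 14

/* Local stand-in for the tree's pair_set_epi16: replicate (a, b) across
 * all eight 16-bit lanes, a in the even lanes. */
static __m128i pair_set_epi16(int16_t a, int16_t b) {
  return _mm_set_epi16(b, a, b, a, b, a, b, a);
}

/* One half of a rotation for four pixels. `interleaved` must hold
 * (in1[i], in3[i]) pairs, so pmaddwd yields in1[i]*c0 + in3[i]*c1
 * in each 32-bit lane. */
static __m128i butterfly_half(__m128i interleaved, __m128i coeff_pair) {
  const __m128i rounding = _mm_set1_epi32(1 << (DCT_CONST_BITS - 1));
  __m128i v = _mm_madd_epi16(interleaved, coeff_pair);
  v = _mm_add_epi32(v, rounding);
  return _mm_srai_epi32(v, DCT_CONST_BITS);  /* dct_const_round_shift */
}

/* Usage, mirroring stg2_2 above:
 *   const __m128i stg2_2 = pair_set_epi16(6270, -15137);
 *   __m128i lo = butterfly_half(_mm_unpacklo_epi16(in1, in3), stg2_2);
 */
```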
/external/libvpx/libvpx/vp9/encoder/mips/msa/
vp9_fdct_msa.h 25 v8i16 coeff1_m = { cospi_8_64, -cospi_8_64, cospi_16_64, -cospi_16_64, \
