Home
last modified time | relevance | path

Searched refs: cospi_24_64 (Results 1 – 25 of 35) sorted by relevance

12

/external/libvpx/libvpx/vpx_dsp/arm/
idct32x32_34_add_neon.c:99 cospi_24_64); in vpx_idct32_6_neon()
100 s2[14] = multiply_accumulate_shift_and_narrow_s16(s2[8], cospi_24_64, s2[15], in vpx_idct32_6_neon()
117 cospi_24_64); in vpx_idct32_6_neon()
118 s1[29] = multiply_accumulate_shift_and_narrow_s16(s1[17], cospi_24_64, s1[30], in vpx_idct32_6_neon()
122 cospi_24_64); in vpx_idct32_6_neon()
123 s1[28] = multiply_accumulate_shift_and_narrow_s16(s1[16], cospi_24_64, s1[31], in vpx_idct32_6_neon()
126 s1[20] = multiply_accumulate_shift_and_narrow_s16(s2[20], -cospi_24_64, in vpx_idct32_6_neon()
129 cospi_24_64); in vpx_idct32_6_neon()
131 s1[21] = multiply_accumulate_shift_and_narrow_s16(s2[21], -cospi_24_64, in vpx_idct32_6_neon()
134 cospi_24_64); in vpx_idct32_6_neon()
[all …]
idct32x32_135_add_neon.c:174 s4[2] = multiply_shift_and_narrow_s16(in[8], cospi_24_64); in vpx_idct32_12_neon()
178 cospi_24_64); in vpx_idct32_12_neon()
179 s4[14] = multiply_accumulate_shift_and_narrow_s16(s2[8], cospi_24_64, s2[15], in vpx_idct32_12_neon()
182 s4[10] = multiply_accumulate_shift_and_narrow_s16(s3[10], -cospi_24_64, in vpx_idct32_12_neon()
185 cospi_24_64); in vpx_idct32_12_neon()
223 cospi_24_64); in vpx_idct32_12_neon()
224 s5[29] = multiply_accumulate_shift_and_narrow_s16(s4[18], cospi_24_64, s4[29], in vpx_idct32_12_neon()
228 cospi_24_64); in vpx_idct32_12_neon()
229 s5[28] = multiply_accumulate_shift_and_narrow_s16(s4[19], cospi_24_64, s4[28], in vpx_idct32_12_neon()
232 s5[20] = multiply_accumulate_shift_and_narrow_s16(s4[20], -cospi_24_64, in vpx_idct32_12_neon()
[all …]
highbd_idct32x32_34_add_neon.c:106 s2[15], cospi_24_64); in vpx_highbd_idct32_6_neon()
107 s2[14] = multiply_accumulate_shift_and_narrow_s32_dual(s2[8], cospi_24_64, in vpx_highbd_idct32_6_neon()
124 s1[30], cospi_24_64); in vpx_highbd_idct32_6_neon()
125 s1[29] = multiply_accumulate_shift_and_narrow_s32_dual(s1[17], cospi_24_64, in vpx_highbd_idct32_6_neon()
129 s1[31], cospi_24_64); in vpx_highbd_idct32_6_neon()
130 s1[28] = multiply_accumulate_shift_and_narrow_s32_dual(s1[16], cospi_24_64, in vpx_highbd_idct32_6_neon()
133 s1[20] = multiply_accumulate_shift_and_narrow_s32_dual(s2[20], -cospi_24_64, in vpx_highbd_idct32_6_neon()
136 s2[27], cospi_24_64); in vpx_highbd_idct32_6_neon()
138 s1[21] = multiply_accumulate_shift_and_narrow_s32_dual(s2[21], -cospi_24_64, in vpx_highbd_idct32_6_neon()
141 s2[26], cospi_24_64); in vpx_highbd_idct32_6_neon()
[all …]
highbd_idct32x32_135_add_neon.c:184 s4[2] = multiply_shift_and_narrow_s32_dual(in[8], cospi_24_64); in vpx_highbd_idct32_12_neon()
188 s2[15], cospi_24_64); in vpx_highbd_idct32_12_neon()
189 s4[14] = multiply_accumulate_shift_and_narrow_s32_dual(s2[8], cospi_24_64, in vpx_highbd_idct32_12_neon()
192 s4[10] = multiply_accumulate_shift_and_narrow_s32_dual(s3[10], -cospi_24_64, in vpx_highbd_idct32_12_neon()
195 s3[13], cospi_24_64); in vpx_highbd_idct32_12_neon()
233 s4[29], cospi_24_64); in vpx_highbd_idct32_12_neon()
234 s5[29] = multiply_accumulate_shift_and_narrow_s32_dual(s4[18], cospi_24_64, in vpx_highbd_idct32_12_neon()
238 s4[28], cospi_24_64); in vpx_highbd_idct32_12_neon()
239 s5[28] = multiply_accumulate_shift_and_narrow_s32_dual(s4[19], cospi_24_64, in vpx_highbd_idct32_12_neon()
242 s5[20] = multiply_accumulate_shift_and_narrow_s32_dual(s4[20], -cospi_24_64, in vpx_highbd_idct32_12_neon()
[all …]
idct4x4_add_neon.asm:45 ; cospi_24_64 = 6270
70 vdup.16 d22, r12 ; replicate cospi_24_64
75 vmull.s16 q15, d17, d22 ; input[1] * cospi_24_64
85 ; input[1] * cospi_24_64 - input[3] * cospi_8_64;
86 ; input[1] * cospi_8_64 + input[3] * cospi_24_64;
128 vmull.s16 q15, d17, d22 ; input[1] * cospi_24_64
136 ; input[1] * cospi_24_64 - input[3] * cospi_8_64;
137 ; input[1] * cospi_8_64 + input[3] * cospi_24_64;
fdct_neon.c:63 const int32x4_t s_3_cospi_24_64 = vmull_n_s16(s_3, (int16_t)cospi_24_64); in vpx_fdct4x4_neon()
66 vmlal_n_s16(s_3_cospi_8_64, s_2, (int16_t)cospi_24_64); in vpx_fdct4x4_neon()
fwd_txfm_neon.c:51 int32x4_t v_t2_lo = vmull_n_s16(vget_low_s16(v_x2), (int16_t)cospi_24_64); in vpx_fdct8x8_neon()
52 int32x4_t v_t2_hi = vmull_n_s16(vget_high_s16(v_x2), (int16_t)cospi_24_64); in vpx_fdct8x8_neon()
53 int32x4_t v_t3_lo = vmull_n_s16(vget_low_s16(v_x3), (int16_t)cospi_24_64); in vpx_fdct8x8_neon()
54 int32x4_t v_t3_hi = vmull_n_s16(vget_high_s16(v_x3), (int16_t)cospi_24_64); in vpx_fdct8x8_neon()
highbd_idct32x32_1024_add_neon.c:444 do_butterfly(q[14], q[13], cospi_24_64, cospi_8_64, &q[0], &q[1]); in vpx_highbd_idct32_32_neon()
450 do_butterfly(q[14], q[13], cospi_24_64, cospi_8_64, &q[4], &q[6]); in vpx_highbd_idct32_32_neon()
509 do_butterfly(q[14], q[13], -cospi_8_64, -cospi_24_64, &q[5], &q[6]); in vpx_highbd_idct32_32_neon()
512 do_butterfly(q[14], q[13], -cospi_8_64, -cospi_24_64, &q[0], &q[1]); in vpx_highbd_idct32_32_neon()
547 do_butterfly(q[14], q[13], cospi_24_64, cospi_8_64, &q[1], &q[3]); in vpx_highbd_idct32_32_neon()
561 do_butterfly(q[14], q[13], -cospi_8_64, -cospi_24_64, &q[4], &q[7]); in vpx_highbd_idct32_32_neon()
601 do_butterfly(q[14], q[13], cospi_24_64, cospi_8_64, &q[14], &q[6]); in vpx_highbd_idct32_32_neon()
/external/libvpx/libvpx/vpx_dsp/mips/
itrans16_dspr2.c:75 [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64), in idct16_rows_dspr2()
137 [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64)); in idct16_rows_dspr2()
198 [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64)); in idct16_rows_dspr2()
460 [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64), in idct16_cols_add_blk_dspr2()
522 [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64)); in idct16_cols_add_blk_dspr2()
583 [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64)); in idct16_cols_add_blk_dspr2()
1163 s4 = x4 * cospi_8_64 + x5 * cospi_24_64; in iadst16_dspr2()
1164 s5 = x4 * cospi_24_64 - x5 * cospi_8_64; in iadst16_dspr2()
1165 s6 = -x6 * cospi_24_64 + x7 * cospi_8_64; in iadst16_dspr2()
1166 s7 = x6 * cospi_8_64 + x7 * cospi_24_64; in iadst16_dspr2()
[all …]
itrans32_cols_dspr2.c:343 [cospi_8_64] "r"(cospi_8_64), [cospi_24_64] "r"(cospi_24_64)); in vpx_idct32_cols_add_blk_dspr2()
399 [cospi_8_64] "r"(cospi_8_64), [cospi_24_64] "r"(cospi_24_64)); in vpx_idct32_cols_add_blk_dspr2()
469 [step1_29] "r"(step1_29), [cospi_24_64] "r"(cospi_24_64), in vpx_idct32_cols_add_blk_dspr2()
494 [step1_28] "r"(step1_28), [cospi_24_64] "r"(cospi_24_64), in vpx_idct32_cols_add_blk_dspr2()
519 [step1_27] "r"(step1_27), [cospi_24_64] "r"(cospi_24_64), in vpx_idct32_cols_add_blk_dspr2()
544 [step1_26] "r"(step1_26), [cospi_24_64] "r"(cospi_24_64), in vpx_idct32_cols_add_blk_dspr2()
624 [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64), in vpx_idct32_cols_add_blk_dspr2()
itrans8_dspr2.c:187 [cospi_24_64] "r"(cospi_24_64), [output] "r"(output), in idct8_rows_dspr2()
447 [cospi_24_64] "r"(cospi_24_64), [input] "r"(input), in idct8_columns_add_blk_dspr2()
656 s4 = cospi_8_64 * x4 + cospi_24_64 * x5; in iadst8_dspr2()
657 s5 = cospi_24_64 * x4 - cospi_8_64 * x5; in iadst8_dspr2()
658 s6 = -cospi_24_64 * x6 + cospi_8_64 * x7; in iadst8_dspr2()
659 s7 = cospi_8_64 * x6 + cospi_24_64 * x7; in iadst8_dspr2()
itrans32_dspr2.c:387 [cospi_8_64] "r"(cospi_8_64), [cospi_24_64] "r"(cospi_24_64)); in idct32_rows_dspr2()
443 [cospi_8_64] "r"(cospi_8_64), [cospi_24_64] "r"(cospi_24_64)); in idct32_rows_dspr2()
513 [step1_29] "r"(step1_29), [cospi_24_64] "r"(cospi_24_64), in idct32_rows_dspr2()
538 [step1_28] "r"(step1_28), [cospi_24_64] "r"(cospi_24_64), in idct32_rows_dspr2()
563 [step1_27] "r"(step1_27), [cospi_24_64] "r"(cospi_24_64), in idct32_rows_dspr2()
588 [step1_26] "r"(step1_26), [cospi_24_64] "r"(cospi_24_64), in idct32_rows_dspr2()
668 [cospi_24_64] "r"(cospi_24_64), [cospi_8_64] "r"(cospi_8_64), in idct32_rows_dspr2()
itrans4_dspr2.c:91 [cospi_24_64] "r"(cospi_24_64), [input] "r"(input)); in vpx_idct4_rows_dspr2()
213 [cospi_24_64] "r"(cospi_24_64), [input] "r"(input), in vpx_idct4_columns_add_blk_dspr2()
fwd_dct32x32_msa.c:82 DOTP_CONST_PAIR(in0, in1, cospi_24_64, cospi_8_64, temp1, temp0); in fdct8x32_1d_column_even_store()
104 DOTP_CONST_PAIR(vec6, vec1, cospi_24_64, cospi_8_64, in1, in3); in fdct8x32_1d_column_even_store()
118 DOTP_CONST_PAIR((-vec2), vec5, cospi_24_64, cospi_8_64, in2, in1); in fdct8x32_1d_column_even_store()
182 DOTP_CONST_PAIR(in26, in21, cospi_24_64, cospi_8_64, in18, in29); in fdct8x32_1d_column_odd_store()
183 DOTP_CONST_PAIR(in27, in20, cospi_24_64, cospi_8_64, in19, in28); in fdct8x32_1d_column_odd_store()
216 DOTP_CONST_PAIR(-in16, in27, cospi_24_64, cospi_8_64, in20, in27); in fdct8x32_1d_column_odd_store()
217 DOTP_CONST_PAIR(-in17, in26, cospi_24_64, cospi_8_64, in21, in26); in fdct8x32_1d_column_odd_store()
335 DOTP_CONST_PAIR_W(vec5_r, vec7_r, vec0_r, vec1_r, cospi_24_64, cospi_8_64, in fdct8x32_1d_row_even_4x()
363 DOTP_CONST_PAIR(vec6, vec1, cospi_24_64, cospi_8_64, in1, in3); in fdct8x32_1d_row_even_4x()
377 DOTP_CONST_PAIR((-vec2), vec5, cospi_24_64, cospi_8_64, in2, in1); in fdct8x32_1d_row_even_4x()
[all …]
inv_txfm_msa.h:26 cospi_24_64, -cospi_24_64, 0, 0 }; \
122 c2_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64); \
123 c3_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64); \
239 k2_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64); \
240 k3_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64); \
256 -cospi_26_64, cospi_8_64, cospi_24_64, -cospi_8_64 }; \
258 -cospi_24_64, cospi_8_64, cospi_16_64, -cospi_16_64, 0, 0, 0, 0 \
386 k0_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64); \
387 k1_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64); \
388 k2_m = VP9_SET_COSPI_PAIR(-cospi_24_64, cospi_8_64); \
idct32x32_msa.c:59 DOTP_CONST_PAIR(reg2, reg6, cospi_24_64, cospi_8_64, reg2, reg6); in idct32x8_row_even_process_store()
87 DOTP_CONST_PAIR(reg7, reg0, cospi_24_64, cospi_8_64, reg0, reg7); in idct32x8_row_even_process_store()
88 DOTP_CONST_PAIR((-reg6), reg1, cospi_24_64, cospi_8_64, reg6, reg1); in idct32x8_row_even_process_store()
160 DOTP_CONST_PAIR(vec1, vec0, cospi_24_64, cospi_8_64, vec0, vec1); in idct32x8_row_odd_process_store()
169 DOTP_CONST_PAIR(vec2, vec3, cospi_24_64, cospi_8_64, vec2, vec3); in idct32x8_row_odd_process_store()
196 DOTP_CONST_PAIR(vec3, vec2, -cospi_8_64, cospi_24_64, vec0, vec1); in idct32x8_row_odd_process_store()
205 DOTP_CONST_PAIR(reg3, reg2, -cospi_8_64, cospi_24_64, reg0, reg1); in idct32x8_row_odd_process_store()
370 DOTP_CONST_PAIR(reg2, reg6, cospi_24_64, cospi_8_64, reg2, reg6); in idct8x32_column_even_process_store()
400 DOTP_CONST_PAIR(reg7, reg0, cospi_24_64, cospi_8_64, reg0, reg7); in idct8x32_column_even_process_store()
401 DOTP_CONST_PAIR((-reg6), reg1, cospi_24_64, cospi_8_64, reg6, reg1); in idct8x32_column_even_process_store()
[all …]
idct16x16_msa.c:32 DOTP_CONST_PAIR(reg4, reg12, cospi_24_64, cospi_8_64, reg4, reg12); in vpx_idct16_1d_rows_msa()
65 DOTP_CONST_PAIR(reg7, reg9, cospi_24_64, cospi_8_64, reg7, reg9); in vpx_idct16_1d_rows_msa()
66 DOTP_CONST_PAIR((-reg5), (-reg11), cospi_8_64, cospi_24_64, reg5, reg11); in vpx_idct16_1d_rows_msa()
124 DOTP_CONST_PAIR(reg4, reg12, cospi_24_64, cospi_8_64, reg4, reg12); in vpx_idct16_1d_columns_addblk_msa()
162 DOTP_CONST_PAIR(reg7, reg9, cospi_24_64, cospi_8_64, reg7, reg9); in vpx_idct16_1d_columns_addblk_msa()
163 DOTP_CONST_PAIR((-reg5), (-reg11), cospi_8_64, cospi_24_64, reg5, reg11); in vpx_idct16_1d_columns_addblk_msa()
411 k0 = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64); in vpx_iadst16_1d_columns_addblk_msa()
412 k1 = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64); in vpx_iadst16_1d_columns_addblk_msa()
413 k2 = VP9_SET_COSPI_PAIR(-cospi_24_64, cospi_8_64); in vpx_iadst16_1d_columns_addblk_msa()
fwd_txfm_msa.h:23 cospi_16_64, -cospi_16_64, cospi_8_64, cospi_24_64, -cospi_8_64, 0, 0, 0 \
63 v8i16 coeff_m = { cospi_16_64, -cospi_16_64, cospi_8_64, cospi_24_64, \
123 v8i16 coeff_m = { cospi_16_64, -cospi_16_64, cospi_8_64, cospi_24_64, \
187 v8i16 coeff_m = { cospi_16_64, -cospi_16_64, cospi_8_64, cospi_24_64, \
188 -cospi_8_64, -cospi_24_64, cospi_12_64, cospi_20_64 }; \
/external/libvpx/libvpx/vp9/encoder/
vp9_dct.c:36 temp1 = step[2] * cospi_24_64 + step[3] * cospi_8_64; in fdct4()
37 temp2 = -step[2] * cospi_8_64 + step[3] * cospi_24_64; in fdct4()
64 t2 = x2 * cospi_24_64 + x3 * cospi_8_64; in fdct8()
65 t3 = -x2 * cospi_8_64 + x3 * cospi_24_64; in fdct8()
143 t2 = x3 * cospi_8_64 + x2 * cospi_24_64; in fdct16()
144 t3 = x3 * cospi_24_64 - x2 * cospi_8_64; in fdct16()
194 temp1 = step3[1] * -cospi_8_64 + step3[6] * cospi_24_64; in fdct16()
195 temp2 = step3[2] * cospi_24_64 + step3[5] * cospi_8_64; in fdct16()
198 temp1 = step3[2] * cospi_8_64 - step3[5] * cospi_24_64; in fdct16()
199 temp2 = step3[1] * cospi_24_64 + step3[6] * cospi_8_64; in fdct16()
[all …]
/external/libvpx/libvpx/vpx_dsp/
fwd_txfm.c:60 temp1 = step[2] * cospi_24_64 + step[3] * cospi_8_64; in vpx_fdct4x4_c()
61 temp2 = -step[2] * cospi_8_64 + step[3] * cospi_24_64; in vpx_fdct4x4_c()
134 t2 = x2 * cospi_24_64 + x3 * cospi_8_64; in vpx_fdct8x8_c()
135 t3 = -x2 * cospi_8_64 + x3 * cospi_24_64; in vpx_fdct8x8_c()
268 t2 = x3 * cospi_8_64 + x2 * cospi_24_64; in vpx_fdct16x16_c()
269 t3 = x3 * cospi_24_64 - x2 * cospi_8_64; in vpx_fdct16x16_c()
318 temp1 = step3[1] * -cospi_8_64 + step3[6] * cospi_24_64; in vpx_fdct16x16_c()
319 temp2 = step3[2] * cospi_24_64 + step3[5] * cospi_8_64; in vpx_fdct16x16_c()
322 temp1 = step3[2] * cospi_8_64 - step3[5] * cospi_24_64; in vpx_fdct16x16_c()
323 temp2 = step3[1] * cospi_24_64 + step3[6] * cospi_8_64; in vpx_fdct16x16_c()
[all …]
inv_txfm.c:141 temp1 = input[1] * cospi_24_64 - input[3] * cospi_8_64; in idct4_c()
142 temp2 = input[1] * cospi_8_64 + input[3] * cospi_24_64; in idct4_c()
234 s4 = (int)(cospi_8_64 * x4 + cospi_24_64 * x5); in iadst8_c()
235 s5 = (int)(cospi_24_64 * x4 - cospi_8_64 * x5); in iadst8_c()
236 s6 = (int)(-cospi_24_64 * x6 + cospi_8_64 * x7); in iadst8_c()
237 s7 = (int)(cospi_8_64 * x6 + cospi_24_64 * x7); in iadst8_c()
292 temp1 = step1[1] * cospi_24_64 - step1[3] * cospi_8_64; in idct8_c()
293 temp2 = step1[1] * cospi_8_64 + step1[3] * cospi_24_64; in idct8_c()
487 s4 = x4 * cospi_8_64 + x5 * cospi_24_64; in iadst16_c()
488 s5 = x4 * cospi_24_64 - x5 * cospi_8_64; in iadst16_c()
[all …]
/external/libvpx/libvpx/vpx_dsp/x86/
fwd_txfm_impl_sse2.h:53 octa_set_epi16(cospi_8_64, cospi_24_64, cospi_8_64, cospi_24_64, in FDCT4x4_2D()
54 cospi_24_64, -cospi_8_64, cospi_24_64, -cospi_8_64); in FDCT4x4_2D()
56 octa_set_epi16(cospi_24_64, -cospi_8_64, cospi_24_64, -cospi_8_64, in FDCT4x4_2D()
57 cospi_8_64, cospi_24_64, cospi_8_64, cospi_24_64); in FDCT4x4_2D()
65 octa_set_epi16(cospi_8_64, cospi_24_64, cospi_8_64, cospi_24_64, in FDCT4x4_2D()
66 -cospi_8_64, -cospi_24_64, -cospi_8_64, -cospi_24_64); in FDCT4x4_2D()
68 octa_set_epi16(cospi_24_64, -cospi_8_64, cospi_24_64, -cospi_8_64, in FDCT4x4_2D()
69 -cospi_24_64, cospi_8_64, -cospi_24_64, cospi_8_64); in FDCT4x4_2D()
266 const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64); in FDCT8x8_2D()
267 const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64); in FDCT8x8_2D()
[all …]
inv_txfm_ssse3.c:28 const __m128i stg2_2 = pair_set_epi16(cospi_24_64, -cospi_8_64); in vpx_idct8x8_64_add_ssse3()
29 const __m128i stg2_3 = pair_set_epi16(cospi_8_64, cospi_24_64); in vpx_idct8x8_64_add_ssse3()
231 const __m128i stg2_2 = pair_set_epi16(2 * cospi_24_64, 2 * cospi_24_64); in vpx_idct8x8_12_add_ssse3()
447 const __m128i stg4_4 = pair_set_epi16(-cospi_8_64, cospi_24_64); in idct32_34_first_half()
448 const __m128i stg4_5 = pair_set_epi16(cospi_24_64, cospi_8_64); in idct32_34_first_half()
449 const __m128i stg4_6 = pair_set_epi16(-cospi_24_64, -cospi_8_64); in idct32_34_first_half()
539 const __m128i stg4_4 = pair_set_epi16(-cospi_8_64, cospi_24_64); in idct32_34_second_half()
540 const __m128i stg4_5 = pair_set_epi16(cospi_24_64, cospi_8_64); in idct32_34_second_half()
541 const __m128i stg4_6 = pair_set_epi16(-cospi_24_64, -cospi_8_64); in idct32_34_second_half()
695 const __m128i stk4_2 = pair_set_epi16(2 * cospi_24_64, 2 * cospi_24_64); in idct32_8x32_135_quarter_1()
[all …]
inv_txfm_sse2.c:75 const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64); in idct4_sse2()
76 const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64); in idct4_sse2()
259 const __m128i stg2_2 = pair_set_epi16(cospi_24_64, -cospi_8_64); in vpx_idct8x8_64_add_sse2()
260 const __m128i stg2_3 = pair_set_epi16(cospi_8_64, cospi_24_64); in vpx_idct8x8_64_add_sse2()
348 const __m128i stg2_2 = pair_set_epi16(cospi_24_64, -cospi_8_64); in idct8_sse2()
349 const __m128i stg2_3 = pair_set_epi16(cospi_8_64, cospi_24_64); in idct8_sse2()
374 const __m128i k__cospi_p08_p24 = pair_set_epi16(cospi_8_64, cospi_24_64); in iadst8_sse2()
375 const __m128i k__cospi_p24_m08 = pair_set_epi16(cospi_24_64, -cospi_8_64); in iadst8_sse2()
376 const __m128i k__cospi_m24_p08 = pair_set_epi16(-cospi_24_64, cospi_8_64); in iadst8_sse2()
604 const __m128i stg2_2 = pair_set_epi16(cospi_24_64, -cospi_8_64); in vpx_idct8x8_12_add_sse2()
[all …]
/external/libvpx/libvpx/vp9/encoder/mips/msa/
vp9_fdct_msa.h:26 cospi_24_64, -cospi_24_64, 0, 0 }; \

12