Home
last modified time | relevance | path

Searched refs:cospi_4_64 (Results 1 – 16 of 16) sorted by relevance

/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/mips/dspr2/
Dvp9_itrans16_dspr2.c269 [cospi_4_64] "r" (cospi_4_64), [cospi_28_64] "r" (cospi_28_64), in idct16_rows_dspr2()
666 [cospi_4_64] "r" (cospi_4_64), [cospi_28_64] "r" (cospi_28_64), in idct16_cols_add_blk_dspr2()
987 s8 = x8 * cospi_4_64 + x9 * cospi_28_64; in iadst16()
988 s9 = x8 * cospi_28_64 - x9 * cospi_4_64; in iadst16()
991 s12 = - x12 * cospi_28_64 + x13 * cospi_4_64; in iadst16()
992 s13 = x12 * cospi_4_64 + x13 * cospi_28_64; in iadst16()
Dvp9_itrans32_dspr2.c160 [cospi_4_64] "r" (cospi_4_64), [cospi_17_64] "r" (cospi_17_64), in idct32_rows_dspr2()
222 [cospi_4_64] "r" (cospi_4_64), [cospi_7_64] "r" (cospi_7_64), in idct32_rows_dspr2()
738 [cospi_4_64] "r" (cospi_4_64), [cospi_28_64] "r" (cospi_28_64), in idct32_rows_dspr2()
Dvp9_itrans32_cols_dspr2.c116 [cospi_4_64] "r" (cospi_4_64), [cospi_17_64] "r" (cospi_17_64), in vp9_idct32_cols_add_blk_dspr2()
177 [cospi_4_64] "r" (cospi_4_64), [cospi_7_64] "r" (cospi_7_64), in vp9_idct32_cols_add_blk_dspr2()
665 [cospi_4_64] "r" (cospi_4_64), [cospi_28_64] "r" (cospi_28_64), in vp9_idct32_cols_add_blk_dspr2()
Dvp9_itrans8_dspr2.c192 [cospi_4_64] "r" (cospi_4_64), [cospi_12_64] "r" (cospi_12_64), in idct8_rows_dspr2()
441 [cospi_4_64] "r" (cospi_4_64), [cospi_12_64] "r" (cospi_12_64), in idct8_columns_add_blk_dspr2()
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/encoder/
Dvp9_dct.c233 t0 = x0 * cospi_28_64 + x3 * cospi_4_64; in fdct8()
236 t3 = x3 * cospi_28_64 + x0 * -cospi_4_64; in fdct8()
293 t0 = x0 * cospi_28_64 + x3 * cospi_4_64; in vp9_fdct8x8_c()
296 t3 = x3 * cospi_28_64 + x0 * -cospi_4_64; in vp9_fdct8x8_c()
417 t0 = x0 * cospi_28_64 + x3 * cospi_4_64; in vp9_fdct16x16_c()
420 t3 = x3 * cospi_28_64 + x0 * -cospi_4_64; in vp9_fdct16x16_c()
726 t0 = x0 * cospi_28_64 + x3 * cospi_4_64; in fdct16()
729 t3 = x3 * cospi_28_64 + x0 * -cospi_4_64; in fdct16()
862 s8 = x8 * cospi_4_64 + x9 * cospi_28_64; in fadst16()
863 s9 = x8 * cospi_28_64 - x9 * cospi_4_64; in fadst16()
[all …]
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/arm/neon/
Dvp9_idct8x8_add_neon.asm27 vdup.16 d1, r4 ; duplicate cospi_4_64
39 ; input[1]*cospi_28_64-input[7]*cospi_4_64
55 ; input[1] * cospi_4_64
63 ; input[1]*cospi_4_64+input[7]*cospi_28_64
222 ; generate cospi_4_64 = 16069
334 ; generate cospi_4_64 = 16069
367 vdup.16 q1, r12 ; duplicate cospi_4_64*2
376 ; dct_const_round_shift(input[1] * cospi_4_64)
Dvp9_iht8x8_add_neon.asm76 ; generate cospi_4_64 = 16069
126 vdup.16 d1, r4 ; duplicate cospi_4_64
138 ; input[1]*cospi_28_64-input[7]*cospi_4_64
154 ; input[1] * cospi_4_64
162 ; input[1]*cospi_4_64+input[7]*cospi_28_64
Dvp9_idct32x32_add_neon.asm16 cospi_4_64 EQU 16069 define
487 ;temp1 = step1b[30][i] * cospi_28_64 - step1b[17][i] * cospi_4_64;
488 ;temp2 = step1b[30][i] * cospi_4_64 + step1b[17][i] * cospi_28_64;
491 DO_BUTTERFLY_STD cospi_28_64, cospi_4_64, d10, d11, d14, d15
522 ;temp1 = step1b[18][i] * (-cospi_4_64) - step1b[29][i] * (-cospi_28_64);
523 ;temp2 = step1b[18][i] * (-cospi_28_64) + step1b[29][i] * (-cospi_4_64);
526 DO_BUTTERFLY_STD (-cospi_4_64), (-cospi_28_64), d2, d3, d0, d1
891 ;temp1 = input[4 * 32] * cospi_28_64 - input[28 * 32] * cospi_4_64;
892 ;temp2 = input[4 * 32] * cospi_4_64 + input[28 * 32] * cospi_28_64;
896 DO_BUTTERFLY_STD cospi_28_64, cospi_4_64, d0, d1, d4, d5
Dvp9_idct16x16_add_neon.asm67 ; generate cospi_4_64 = 16069
76 vdup.16 d1, r12 ; duplicate cospi_4_64
91 ; step2[4] * cospi_4_64
95 ; temp1 = step2[4] * cospi_28_64 - step2[7] * cospi_4_64
99 ; temp2 = step2[4] * cospi_4_64 + step2[7] * cospi_28_64
817 ; generate cospi_4_64*2 = 32138
826 vdup.16 q1, r12 ; duplicate cospi_4_64*2
840 ; dct_const_round_shift(step2[4] * cospi_4_64);
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/
Dvp9_idct.c167 temp1 = input[1] * cospi_28_64 - input[7] * cospi_4_64; in idct8()
168 temp2 = input[1] * cospi_4_64 + input[7] * cospi_28_64; in idct8()
507 temp1 = step2[4] * cospi_28_64 - step2[7] * cospi_4_64; in idct16()
508 temp2 = step2[4] * cospi_4_64 + step2[7] * cospi_28_64; in idct16()
711 s8 = x8 * cospi_4_64 + x9 * cospi_28_64; in iadst16()
712 s9 = x8 * cospi_28_64 - x9 * cospi_4_64; in iadst16()
715 s12 = - x12 * cospi_28_64 + x13 * cospi_4_64; in iadst16()
716 s13 = x12 * cospi_4_64 + x13 * cospi_28_64; in iadst16()
995 temp1 = step2[4] * cospi_28_64 - step2[7] * cospi_4_64; in idct32()
996 temp2 = step2[4] * cospi_4_64 + step2[7] * cospi_28_64; in idct32()
[all …]
Dvp9_idct.h47 static const int cospi_4_64 = 16069; variable
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/x86/
Dvp9_idct_intrin_sse2.c544 const __m128i stg1_0 = pair_set_epi16(cospi_28_64, -cospi_4_64); in vp9_idct8x8_64_add_sse2()
545 const __m128i stg1_1 = pair_set_epi16(cospi_4_64, cospi_28_64); in vp9_idct8x8_64_add_sse2()
679 const __m128i stg1_0 = pair_set_epi16(cospi_28_64, -cospi_4_64); in idct8_sse2()
680 const __m128i stg1_1 = pair_set_epi16(cospi_4_64, cospi_28_64); in idct8_sse2()
1002 const __m128i stg1_0 = pair_set_epi16(cospi_28_64, -cospi_4_64); in vp9_idct8x8_10_add_sse2()
1003 const __m128i stg1_1 = pair_set_epi16(cospi_4_64, cospi_28_64); in vp9_idct8x8_10_add_sse2()
1390 const __m128i stg3_0 = pair_set_epi16(cospi_28_64, -cospi_4_64); in vp9_idct16x16_256_add_sse2()
1391 const __m128i stg3_1 = pair_set_epi16(cospi_4_64, cospi_28_64); in vp9_idct16x16_256_add_sse2()
1612 const __m128i k__cospi_p04_p28 = pair_set_epi16(cospi_4_64, cospi_28_64); in iadst16_8col()
1613 const __m128i k__cospi_p28_m04 = pair_set_epi16(cospi_28_64, -cospi_4_64); in iadst16_8col()
[all …]
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/encoder/x86/
Dvp9_dct_avx2.c289 const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64); in vp9_fdct8x8_avx2()
290 const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64); in vp9_fdct8x8_avx2()
671 const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64); in fdct8_avx2()
672 const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64); in fdct8_avx2()
1091 const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64); in vp9_fdct16x16_avx2()
1092 const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64); in vp9_fdct16x16_avx2()
1752 const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64); in fdct16_8col_avx2()
1753 const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64); in fdct16_8col_avx2()
2084 const __m128i k__cospi_p04_p28 = pair_set_epi16(cospi_4_64, cospi_28_64); in fadst16_8col_avx2()
2085 const __m128i k__cospi_p28_m04 = pair_set_epi16(cospi_28_64, -cospi_4_64); in fadst16_8col_avx2()
[all …]
Dvp9_dct_sse2.c390 const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64); in vp9_fdct8x8_sse2()
391 const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64); in vp9_fdct8x8_sse2()
772 const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64); in fdct8_sse2()
773 const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64); in fdct8_sse2()
1192 const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64); in vp9_fdct16x16_sse2()
1193 const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64); in vp9_fdct16x16_sse2()
1853 const __m128i k__cospi_p28_p04 = pair_set_epi16(cospi_28_64, cospi_4_64); in fdct16_8col()
1854 const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64); in fdct16_8col()
2185 const __m128i k__cospi_p04_p28 = pair_set_epi16(cospi_4_64, cospi_28_64); in fadst16_8col()
2186 const __m128i k__cospi_p28_m04 = pair_set_epi16(cospi_28_64, -cospi_4_64); in fadst16_8col()
[all …]
Dvp9_dct32x32_sse2.c51 const __m128i k__cospi_m04_p28 = pair_set_epi16(-cospi_4_64, cospi_28_64); in FDCT32x32_2D()
52 const __m128i k__cospi_p28_p04 = pair_set_epi16(+cospi_28_64, cospi_4_64); in FDCT32x32_2D()
53 const __m128i k__cospi_m28_m04 = pair_set_epi16(-cospi_28_64, -cospi_4_64); in FDCT32x32_2D()
1728 const __m128i k32_p28_p04 = pair_set_epi32(cospi_28_64, cospi_4_64); in FDCT32x32_2D()
1731 const __m128i k32_m04_p28 = pair_set_epi32(-cospi_4_64, cospi_28_64); in FDCT32x32_2D()
1854 const __m128i k32_m04_p28 = pair_set_epi32(-cospi_4_64, cospi_28_64); in FDCT32x32_2D()
1855 const __m128i k32_m28_m04 = pair_set_epi32(-cospi_28_64, -cospi_4_64); in FDCT32x32_2D()
1860 const __m128i k32_p28_p04 = pair_set_epi32(cospi_28_64, cospi_4_64); in FDCT32x32_2D()
Dvp9_dct32x32_avx2.c60 const __m256i k__cospi_m04_p28 = pair256_set_epi16(-cospi_4_64, cospi_28_64); in FDCT32x32_2D_AVX2()
61 const __m256i k__cospi_p28_p04 = pair256_set_epi16(+cospi_28_64, cospi_4_64); in FDCT32x32_2D_AVX2()
62 const __m256i k__cospi_m28_m04 = pair256_set_epi16(-cospi_28_64, -cospi_4_64); in FDCT32x32_2D_AVX2()
1737 const __m256i k32_p28_p04 = pair256_set_epi32(cospi_28_64, cospi_4_64); in FDCT32x32_2D_AVX2()
1740 const __m256i k32_m04_p28 = pair256_set_epi32(-cospi_4_64, cospi_28_64); in FDCT32x32_2D_AVX2()
1863 const __m256i k32_m04_p28 = pair256_set_epi32(-cospi_4_64, cospi_28_64); in FDCT32x32_2D_AVX2()
1864 const __m256i k32_m28_m04 = pair256_set_epi32(-cospi_28_64, -cospi_4_64); in FDCT32x32_2D_AVX2()
1869 const __m256i k32_p28_p04 = pair256_set_epi32(cospi_28_64, cospi_4_64); in FDCT32x32_2D_AVX2()