/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/arm/neon/ |
D | vp9_idct8x8_add_neon.asm | 47 ; dct_const_round_shift(input_dc * cospi_16_64) 51 ; dct_const_round_shift(input_dc * cospi_16_64) 71 ; dct_const_round_shift(input_dc * cospi_16_64) 76 vdup.16 d0, r7 ; duplicate cospi_16_64 78 ; dct_const_round_shift(input_dc * cospi_16_64) 82 ; input[0] * cospi_16_64 86 ; input[0] * cospi_16_64 90 ; (input[0] + input[2]) * cospi_16_64 94 ; (input[0] - input[2]) * cospi_16_64 101 ; dct_const_round_shift(input_dc * cospi_16_64) [all …]
|
D | vp9_iht8x8_add_neon.asm | 64 ; generate cospi_16_64 = 11585 88 ; generate cospi_16_64 = 11585 146 ; dct_const_round_shift(input_dc * cospi_16_64) 150 ; dct_const_round_shift(input_dc * cospi_16_64) 170 ; dct_const_round_shift(input_dc * cospi_16_64) 175 vdup.16 d0, r7 ; duplicate cospi_16_64 177 ; dct_const_round_shift(input_dc * cospi_16_64) 181 ; input[0] * cospi_16_64 185 ; input[0] * cospi_16_64 189 ; (input[0] + input[2]) * cospi_16_64 [all …]
|
D | vp9_dc_only_idct_add_neon.asm | 29 ; generate cospi_16_64 = 11585 33 ; dct_const_round_shift(input_dc * cospi_16_64) 34 mul r0, r0, r12 ; input_dc * cospi_16_64 38 ; dct_const_round_shift(out * cospi_16_64) 39 mul r0, r0, r12 ; out * cospi_16_64
|
D | vp9_idct4x4_1_add_neon.asm | 28 ; generate cospi_16_64 = 11585 32 ; out = dct_const_round_shift(input[0] * cospi_16_64) 33 mul r0, r0, r12 ; input[0] * cospi_16_64 37 ; out = dct_const_round_shift(out * cospi_16_64) 38 mul r0, r0, r12 ; out * cospi_16_64
|
D | vp9_idct8x8_1_add_neon.asm | 28 ; generate cospi_16_64 = 11585 32 ; out = dct_const_round_shift(input[0] * cospi_16_64) 33 mul r0, r0, r12 ; input[0] * cospi_16_64 37 ; out = dct_const_round_shift(out * cospi_16_64) 38 mul r0, r0, r12 ; out * cospi_16_64
|
D | vp9_idct16x16_add_neon.asm | 115 ; generate cospi_16_64 = 11585 148 vdup.16 d30, r3 ; cospi_16_64 150 ; step1[0] * cospi_16_64 154 ; step1[1] * cospi_16_64 165 ; temp1 = (step1[0] + step1[1]) * cospi_16_64 169 ; temp2 = (step1[0] - step1[1]) * cospi_16_64 211 ; generate cospi_16_64 = 11585 221 vdup.16 d16, r3; ; duplicate cospi_16_64 223 ; step2[5] * cospi_16_64 227 ; step2[6] * cospi_16_64 [all …]
|
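The vp9_idct16x16_add_neon.asm hits above (step1[0] * cospi_16_64, step1[1] * cospi_16_64, then the sum and the difference) are the standard cospi_16_64 butterfly: each input is widened into a 32-bit product, the products are added and subtracted, and both results go through dct_const_round_shift. A minimal NEON-intrinsics sketch of that step, assuming DCT_CONST_BITS == 14 as in vp9_idct.h; the function and variable names are hypothetical, since the file itself is hand-written assembly:

    #include <arm_neon.h>

    /* temp1 = (step1[0] + step1[1]) * cospi_16_64
       temp2 = (step1[0] - step1[1]) * cospi_16_64
       out   = dct_const_round_shift(temp)          */
    static void cospi16_butterfly(int16x4_t a, int16x4_t b,
                                  int16x4_t *out_sum, int16x4_t *out_diff) {
      const int16x4_t c16 = vdup_n_s16(11585);      /* cospi_16_64 */
      const int32x4_t pa = vmull_s16(a, c16);       /* a * cospi_16_64 */
      const int32x4_t pb = vmull_s16(b, c16);       /* b * cospi_16_64 */
      /* Rounding narrow by 14 bits implements dct_const_round_shift()
         for DCT_CONST_BITS == 14 (with saturation to int16). */
      *out_sum  = vqrshrn_n_s32(vaddq_s32(pa, pb), 14);
      *out_diff = vqrshrn_n_s32(vsubq_s32(pa, pb), 14);
    }

Doing the two multiplies first and combining the 32-bit products avoids overflowing the 16-bit lanes, which is why the single products appear in the listing before the combined terms.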
D | vp9_idct32x32_1_add_neon.asm | 80 ; generate cospi_16_64 = 11585 84 ; out = dct_const_round_shift(input[0] * cospi_16_64) 85 mul r0, r0, r12 ; input[0] * cospi_16_64 89 ; out = dct_const_round_shift(out * cospi_16_64) 90 mul r0, r0, r12 ; out * cospi_16_64
|
D | vp9_idct4x4_add_neon.asm | 42 ; cospi_16_64 = 11585 = 0x2d41 59 vdup.16 d21, r3 ; replicate cospi_16_64 82 ; (input[0] + input[2]) * cospi_16_64; 83 ; (input[0] - input[2]) * cospi_16_64; 133 ; (input[0] + input[2]) * cospi_16_64; 134 ; (input[0] - input[2]) * cospi_16_64;
|
D | vp9_idct32x32_add_neon.asm | 28 cospi_16_64 EQU 11585 define 684 ;temp1 = (step1b[25][i] - step1b[22][i]) * cospi_16_64; 685 ;temp2 = (step1b[25][i] + step1b[22][i]) * cospi_16_64; 688 DO_BUTTERFLY_STD cospi_16_64, cospi_16_64, d26, d27, d28, d29 692 ;temp1 = (step1b[24][i] - step1b[23][i]) * cospi_16_64; 693 ;temp2 = (step1b[24][i] + step1b[23][i]) * cospi_16_64; 699 DO_BUTTERFLY_STD cospi_16_64, cospi_16_64, d26, d27, d28, d29 753 ;temp1 = (step1b[27][i] - step1b[20][i]) * cospi_16_64; 754 ;temp2 = (step1b[27][i] + step1b[20][i]) * cospi_16_64; 757 DO_BUTTERFLY_STD cospi_16_64, cospi_16_64, d26, d27, d28, d29 [all …]
|
D | vp9_iht4x4_add_neon.asm | 20 ; cospi_16_64. d2 must contain cospi_24_64. The output will be stored back 31 vmull.s16 q13, d23, d1 ; (input[0] + input[2]) * cospi_16_64 32 vmull.s16 q14, d24, d1 ; (input[0] - input[2]) * cospi_16_64 95 ; cospi_16_64 = 11585 = 0x2d41 104 vdup.16 d1, r3 ; duplicate cospi_16_64
|
D | vp9_idct16x16_1_add_neon.asm | 28 ; generate cospi_16_64 = 11585 32 ; out = dct_const_round_shift(input[0] * cospi_16_64) 33 mul r0, r0, r12 ; input[0] * cospi_16_64 37 ; out = dct_const_round_shift(out * cospi_16_64) 38 mul r0, r0, r12 ; out * cospi_16_64
|
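The *_1_add_neon.asm files above (4x4, 8x8, 16x16, 32x32) all implement the DC-only shortcut: the single nonzero coefficient goes through dct_const_round_shift(input[0] * cospi_16_64) twice, is rounded down to the block's output precision, and is added to every destination pixel. A C sketch of the 8x8 case, assuming DCT_CONST_BITS == 14 and the final shift of 5 that the 8x8 DC path uses; the function name and the clip_pixel helper here are illustrative, not taken from the library:

    #include <stdint.h>

    #define DCT_CONST_BITS 14
    static const int cospi_16_64 = 11585;

    static int16_t dct_const_round_shift(int input) {
      return (int16_t)((input + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS);
    }

    static uint8_t clip_pixel(int v) {
      return (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v);
    }

    /* DC-only 8x8 inverse transform + reconstruction. */
    static void idct8x8_1_add_sketch(const int16_t *input, uint8_t *dest, int stride) {
      int i, j, a1;
      int16_t out = dct_const_round_shift(input[0] * cospi_16_64);
      out = dct_const_round_shift(out * cospi_16_64);
      a1 = (out + 16) >> 5;             /* ROUND_POWER_OF_TWO(out, 5) */
      for (j = 0; j < 8; ++j) {
        for (i = 0; i < 8; ++i) dest[i] = clip_pixel(dest[i] + a1);
        dest += stride;
      }
    }

Scaling by cospi_16_64 twice is effectively the row pass and the column pass collapsed into two multiplies, which is all that is needed when only the DC coefficient is nonzero.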
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/encoder/ |
D | vp9_dct.c | 36 temp1 = (step[0] + step[1]) * cospi_16_64; in fdct4() 37 temp2 = (step[0] - step[1]) * cospi_16_64; in fdct4() 85 temp1 = (step[0] + step[1]) * cospi_16_64; in vp9_fdct4x4_c() 86 temp2 = (step[0] - step[1]) * cospi_16_64; in vp9_fdct4x4_c() 211 t0 = (x0 + x1) * cospi_16_64; in fdct8() 212 t1 = (x0 - x1) * cospi_16_64; in fdct8() 221 t0 = (s6 - s5) * cospi_16_64; in fdct8() 222 t1 = (s6 + s5) * cospi_16_64; in fdct8() 271 t0 = (x0 + x1) * cospi_16_64; in vp9_fdct8x8_c() 272 t1 = (x0 - x1) * cospi_16_64; in vp9_fdct8x8_c() [all …]
|
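vp9_dct.c shows the forward counterpart of the same butterfly: in fdct4() and fdct8() the sum and the difference of the first two intermediate values are scaled by cospi_16_64 and rounded with fdct_round_shift() to produce the DC coefficient and the middle output coefficient. A sketch of the even half of the 4-point forward transform, under the same DCT_CONST_BITS == 14 convention with fdct_round_shift spelled out locally (the odd half, which uses cospi_8_64 and cospi_24_64, is omitted):

    #include <stdint.h>

    #define DCT_CONST_BITS 14
    static const int cospi_16_64 = 11585;

    static int fdct_round_shift(int input) {
      return (input + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS;
    }

    /* Even half of a 4-point forward DCT: writes output[0] and output[2]. */
    static void fdct4_even_sketch(const int16_t input[4], int16_t output[4]) {
      const int step0 = input[0] + input[3];
      const int step1 = input[1] + input[2];
      const int temp1 = (step0 + step1) * cospi_16_64;   /* DC term    */
      const int temp2 = (step0 - step1) * cospi_16_64;   /* output[2]  */
      output[0] = (int16_t)fdct_round_shift(temp1);
      output[2] = (int16_t)fdct_round_shift(temp2);
    }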
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/mips/dspr2/ |
D | vp9_itrans32_dspr2.c | 523 [cospi_16_64] "r" (cospi_16_64) in idct32_rows_dspr2() 672 [cospi_16_64] "r" (cospi_16_64), in idct32_rows_dspr2() 739 [cospi_16_64] "r" (cospi_16_64) in idct32_rows_dspr2() 778 [cospi_16_64] "r" (cospi_16_64) in idct32_rows_dspr2() 781 temp21 = (step2_20 + step2_27) * cospi_16_64; in idct32_rows_dspr2() 794 [cospi_16_64] "r" (cospi_16_64) in idct32_rows_dspr2() 797 temp21 = (step2_21 + step2_26) * cospi_16_64; in idct32_rows_dspr2() 810 [cospi_16_64] "r" (cospi_16_64) in idct32_rows_dspr2() 813 temp21 = (step2_22 + step2_25) * cospi_16_64; in idct32_rows_dspr2() 826 [cospi_16_64] "r" (cospi_16_64) in idct32_rows_dspr2() [all …]
|
D | vp9_itrans32_cols_dspr2.c | 457 [step2_15] "r" (step2_15), [cospi_16_64] "r" (cospi_16_64) in vp9_idct32_cols_add_blk_dspr2() 605 [cospi_16_64] "r" (cospi_16_64) in vp9_idct32_cols_add_blk_dspr2() 666 [cospi_16_64] "r" (cospi_16_64) in vp9_idct32_cols_add_blk_dspr2() 705 [step2_27] "r" (step2_27), [cospi_16_64] "r" (cospi_16_64) in vp9_idct32_cols_add_blk_dspr2() 708 temp21 = (step2_20 + step2_27) * cospi_16_64; in vp9_idct32_cols_add_blk_dspr2() 720 [step2_21] "r" (step2_21), [cospi_16_64] "r" (cospi_16_64) in vp9_idct32_cols_add_blk_dspr2() 723 temp21 = (step2_21 + step2_26) * cospi_16_64; in vp9_idct32_cols_add_blk_dspr2() 735 [step2_22] "r" (step2_22), [cospi_16_64] "r" (cospi_16_64) in vp9_idct32_cols_add_blk_dspr2() 738 temp21 = (step2_22 + step2_25) * cospi_16_64; in vp9_idct32_cols_add_blk_dspr2() 750 [step2_23] "r" (step2_23), [cospi_16_64] "r" (cospi_16_64) in vp9_idct32_cols_add_blk_dspr2() [all …]
|
D | vp9_itrans16_dspr2.c | 81 [cospi_16_64] "r" (cospi_16_64) in idct16_rows_dspr2() 270 [cospi_16_64] "r" (cospi_16_64) in idct16_rows_dspr2() 321 [cospi_16_64] "r" (cospi_16_64) in idct16_rows_dspr2() 477 [cospi_16_64] "r" (cospi_16_64) in idct16_cols_add_blk_dspr2() 667 [cospi_16_64] "r" (cospi_16_64) in idct16_cols_add_blk_dspr2() 718 [cospi_16_64] "r" (cospi_16_64) in idct16_cols_add_blk_dspr2() 1049 s2 = (- cospi_16_64) * (x2 + x3); in iadst16() 1050 s3 = cospi_16_64 * (x2 - x3); in iadst16() 1051 s6 = cospi_16_64 * (x6 + x7); in iadst16() 1052 s7 = cospi_16_64 * (- x6 + x7); in iadst16() [all …]
|
D | vp9_itrans8_dspr2.c | 191 [cospi_16_64] "r" (cospi_16_64), [cospi_28_64] "r" (cospi_28_64), in idct8_rows_dspr2() 440 [cospi_16_64] "r" (cospi_16_64), [cospi_28_64] "r" (cospi_28_64), in idct8_columns_add_blk_dspr2() 529 s2 = cospi_16_64 * (x2 + x3); in iadst8_dspr2() 530 s3 = cospi_16_64 * (x2 - x3); in iadst8_dspr2() 531 s6 = cospi_16_64 * (x6 + x7); in iadst8_dspr2() 532 s7 = cospi_16_64 * (x6 - x7); in iadst8_dspr2()
|
D | vp9_itrans4_dspr2.c | 97 [cospi_8_64] "r" (cospi_8_64), [cospi_16_64] "r" (cospi_16_64), in vp9_idct4_rows_dspr2() 220 [cospi_8_64] "r" (cospi_8_64), [cospi_16_64] "r" (cospi_16_64), in vp9_idct4_columns_add_blk_dspr2()
|
D | vp9_common_dspr2.h | 50 [cospi_16_64] "r" (cospi_16_64) \
|
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/ |
D | vp9_idct.c | 103 temp1 = (input[0] + input[2]) * cospi_16_64; in idct4() 104 temp2 = (input[0] - input[2]) * cospi_16_64; in idct4() 146 int16_t out = dct_const_round_shift(input[0] * cospi_16_64); in vp9_idct4x4_1_add_c() 147 out = dct_const_round_shift(out * cospi_16_64); in vp9_idct4x4_1_add_c() 187 temp1 = (step2[6] - step2[5]) * cospi_16_64; in idct8() 188 temp2 = (step2[5] + step2[6]) * cospi_16_64; in idct8() 231 int16_t out = dct_const_round_shift(input[0] * cospi_16_64); in vp9_idct8x8_1_add_c() 232 out = dct_const_round_shift(out * cospi_16_64); in vp9_idct8x8_1_add_c() 371 s2 = cospi_16_64 * (x2 + x3); in iadst8() 372 s3 = cospi_16_64 * (x2 - x3); in iadst8() [all …]
|
D | vp9_idct.h | 59 static const int cospi_16_64 = 11585; variable
|
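vp9_idct.h line 59 is the definition every hit above refers to. The naming follows the libvpx convention cospi_N_64 = round(2^14 * cos(N * pi / 64)), so cospi_16_64 = round(16384 * cos(pi/4)) = 11585 (0x2d41, as the comment in vp9_idct4x4_add_neon.asm notes), and dct_const_round_shift() removes that 2^14 scaling with rounding. A small standalone self-check of those two facts (not code from the header):

    #include <assert.h>
    #include <math.h>
    #include <stdint.h>

    #define DCT_CONST_BITS 14
    static const int cospi_16_64 = 11585;   /* round(2^14 * cos(16 * pi / 64)) */

    static int16_t dct_const_round_shift(int input) {
      return (int16_t)((input + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS);
    }

    int main(void) {
      const double pi = 3.14159265358979323846;
      /* 11585 == round(16384 * cos(pi/4)) == 0x2d41 */
      assert(cospi_16_64 == (int)floor(16384.0 * cos(16.0 * pi / 64.0) + 0.5));
      /* One multiply + round-shift scales by ~0.7071, so two of them halve:
         1000 -> 707 -> 500, the pattern used by the DC-only paths above. */
      assert(dct_const_round_shift(1000 * cospi_16_64) == 707);
      assert(dct_const_round_shift(707 * cospi_16_64) == 500);
      return 0;
    }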
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/encoder/x86/ |
D | vp9_dct_sse2.c | 30 const __m128i k__cospi_A = _mm_setr_epi16(cospi_16_64, cospi_16_64, in vp9_fdct4x4_sse2() 31 cospi_16_64, cospi_16_64, in vp9_fdct4x4_sse2() 32 cospi_16_64, -cospi_16_64, in vp9_fdct4x4_sse2() 33 cospi_16_64, -cospi_16_64); in vp9_fdct4x4_sse2() 34 const __m128i k__cospi_B = _mm_setr_epi16(cospi_16_64, -cospi_16_64, in vp9_fdct4x4_sse2() 35 cospi_16_64, -cospi_16_64, in vp9_fdct4x4_sse2() 36 cospi_16_64, cospi_16_64, in vp9_fdct4x4_sse2() 37 cospi_16_64, cospi_16_64); in vp9_fdct4x4_sse2() 46 const __m128i k__cospi_E = _mm_setr_epi16(cospi_16_64, cospi_16_64, in vp9_fdct4x4_sse2() 47 cospi_16_64, cospi_16_64, in vp9_fdct4x4_sse2() [all …]
|
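In vp9_dct_sse2.c the butterfly is vectorized with _mm_madd_epi16: the two 16-bit input rows are interleaved, then a multiply-add against an all-(+cospi_16_64) constant yields (a + b) * cospi_16_64 per lane, while an alternating (+cospi_16_64, -cospi_16_64) constant yields (a - b) * cospi_16_64; the 32-bit products are rounded, shifted by DCT_CONST_BITS, and packed back to 16 bits. A minimal sketch of that pattern (butterfly_16_sse2 is a hypothetical name, not a helper from the file):

    #include <emmintrin.h>
    #include <stdint.h>

    #define DCT_CONST_BITS 14
    #define DCT_CONST_ROUNDING (1 << (DCT_CONST_BITS - 1))
    static const int16_t cospi_16_64 = 11585;

    /* out_sum[i]  = dct_const_round_shift((a[i] + b[i]) * cospi_16_64)
       out_diff[i] = dct_const_round_shift((a[i] - b[i]) * cospi_16_64)  */
    static void butterfly_16_sse2(__m128i a, __m128i b,
                                  __m128i *out_sum, __m128i *out_diff) {
      const __m128i k_p16_p16 = _mm_set1_epi16(cospi_16_64);
      const __m128i k_p16_m16 =
          _mm_setr_epi16(cospi_16_64, -cospi_16_64, cospi_16_64, -cospi_16_64,
                         cospi_16_64, -cospi_16_64, cospi_16_64, -cospi_16_64);
      const __m128i k_rounding = _mm_set1_epi32(DCT_CONST_ROUNDING);
      const __m128i lo = _mm_unpacklo_epi16(a, b);   /* a0,b0,a1,b1,... */
      const __m128i hi = _mm_unpackhi_epi16(a, b);
      /* madd multiplies each adjacent 16-bit pair and sums the two
         products into one 32-bit lane. */
      __m128i s0 = _mm_madd_epi16(lo, k_p16_p16);    /* (a + b) * c */
      __m128i s1 = _mm_madd_epi16(hi, k_p16_p16);
      __m128i d0 = _mm_madd_epi16(lo, k_p16_m16);    /* (a - b) * c */
      __m128i d1 = _mm_madd_epi16(hi, k_p16_m16);
      s0 = _mm_srai_epi32(_mm_add_epi32(s0, k_rounding), DCT_CONST_BITS);
      s1 = _mm_srai_epi32(_mm_add_epi32(s1, k_rounding), DCT_CONST_BITS);
      d0 = _mm_srai_epi32(_mm_add_epi32(d0, k_rounding), DCT_CONST_BITS);
      d1 = _mm_srai_epi32(_mm_add_epi32(d1, k_rounding), DCT_CONST_BITS);
      *out_sum  = _mm_packs_epi32(s0, s1);
      *out_diff = _mm_packs_epi32(d0, d1);
    }

Each _mm_madd_epi16 performs both multiplies and the add or subtract in a single instruction, which is why these files build the paired +cospi/-cospi constants up front.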
D | vp9_dct_avx2.c | 27 const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64); in vp9_fdct4x4_avx2() 28 const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); in vp9_fdct4x4_avx2() 167 const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64); in fdct4_avx2() 168 const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); in fdct4_avx2() 285 const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64); in vp9_fdct8x8_avx2() 286 const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); in vp9_fdct8x8_avx2() 667 const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64); in fdct8_avx2() 668 const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); in fdct8_avx2() 818 const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); in fadst8_avx2() 819 const __m128i k__cospi_p16_p16 = _mm_set1_epi16(cospi_16_64); in fadst8_avx2() [all …]
|
D | vp9_dct32x32_sse2.c | 44 const __m128i k__cospi_p16_p16 = _mm_set1_epi16(+cospi_16_64); in FDCT32x32_2D() 45 const __m128i k__cospi_p16_m16 = pair_set_epi16(+cospi_16_64, -cospi_16_64); in FDCT32x32_2D() 1380 const __m128i k32_p16_p16 = pair_set_epi32(cospi_16_64, cospi_16_64); in FDCT32x32_2D() 1381 const __m128i k32_p16_m16 = pair_set_epi32(cospi_16_64, -cospi_16_64); in FDCT32x32_2D() 1532 const __m128i k32_p16_p16 = pair_set_epi32(cospi_16_64, cospi_16_64); in FDCT32x32_2D() 1533 const __m128i k32_p16_m16 = pair_set_epi32(cospi_16_64, -cospi_16_64); in FDCT32x32_2D()
|
D | vp9_dct32x32_avx2.c | 53 const __m256i k__cospi_p16_p16 = _mm256_set1_epi16(+cospi_16_64); in FDCT32x32_2D_AVX2() 54 const __m256i k__cospi_p16_m16 = pair256_set_epi16(+cospi_16_64, -cospi_16_64); in FDCT32x32_2D_AVX2() 1389 const __m256i k32_p16_p16 = pair256_set_epi32(cospi_16_64, cospi_16_64); in FDCT32x32_2D_AVX2() 1390 const __m256i k32_p16_m16 = pair256_set_epi32(cospi_16_64, -cospi_16_64); in FDCT32x32_2D_AVX2() 1541 const __m256i k32_p16_p16 = pair256_set_epi32(cospi_16_64, cospi_16_64); in FDCT32x32_2D_AVX2() 1542 const __m256i k32_p16_m16 = pair256_set_epi32(cospi_16_64, -cospi_16_64); in FDCT32x32_2D_AVX2()
|
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/x86/ |
D | vp9_idct_intrin_sse2.c | 31 const __m128i cst = _mm_setr_epi16((int16_t)cospi_16_64, (int16_t)cospi_16_64, in vp9_idct4x4_16_add_sse2() 32 (int16_t)cospi_16_64, (int16_t)-cospi_16_64, in vp9_idct4x4_16_add_sse2() 163 a = dct_const_round_shift(input[0] * cospi_16_64); in vp9_idct4x4_1_add_sse2() 164 a = dct_const_round_shift(a * cospi_16_64); in vp9_idct4x4_1_add_sse2() 184 const __m128i k__cospi_p16_p16 = pair_set_epi16(cospi_16_64, cospi_16_64); in idct4_sse2() 185 const __m128i k__cospi_p16_m16 = pair_set_epi16(cospi_16_64, -cospi_16_64); in idct4_sse2() 548 const __m128i stg2_0 = pair_set_epi16(cospi_16_64, cospi_16_64); in vp9_idct8x8_64_add_sse2() 549 const __m128i stg2_1 = pair_set_epi16(cospi_16_64, -cospi_16_64); in vp9_idct8x8_64_add_sse2() 614 a = dct_const_round_shift(input[0] * cospi_16_64); in vp9_idct8x8_1_add_sse2() 615 a = dct_const_round_shift(a * cospi_16_64); in vp9_idct8x8_1_add_sse2() [all …]
|