
Searched refs: s4 (Results 1 – 18 of 18), sorted by relevance

/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/mips/dspr2/
loopfilter_filters_dspr2.c
356 unsigned char *sm1, *s0, *s1, *s2, *s3, *s4, *s5, *s6; in vp8_loop_filter_horizontal_edge_mips() local
377 s4 = s + p; in vp8_loop_filter_horizontal_edge_mips()
387 p4 = *((uint32_t *)(s4)); in vp8_loop_filter_horizontal_edge_mips()
413 *((uint32_t *)s4) = p4; in vp8_loop_filter_horizontal_edge_mips()
422 s4 += 4; in vp8_loop_filter_horizontal_edge_mips()
432 p4 = *((uint32_t *)(s4)); in vp8_loop_filter_horizontal_edge_mips()
458 *((uint32_t *)s4) = p4; in vp8_loop_filter_horizontal_edge_mips()
467 s4 += 4; in vp8_loop_filter_horizontal_edge_mips()
477 p4 = *((uint32_t *)(s4)); in vp8_loop_filter_horizontal_edge_mips()
503 *((uint32_t *)s4) = p4; in vp8_loop_filter_horizontal_edge_mips()
[all …]
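
All of the vp8 hits above follow one pattern: s4 addresses the pixel row one pitch below the filtered edge, is read and written four pixels at a time through 32-bit accesses, and is advanced by 4 each iteration. A minimal standalone sketch of that pattern (illustrative only, not the dspr2 source; the function name is an assumption and memcpy is used instead of the original casts to keep the example alignment-safe):

  #include <stdint.h>
  #include <string.h>

  /* Sketch of the s4 usage in vp8_loop_filter_horizontal_edge_mips(): s4 walks
   * the row one pitch below the edge, 4 pixels per 32-bit load/store. */
  static void walk_row_below_edge(unsigned char *s, int pitch, int width) {
    unsigned char *s4 = s + pitch;      /* s4 = s + p;                  */
    int i;
    for (i = 0; i < width; i += 4) {
      uint32_t p4;
      memcpy(&p4, s4, sizeof(p4));      /* p4 = *((uint32_t *)(s4));    */
      /* ... the real code filters p4 together with the neighbouring rows ... */
      memcpy(s4, &p4, sizeof(p4));      /* *((uint32_t *)s4) = p4;      */
      s4 += 4;                          /* step to the next 4 pixels    */
    }
  }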
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/mips/dspr2/
vp9_loopfilter_filters_dspr2.c
33 uint8_t *sm1, *s0, *s1, *s2, *s3, *s4, *s5, *s6; in vp9_lpf_horizontal_4_dspr2() local
63 s4 = s + pitch; in vp9_lpf_horizontal_4_dspr2()
64 s5 = s4 + pitch; in vp9_lpf_horizontal_4_dspr2()
74 : [s1] "r" (s1), [s2] "r" (s2), [s3] "r" (s3), [s4] "r" (s4) in vp9_lpf_horizontal_4_dspr2()
108 [s1] "r" (s1), [s2] "r" (s2), [s3] "r" (s3), [s4] "r" (s4) in vp9_lpf_horizontal_4_dspr2()
126 uint8_t *s1, *s2, *s3, *s4; in vp9_lpf_vertical_4_dspr2() local
153 s4 = s3 + pitch; in vp9_lpf_vertical_4_dspr2()
154 s = s4 + pitch; in vp9_lpf_vertical_4_dspr2()
165 pm1 = *((uint32_t *)(s4 - 4)); in vp9_lpf_vertical_4_dspr2()
166 p3 = *((uint32_t *)(s4)); in vp9_lpf_vertical_4_dspr2()
[all …]
vp9_mblpf_vert_loopfilter_dspr2.c
30 uint8_t *s1, *s2, *s3, *s4; in vp9_lpf_vertical_16_dspr2() local
64 s4 = s3 + pitch; in vp9_lpf_vertical_16_dspr2()
65 s = s4 + pitch; in vp9_lpf_vertical_16_dspr2()
80 : [s1] "r" (s1), [s2] "r" (s2), [s3] "r" (s3), [s4] "r" (s4) in vp9_lpf_vertical_16_dspr2()
96 : [s1] "r" (s1), [s2] "r" (s2), [s3] "r" (s3), [s4] "r" (s4) in vp9_lpf_vertical_16_dspr2()
319 [s4] "r" (s4) in vp9_lpf_vertical_16_dspr2()
331 [s4] "r" (s4) in vp9_lpf_vertical_16_dspr2()
510 [p0_r] "r" (p0_r), [s4] "r" (s4) in vp9_lpf_vertical_16_dspr2()
526 [q6_r] "r" (q6_r), [s4] "r" (s4) in vp9_lpf_vertical_16_dspr2()
541 [s4] "r" (s4) in vp9_lpf_vertical_16_dspr2()
[all …]
vp9_loopfilter_macros_dspr2.h
35 [s4] "r" (s4) \
119 [s4] "r" (s4) \
215 [s4] "r" (s4) \
vp9_itrans8_dspr2.c
472 int s0, s1, s2, s3, s4, s5, s6, s7; in iadst8_dspr2() local
495 s4 = cospi_18_64 * x4 + cospi_14_64 * x5; in iadst8_dspr2()
500 x0 = ROUND_POWER_OF_TWO((s0 + s4), DCT_CONST_BITS); in iadst8_dspr2()
504 x4 = ROUND_POWER_OF_TWO((s0 - s4), DCT_CONST_BITS); in iadst8_dspr2()
514 s4 = cospi_8_64 * x4 + cospi_24_64 * x5; in iadst8_dspr2()
523 x4 = ROUND_POWER_OF_TWO((s4 + s6), DCT_CONST_BITS); in iadst8_dspr2()
525 x6 = ROUND_POWER_OF_TWO((s4 - s6), DCT_CONST_BITS); in iadst8_dspr2()
vp9_mbloop_loopfilter_dspr2.c
330 uint8_t *s1, *s2, *s3, *s4; in vp9_lpf_vertical_8_dspr2() local
360 s4 = s3 + pitch; in vp9_lpf_vertical_8_dspr2()
361 s = s4 + pitch; in vp9_lpf_vertical_8_dspr2()
375 : [s1] "r" (s1), [s2] "r" (s2), [s3] "r" (s3), [s4] "r" (s4) in vp9_lpf_vertical_8_dspr2()
500 [s4] "r" (s4) in vp9_lpf_vertical_8_dspr2()
512 [s4] "r" (s4) in vp9_lpf_vertical_8_dspr2()
vp9_itrans16_dspr2.c
915 int s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15; in iadst16() local
948 s4 = x4 * cospi_9_64 + x5 * cospi_23_64; in iadst16()
965 x4 = dct_const_round_shift(s4 + s12); in iadst16()
973 x12 = dct_const_round_shift(s4 - s12); in iadst16()
983 s4 = x4; in iadst16()
996 x0 = s0 + s4; in iadst16()
1000 x4 = s0 - s4; in iadst16()
1018 s4 = x4 * cospi_8_64 + x5 * cospi_24_64; in iadst16()
1035 x4 = dct_const_round_shift(s4 + s6); in iadst16()
1037 x6 = dct_const_round_shift(s4 - s6); in iadst16()
vp9_itrans4_dspr2.c
323 int s0, s1, s2, s3, s4, s5, s6, s7; in iadst4_dspr2() local
340 s4 = sinpi_1_9 * x2; in iadst4_dspr2()
346 x1 = s1 - s4 - s6; in iadst4_dspr2()
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/encoder/
vp9_dct.c
113 int s0, s1, s2, s3, s4, s5, s6, s7; in fadst4() local
129 s4 = sinpi_3_9 * x2; in fadst4()
137 x3 = s4; in fadst4()
192 /*canbe16*/ int s0, s1, s2, s3, s4, s5, s6, s7; in fdct8() local
201 s4 = input[3] - input[4]; in fdct8()
227 x0 = s4 + t2; in fdct8()
228 x1 = s4 - t2; in fdct8()
250 /*canbe16*/ int s0, s1, s2, s3, s4, s5, s6, s7; in vp9_fdct8x8_c() local
261 s4 = (input[3 * stride] - input[4 * stride]) * 4; in vp9_fdct8x8_c()
287 x0 = s4 + t2; in vp9_fdct8x8_c()
[all …]
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/
vp9_idct.c
242 int s0, s1, s2, s3, s4, s5, s6, s7; in iadst4() local
258 s4 = sinpi_1_9 * x2; in iadst4()
264 x1 = s1 - s4 - s6; in iadst4()
315 int s0, s1, s2, s3, s4, s5, s6, s7; in iadst8() local
337 s4 = cospi_18_64 * x4 + cospi_14_64 * x5; in iadst8()
342 x0 = dct_const_round_shift(s0 + s4); in iadst8()
346 x4 = dct_const_round_shift(s0 - s4); in iadst8()
356 s4 = cospi_8_64 * x4 + cospi_24_64 * x5; in iadst8()
365 x4 = dct_const_round_shift(s4 + s6); in iadst8()
367 x6 = dct_const_round_shift(s4 - s6); in iadst8()
[all …]
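
The vp9_idct.c and vp9_itrans*_dspr2.c hits above are the scalar ADST butterflies, where s4 is an intermediate rotation term. A minimal sketch of the stage visible in the vp9_idct.c excerpt (a reconstruction, assuming libvpx's cospi_8_64/cospi_24_64 constants and its dct_const_round_shift() helper; the companion s5/s6/s7 lines are filled in from the same pattern and the wrapper function name is illustrative only):

  #define DCT_CONST_BITS 14
  #define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n) - 1))) >> (n))

  static const int cospi_8_64 = 15137;   /* 16384 * cos(8 * pi / 64)  */
  static const int cospi_24_64 = 6270;   /* 16384 * cos(24 * pi / 64) */

  static int dct_const_round_shift(int input) {
    return ROUND_POWER_OF_TWO(input, DCT_CONST_BITS);
  }

  /* One ADST stage: rotate the (x4, x5) and (x6, x7) pairs, then cross-combine,
   * which is where the "s4 + s6" and "s4 - s6" matches above come from. */
  static void iadst8_stage(int *x4, int *x5, int *x6, int *x7) {
    int s4 = cospi_8_64 * *x4 + cospi_24_64 * *x5;
    int s5 = cospi_24_64 * *x4 - cospi_8_64 * *x5;
    int s6 = -cospi_24_64 * *x6 + cospi_8_64 * *x7;
    int s7 = cospi_8_64 * *x6 + cospi_24_64 * *x7;
    *x4 = dct_const_round_shift(s4 + s6);
    *x5 = dct_const_round_shift(s5 + s7);
    *x6 = dct_const_round_shift(s4 - s6);
    *x7 = dct_const_round_shift(s5 - s7);
  }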
/hardware/intel/common/libva/
style_unify
6 astyle --style=linux -s4 -c -s -p -U -H -n $i
/hardware/intel/img/psb_video/
style_unify
6 astyle --style=linux -s4 -c -s -p -U -H -n $i
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/arm/neon/
vp9_iht8x8_add_neon.asm
317 ; s4 = cospi_18_64 * x4 + cospi_14_64 * x5;
325 ; (s0 + s4)
332 ; (s0 - s4)
336 ; x0 = dct_const_round_shift(s0 + s4);
348 ; x4 = dct_const_round_shift(s0 - s4);
438 ; s4 = cospi_8_64 * x4 + cospi_24_64 * x5;
470 ; (s4 + s6)
474 ; (s4 - s6)
478 ; x4 = dct_const_round_shift(s4 + s6);
482 ; x6 = dct_const_round_shift(s4 - s6);
vp9_iht4x4_add_neon.asm
63 vmull.s16 q14, d3, d18 ; s4 = sinpi_1_9 * x2
72 vsub.s32 q11, q11, q14 ; x1 = s1 - s4 - s6
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/x86/
loopfilter_block_sse2.asm
320 %define s4 [src + 4 * stride]
453 movdqa xmm3, s4
763 movdqa s4, xmm4
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/encoder/x86/
vp9_dct_avx2.c
678 __m128i s0, s1, s2, s3, s4, s5, s6, s7; in fdct8_avx2() local
685 s4 = _mm_sub_epi16(in[3], in[4]); in fdct8_avx2()
757 s0 = _mm_add_epi16(s4, u0); in fdct8_avx2()
758 s1 = _mm_sub_epi16(s4, u0); in fdct8_avx2()
826 __m128i s0, s1, s2, s3, s4, s5, s6, s7; in fadst8_avx2() local
846 s4 = _mm_unpacklo_epi16(in4, in5); in fadst8_avx2()
859 u8 = _mm_madd_epi16(s4, k__cospi_p18_p14); in fadst8_avx2()
861 u10 = _mm_madd_epi16(s4, k__cospi_p14_m18); in fadst8_avx2()
978 s4 = _mm_packs_epi32(u0, u1); in fadst8_avx2()
1023 in[1] = _mm_sub_epi16(k__const_0, s4); in fadst8_avx2()
vp9_dct_sse2.c
779 __m128i s0, s1, s2, s3, s4, s5, s6, s7; in fdct8_sse2() local
786 s4 = _mm_sub_epi16(in[3], in[4]); in fdct8_sse2()
858 s0 = _mm_add_epi16(s4, u0); in fdct8_sse2()
859 s1 = _mm_sub_epi16(s4, u0); in fdct8_sse2()
927 __m128i s0, s1, s2, s3, s4, s5, s6, s7; in fadst8_sse2() local
947 s4 = _mm_unpacklo_epi16(in4, in5); in fadst8_sse2()
960 u8 = _mm_madd_epi16(s4, k__cospi_p18_p14); in fadst8_sse2()
962 u10 = _mm_madd_epi16(s4, k__cospi_p14_m18); in fadst8_sse2()
1079 s4 = _mm_packs_epi32(u0, u1); in fadst8_sse2()
1124 in[1] = _mm_sub_epi16(k__const_0, s4); in fadst8_sse2()
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/x86/
vp9_idct_intrin_sse2.c
722 __m128i s0, s1, s2, s3, s4, s5, s6, s7; in iadst8_sse2() local
745 s4 = _mm_unpacklo_epi16(in4, in5); in iadst8_sse2()
758 u8 = _mm_madd_epi16(s4, k__cospi_p18_p14); in iadst8_sse2()
760 u10 = _mm_madd_epi16(s4, k__cospi_p14_m18); in iadst8_sse2()
877 s4 = _mm_packs_epi32(u0, u1); in iadst8_sse2()
921 in[1] = _mm_sub_epi16(k__const_0, s4); in iadst8_sse2()
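
The SSE2/AVX2 hits in vp9_dct_sse2.c, vp9_dct_avx2.c and vp9_idct_intrin_sse2.c are the vectorised form of the same term: in4/in5 hold eight 16-bit x4/x5 values, _mm_unpacklo_epi16 interleaves them into (x4, x5) pairs, and _mm_madd_epi16 against a packed cosine-pair constant produces four 32-bit "cospi_18_64 * x4 + cospi_14_64 * x5" results at once. A minimal sketch (illustrative only; the function name is an assumption, the constants are taken from libvpx's cospi table, and pair_set_epi16 mirrors libvpx's helper macro):

  #include <emmintrin.h>

  /* Pack the 16-bit pair (a, b) into every 32-bit lane, low half a. */
  #define pair_set_epi16(a, b)                                      \
    _mm_set_epi16((short)(b), (short)(a), (short)(b), (short)(a),   \
                  (short)(b), (short)(a), (short)(b), (short)(a))

  /* Vectorised "s4 = cospi_18_64 * x4 + cospi_14_64 * x5" for the low four
   * lanes: interleave x4/x5, then multiply-add against (cospi_18, cospi_14). */
  static __m128i adst_s4_lo(__m128i in4, __m128i in5) {
    const __m128i k__cospi_p18_p14 = pair_set_epi16(10394, 12665);
    __m128i s4 = _mm_unpacklo_epi16(in4, in5);   /* s4 = _mm_unpacklo_epi16(in4, in5);        */
    return _mm_madd_epi16(s4, k__cospi_p18_p14); /* u8 = _mm_madd_epi16(s4, k__cospi_p18_p14); */
  }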