
Searched refs:avg1 (Results 1 – 11 of 11), sorted by relevance

/external/libmpeg2/common/x86/
ideint_cac_ssse3.c
169 __m128i avg1, avg2; in ideint_cac_8x8_ssse3() local
173 avg1 = _mm_avg_epu8(top[0], top[1]); in ideint_cac_8x8_ssse3()
175 top_avg = _mm_avg_epu8(avg1, avg2); in ideint_cac_8x8_ssse3()
177 avg1 = _mm_avg_epu8(bot[0], bot[1]); in ideint_cac_8x8_ssse3()
179 bot_avg = _mm_avg_epu8(avg1, avg2); in ideint_cac_8x8_ssse3()
208 __m128i avg1, avg2; in ideint_cac_8x8_ssse3() local
210 avg1 = _mm_avg_epu8(top[0], bot[0]); in ideint_cac_8x8_ssse3()
212 even_avg = _mm_avg_epu8(avg1, avg2); in ideint_cac_8x8_ssse3()
214 avg1 = _mm_avg_epu8(top[1], bot[1]); in ideint_cac_8x8_ssse3()
216 odd_avg = _mm_avg_epu8(avg1, avg2); in ideint_cac_8x8_ssse3()
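
The matches above are from libmpeg2's deinterlacer, which uses _mm_avg_epu8 (the SSE2 per-byte rounded average, (a + b + 1) >> 1) to build top/bottom and even/odd averages by averaging two rows and then averaging the two results. A minimal sketch of that two-level averaging, assuming 16-byte rows; the helper name and row pointers are illustrative, not from the libmpeg2 source:

#include <emmintrin.h>  /* SSE2: _mm_avg_epu8, _mm_loadu_si128 */

/* Average four 16-byte rows pairwise, then average the two results,
 * mirroring the avg1/avg2 -> top_avg/bot_avg pattern shown above.
 * Each _mm_avg_epu8 computes (a + b + 1) >> 1 per unsigned byte. */
static __m128i avg_of_four_rows(const unsigned char *r0, const unsigned char *r1,
                                const unsigned char *r2, const unsigned char *r3) {
    const __m128i a = _mm_loadu_si128((const __m128i *)r0);
    const __m128i b = _mm_loadu_si128((const __m128i *)r1);
    const __m128i c = _mm_loadu_si128((const __m128i *)r2);
    const __m128i d = _mm_loadu_si128((const __m128i *)r3);
    const __m128i avg1 = _mm_avg_epu8(a, b);
    const __m128i avg2 = _mm_avg_epu8(c, d);
    return _mm_avg_epu8(avg1, avg2);  /* roughly (a + b + c + d + 2) >> 2 */
}

Because rounding is applied at each level, the result can differ slightly from an exact (a + b + c + d + 2) >> 2, which is presumably acceptable for the averaging measure used here.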
/external/libvpx/libvpx/vpx_dsp/mips/
variance_msa.c
186 v8i16 avg1 = { 0 }; in sse_diff_32x64_msa() local
195 CALC_MSE_AVG_B(src1, ref1, var, avg1); in sse_diff_32x64_msa()
202 CALC_MSE_AVG_B(src1, ref1, var, avg1); in sse_diff_32x64_msa()
209 CALC_MSE_AVG_B(src1, ref1, var, avg1); in sse_diff_32x64_msa()
216 CALC_MSE_AVG_B(src1, ref1, var, avg1); in sse_diff_32x64_msa()
220 vec += __msa_hadd_s_w(avg1, avg1); in sse_diff_32x64_msa()
233 v8i16 avg1 = { 0 }; in sse_diff_64x32_msa() local
243 CALC_MSE_AVG_B(src1, ref1, var, avg1); in sse_diff_64x32_msa()
244 CALC_MSE_AVG_B(src3, ref3, var, avg1); in sse_diff_64x32_msa()
252 CALC_MSE_AVG_B(src1, ref1, var, avg1); in sse_diff_64x32_msa()
[all …]
sub_pixel_variance_msa.c
229 v8i16 avg1 = { 0 }; in avg_sse_diff_32x64_msa() local
241 CALC_MSE_AVG_B(src1, ref1, var, avg1); in avg_sse_diff_32x64_msa()
251 CALC_MSE_AVG_B(src1, ref1, var, avg1); in avg_sse_diff_32x64_msa()
261 CALC_MSE_AVG_B(src1, ref1, var, avg1); in avg_sse_diff_32x64_msa()
271 CALC_MSE_AVG_B(src1, ref1, var, avg1); in avg_sse_diff_32x64_msa()
275 vec += __msa_hadd_s_w(avg1, avg1); in avg_sse_diff_32x64_msa()
291 v8i16 avg1 = { 0 }; in avg_sse_diff_64x32_msa() local
305 CALC_MSE_AVG_B(src1, ref1, var, avg1); in avg_sse_diff_64x32_msa()
306 CALC_MSE_AVG_B(src3, ref3, var, avg1); in avg_sse_diff_64x32_msa()
318 CALC_MSE_AVG_B(src1, ref1, var, avg1); in avg_sse_diff_64x32_msa()
[all …]
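
In both MSA variance files, avg1 is a v8i16 accumulator that the CALC_MSE_AVG_B macro fills alongside the var (sum of squared errors) accumulator, and __msa_hadd_s_w then reduces it to 32-bit lanes. The macro body is not part of these results; from its use it evidently accumulates the signed pixel differences so the caller can compute variance as sse - sum*sum/(w*h). A scalar sketch of that accumulation pattern, assuming a single shared stride (names and loop shape are illustrative, not the libvpx macro):

#include <stdint.h>

/* Accumulate squared differences ('var' in the snippets) and signed
 * differences ('avg1' in the snippets), then combine them into the
 * usual variance formula: sse - (sum * sum) / (w * h). */
static uint32_t variance_u8(const uint8_t *src, const uint8_t *ref,
                            int w, int h, int stride) {
    int64_t sse = 0, sum = 0;
    for (int y = 0; y < h; ++y) {
        for (int x = 0; x < w; ++x) {
            const int d = src[x] - ref[x];
            sse += (int64_t)d * d;
            sum += d;
        }
        src += stride;
        ref += stride;
    }
    return (uint32_t)(sse - (sum * sum) / ((int64_t)w * h));
}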
/external/tensorflow/tensorflow/python/training/
moving_averages_test.py
138 avg1 = ema.average(var1)
144 self.assertFalse(avg1 in variables.trainable_variables())
149 self.assertEqual("v1/ExponentialMovingAverage:0", avg1.name)
159 self.assertAllClose(thirties, avg1.eval())
171 self.assertAllClose(expected, avg1.eval())
182 self.assertAllClose(expected, avg1.eval())
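
Here avg1 is the shadow variable returned by ExponentialMovingAverage.average(var1): the test checks that it is not in trainable_variables(), that it is named v1/ExponentialMovingAverage:0, and that its value tracks var1 across update steps. The recurrence behind those assertAllClose checks is the standard exponential moving average update; a scalar sketch of it follows (the decay and the initial/target values are illustrative, not taken from the test):

#include <stdio.h>

/* Exponential moving average update, as applied to EMA shadow variables:
 *   shadow = decay * shadow + (1 - decay) * value
 * written in the equivalent 'shadow -= (1 - decay) * (shadow - value)' form. */
static double ema_update(double shadow, double value, double decay) {
    return shadow - (1.0 - decay) * (shadow - value);
}

int main(void) {
    double avg1 = 10.0;          /* illustrative starting shadow value */
    const double decay = 0.25;   /* illustrative decay */
    for (int step = 0; step < 3; ++step) {
        avg1 = ema_update(avg1, 30.0, decay);
        printf("step %d: avg1 = %.4f\n", step, avg1);
    }
    return 0;
}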
/external/webp/src/dsp/
lossless_sse2.c
88 const __m128i avg1 = _mm_avg_epu8(*a0, *a1); in Average2_m128i() local
90 *avg = _mm_sub_epi8(avg1, one); in Average2_m128i()
100 const __m128i avg1 = _mm_avg_epu8(A0, A1); in Average2_uint32_SSE2() local
102 *avg = _mm_sub_epi8(avg1, one); in Average2_uint32_SSE2()
122 const __m128i avg1 = Average2_uint32_16_SSE2(a0, a2); in Average3_SSE2() local
124 const __m128i sum = _mm_add_epi16(avg1, A1); in Average3_SSE2()
133 const __m128i avg1 = Average2_uint32_16_SSE2(a0, a1); in Average4_SSE2() local
135 const __m128i sum = _mm_add_epi16(avg2, avg1); in Average4_SSE2()
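
_mm_avg_epu8 rounds up, while WebP's lossless Average2 predictor needs the rounded-down mean (a + b) >> 1; the snippets therefore subtract a correction from avg1 (the 'one' operand, whose computation is not shown in these results, is presumably the low bit of a0 ^ a1). A minimal sketch of that floor-average trick, assuming SSE2 (the helper name is mine):

#include <emmintrin.h>  /* SSE2 */

/* floor((a + b) / 2) for each unsigned byte.
 * _mm_avg_epu8 gives (a + b + 1) >> 1 (rounds up); subtracting the low bit
 * of (a ^ b) -- which equals (a + b) & 1 -- yields the floored mean. */
static __m128i average2_epu8(__m128i a, __m128i b) {
    const __m128i ones  = _mm_set1_epi8(1);
    const __m128i carry = _mm_and_si128(_mm_xor_si128(a, b), ones);
    return _mm_sub_epi8(_mm_avg_epu8(a, b), carry);
}

Average3_SSE2 and Average4_SSE2 above then apparently widen to 16-bit lanes (Average2_uint32_16_SSE2, _mm_add_epi16) so several such averages can be combined without byte overflow.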
dec_sse2.c
924 const __m128i avg1 = _mm_avg_epu8(ABCDEFGH, CDEFGHH0); in LD4_SSE2() local
926 const __m128i avg2 = _mm_subs_epu8(avg1, lsb); in LD4_SSE2()
945 const __m128i avg1 = _mm_avg_epu8(IXABCD, ABCD0); in VR4_SSE2() local
947 const __m128i avg2 = _mm_subs_epu8(avg1, lsb); in VR4_SSE2()
964 const __m128i avg1 = _mm_avg_epu8(ABCDEFGH, BCDEFGH_); in VL4_SSE2() local
966 const __m128i avg3 = _mm_avg_epu8(avg1, avg2); in VL4_SSE2()
967 const __m128i lsb1 = _mm_and_si128(_mm_xor_si128(avg1, avg2), one); in VL4_SSE2()
974 WebPUint32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32( avg1 )); in VL4_SSE2()
976 WebPUint32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(avg1, 1))); in VL4_SSE2()
997 const __m128i avg1 = _mm_avg_epu8(JIXABCD__, LKJIXABCD); in RD4_SSE2() local
[all …]
enc_sse2.c
759 const __m128i avg1 = _mm_avg_epu8(ABCDEFGH, CDEFGHH0); in LD4_SSE2() local
761 const __m128i avg2 = _mm_subs_epu8(avg1, lsb); in LD4_SSE2()
781 const __m128i avg1 = _mm_avg_epu8(IXABCD, ABCD0); in VR4_SSE2() local
783 const __m128i avg2 = _mm_subs_epu8(avg1, lsb); in VR4_SSE2()
801 const __m128i avg1 = _mm_avg_epu8(ABCDEFGH, BCDEFGH_); in VL4_SSE2() local
803 const __m128i avg3 = _mm_avg_epu8(avg1, avg2); in VL4_SSE2()
804 const __m128i lsb1 = _mm_and_si128(_mm_xor_si128(avg1, avg2), one); in VL4_SSE2()
811 WebPUint32ToMem(dst + 0 * BPS, _mm_cvtsi128_si32( avg1 )); in VL4_SSE2()
813 WebPUint32ToMem(dst + 2 * BPS, _mm_cvtsi128_si32(_mm_srli_si128(avg1, 1))); in VL4_SSE2()
828 const __m128i avg1 = _mm_avg_epu8(JIXABCD__, LKJIXABCD); in RD4_SSE2() local
[all …]
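
The LD4/VR4/VL4/RD4 predictors in dec_sse2.c and enc_sse2.c chain two byte averages to evaluate the VP8 three-tap smoothing filter (a + 2*b + c + 2) >> 2: a floor average of the two outer taps (avg_epu8 followed by subtracting the xor low bit, as in the avg1/avg2 lines above), then a rounded average with the middle tap. A self-contained sketch of that composition, assuming SSE2 (the function name is mine):

#include <emmintrin.h>  /* SSE2 */

/* (a + 2*b + c + 2) >> 2 for each unsigned byte, built from two averages:
 *   avg2 = floor((a + c) / 2)    -- rounded average minus the carry bit
 *   out  = (avg2 + b + 1) >> 1   -- rounded average with the middle tap */
static __m128i three_tap_epu8(__m128i a, __m128i b, __m128i c) {
    const __m128i ones = _mm_set1_epi8(1);
    const __m128i lsb  = _mm_and_si128(_mm_xor_si128(a, c), ones);
    const __m128i avg1 = _mm_avg_epu8(a, c);
    const __m128i avg2 = _mm_subs_epu8(avg1, lsb);
    return _mm_avg_epu8(avg2, b);
}

The two-step form gives the same result as evaluating (a + 2*b + c + 2) >> 2 in wider lanes, while staying entirely in 8-bit arithmetic.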
lossless_enc_sse2.c
470 const __m128i avg1 = _mm_avg_epu8(*a0, *a1); in Average2_m128i() local
472 *avg = _mm_sub_epi8(avg1, one); in Average2_m128i()
dec_neon.c
1371 const uint8x8_t avg1 = vhadd_u8(JIXABCD_, LKJIXABC_u8); in RD4_NEON() local
1372 const uint8x8_t avg2 = vrhadd_u8(avg1, KJIXABC_); in RD4_NEON()
1390 const uint8x8_t avg1 = vhadd_u8(ABCDEFGH, CDEFGHH0); in LD4_NEON() local
1391 const uint8x8_t avg2 = vrhadd_u8(avg1, BCDEFGH0); in LD4_NEON()
/external/libvpx/libvpx/vpx_dsp/arm/
intrapred_neon.c
270 const uint8x8_t avg1 = vhadd_u8(ABCDEFGH, CDEFGH00); in vpx_d45_predictor_4x4_neon() local
271 const uint8x8_t avg2 = vrhadd_u8(avg1, BCDEFGH0); in vpx_d45_predictor_4x4_neon()
298 const uint8x8_t avg1 = vhadd_u8(A0, A2); in vpx_d45_predictor_8x8_neon() local
299 uint8x8_t row = vrhadd_u8(avg1, A1); in vpx_d45_predictor_8x8_neon()
326 const uint8x16_t avg1 = vhaddq_u8(A0, A2); in vpx_d45_predictor_16x16_neon() local
327 uint8x16_t row = vrhaddq_u8(avg1, A1); in vpx_d45_predictor_16x16_neon()
395 const uint8x8_t avg1 = vhadd_u8(L10XA0123_, L3210XA012); in vpx_d135_predictor_4x4_neon() local
396 const uint8x8_t avg2 = vrhadd_u8(avg1, L210XA0123); in vpx_d135_predictor_4x4_neon()
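
The NEON predictors in dec_neon.c and intrapred_neon.c express the same three-tap filter with halving adds: vhadd_u8 is the truncating (a + b) >> 1 and vrhadd_u8 the rounding (a + b + 1) >> 1, so vrhadd_u8(vhadd_u8(a, c), b) again yields (a + 2*b + c + 2) >> 2. A small sketch, assuming NEON (the function name is mine):

#include <arm_neon.h>

/* (a + 2*b + c + 2) >> 2 per byte, as in the vpx_d45/vpx_d135 rows above:
 *   vhadd_u8(a, c)  -> (a + c) >> 1      (truncating halving add)
 *   vrhadd_u8(x, b) -> (x + b + 1) >> 1  (rounding halving add) */
static uint8x8_t three_tap_u8(uint8x8_t a, uint8x8_t b, uint8x8_t c) {
    const uint8x8_t avg1 = vhadd_u8(a, c);
    return vrhadd_u8(avg1, b);
}

The highbd entries below do the same on 16-bit lanes with vhaddq_u16/vrhaddq_u16.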
highbd_intrapred_neon.c
295 const uint16x8_t avg1 = vhaddq_u16(ABCDEFGH, CDEFGH00); in vpx_highbd_d45_predictor_4x4_neon() local
296 const uint16x8_t avg2 = vrhaddq_u16(avg1, BCDEFGH0); in vpx_highbd_d45_predictor_4x4_neon()
328 const uint16x8_t avg1 = vhaddq_u16(A0, A2); in vpx_highbd_d45_predictor_8x8_neon() local
329 uint16x8_t row = vrhaddq_u16(avg1, A1); in vpx_highbd_d45_predictor_8x8_neon()
466 const uint16x8_t avg1 = vhaddq_u16(L3210XA012, L10XA0123_); in vpx_highbd_d135_predictor_4x4_neon() local
467 const uint16x8_t avg2 = vrhaddq_u16(avg1, L210XA0123); in vpx_highbd_d135_predictor_4x4_neon()