/external/libmpeg2/common/x86/
  ideint_cac_ssse3.c
    169  __m128i avg1, avg2;  in ideint_cac_8x8_ssse3() local
    174  avg2 = _mm_avg_epu8(top[2], top[3]);  in ideint_cac_8x8_ssse3()
    175  top_avg = _mm_avg_epu8(avg1, avg2);  in ideint_cac_8x8_ssse3()
    178  avg2 = _mm_avg_epu8(bot[2], bot[3]);  in ideint_cac_8x8_ssse3()
    179  bot_avg = _mm_avg_epu8(avg1, avg2);  in ideint_cac_8x8_ssse3()
    208  __m128i avg1, avg2;  in ideint_cac_8x8_ssse3() local
    211  avg2 = _mm_avg_epu8(top[2], bot[2]);  in ideint_cac_8x8_ssse3()
    212  even_avg = _mm_avg_epu8(avg1, avg2);  in ideint_cac_8x8_ssse3()
    215  avg2 = _mm_avg_epu8(top[3], bot[3]);  in ideint_cac_8x8_ssse3()
    216  odd_avg = _mm_avg_epu8(avg1, avg2);  in ideint_cac_8x8_ssse3()
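In these hits, what appears to be the libmpeg2 de-interlacer's combing check collapses four field rows into one by cascading rounded byte averages. A minimal sketch of that pairwise-average pattern, using only SSE2 and a hypothetical field_avg4() helper (not the libmpeg2 function):

#include <emmintrin.h>  /* SSE2: _mm_avg_epu8 */

/* Per-byte average of four 16-pixel rows, the way the listing combines
 * top[0..3] (and bot[0..3]).  _mm_avg_epu8 is a rounded (a + b + 1) >> 1,
 * so the cascade approximates (r0 + r1 + r2 + r3) / 4 without widening. */
static __m128i field_avg4(__m128i r0, __m128i r1, __m128i r2, __m128i r3) {
  const __m128i avg1 = _mm_avg_epu8(r0, r1);
  const __m128i avg2 = _mm_avg_epu8(r2, r3);
  return _mm_avg_epu8(avg1, avg2);
}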
/external/libvpx/libvpx/vpx_dsp/x86/
  highbd_intrapred_intrin_sse2.c
    408  const __m128i avg2 = _mm_avg_epu16(KJIXABCD, JIXABCD0);  in vpx_highbd_d117_predictor_4x4_sse2() local
    410  const __m128i row0 = _mm_srli_si128(avg2, 6);  in vpx_highbd_d117_predictor_4x4_sse2()
    412  const __m128i row2 = _mm_srli_si128(avg2, 4);  in vpx_highbd_d117_predictor_4x4_sse2()
    472  const __m128i avg2 = _mm_avg_epu16(LKJIXABC, KJIXABC0);  in vpx_highbd_d153_predictor_4x4_sse2() local
    473  const __m128i row3 = _mm_unpacklo_epi16(avg2, avg3);  in vpx_highbd_d153_predictor_4x4_sse2()
    479  dst[0] = _mm_extract_epi16(avg2, 3);  in vpx_highbd_d153_predictor_4x4_sse2()
    497  const __m128i avg2 = _mm_avg_epu16(IJKLLLLL, JKLLLLL0);  in vpx_highbd_d207_predictor_4x4_sse2() local
    498  const __m128i row0 = _mm_unpacklo_epi16(avg2, avg3);  in vpx_highbd_d207_predictor_4x4_sse2()
    520  const __m128i avg2 = _mm_avg_epu16(ABCDEFGH, BCDEFGH0);  in vpx_highbd_d63_predictor_4x4_sse2() local
    521  const __m128i row0 = avg2;  in vpx_highbd_d63_predictor_4x4_sse2()
    [all …]
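In these high-bit-depth directional predictors, avg2 is the 2-tap term of the filter, and the register names ending in 0 appear to be the neighbor row shifted down one 16-bit lane. A sketch under that assumption, with a hypothetical two_tap_avg() helper rather than the libvpx code:

#include <emmintrin.h>  /* SSE2: _mm_avg_epu16, _mm_srli_si128 */

/* Rounded 2-tap average of adjacent 16-bit neighbor samples: lane i becomes
 * (p[i] + p[i+1] + 1) >> 1, the role avg2 plays in the listing above. */
static __m128i two_tap_avg(__m128i neighbors) {
  const __m128i shifted = _mm_srli_si128(neighbors, 2);  /* drop one u16 lane */
  return _mm_avg_epu16(neighbors, shifted);
}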
  highbd_intrapred_intrin_ssse3.c
    192  const __m128i avg2 = _mm_avg_epu16(ABCDEFGH, XABCDEFG);  in vpx_highbd_d117_predictor_8x8_ssse3() local
    197  __m128i rowa = avg2;  in vpx_highbd_d117_predictor_8x8_ssse3()
    680  const __m128i avg2 = _mm_avg_epu16(ABCDEFGH, BCDEFGHH);  in vpx_highbd_d207_predictor_8x8_ssse3() local
    681  const __m128i out_a = _mm_unpacklo_epi16(avg2, avg3);  in vpx_highbd_d207_predictor_8x8_ssse3()
    682  const __m128i out_b = _mm_unpackhi_epi16(avg2, avg3);  in vpx_highbd_d207_predictor_8x8_ssse3()
    829  __m128i avg2 = _mm_avg_epu16(ABCDEFGH, BCDEFGHH);  in vpx_highbd_d63_predictor_8x8_ssse3() local
    832  d63_store_4x8(&dst, stride, &avg2, &avg3, &HHHHHHHH);  in vpx_highbd_d63_predictor_8x8_ssse3()
    833  d63_store_4x8(&dst, stride, &avg2, &avg3, &HHHHHHHH);  in vpx_highbd_d63_predictor_8x8_ssse3()
/external/rust/crates/rand/src/distributions/
  bernoulli.rs
    195  let avg2 = (sum2 as f64) / (N as f64);  in test_average() localVariable
    196  assert!((avg2 - (NUM as f64) / (DENOM as f64)).abs() < 5e-3);  in test_average()
/external/tensorflow/tensorflow/python/training/
  moving_averages_test.py
    189  avg2 = ema.average(tensor2)
    195  self.assertNotIn(avg2, variables.trainable_variables())
    200  self.assertEqual("add/ExponentialMovingAverage:0", avg2.name)
    212  self.assertAllClose(_Repeat(0.0, dim), self.evaluate(avg2))
    223  self.assertAllClose(expected, self.evaluate(avg2))
    235  self.assertAllClose(expected, self.evaluate(avg2))
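This test exercises TensorFlow's ExponentialMovingAverage, whose shadow variables (named "<var>/ExponentialMovingAverage") follow the standard EMA update documented as shadow -= (1 - decay) * (shadow - value). A plain-C sketch of that update, with a hypothetical ema_update() helper rather than TensorFlow code:

/* shadow <- decay * shadow + (1 - decay) * value, written in the
 * subtractive form TensorFlow documents. */
static double ema_update(double shadow, double value, double decay) {
  return shadow - (1.0 - decay) * (shadow - value);
}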
/external/webp/src/dsp/
  dec_sse2.c
    926  const __m128i avg2 = _mm_subs_epu8(avg1, lsb);  in LD4_SSE2() local
    927  const __m128i abcdefg = _mm_avg_epu8(avg2, BCDEFGH0);  in LD4_SSE2()
    947  const __m128i avg2 = _mm_subs_epu8(avg1, lsb);  in VR4_SSE2() local
    948  const __m128i efgh = _mm_avg_epu8(avg2, XABCD);  in VR4_SSE2()
    965  const __m128i avg2 = _mm_avg_epu8(CDEFGH__, BCDEFGH_);  in VL4_SSE2() local
    966  const __m128i avg3 = _mm_avg_epu8(avg1, avg2);  in VL4_SSE2()
    967  const __m128i lsb1 = _mm_and_si128(_mm_xor_si128(avg1, avg2), one);  in VL4_SSE2()
    999  const __m128i avg2 = _mm_subs_epu8(avg1, lsb);  in RD4_SSE2() local
    1000 const __m128i abcdefg = _mm_avg_epu8(avg2, KJIXABCD_);  in RD4_SSE2()
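The LD4/VR4/RD4 lines are the classic rounded 3-tap intra filter, (a + 2*b + c + 2) >> 2 per byte, computed without widening to 16 bits: _mm_avg_epu8 rounds up, so the low bit of (a ^ c) is subtracted before the second average to cancel the double rounding. A self-contained sketch of the trick, with a hypothetical avg3_round() name rather than the WebP function:

#include <emmintrin.h>

static __m128i avg3_round(__m128i a, __m128i b, __m128i c) {
  const __m128i one  = _mm_set1_epi8(1);
  const __m128i avg1 = _mm_avg_epu8(a, c);                      /* (a + c + 1) >> 1 */
  const __m128i lsb  = _mm_and_si128(_mm_xor_si128(a, c), one); /* carry of a + c   */
  const __m128i avg2 = _mm_subs_epu8(avg1, lsb);                /* (a + c) >> 1     */
  return _mm_avg_epu8(avg2, b);                                 /* (a + 2b + c + 2) >> 2 */
}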
  lossless_sse2.c
    125  const __m128i avg2 = _mm_srli_epi16(sum, 1);  in Average3_SSE2() local
    126  const __m128i A2 = _mm_packus_epi16(avg2, avg2);  in Average3_SSE2()
    134  const __m128i avg2 = Average2_uint32_16_SSE2(a2, a3);  in Average4_SSE2() local
    135  const __m128i sum = _mm_add_epi16(avg2, avg1);  in Average4_SSE2()
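Here avg2 is an intermediate of the WebP lossless Average3/Average4 predictors, which nest per-channel halving averages of packed ARGB pixels. A scalar sketch of the building block and of one plausible nesting that matches the avg1/avg2 pairs above (not the SSE2 code):

#include <stdint.h>

/* Per-byte (a + b) >> 1 on packed 8-bit ARGB; masking with 0xfefefefe before
 * the shift stops bits from borrowing across channels. */
static uint32_t average2(uint32_t a, uint32_t b) {
  return (((a ^ b) & 0xfefefefeu) >> 1) + (a & b);
}

static uint32_t average3(uint32_t a0, uint32_t a1, uint32_t a2) {
  return average2(average2(a0, a2), a1);
}

static uint32_t average4(uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3) {
  return average2(average2(a0, a1), average2(a2, a3));
}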
  enc_sse2.c
    761  const __m128i avg2 = _mm_subs_epu8(avg1, lsb);  in LD4_SSE2() local
    762  const __m128i abcdefg = _mm_avg_epu8(avg2, BCDEFGH0);  in LD4_SSE2()
    783  const __m128i avg2 = _mm_subs_epu8(avg1, lsb);  in VR4_SSE2() local
    784  const __m128i efgh = _mm_avg_epu8(avg2, XABCD);  in VR4_SSE2()
    802  const __m128i avg2 = _mm_avg_epu8(CDEFGH__, BCDEFGH_);  in VL4_SSE2() local
    803  const __m128i avg3 = _mm_avg_epu8(avg1, avg2);  in VL4_SSE2()
    804  const __m128i lsb1 = _mm_and_si128(_mm_xor_si128(avg1, avg2), one);  in VL4_SSE2()
    830  const __m128i avg2 = _mm_subs_epu8(avg1, lsb);  in RD4_SSE2() local
    831  const __m128i abcdefg = _mm_avg_epu8(avg2, KJIXABCD_);  in RD4_SSE2()
  dec_neon.c
    1373  const uint8x8_t avg2 = vrhadd_u8(avg1, KJIXABC_);  in RD4_NEON() local
    1374  const uint64x1_t avg2_u64 = vreinterpret_u64_u8(avg2);  in RD4_NEON()
    1375  const uint32x2_t r3 = vreinterpret_u32_u8(avg2);  in RD4_NEON()
    1392  const uint8x8_t avg2 = vrhadd_u8(avg1, BCDEFGH0);  in LD4_NEON() local
    1393  const uint64x1_t avg2_u64 = vreinterpret_u64_u8(avg2);  in LD4_NEON()
    1394  const uint32x2_t r0 = vreinterpret_u32_u8(avg2);  in LD4_NEON()
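RD4_NEON/LD4_NEON are the NEON spelling of the same 3-tap filter: a truncating halving add for the outer taps followed by a rounding halving add for the center tap also yields (a + 2*b + c + 2) >> 2 per byte, with no lsb correction needed. Sketch only, assuming avg1 is produced from the two outer taps as in the listing:

#include <arm_neon.h>

static uint8x8_t avg3_round_neon(uint8x8_t a, uint8x8_t b, uint8x8_t c) {
  const uint8x8_t avg1 = vhadd_u8(a, c);   /* (a + c) >> 1, truncating      */
  return vrhadd_u8(avg1, b);               /* (avg1 + b + 1) >> 1, rounding */
}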
/external/libvpx/libvpx/vpx_dsp/mips/
  variance_msa.c
    272  v8i16 avg2 = { 0 };  in sse_diff_64x64_msa() local
    284  CALC_MSE_AVG_B(src2, ref2, var, avg2);  in sse_diff_64x64_msa()
    292  CALC_MSE_AVG_B(src2, ref2, var, avg2);  in sse_diff_64x64_msa()
    298  vec += __msa_hadd_s_w(avg2, avg2);  in sse_diff_64x64_msa()
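In the MSA variance kernels, avg2 is a running v8i16 sum of pixel differences that CALC_MSE_AVG_B updates alongside the squared-error accumulator, and __msa_hadd_s_w folds it to 32-bit words at the end. What the whole kernel computes, restated as a plain scalar sketch with a hypothetical helper (not the MSA code):

#include <stdint.h>

/* sse is the sum of squared differences; the returned variance subtracts the
 * squared mean term, sum*sum / (64*64). */
static uint32_t block_variance_64x64(const uint8_t *src, int src_stride,
                                     const uint8_t *ref, int ref_stride,
                                     uint32_t *sse) {
  int64_t sum = 0;
  int64_t sq = 0;
  for (int y = 0; y < 64; ++y) {
    for (int x = 0; x < 64; ++x) {
      const int d = src[x] - ref[x];
      sum += d;
      sq += d * d;
    }
    src += src_stride;
    ref += ref_stride;
  }
  *sse = (uint32_t)sq;
  return (uint32_t)(sq - (sum * sum) / (64 * 64));
}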
  sub_pixel_variance_msa.c
    342  v8i16 avg2 = { 0 };  in avg_sse_diff_64x64_msa() local
    357  CALC_MSE_AVG_B(src2, ref2, var, avg2);  in avg_sse_diff_64x64_msa()
    370  CALC_MSE_AVG_B(src2, ref2, var, avg2);  in avg_sse_diff_64x64_msa()
    376  vec += __msa_hadd_s_w(avg2, avg2);  in avg_sse_diff_64x64_msa()
/external/libaom/libaom/aom_dsp/mips/
  variance_msa.c
    273  v8i16 avg2 = { 0 };  in sse_diff_64x64_msa() local
    285  CALC_MSE_AVG_B(src2, ref2, var, avg2);  in sse_diff_64x64_msa()
    293  CALC_MSE_AVG_B(src2, ref2, var, avg2);  in sse_diff_64x64_msa()
    299  vec += __msa_hadd_s_w(avg2, avg2);  in sse_diff_64x64_msa()
  sub_pixel_variance_msa.c
    339  v8i16 avg2 = { 0 };  in avg_sse_diff_64x64_msa() local
    354  CALC_MSE_AVG_B(src2, ref2, var, avg2);  in avg_sse_diff_64x64_msa()
    367  CALC_MSE_AVG_B(src2, ref2, var, avg2);  in avg_sse_diff_64x64_msa()
    373  vec += __msa_hadd_s_w(avg2, avg2);  in avg_sse_diff_64x64_msa()
/external/libvpx/libvpx/vpx_dsp/arm/
  intrapred_neon.c
    271  const uint8x8_t avg2 = vrhadd_u8(avg1, BCDEFGH0);  in vpx_d45_predictor_4x4_neon() local
    272  const uint64x1_t avg2_u64 = vreinterpret_u64_u8(avg2);  in vpx_d45_predictor_4x4_neon()
    273  const uint32x2_t r0 = vreinterpret_u32_u8(avg2);  in vpx_d45_predictor_4x4_neon()
    396  const uint8x8_t avg2 = vrhadd_u8(avg1, L210XA0123);  in vpx_d135_predictor_4x4_neon() local
    397  const uint64x1_t avg2_u64 = vreinterpret_u64_u8(avg2);  in vpx_d135_predictor_4x4_neon()
    398  const uint32x2_t r3 = vreinterpret_u32_u8(avg2);  in vpx_d135_predictor_4x4_neon()
  highbd_intrapred_neon.c
    296  const uint16x8_t avg2 = vrhaddq_u16(avg1, BCDEFGH0);  in vpx_highbd_d45_predictor_4x4_neon() local
    297  const uint16x4_t avg2_low = vget_low_u16(avg2);  in vpx_highbd_d45_predictor_4x4_neon()
    298  const uint16x4_t avg2_high = vget_high_u16(avg2);  in vpx_highbd_d45_predictor_4x4_neon()
    467  const uint16x8_t avg2 = vrhaddq_u16(avg1, L210XA0123);  in vpx_highbd_d135_predictor_4x4_neon() local
    468  const uint16x4_t row_0 = vget_low_u16(avg2);  in vpx_highbd_d135_predictor_4x4_neon()
    469  const uint16x4_t row_1 = vget_high_u16(avg2);  in vpx_highbd_d135_predictor_4x4_neon()
/external/libaom/libaom/aom_dsp/arm/
  intrapred_neon.c
    334  const uint8x8_t avg2 = vrhadd_u8(avg1, KJIXABC_);  in aom_d135_predictor_4x4_neon() local
    335  const uint64x1_t avg2_u64 = vreinterpret_u64_u8(avg2);  in aom_d135_predictor_4x4_neon()
    336  const uint32x2_t r3 = vreinterpret_u32_u8(avg2);  in aom_d135_predictor_4x4_neon()