/external/libvpx/libvpx/vpx_dsp/arm/
  variance_neon.c
    206  uint32_t sse1, sse2;  in vpx_variance32x64_neon() local
    207  variance_neon_w16(a, a_stride, b, b_stride, 32, 32, &sse1, &sum1);  in vpx_variance32x64_neon()
    210  *sse = sse1 + sse2;  in vpx_variance32x64_neon()
    219  uint32_t sse1, sse2;  in vpx_variance64x32_neon() local
    220  variance_neon_w16(a, a_stride, b, b_stride, 64, 16, &sse1, &sum1);  in vpx_variance64x32_neon()
    223  *sse = sse1 + sse2;  in vpx_variance64x32_neon()
    232  uint32_t sse1, sse2;  in vpx_variance64x64_neon() local
    234  variance_neon_w16(a, a_stride, b, b_stride, 64, 16, &sse1, &sum1);  in vpx_variance64x64_neon()
    237  sse1 += sse2;  in vpx_variance64x64_neon()
    242  sse1 += sse2;  in vpx_variance64x64_neon()
    [all …]
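The pattern behind these hits: libvpx computes the variance of large blocks in two half-height passes and combines the partial SSE and sum values before applying the variance formula (the 64x64 version accumulates with sse1 += sse2 instead of writing *sse directly). Below is a minimal scalar sketch of the 32x64 case; variance_partial() is a hypothetical stand-in for the NEON kernel variance_neon_w16().

#include <stdint.h>

/* Scalar stand-in for variance_neon_w16(): SSE and sum of a w x h block. */
static void variance_partial(const uint8_t *a, int a_stride, const uint8_t *b,
                             int b_stride, int w, int h, uint32_t *sse,
                             int *sum) {
  *sse = 0;
  *sum = 0;
  for (int i = 0; i < h; ++i) {
    for (int j = 0; j < w; ++j) {
      const int diff = a[j] - b[j];
      *sum += diff;
      *sse += (uint32_t)(diff * diff);
    }
    a += a_stride;
    b += b_stride;
  }
}

/* 32x64 variance as two 32x32 halves, mirroring lines 206-210 above. */
static uint32_t variance32x64_sketch(const uint8_t *a, int a_stride,
                                     const uint8_t *b, int b_stride,
                                     uint32_t *sse) {
  uint32_t sse1, sse2;
  int sum1, sum2;
  variance_partial(a, a_stride, b, b_stride, 32, 32, &sse1, &sum1);
  variance_partial(a + 32 * a_stride, a_stride, b + 32 * b_stride, b_stride,
                   32, 32, &sse2, &sum2);
  *sse = sse1 + sse2;
  sum1 += sum2;
  /* variance = SSE - sum^2 / N, with N = 32 * 64 = 2^11 */
  return *sse - (uint32_t)(((int64_t)sum1 * sum1) >> 11);
}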
/external/python/cpython2/Modules/_ctypes/libffi_osx/x86/
  x86-ffi64.c
    381  _Bool sse1 = n == 2 && SSE_CLASS_P(classes[1]);  in ffi_prep_cif_machdep() local
    383  if (sse0 && !sse1)  in ffi_prep_cif_machdep()
    385  else if (!sse0 && sse1)  in ffi_prep_cif_machdep()
    387  else if (sse0 && sse1)  in ffi_prep_cif_machdep()
    654  _Bool sse1 = SSE_CLASS_P (classes[1]);  in ffi_closure_unix64_inner() local
    656  if (!sse0 && sse1)  in ffi_closure_unix64_inner()
    658  else if (sse0 && !sse1)  in ffi_closure_unix64_inner()
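These hits, and the identical copies in the bundled libffi sources listed below, implement the x86-64 psABI rule for returning a two-eightbyte aggregate: sse0 and sse1 record whether each eightbyte was classified as SSE, and the four boolean combinations select which register pair carries the value. A minimal sketch of that dispatch, assuming a simplified two-way class enum; the RET_* codes are illustrative and not libffi's real flag encoding.

/* Simplified two-way register class; libffi's enum has more members. */
enum reg_class { INTEGER_CLASS, SSE_CLASS };

#define SSE_CLASS_P(c) ((c) == SSE_CLASS)

/* Illustrative codes for a value returned in up to two registers. */
enum ret_convention {
  RET_INT_INT, /* rax (+ rdx if n == 2)  */
  RET_SSE_INT, /* xmm0 (+ rax if n == 2) */
  RET_INT_SSE, /* rax, xmm0              */
  RET_SSE_SSE  /* xmm0, xmm1             */
};

static enum ret_convention pick_return_regs(const enum reg_class classes[],
                                            int n) {
  const _Bool sse0 = SSE_CLASS_P(classes[0]);
  const _Bool sse1 = n == 2 && SSE_CLASS_P(classes[1]);
  if (sse0 && !sse1)
    return RET_SSE_INT;
  else if (!sse0 && sse1)
    return RET_INT_SSE;
  else if (sse0 && sse1)
    return RET_SSE_SSE;
  return RET_INT_INT;
}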
/external/python/cpython2/Modules/_ctypes/libffi/src/x86/
  ffi64.c
    379  _Bool sse1 = n == 2 && SSE_CLASS_P (classes[1]);  in ffi_prep_cif_machdep() local
    380  if (sse0 && !sse1)  in ffi_prep_cif_machdep()
    382  else if (!sse0 && sse1)  in ffi_prep_cif_machdep()
    384  else if (sse0 && sse1)  in ffi_prep_cif_machdep()
    602  _Bool sse1 = SSE_CLASS_P (classes[1]);  in ffi_closure_unix64_inner() local
    603  if (!sse0 && sse1)  in ffi_closure_unix64_inner()
    605  else if (sse0 && !sse1)  in ffi_closure_unix64_inner()
/external/python/cpython3/Modules/_ctypes/libffi/src/x86/
  ffi64.c
    379  _Bool sse1 = n == 2 && SSE_CLASS_P (classes[1]);  in ffi_prep_cif_machdep() local
    380  if (sse0 && !sse1)  in ffi_prep_cif_machdep()
    382  else if (!sse0 && sse1)  in ffi_prep_cif_machdep()
    384  else if (sse0 && sse1)  in ffi_prep_cif_machdep()
    602  _Bool sse1 = SSE_CLASS_P (classes[1]);  in ffi_closure_unix64_inner() local
    603  if (!sse0 && sse1)  in ffi_closure_unix64_inner()
    605  else if (sse0 && !sse1)  in ffi_closure_unix64_inner()
/external/python/cpython3/Modules/_ctypes/libffi_osx/x86/
  x86-ffi64.c
    383  _Bool sse1 = n == 2 && SSE_CLASS_P(classes[1]);  in ffi_prep_cif_machdep() local
    385  if (sse0 && !sse1)  in ffi_prep_cif_machdep()
    387  else if (!sse0 && sse1)  in ffi_prep_cif_machdep()
    389  else if (sse0 && sse1)  in ffi_prep_cif_machdep()
    657  _Bool sse1 = SSE_CLASS_P (classes[1]);  in ffi_closure_unix64_inner() local
    659  if (!sse0 && sse1)  in ffi_closure_unix64_inner()
    661  else if (sse0 && !sse1)  in ffi_closure_unix64_inner()
/external/libffi/src/x86/
  ffi64.c
    379  _Bool sse1 = n == 2 && SSE_CLASS_P (classes[1]);  in ffi_prep_cif_machdep() local
    380  if (sse0 && !sse1)  in ffi_prep_cif_machdep()
    382  else if (!sse0 && sse1)  in ffi_prep_cif_machdep()
    384  else if (sse0 && sse1)  in ffi_prep_cif_machdep()
    602  _Bool sse1 = SSE_CLASS_P (classes[1]);  in ffi_closure_unix64_inner() local
    603  if (!sse0 && sse1)  in ffi_closure_unix64_inner()
    605  else if (sse0 && !sse1)  in ffi_closure_unix64_inner()
/external/libvpx/libvpx/test/
  variance_test.cc
    413  unsigned int sse1, sse2, var1, var2;  in RefTest() local
    416  var1 = params_.func(src_, stride, ref_, stride, &sse1));  in RefTest()
    420  EXPECT_EQ(sse1, sse2) << "Error at test index: " << i;  in RefTest()
    443  unsigned int sse1, sse2;  in RefStrideTest() local
    447  var1 = params_.func(src_, src_stride, ref_, ref_stride, &sse1));  in RefStrideTest()
    451  EXPECT_EQ(sse1, sse2) << "Error at test index: " << i;  in RefStrideTest()
    516  unsigned int sse1, sse2;  in RefTestMse() local
    518  ASM_REGISTER_STATE_CHECK(params_.func(src_, stride, ref_, stride, &sse1));  in RefTestMse()
    521  EXPECT_EQ(sse1, sse2);  in RefTestMse()
    646  unsigned int sse1, sse2;  in RefTest() local
    [all …]
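variance_test.cc's RefTest pattern: run the implementation under test and the plain C reference on the same inputs and require bit-identical variance and SSE. A minimal sketch of that comparison, assuming libvpx's variance-function signature; check_variance_pair() is hypothetical, and the real tests wrap the optimized call in ASM_REGISTER_STATE_CHECK and use gtest's EXPECT_EQ instead of assert.

#include <assert.h>
#include <stdint.h>

typedef unsigned int (*variance_fn)(const uint8_t *src, int src_stride,
                                    const uint8_t *ref, int ref_stride,
                                    unsigned int *sse);

/* Run optimized and reference implementations on identical inputs and
 * require bit-identical results, like RefTest()'s EXPECT_EQ checks. */
static void check_variance_pair(variance_fn opt, variance_fn c_ref,
                                const uint8_t *src, const uint8_t *ref,
                                int stride) {
  unsigned int sse1, sse2;
  const unsigned int var1 = opt(src, stride, ref, stride, &sse1);
  const unsigned int var2 = c_ref(src, stride, ref, stride, &sse2);
  assert(sse1 == sse2);
  assert(var1 == var2);
}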
/external/libvpx/libvpx/vp8/encoder/
  mcomp.c
    218  *sse1 = sse; \
    228  unsigned int *sse1) {  in vp8_find_best_sub_pixel_step_iteratively() argument
    292  besterr = vfp->vf(y, y_stride, z, b->src_stride, sse1);  in vp8_find_best_sub_pixel_step_iteratively()
    372  unsigned int *sse1) {  in vp8_find_best_sub_pixel_step() argument
    407  bestmse = vfp->vf(y, y_stride, z, b->src_stride, sse1);  in vp8_find_best_sub_pixel_step()
    422  *sse1 = sse;  in vp8_find_best_sub_pixel_step()
    434  *sse1 = sse;  in vp8_find_best_sub_pixel_step()
    448  *sse1 = sse;  in vp8_find_best_sub_pixel_step()
    460  *sse1 = sse;  in vp8_find_best_sub_pixel_step()
    502  *sse1 = sse;  in vp8_find_best_sub_pixel_step()
    [all …]
  rdopt.c
    377  unsigned int sse1 = 0;  in VP8_UVSSE() local
    407  vpred_ptr, uv_stride, &sse1);  in VP8_UVSSE()
    408  sse2 += sse1;  in VP8_UVSSE()
    411  vpx_variance8x8(vptr, pre_stride, vpred_ptr, uv_stride, &sse1);  in VP8_UVSSE()
    412  sse2 += sse1;  in VP8_UVSSE()
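The repeated *sse1 = sse; hits in mcomp.c above are the best-candidate update inside vp8's sub-pixel motion search: when a candidate MV's rate-distortion cost beats the best so far, the best cost, the distortion, and the caller's SSE out-parameter are all refreshed together. A minimal sketch of one update step, assuming the candidate's cost, MSE, and SSE were already computed; check_better() loosely paraphrases vp8's CHECK_BETTER macro rather than reproducing it.

typedef struct { int row, col; } mv_sketch;

/* Fold one candidate's (cost, mse, sse) into the best-so-far state.
 * Returns the new best cost; on improvement, *sse1 ends up holding the
 * SSE of the winning candidate, which flows back to the caller. */
static unsigned int check_better(unsigned int thiscost, unsigned int thismse,
                                 unsigned int sse, mv_sketch mv,
                                 unsigned int besterr, mv_sketch *best_mv,
                                 unsigned int *distortion,
                                 unsigned int *sse1) {
  if (thiscost < besterr) {
    *best_mv = mv;
    *distortion = thismse;
    *sse1 = sse;
    return thiscost;
  }
  return besterr;
}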
/external/libvpx/libvpx/vp9/encoder/
  vp9_mcomp.c
    188  *sse1 = sse; \
    211  *sse1 = sse; \
    326  int *mvjcost, int *mvcost[2], uint32_t *sse1, uint32_t *distortion) {  in setup_center_error() argument
    335  vfp->vf(CONVERT_TO_BYTEPTR(comp_pred16), w, src, src_stride, sse1);  in setup_center_error()
    339  besterr = vfp->vf(comp_pred, w, src, src_stride, sse1);  in setup_center_error()
    342  besterr = vfp->vf(y + offset, y_stride, src, src_stride, sse1);  in setup_center_error()
    354  besterr = vfp->vf(comp_pred, w, src, src_stride, sse1);  in setup_center_error()
    356  besterr = vfp->vf(y + offset, y_stride, src, src_stride, sse1);  in setup_center_error()
    397  uint32_t *distortion, uint32_t *sse1,  in vp9_skip_sub_pixel_tree() argument
    402  offset, mvjcost, mvcost, sse1, distortion);  in vp9_skip_sub_pixel_tree()
    [all …]
  vp9_mcomp.h
    76  uint32_t *distortion, uint32_t *sse1, const uint8_t *second_pred, int w,
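In vp9_mcomp.c, setup_center_error() seeds the sub-pixel search with the error at the integer-pel center, measured either directly against the reference or against an averaged compound prediction when a second predictor exists; the CONVERT_TO_BYTEPTR hit is the high-bit-depth variant of the same step. A minimal low-bit-depth sketch, assuming caller-provided scratch space; avg_pred() is a hypothetical stand-in for libvpx's compound-average helper.

#include <stddef.h>
#include <stdint.h>

typedef uint32_t (*variance_fn)(const uint8_t *a, int a_stride,
                                const uint8_t *b, int b_stride,
                                uint32_t *sse);

/* Stand-in for the compound-average helper: comp = round((pred + ref) / 2). */
static void avg_pred(uint8_t *comp, const uint8_t *pred, int w, int h,
                     const uint8_t *ref, int ref_stride) {
  for (int i = 0; i < h; ++i) {
    for (int j = 0; j < w; ++j)
      comp[j] = (uint8_t)((pred[j] + ref[j] + 1) >> 1);
    comp += w;
    pred += w;
    ref += ref_stride;
  }
}

/* Error at the integer-pel search center; *sse1 receives the center SSE. */
static uint32_t center_error_sketch(variance_fn vf, const uint8_t *y,
                                    int y_stride, int offset,
                                    const uint8_t *src, int src_stride,
                                    const uint8_t *second_pred, int w, int h,
                                    uint8_t *scratch, uint32_t *sse1) {
  if (second_pred != NULL) {
    avg_pred(scratch, second_pred, w, h, y + offset, y_stride);
    return vf(scratch, w, src, src_stride, sse1);
  }
  return vf(y + offset, y_stride, src, src_stride, sse1);
}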
/external/valgrind/memcheck/tests/x86/
  sse1_memory.vgtest
    3  args: sse1
  sse_memory.c
    391  Int sse1 = 0, sse2 = 0;  in main() local
    394  sse1 = 1;  in main()
    402  sse1 = sse2 = 1;  in main()
    410  if (sse1) {  in main()
  insn_mmxext.vgtest
    2  # mmxext is an old AMD subset of sse1, so either will do.
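The sse_memory.c hits above show how the test binary picks its workload: main() maps one command-line keyword to the sse1/sse2 flags and runs the matching instruction banks, with the .vgtest file supplying the argument (args: sse1). A minimal sketch of that dispatch; the test_*_insns() bodies and the "all" keyword are assumptions inferred from the flag pattern, not verified against the valgrind source.

#include <stdio.h>
#include <string.h>

static void test_sse1_insns(void) { /* SSE1 load/store/arithmetic cases */ }
static void test_sse2_insns(void) { /* SSE2 load/store/arithmetic cases */ }

int main(int argc, char **argv) {
  int sse1 = 0, sse2 = 0;
  if (argc == 2 && 0 == strcmp(argv[1], "sse1"))
    sse1 = 1;
  else if (argc == 2 && 0 == strcmp(argv[1], "sse2"))
    sse2 = 1;
  else if (argc == 2 && 0 == strcmp(argv[1], "all")) /* "all" is assumed */
    sse1 = sse2 = 1;
  else {
    fprintf(stderr, "usage: %s sse1|sse2|all\n", argv[0]);
    return 1;
  }
  if (sse1) test_sse1_insns();
  if (sse2) test_sse2_insns();
  return 0;
}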
/external/valgrind/memcheck/tests/amd64/
  sse_memory.c
    391  Int sse1 = 0, sse2 = 0;  in main() local
    394  sse1 = 1;  in main()
    402  sse1 = sse2 = 1;  in main()
    410  if (sse1) {  in main()
/external/valgrind/none/tests/x86/
  insn_mmxext.vgtest
    2  # mmxext is an old AMD subset of sse1, so either will do.
/external/libvpx/libvpx/vpx_dsp/x86/
  variance_avx2.c
    675  unsigned int sse1;  in vpx_sub_pixel_variance64x64_avx2() local
    677  src, src_stride, x_offset, y_offset, dst, dst_stride, 64, &sse1);  in vpx_sub_pixel_variance64x64_avx2()
    683  *sse = sse1 + sse2;  in vpx_sub_pixel_variance64x64_avx2()
    700  unsigned int sse1;  in vpx_sub_pixel_avg_variance64x64_avx2() local
    702  src, src_stride, x_offset, y_offset, dst, dst_stride, sec, 64, 64, &sse1);  in vpx_sub_pixel_avg_variance64x64_avx2()
    709  *sse = sse1 + sse2;  in vpx_sub_pixel_avg_variance64x64_avx2()
/external/valgrind/docs/internals/
  release-HOWTO.txt
    75  x86, sse1 (PIII)