/external/libaom/libaom/aom_dsp/x86/ |
D | subtract_sse2.asm |
    57   mova [diffq+mmsize*0+%5], m0
    58   mova [diffq+mmsize*1+%5], m2
    59   mova [diffq+mmsize*0+%6], m4
    60   mova [diffq+mmsize*1+%6], m1
    65   loop16 0*mmsize, 1*mmsize, 0*mmsize, 1*mmsize, 0*mmsize, 2*mmsize
    66   loop16 2*mmsize, 3*mmsize, 2*mmsize, 3*mmsize, 4*mmsize, 6*mmsize
    67   loop16 4*mmsize, 5*mmsize, 4*mmsize, 5*mmsize, 8*mmsize, 10*mmsize
    68   loop16 6*mmsize, 7*mmsize, 6*mmsize, 7*mmsize, 12*mmsize, 14*mmsize
    79   loop16 0*mmsize, 1*mmsize, 0*mmsize, 1*mmsize, 0*mmsize, 2*mmsize
    80   loop16 2*mmsize, 3*mmsize, 2*mmsize, 3*mmsize, 4*mmsize, 6*mmsize
    [all …]

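The stores above write the residual as 16-bit values, two mmsize-wide registers per destination offset. As a rough scalar model of what the loop16 invocations vectorize (a sketch; names are illustrative, not the library's exported API):

  #include <stddef.h>
  #include <stdint.h>

  /* Residual = source pixels minus prediction, widened from 8-bit
   * pixels to 16-bit coefficients. The asm handles mmsize pixels
   * per step; this scalar sketch does one at a time. */
  static void subtract_block(int16_t *diff, ptrdiff_t diff_stride,
                             const uint8_t *src, ptrdiff_t src_stride,
                             const uint8_t *pred, ptrdiff_t pred_stride,
                             int rows, int cols) {
    for (int r = 0; r < rows; ++r) {
      for (int c = 0; c < cols; ++c)
        diff[c] = (int16_t)((int)src[c] - (int)pred[c]);
      diff += diff_stride;
      src += src_stride;
      pred += pred_stride;
    }
  }
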
D | sad_sse2.asm |
    63   pavgb m1, [second_predq+mmsize*0]
    64   pavgb m2, [second_predq+mmsize*1]
    65   pavgb m3, [second_predq+mmsize*2]
    66   pavgb m4, [second_predq+mmsize*3]
    83   pavgb m1, [second_predq+mmsize*4]
    84   pavgb m2, [second_predq+mmsize*5]
    85   pavgb m3, [second_predq+mmsize*6]
    86   pavgb m4, [second_predq+mmsize*7]
    87   lea second_predq, [second_predq+mmsize*8]
    130  pavgb m1, [second_predq+mmsize*0]
    [all …]

D | highbd_sad_sse2.asm |
    71   pavgw m1, [second_predq+mmsize*0]
    72   pavgw m2, [second_predq+mmsize*1]
    73   pavgw m3, [second_predq+mmsize*2]
    74   pavgw m4, [second_predq+mmsize*3]
    75   lea second_predq, [second_predq+mmsize*4]
    109  pavgw m1, [second_predq+mmsize*0]
    110  pavgw m2, [second_predq+mmsize*1]
    111  pavgw m3, [second_predq+mmsize*2]
    112  pavgw m4, [second_predq+mmsize*3]
    113  lea second_predq, [second_predq+mmsize*4]
    [all …]

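Both SAD files above use pavgb (packed bytes) or pavgw (packed words), which compute a rounded unsigned average, (a + b + 1) >> 1. These are the averaged-prediction SAD variants: the reference block is blended with second_pred before absolute differences are accumulated. A minimal scalar sketch of the 8-bit case, with hypothetical names; second_pred is assumed contiguous, matching how the asm advances second_predq by whole mmsize multiples:

  #include <stddef.h>
  #include <stdint.h>

  /* Scalar model of an averaged-prediction SAD kernel. */
  static unsigned sad_avg(const uint8_t *src, ptrdiff_t src_stride,
                          const uint8_t *ref, ptrdiff_t ref_stride,
                          const uint8_t *second_pred, int width, int height) {
    unsigned sad = 0;
    for (int y = 0; y < height; ++y) {
      for (int x = 0; x < width; ++x) {
        const int avg = (ref[x] + second_pred[x] + 1) >> 1;  /* pavgb */
        const int d = src[x] - avg;
        sad += (unsigned)(d < 0 ? -d : d);
      }
      src += src_stride;
      ref += ref_stride;
      second_pred += width;  /* contiguous width x height block */
    }
    return sad;
  }
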
D | highbd_subpel_variance_impl_sse2.asm |
    63   %if mmsize == 16
    278  %if ARCH_X86_64 && mmsize == 16
    505  %if ARCH_X86_64 && mmsize == 16
    627  %if ARCH_X86_64 && mmsize == 16
    726  %if ARCH_X86_64 && mmsize == 16
    854  %if ARCH_X86_64 && mmsize == 16

D | quantize_ssse3_x86_64.asm |
    160  add ncoeffq, mmsize
    268  add ncoeffq, mmsize
    282  add ncoeffq, mmsize

D | quantize_avx_x86_64.asm |
    235  add ncoeffq, mmsize
    327  add ncoeffq, mmsize
    353  add ncoeffq, mmsize
    445  add ncoeffq, mmsize

/external/libvpx/libvpx/vpx_dsp/x86/ |
D | subtract_sse2.asm |
    52   mova [diffq+mmsize*0+%5], m0
    53   mova [diffq+mmsize*1+%5], m2
    54   mova [diffq+mmsize*0+%6], m4
    55   mova [diffq+mmsize*1+%6], m1
    60   loop16 0*mmsize, 1*mmsize, 0*mmsize, 1*mmsize, 0*mmsize, 2*mmsize
    61   loop16 2*mmsize, 3*mmsize, 2*mmsize, 3*mmsize, 4*mmsize, 6*mmsize
    72   loop16 0, mmsize, 0, mmsize, 0, 2*mmsize

D | sad_sse2.asm |
    59   pavgb m1, [second_predq+mmsize*0]
    60   pavgb m2, [second_predq+mmsize*1]
    61   pavgb m3, [second_predq+mmsize*2]
    62   pavgb m4, [second_predq+mmsize*3]
    63   lea second_predq, [second_predq+mmsize*4]
    102  pavgb m1, [second_predq+mmsize*0]
    103  pavgb m2, [second_predq+mmsize*1]
    104  pavgb m3, [second_predq+mmsize*2]
    105  pavgb m4, [second_predq+mmsize*3]
    106  lea second_predq, [second_predq+mmsize*4]
    [all …]

D | highbd_sad_sse2.asm |
    68   pavgw m1, [second_predq+mmsize*0]
    69   pavgw m2, [second_predq+mmsize*1]
    70   pavgw m3, [second_predq+mmsize*2]
    71   pavgw m4, [second_predq+mmsize*3]
    72   lea second_predq, [second_predq+mmsize*4]
    106  pavgw m1, [second_predq+mmsize*0]
    107  pavgw m2, [second_predq+mmsize*1]
    108  pavgw m3, [second_predq+mmsize*2]
    109  pavgw m4, [second_predq+mmsize*3]
    110  lea second_predq, [second_predq+mmsize*4]
    [all …]

D | highbd_subpel_variance_impl_sse2.asm |
    60   %if mmsize == 16
    275  %if ARCH_X86_64 && mmsize == 16
    502  %if ARCH_X86_64 && mmsize == 16
    624  %if ARCH_X86_64 && mmsize == 16
    723  %if ARCH_X86_64 && mmsize == 16
    851  %if ARCH_X86_64 && mmsize == 16

/external/libaom/libaom/third_party/libyuv/source/ |
D | row_x86.asm |
    34   mov%2 m1, [src_yuy2q + mmsize]
    35   lea src_yuy2q, [src_yuy2q + mmsize * 2]
    47   sub pixd, mmsize
    49   lea dst_yq, [dst_yq + mmsize]
    80   mov%1 m1, [src_uvq + mmsize]
    81   lea src_uvq, [src_uvq + mmsize * 2]
    94   lea dst_uq, [dst_uq + mmsize]
    95   sub pixd, mmsize
    120  lea src_uq, [src_uq + mmsize]
    127  mov%1 [dst_uvq + mmsize], m2
    [all …]

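These loops consume packed YUY2 and interleaved UV data two mmsize-wide registers at a time, splitting (or, in the last lines, merging) planes. YUY2 packs pixels as Y0 U0 Y1 V0, so the luma plane is every even byte; a scalar sketch of the Y-extraction step, assuming that standard layout (function name is illustrative):

  #include <stdint.h>

  /* Scalar model of the YUY2-to-Y loop above: 2 * mmsize input
   * bytes yield mmsize luma bytes per asm iteration. The U/V
   * split and merge loops deinterleave/interleave analogously. */
  static void yuy2_to_y(const uint8_t *src_yuy2, uint8_t *dst_y, int pix) {
    for (int i = 0; i < pix; ++i)
      dst_y[i] = src_yuy2[2 * i];  /* skip interleaved U/V bytes */
  }
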
/external/libaom/libaom/third_party/x86inc/ |
D | x86inc.asm |
    532  %if mmsize != 8 && stack_size == 0
    599  %define has_epilogue regs_used > 7 || xmm_regs_used > 6 || mmsize == 32 || stack_size > 0
    604  %if mmsize == 32
    640  %define has_epilogue regs_used > 9 || mmsize == 32 || stack_size > 0
    651  %if mmsize == 32
    696  %define has_epilogue regs_used > 3 || mmsize == 32 || stack_size > 0
    707  %if mmsize == 32
    939  %if (mmsize == 16 && notcpuflag(sse2)) || (mmsize == 32 && notcpuflag(avx2))
    968  ; xm# is the corresponding xmm register if mmsize >= 16, otherwise the same as m#
    969  ; ym# is the corresponding ymm register if mmsize >= 32, otherwise the same as m#
    [all …]

/external/libvpx/libvpx/third_party/x86inc/ |
D | x86inc.asm |
    532  %if mmsize != 8 && stack_size == 0
    599  %define has_epilogue regs_used > 7 || xmm_regs_used > 6 || mmsize == 32 || stack_size > 0
    604  %if mmsize == 32
    640  %define has_epilogue regs_used > 9 || mmsize == 32 || stack_size > 0
    651  %if mmsize == 32
    696  %define has_epilogue regs_used > 3 || mmsize == 32 || stack_size > 0
    707  %if mmsize == 32
    939  %if (mmsize == 16 && notcpuflag(sse2)) || (mmsize == 32 && notcpuflag(avx2))
    968  ; xm# is the corresponding xmm register if mmsize >= 16, otherwise the same as m#
    969  ; ym# is the corresponding ymm register if mmsize >= 32, otherwise the same as m#
    [all …]

/external/libaom/libaom/av1/encoder/x86/ |
D | error_sse2.asm |
    34   mova m3, [uqcq+sizeq*2+mmsize]
    35   mova m1, [dqcq+sizeq*2+mmsize]
    61   add sizeq, mmsize

D | av1_quantize_ssse3_x86_64.asm |
    104  add ncoeffq, mmsize
    155  add ncoeffq, mmsize
    164  add ncoeffq, mmsize
    196  add ncoeffq, mmsize

/external/libvpx/libvpx/vp9/encoder/x86/ |
D | vp9_quantize_ssse3_x86_64.asm |
    99   add ncoeffq, mmsize
    150  add ncoeffq, mmsize
    159  add ncoeffq, mmsize

/external/python/cpython2/Modules/_ctypes/libffi/src/ |
D | dlmalloc.c |
    3221  size_t mmsize = granularity_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);  in mmap_alloc() local
    3222  if (mmsize > nb) { /* Check for wrap around 0 */  in mmap_alloc()
    3223  char* mm = (char*)(DIRECT_MMAP(mmsize));  in mmap_alloc()
    3226  size_t psize = mmsize - offset - MMAP_FOOT_PAD;  in mmap_alloc()
    3236  if ((m->footprint += mmsize) > m->max_footprint)  in mmap_alloc()

/external/libffi/src/ |
D | dlmalloc.c |
    3216  size_t mmsize = granularity_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);  in mmap_alloc() local
    3217  if (mmsize > nb) { /* Check for wrap around 0 */  in mmap_alloc()
    3218  char* mm = (char*)(DIRECT_MMAP(mmsize));  in mmap_alloc()
    3221  size_t psize = mmsize - offset - MMAP_FOOT_PAD;  in mmap_alloc()
    3231  if ((m->footprint += mmsize) > m->max_footprint)  in mmap_alloc()

/external/dlmalloc/ |
D | malloc.c |
    3843  size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);  in mmap_alloc() local
    3845  size_t fp = m->footprint + mmsize;  in mmap_alloc()
    3849  if (mmsize > nb) { /* Check for wrap around 0 */  in mmap_alloc()
    3850  char* mm = (char*)(CALL_DIRECT_MMAP(mmsize));  in mmap_alloc()
    3853  size_t psize = mmsize - offset - MMAP_FOOT_PAD;  in mmap_alloc()
    3863  if ((m->footprint += mmsize) > m->max_footprint)  in mmap_alloc()

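All three dlmalloc copies above share the same guard: granularity_align/mmap_align rounds the padded request up to the mmap granularity, and for a request near SIZE_MAX that addition wraps past zero, producing an mmsize smaller than nb, so the allocation only proceeds when mmsize > nb. A minimal sketch of just that size computation (names and constants simplified; not dlmalloc's actual API):

  #include <stddef.h>

  /* gran_mask = granularity - 1 (granularity is a power of two);
   * pad stands in for SIX_SIZE_T_SIZES plus footer overhead. */
  static size_t mmap_request_size(size_t nb, size_t pad, size_t gran_mask) {
    size_t mmsize = (nb + pad + gran_mask) & ~gran_mask;  /* align up */
    return (mmsize > nb) ? mmsize : 0;  /* 0 signals wrap-around: don't mmap */
  }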