Search hits for xmm6, grouped by directory. The number before each hit is the line number inside the file; [all …] marks a truncated hit list.

/external/libvpx/vp8/encoder/x86/
variance_impl_sse2.asm
  125  pxor xmm6, xmm6              ; clear xmm6 for accumulating sse
  155  paddd xmm6, xmm1
  156  paddd xmm6, xmm3
  165  movdqa xmm1, xmm6
  166  pxor xmm6, xmm6
  169  punpcklwd xmm6, xmm7
  174  psrad xmm6, 16
  175  paddd xmm6, xmm5
  181  movdqa xmm7, xmm6
  184  punpckldq xmm6, xmm0
  [all …]

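The pattern in these hits is the standard SSE2 sum-of-squared-errors kernel: pxor zeroes the accumulator, paddd folds in 32-bit partial sums, and the punpcklwd/psrad pair widens and reduces at the end. A minimal intrinsics sketch of the same idea (the function name, block size, and reduction order are illustrative, not libvpx's actual API):

    #include <emmintrin.h>  /* SSE2 */
    #include <stdint.h>

    /* Sum of squared differences over one 16x16 block -- a sketch of the
     * accumulation pattern above, not libvpx's actual entry point. */
    static uint32_t sse_16x16(const uint8_t *src, int src_stride,
                              const uint8_t *ref, int ref_stride) {
        __m128i acc  = _mm_setzero_si128();   /* pxor xmm6, xmm6 */
        __m128i zero = _mm_setzero_si128();
        for (int row = 0; row < 16; ++row) {
            __m128i s = _mm_loadu_si128((const __m128i *)(src + row * src_stride));
            __m128i r = _mm_loadu_si128((const __m128i *)(ref + row * ref_stride));
            /* Widen bytes to 16 bits so the differences cannot overflow. */
            __m128i d_lo = _mm_sub_epi16(_mm_unpacklo_epi8(s, zero),
                                         _mm_unpacklo_epi8(r, zero));
            __m128i d_hi = _mm_sub_epi16(_mm_unpackhi_epi8(s, zero),
                                         _mm_unpackhi_epi8(r, zero));
            /* pmaddwd squares and pairwise-adds -> four 32-bit partial SSEs. */
            acc = _mm_add_epi32(acc, _mm_madd_epi16(d_lo, d_lo));  /* paddd */
            acc = _mm_add_epi32(acc, _mm_madd_epi16(d_hi, d_hi));
        }
        /* Horizontal reduction of the four 32-bit lanes. */
        acc = _mm_add_epi32(acc, _mm_srli_si128(acc, 8));
        acc = _mm_add_epi32(acc, _mm_srli_si128(acc, 4));
        return (uint32_t)_mm_cvtsi128_si32(acc);
    }

_mm_madd_epi16(d, d) squares and pairwise-adds in one instruction, which is why the assembly gets away with a single paddd per step.
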
fwalsh_sse2.asm
  55   pxor xmm6, xmm6
  56   movq xmm6, xmm0
  58   pcmpeqw xmm7, xmm6
  86   pshufd xmm6, xmm1, 0x72      ; d13 d12 a13 a12
  92   movdqa xmm1, xmm6
  94   punpckhqdq xmm6, xmm7        ; c13 c12 d13 d12
  100  paddd xmm1, xmm6             ; b23 b22 a23 a22
  101  psubd xmm3, xmm6             ; c23 c22 d23 d22
  110  pxor xmm6, xmm6
  111  movdqa xmm7, xmm6
  [all …]

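The paddd/psubd pair at lines 100-101 is one butterfly stage of VP8's 4x4 forward Walsh-Hadamard transform, carried out on 32-bit intermediates. A scalar sketch of a 4-point add/sub butterfly of this general shape (the exact pairing, ordering, and final scaling used by fwalsh_sse2.asm are not reproduced here):

    /* One 4-point add/sub butterfly stage, the shape behind the
     * paddd/psubd pairs above. Illustrative; VP8's fwalsh uses a
     * specific pairing and scaling that this sketch omits. */
    static void wht4_stage(const int in[4], int out[4]) {
        int a = in[0] + in[2];   /* paddd */
        int b = in[1] + in[3];
        int c = in[1] - in[3];   /* psubd */
        int d = in[0] - in[2];
        out[0] = a + b;
        out[1] = d + c;
        out[2] = a - b;
        out[3] = d - c;
    }
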
variance_impl_ssse3.asm
  43   pxor xmm6, xmm6
  129  paddw xmm6, xmm2
  130  paddw xmm6, xmm3
  195  paddw xmm6, xmm1
  196  paddw xmm6, xmm2
  237  paddw xmm6, xmm1
  238  paddw xmm6, xmm2
  286  paddw xmm6, xmm1
  287  paddw xmm6, xmm3
  310  punpcklwd xmm0, xmm6
  [all …]

sad_ssse3.asm
  18   lddqu xmm6, XMMWORD PTR [rdi+1]
  22   psadbw xmm6, xmm0
  35   paddw xmm6, xmm2
  51   paddw xmm6, xmm2
  64   movdqa xmm6, xmm7
  65   palignr xmm6, xmm4, (%2+1)
  70   psadbw xmm6, xmm0
  90   paddw xmm6, xmm2
  113  paddw xmm6, xmm2
  240  movq xmm0, xmm6
  [all …]

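psadbw does the heavy lifting here: it takes byte absolute differences and horizontally sums them into two 16-bit counts per register, so one instruction per 16 pixels replaces an entire subtract/abs/add chain. The [rdi+1] load and the palignr ..., (%2+1) shift exist because this SSSE3 routine evaluates several horizontal offsets of the reference block at once. The core SAD loop for a single offset, as a sketch (names and block size illustrative):

    #include <emmintrin.h>  /* SSE2 */
    #include <stdint.h>

    /* SAD of one 16x16 block at a single offset -- illustrative only. */
    static uint32_t sad_16x16(const uint8_t *src, int src_stride,
                              const uint8_t *ref, int ref_stride) {
        __m128i acc = _mm_setzero_si128();
        for (int row = 0; row < 16; ++row) {
            __m128i s = _mm_loadu_si128((const __m128i *)(src + row * src_stride));
            __m128i r = _mm_loadu_si128((const __m128i *)(ref + row * ref_stride));
            /* psadbw: |s - r| per byte, summed into two 16-bit halves. */
            acc = _mm_add_epi32(acc, _mm_sad_epu8(s, r));
        }
        acc = _mm_add_epi32(acc, _mm_srli_si128(acc, 8));  /* fold high half */
        return (uint32_t)_mm_cvtsi128_si32(acc);
    }
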
quantize_sse2.asm
  103  movdqa xmm6, [rdx + 16]
  110  paddw xmm5, xmm6
  123  pxor xmm6, xmm6
  125  movdqa [rsp + qcoeff], xmm6
  126  movdqa [rsp + qcoeff + 16], xmm6
  204  pcmpeqw xmm2, xmm6
  205  pcmpeqw xmm3, xmm6
  207  pcmpeqw xmm6, xmm6
  208  pxor xmm2, xmm6
  209  pxor xmm3, xmm6

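Lines 204-209 are a compact mask-inversion idiom: pcmpeqw against a zeroed register marks the zero coefficients, pcmpeqw xmm6, xmm6 manufactures an all-ones register, and pxor flips the mask so set lanes mean "nonzero". The same idiom in intrinsics (a sketch; the surrounding end-of-block bookkeeping is omitted):

    #include <emmintrin.h>

    /* Per-word "is nonzero" mask over 8 quantized coefficients,
     * mirroring the pcmpeqw/pcmpeqw/pxor idiom in the hits above. */
    static __m128i nonzero_mask_epi16(__m128i qcoeff) {
        __m128i zero = _mm_setzero_si128();            /* pxor xmm6, xmm6 */
        __m128i eq0  = _mm_cmpeq_epi16(qcoeff, zero);  /* pcmpeqw xmm2, xmm6 */
        __m128i ones = _mm_cmpeq_epi16(zero, zero);    /* pcmpeqw xmm6, xmm6 */
        return _mm_xor_si128(eq0, ones);               /* pxor: invert mask */
    }

A quantizer typically ANDs a mask like this with scan-order positions and max-reduces the result to locate the last nonzero coefficient.
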
sad_sse3.asm
  163  lddqu xmm6, XMMWORD PTR [%3+1]
  167  psadbw xmm6, xmm0
  180  paddw xmm6, xmm2
  198  paddw xmm6, xmm2
  258  lddqu xmm6, XMMWORD PTR [%5]
  263  psadbw xmm6, xmm0
  278  paddw xmm6, xmm3
  295  paddw xmm6, xmm3
  397  movq xmm0, xmm6
  398  psrldq xmm6, 8
  [all …]

temporal_filter_apply_sse2.asm
  46  movd xmm6, arg(4)
  47  movdqa [rsp + strength], xmm6    ; where strength is used, all 16 bytes are read

/external/openssl/crypto/aes/asm/
vpaes-x86_64.pl
  78   ## Preserves %xmm6 - %xmm8 so you get some local vectors
  343  ## the high bits of %xmm6.
  354  movdqa %xmm0, %xmm6     # save short part
  356  movhlps %xmm4, %xmm6    # clobber low side with zeros
  361  palignr \$8,%xmm6,%xmm0
  379  ## %xmm6. The low side's rounds are the same as the
  390  movdqa %xmm0, %xmm6     # save cur_lo in xmm6
  401  movdqa %xmm6, %xmm7
  444  pxor %xmm6, %xmm6
  456  ## %xmm6: low side, d c 0 0
  [all …]

bsaes-x86_64.pl
  919  movdqu ($inp), %xmm6    # load round 1 key
  926  pshufb %xmm4, %xmm6     # .LM0
  931  pand %xmm6, %xmm8
  932  pand %xmm6, %xmm9
  940  pand %xmm6, %xmm10
  941  pand %xmm6, %xmm11
  954  pand %xmm6, %xmm12
  955  pand %xmm6, %xmm13
  964  pand %xmm6, %xmm14
  965  pand %xmm6, %xmm15
  [all …]

/external/llvm/test/MC/X86/
x86_64-xop-encoding.s
  37   vphaddwq %xmm6, %xmm2
  50   vphadduwq (%rcx,%rax), %xmm6
  69   vphaddudq %xmm6, %xmm2
  109  vphaddbw %xmm5, %xmm6
  149  vfrczps %xmm6, %xmm5
  191  vpshlq %xmm2, %xmm4, %xmm6
  197  vpshlq %xmm5, (%rdx,%rcx), %xmm6
  241  vpshaq %xmm6, (%rax,%rcx), %xmm5
  268  vprotw (%rax), %xmm3, %xmm6
  317  vprotd $43, (%rcx), %xmm6
  [all …]

x86-32-avx.s
  5   vaddss %xmm4, %xmm6, %xmm2
  9   vmulss %xmm4, %xmm6, %xmm2
  13  vsubss %xmm4, %xmm6, %xmm2
  17  vdivss %xmm4, %xmm6, %xmm2
  21  vaddsd %xmm4, %xmm6, %xmm2
  25  vmulsd %xmm4, %xmm6, %xmm2
  29  vsubsd %xmm4, %xmm6, %xmm2
  33  vdivsd %xmm4, %xmm6, %xmm2
  69  vaddps %xmm4, %xmm6, %xmm2
  73  vsubps %xmm4, %xmm6, %xmm2
  [all …]

x86_64-avx-encoding.s
  405  vcmpordps -4(%rbx,%rcx,8), %xmm6, %xmm2
  469  vcmpordpd -4(%rbx,%rcx,8), %xmm6, %xmm2
  533  vcmpordss -4(%rbx,%rcx,8), %xmm6, %xmm2
  597  vcmpordsd -4(%rbx,%rcx,8), %xmm6, %xmm2
  725  vcmpgtps -4(%rbx,%rcx,8), %xmm6, %xmm2
  757  vcmpnle_uqps -4(%rbx,%rcx,8), %xmm6, %xmm2
  789  vcmpgt_oqps -4(%rbx,%rcx,8), %xmm6, %xmm2
  917  vcmpgtpd -4(%rbx,%rcx,8), %xmm6, %xmm2
  949  vcmpnle_uqpd -4(%rbx,%rcx,8), %xmm6, %xmm2
  981  vcmpgt_oqpd -4(%rbx,%rcx,8), %xmm6, %xmm2
  [all …]

/external/libvpx/vp8/common/x86/
loopfilter_sse2.asm
  42  movdqa xmm6, xmm1                      ; q2
  46  psubusb xmm2, xmm6                     ; q3-=q2
  48  psubusb xmm4, xmm6                     ; q1-=q2
  49  psubusb xmm6, xmm3                     ; q2-=q1
  51  por xmm4, xmm6                         ; abs(q2-q1)
  68  movdqa xmm6, [rsi+2*rax]               ; p1
  72  movlps xmm6, [rsi + rcx]               ; p1
  76  movhps xmm6, [rdi + rcx]
  79  movdqa XMMWORD PTR [rsp + 48], xmm6    ; store p1
  83  movdqa xmm3, xmm6                      ; p1
  [all …]

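SSE2 has no unsigned byte absolute-difference instruction, so lines 46-51 build one: both saturating subtractions, then por (at most one operand is nonzero in any lane). As an intrinsics helper:

    #include <emmintrin.h>

    /* abs(a - b) for unsigned bytes, exactly the psubusb/psubusb/por
     * sequence in the hits above. */
    static __m128i absdiff_epu8(__m128i a, __m128i b) {
        __m128i d0 = _mm_subs_epu8(a, b);   /* psubusb: max(a-b, 0) */
        __m128i d1 = _mm_subs_epu8(b, a);   /* psubusb: max(b-a, 0) */
        return _mm_or_si128(d0, d1);        /* por */
    }

The loop filter compares such differences against its edge and interior thresholds to decide which pixels get filtered.
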
idctllm_sse2.asm
  182  movdqa xmm6, xmm2        ; a1
  190  psubw xmm6, xmm3         ; 3
  198  punpcklwd xmm4, xmm6     ; 015 011 014 010 013 009 012 008
  199  punpckhwd xmm5, xmm6     ; 115 111 114 110 113 109 112 108
  206  movdqa xmm6, xmm7        ; 107 103 106 102 105 101 104 100
  208  punpckhdq xmm6, xmm5     ; 115 111 107 103 114 110 106 102
  216  punpckldq xmm1, xmm6     ; 114 110 014 010 106 102 006 002
  217  punpckhdq xmm7, xmm6     ; 115 111 015 011 107 103 007 003
  254  movdqa xmm6, xmm2        ; a1
  262  psubw xmm6, xmm3         ; 3
  [all …]

subpixel_ssse3.asm
  62   movdqa xmm6, XMMWORD PTR [rax+128]   ; k1_k3
  89   pmaddubsw xmm2, xmm6
  116  movdqa xmm6, XMMWORD PTR [rax+128]   ; k1_k3
  144  pmaddubsw xmm2, xmm6
  205  movdqa xmm6, XMMWORD PTR [rax+128]   ; k1_k3
  230  pmaddubsw xmm2, xmm6
  248  pmaddubsw xmm2, xmm6
  280  movdqa xmm6, XMMWORD PTR [rax+128]   ; k1_k3
  297  pmaddubsw xmm2, xmm6
  306  pmaddubsw xmm3, xmm6
  [all …]

subpixel_sse2.asm
  70   movdqa xmm6, xmm1
  84   …psrldq xmm6, 3          ; xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02…
  88   … punpcklbw xmm6, xmm0   ; xx08 xx07 xx06 xx05 xx04 xx03 xx02 xx01
  91   pmullw xmm6, [rdx+48]    ; x[ 1] * h[ 1] ; Tap 4
  107  paddsw xmm4, xmm6
  195  movdqa xmm6, xmm1
  209  …psrldq xmm6, 3          ; xx xx xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02…
  213  … punpcklbw xmm6, xmm0   ; xx08 xx07 xx06 xx05 xx04 xx03 xx02 xx01
  216  pmullw xmm6, [rdx+48]    ; x[ 1] * h[ 1] ; Tap 4
  231  paddsw xmm4, xmm6
  [all …]

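Each tap of the 6-tap subpel filter follows the same three hits: punpcklbw against a zero register widens pixels to 16 bits, pmullw applies the tap weight (here tap 4, h[1], loaded from [rdx+48]), and paddsw accumulates with signed saturation. One tap as an intrinsics helper (the tap layout and final rounding are illustrative):

    #include <emmintrin.h>

    /* Accumulate one filter tap: widen 8 pixel bytes, multiply by the
     * 16-bit tap weight, add with signed saturation. Mirrors the
     * punpcklbw/pmullw/paddsw triple in the hits above. */
    static __m128i add_tap(__m128i acc, __m128i pixels8 /* bytes in low half */,
                           __m128i tap /* 8 x int16 */) {
        __m128i zero = _mm_setzero_si128();
        __m128i wide = _mm_unpacklo_epi8(pixels8, zero);   /* punpcklbw */
        return _mm_adds_epi16(acc, _mm_mullo_epi16(wide, tap));
    }

The SSSE3 variant above replaces the widen-and-multiply with pmaddubsw against paired taps (the k1_k3 constants), halving the instruction count.
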
postproc_mmx.c  (all hits in vp8_post_proc_down_and_across_xmm())
  477  movdqa xmm6, xmm1
  479  psubusw xmm6, xmm5
  483  paddusw xmm6, xmm5
  485  pcmpgtw xmm6, xmm2
  486  por xmm7, xmm6
  500  movdqa xmm6, xmm1
  502  psubusw xmm6, xmm5
  506  paddusw xmm6, xmm5
  508  pcmpgtw xmm6, xmm2
  509  por xmm7, xmm6
  [all …]

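The repeated run builds an outlier mask: a saturating difference between a neighbor row and the center, a pcmpgtw against the filter limit, and por to accumulate "too different" lanes into xmm7. The lines between the hits are elided, so the sketch below shows the general saturating-difference-then-threshold idiom rather than the file's exact sequence:

    #include <emmintrin.h>

    /* Flag 16-bit lanes where a neighbor differs from the center by
     * more than `limit`, OR-ing into a running mask. A sketch of the
     * psubusw/paddusw/pcmpgtw/por chain; details may differ from the
     * elided code. Pixel values are small, so the signed compare is
     * safe on the unsigned difference. */
    static __m128i flag_outliers_epi16(__m128i neighbor, __m128i center,
                                       __m128i limit, __m128i flags) {
        __m128i d0   = _mm_subs_epu16(neighbor, center);
        __m128i d1   = _mm_subs_epu16(center, neighbor);
        __m128i diff = _mm_or_si128(d0, d1);            /* |n - c| */
        __m128i over = _mm_cmpgt_epi16(diff, limit);    /* pcmpgtw */
        return _mm_or_si128(flags, over);               /* por xmm7, xmm6 */
    }
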
/external/libyuv/files/source/
row_win.cc
  85   movdqa xmm6, _kAddY16          (in ARGBToYRow_SSSE3)
  102  paddb xmm0, xmm6               (in ARGBToYRow_SSSE3)
  118  movdqa xmm6, _kAddY16          (in BGRAToYRow_SSSE3)
  135  paddb xmm0, xmm6               (in BGRAToYRow_SSSE3)
  151  movdqa xmm6, _kAddY16          (in ABGRToYRow_SSSE3)
  168  paddb xmm0, xmm6               (in ABGRToYRow_SSSE3)
  189  movdqa xmm6, _kARGBToV         (in ARGBToUVRow_SSSE3)
  220  pmaddubsw xmm1, xmm6  // V     (in ARGBToUVRow_SSSE3)
  221  pmaddubsw xmm3, xmm6           (in ARGBToUVRow_SSSE3)
  253  movdqa xmm6, _kBGRAToV         (in BGRAToUVRow_SSSE3)
  [all …]

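These are libyuv's RGB-to-Y/UV rows: pmaddubsw multiplies unsigned pixel bytes by signed per-channel weights and adds adjacent pairs, and the _kAddY16/paddb hits add the +16 video-range bias after packing. A sketch for four BGRA pixels; the 13/65/33 weights (scaled by 128) are the usual BT.601-style fixed-point values, assumed here rather than read from the file:

    #include <emmintrin.h>
    #include <tmmintrin.h>   /* SSSE3: pmaddubsw, phaddw */
    #include <stdint.h>
    #include <string.h>

    /* ARGB (B,G,R,A byte order in memory) -> Y for 4 pixels.
     * Weights and bias are assumptions, not read from row_win.cc. */
    static void argb_to_y_4(const uint8_t *argb, uint8_t *y) {
        const __m128i kCoeff = _mm_setr_epi8(13, 65, 33, 0, 13, 65, 33, 0,
                                             13, 65, 33, 0, 13, 65, 33, 0);
        __m128i px   = _mm_loadu_si128((const __m128i *)argb);
        __m128i sums = _mm_maddubs_epi16(px, kCoeff);  /* B*13+G*65 | R*33+A*0 */
        sums = _mm_hadd_epi16(sums, sums);             /* one sum per pixel */
        sums = _mm_srli_epi16(sums, 7);                /* undo the x128 scale */
        __m128i y8 = _mm_packus_epi16(sums, sums);     /* clamp to bytes */
        y8 = _mm_add_epi8(y8, _mm_set1_epi8(16));      /* paddb xmm0, xmm6 */
        uint32_t out = (uint32_t)_mm_cvtsi128_si32(y8);
        memcpy(y, &out, 4);
    }
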
rotate.cc  (all hits in TransposeWx8_SSSE3())
  103  movq xmm6, qword ptr [eax]
  106  punpcklbw xmm6, xmm7
  108  movdqa xmm7, xmm6
  117  punpcklwd xmm4, xmm6
  119  movdqa xmm6, xmm4
  121  palignr xmm6, xmm6, 8
  131  punpckldq xmm2, xmm6
  132  movdqa xmm6, xmm2
  133  palignr xmm6, xmm6, 8
  136  movq qword ptr [edx + esi], xmm6
  [all …]

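TransposeWx8_SSSE3 is a byte-matrix transpose: each punpcklbw/punpcklwd/punpckldq round interleaves progressively wider units until rows become columns, and palignr x, x, 8 fetches the upper half for the odd stores. The same ladder in intrinsics for one 8x8 tile (the signature is illustrative, not libyuv's):

    #include <emmintrin.h>
    #include <stdint.h>

    /* Transpose an 8x8 tile of bytes with three rounds of unpacks. */
    static void transpose_8x8(const uint8_t *src, int src_stride,
                              uint8_t *dst, int dst_stride) {
        __m128i r[8], t[4], u[4], v[4];
        for (int i = 0; i < 8; ++i)   /* movq: 8 bytes per row */
            r[i] = _mm_loadl_epi64((const __m128i *)(src + i * src_stride));

        t[0] = _mm_unpacklo_epi8(r[0], r[1]);   /* a0 b0 a1 b1 ... */
        t[1] = _mm_unpacklo_epi8(r[2], r[3]);
        t[2] = _mm_unpacklo_epi8(r[4], r[5]);
        t[3] = _mm_unpacklo_epi8(r[6], r[7]);

        u[0] = _mm_unpacklo_epi16(t[0], t[1]);  /* cols 0-3, rows 0-3 */
        u[1] = _mm_unpackhi_epi16(t[0], t[1]);  /* cols 4-7, rows 0-3 */
        u[2] = _mm_unpacklo_epi16(t[2], t[3]);  /* cols 0-3, rows 4-7 */
        u[3] = _mm_unpackhi_epi16(t[2], t[3]);  /* cols 4-7, rows 4-7 */

        v[0] = _mm_unpacklo_epi32(u[0], u[2]);  /* columns 0,1 */
        v[1] = _mm_unpackhi_epi32(u[0], u[2]);  /* columns 2,3 */
        v[2] = _mm_unpacklo_epi32(u[1], u[3]);  /* columns 4,5 */
        v[3] = _mm_unpackhi_epi32(u[1], u[3]);  /* columns 6,7 */

        for (int j = 0; j < 4; ++j) {
            _mm_storel_epi64((__m128i *)(dst + (2 * j) * dst_stride), v[j]);
            /* srli by 8 bytes plays the role of palignr x, x, 8. */
            _mm_storel_epi64((__m128i *)(dst + (2 * j + 1) * dst_stride),
                             _mm_srli_si128(v[j], 8));
        }
    }
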
convert.cc
  373  movq xmm6, QWORD PTR [ebx]    ; src_u                  (in I420ToYUY2)
  375  punpcklbw xmm6, xmm0          ; src_u, src_v mix
  376  ;movdqa xmm1, xmm6
  377  ;movdqa xmm2, xmm6
  378  ;movdqa xmm4, xmm6
  382  punpcklbw xmm1, xmm6          ; in1, src_u, in1, src_v
  388  punpcklbw xmm2, xmm6          ; in2, src_u, in2, src_v
  392  punpckhbw xmm3, xmm6          ; in1, src_u, in1, src_v again
  397  punpckhbw xmm5, xmm6          ; src_u, in2, src_v again
  487  movq xmm6, QWORD PTR [ebx]    ; src_u                  (in I420ToUYVY)
  [all …]

scale.cc
  503  movdqa xmm6, [ebp + edx]         (in ScaleRowDown8Int_SSE2)
  504  pavgb xmm4, xmm6
  505  movdqa xmm6, [ebp + edx + 16]
  506  pavgb xmm5, xmm6
  601  movdqa xmm6, _madd11             (in ScaleRowDown34_1_Int_SSSE3)
  618  pmaddubsw xmm0, xmm6
  658  movdqa xmm6, _madd11             (in ScaleRowDown34_0_Int_SSSE3)
  677  pmaddubsw xmm0, xmm6
  717  movdqa xmm6, _shuf38b            (in ScaleRowDown38_SSSE3)
  725  pshufb xmm1, xmm6
  [all …]

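pavgb computes a rounded unsigned byte average in a single instruction; the ScaleRowDown8Int_SSE2 hits use it to fold source rows together, while the pmaddubsw/_madd11 and pshufb/_shuf38b hits belong to the fractional 3/4 and 3/8 downscale kernels. The row-fold step as a minimal sketch (names illustrative; the real routine also averages horizontally):

    #include <emmintrin.h>
    #include <stdint.h>

    /* Average two rows of 16 pixels with pavgb (rounds upward). */
    static void average_rows_16(const uint8_t *row0, const uint8_t *row1,
                                uint8_t *dst) {
        __m128i a = _mm_loadu_si128((const __m128i *)row0);
        __m128i b = _mm_loadu_si128((const __m128i *)row1);
        _mm_storeu_si128((__m128i *)dst, _mm_avg_epu8(a, b));  /* pavgb */
    }
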
/external/elfutils/tests/
testfile44.expect.bz2

testfile45.expect.bz2
  1  testfile45.o: elf64-elf_x86_64
  2
  3  Disassembly of section .text:
  4
  5  0 ...

/external/openssl/crypto/bn/asm/
x86_64-mont5.pl
  78   movaps %xmm6,(%rsp)
  106  movq 16(%rax,%r10,8),%xmm6    # denoted by 7th argument
  115  pand %xmm6,%xmm2
  142  pand %xmm6,%xmm2
  222  pand %xmm6,%xmm2
  325  movaps (%rsi),%xmm6
  360  movaps %xmm6,(%rsp)
  389  movq 16(%rax,%r10,8),%xmm6    # denoted by 7th argument
  398  pand %xmm6,%xmm2
  424  pand %xmm6,%xmm2
  [all …]

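The movaps saves and restores of xmm6 appear to be the Win64 callee-save convention; the movq/pand pairs are part of mont5's cache-timing defence. Instead of indexing the power table with a secret exponent window, the code loads masks and ANDs candidate entries so the memory access pattern stays independent of the secret. The idea in miniature (a sketch; the real code lays out the table and masks quite differently):

    #include <emmintrin.h>

    /* Select table[idx] without a secret-dependent load address:
     * AND every entry with an all-ones/all-zeros mask and OR the
     * results together. */
    static __m128i ct_select(const __m128i *table, int n, int idx) {
        __m128i acc = _mm_setzero_si128();
        for (int i = 0; i < n; ++i) {
            /* -(i == idx) is -1 (all ones) or 0; no data-dependent branch. */
            __m128i mask = _mm_set1_epi32(-(i == idx));
            acc = _mm_or_si128(acc, _mm_and_si128(table[i], mask));
        }
        return acc;
    }

Every table entry is touched on every selection, so the data cache sees the same accesses regardless of idx.
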
/external/llvm/test/CodeGen/X86/
fold-xmm-zero.ll
  12  …%0 = tail call %0 asm sideeffect "foo", "={xmm0},={xmm1},={xmm2},={xmm3},={xmm4},={xmm5},={xmm6},=…
  22  …%1 = tail call %0 asm sideeffect "bar", "={xmm0},={xmm1},={xmm2},={xmm3},={xmm4},={xmm5},={xmm6},=…
  32  …%2 = tail call %0 asm sideeffect "baz", "={xmm0},={xmm1},={xmm2},={xmm3},={xmm4},={xmm5},={xmm6},=…