/external/boringssl/linux-x86_64/crypto/aes/

bsaes-x86_64.S:
  990   movdqu (%rcx),%xmm7
  999   movdqu (%rcx),%xmm6
  1052  movdqu (%rcx),%xmm6
  1111  movdqu (%rbx),%xmm14
  1114  movdqu 0(%r12),%xmm15
  1115  movdqu 16(%r12),%xmm0
  1116  movdqu 32(%r12),%xmm1
  1117  movdqu 48(%r12),%xmm2
  1118  movdqu 64(%r12),%xmm3
  1119  movdqu 80(%r12),%xmm4
  [all …]
aesni-x86_64.S:
  514  movdqu (%rdi),%xmm2
  515  movdqu 16(%rdi),%xmm3
  516  movdqu 32(%rdi),%xmm4
  517  movdqu 48(%rdi),%xmm5
  518  movdqu 64(%rdi),%xmm6
  519  movdqu 80(%rdi),%xmm7
  520  movdqu 96(%rdi),%xmm8
  521  movdqu 112(%rdi),%xmm9
  529  movdqu (%rdi),%xmm2
  532  movdqu 16(%rdi),%xmm3
  [all …]
vpaes-x86_64.S:
  28   movdqu (%r9),%xmm5
  88   movdqu (%r9),%xmm5
  119  movdqu (%r9),%xmm5
  194  movdqu (%r9),%xmm0
  225  movdqu (%rdi),%xmm0
  238  movdqu %xmm0,(%rdx)
  245  movdqu %xmm3,(%rdx)
  289  movdqu 8(%rdi),%xmm0
  321  movdqu 16(%rdi),%xmm0
  372  movdqu %xmm0,(%rdx)
  [all …]
/external/boringssl/mac-x86_64/crypto/aes/

bsaes-x86_64.S:
  988   movdqu (%rcx),%xmm7
  997   movdqu (%rcx),%xmm6
  1050  movdqu (%rcx),%xmm6
  1108  movdqu (%rbx),%xmm14
  1111  movdqu 0(%r12),%xmm15
  1112  movdqu 16(%r12),%xmm0
  1113  movdqu 32(%r12),%xmm1
  1114  movdqu 48(%r12),%xmm2
  1115  movdqu 64(%r12),%xmm3
  1116  movdqu 80(%r12),%xmm4
  [all …]
aesni-x86_64.S:
  513  movdqu (%rdi),%xmm2
  514  movdqu 16(%rdi),%xmm3
  515  movdqu 32(%rdi),%xmm4
  516  movdqu 48(%rdi),%xmm5
  517  movdqu 64(%rdi),%xmm6
  518  movdqu 80(%rdi),%xmm7
  519  movdqu 96(%rdi),%xmm8
  520  movdqu 112(%rdi),%xmm9
  528  movdqu (%rdi),%xmm2
  531  movdqu 16(%rdi),%xmm3
  [all …]
vpaes-x86_64.S:
  28   movdqu (%r9),%xmm5
  88   movdqu (%r9),%xmm5
  119  movdqu (%r9),%xmm5
  194  movdqu (%r9),%xmm0
  225  movdqu (%rdi),%xmm0
  238  movdqu %xmm0,(%rdx)
  245  movdqu %xmm3,(%rdx)
  289  movdqu 8(%rdi),%xmm0
  321  movdqu 16(%rdi),%xmm0
  372  movdqu %xmm0,(%rdx)
  [all …]
/external/boringssl/win-x86_64/crypto/aes/

bsaes-x86_64.asm:
  992   movdqu xmm7,XMMWORD[rcx]
  1001  movdqu xmm6,XMMWORD[rcx]
  1054  movdqu xmm6,XMMWORD[rcx]
  1125  movdqu xmm14,XMMWORD[rbx]
  1128  movdqu xmm15,XMMWORD[r12]
  1129  movdqu xmm0,XMMWORD[16+r12]
  1130  movdqu xmm1,XMMWORD[32+r12]
  1131  movdqu xmm2,XMMWORD[48+r12]
  1132  movdqu xmm3,XMMWORD[64+r12]
  1133  movdqu xmm4,XMMWORD[80+r12]
  [all …]
aesni-x86_64.asm:
  531  movdqu xmm2,XMMWORD[rdi]
  532  movdqu xmm3,XMMWORD[16+rdi]
  533  movdqu xmm4,XMMWORD[32+rdi]
  534  movdqu xmm5,XMMWORD[48+rdi]
  535  movdqu xmm6,XMMWORD[64+rdi]
  536  movdqu xmm7,XMMWORD[80+rdi]
  537  movdqu xmm8,XMMWORD[96+rdi]
  538  movdqu xmm9,XMMWORD[112+rdi]
  546  movdqu xmm2,XMMWORD[rdi]
  549  movdqu xmm3,XMMWORD[16+rdi]
  [all …]
/external/libvpx/libvpx/vpx_dsp/x86/

vpx_high_subpixel_8t_sse2.asm:
  137  movdqu xmm0, [rsi + %1] ;0
  138  movdqu xmm1, [rsi + rax + %1] ;1
  139  movdqu xmm6, [rsi + rdx * 2 + %1] ;6
  141  movdqu xmm7, [rsi + rdx * 2 + %1] ;7
  142  movdqu xmm2, [rsi + rax + %1] ;2
  143  movdqu xmm3, [rsi + rax * 2 + %1] ;3
  144  movdqu xmm4, [rsi + rdx + %1] ;4
  145  movdqu xmm5, [rsi + rax * 4 + %1] ;5
  149  movdqu temp, xmm4
  160  movdqu xmm5, temp
  [all …]
vpx_high_subpixel_bilinear_sse2.asm:
  117  movdqu xmm1, [rdi]
  120  movdqu [rdi], xmm0 ;store the result
  160  movdqu xmm1, [rdi]
  161  movdqu xmm3, [rdi + 16]
  165  movdqu [rdi], xmm0 ;store the result
  166  movdqu [rdi + 16], xmm2 ;store the result
  211  movdqu xmm0, [rsi] ;0
  212  movdqu xmm1, [rsi + 2*rax] ;1
  237  movdqu xmm0, [rsi] ;0
  238  movdqu xmm2, [rsi + 16]
  [all …]
highbd_variance_impl_sse2.asm:
  70   movdqu xmm1, XMMWORD PTR [rsi]
  71   movdqu xmm2, XMMWORD PTR [rdi]
  87   movdqu xmm3, XMMWORD PTR [rsi+16]
  90   movdqu xmm2, XMMWORD PTR [rdi+16]
  94   movdqu xmm1, XMMWORD PTR [rsi+rax]
  97   movdqu xmm2, XMMWORD PTR [rdi+rdx]
  101  movdqu xmm3, XMMWORD PTR [rsi+rax+16]
  104  movdqu xmm2, XMMWORD PTR [rdi+rdx+16]
  215  movdqu xmm1, XMMWORD PTR [rsi]
  216  movdqu xmm2, XMMWORD PTR [rdi]
  [all …]
vpx_subpixel_bilinear_ssse3.asm:
  102  movdqu xmm1, [rdi]
  105  movdqu [rdi], xmm0 ;store the result
  174  movdqu xmm0, [rsi] ;0
  175  movdqu xmm1, [rsi + rax] ;1
  251  movdqu xmm0, [rsi] ;0
  252  movdqu xmm1, [rsi + rax] ;1
  277  movdqu xmm0, [rsi] ;load src
  303  movdqu xmm0, [rsi] ;load src
  330  movdqu xmm0, [rsi] ;load src
  331  movdqu xmm1, [rsi + 1]
  [all …]
/external/libvpx/libvpx/third_party/libyuv/source/

row_win.cc:
  343  movdqu [edx], xmm0            in J400ToARGBRow_SSE2()
  344  movdqu [edx + 16], xmm1       in J400ToARGBRow_SSE2()
  395  movdqu xmm0, [eax]            in RGB24ToARGBRow_SSSE3()
  396  movdqu xmm1, [eax + 16]       in RGB24ToARGBRow_SSSE3()
  397  movdqu xmm3, [eax + 32]       in RGB24ToARGBRow_SSSE3()
  405  movdqu [edx + 32], xmm2       in RGB24ToARGBRow_SSSE3()
  408  movdqu [edx], xmm0            in RGB24ToARGBRow_SSSE3()
  412  movdqu [edx + 16], xmm1       in RGB24ToARGBRow_SSSE3()
  414  movdqu [edx + 48], xmm3       in RGB24ToARGBRow_SSSE3()
  434  movdqu xmm0, [eax]            in RAWToARGBRow_SSSE3()
  [all …]
scale_win.cc:
  108  movdqu xmm0, [eax]            in ScaleRowDown2_SSE2()
  109  movdqu xmm1, [eax + 16]       in ScaleRowDown2_SSE2()
  114  movdqu [edx], xmm0            in ScaleRowDown2_SSE2()
  136  movdqu xmm0, [eax]            in ScaleRowDown2Linear_SSE2()
  137  movdqu xmm1, [eax + 16]       in ScaleRowDown2Linear_SSE2()
  150  movdqu [edx], xmm0            in ScaleRowDown2Linear_SSE2()
  173  movdqu xmm0, [eax]            in ScaleRowDown2Box_SSE2()
  174  movdqu xmm1, [eax + 16]       in ScaleRowDown2Box_SSE2()
  175  movdqu xmm2, [eax + esi]      in ScaleRowDown2Box_SSE2()
  176  movdqu xmm3, [eax + esi + 16] in ScaleRowDown2Box_SSE2()
  [all …]
scale_gcc.cc:
  162  MEMOPREG(movdqu,0x00,0,3,1,xmm2) // movdqu (%0,%3,1),%%xmm2      in ScaleRowDown2Box_SSE2()
  163  MEMOPREG(movdqu,0x10,0,3,1,xmm3) // movdqu 0x10(%0,%3,1),%%xmm3  in ScaleRowDown2Box_SSE2()
  229  MEMOPREG(movdqu,0x00,0,4,1,xmm2) // movdqu (%0,%4,1),%%xmm2      in ScaleRowDown4Box_SSE2()
  230  MEMOPREG(movdqu,0x10,0,4,1,xmm3) // movdqu 0x10(%0,%4,1),%%xmm3  in ScaleRowDown4Box_SSE2()
  233  MEMOPREG(movdqu,0x00,0,4,2,xmm2) // movdqu (%0,%4,2),%%xmm2      in ScaleRowDown4Box_SSE2()
  234  MEMOPREG(movdqu,0x10,0,4,2,xmm3) // movdqu 0x10(%0,%4,2),%%xmm3  in ScaleRowDown4Box_SSE2()
  235  MEMOPREG(movdqu,0x00,0,3,1,xmm4) // movdqu (%0,%3,1),%%xmm4      in ScaleRowDown4Box_SSE2()
  236  MEMOPREG(movdqu,0x10,0,3,1,xmm5) // movdqu 0x10(%0,%3,1),%%xmm5  in ScaleRowDown4Box_SSE2()
  330  MEMOPREG(movdqu,0x00,0,3,1,xmm7) // movdqu (%0,%3),%%xmm7        in ScaleRowDown34_1_Box_SSSE3()
  339  MEMOPREG(movdqu,0x8,0,3,1,xmm7)  // movdqu 0x8(%0,%3),%%xmm7     in ScaleRowDown34_1_Box_SSSE3()
  [all …]
row_gcc.cc:
  378  MEMOPMEM(movdqu,xmm1,0x00,1,0,2) // movdqu %%xmm1,(%1,%0,2)      in RGB565ToARGBRow_SSE2()
  379  MEMOPMEM(movdqu,xmm2,0x10,1,0,2) // movdqu %%xmm2,0x10(%1,%0,2)  in RGB565ToARGBRow_SSE2()
  429  MEMOPMEM(movdqu,xmm1,0x00,1,0,2) // movdqu %%xmm1,(%1,%0,2)      in ARGB1555ToARGBRow_SSE2()
  430  MEMOPMEM(movdqu,xmm2,0x10,1,0,2) // movdqu %%xmm2,0x10(%1,%0,2)  in ARGB1555ToARGBRow_SSE2()
  467  MEMOPMEM(movdqu,xmm0,0x00,1,0,2) // movdqu %%xmm0,(%1,%0,2)      in ARGB4444ToARGBRow_SSE2()
  468  MEMOPMEM(movdqu,xmm1,0x10,1,0,2) // movdqu %%xmm1,0x10(%1,%0,2)  in ARGB4444ToARGBRow_SSE2()
  838  MEMOPREG(movdqu,0x00,0,4,1,xmm7) // movdqu (%0,%4,1),%%xmm7      in ARGBToUVRow_SSSE3()
  841  MEMOPREG(movdqu,0x10,0,4,1,xmm7) // movdqu 0x10(%0,%4,1),%%xmm7  in ARGBToUVRow_SSSE3()
  844  MEMOPREG(movdqu,0x20,0,4,1,xmm7) // movdqu 0x20(%0,%4,1),%%xmm7  in ARGBToUVRow_SSSE3()
  847  MEMOPREG(movdqu,0x30,0,4,1,xmm7) // movdqu 0x30(%0,%4,1),%%xmm7  in ARGBToUVRow_SSSE3()
  [all …]
rotate_win.cc:
  140  movdqu xmm0, [eax]               in TransposeUVWx8_SSE2()
  141  movdqu xmm1, [eax + edi]         in TransposeUVWx8_SSE2()
  147  movdqu xmm2, [eax]               in TransposeUVWx8_SSE2()
  148  movdqu xmm3, [eax + edi]         in TransposeUVWx8_SSE2()
  154  movdqu xmm4, [eax]               in TransposeUVWx8_SSE2()
  155  movdqu xmm5, [eax + edi]         in TransposeUVWx8_SSE2()
  161  movdqu xmm6, [eax]               in TransposeUVWx8_SSE2()
  162  movdqu xmm7, [eax + edi]         in TransposeUVWx8_SSE2()
  164  movdqu [esp], xmm5 // backup xmm5   in TransposeUVWx8_SSE2()
  185  movdqu xmm5, [esp] // restore xmm5  in TransposeUVWx8_SSE2()
  [all …]
/external/libvpx/libvpx/vp8/common/x86/

copy_sse2.asm:
  39  movdqu xmm0, XMMWORD PTR [rsi]
  40  movdqu xmm1, XMMWORD PTR [rsi + 16]
  41  movdqu xmm2, XMMWORD PTR [rsi + rax]
  42  movdqu xmm3, XMMWORD PTR [rsi + rax + 16]
  46  movdqu xmm4, XMMWORD PTR [rsi]
  47  movdqu xmm5, XMMWORD PTR [rsi + 16]
  48  movdqu xmm6, XMMWORD PTR [rsi + rax]
  49  movdqu xmm7, XMMWORD PTR [rsi + rax + 16]
  75  movdqu xmm0, XMMWORD PTR [rsi]
  76  movdqu xmm1, XMMWORD PTR [rsi + 16]
copy_sse3.asm:
  101  movdqu xmm0, XMMWORD PTR [src_ptr]
  102  movdqu xmm1, XMMWORD PTR [src_ptr + 16]
  103  movdqu xmm2, XMMWORD PTR [src_ptr + src_stride]
  104  movdqu xmm3, XMMWORD PTR [src_ptr + src_stride + 16]
  105  movdqu xmm4, XMMWORD PTR [end_ptr]
  106  movdqu xmm5, XMMWORD PTR [end_ptr + 16]
  107  movdqu xmm6, XMMWORD PTR [end_ptr + src_stride]
  108  movdqu xmm7, XMMWORD PTR [end_ptr + src_stride + 16]
  134  movdqu xmm0, XMMWORD PTR [src_ptr]
  135  movdqu xmm1, XMMWORD PTR [src_ptr + 16]
/external/boringssl/src/crypto/aes/asm/

vpaes-x86.pl:
  188  &movdqu ("xmm5",&QWP(0,$key));
  247  &movdqu ("xmm5",&QWP(0,$key));
  276  &movdqu ("xmm5",&QWP(0,$key));
  349  &movdqu ("xmm0",&QWP(0,$key));
  372  &movdqu ("xmm0",&QWP(0,$inp));       # load key (unaligned)
  386  &movdqu (&QWP(0,$key),"xmm0");
  393  &movdqu (&QWP(0,$key),"xmm3");
  436  &movdqu ("xmm0",&QWP(8,$inp));       # load key part 2 (very unaligned)
  467  &movdqu ("xmm0",&QWP(16,$inp));      # load key part 2 (unaligned)
  516  &movdqu (&QWP(0,$key),"xmm0");       # save last key
  [all …]
/external/boringssl/linux-x86/crypto/aes/

vpaes-x86.S:
  80   movdqu (%edx),%xmm5
  135  movdqu (%edx),%xmm5
  159  movdqu (%edx),%xmm5
  224  movdqu (%edx),%xmm0
  242  movdqu (%esi),%xmm0
  251  movdqu %xmm0,(%edx)
  256  movdqu %xmm3,(%edx)
  272  movdqu 8(%esi),%xmm0
  292  movdqu 16(%esi),%xmm0
  321  movdqu %xmm0,(%edx)
  [all …]
/external/boringssl/mac-x86/crypto/aes/

vpaes-x86.S:
  77   movdqu (%edx),%xmm5
  132  movdqu (%edx),%xmm5
  154  movdqu (%edx),%xmm5
  219  movdqu (%edx),%xmm0
  235  movdqu (%esi),%xmm0
  244  movdqu %xmm0,(%edx)
  249  movdqu %xmm3,(%edx)
  265  movdqu 8(%esi),%xmm0
  285  movdqu 16(%esi),%xmm0
  314  movdqu %xmm0,(%edx)
  [all …]
/external/boringssl/src/crypto/modes/asm/

ghash-x86.pl:
  932   &movdqu ($Hkey,&QWP(0,$Xip));
  957   &movdqu (&QWP(0,$Htbl),$Hkey);      # save H
  959   &movdqu (&QWP(16,$Htbl),$Xi);       # save H^2
  961   &movdqu (&QWP(32,$Htbl),$T2);       # save Karatsuba "salt"
  975   &movdqu ($Xi,&QWP(0,$Xip));
  985   &movdqu (&QWP(0,$Xip),$Xi);
  1001  &movdqu ($Xi,&QWP(0,$Xip));
  1003  &movdqu ($Hkey,&QWP(0,$Htbl));
  1014  &movdqu ($T1,&QWP(0,$inp));         # Ii
  1015  &movdqu ($Xn,&QWP(16,$inp));        # Ii+1
  [all …]
/external/libyuv/files/source/

row_win.cc:
  249  movdqu xmm0, [eax]                             in RGB24ToARGBRow_SSSE3()
  250  movdqu xmm1, [eax + 16]                        in RGB24ToARGBRow_SSSE3()
  251  movdqu xmm3, [eax + 32]                        in RGB24ToARGBRow_SSSE3()
  289  movdqu xmm0, [eax]                             in RAWToARGBRow_SSSE3()
  290  movdqu xmm1, [eax + 16]                        in RAWToARGBRow_SSSE3()
  291  movdqu xmm3, [eax + 32]                        in RAWToARGBRow_SSSE3()
  349  movdqu xmm0, [eax] // fetch 8 pixels of bgr565     in RGB565ToARGBRow_SSE2()
  399  movdqu xmm0, [eax] // fetch 8 pixels of 1555       in ARGB1555ToARGBRow_SSE2()
  445  movdqu xmm0, [eax] // fetch 8 pixels of bgra4444   in ARGB4444ToARGBRow_SSE2()
  702  movdqu xmm0, [eax]                             in ARGBToYRow_Unaligned_SSSE3()
  [all …]
/external/boringssl/win-x86/crypto/aes/

vpaes-x86.asm:
  88   movdqu xmm5,[edx]
  143  movdqu xmm5,[edx]
  164  movdqu xmm5,[edx]
  229  movdqu xmm0,[edx]
  244  movdqu xmm0,[esi]
  253  movdqu [edx],xmm0
  258  movdqu [edx],xmm3
  274  movdqu xmm0,[8+esi]
  294  movdqu xmm0,[16+esi]
  323  movdqu [edx],xmm0
  [all …]
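Every hit above is the SSE2 movdqu instruction: an unaligned 128-bit load or store that, unlike movdqa, does not fault when the address is not 16-byte aligned, which is why these crypto and row/scale routines use it for caller-supplied buffers (the vpaes-x86.pl comments call this out explicitly). For reference only, and not taken from any of the listed files, here is a minimal C sketch of the same access pattern written with the SSE2 intrinsics that compile down to movdqu-style moves; the function and buffer names are hypothetical.

  #include <emmintrin.h>  /* SSE2: _mm_loadu_si128 / _mm_storeu_si128 */
  #include <stddef.h>
  #include <stdint.h>

  /* Copy n bytes (assumed to be a multiple of 16) through unaligned
     128-bit loads and stores; compilers emit movdqu (or an equivalent
     unaligned move) for these intrinsics, even for misaligned pointers. */
  static void copy16_unaligned(uint8_t *dst, const uint8_t *src, size_t n) {
      for (size_t i = 0; i < n; i += 16) {
          __m128i v = _mm_loadu_si128((const __m128i *)(src + i)); /* movdqu load  */
          _mm_storeu_si128((__m128i *)(dst + i), v);               /* movdqu store */
      }
  }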