/external/llvm/test/MC/X86/
x86_64-xop-encoding.s
     34  vphaddwq (%rcx), %xmm4
     45  vphaddwd %xmm3, %xmm4
     66  vphaddudq 8(%rcx,%rax), %xmm4
     82  vphaddubq (%rcx), %xmm4
     98  vphadddq (%rdx), %xmm4
    101  vphadddq %xmm4, %xmm5
    191  vpshlq %xmm2, %xmm4, %xmm6
    235  vpshaq %xmm4, %xmm4, %xmm4
    246  vpshad %xmm5, %xmm4, %xmm0
    260  vpshab (%rcx), %xmm4, %xmm0
    [all …]
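The vphadd* mnemonics above are AMD XOP widening horizontal adds. As a reference point, a minimal scalar model of vphaddwd, sketching only its documented semantics (not the encodings this test checks):

    #include <stdint.h>

    /* Scalar model of XOP vphaddwd: each adjacent pair of signed 16-bit
       lanes is sign-extended and summed into one signed 32-bit lane. */
    static void vphaddwd_model(const int16_t src[8], int32_t dst[4]) {
        for (int i = 0; i < 4; i++)
            dst[i] = (int32_t)src[2 * i] + (int32_t)src[2 * i + 1];
    }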
/external/boringssl/src/crypto/aes/asm/
vpaes-x86_64.pl
    107  movdqa %xmm13, %xmm4        # 4 : sb1u
    109  pshufb %xmm2, %xmm4         # 4 = sb1u
    111  pxor %xmm5, %xmm4           # 4 = sb1u + k
    113  pxor %xmm4, %xmm0           # 0 = A
    116  movdqa (%r11,%r10), %xmm4   # .Lk_mc_backward[]
    124  pshufb %xmm4, %xmm3         # 3 = D
    143  movdqa %xmm10, %xmm4        # 4 : 1/j
    145  pshufb %xmm0, %xmm4         # 4 = 1/j
    147  pxor %xmm5, %xmm4           # 4 = jak = 1/j + a/k
    151  pshufb %xmm4, %xmm3         # 3 = 1/jak
    [all …]
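The pshufb/pxor pattern in these hits is the core of the vector-permute AES technique: bytes are split into nibbles and pshufb acts as a 16-entry table lookup. A minimal sketch with SSSE3 intrinsics, using placeholder tables rather than the real vpaes sbox constants:

    #include <tmmintrin.h>  /* SSSE3: _mm_shuffle_epi8 */

    /* Nibble-split table lookup in the vpaes style: index a 16-byte
       table with each lane's low 4 bits, then combine the two partial
       lookups with pxor (the "sb1u + k" style steps above). */
    static __m128i lookup_16x2(__m128i x, __m128i tab_lo, __m128i tab_hi)
    {
        const __m128i mask = _mm_set1_epi8(0x0F);
        __m128i lo = _mm_and_si128(x, mask);                    /* low nibbles  */
        __m128i hi = _mm_and_si128(_mm_srli_epi16(x, 4), mask); /* high nibbles */
        return _mm_xor_si128(_mm_shuffle_epi8(tab_lo, lo),
                             _mm_shuffle_epi8(tab_hi, hi));
    }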
/external/libvpx/libvpx/vp8/common/x86/
idctllm_sse2.asm
     33  movd xmm4, [rax]
     36  pinsrw xmm4, [rax+32], 4
     39  pmullw xmm4, xmm5
     51  pshuflw xmm4, xmm4, 00000000b
     52  pshufhw xmm4, xmm4, 00000000b
     55  paddw xmm4, [GLOBAL(fours)]
     57  psraw xmm4, 3
     71  paddw xmm0, xmm4
     72  paddw xmm1, xmm4
     73  paddw xmm2, xmm4
    [all …]
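The pmullw / paddw fours / psraw 3 run is dequantization followed by the IDCT's (x + 4) >> 3 rounding. A sketch of just that sequence with SSE2 intrinsics, not the full inverse transform:

    #include <emmintrin.h>

    /* coeff * dq per 16-bit lane, then round: (x + 4) >> 3 */
    static __m128i dequant_round(__m128i coeff, __m128i dq)
    {
        __m128i x = _mm_mullo_epi16(coeff, dq);   /* pmullw         */
        x = _mm_add_epi16(x, _mm_set1_epi16(4));  /* paddw "fours"  */
        return _mm_srai_epi16(x, 3);              /* psraw xmm4, 3  */
    }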
subpixel_sse2.asm
     68  movdqa xmm4, xmm1
     75  …psrldq xmm4, 1 ; xx 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00…
     78  … punpcklbw xmm4, xmm0 ; xx06 xx05 xx04 xx03 xx02 xx01 xx00 xx-1
     81  pmullw xmm4, XMMWORD PTR [rdx+16] ; x[-1] * H[-1]; Tap 2
    104  paddsw xmm4, xmm7
    105  paddsw xmm4, xmm5
    107  paddsw xmm4, xmm3
    108  paddsw xmm4, xmm6
    110  paddsw xmm4, xmm1
    111  paddsw xmm4, [GLOBAL(rd)]
    [all …]
iwalsh_sse2.asm
     36  movdqa xmm4, xmm0
     38  punpckhqdq xmm4, xmm3   ;c1 b1
     40  movdqa xmm1, xmm4       ;c1 b1
     41  paddw xmm4, xmm0        ;d1+c1 a1+b1 aka op[4] op[0]
     49  movdqa xmm3, xmm4       ; 13 12 11 10 03 02 01 00
     50  punpcklwd xmm4, xmm0    ; 23 03 22 02 21 01 20 00
     52  movdqa xmm1, xmm4       ; 23 03 22 02 21 01 20 00
     53  punpcklwd xmm4, xmm3    ; 31 21 11 01 30 20 10 00
     58  movdqa xmm3, xmm4       ;ip[4] ip[0]
     62  paddw xmm4, xmm2        ;ip[4]+ip[8] ip[0]+ip[12] aka b1 a1
    [all …]
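The a1/b1/c1/d1 names in the comments are the inverse Walsh-Hadamard butterfly; the punpck* hits are only the transpose between the two passes. The butterfly itself as scalar C, following those comments (column stride 4, matching the op[0]/op[4] references):

    #include <stdint.h>

    /* One column butterfly of the 4x4 inverse WHT; col is 0..3. */
    static void iwalsh_butterfly(const int16_t ip[16], int16_t op[16], int col)
    {
        int a1 = ip[col + 0] + ip[col + 12];
        int b1 = ip[col + 4] + ip[col + 8];
        int c1 = ip[col + 4] - ip[col + 8];
        int d1 = ip[col + 0] - ip[col + 12];
        op[col + 0]  = (int16_t)(a1 + b1);
        op[col + 4]  = (int16_t)(c1 + d1);
        op[col + 8]  = (int16_t)(a1 - b1);
        op[col + 12] = (int16_t)(d1 - c1);
    }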
loopfilter_sse2.asm
     32  movdqa xmm4, [rsi+rax]    ; q1
     38  movlps xmm4, [rsi]        ; q1
     43  movhps xmm4, [rdi]
     50  movdqa [rsp+_q1], xmm4    ; store q1
     55  movdqa xmm3, xmm4         ; q1
     60  psubusb xmm4, xmm6        ; q1-=q2
     63  por xmm4, xmm6            ; abs(q2-q1)
     67  pmaxub xmm1, xmm4
     79  movdqa xmm4, [rdi+4*rax]  ; p2
     83  movlps xmm4, [rsi]        ; p2
    [all …]
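The psubusb/por pair computes abs(q2-q1) without an absolute-value instruction: unsigned saturating subtraction zeroes whichever direction is negative, so OR-ing the two directions gives the absolute difference. The same trick as a one-liner in SSE2 intrinsics:

    #include <emmintrin.h>

    /* |a - b| per unsigned byte lane: psubusb both ways, then por. */
    static __m128i absdiff_u8(__m128i a, __m128i b)
    {
        return _mm_or_si128(_mm_subs_epu8(a, b), _mm_subs_epu8(b, a));
    }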
/external/libvpx/libvpx/vp8/encoder/x86/
dct_sse2.asm
     96  movdqa xmm4, xmm3
     98  pmaddwd xmm4, XMMWORD PTR[GLOBAL(_2217_neg5352)]  ;d1*2217 - c1*5352
    101  paddd xmm4, XMMWORD PTR[GLOBAL(_7500)]
    103  psrad xmm4, 12            ;(d1 * 2217 - c1 * 5352 + 7500)>>12
    106  packssdw xmm3, xmm4       ;op[3] op[1]
    139  pxor xmm4, xmm4           ;zero out for compare
    142  pcmpeqw xmm2, xmm4
    148  movdqa xmm4, xmm3
    150  pmaddwd xmm4, XMMWORD PTR[GLOBAL(_2217_neg5352)]  ;d1*2217 - c1*5352
    152  paddd xmm4, XMMWORD PTR[GLOBAL(_51000)]
    [all …]
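The _2217_neg5352 / _7500 / _51000 constants are VP8's forward-DCT fixed-point multipliers and rounders: the first pass computes (d1*2217 - c1*5352 + 7500) >> 12, the second uses the larger 51000 rounder with a 16-bit shift. A scalar sketch of both odd-output forms; the 14500 and 12000 companion rounders are taken from the reference C code, not from this listing:

    #include <stdint.h>

    /* Odd outputs of VP8's 4x4 forward DCT, first and second pass. */
    static void fdct_odd_pass1(int c1, int d1, int16_t *op1, int16_t *op3)
    {
        *op1 = (int16_t)((c1 * 2217 + d1 * 5352 + 14500) >> 12);
        *op3 = (int16_t)((d1 * 2217 - c1 * 5352 +  7500) >> 12);
    }

    static void fdct_odd_pass2(int c1, int d1, int16_t *op4, int16_t *op12)
    {
        *op4  = (int16_t)(((c1 * 2217 + d1 * 5352 + 12000) >> 16) + (d1 != 0));
        *op12 = (int16_t)((d1 * 2217 - c1 * 5352 + 51000) >> 16);
    }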
/external/llvm/test/CodeGen/X86/
vector-shift-ashr-128.ll
     21  ; SSE2-NEXT: movdqa %xmm2, %xmm4
     22  ; SSE2-NEXT: psrlq %xmm3, %xmm4
     24  ; SSE2-NEXT: movsd {{.*#+}} xmm4 = xmm2[0],xmm4[1]
     29  ; SSE2-NEXT: xorpd %xmm4, %xmm2
     30  ; SSE2-NEXT: psubq %xmm4, %xmm2
     39  ; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
     40  ; SSE41-NEXT: psrlq %xmm4, %xmm2
     44  ; SSE41-NEXT: psrlq %xmm4, %xmm0
     54  ; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
     55  ; AVX1-NEXT: vpsrlq %xmm4, %xmm2, %xmm2
    [all …]
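SSE2 has no variable arithmetic shift on 64-bit lanes, so the checked output synthesizes one: shift logically (psrlq), shift a sign-bit mask by the same amount, then xor/subtract (the xorpd/psubq pair) to re-extend the sign. The scalar identity being used:

    #include <stdint.h>

    /* Arithmetic >> from logical >>: m marks where the sign bit landed;
       (x ^ m) - m propagates it through the vacated high bits. */
    static int64_t ashr64(uint64_t x, unsigned s)   /* 0 <= s <= 63 */
    {
        uint64_t m = 0x8000000000000000ULL >> s;
        return (int64_t)(((x >> s) ^ m) - m);
    }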
fmaxnum.ll
    105  ; SSE-NEXT: movaps %xmm3, %xmm4
    106  ; SSE-NEXT: cmpunordss %xmm4, %xmm4
    107  ; SSE-NEXT: movaps %xmm4, %xmm5
    110  ; SSE-NEXT: andnps %xmm2, %xmm4
    111  ; SSE-NEXT: orps %xmm5, %xmm4
    123  ; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
    126  ; SSE-NEXT: movaps %xmm2, %xmm4
    127  ; SSE-NEXT: andps %xmm1, %xmm4
    131  ; SSE-NEXT: orps %xmm4, %xmm2
    134  ; SSE-NEXT: movapd %xmm0, %xmm4
    [all …]
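maxss alone does not implement fmaxnum, because it returns its second operand whenever either input is NaN; the cmpunordss / andps / andnps / orps sequence above patches the NaN lanes. The same select written with intrinsics, as a packed sketch:

    #include <xmmintrin.h>

    /* maxNum semantics: if one operand is NaN, return the other. */
    static __m128 fmaxnum_ps(__m128 x, __m128 y)
    {
        __m128 x_nan = _mm_cmpunord_ps(x, x);  /* lanes where x is NaN       */
        __m128 mx    = _mm_max_ps(y, x);       /* y-NaN lanes pass x through */
        return _mm_or_ps(_mm_and_ps(x_nan, y),
                         _mm_andnot_ps(x_nan, mx));
    }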
vector-rotate-256.ll
     16  ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
     17  ; AVX1-NEXT: vpsubq %xmm4, %xmm2, %xmm2
     19  ; AVX1-NEXT: vpsllq %xmm4, %xmm5, %xmm6
     20  ; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[2,3,0,1]
     21  ; AVX1-NEXT: vpsllq %xmm4, %xmm5, %xmm4
     22  ; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm6[0,1,2,3],xmm4[4,5,6,7]
     27  ; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1
     28  ; AVX1-NEXT: vpsrlq %xmm2, %xmm5, %xmm4
     31  ; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4,5,6,7]
     32  ; AVX1-NEXT: vpsrlq %xmm3, %xmm0, %xmm4
    [all …]
vector-rotate-128.ll
     22  ; SSE2-NEXT: movdqa %xmm0, %xmm4
     23  ; SSE2-NEXT: psllq %xmm3, %xmm4
     26  ; SSE2-NEXT: movsd {{.*#+}} xmm4 = xmm3[0],xmm4[1]
     32  ; SSE2-NEXT: orpd %xmm4, %xmm1
     43  ; SSE41-NEXT: movdqa %xmm0, %xmm4
     44  ; SSE41-NEXT: psllq %xmm1, %xmm4
     45  ; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm3[0,1,2,3],xmm4[4,5,6,7]
     51  ; SSE41-NEXT: por %xmm4, %xmm0
     88  ; X32-SSE-NEXT: movdqa %xmm0, %xmm4
     89  ; X32-SSE-NEXT: psllq %xmm3, %xmm4
    [all …]
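There is no vector rotate instruction before AVX-512, so both rotate tests lower to the shift-left / shift-right / OR pattern visible in these hits. A minimal SSE2 sketch for a uniform rotate amount, assuming 0 < r < 64:

    #include <emmintrin.h>

    /* rotl of each 64-bit lane: (x << r) | (x >> (64 - r)). */
    static __m128i rotl64(__m128i x, int r)   /* assumes 0 < r < 64 */
    {
        __m128i lo = _mm_cvtsi32_si128(r);
        __m128i hi = _mm_cvtsi32_si128(64 - r);
        return _mm_or_si128(_mm_sll_epi64(x, lo), _mm_srl_epi64(x, hi));
    }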
vselect-minmax.ll
    581  ; SSE2-NEXT: movdqa %xmm3, %xmm4
    582  ; SSE2-NEXT: pcmpgtb %xmm1, %xmm4
    588  ; SSE2-NEXT: pand %xmm4, %xmm1
    589  ; SSE2-NEXT: pandn %xmm3, %xmm4
    590  ; SSE2-NEXT: por %xmm4, %xmm1
    629  ; SSE2-NEXT: movdqa %xmm6, %xmm4
    630  ; SSE2-NEXT: pxor %xmm7, %xmm4
    638  ; SSE2-NEXT: pandn %xmm3, %xmm4
    639  ; SSE2-NEXT: por %xmm6, %xmm4
    641  ; SSE2-NEXT: movdqa %xmm4, %xmm1
    [all …]
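Plain SSE2 has no pmaxsb (that arrives with SSE4.1), so these vselect patterns lower to a compare mask plus the classic pand/pandn/por select. The same blend with intrinsics:

    #include <emmintrin.h>

    /* max of signed bytes on SSE2: mask-select a or b per lane. */
    static __m128i max_epi8_sse2(__m128i a, __m128i b)
    {
        __m128i gt = _mm_cmpgt_epi8(a, b);            /* pcmpgtb     */
        return _mm_or_si128(_mm_and_si128(gt, a),     /* pand        */
                            _mm_andnot_si128(gt, b)); /* pandn + por */
    }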
vec_minmax_uint.ll
     21  ; SSE2-NEXT: movdqa %xmm2, %xmm4
     22  ; SSE2-NEXT: pcmpgtd %xmm3, %xmm4
     23  ; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
     27  ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm4[1,1,3,3]
     41  ; SSE41-NEXT: movdqa %xmm0, %xmm4
     42  ; SSE41-NEXT: pcmpgtd %xmm3, %xmm4
     43  ; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm4[0,0,2,2]
     47  ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,3,3]
     81  ; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [2147483648,2147483648,2147483648,2147483648]
     83  ; SSE2-NEXT: pxor %xmm4, %xmm5
    [all …]
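The 2147483648 splat is the standard unsigned-compare workaround: SSE2 only has a signed pcmpgtd, but XOR-ing both operands with 0x80000000 maps unsigned order onto signed order. As intrinsics:

    #include <emmintrin.h>

    /* a > b, unsigned 32-bit, using only the signed compare. */
    static __m128i cmpgt_epu32(__m128i a, __m128i b)
    {
        const __m128i bias = _mm_set1_epi32((int)0x80000000);  /* pxor const */
        return _mm_cmpgt_epi32(_mm_xor_si128(a, bias),
                               _mm_xor_si128(b, bias));
    }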
vector-shift-ashr-256.ll
     16  ; AVX1-NEXT: vpsrlq %xmm2, %xmm3, %xmm4
     19  ; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm6[4,5,6,7]
     24  ; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
     25  ; AVX1-NEXT: vpsubq %xmm4, %xmm2, %xmm2
     26  ; AVX1-NEXT: vpsrlq %xmm1, %xmm3, %xmm4
     29  ; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5,6,7]
     52  ; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
     53  ; XOPAVX1-NEXT: vpshaq %xmm2, %xmm4, %xmm2
     76  ; AVX1-NEXT: vpsrldq {{.*#+}} xmm4 = xmm3[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,z…
     77  ; AVX1-NEXT: vpsrad %xmm4, %xmm2, %xmm4
    [all …]
/external/v8/test/cctest/
test-assembler-ia32.cc
    623  __ movaps(xmm4, xmm0);             in TEST()
    624  __ vfmadd132sd(xmm4, xmm2, xmm1);  in TEST()
    625  __ ucomisd(xmm4, xmm3);            in TEST()
    629  __ movaps(xmm4, xmm1);             in TEST()
    630  __ vfmadd213sd(xmm4, xmm0, xmm2);  in TEST()
    631  __ ucomisd(xmm4, xmm3);            in TEST()
    635  __ movaps(xmm4, xmm2);             in TEST()
    636  __ vfmadd231sd(xmm4, xmm0, xmm1);  in TEST()
    637  __ ucomisd(xmm4, xmm3);            in TEST()
    642  __ movaps(xmm4, xmm0);             in TEST()
    [all …]
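The 132/213/231 suffixes exercised by this test name which operand is overwritten and in what order the three inputs enter the multiply-add; all three compute one fused a*b + c. Their equivalences in C, with each function returning the new value of the first (destination) register:

    #include <math.h>

    /* vfmadd132sd dst,a,b : dst = dst*b + a */
    static double fmadd132(double dst, double a, double b) { return fma(dst, b, a); }
    /* vfmadd213sd dst,a,b : dst = a*dst + b */
    static double fmadd213(double dst, double a, double b) { return fma(a, dst, b); }
    /* vfmadd231sd dst,a,b : dst = a*b + dst */
    static double fmadd231(double dst, double a, double b) { return fma(a, b, dst); }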
/external/boringssl/linux-x86_64/crypto/modes/
ghash-x86_64.S
    672  pshufd $255,%xmm2,%xmm4
    677  pcmpgtd %xmm4,%xmm5
    698  movdqa %xmm3,%xmm4
    700  pslldq $8,%xmm4
    702  pxor %xmm4,%xmm0
    704  movdqa %xmm0,%xmm4
    714  pxor %xmm4,%xmm0
    718  movdqa %xmm0,%xmm4
    720  pxor %xmm4,%xmm1
    721  pxor %xmm0,%xmm4
    [all …]
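These hits are the carry-less multiply and reduction inside GHASH. The bit-serial reference algorithm from the GCM spec makes the algebra visible: multiplication in GF(2^128), reducing by x^128 + x^7 + x^2 + x + 1 (the 0xE1 constant). A scalar model, far slower than the pclmulqdq path above:

    #include <stdint.h>

    typedef struct { uint64_t hi, lo; } u128;   /* hi holds bits 127..64 */

    /* z = x * y in GF(2^128) with the GHASH polynomial, MSB-first. */
    static u128 gf128_mul(u128 x, u128 y)
    {
        u128 z = {0, 0}, v = y;
        for (int i = 0; i < 128; i++) {
            uint64_t bit = (i < 64) ? (x.hi >> (63 - i)) & 1
                                    : (x.lo >> (127 - i)) & 1;
            if (bit) { z.hi ^= v.hi; z.lo ^= v.lo; }
            uint64_t carry = v.lo & 1;            /* bit about to fall off */
            v.lo = (v.lo >> 1) | (v.hi << 63);    /* 128-bit shift right   */
            v.hi >>= 1;
            if (carry) v.hi ^= 0xE100000000000000ULL;  /* reduce           */
        }
        return z;
    }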
/external/boringssl/mac-x86_64/crypto/modes/
ghash-x86_64.S
    671  pshufd $255,%xmm2,%xmm4
    676  pcmpgtd %xmm4,%xmm5
    697  movdqa %xmm3,%xmm4
    699  pslldq $8,%xmm4
    701  pxor %xmm4,%xmm0
    703  movdqa %xmm0,%xmm4
    713  pxor %xmm4,%xmm0
    717  movdqa %xmm0,%xmm4
    719  pxor %xmm4,%xmm1
    720  pxor %xmm0,%xmm4
    [all …]
/external/libjpeg-turbo/simd/
jccolext-sse2-64.asm
    280  ; xmm0=R(02468ACE)=RE, xmm2=G(02468ACE)=GE, xmm4=B(02468ACE)=BE
    295  movdqa XMMWORD [wk(2)], xmm4        ; wk(2)=BE
    302  movdqa xmm4,xmm6
    306  pmaddwd xmm4,[rel PW_MF016_MF033]   ; xmm4=ROH*-FIX(0.168)+GOH*-FIX(0.331)
    321  paddd xmm4,xmm6
    323  paddd xmm4,xmm5
    325  psrld xmm4,SCALEBITS                ; xmm4=CbOH
    326  packssdw xmm7,xmm4                  ; xmm7=CbO
    334  movdqa xmm4,xmm6
    338  pmaddwd xmm4,[rel PW_MF016_MF033]   ; xmm4=REH*-FIX(0.168)+GEH*-FIX(0.331)
    [all …]
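PW_MF016_MF033 packs the -0.168 and -0.331 Cb weights as 16-bit fixed-point multipliers for pmaddwd, and SCALEBITS is the fixed-point precision. A scalar sketch of the Cb computation; the SCALEBITS=16 value and the off-by-one rounding guard are taken from libjpeg-turbo's C reference, not from this listing:

    #include <stdint.h>

    enum { SCALEBITS = 16, ONE_HALF = 1 << (SCALEBITS - 1) };
    #define FIX(x) ((int32_t)((x) * (1 << SCALEBITS) + 0.5))

    /* Cb = -0.16874*R - 0.33126*G + 0.5*B + 128, in fixed point.
       The "- 1" keeps B=255 from rounding up to 256. */
    static uint8_t rgb_to_cb(uint8_t r, uint8_t g, uint8_t b)
    {
        int32_t cb = -FIX(0.16874) * r - FIX(0.33126) * g + FIX(0.5) * b
                   + (128 << SCALEBITS) + ONE_HALF - 1;
        return (uint8_t)(cb >> SCALEBITS);
    }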
jccolext-sse2.asm
    294  ; xmm0=R(02468ACE)=RE, xmm2=G(02468ACE)=GE, xmm4=B(02468ACE)=BE
    309  movdqa XMMWORD [wk(2)], xmm4                ; wk(2)=BE
    316  movdqa xmm4,xmm6
    320  pmaddwd xmm4,[GOTOFF(eax,PW_MF016_MF033)]   ; xmm4=ROH*-FIX(0.168)+GOH*-FIX(0.331)
    335  paddd xmm4,xmm6
    337  paddd xmm4,xmm5
    339  psrld xmm4,SCALEBITS                        ; xmm4=CbOH
    340  packssdw xmm7,xmm4                          ; xmm7=CbO
    348  movdqa xmm4,xmm6
    352  pmaddwd xmm4,[GOTOFF(eax,PW_MF016_MF033)]   ; xmm4=REH*-FIX(0.168)+GEH*-FIX(0.331)
    [all …]
/external/boringssl/linux-x86_64/crypto/aes/
vpaes-x86_64.S
     43  movdqa %xmm13,%xmm4
     47  pxor %xmm5,%xmm4
     49  pxor %xmm4,%xmm0
     52  movdqa (%r11,%r10,1),%xmm4
     79  movdqa %xmm10,%xmm4
     83  pxor %xmm5,%xmm4
     93  movdqa -96(%r10),%xmm4
     96  pxor %xmm5,%xmm4
     99  pxor %xmm4,%xmm0
    140  movdqa -32(%r10),%xmm4
    [all …]
/external/boringssl/mac-x86_64/crypto/aes/
vpaes-x86_64.S
     43  movdqa %xmm13,%xmm4
     47  pxor %xmm5,%xmm4
     49  pxor %xmm4,%xmm0
     52  movdqa (%r11,%r10,1),%xmm4
     79  movdqa %xmm10,%xmm4
     83  pxor %xmm5,%xmm4
     93  movdqa -96(%r10),%xmm4
     96  pxor %xmm5,%xmm4
     99  pxor %xmm4,%xmm0
    140  movdqa -32(%r10),%xmm4
    [all …]
/external/boringssl/linux-x86/crypto/aes/
vpaes-x86.S
     92  movdqa 32(%ebp),%xmm4
     96  pxor %xmm5,%xmm4
     98  pxor %xmm4,%xmm0
    102  movdqa (%ebx,%ecx,1),%xmm4
    126  movdqa %xmm7,%xmm4
    130  pxor %xmm5,%xmm4
    138  movdqa 96(%ebp),%xmm4
    141  pxor %xmm5,%xmm4
    144  pxor %xmm4,%xmm0
    175  movdqa -32(%ebx),%xmm4
    [all …]
/external/boringssl/mac-x86/crypto/aes/
vpaes-x86.S
     89  movdqa 32(%ebp),%xmm4
     93  pxor %xmm5,%xmm4
     95  pxor %xmm4,%xmm0
     99  movdqa (%ebx,%ecx,1),%xmm4
    123  movdqa %xmm7,%xmm4
    127  pxor %xmm5,%xmm4
    135  movdqa 96(%ebp),%xmm4
    138  pxor %xmm5,%xmm4
    141  pxor %xmm4,%xmm0
    170  movdqa -32(%ebx),%xmm4
    [all …]
/external/boringssl/win-x86/crypto/aes/
vpaes-x86.asm
    100  movdqa xmm4,[32+ebp]
    104  pxor xmm4,xmm5
    106  pxor xmm0,xmm4
    110  movdqa xmm4,[ecx*1+ebx]
    134  movdqa xmm4,xmm7
    138  pxor xmm4,xmm5
    146  movdqa xmm4,[96+ebp]
    149  pxor xmm4,xmm5
    152  pxor xmm0,xmm4
    180  movdqa xmm4,[ebx-32]
    [all …]
/external/boringssl/win-x86_64/crypto/modes/
ghash-x86_64.asm
    698  pshufd xmm4,xmm2,255
    703  pcmpgtd xmm5,xmm4
    724  movdqa xmm4,xmm3
    726  pslldq xmm4,8
    728  pxor xmm0,xmm4
    730  movdqa xmm4,xmm0
    740  pxor xmm0,xmm4
    744  movdqa xmm4,xmm0
    746  pxor xmm1,xmm4
    747  pxor xmm4,xmm0
    [all …]