/external/llvm/test/MC/X86/ |
D | shuffle-comments.s | 3 palignr $8, %xmm0, %xmm1 4 # CHECK: xmm1 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7] 5 palignr $8, (%rax), %xmm1 6 # CHECK: xmm1 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7] 8 palignr $16, %xmm0, %xmm1 9 # CHECK: xmm1 = xmm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] 10 palignr $16, (%rax), %xmm1 11 # CHECK: xmm1 = xmm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] 13 palignr $0, %xmm0, %xmm1 14 # CHECK: xmm1 = xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] [all …]
|
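The CHECK comments in shuffle-comments.s decode palignr's byte selection. As a reference only (not part of the test), the same operation expressed with the SSSE3 intrinsic, a minimal C sketch:

    #include <tmmintrin.h>  /* SSSE3 */

    /* palignr $8, %xmm0, %xmm1: concatenate dest:src (dest high, src low) and
     * shift right by 8 bytes, so result lanes 0-7 come from xmm0[8..15] and
     * lanes 8-15 from xmm1[0..7], exactly the lane list in the CHECK line.
     * An immediate of $16 returns the first argument unchanged and $0 returns
     * the second, matching the other CHECK lines. */
    __m128i align8(__m128i dest_like, __m128i src_like) {
        return _mm_alignr_epi8(dest_like, src_like, 8);
    }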
D | x86-32-fma3.s | 5 vfmadd132pd %xmm2, %xmm5, %xmm1 9 vfmadd132pd (%eax), %xmm5, %xmm1 13 vfmadd132ps %xmm2, %xmm5, %xmm1 17 vfmadd132ps (%eax), %xmm5, %xmm1 21 vfmadd213pd %xmm2, %xmm5, %xmm1 25 vfmadd213pd (%eax), %xmm5, %xmm1 29 vfmadd213ps %xmm2, %xmm5, %xmm1 33 vfmadd213ps (%eax), %xmm5, %xmm1 37 vfmadd231pd %xmm2, %xmm5, %xmm1 41 vfmadd231pd (%eax), %xmm5, %xmm1 [all …]
|
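x86-32-fma3.s exercises the 132/213/231 fused multiply-add forms; the digits only say which of the three operands feed the multiply and which is the addend, so the destination can reuse any of them. A hedged C equivalent of the underlying operation, for orientation:

    #include <immintrin.h>  /* FMA3 */

    /* One fused multiply-add: a*b + c with a single rounding step. */
    __m128d fma_pd(__m128d a, __m128d b, __m128d c) {
        return _mm_fmadd_pd(a, b, c);
    }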
D | x86_64-fma4-encoding.s | 6 vfmaddss (%rcx), %xmm1, %xmm0, %xmm0 10 vfmaddss %xmm1, (%rcx),%xmm0, %xmm0 14 vfmaddss %xmm2, %xmm1, %xmm0, %xmm0 18 vfmaddsd (%rcx), %xmm1, %xmm0, %xmm0 22 vfmaddsd %xmm1, (%rcx),%xmm0, %xmm0 26 vfmaddsd %xmm2, %xmm1, %xmm0, %xmm0 30 vfmaddsd %xmm10, %xmm1, %xmm0, %xmm0 34 vfmaddps (%rcx), %xmm1, %xmm0, %xmm0 38 vfmaddps %xmm1, (%rcx),%xmm0, %xmm0 42 vfmaddps %xmm2, %xmm1, %xmm0, %xmm0 [all …]
|
D | x86-32-avx.s | 276 vunpckhps %xmm1, %xmm2, %xmm4 280 vunpckhpd %xmm1, %xmm2, %xmm4 284 vunpcklps %xmm1, %xmm2, %xmm4 288 vunpcklpd %xmm1, %xmm2, %xmm4 308 vcmpps $0, %xmm0, %xmm6, %xmm1 312 vcmpps $0, (%eax), %xmm6, %xmm1 316 vcmpps $7, %xmm0, %xmm6, %xmm1 320 vcmppd $0, %xmm0, %xmm6, %xmm1 324 vcmppd $0, (%eax), %xmm6, %xmm1 328 vcmppd $7, %xmm0, %xmm6, %xmm1 [all …]
|
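The $0 and $7 immediates on vcmpps/vcmppd are AVX compare predicates (0 = equal, ordered and quiet; 7 = ordered). The same compares via intrinsics, purely as an illustration:

    #include <immintrin.h>  /* AVX */

    /* vcmpps $0 <-> _CMP_EQ_OQ, vcmpps $7 <-> _CMP_ORD_Q. */
    __m128 cmp_eq_lanes(__m128 a, __m128 b) { return _mm_cmp_ps(a, b, _CMP_EQ_OQ); }
    __m128 cmp_ordered(__m128 a, __m128 b)  { return _mm_cmp_ps(a, b, _CMP_ORD_Q); }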
/external/llvm/test/CodeGen/X86/ |
D | vector-idiv-sdiv-128.ll | 21 ; SSE2-NEXT: movd %rdx, %xmm1 30 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] 31 ; SSE2-NEXT: movdqa %xmm1, %xmm0 43 ; SSE41-NEXT: movd %rdx, %xmm1 51 ; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] 63 ; AVX-NEXT: vmovq %rdx, %xmm1 71 ; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] 80 ; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027] 83 ; SSE2-NEXT: pand %xmm1, %xmm2 85 ; SSE2-NEXT: pmuludq %xmm1, %xmm3 [all …]
|
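The splatted constant 2454267027 (0x92492493) in the SSE2 output is the signed multiply-high magic for dividing by 7, applied per lane. The scalar identity, sketched in C (assuming a divisor of 7, as the constant indicates):

    #include <stdint.h>

    /* Signed division by 7 without a divide: multiply-high by the magic
     * constant, then fix up because the constant is "negative". */
    int32_t sdiv7(int32_t n) {
        int32_t q = (int32_t)(((int64_t)n * -1840700269LL) >> 32); /* 0x92492493 sign-extended */
        q += n;                  /* compensate for the negative magic */
        q >>= 2;                 /* post-shift */
        q += (uint32_t)n >> 31;  /* +1 for negative n: round toward zero */
        return q;
    }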
D | vector-shift-ashr-128.ll | 21 ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1] 25 ; SSE2-NEXT: psrlq %xmm1, %xmm2 29 ; SSE2-NEXT: psrlq %xmm1, %xmm0 40 ; SSE41-NEXT: psrlq %xmm1, %xmm3 41 ; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1] 45 ; SSE41-NEXT: psrlq %xmm1, %xmm3 55 ; AVX1-NEXT: vpsrlq %xmm1, %xmm2, %xmm3 56 ; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,0,1] 59 ; AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm1 61 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7] [all …]
|
D | vector-zext.ll | 12 ; SSE2-NEXT: pxor %xmm1, %xmm1 13 …}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],… 18 ; SSSE3-NEXT: pxor %xmm1, %xmm1 19 …}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],… 41 ; SSE2-NEXT: movdqa %xmm0, %xmm1 44 … {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2… 49 ; SSSE3-NEXT: movdqa %xmm0, %xmm1 52 … {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2… 57 ; SSE41-NEXT: movdqa %xmm0, %xmm1 59 … pmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1… [all …]
|
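vector-zext.ll contrasts the SSE2 idiom (interleave with a zeroed register) with SSE4.1's dedicated pmovzxbw. Both, sketched with intrinsics for the byte-to-word case:

    #include <smmintrin.h>  /* SSE4.1; the SSE2 intrinsics come along with it */

    /* Zero-extend the low eight bytes of v to eight 16-bit lanes. */
    __m128i zext_sse2(__m128i v)  { return _mm_unpacklo_epi8(v, _mm_setzero_si128()); } /* pxor + punpcklbw */
    __m128i zext_sse41(__m128i v) { return _mm_cvtepu8_epi16(v); }                      /* pmovzxbw */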
D | vector-shuffle-128-v4.ll | 308 ; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] 313 ; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] 322 ; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1] 323 ; SSE-NEXT: movapd %xmm1, %xmm0 328 ; AVX-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm1[1],xmm0[1] 337 ; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0] 338 ; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0] 343 ; SSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0] 344 ; SSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0] 349 ; SSSE3-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0] [all …]
|
D | vector-popcnt-128.ll | 12 ; SSE2-NEXT: movdqa %xmm0, %xmm1 13 ; SSE2-NEXT: psrlq $1, %xmm1 14 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm1 15 ; SSE2-NEXT: psubq %xmm1, %xmm0 16 ; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [3689348814741910323,3689348814741910323] 18 ; SSE2-NEXT: pand %xmm1, %xmm2 20 ; SSE2-NEXT: pand %xmm1, %xmm0 22 ; SSE2-NEXT: movdqa %xmm0, %xmm1 23 ; SSE2-NEXT: psrlq $4, %xmm1 24 ; SSE2-NEXT: paddq %xmm0, %xmm1 [all …]
|
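The psrlq/pand/psubq opening and the 0x3333… mask (3689348814741910323) are the classic bit-parallel population count. The per-lane identity in scalar C, for reference (the vector code sums the bytes with psadbw rather than the final multiply):

    #include <stdint.h>

    uint64_t popcount64(uint64_t v) {
        v = v - ((v >> 1) & 0x5555555555555555ULL);                           /* 2-bit sums */
        v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL); /* 4-bit sums */
        v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0FULL;                           /* 8-bit sums */
        return (v * 0x0101010101010101ULL) >> 56;                             /* add the bytes */
    }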
D | vector-tzcnt-128.ll | 21 ; SSE2-NEXT: movd %rax, %xmm1 27 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] 28 ; SSE2-NEXT: movdqa %xmm1, %xmm0 37 ; SSE3-NEXT: movd %rax, %xmm1 43 ; SSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] 44 ; SSE3-NEXT: movdqa %xmm1, %xmm0 53 ; SSSE3-NEXT: movd %rax, %xmm1 59 ; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] 60 ; SSSE3-NEXT: movdqa %xmm1, %xmm0 69 ; SSE41-NEXT: movd %rax, %xmm1 [all …]
|
D | vec_cmp_sint-128.ll | 19 ; SSE2-NEXT: pcmpeqd %xmm1, %xmm0 20 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,0,3,2] 21 ; SSE2-NEXT: pand %xmm1, %xmm0 26 ; SSE41-NEXT: pcmpeqq %xmm1, %xmm0 31 ; SSE42-NEXT: pcmpeqq %xmm1, %xmm0 36 ; AVX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0 41 ; XOP-NEXT: vpcomeqq %xmm1, %xmm0, %xmm0 51 ; SSE-NEXT: pcmpeqd %xmm1, %xmm0 56 ; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 61 ; XOP-NEXT: vpcomeqd %xmm1, %xmm0, %xmm0 [all …]
|
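Without SSE4.1's pcmpeqq, the SSE2 path builds 64-bit equality out of 32-bit compares: pcmpeqd, pshufd with [1,0,3,2], pand. The same trick with intrinsics, as an illustration:

    #include <emmintrin.h>  /* SSE2 */

    /* A qword compares equal only if both of its 32-bit halves did. */
    __m128i cmpeq_epi64_sse2(__m128i a, __m128i b) {
        __m128i eq32 = _mm_cmpeq_epi32(a, b);
        __m128i swap = _mm_shuffle_epi32(eq32, _MM_SHUFFLE(2, 3, 0, 1)); /* swap halves in each qword */
        return _mm_and_si128(eq32, swap);
    }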
D | vector-idiv-udiv-128.ll | 22 ; SSE2-NEXT: movd %rcx, %xmm1 32 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] 33 ; SSE2-NEXT: movdqa %xmm1, %xmm0 46 ; SSE41-NEXT: movd %rcx, %xmm1 55 ; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] 68 ; AVX-NEXT: vmovq %rcx, %xmm1 77 ; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] 86 ; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [613566757,613566757,613566757,613566757] 88 ; SSE2-NEXT: pmuludq %xmm1, %xmm2 90 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3] [all …]
|
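Here the splatted 613566757 (0x24924925) is the unsigned magic for dividing by 7; because the exact reciprocal needs 33 bits, the multiply-high is followed by a subtract/shift/add fixup. The scalar form (again assuming a divisor of 7):

    #include <stdint.h>

    uint32_t udiv7(uint32_t n) {
        uint32_t q = (uint32_t)(((uint64_t)n * 613566757u) >> 32); /* multiply-high */
        uint32_t t = (n - q) >> 1;                                 /* recover the 33rd bit */
        return (t + q) >> 2;                                       /* post-shift */
    }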
D | vec_cmp_uint-128.ll | 19 ; SSE2-NEXT: pcmpeqd %xmm1, %xmm0 20 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,0,3,2] 21 ; SSE2-NEXT: pand %xmm1, %xmm0 26 ; SSE41-NEXT: pcmpeqq %xmm1, %xmm0 31 ; SSE42-NEXT: pcmpeqq %xmm1, %xmm0 36 ; AVX-NEXT: vpcmpeqq %xmm1, %xmm0, %xmm0 41 ; XOP-NEXT: vpcomeqq %xmm1, %xmm0, %xmm0 51 ; SSE-NEXT: pcmpeqd %xmm1, %xmm0 56 ; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0 61 ; XOP-NEXT: vpcomeqd %xmm1, %xmm0, %xmm0 [all …]
|
D | vector-shift-lshr-128.ll | 20 ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1] 23 ; SSE2-NEXT: psrlq %xmm1, %xmm0 31 ; SSE41-NEXT: psrlq %xmm1, %xmm2 32 ; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] 33 ; SSE41-NEXT: psrlq %xmm1, %xmm0 39 ; AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm2 40 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] 41 ; AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0 47 ; AVX2-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0 53 ; XOPAVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm1 [all …]
|
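psrlq applies one count to every lane, so the pre-AVX2 lowering shifts twice, once with each lane's count moved into the low qword by pshufd [2,3,0,1], and blends the two results. Roughly, in intrinsics (a sketch of the pattern, not the test's exact register choices):

    #include <emmintrin.h>  /* SSE2 */

    __m128i srlv_epi64_sse2(__m128i v, __m128i counts) {
        __m128i c1 = _mm_shuffle_epi32(counts, _MM_SHUFFLE(1, 0, 3, 2)); /* lane 1's count to the low qword */
        __m128i r0 = _mm_srl_epi64(v, counts);                           /* both lanes shifted by count 0 */
        __m128i r1 = _mm_srl_epi64(v, c1);                               /* both lanes shifted by count 1 */
        return _mm_castpd_si128(_mm_move_sd(_mm_castsi128_pd(r1),        /* keep lane 1 of r1 ... */
                                            _mm_castsi128_pd(r0)));      /* ... and lane 0 of r0 */
    }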
D | vector-shift-shl-128.ll | 20 ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1] 23 ; SSE2-NEXT: psllq %xmm1, %xmm0 31 ; SSE41-NEXT: psllq %xmm1, %xmm2 32 ; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] 33 ; SSE41-NEXT: psllq %xmm1, %xmm0 39 ; AVX1-NEXT: vpsllq %xmm1, %xmm0, %xmm2 40 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] 41 ; AVX1-NEXT: vpsllq %xmm1, %xmm0, %xmm0 47 ; AVX2-NEXT: vpsllvq %xmm1, %xmm0, %xmm0 52 ; XOPAVX1-NEXT: vpshlq %xmm1, %xmm0, %xmm0 [all …]
|
D | insertelement-zero.ll | 15 ; SSE2-NEXT: xorpd %xmm1, %xmm1 16 ; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] 21 ; SSE3-NEXT: xorpd %xmm1, %xmm1 22 ; SSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] 27 ; SSSE3-NEXT: xorpd %xmm1, %xmm1 28 ; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] 33 ; SSE41-NEXT: xorpd %xmm1, %xmm1 34 ; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1] 39 ; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1 40 ; AVX-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1] [all …]
|
D | vector-rotate-128.ll | 20 ; SSE2-NEXT: psubq %xmm1, %xmm2 21 ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1] 25 ; SSE2-NEXT: psllq %xmm1, %xmm3 28 ; SSE2-NEXT: movdqa %xmm0, %xmm1 29 ; SSE2-NEXT: psrlq %xmm3, %xmm1 31 ; SSE2-NEXT: movsd {{.*#+}} xmm1 = xmm0[0],xmm1[1] 32 ; SSE2-NEXT: orpd %xmm4, %xmm1 33 ; SSE2-NEXT: movapd %xmm1, %xmm0 39 ; SSE41-NEXT: psubq %xmm1, %xmm2 41 ; SSE41-NEXT: psllq %xmm1, %xmm3 [all …]
|
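There is no packed 64-bit rotate before AVX-512, so the lowering combines a left shift by c with a right shift by 64-c (the psubq from a splatted 64) and ORs them. The per-lane identity in C:

    #include <stdint.h>

    uint64_t rotl64(uint64_t x, unsigned c) {
        c &= 63;                                  /* keep c == 0 well defined */
        return (x << c) | (x >> ((64 - c) & 63));
    }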
D | vector-lzcnt-128.ll | 22 ; SSE2-NEXT: movd %rax, %xmm1 29 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] 30 ; SSE2-NEXT: movdqa %xmm1, %xmm0 40 ; SSE3-NEXT: movd %rax, %xmm1 47 ; SSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] 48 ; SSE3-NEXT: movdqa %xmm1, %xmm0 58 ; SSSE3-NEXT: movd %rax, %xmm1 65 ; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0] 66 ; SSSE3-NEXT: movdqa %xmm1, %xmm0 76 ; SSE41-NEXT: movd %rax, %xmm1 [all …]
|
D | vector-trunc.ll | 12 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] 14 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] 16 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3] 17 ; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0] 22 ; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] 24 ; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] 26 ; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3] 27 ; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0] 32 ; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2] 34 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7] [all …]
|
/external/swiftshader/third_party/LLVM/test/MC/X86/ |
D | x86-32-fma3.s | 5 vfmadd132pd %xmm2, %xmm5, %xmm1 9 vfmadd132pd (%eax), %xmm5, %xmm1 13 vfmadd132ps %xmm2, %xmm5, %xmm1 17 vfmadd132ps (%eax), %xmm5, %xmm1 21 vfmadd213pd %xmm2, %xmm5, %xmm1 25 vfmadd213pd (%eax), %xmm5, %xmm1 29 vfmadd213ps %xmm2, %xmm5, %xmm1 33 vfmadd213ps (%eax), %xmm5, %xmm1 37 vfmadd231pd %xmm2, %xmm5, %xmm1 41 vfmadd231pd (%eax), %xmm5, %xmm1 [all …]
|
D | x86-32-avx.s | 276 vunpckhps %xmm1, %xmm2, %xmm4 280 vunpckhpd %xmm1, %xmm2, %xmm4 284 vunpcklps %xmm1, %xmm2, %xmm4 288 vunpcklpd %xmm1, %xmm2, %xmm4 308 vcmpps $0, %xmm0, %xmm6, %xmm1 312 vcmpps $0, (%eax), %xmm6, %xmm1 316 vcmpps $7, %xmm0, %xmm6, %xmm1 320 vcmppd $0, %xmm0, %xmm6, %xmm1 324 vcmppd $0, (%eax), %xmm6, %xmm1 328 vcmppd $7, %xmm0, %xmm6, %xmm1 [all …]
|
/external/boringssl/linux-x86_64/crypto/cipher_extra/ |
D | aes128gcmsiv-x86_64.S | 41 vpclmulqdq $0x00,%xmm1,%xmm0,%xmm2 42 vpclmulqdq $0x11,%xmm1,%xmm0,%xmm5 43 vpclmulqdq $0x10,%xmm1,%xmm0,%xmm3 44 vpclmulqdq $0x01,%xmm1,%xmm0,%xmm4 70 vmovdqa %xmm0,%xmm1 96 vmovdqa %xmm0,%xmm1 132 vmovdqa (%rcx),%xmm1 139 vpxor %xmm1,%xmm0,%xmm0 179 vpxor %xmm5,%xmm3,%xmm1 188 vpxor %xmm1,%xmm1,%xmm1 [all …]
|
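The four vpclmulqdq immediates (0x00, 0x11, 0x10, 0x01) pick which 64-bit halves are multiplied, producing the full 128x128-bit carry-less product that GHASH reduces afterwards. A self-contained intrinsics sketch of that schoolbook multiply (illustrative; not the file's register allocation or its reduction step):

    #include <emmintrin.h>
    #include <wmmintrin.h>  /* PCLMUL */

    /* 256-bit carry-less product of a and b, split into low/high 128 bits. */
    void clmul_256(__m128i a, __m128i b, __m128i *lo, __m128i *hi) {
        __m128i ll  = _mm_clmulepi64_si128(a, b, 0x00);                  /* a_lo * b_lo */
        __m128i hh  = _mm_clmulepi64_si128(a, b, 0x11);                  /* a_hi * b_hi */
        __m128i mid = _mm_xor_si128(_mm_clmulepi64_si128(a, b, 0x10),    /* a_lo * b_hi */
                                    _mm_clmulepi64_si128(a, b, 0x01));   /* a_hi * b_lo */
        *lo = _mm_xor_si128(ll, _mm_slli_si128(mid, 8));  /* low half of the cross terms */
        *hi = _mm_xor_si128(hh, _mm_srli_si128(mid, 8));  /* high half of the cross terms */
    }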
/external/boringssl/mac-x86_64/crypto/cipher_extra/ |
D | aes128gcmsiv-x86_64.S | 41 vpclmulqdq $0x00,%xmm1,%xmm0,%xmm2 42 vpclmulqdq $0x11,%xmm1,%xmm0,%xmm5 43 vpclmulqdq $0x10,%xmm1,%xmm0,%xmm3 44 vpclmulqdq $0x01,%xmm1,%xmm0,%xmm4 70 vmovdqa %xmm0,%xmm1 96 vmovdqa %xmm0,%xmm1 132 vmovdqa (%rcx),%xmm1 139 vpxor %xmm1,%xmm0,%xmm0 179 vpxor %xmm5,%xmm3,%xmm1 188 vpxor %xmm1,%xmm1,%xmm1 [all …]
|
/external/swiftshader/third_party/LLVM/test/CodeGen/X86/ |
D | sse-minmax.ll | 19 ; CHECK-NEXT: maxsd %xmm1, %xmm0 22 ; UNSAFE-NEXT: maxsd %xmm1, %xmm0 25 ; FINITE-NEXT: maxsd %xmm1, %xmm0 34 ; CHECK-NEXT: minsd %xmm1, %xmm0 37 ; UNSAFE-NEXT: minsd %xmm1, %xmm0 40 ; FINITE-NEXT: minsd %xmm1, %xmm0 49 ; CHECK-NEXT: minsd %xmm0, %xmm1 50 ; CHECK-NEXT: movap{{[sd]}} %xmm1, %xmm0 53 ; UNSAFE-NEXT: minsd %xmm0, %xmm1 54 ; UNSAFE-NEXT: movap{{[sd]}} %xmm1, %xmm0 [all …]
|
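minsd/maxsd are not symmetric: when the compare is unordered (a NaN) or both inputs are zero they return their second source, which is why sse-minmax.ll pins down operand order and adds the movap{{[sd]}} copies for the commuted forms. The C pattern that maps onto a plain maxsd, for orientation:

    /* Falls through to y on NaN or a +0.0 vs -0.0 tie, like maxsd with y as
     * the second source. */
    double max_like_maxsd(double x, double y) {
        return x > y ? x : y;
    }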
/external/boringssl/win-x86_64/crypto/cipher_extra/ |
D | aes128gcmsiv-x86_64.asm | 46 vpclmulqdq xmm2,xmm0,xmm1,0x00 47 vpclmulqdq xmm5,xmm0,xmm1,0x11 48 vpclmulqdq xmm3,xmm0,xmm1,0x10 49 vpclmulqdq xmm4,xmm0,xmm1,0x01 82 vmovdqa xmm1,xmm0 117 vmovdqa xmm1,xmm0 166 vmovdqa xmm1,XMMWORD[rcx] 173 vpxor xmm0,xmm0,xmm1 213 vpxor xmm1,xmm3,xmm5 222 vpxor xmm1,xmm1,xmm1 [all …]
|