Searched refs:xmm2 (Results 1 – 25 of 451) sorted by relevance


/external/llvm/test/MC/X86/
x86-32-avx.s
5 vaddss %xmm4, %xmm6, %xmm2
9 vmulss %xmm4, %xmm6, %xmm2
13 vsubss %xmm4, %xmm6, %xmm2
17 vdivss %xmm4, %xmm6, %xmm2
21 vaddsd %xmm4, %xmm6, %xmm2
25 vmulsd %xmm4, %xmm6, %xmm2
29 vsubsd %xmm4, %xmm6, %xmm2
33 vdivsd %xmm4, %xmm6, %xmm2
37 vaddss 3735928559(%ebx,%ecx,8), %xmm2, %xmm5
41 vsubss 3735928559(%ebx,%ecx,8), %xmm2, %xmm5
[all …]
x86_64-xop-encoding.s
29 vphsubbw %xmm2, %xmm1
37 vphaddwq %xmm6, %xmm2
61 vphadduwd %xmm2, %xmm1
69 vphaddudq %xmm6, %xmm2
85 vphaddubq %xmm2, %xmm2
117 vphaddbq %xmm2, %xmm0
180 vpshlw %xmm0, %xmm1, %xmm2
183 vpshlw (%rax), %xmm1, %xmm2
186 vpshlw %xmm0, (%rax,%rcx), %xmm2
191 vpshlq %xmm2, %xmm4, %xmm6
[all …]
shuffle-comments.s
18 vpalignr $8, %xmm0, %xmm1, %xmm2
19 # CHECK: xmm2 = xmm0[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
20 vpalignr $8, (%rax), %xmm1, %xmm2
21 # CHECK: xmm2 = mem[8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7]
23 vpalignr $16, %xmm0, %xmm1, %xmm2
24 # CHECK: xmm2 = xmm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
25 vpalignr $16, (%rax), %xmm1, %xmm2
26 # CHECK: xmm2 = xmm1[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
28 vpalignr $0, %xmm0, %xmm1, %xmm2
29 # CHECK: xmm2 = xmm0[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
[all …]
/external/swiftshader/third_party/LLVM/test/MC/X86/
x86-32-avx.s
5 vaddss %xmm4, %xmm6, %xmm2
9 vmulss %xmm4, %xmm6, %xmm2
13 vsubss %xmm4, %xmm6, %xmm2
17 vdivss %xmm4, %xmm6, %xmm2
21 vaddsd %xmm4, %xmm6, %xmm2
25 vmulsd %xmm4, %xmm6, %xmm2
29 vsubsd %xmm4, %xmm6, %xmm2
33 vdivsd %xmm4, %xmm6, %xmm2
37 vaddss 3735928559(%ebx,%ecx,8), %xmm2, %xmm5
41 vsubss 3735928559(%ebx,%ecx,8), %xmm2, %xmm5
[all …]
/external/llvm/test/CodeGen/X86/
vector-lzcnt-512.ll
108 ; AVX512CD-NEXT: vextractf128 $1, %ymm0, %xmm2
109 xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4…
111 ; AVX512CD-NEXT: vpmovdb %zmm2, %xmm2
113 ; AVX512CD-NEXT: vpsubb %xmm3, %xmm2, %xmm2
118 ; AVX512CD-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
119 ; AVX512CD-NEXT: vextractf128 $1, %ymm1, %xmm2
120 xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero,xmm2[4…
122 ; AVX512CD-NEXT: vpmovdb %zmm2, %xmm2
123 ; AVX512CD-NEXT: vpsubb %xmm3, %xmm2, %xmm2
128 ; AVX512CD-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
[all …]
bswap-vector.ll
18 ; CHECK-NOSSSE3-NEXT: movdqa %xmm0, %xmm2
19 … {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1…
20 ; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,0,3,2,4,5,6,7]
21 ; CHECK-NOSSSE3-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,4,7,6]
25 ; CHECK-NOSSSE3-NEXT: packuswb %xmm2, %xmm0
51 ; CHECK-NOSSSE3-NEXT: movdqa %xmm0, %xmm2
52 … {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1…
53 ; CHECK-NOSSSE3-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,2,1,0,4,5,6,7]
54 ; CHECK-NOSSSE3-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,6,5,4]
58 ; CHECK-NOSSSE3-NEXT: packuswb %xmm2, %xmm0
[all …]
vector-shift-lshr-128.ll
21 ; SSE2-NEXT: movdqa %xmm0, %xmm2
22 ; SSE2-NEXT: psrlq %xmm3, %xmm2
24 ; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
25 ; SSE2-NEXT: movapd %xmm2, %xmm0
30 ; SSE41-NEXT: movdqa %xmm0, %xmm2
31 ; SSE41-NEXT: psrlq %xmm1, %xmm2
34 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
39 ; AVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm2
42 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
52 ; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
[all …]
vector-shift-ashr-128.ll
22 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
23 ; SSE2-NEXT: movdqa %xmm2, %xmm4
25 ; SSE2-NEXT: psrlq %xmm1, %xmm2
26 ; SSE2-NEXT: movsd {{.*#+}} xmm4 = xmm2[0],xmm4[1]
27 ; SSE2-NEXT: movdqa %xmm0, %xmm2
28 ; SSE2-NEXT: psrlq %xmm3, %xmm2
30 ; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
31 ; SSE2-NEXT: xorpd %xmm4, %xmm2
32 ; SSE2-NEXT: psubq %xmm4, %xmm2
33 ; SSE2-NEXT: movdqa %xmm2, %xmm0
[all …]
vector-zext.ll
42 ; SSE2-NEXT: pxor %xmm2, %xmm2
43 …}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],
44 …m1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13…
50 ; SSSE3-NEXT: pxor %xmm2, %xmm2
51 …}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],
52 …m1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13…
58 ; SSE41-NEXT: pxor %xmm2, %xmm2
60 …m1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13…
119 ; SSE2-NEXT: pxor %xmm2, %xmm2
120 …}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],
[all …]
vector-shift-shl-128.ll
21 ; SSE2-NEXT: movdqa %xmm0, %xmm2
22 ; SSE2-NEXT: psllq %xmm3, %xmm2
24 ; SSE2-NEXT: movsd {{.*#+}} xmm2 = xmm0[0],xmm2[1]
25 ; SSE2-NEXT: movapd %xmm2, %xmm0
30 ; SSE41-NEXT: movdqa %xmm0, %xmm2
31 ; SSE41-NEXT: psllq %xmm1, %xmm2
34 ; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
39 ; AVX1-NEXT: vpsllq %xmm1, %xmm0, %xmm2
42 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
68 ; X32-SSE-NEXT: movdqa %xmm0, %xmm2
[all …]
vector-shift-lshr-256.ll
14 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
16 ; AVX1-NEXT: vpsrlq %xmm2, %xmm3, %xmm4
17 ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
18 ; AVX1-NEXT: vpsrlq %xmm2, %xmm3, %xmm2
19 ; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4,5,6,7]
24 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
34 ; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
36 ; XOPAVX1-NEXT: vpsubq %xmm2, %xmm3, %xmm2
38 ; XOPAVX1-NEXT: vpshlq %xmm2, %xmm4, %xmm2
41 ; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
[all …]
vector-trunc.ll
16 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
26 ; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
36 ; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
42 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
43 ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
45 ; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
46 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
47 ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
49 ; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
77 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],x…
[all …]
vector-idiv-udiv-128.ll
87 ; SSE2-NEXT: movdqa %xmm0, %xmm2
88 ; SSE2-NEXT: pmuludq %xmm1, %xmm2
89 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,3,2,3]
94 ; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
95 ; SSE2-NEXT: psubd %xmm2, %xmm0
97 ; SSE2-NEXT: paddd %xmm2, %xmm0
104 ; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
106 ; SSE41-NEXT: pmuludq %xmm2, %xmm3
119 ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
121 ; AVX1-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
[all …]
vector-tzcnt-128.ll
241 ; SSE2-NEXT: pxor %xmm2, %xmm2
242 ; SSE2-NEXT: psubd %xmm0, %xmm2
243 ; SSE2-NEXT: pand %xmm0, %xmm2
244 ; SSE2-NEXT: psubd {{.*}}(%rip), %xmm2
245 ; SSE2-NEXT: movdqa %xmm2, %xmm0
248 ; SSE2-NEXT: psubd %xmm0, %xmm2
250 ; SSE2-NEXT: movdqa %xmm2, %xmm3
252 ; SSE2-NEXT: psrld $2, %xmm2
253 ; SSE2-NEXT: pand %xmm0, %xmm2
254 ; SSE2-NEXT: paddd %xmm3, %xmm2
[all …]
vector-shift-ashr-256.ll
14 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
16 ; AVX1-NEXT: vpsrlq %xmm2, %xmm3, %xmm4
17 ; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[2,3,0,1]
21 ; AVX1-NEXT: vpsrlq %xmm2, %xmm6, %xmm2
23 ; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm5[4,5,6,7]
24 ; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm2
25 ; AVX1-NEXT: vpsubq %xmm4, %xmm2, %xmm2
35 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
49 ; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
51 ; XOPAVX1-NEXT: vpsubq %xmm2, %xmm3, %xmm2
[all …]
vector-lzcnt-128.ll
270 ; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
271 ; SSE2-NEXT: movd %xmm2, %eax
275 ; SSE2-NEXT: movd %eax, %xmm2
276 ; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
289 ; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
302 ; SSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
303 ; SSE3-NEXT: movd %xmm2, %eax
307 ; SSE3-NEXT: movd %eax, %xmm2
308 ; SSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
321 ; SSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
[all …]
vector-idiv-sdiv-128.ll
81 ; SSE2-NEXT: movdqa %xmm0, %xmm2
82 ; SSE2-NEXT: psrad $31, %xmm2
83 ; SSE2-NEXT: pand %xmm1, %xmm2
89 ; SSE2-NEXT: paddd %xmm1, %xmm2
95 ; SSE2-NEXT: psubd %xmm2, %xmm1
107 ; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
109 ; SSE41-NEXT: pmuldq %xmm2, %xmm3
124 ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
126 ; AVX1-NEXT: vpmuldq %xmm2, %xmm3, %xmm2
129 ; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
[all …]
fminnum.ll
20 ; SSE: movaps %xmm0, %xmm2
21 ; SSE-NEXT: cmpunordss %xmm2, %xmm2
22 ; SSE-NEXT: movaps %xmm2, %xmm3
25 ; SSE-NEXT: andnps %xmm1, %xmm2
26 ; SSE-NEXT: orps %xmm3, %xmm2
27 ; SSE-NEXT: movaps %xmm2, %xmm0
30 ; AVX: vminss %xmm0, %xmm1, %xmm2
32 ; AVX-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
42 ; SSE: movapd %xmm0, %xmm2
43 ; SSE-NEXT: cmpunordsd %xmm2, %xmm2
[all …]
fmaxnum.ll
20 ; SSE: movaps %xmm0, %xmm2
21 ; SSE-NEXT: cmpunordss %xmm2, %xmm2
22 ; SSE-NEXT: movaps %xmm2, %xmm3
25 ; SSE-NEXT: andnps %xmm1, %xmm2
26 ; SSE-NEXT: orps %xmm3, %xmm2
27 ; SSE-NEXT: movaps %xmm2, %xmm0
30 ; AVX: vmaxss %xmm0, %xmm1, %xmm2
32 ; AVX-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
49 ; SSE: movapd %xmm0, %xmm2
50 ; SSE-NEXT: cmpunordsd %xmm2, %xmm2
[all …]
vector-shift-shl-256.ll
15 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
17 ; AVX1-NEXT: vpsllq %xmm2, %xmm3, %xmm4
18 ; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
19 ; AVX1-NEXT: vpsllq %xmm2, %xmm3, %xmm2
20 ; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm4[0,1,2,3],xmm2[4,5,6,7]
25 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
35 ; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
37 ; XOPAVX1-NEXT: vpshlq %xmm2, %xmm3, %xmm2
39 ; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
58 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
[all …]
/external/libvpx/libvpx/vp8/common/x86/
idctllm_sse2.asm
61 movq xmm2, [rax+2*rdx]
66 punpcklbw xmm2, xmm5
73 paddw xmm2, xmm4
79 packuswb xmm2, xmm5
88 movq [rax], xmm2
126 ; note the transpose of xmm1 and xmm2, necessary for shuffle
129 movdqa xmm2, [rax+16]
141 pmullw xmm2, [rdx+16]
154 movdqa xmm4, xmm2
155 punpckldq xmm2, xmm3
[all …]
/external/boringssl/linux-x86/crypto/fipsmodule/
aesni-x86.S
11 movups (%eax),%xmm2
17 xorps %xmm0,%xmm2
27 movups %xmm2,(%eax)
28 pxor %xmm2,%xmm2
39 movups (%eax),%xmm2
45 xorps %xmm0,%xmm2
55 movups %xmm2,(%eax)
56 pxor %xmm2,%xmm2
66 xorps %xmm0,%xmm2
94 xorps %xmm0,%xmm2
[all …]
/external/boringssl/mac-x86/crypto/fipsmodule/
aesni-x86.S
10 movups (%eax),%xmm2
16 xorps %xmm0,%xmm2
26 movups %xmm2,(%eax)
27 pxor %xmm2,%xmm2
36 movups (%eax),%xmm2
42 xorps %xmm0,%xmm2
52 movups %xmm2,(%eax)
53 pxor %xmm2,%xmm2
61 xorps %xmm0,%xmm2
87 xorps %xmm0,%xmm2
[all …]
/external/boringssl/src/crypto/fipsmodule/aes/asm/
vpaes-x86_64.pl
91 movdqa .Lk_ipt(%rip), %xmm2 # iptlo
96 pshufb %xmm0, %xmm2
99 pxor %xmm5, %xmm2
101 pxor %xmm2, %xmm0
110 pshufb %xmm2, %xmm4 # 4 = sb1u
116 pshufb %xmm2, %xmm5 # 4 = sb2u
118 movdqa %xmm14, %xmm2 # 2 : sb2t
119 pshufb %xmm3, %xmm2 # 2 = sb2t
121 pxor %xmm5, %xmm2 # 2 = 2A
124 pxor %xmm2, %xmm0 # 0 = 2A+B
[all …]
/external/boringssl/linux-x86_64/crypto/cipher_extra/
aes128gcmsiv-x86_64.S
41 vpclmulqdq $0x00,%xmm1,%xmm0,%xmm2
48 vpxor %xmm4,%xmm2,%xmm2
51 vpclmulqdq $0x10,poly(%rip),%xmm2,%xmm3
52 vpshufd $78,%xmm2,%xmm4
53 vpxor %xmm4,%xmm3,%xmm2
55 vpclmulqdq $0x10,poly(%rip),%xmm2,%xmm3
56 vpshufd $78,%xmm2,%xmm4
57 vpxor %xmm4,%xmm3,%xmm2
59 vpxor %xmm5,%xmm2,%xmm0
378 vpshufb %xmm15,%xmm1,%xmm2
[all …]
