
Searched refs:vmovss (Results 1 – 25 of 176), sorted by relevance


/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/
pr30430.ll
14 ; CHECK-NEXT: vmovss {{.*#+}} xmm8 = mem[0],zero,zero,zero
15 ; CHECK-NEXT: vmovss {{.*#+}} xmm9 = mem[0],zero,zero,zero
16 ; CHECK-NEXT: vmovss {{.*#+}} xmm10 = mem[0],zero,zero,zero
17 ; CHECK-NEXT: vmovss {{.*#+}} xmm11 = mem[0],zero,zero,zero
18 ; CHECK-NEXT: vmovss {{.*#+}} xmm12 = mem[0],zero,zero,zero
19 ; CHECK-NEXT: vmovss {{.*#+}} xmm13 = mem[0],zero,zero,zero
20 ; CHECK-NEXT: vmovss {{.*#+}} xmm14 = mem[0],zero,zero,zero
21 ; CHECK-NEXT: vmovss {{.*#+}} xmm15 = mem[0],zero,zero,zero
22 ; CHECK-NEXT: vmovss %xmm0, {{[0-9]+}}(%rsp)
23 ; CHECK-NEXT: vmovss %xmm1, {{[0-9]+}}(%rsp)
[all …]
avx1-logical-load-folding.ll
13 ; X86-NEXT: vmovss %xmm0, (%eax)
21 ; X64-NEXT: vmovss %xmm0, (%rsi)
42 ; X86-NEXT: vmovss %xmm0, (%eax)
50 ; X64-NEXT: vmovss %xmm0, (%rsi)
71 ; X86-NEXT: vmovss %xmm0, (%eax)
79 ; X64-NEXT: vmovss %xmm0, (%rsi)
99 ; X86-NEXT: vmovss %xmm0, (%eax)
107 ; X64-NEXT: vmovss %xmm0, (%rsi)
gpr-to-mask.ll
16 ; X86-64-NEXT: vmovss %xmm0, %xmm0, %xmm1 {%k1}
17 ; X86-64-NEXT: vmovss %xmm1, (%rsi)
22 ; X86-32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
23 ; X86-32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
28 ; X86-32-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
32 ; X86-32-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
35 ; X86-32-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
36 ; X86-32-NEXT: vmovss %xmm0, (%eax)
76 ; X86-32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
81 ; X86-32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
[all …]
pr38803.ll
16 ; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
17 ; CHECK-NEXT: vmovss %xmm0, %xmm0, %xmm0 {%k1} {z}
24 ; CHECK-NEXT: vmovss %xmm2, %xmm0, %xmm1 {%k1}
25 ; CHECK-NEXT: vmovss %xmm1, {{.*}}(%rip)
fma-scalar-memfold.ll
20 ; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
22 ; CHECK-NEXT: vmovss %xmm0, (%rdi)
46 ; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
48 ; CHECK-NEXT: vmovss %xmm0, (%rdi)
72 ; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
74 ; CHECK-NEXT: vmovss %xmm0, (%rdi)
98 ; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
100 ; CHECK-NEXT: vmovss %xmm0, (%rdi)
124 ; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
126 ; CHECK-NEXT: vmovss %xmm0, (%rdi)
[all …]
avx512-load-store.ll
11 ; CHECK64-NEXT: vmovss %xmm2, %xmm1, %xmm0 {%k1}
18 ; CHECK32-NEXT: vmovss %xmm2, %xmm1, %xmm0 {%k1}
34 ; CHECK64-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} {z}
41 ; CHECK32-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} {z}
101 ; CHECK64-NEXT: vmovss %xmm0, (%rdi) {%k1}
109 ; CHECK32-NEXT: vmovss %xmm0, (%eax) {%k1}
148 ; CHECK64-NEXT: vmovss (%rsi), %xmm0 {%k1}
156 ; CHECK32-NEXT: vmovss (%eax), %xmm0 {%k1}
199 ; CHECK64-NEXT: vmovss (%rsi), %xmm0 {%k1} {z}
207 ; CHECK32-NEXT: vmovss (%eax), %xmm0 {%k1} {z}
[all …]
vec_cast3.ll
116 ; CHECK-NEXT: vmovss %xmm0, {{[0-9]+}}(%esp)
146 ; CHECK-NEXT: vmovss %xmm0, {{[0-9]+}}(%esp)
192 ; CHECK-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
196 ; CHECK-NEXT: vmovss %xmm3, {{[0-9]+}}(%esp)
200 ; CHECK-NEXT: vmovss %xmm3, {{[0-9]+}}(%esp)
240 ; CHECK-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
244 ; CHECK-NEXT: vmovss %xmm3, {{[0-9]+}}(%esp)
248 ; CHECK-NEXT: vmovss %xmm3, {{[0-9]+}}(%esp)
288 ; CHECK-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
292 ; CHECK-NEXT: vmovss %xmm3, {{[0-9]+}}(%esp)
[all …]
vector-sqrt.ll
32 ; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
33 ; CHECK-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
37 ; CHECK-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
40 ; CHECK-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
function-subtarget-features.ll
5 ; In this case avx has a vmovss instruction and otherwise we should be using movss
15 ; CHECK: vmovss
39 ; CHECK: vmovss
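
The comment quoted above from function-subtarget-features.ll states what that test checks: with per-function subtarget features, the same scalar float store is emitted as vmovss when the function enables AVX and as plain movss otherwise. A minimal sketch of that idea, with hypothetical function names and RUN line rather than the file's actual contents:

; Hedged sketch, not the real test: the same store lowers to vmovss or movss
; depending on each function's "target-features" attribute.
; RUN: llc -mtriple=x86_64-unknown-unknown < %s | FileCheck %s

define void @store_with_avx(float %x, float* %p) #0 {
; CHECK-LABEL: store_with_avx:
; CHECK: vmovss %xmm0, (%rdi)
  store float %x, float* %p
  ret void
}

define void @store_without_avx(float %x, float* %p) #1 {
; CHECK-LABEL: store_without_avx:
; CHECK: movss %xmm0, (%rdi)
  store float %x, float* %p
  ret void
}

attributes #0 = { "target-features"="+avx" }
attributes #1 = { "target-features"="-avx" }
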
fma4-scalar-memfold.ll
12 ; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
14 ; CHECK-NEXT: vmovss %xmm0, (%rdi)
38 ; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
40 ; CHECK-NEXT: vmovss %xmm0, (%rdi)
fast-isel-select-sse.ll
29 ; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
82 ; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
136 ; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
189 ; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
241 ; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
293 ; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
345 ; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
397 ; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
449 ; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
502 ; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
[all …]
load-slice.ll
21 ; STRESS: vmovss 64([[BASE:[^(]+]]), [[OUT_Real:%xmm[0-9]+]]
23 ; STRESS-NEXT: vmovss 68([[BASE]]), [[OUT_Imm:%xmm[0-9]+]]
36 ; REGULAR: vmovss 64([[BASE:[^)]+]]), [[OUT_Real:%xmm[0-9]+]]
38 ; REGULAR-NEXT: vmovss 68([[BASE]]), [[OUT_Imm:%xmm[0-9]+]]
fast-isel-uint-float-conversion.ll
105 ; AVX_X86-NEXT: vmovss %xmm0, (%esp)
128 ; AVX_X86-NEXT: vmovss %xmm0, (%esp)
151 ; AVX_X86-NEXT: vmovss %xmm0, (%esp)
avx-vbroadcast.ll
240 ; X32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
241 ; X32-NEXT: vmovss %xmm0, (%eax)
248 ; X64-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
249 ; X64-NEXT: vmovss %xmm0, (%rsi)
294 ; X32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
295 ; X32-NEXT: vmovss %xmm0, (%eax)
301 ; X64-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
302 ; X64-NEXT: vmovss %xmm0, (%rsi)
881 ; X32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
882 ; X32-NEXT: vmovss %xmm0, {{[0-9]+}}(%esp) ## 4-byte Spill
[all …]
avx2-fma-fneg-combine.ll
121 ; X32-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
122 ; X32-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
131 ; X64-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
148 ; X32-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
scalar-int-to-fp.ll
21 ; AVX512_32-NEXT: vmovss %xmm0, (%esp)
73 ; AVX512_32-NEXT: vmovss %xmm0, (%esp)
224 ; AVX512_32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
308 ; AVX512DQVL_32-NEXT: vmovss %xmm0, (%esp)
324 ; AVX512DQ_32-NEXT: vmovss %xmm0, (%esp)
344 ; AVX512F_32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
345 ; AVX512F_32-NEXT: vmovss %xmm0, (%esp)
418 ; AVX512DQVL_32-NEXT: vmovss %xmm0, (%esp)
434 ; AVX512DQ_32-NEXT: vmovss %xmm0, (%esp)
482 ; AVX512DQVL_32-NEXT: vmovss %xmm0, (%esp)
[all …]
/external/llvm/test/CodeGen/X86/
fma-scalar-memfold.ll
17 ; CHECK: vmovss (%rcx), %[[XMM:xmm[0-9]+]]
19 ; CHECK-NEXT: vmovss %[[XMM]], (%rcx)
42 ; CHECK: vmovss (%rcx), %[[XMM:xmm[0-9]+]]
44 ; CHECK-NEXT: vmovss %[[XMM]], (%rcx)
67 ; CHECK: vmovss (%rcx), %[[XMM:xmm[0-9]+]]
69 ; CHECK-NEXT: vmovss %[[XMM]], (%rcx)
92 ; CHECK: vmovss (%rcx), %[[XMM:xmm[0-9]+]]
94 ; CHECK-NEXT: vmovss %[[XMM]], (%rcx)
117 ; CHECK: vmovss (%rcx), %[[XMM:xmm[0-9]+]]
119 ; CHECK-NEXT: vmovss %[[XMM]], (%rcx)
[all …]
lower-vec-shift.ll
21 ; AVX-NEXT: vmovss
24 ; AVX2-NEXT: vmovss
55 ; AVX-NEXT: vmovss
107 ; AVX-NEXT: vmovss
function-subtarget-features.ll
5 ; In this case avx has a vmovss instruction and otherwise we should be using movss
15 ; CHECK: vmovss
39 ; CHECK: vmovss
f16c-intrinsics-fast-isel.ll
14 ; X32-NEXT: vmovss %xmm0, (%esp)
41 ; X32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
43 ; X32-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
52 ; X64-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
avx-load-store.ll
28 ;; + ins_subvec+ zext into only a single vmovss or vmovsd or vinsertps from memory
33 ; CHECK: vmovss (%
94 ; CHECK-O0: vmovss LCPI
96 ; CHECK-O0: vmovss %xmm
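
The comment quoted above from avx-load-store.ll describes the expectation behind its vmovss matches: a scalar load inserted into a zero vector should fold into a single vmovss (or vmovsd) from memory, because the memory form of vmovss already zeroes the upper lanes. A minimal sketch of that pattern, using a hypothetical function name and a 128-bit vector instead of the test's actual IR:

; Hedged sketch, assuming an AVX-enabled x86-64 target; not the test's real IR.
; The load + insertelement below should lower to a single
;   vmovss (%rdi), %xmm0        # xmm0 = mem[0],zero,zero,zero
; with no separate zeroing or insert instructions.
define <4 x float> @load_into_zero_vec(float* %p) {
  %f = load float, float* %p
  %v = insertelement <4 x float> zeroinitializer, float %f, i32 0
  ret <4 x float> %v
}
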
avx512-build-vector.ll
19 ; CHECK-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
20 ; CHECK-NEXT: vmovss {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3]
load-slice.ll
21 ; STRESS: vmovss 64([[BASE:[^(]+]]), [[OUT_Real:%xmm[0-9]+]]
25 ; STRESS-NEXT: vmovss 68([[BASE]]), [[OUT_Imm:%xmm[0-9]+]]
36 ; REGULAR: vmovss 64([[BASE:[^)]+]]), [[OUT_Real:%xmm[0-9]+]]
40 ; REGULAR-NEXT: vmovss 68([[BASE]]), [[OUT_Imm:%xmm[0-9]+]]
fold-load-unops.ll
16 ; AVX-NEXT: vmovss (%rdi), %xmm0
35 ; AVX-NEXT: vmovss (%rdi), %xmm0
54 ; AVX-NEXT: vmovss (%rdi), %xmm0
/external/swiftshader/third_party/LLVM/test/CodeGen/X86/
avx-load-store.ll
28 ;; + ins_subvec+ zext into only a single vmovss or vmovsd
30 ; CHECK: vmovss (%
85 ; CHECK-O0: vmovss LCPI
87 ; CHECK-O0: vmovss %xmm
