/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/

pr30430.ll
    14: ; CHECK-NEXT: vmovss {{.*#+}} xmm8 = mem[0],zero,zero,zero
    15: ; CHECK-NEXT: vmovss {{.*#+}} xmm9 = mem[0],zero,zero,zero
    16: ; CHECK-NEXT: vmovss {{.*#+}} xmm10 = mem[0],zero,zero,zero
    17: ; CHECK-NEXT: vmovss {{.*#+}} xmm11 = mem[0],zero,zero,zero
    18: ; CHECK-NEXT: vmovss {{.*#+}} xmm12 = mem[0],zero,zero,zero
    19: ; CHECK-NEXT: vmovss {{.*#+}} xmm13 = mem[0],zero,zero,zero
    20: ; CHECK-NEXT: vmovss {{.*#+}} xmm14 = mem[0],zero,zero,zero
    21: ; CHECK-NEXT: vmovss {{.*#+}} xmm15 = mem[0],zero,zero,zero
    22: ; CHECK-NEXT: vmovss %xmm0, {{[0-9]+}}(%rsp)
    23: ; CHECK-NEXT: vmovss %xmm1, {{[0-9]+}}(%rsp)
    [all …]

avx1-logical-load-folding.ll
    13: ; X86-NEXT: vmovss %xmm0, (%eax)
    21: ; X64-NEXT: vmovss %xmm0, (%rsi)
    42: ; X86-NEXT: vmovss %xmm0, (%eax)
    50: ; X64-NEXT: vmovss %xmm0, (%rsi)
    71: ; X86-NEXT: vmovss %xmm0, (%eax)
    79: ; X64-NEXT: vmovss %xmm0, (%rsi)
    99: ; X86-NEXT: vmovss %xmm0, (%eax)
    107: ; X64-NEXT: vmovss %xmm0, (%rsi)

gpr-to-mask.ll
    16: ; X86-64-NEXT: vmovss %xmm0, %xmm0, %xmm1 {%k1}
    17: ; X86-64-NEXT: vmovss %xmm1, (%rsi)
    22: ; X86-32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
    23: ; X86-32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
    28: ; X86-32-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
    32: ; X86-32-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
    35: ; X86-32-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1}
    36: ; X86-32-NEXT: vmovss %xmm0, (%eax)
    76: ; X86-32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
    81: ; X86-32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
    [all …]

pr38803.ll
    16: ; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
    17: ; CHECK-NEXT: vmovss %xmm0, %xmm0, %xmm0 {%k1} {z}
    24: ; CHECK-NEXT: vmovss %xmm2, %xmm0, %xmm1 {%k1}
    25: ; CHECK-NEXT: vmovss %xmm1, {{.*}}(%rip)

fma-scalar-memfold.ll
    20: ; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
    22: ; CHECK-NEXT: vmovss %xmm0, (%rdi)
    46: ; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
    48: ; CHECK-NEXT: vmovss %xmm0, (%rdi)
    72: ; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
    74: ; CHECK-NEXT: vmovss %xmm0, (%rdi)
    98: ; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
    100: ; CHECK-NEXT: vmovss %xmm0, (%rdi)
    124: ; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
    126: ; CHECK-NEXT: vmovss %xmm0, (%rdi)
    [all …]

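These checks cover scalar FMA memory folding: the accumulator comes in through a vmovss load and the result leaves through a vmovss store, with the remaining operand folded into the FMA itself. A rough sketch of IR with the same shape, using the generic llvm.fma.f32 intrinsic rather than the test's X86-specific FMA intrinsics (function and value names here are illustrative, not the test's):

    declare float @llvm.fma.f32(float, float, float)

    ; Built for a target with FMA (e.g. llc -mattr=+fma), this should come
    ; out as a vmovss load of %a, an ss-form vfmadd with %b folded as a
    ; memory operand, and a vmovss store back to %a.
    define void @fma_ss_sketch(float* %a, float* %b) {
    entry:
      %x = load float, float* %a
      %y = load float, float* %b
      %r = call float @llvm.fma.f32(float %x, float %y, float %x)
      store float %r, float* %a
      ret void
    }
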
avx512-load-store.ll
    11: ; CHECK64-NEXT: vmovss %xmm2, %xmm1, %xmm0 {%k1}
    18: ; CHECK32-NEXT: vmovss %xmm2, %xmm1, %xmm0 {%k1}
    34: ; CHECK64-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} {z}
    41: ; CHECK32-NEXT: vmovss %xmm1, %xmm0, %xmm0 {%k1} {z}
    101: ; CHECK64-NEXT: vmovss %xmm0, (%rdi) {%k1}
    109: ; CHECK32-NEXT: vmovss %xmm0, (%eax) {%k1}
    148: ; CHECK64-NEXT: vmovss (%rsi), %xmm0 {%k1}
    156: ; CHECK32-NEXT: vmovss (%eax), %xmm0 {%k1}
    199: ; CHECK64-NEXT: vmovss (%rsi), %xmm0 {%k1} {z}
    207: ; CHECK32-NEXT: vmovss (%eax), %xmm0 {%k1} {z}
    [all …]

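The {%k1} and {%k1} {z} forms are AVX-512 merge-masked and zero-masked scalar moves, exercised here in register, store, and load flavors. A minimal sketch of the kind of select IR that should lower to the merge-masked register form, modeled on the _mm_mask_move_ss pattern (assumes -mattr=+avx512f; names are illustrative):

    ; Bit 0 of %u chooses between the low elements of %b and %w; the result
    ; is inserted into %a. This is the shape that becomes
    ; "vmovss %xmm2, %xmm1, %xmm0 {%k1}".
    define <4 x float> @mask_move_ss_sketch(<4 x float> %w, <4 x float> %a, <4 x float> %b, i8 %u) {
    entry:
      %bit  = and i8 %u, 1
      %cond = icmp ne i8 %bit, 0
      %b0   = extractelement <4 x float> %b, i64 0
      %w0   = extractelement <4 x float> %w, i64 0
      %sel  = select i1 %cond, float %b0, float %w0
      %res  = insertelement <4 x float> %a, float %sel, i64 0
      ret <4 x float> %res
    }
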
vec_cast3.ll
    116: ; CHECK-NEXT: vmovss %xmm0, {{[0-9]+}}(%esp)
    146: ; CHECK-NEXT: vmovss %xmm0, {{[0-9]+}}(%esp)
    192: ; CHECK-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
    196: ; CHECK-NEXT: vmovss %xmm3, {{[0-9]+}}(%esp)
    200: ; CHECK-NEXT: vmovss %xmm3, {{[0-9]+}}(%esp)
    240: ; CHECK-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
    244: ; CHECK-NEXT: vmovss %xmm3, {{[0-9]+}}(%esp)
    248: ; CHECK-NEXT: vmovss %xmm3, {{[0-9]+}}(%esp)
    288: ; CHECK-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
    292: ; CHECK-NEXT: vmovss %xmm3, {{[0-9]+}}(%esp)
    [all …]

vector-sqrt.ll
    32: ; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
    33: ; CHECK-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
    37: ; CHECK-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
    40: ; CHECK-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero

function-subtarget-features.ll
    5: ; In this case avx has a vmovss instruction and otherwise we should be using movss
    15: ; CHECK: vmovss
    39: ; CHECK: vmovss

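This test is about per-function subtarget features: each function in one module carries its own "target-features" attribute, so one can be compiled for AVX (vmovss) while another falls back to SSE (movss), exactly as the file's line-5 comment says. A minimal sketch of that setup (illustrative, not the test's exact body):

    ; The #0 function should store with vmovss, the #1 function with movss,
    ; even though both are compiled by the same llc invocation.
    define void @with_avx(float %x, float* %p) #0 {
    entry:
      store float %x, float* %p
      ret void
    }

    define void @without_avx(float %x, float* %p) #1 {
    entry:
      store float %x, float* %p
      ret void
    }

    attributes #0 = { "target-features"="+avx" }
    attributes #1 = { "target-features"="-avx" }
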
fma4-scalar-memfold.ll
    12: ; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
    14: ; CHECK-NEXT: vmovss %xmm0, (%rdi)
    38: ; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
    40: ; CHECK-NEXT: vmovss %xmm0, (%rdi)

fast-isel-select-sse.ll
    29: ; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
    82: ; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
    136: ; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
    189: ; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
    241: ; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
    293: ; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
    345: ; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
    397: ; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
    449: ; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
    502: ; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
    [all …]

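The identical masked-move check recurs once per comparison predicate: under AVX-512, a scalar fcmp feeding a select should become a compare into %k1 followed by a merge-masked vmovss. The underlying IR pattern is just this (one predicate shown; names are illustrative):

    ; With -mattr=+avx512f this should select into something like
    ; "vcmpeqss ... %k1" then "vmovss %xmm2, ..., %xmm3 {%k1}".
    define float @select_fcmp_oeq_f32_sketch(float %a, float %b, float %c, float %d) {
    entry:
      %cc = fcmp oeq float %a, %b
      %r  = select i1 %cc, float %c, float %d
      ret float %r
    }
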
load-slice.ll
    21: ; STRESS: vmovss 64([[BASE:[^(]+]]), [[OUT_Real:%xmm[0-9]+]]
    23: ; STRESS-NEXT: vmovss 68([[BASE]]), [[OUT_Imm:%xmm[0-9]+]]
    36: ; REGULAR: vmovss 64([[BASE:[^)]+]]), [[OUT_Real:%xmm[0-9]+]]
    38: ; REGULAR-NEXT: vmovss 68([[BASE]]), [[OUT_Imm:%xmm[0-9]+]]

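The STRESS and REGULAR prefixes verify load slicing: one wide load covering an adjacent real/imaginary float pair is split into two independent vmovss loads at offsets 64 and 68. A self-contained sketch of a sliceable load (illustrative; the test's own IR works on a Complex class instead):

    ; A single i64 load whose two halves are only ever consumed as separate
    ; f32 values is a candidate for slicing into two 32-bit loads.
    define float @slice_sketch(i64* %p) {
    entry:
      %wide = load i64, i64* %p, align 8
      %lo32 = trunc i64 %wide to i32
      %hi64 = lshr i64 %wide, 32
      %hi32 = trunc i64 %hi64 to i32
      %re   = bitcast i32 %lo32 to float
      %im   = bitcast i32 %hi32 to float
      %sum  = fadd float %re, %im
      ret float %sum
    }
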
fast-isel-uint-float-conversion.ll
    105: ; AVX_X86-NEXT: vmovss %xmm0, (%esp)
    128: ; AVX_X86-NEXT: vmovss %xmm0, (%esp)
    151: ; AVX_X86-NEXT: vmovss %xmm0, (%esp)

avx-vbroadcast.ll
    240: ; X32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
    241: ; X32-NEXT: vmovss %xmm0, (%eax)
    248: ; X64-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
    249: ; X64-NEXT: vmovss %xmm0, (%rsi)
    294: ; X32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
    295: ; X32-NEXT: vmovss %xmm0, (%eax)
    301: ; X64-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
    302: ; X64-NEXT: vmovss %xmm0, (%rsi)
    881: ; X32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
    882: ; X32-NEXT: vmovss %xmm0, {{[0-9]+}}(%esp) ## 4-byte Spill
    [all …]

avx2-fma-fneg-combine.ll
    121: ; X32-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
    122: ; X32-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
    131: ; X64-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
    148: ; X32-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero

scalar-int-to-fp.ll
    21: ; AVX512_32-NEXT: vmovss %xmm0, (%esp)
    73: ; AVX512_32-NEXT: vmovss %xmm0, (%esp)
    224: ; AVX512_32-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
    308: ; AVX512DQVL_32-NEXT: vmovss %xmm0, (%esp)
    324: ; AVX512DQ_32-NEXT: vmovss %xmm0, (%esp)
    344: ; AVX512F_32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
    345: ; AVX512F_32-NEXT: vmovss %xmm0, (%esp)
    418: ; AVX512DQVL_32-NEXT: vmovss %xmm0, (%esp)
    434: ; AVX512DQ_32-NEXT: vmovss %xmm0, (%esp)
    482: ; AVX512DQVL_32-NEXT: vmovss %xmm0, (%esp)
    [all …]

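The repeated vmovss %xmm0, (%esp) stores on the 32-bit (_32) configurations appear to be return-value traffic rather than part of the conversion itself: the result is produced in an XMM register, but the x86-32 calling convention returns float in st(0), so the value is bounced through the stack and reloaded onto the x87 stack. The IR behind each check is just a plain conversion, e.g. (illustrative):

    ; On i686 with AVX-512, the uitofp result lands in %xmm0, is stored to
    ; the stack with vmovss, then reloaded (flds) to be returned in st(0).
    define float @u32_to_f32_sketch(i32 %a) {
    entry:
      %r = uitofp i32 %a to float
      ret float %r
    }
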
/external/llvm/test/CodeGen/X86/

fma-scalar-memfold.ll
    17: ; CHECK: vmovss (%rcx), %[[XMM:xmm[0-9]+]]
    19: ; CHECK-NEXT: vmovss %[[XMM]], (%rcx)
    42: ; CHECK: vmovss (%rcx), %[[XMM:xmm[0-9]+]]
    44: ; CHECK-NEXT: vmovss %[[XMM]], (%rcx)
    67: ; CHECK: vmovss (%rcx), %[[XMM:xmm[0-9]+]]
    69: ; CHECK-NEXT: vmovss %[[XMM]], (%rcx)
    92: ; CHECK: vmovss (%rcx), %[[XMM:xmm[0-9]+]]
    94: ; CHECK-NEXT: vmovss %[[XMM]], (%rcx)
    117: ; CHECK: vmovss (%rcx), %[[XMM:xmm[0-9]+]]
    119: ; CHECK-NEXT: vmovss %[[XMM]], (%rcx)
    [all …]

lower-vec-shift.ll
    21: ; AVX-NEXT: vmovss
    24: ; AVX2-NEXT: vmovss
    55: ; AVX-NEXT: vmovss
    107: ; AVX-NEXT: vmovss

function-subtarget-features.ll
    5: ; In this case avx has a vmovss instruction and otherwise we should be using movss
    15: ; CHECK: vmovss
    39: ; CHECK: vmovss

f16c-intrinsics-fast-isel.ll
    14: ; X32-NEXT: vmovss %xmm0, (%esp)
    41: ; X32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
    43: ; X32-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
    52: ; X64-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]

avx-load-store.ll
    28: ;; + ins_subvec+ zext into only a single vmovss or vmovsd or vinsertps from memory
    33: ; CHECK: vmovss (%
    94: ; CHECK-O0: vmovss LCPI
    96: ; CHECK-O0: vmovss %xmm

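The line-28 comment states the combine under test: a scalar load inserted into a zero vector should collapse to a single vmovss from memory, since a vmovss load already zeroes the rest of the destination register. A sketch modeled on this file's mov00-style tests (illustrative):

    ; load + insertelement into a zero vector should fold to one
    ; "vmovss (%rdi), %xmm0" rather than a load plus shuffles.
    define <8 x float> @mov00_sketch(float* %p) {
    entry:
      %s = load float, float* %p
      %v = insertelement <8 x float> zeroinitializer, float %s, i32 0
      ret <8 x float> %v
    }
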
avx512-build-vector.ll
    19: ; CHECK-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
    20: ; CHECK-NEXT: vmovss {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3]

load-slice.ll
    21: ; STRESS: vmovss 64([[BASE:[^(]+]]), [[OUT_Real:%xmm[0-9]+]]
    25: ; STRESS-NEXT: vmovss 68([[BASE]]), [[OUT_Imm:%xmm[0-9]+]]
    36: ; REGULAR: vmovss 64([[BASE:[^)]+]]), [[OUT_Real:%xmm[0-9]+]]
    40: ; REGULAR-NEXT: vmovss 68([[BASE]]), [[OUT_Imm:%xmm[0-9]+]]

fold-load-unops.ll
    16: ; AVX-NEXT: vmovss (%rdi), %xmm0
    35: ; AVX-NEXT: vmovss (%rdi), %xmm0
    54: ; AVX-NEXT: vmovss (%rdi), %xmm0

/external/swiftshader/third_party/LLVM/test/CodeGen/X86/

avx-load-store.ll
    28: ;; + ins_subvec+ zext into only a single vmovss or vmovsd
    30: ; CHECK: vmovss (%
    85: ; CHECK-O0: vmovss LCPI
    87: ; CHECK-O0: vmovss %xmm