/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/ |
D | pr34653.ll |
    36 ; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
    39 ; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
    42 ; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
    45 ; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
    48 ; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
    51 ; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
    54 ; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
    57 ; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
    60 ; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
    69 ; CHECK-NEXT: vmovsd %xmm0, {{[0-9]+}}(%rsp) # 8-byte Spill
    [all …]
|
D | vector-constrained-fp-intrinsics.ll |
    467 ; HAS-FMA-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    468 ; HAS-FMA-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
    471 ; HAS-FMA-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    472 ; HAS-FMA-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
    522 ; HAS-FMA-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    523 ; HAS-FMA-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
    526 ; HAS-FMA-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    527 ; HAS-FMA-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
    532 ; HAS-FMA-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    533 ; HAS-FMA-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
    [all …]
|
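The recurring "vmovsd {{.*#+}} xmm0 = mem[0],zero" lines above are checks autogenerated by utils/update_llc_test_checks.py: vmovsd with a memory source loads one double into the low lane and zeroes the upper lane, and the X86 asm printer annotates that as "xmm0 = mem[0],zero". A minimal sketch of a test that produces such a check (hypothetical function name, not one of the files listed here):

    ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s
    define double @load_f64(double* %p) {
    ; CHECK-LABEL: load_f64:
    ; CHECK:       vmovsd {{.*#+}} xmm0 = mem[0],zero
      %v = load double, double* %p
      ret double %v
    }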
D | fma-scalar-memfold.ll |
    228 ; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    230 ; CHECK-NEXT: vmovsd %xmm0, (%rdi)
    250 ; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    252 ; CHECK-NEXT: vmovsd %xmm0, (%rdi)
    272 ; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    274 ; CHECK-NEXT: vmovsd %xmm0, (%rdi)
    294 ; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    296 ; CHECK-NEXT: vmovsd %xmm0, (%rdi)
    316 ; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    318 ; CHECK-NEXT: vmovsd %xmm0, (%rdi)
    [all …]
|
D | avx512-load-store.ll |
    56 ; CHECK64-NEXT: vmovsd %xmm2, %xmm1, %xmm0 {%k1}
    63 ; CHECK32-NEXT: vmovsd %xmm2, %xmm1, %xmm0 {%k1}
    79 ; CHECK64-NEXT: vmovsd %xmm1, %xmm0, %xmm0 {%k1} {z}
    86 ; CHECK32-NEXT: vmovsd %xmm1, %xmm0, %xmm0 {%k1} {z}
    125 ; CHECK64-NEXT: vmovsd %xmm0, (%rdi) {%k1}
    133 ; CHECK32-NEXT: vmovsd %xmm0, (%eax) {%k1}
    174 ; CHECK64-NEXT: vmovsd (%rsi), %xmm0 {%k1}
    182 ; CHECK32-NEXT: vmovsd (%eax), %xmm0 {%k1}
    223 ; CHECK64-NEXT: vmovsd (%rsi), %xmm0 {%k1} {z}
    231 ; CHECK32-NEXT: vmovsd (%eax), %xmm0 {%k1} {z}
    [all …]
|
D | vector-shuffle-variable-256.ll |
    21 ; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    22 ; ALL-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
    51 ; ALL-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
    75 ; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    76 ; ALL-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
    104 ; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    105 ; ALL-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
    107 ; ALL-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
    108 ; ALL-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
    135 ; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    [all …]
|
D | pr37879.ll |
    12 ; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    13 ; CHECK-NEXT: vmovsd %xmm1, %xmm0, %xmm0 {%k1}
|
D | pr23103.ll |
    15 ; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    17 ; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
|
D | fma4-scalar-memfold.ll |
    64 ; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    66 ; CHECK-NEXT: vmovsd %xmm0, (%rdi)
    86 ; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    88 ; CHECK-NEXT: vmovsd %xmm0, (%rdi)
|
D | fast-isel-select-sse.ll |
    55 ; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
    109 ; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
    163 ; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
    215 ; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
    267 ; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
    319 ; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
    371 ; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
    423 ; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
    475 ; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
    529 ; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
    [all …]
|
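The "{%k1}" suffixes in avx512-load-store.ll and fast-isel-select-sse.ll are AVX-512 mask-register operands: vmovsd merges the low element under the mask, while "{%k1} {z}" zeroes it instead when the mask bit is clear. A minimal sketch of a scalar select that lowers to this compare-into-%k1 plus masked-vmovsd sequence on an AVX-512 target (hypothetical function name; the exact register assignment may differ):

    ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s
    define double @select_oeq_f64(double %a, double %b, double %c, double %d) {
    ; CHECK-LABEL: select_oeq_f64:
    ; CHECK:       vcmpeqsd %xmm1, %xmm0, %k1
    ; CHECK:       vmovsd %xmm2, {{.*}} {%k1}
      %cmp = fcmp oeq double %a, %b
      %sel = select i1 %cmp, double %c, double %d
      ret double %sel
    }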
D | scalar-int-to-fp.ll |
    117 ; AVX512_32-NEXT: vmovsd %xmm0, (%esp)
    175 ; AVX512_32-NEXT: vmovsd %xmm0, (%esp)
    223 ; AVX512_32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    227 ; AVX512_32-NEXT: vmovsd %xmm0, (%esp)
    239 ; AVX512_64-NEXT: vmovsd %xmm0, -{{[0-9]+}}(%rsp)
    306 ; AVX512DQVL_32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    322 ; AVX512DQ_32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    336 ; AVX512F_32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    416 ; AVX512DQVL_32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    432 ; AVX512DQ_32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    [all …]
|
D | merge-consecutive-loads-512.ll |
    194 ; ALL-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
    202 ; X32-AVX512F-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
    265 ; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    271 ; X32-AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    379 ; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    385 ; X32-AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    501 ; AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    507 ; AVX512BW-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    513 ; X32-AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    533 ; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    [all …]
|
D | chain_order.ll |
    8 ; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    9 ; CHECK-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
|
D | fast-isel-uint-float-conversion.ll |
    23 ; AVX_X86-NEXT: vmovsd %xmm0, (%esp)
    52 ; AVX_X86-NEXT: vmovsd %xmm0, (%esp)
    81 ; AVX_X86-NEXT: vmovsd %xmm0, (%esp)
|
D | pr34080.ll |
    128 ; AVX-NEXT: vmovsd %xmm0, -48(%rbp)
    129 ; AVX-NEXT: vmovsd %xmm0, -24(%rbp)
    136 ; AVX-NEXT: vmovsd %xmm0, -40(%rbp)
    137 ; AVX-NEXT: vmovsd %xmm0, -16(%rbp)
|
D | insertelement-shuffle.ll |
    53 ; X32_AVX256-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
    100 ; X32_AVX256-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    115 ; X32_AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
|
D | merge-consecutive-loads-256.ll |
    74 ; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    80 ; X32-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    201 ; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    207 ; X32-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    302 ; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    308 ; X32-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    374 ; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    382 ; X32-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    458 ; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    464 ; X32-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    [all …]
|
D | avx-bitcast.ll |
    6 ; CHECK: vmovsd {{.*#+}} xmm0 = mem[0],zero
|
D | vector-sqrt.ll |
    8 ; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
    9 ; CHECK-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
|
/external/llvm/test/CodeGen/X86/ |
D | lower-vec-shift.ll |
    38 ; AVX-NEXT: vmovsd
    41 ; AVX2-NEXT: vmovsd
    70 ; AVX-NEXT: vmovsd
    90 ; AVX-NEXT: vmovsd
    93 ; AVX2-NEXT: vmovsd
    122 ; AVX-NEXT: vmovsd
|
D | pr23103.ll |
    11 ; CHECK: vmovsd (%rdi), %xmm0
    12 ; CHECK-NEXT: vmovsd %xmm0, {{.*}}(%rsp) {{.*#+}} 8-byte Spill
|
D | chain_order.ll |
    4 ; CHECK: vmovsd (%rdi), %xmm{{.*}}
    5 ; CHECK-NEXT: vmovsd 16(%rdi), %xmm{{.*}}
|
D | fma-scalar-memfold.ll |
    217 ; CHECK: vmovsd (%rcx), %[[XMM:xmm[0-9]+]]
    238 ; CHECK: vmovsd (%rcx), %[[XMM:xmm[0-9]+]]
    259 ; CHECK: vmovsd (%rcx), %[[XMM:xmm[0-9]+]]
    280 ; CHECK: vmovsd (%rcx), %[[XMM:xmm[0-9]+]]
    301 ; CHECK: vmovsd (%rcx), %[[XMM:xmm[0-9]+]]
    322 ; CHECK: vmovsd (%rcx), %[[XMM:xmm[0-9]+]]
    343 ; CHECK: vmovsd (%rcx), %[[XMM:xmm[0-9]+]]
    364 ; CHECK: vmovsd (%rcx), %[[XMM:xmm[0-9]+]]
|
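The "%[[XMM:xmm[0-9]+]]" syntax in the older fma-scalar-memfold.ll checks is a FileCheck pattern variable: it captures whichever xmm register the instruction picked and requires later lines to reuse the same one, so the test does not hard-code a register number. A minimal sketch of the same idiom (hypothetical function, not taken from that file):

    ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s
    define void @add_store(double* %p, double %a, double %b) {
    ; CHECK: vaddsd %xmm1, %xmm0, %[[XMM:xmm[0-9]+]]
    ; CHECK-NEXT: vmovsd %[[XMM]], (%rdi)
      %s = fadd double %a, %b
      store double %s, double* %p
      ret void
    }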
D | avx-bitcast.ll |
    6 ; CHECK: vmovsd {{.*#+}} xmm0 = mem[0],zero
|
/external/swiftshader/third_party/LLVM/test/CodeGen/X86/ |
D | avx-bitcast.ll |
    3 ; CHECK: vmovsd (%
|
D | avx-load-store.ll |
    28 ;; + ins_subvec+ zext into only a single vmovss or vmovsd
    37 ; CHECK: vmovsd (%
|