/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/ |
D | sse-regcall.ll |
    144 ; X32: addps {{([0-9])+}}(%ebp), {{%xmm([0-7])}}
    145 ; X32: addps {{([0-9])+}}(%ebp), {{%xmm([0-7])}}
    146 ; X32: addps {{([0-9])+}}(%ebp), {{%xmm([0-7])}}
    147 ; X32: addps {{([0-9])+}}(%ebp), {{%xmm([0-7])}}
    148 ; X32: addps {{([0-9])+}}(%ebp), {{%xmm([0-7])}}
    149 ; X32: addps {{([0-9])+}}(%ebp), {{%xmm([0-7])}}
    150 ; X32: addps {{([0-9])+}}(%ebp), {{%xmm([0-7])}}
    151 ; X32: addps {{([0-9])+}}(%ebp), {{%xmm([0-7])}}
    152 ; X32: addps {{([0-9])+}}(%ebp), {{%xmm([0-7])}}
    153 ; X32: addps {{([0-9])+}}(%ebp), {{%xmm([0-7])}}
    [all …]
|
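The entries in this index are FileCheck assertions from LLVM's X86 codegen tests; patterns such as {{%xmm([0-7])}} are FileCheck regexes that accept any register in the given range. As a hedged, self-contained sketch of what such a test looks like (the triple, function name, and exact register assignment below are illustrative, not taken from the files listed here):

    ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s

    ; A <4 x float> fadd selects a single register-to-register addps:
    ; %a arrives in %xmm0, %b in %xmm1, and the result stays in %xmm0.
    define <4 x float> @add_v4f32(<4 x float> %a, <4 x float> %b) {
    ; CHECK-LABEL: add_v4f32:
    ; CHECK: addps %xmm1, %xmm0
      %sum = fadd <4 x float> %a, %b
      ret <4 x float> %sum
    }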
D | sse-intel-ocl.ll |
    8 ; WIN64: addps {{.*}}, {{%xmm[0-3]}}
    9 ; WIN64: addps {{.*}}, {{%xmm[0-3]}}
    10 ; WIN64: addps {{.*}}, {{%xmm[0-3]}}
    11 ; WIN64: addps {{.*}}, {{%xmm[0-3]}}
    18 ; WIN32: addps {{.*}}, {{%xmm[0-3]}}
    19 ; WIN32: addps {{.*}}, {{%xmm[0-3]}}
    20 ; WIN32: addps {{.*}}, {{%xmm[0-3]}}
    21 ; WIN32: addps {{.*}}, {{%xmm[0-3]}}
    26 ; NOT_WIN: addps {{.*}}, {{%xmm[0-3]}}
    27 ; NOT_WIN: addps {{.*}}, {{%xmm[0-3]}}
    [all …]
|
D | v2f32.ll |
    31 ; X64-NEXT: addps %xmm1, %xmm0
    36 ; X32-NEXT: addps %xmm1, %xmm0
    45 ; X64-NEXT: addps %xmm0, %xmm0
    50 ; X32-NEXT: addps %xmm0, %xmm0
    60 ; X64-NEXT: addps %xmm0, %xmm0
    65 ; X32-NEXT: addps %xmm0, %xmm0
    74 ; X64-NEXT: addps %xmm0, %xmm0
    75 ; X64-NEXT: addps %xmm0, %xmm0
    80 ; X32-NEXT: addps %xmm0, %xmm0
    81 ; X32-NEXT: addps %xmm0, %xmm0
|
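The v2f32.ll checks above show that a <2 x float> operation still produces a full-width addps: the type is not a native SSE type, so the backend widens it to an XMM register. A minimal sketch, assuming an x86-64 SysV target (function name illustrative):

    ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s

    ; <2 x float> is widened to a full XMM value, so the add is still one addps.
    define <2 x float> @add_v2f32(<2 x float> %a, <2 x float> %b) {
    ; CHECK-LABEL: add_v2f32:
    ; CHECK: addps %xmm1, %xmm0
      %sum = fadd <2 x float> %a, %b
      ret <2 x float> %sum
    }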
D | vector-reduce-fadd-fast.ll |
    18 ; SSE2-NEXT: addps %xmm1, %xmm0
    45 ; SSE2-NEXT: addps %xmm1, %xmm2
    48 ; SSE2-NEXT: addps %xmm2, %xmm0
    55 ; SSE41-NEXT: addps %xmm1, %xmm0
    79 ; SSE2-NEXT: addps %xmm2, %xmm1
    82 ; SSE2-NEXT: addps %xmm1, %xmm2
    85 ; SSE2-NEXT: addps %xmm2, %xmm0
    90 ; SSE41-NEXT: addps %xmm2, %xmm1
    93 ; SSE41-NEXT: addps %xmm1, %xmm0
    125 ; SSE2-NEXT: addps %xmm4, %xmm2
    [all …]
|
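vector-reduce-fadd-fast.ll checks the shuffle-and-add trees that fast-math horizontal reductions lower to. A hand-rolled equivalent is sketched below; the single loose CHECK is deliberate, since the exact shuffle and scalarization sequence differs between subtargets, and the function name is illustrative:

    ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s

    ; Horizontal sum of <4 x float>: fold the high half onto the low half,
    ; then fold lane 1 onto lane 0; the first folding step shows up as an addps.
    define float @hsum_v4f32(<4 x float> %v) {
    ; CHECK-LABEL: hsum_v4f32:
    ; CHECK: addps
      %hi  = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
      %s1  = fadd fast <4 x float> %v, %hi
      %ln1 = shufflevector <4 x float> %s1, <4 x float> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
      %s2  = fadd fast <4 x float> %s1, %ln1
      %r   = extractelement <4 x float> %s2, i32 0
      ret float %r
    }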
D | vec_uint_to_fp.ll |
    50 ; SSE-NEXT: addps [[MAGICCSTADDR]](%rip), %xmm0
    51 ; SSE-NEXT: addps [[MASK]], %xmm0
    60 ; SSE41-NEXT: addps [[MAGICCSTADDR]](%rip), %xmm0
    61 ; SSE41-NEXT: addps [[LOWVEC]], %xmm0
    111 ; SSE-NEXT: addps %[[MAGICCST]], %xmm0
    112 ; SSE-NEXT: addps [[VECLOW]], %xmm0
    118 ; SSE-NEXT: addps %[[MAGICCST]], %xmm1
    119 ; SSE-NEXT: addps %[[MASK]], %xmm1
    129 ; SSE41-NEXT: addps %[[MAGICCST]], %xmm0
    130 ; SSE41-NEXT: addps [[VECLOW]], %xmm0
    [all …]
|
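The [[MAGICCST]]/[[LOWVEC]]/[[MASK]] names here are FileCheck variables captured earlier in the test; they correspond to the standard SSE2 scheme for vector uint32-to-float conversion, which splits each lane into 16-bit halves, ORs each half into a float bit pattern, and removes the bias with a vector add. A sketch of the arithmetic, assuming the usual constants 0x4B000000 (= 2^23) and 0x53000000 (= 2^39), where asfloat() reinterprets bits as a float:

    lo  = x & 0xFFFF              hi  = x >> 16
    flo = asfloat(lo | 0x4B000000) = 2^23 + lo
    fhi = asfloat(hi | 0x53000000) = 2^39 + hi*2^16
    float(x) = (fhi - (2^39 + 2^23)) + flo = hi*2^16 + lo   (up to one final rounding)

The addps [[MAGICCSTADDR]]/[[MAGICCST]] lines above appear to be the bias-removal step, with the constant pre-folded so the whole correction is a single vector add.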
D | pr32368.ll |
    11 ; SSE-NEXT: addps %xmm0, %xmm0
    55 ; SSE-NEXT: addps %xmm1, %xmm1
    56 ; SSE-NEXT: addps %xmm0, %xmm0
    104 ; SSE-NEXT: addps %xmm3, %xmm3
    105 ; SSE-NEXT: addps %xmm2, %xmm2
    106 ; SSE-NEXT: addps %xmm1, %xmm1
    107 ; SSE-NEXT: addps %xmm0, %xmm0
|
D | sse-schedule.ll |
    27 ; GENERIC-NEXT: addps %xmm1, %xmm0 # sched: [3:1.00]
    28 ; GENERIC-NEXT: addps (%rdi), %xmm0 # sched: [9:1.00]
    33 ; ATOM-NEXT: addps %xmm1, %xmm0 # sched: [5:5.00]
    34 ; ATOM-NEXT: addps (%rdi), %xmm0 # sched: [5:5.00]
    39 ; SLM-NEXT: addps %xmm1, %xmm0 # sched: [3:1.00]
    40 ; SLM-NEXT: addps (%rdi), %xmm0 # sched: [6:1.00]
    45 ; SANDY-SSE-NEXT: addps %xmm1, %xmm0 # sched: [3:1.00]
    46 ; SANDY-SSE-NEXT: addps (%rdi), %xmm0 # sched: [9:1.00]
    57 ; HASWELL-SSE-NEXT: addps %xmm1, %xmm0 # sched: [3:1.00]
    58 ; HASWELL-SSE-NEXT: addps (%rdi), %xmm0 # sched: [9:1.00]
    [all …]
|
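The '# sched: [3:1.00]' comments in sse-schedule.ll are the scheduling model's numbers for each instruction, printed as [latency : reciprocal throughput]. Read loosely, for N register-form addps instructions on the generic model:

    dependent chain:   ~ N * 3 cycles      (each add waits for the previous result)
    independent adds:  ~ N * 1.00 cycles   (one add can start every cycle)

The larger first number on the memory forms (for example [9:1.00] for addps (%rdi), %xmm0) reflects the added load latency folded into the instruction.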
D | vec_extract.ll |
    11 ; X32-NEXT: addps %xmm0, %xmm0
    18 ; X64-NEXT: addps %xmm0, %xmm0
    35 ; X32-NEXT: addps %xmm0, %xmm0
    45 ; X64-NEXT: addps %xmm0, %xmm0
|
D | illegal-vector-args-return.ll |
    3 ; RUN: llc < %s -mattr=+sse2 -mcpu=nehalem | grep "addps %xmm3, %xmm1"
    4 ; RUN: llc < %s -mattr=+sse2 -mcpu=nehalem | grep "addps %xmm2, %xmm0"
|
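illegal-vector-args-return.ll greps for a specific pair of addps instructions because an <8 x float> value is illegal for SSE and gets split across two XMM registers. A hedged sketch of the same situation, assuming an x86-64 SysV target where the halves of the two arguments land in %xmm0/%xmm1 and %xmm2/%xmm3 (the original test's exact target setup may differ):

    ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s

    ; <8 x float> is not legal for SSE, so it is split into two <4 x float>
    ; halves and the single IR-level fadd becomes two addps instructions.
    define <8 x float> @add_v8f32(<8 x float> %a, <8 x float> %b) {
    ; CHECK-LABEL: add_v8f32:
    ; CHECK: addps
    ; CHECK: addps
      %sum = fadd <8 x float> %a, %b
      ret <8 x float> %sum
    }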
D | vec_uint_to_fp-fastmath.ll |
    38 ; SSE2-NEXT: addps %xmm1, %xmm0
    49 ; SSE41-NEXT: addps %xmm1, %xmm0
    117 ; SSE2-NEXT: addps %xmm2, %xmm0
    124 ; SSE2-NEXT: addps %xmm2, %xmm1
    137 ; SSE41-NEXT: addps %xmm2, %xmm0
    144 ; SSE41-NEXT: addps %xmm2, %xmm1
|
D | cvtv2f32.ll |
    16 ; X32-NEXT: addps {{\.LCPI.*}}, %xmm1
    17 ; X32-NEXT: addps %xmm2, %xmm1
    29 ; X64-NEXT: addps {{.*}}(%rip), %xmm1
    30 ; X64-NEXT: addps %xmm2, %xmm1
|
D | brcond.ll |
    101 ; CHECK-NEXT: addps LCPI2_0, %xmm1
    133 ; CHECK-NEXT: addps LCPI3_0, %xmm1
    165 ; CHECK-NEXT: addps LCPI4_0, %xmm1
    197 ; CHECK-NEXT: addps LCPI5_0, %xmm1
    229 ; CHECK-NEXT: addps LCPI6_0, %xmm1
    261 ; CHECK-NEXT: addps LCPI7_0, %xmm1
|
D | vec_partial.ll |
    9 ; X86-NEXT: addps {{\.LCPI.*}}, %xmm0
    14 ; X64-NEXT: addps {{.*}}(%rip), %xmm0
|
D | insertps-unfold-load-bug.ll |
    15 ; X32-NEXT: addps %xmm1, %xmm0
    23 ; X64-NEXT: addps %xmm1, %xmm0
|
D | haddsub-3.ll |
    11 ; SSE2-NEXT: addps %xmm0, %xmm1
    20 ; SSSE3-NEXT: addps %xmm0, %xmm1
|
/external/llvm/test/CodeGen/X86/ |
D | sse-intel-ocl.ll |
    8 ; WIN64: addps {{.*}}, {{%xmm[0-3]}}
    9 ; WIN64: addps {{.*}}, {{%xmm[0-3]}}
    10 ; WIN64: addps {{.*}}, {{%xmm[0-3]}}
    11 ; WIN64: addps {{.*}}, {{%xmm[0-3]}}
    18 ; WIN32: addps {{.*}}, {{%xmm[0-3]}}
    19 ; WIN32: addps {{.*}}, {{%xmm[0-3]}}
    20 ; WIN32: addps {{.*}}, {{%xmm[0-3]}}
    21 ; WIN32: addps {{.*}}, {{%xmm[0-3]}}
    26 ; NOT_WIN: addps {{.*}}, {{%xmm[0-3]}}
    27 ; NOT_WIN: addps {{.*}}, {{%xmm[0-3]}}
    [all …]
|
D | v2f32.ll |
    31 ; X64-NEXT: addps %xmm1, %xmm0
    36 ; X32-NEXT: addps %xmm1, %xmm0
    45 ; X64-NEXT: addps %xmm0, %xmm0
    50 ; X32-NEXT: addps %xmm0, %xmm0
    60 ; X64-NEXT: addps %xmm0, %xmm0
    65 ; X32-NEXT: addps %xmm0, %xmm0
    74 ; X64-NEXT: addps %xmm0, %xmm0
    75 ; X64-NEXT: addps %xmm0, %xmm0
    80 ; X32-NEXT: addps %xmm0, %xmm0
    81 ; X32-NEXT: addps %xmm0, %xmm0
|
D | vec_uint_to_fp.ll |
    50 ; SSE-NEXT: addps [[MAGICCSTADDR]](%rip), %xmm0
    51 ; SSE-NEXT: addps [[MASK]], %xmm0
    60 ; SSE41-NEXT: addps [[MAGICCSTADDR]](%rip), %xmm0
    61 ; SSE41-NEXT: addps [[LOWVEC]], %xmm0
    111 ; SSE-NEXT: addps %[[MAGICCST]], %xmm0
    112 ; SSE-NEXT: addps [[VECLOW]], %xmm0
    118 ; SSE-NEXT: addps %[[MAGICCST]], %xmm1
    119 ; SSE-NEXT: addps %[[MASK]], %xmm1
    129 ; SSE41-NEXT: addps %[[MAGICCST]], %xmm0
    130 ; SSE41-NEXT: addps [[VECLOW]], %xmm0
    [all …]
|
D | vec_extract.ll |
    11 ; X32-NEXT: addps %xmm0, %xmm0
    18 ; X64-NEXT: addps %xmm0, %xmm0
    35 ; X32-NEXT: addps %xmm0, %xmm0
    45 ; X64-NEXT: addps %xmm0, %xmm0
|
D | illegal-vector-args-return.ll |
    3 ; RUN: llc < %s -march=x86 -mattr=+sse2 -mcpu=nehalem | grep "addps %xmm3, %xmm1"
    4 ; RUN: llc < %s -march=x86 -mattr=+sse2 -mcpu=nehalem | grep "addps %xmm2, %xmm0"
|
D | insertps-unfold-load-bug.ll |
    15 ; X32-NEXT: addps %xmm1, %xmm0
    23 ; X64-NEXT: addps %xmm1, %xmm0
|
D | lower-bitcast.ll |
    54 ; CHECK: addps
    60 ; CHECK-WIDE: addps
    95 ; CHECK: addps
    99 ; CHECK-WIDE: addps
|
/external/swiftshader/third_party/LLVM/test/CodeGen/X86/ |
D | v2f32.ll |
    40 ; X64-NEXT: addps %xmm1, %xmm0
    45 ; W64-NEXT: addps (%rdx), %xmm0
    49 ; X32: addps %xmm1, %xmm0
    58 ; X64-NEXT: addps %xmm0, %xmm0
    63 ; W64-NEXT: addps %xmm0, %xmm0
    67 ; X32-NEXT: addps %xmm0, %xmm0
    75 ; X64-NEXT: addps %xmm0, %xmm0
    80 ; W64-NEXT: addps %xmm0, %xmm0
    84 ; X32-NEXT: addps %xmm0, %xmm0
    99 ; X64-NEXT: addps %xmm0, %xmm0
    [all …]
|
D | illegal-vector-args-return.ll |
    3 ; RUN: llc < %s -march=x86 -mattr=+sse2 | grep {addps %xmm3, %xmm1}
    4 ; RUN: llc < %s -march=x86 -mattr=+sse2 | grep {addps %xmm2, %xmm0}
|
/external/mesa3d/src/mesa/x86-64/ |
D | xform4.S |
    93 addps %xmm1, %xmm0 /* ox*m3+oy*m7 | ... */
    95 addps %xmm2, %xmm0 /* ox*m3+oy*m7+oz*m11 | ... */
    97 addps %xmm3, %xmm0 /* ox*m3+oy*m7+oz*m11+ow*m15 | ... */
    179 addps %xmm1, %xmm0 /* ox*m3+oy*m7 | ... */
    181 addps %xmm2, %xmm0 /* ox*m3+oy*m7+oz*m11 | ... */
    183 addps %xmm3, %xmm0 /* ox*m3+oy*m7+oz*m11+ow*m15 | ... */
|
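The xform4.S lines are from Mesa's hand-written x86-64 vertex transform: each addps accumulates one matrix column, scaled by one component of the input point, into the running result, and the register comments spell out that sum for a single lane. Assuming the usual Mesa/OpenGL column-major layout m[0..15], the full accumulation is:

    out = ox * [m0 m1 m2 m3] + oy * [m4 m5 m6 m7] + oz * [m8 m9 m10 m11] + ow * [m12 m13 m14 m15]

which makes the commented lane ox*m3 + oy*m7 + oz*m11 + ow*m15, i.e. the w component of the transformed point.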