/external/llvm/test/CodeGen/X86/ |
D | avx-intel-ocl.ll |
    11  ; WIN64: vaddps {{.*}}, {{%ymm[0-1]}}
    12  ; WIN64: vaddps {{.*}}, {{%ymm[0-1]}}
    18  ; X32: vaddps {{.*}}, {{%ymm[0-1]}}
    19  ; X32: vaddps {{.*}}, {{%ymm[0-1]}}
    26  ; X64: vaddps {{.*}}, {{%ymm[0-1]}}
    27  ; X64: vaddps {{.*}}, {{%ymm[0-1]}}
    47  ; WIN64: vaddps {{%ymm[6-7]}}, {{%ymm[0-1]}}, {{%ymm[0-1]}}
    48  ; WIN64: vaddps {{%ymm[6-7]}}, {{%ymm[0-1]}}, {{%ymm[0-1]}}
    54  ; X64: vaddps {{%ymm[8-9]}}, {{%ymm[0-1]}}, {{%ymm[0-1]}}
    55  ; X64: vaddps {{%ymm[8-9]}}, {{%ymm[0-1]}}, {{%ymm[0-1]}}
    [all …]
|
D | stack-folding-int-avx2.ll |
    13   ;CHECK: vbroadcastsd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
    35   ;CHECK: vbroadcastss {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
    46   …;CHECK: vextracti128 $1, {{%ymm[0-9][0-9]*}}, {{-?[0-9]*}}(%rsp) {{.*#+}} 16-byte Folded Spi…
    56   …;CHECK: vinserti128 $1, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}…
    66   …;CHECK: vmpsadbw $7, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 3…
    75   ;CHECK: vpabsb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
    84   ;CHECK: vpabsd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
    93   ;CHECK: vpabsw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
    102  …;CHECK: vpackssdw {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-b…
    111  …;CHECK: vpacksswb {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-b…
    [all …]
|
D | pr12312.ll |
    37   ; AVX: vptest %ymm{{.*}}, %ymm{{.*}}
    59   ; AVX: vptest %ymm{{.*}}, %ymm{{.*}}
    88   ; AVX: vptest %ymm{{.*}}, %ymm{{.*}}
    106  ; AVX: vptest %ymm{{.*}}, %ymm{{.*}}
    135  ; AVX: vptest %ymm{{.*}}, %ymm{{.*}}
    153  ; AVX: vptest %ymm{{.*}}, %ymm{{.*}}
|
D | avx2-arith.ll |
    3   ; CHECK: vpaddq %ymm
    9   ; CHECK: vpaddd %ymm
    15  ; CHECK: vpaddw %ymm
    21  ; CHECK: vpaddb %ymm
    27  ; CHECK: vpsubq %ymm
    33  ; CHECK: vpsubd %ymm
    39  ; CHECK: vpsubw %ymm
    45  ; CHECK: vpsubb %ymm
    51  ; CHECK: vpmulld %ymm
    57  ; CHECK: vpmullw %ymm
    [all …]
|
D | avx512vl-arith.ll |
    6   ; CHECK: vpaddq %ymm{{.*}}
    14  ; CHECK: vpaddq (%rdi), %ymm{{.*}}
    23  ; CHECK: vpaddq LCP{{.*}}(%rip){1to4}, %ymm{{.*}}
    31  ; CHECK: vpaddq (%rdi){1to4}, %ymm{{.*}}
    42  ; CHECK: vpaddd %ymm{{.*}}
    50  ; CHECK: vpaddd (%rdi), %ymm{{.*}}
    59  ; CHECK: vpaddd LCP{{.*}}(%rip){1to8}, %ymm{{.*}}
    67  ; CHECK: vpaddd %ymm{{.*%k[1-7].*}}
    77  ; CHECK: vpaddd %ymm{{.*{%k[1-7]} {z}.*}}
    87  ; CHECK: vpaddd (%rdi), %ymm{{.*%k[1-7]}}
    [all …]
|
D | avx2-cmp.ll |
    3   ; CHECK: vpcmpgtd %ymm
    10  ; CHECK: vpcmpgtq %ymm
    17  ; CHECK: vpcmpgtw %ymm
    24  ; CHECK: vpcmpgtb %ymm
    31  ; CHECK: vpcmpeqd %ymm
    38  ; CHECK: vpcmpeqq %ymm
    45  ; CHECK: vpcmpeqw %ymm
    52  ; CHECK: vpcmpeqb %ymm
|
D | avx-win64.ll |
    6   ; This function has live ymm registers across a win64 call.
    13  ; CHECK: vmovaps %ymm{{.*}}(%r
    14  ; CHECK: vmovaps %ymm{{.*}}(%r
    17  ; CHECK: vmovaps {{.*\(%r.*}}, %ymm
    18  ; CHECK: vmovaps {{.*\(%r.*}}, %ymm
|
D | avx2-logic.ll |
    4   ; CHECK: vpandn %ymm
    16  ; CHECK: vpand %ymm
    27  ; CHECK: vpor %ymm
    38  ; CHECK: vpxor %ymm
    49  ; CHECK: vpblendvb %ymm
|
D | avx512bwvl-arith.ll |
    6   ; CHECK: vpaddb %ymm{{.*}}
    14  ; CHECK: vpaddb (%rdi), %ymm{{.*}}
    23  ; CHECK: vpaddw %ymm{{.*}}
    31  ; CHECK: vpaddw (%rdi), %ymm{{.*}}
    40  ; CHECK: vpaddw %ymm{{.*%k[1-7].*}}
    50  ; CHECK: vpaddw %ymm{{.*{%k[1-7]} {z}.*}}
    60  ; CHECK: vpaddw (%rdi), %ymm{{.*%k[1-7]}}
    71  ; CHECK: vpaddw (%rdi), %ymm{{.*{%k[1-7]} {z}}}
    82  ; CHECK: vpsubb %ymm{{.*}}
    90  ; CHECK: vpsubw %ymm{{.*}}
    [all …]
|
D | stack-folding-fp-avx1.ll |
    21   …;CHECK: vaddpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte…
    37   …;CHECK: vaddps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte…
    88   …;CHECK: vaddsubpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-b…
    106  …;CHECK: vaddsubps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-b…
    129  …;CHECK: vandnpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byt…
    157  …;CHECK: vandnps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byt…
    184  …;CHECK: vandpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte…
    210  …;CHECK: vandps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte…
    231  …;CHECK: vblendpd $6, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 3…
    247  …;CHECK: vblendps $102, {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}}…
    [all …]
|
D | avx512vl-logic.ll |
    6   ; CHECK: vpandd %ymm
    17  ; CHECK: vpandnd %ymm
    29  ; CHECK: vpord %ymm
    40  ; CHECK: vpxord %ymm
    51  ; CHECK: vpandq %ymm
    62  ; CHECK: vpandnq %ymm
    74  ; CHECK: vporq %ymm
    85  ; CHECK: vpxorq %ymm
|
D | avx-minmax.ll |
    36  ; UNSAFE: vmaxpd {{.+}}, %ymm
    44  ; UNSAFE: vminpd {{.+}}, %ymm
    52  ; UNSAFE: vmaxps {{.+}}, %ymm
    60  ; UNSAFE: vminps {{.+}}, %ymm
|
D | vec_uint_to_fp.ll |
    145  ; AVX2: vpbroadcastd [[LOWCSTADDR]](%rip), [[LOWCST:%ymm[0-9]+]]
    146  ; AVX2-NEXT: vpblendw $170, [[LOWCST]], %ymm0, [[LOWVEC:%ymm[0-9]+]]
    147  ; AVX2-NEXT: vpsrld $16, %ymm0, [[SHIFTVEC:%ymm[0-9]+]]
    148  ; AVX2-NEXT: vpbroadcastd [[HIGHCSTADDR]](%rip), [[HIGHCST:%ymm[0-9]+]]
    149  ; AVX2-NEXT: vpblendw $170, [[HIGHCST]], [[SHIFTVEC]], [[HIGHVEC:%ymm[0-9]+]]
    150  ; AVX2-NEXT: vbroadcastss [[MAGICCSTADDR]](%rip), [[MAGICCST:%ymm[0-9]+]]
    151  ; AVX2-NEXT: vaddps [[MAGICCST]], [[HIGHVEC]], [[TMP:%ymm[0-9]+]]
|
D | memset.ll |
    31  ; YMM: vxorps %ymm{{[0-9]+}}, %ymm{{[0-9]+}}, [[Z:%ymm[0-9]+]]
|
D | avx512vl-nontemporal.ll |
    4   ; CHECK: vmovntps %ymm{{.*}} ## encoding: [0x62
    8   ; CHECK: vmovntdq %ymm{{.*}} ## encoding: [0x62
    12  ; CHECK: vmovntpd %ymm{{.*}} ## encoding: [0x62
|
D | avx2-shift.ll |
    272  ; CHECK-DAG: vpmovzxwd %xmm1, [[AMT:%ymm[0-9]+]]
    273  ; CHECK-DAG: vpmovzxwd %xmm0, [[LHS:%ymm[0-9]+]]
    274  ; CHECK: vpsllvd [[AMT]], [[LHS]], {{%ymm[0-9]+}}
    283  ; CHECK-DAG: vpmovzxwd %xmm1, [[AMT:%ymm[0-9]+]]
    284  ; CHECK-DAG: vpmovsxwd %xmm0, [[LHS:%ymm[0-9]+]]
    285  ; CHECK: vpsravd [[AMT]], [[LHS]], {{%ymm[0-9]+}}
    294  ; CHECK-DAG: vpmovzxwd %xmm1, [[AMT:%ymm[0-9]+]]
    295  ; CHECK-DAG: vpmovzxwd %xmm0, [[LHS:%ymm[0-9]+]]
    296  ; CHECK: vpsrlvd [[AMT]], [[LHS]], {{%ymm[0-9]+}}
|
D | fma-intrinsics-x86.ll |
    120  ; CHECK-FMA-WIN-NEXT: vmovaps (%{{(rcx|rdx)}}), %ymm{{0|1}}
    121  ; CHECK-FMA-WIN-NEXT: vmovaps (%{{(rcx|rdx)}}), %ymm{{0|1}}
    138  ; CHECK-FMA-WIN-NEXT: vmovapd (%{{(rcx|rdx)}}), %ymm{{0|1}}
    139  ; CHECK-FMA-WIN-NEXT: vmovapd (%{{(rcx|rdx)}}), %ymm{{0|1}}
    265  ; CHECK-FMA-WIN-NEXT: vmovaps (%{{(rcx|rdx)}}), %ymm{{0|1}}
    266  ; CHECK-FMA-WIN-NEXT: vmovaps (%{{(rcx|rdx)}}), %ymm{{0|1}}
    283  ; CHECK-FMA-WIN-NEXT: vmovapd (%{{(rcx|rdx)}}), %ymm{{0|1}}
    284  ; CHECK-FMA-WIN-NEXT: vmovapd (%{{(rcx|rdx)}}), %ymm{{0|1}}
    410  ; CHECK-FMA-WIN-NEXT: vmovaps (%{{(rcx|rdx)}}), %ymm{{0|1}}
    411  ; CHECK-FMA-WIN-NEXT: vmovaps (%{{(rcx|rdx)}}), %ymm{{0|1}}
    [all …]
|
D | llc-override-mcpu-mattr.ll |
    7   ; CHECK: vpsadbw (%r{{si|dx}}), %ymm{{[0-9]+}}, %ymm{{[0-9]+}}
|
D | avx-load-store.ll |
    48   ; CHECK: vmovaps %ymm
    63   ; CHECK: vmovaps %ymm
    130  ; CHECK: vmovaps ({{.*}}), %ymm{{.*}}
    131  ; CHECK: vmovaps %ymm{{.*}}, ({{.*}})
|
D | memset-sse-stack-realignment.ll |
    34  ; AVX1: vmovaps %ymm
    39  ; AVX2: vmovaps %ymm
|
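The matches above are FileCheck assertions from the in-tree LLVM X86 codegen tests: llc compiles each test's IR, and FileCheck matches the prefixed comment lines (; CHECK:, ; WIN64:, ; AVX2:, and so on) against the emitted assembly, with {{...}} regexes and [[NAME:...]] captures standing in for whichever ymm register the allocator happens to pick. Below is a minimal sketch of such a test, assuming an AVX2 target; the function name, RUN line, and check lines are illustrative and not taken from any file listed here.

; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s

; A 256-bit integer add is expected to lower to a single vpaddd on ymm
; registers; the {{[0-9]+}} regexes accept any register numbers.
define <8 x i32> @add_v8i32(<8 x i32> %a, <8 x i32> %b) {
; CHECK-LABEL: add_v8i32:
; CHECK: vpaddd %ymm{{[0-9]+}}, %ymm{{[0-9]+}}, %ymm{{[0-9]+}}
  %r = add <8 x i32> %a, %b
  ret <8 x i32> %r
}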
/external/swiftshader/third_party/LLVM/test/CodeGen/X86/ |
D | avx-vshufp.ll |
    3   ; CHECK: vshufps $-53, %ymm
    10  ; CHECK: vshufpd $10, %ymm
    17  ; CHECK: vshufps $-53, %ymm
    24  ; CHECK: vshufpd $2, %ymm
|
D | avx-cvt.ll |
    3   ; CHECK: vcvtdq2ps %ymm
    9   ; CHECK: vcvttps2dq %ymm
    21  ; CHECK: vcvtpd2dqy %ymm
    27  ; CHECK: vcvtpd2psy %ymm
    28  ; CHECK-NEXT: vcvtpd2psy %ymm
|
D | avx-minmax.ll |
    36  ; UNSAFE: vmaxpd %ymm
    44  ; UNSAFE: vminpd %ymm
    52  ; UNSAFE: vmaxps %ymm
    60  ; UNSAFE: vminps %ymm
|
D | avx-load-store.ll |
    44  ; CHECK: vmovaps %ymm
    50  ; CHECK: vmovups %ymm
    56  ; CHECK: vmovaps %ymm
    62  ; CHECK: vmovups %ymm
|
D | avx-unpack.ll |
    31  ; CHECK-NOT: vunpcklps %ymm
    38  ; CHECK-NOT: vunpcklpd %ymm
    45  ; CHECK-NOT: vunpckhps %ymm
    52  ; CHECK-NOT: vunpckhpd %ymm
|
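The avx-unpack.ll entry above uses CHECK-NOT rather than CHECK: the test fails if the named unpack instruction does show up in the output between the surrounding matches. A small sketch of that negative style, again with an illustrative function and RUN line assumed rather than copied from the file:

; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s

; The shuffle below simply returns %a, so no unpack instruction should be
; emitted; CHECK-NOT makes FileCheck fail if vunpcklps appears anyway.
define <8 x float> @identity_shuffle(<8 x float> %a, <8 x float> %b) {
; CHECK-LABEL: identity_shuffle:
; CHECK-NOT: vunpcklps
  %s = shufflevector <8 x float> %a, <8 x float> %b,
                     <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x float> %s
}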