
Searched refs:vmulps (Results 1 – 25 of 35) sorted by relevance
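
vmulps is the VEX/EVEX-encoded packed single-precision multiply: one instruction multiplies 4, 8, or 16 floats depending on whether the operands are xmm, ymm, or zmm registers. The matches below fall into three groups: LLVM CodeGen and MC tests that check when the compiler emits (or must not emit) vmulps, Capstone disassembler fixtures that pair raw bytes with the expected mnemonic, and hand-written libyuv assembly. As a minimal, hypothetical sketch (not taken from any file listed here), a C intrinsic multiply that a compiler built with AVX enabled typically lowers to a single vmulps:

    #include <immintrin.h>

    /* Multiplies eight packed floats; with -mavx this usually compiles to
       one vmulps %ymm, %ymm, %ymm (register allocation chosen by the compiler). */
    __m256 mul8(__m256 a, __m256 b) {
        return _mm256_mul_ps(a, b);
    }
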


/external/llvm/test/CodeGen/X86/
recip-fastmath.ll
56 ; RECIP: vmulps
58 ; RECIP: vmulps
64 ; REFINE: vmulps
66 ; REFINE: vmulps
68 ; REFINE: vmulps
70 ; REFINE: vmulps
90 ; RECIP: vmulps
92 ; RECIP: vmulps
98 ; REFINE: vmulps
100 ; REFINE: vmulps
[all …]
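
The RECIP and REFINE check prefixes in recip-fastmath.ll cover the fast-math lowering of a division into a hardware reciprocal estimate plus Newton-Raphson refinement, which is built mostly from multiplies. A hedged sketch of that refinement in intrinsics (an illustration using the standard x1 = x0 * (2 - a * x0) step, not code copied from the test; with AVX enabled the multiplies lower to vmulps):

    #include <immintrin.h>

    /* One Newton-Raphson refinement of the hardware reciprocal estimate:
       x1 = x0 * (2 - a * x0). Each _mm_mul_ps is a candidate vmulps,
       which is what the RECIP/REFINE lines above count. */
    __m128 recip_refined(__m128 a) {
        __m128 x0 = _mm_rcp_ps(a);                       /* ~12-bit estimate */
        __m128 t  = _mm_sub_ps(_mm_set1_ps(2.0f), _mm_mul_ps(a, x0));
        return _mm_mul_ps(x0, t);
    }
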
wide-fma-contraction.ll
8 ; CHECK-NOT: vmulps
11 ; CHECK-NOT: vmulps
14 ; CHECK-NOT: vmulps
18 ; CHECK-NOFMA: vmulps
21 ; CHECK-NOFMA: vmulps
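
The CHECK-NOT lines in the FMA-contraction tests assert the opposite of a match: when contraction fires, the separate multiply is folded into a fused multiply-add, so no standalone vmulps may appear; the CHECK-NOFMA lines check that without FMA the vmulps stays. A hedged illustration of the contracted form (not taken from the test file):

    #include <immintrin.h>

    /* With FMA available, a*b + c can be emitted as one vfmadd* instruction,
       so no separate vmulps is generated, which is the property the
       CHECK-NOT lines above verify. */
    __m256 muladd(__m256 a, __m256 b, __m256 c) {
        return _mm256_fmadd_ps(a, b, c);   /* fused multiply-add */
    }
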
sqrt-fastmath.ll
102 ; ESTIMATE-NEXT: vmulps %xmm1, %xmm1, %xmm2
103 ; ESTIMATE-NEXT: vmulps %xmm2, %xmm0, %xmm0
105 ; ESTIMATE-NEXT: vmulps {{.*}}(%rip), %xmm1, %xmm1
106 ; ESTIMATE-NEXT: vmulps %xmm0, %xmm1, %xmm0
127 ; ESTIMATE-NEXT: vmulps %ymm1, %ymm1, %ymm2
128 ; ESTIMATE-NEXT: vmulps %ymm2, %ymm0, %ymm0
130 ; ESTIMATE-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1
131 ; ESTIMATE-NEXT: vmulps %ymm0, %ymm1, %ymm0
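
The ESTIMATE-prefixed lines in sqrt-fastmath.ll match the fast-math lowering of sqrt through the reciprocal-square-root estimate plus one refinement step, again mostly multiplies. A hedged sketch of the usual formulation (an illustration; the constants, operand order, and zero-input fixup in the actual lowering may differ):

    #include <immintrin.h>

    /* sqrt(a) ~= a * rsqrt(a), with one Newton-Raphson step on the rsqrt
       estimate: y1 = y0 * (1.5 - 0.5 * a * y0 * y0). Each _mm_mul_ps below
       corresponds to one of the vmulps instructions in the ESTIMATE lines. */
    __m128 sqrt_est(__m128 a) {
        __m128 y0 = _mm_rsqrt_ps(a);
        __m128 yy = _mm_mul_ps(y0, y0);
        __m128 t  = _mm_sub_ps(_mm_set1_ps(1.5f),
                               _mm_mul_ps(_mm_mul_ps(a, _mm_set1_ps(0.5f)), yy));
        return _mm_mul_ps(a, _mm_mul_ps(y0, t));
    }
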
extended-fma-contraction.ll
7 ; CHECK-NOT: vmulps
10 ; CHECK-NOT: vmulps
14 ; CHECK-NOFMA: vmulps
vec_uint_to_fp-fastmath.ll
50 ; AVX-NEXT: vmulps [[FPMASKCSTADDR]](%rip), %xmm0, %xmm0
59 ; AVX2-NEXT: vmulps %xmm2, %xmm1, %xmm1
129 ; AVX-NEXT: vmulps [[FPMASKCSTADDR_v8]](%rip), %ymm0, %ymm0
138 ; AVX2-NEXT: vmulps %ymm2, %ymm1, %ymm1
2012-04-26-sdglue.ll
16 ; CHECK-NEXT: vmulps %ymm0, %ymm2, %ymm2
17 ; CHECK-NEXT: vmulps %ymm0, %ymm0, %ymm0
20 ; CHECK-NEXT: vmulps %xmm0, %xmm0, %xmm0
fnabs.ll
42 ; CHECK: vmulps
63 ; CHECK: vmulps
WidenArith.ll
8 ; CHECK-NEXT: vmulps %ymm0, %ymm1, %ymm1
machine-combiner.ll
225 ; AVX-NEXT: vmulps %xmm1, %xmm0, %xmm0
270 ; AVX-NEXT: vmulps %xmm3, %xmm2, %xmm1
271 ; AVX-NEXT: vmulps %xmm1, %xmm0, %xmm0
306 ; AVX-NEXT: vmulps %ymm1, %ymm0, %ymm0
337 ; AVX-NEXT: vmulps %ymm3, %ymm2, %ymm1
338 ; AVX-NEXT: vmulps %ymm1, %ymm0, %ymm0
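
machine-combiner.ll exercises reassociation of floating-point multiply chains under fast-math: rebalancing ((a*b)*c)*d into (a*b)*(c*d) lets two of the multiplies issue independently, which is the back-to-back vmulps pairs visible in the matches above. A hand-written sketch of the two shapes, using intrinsics purely for illustration (the test itself drives this from IR with fast-math flags, not intrinsics):

    #include <immintrin.h>

    /* Serial form: three dependent multiplies on the critical path. */
    __m128 chain_serial(__m128 a, __m128 b, __m128 c, __m128 d) {
        return _mm_mul_ps(_mm_mul_ps(_mm_mul_ps(a, b), c), d);
    }

    /* Reassociated form: the first two multiplies are independent,
       the shape the machine-combiner checks look for. */
    __m128 chain_balanced(__m128 a, __m128 b, __m128 c, __m128 d) {
        return _mm_mul_ps(_mm_mul_ps(a, b), _mm_mul_ps(c, d));
    }
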
avx-arith.ll
75 ; CHECK: vmulps
82 ; CHECK: vmulps LCP{{.*}}(%rip)
fma_patterns_wide.ll
692 ; FMA-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
693 ; FMA-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1
698 ; FMA4-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0
699 ; FMA4-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1
704 ; AVX512-NEXT: vmulps {{.*}}(%rip), %zmm0, %zmm0
combine-64bit-vec-binop.ll
271 ; AVX: vmulps
fma_patterns.ll
1069 ; FMA-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0
1074 ; FMA4-NEXT: vmulps {{.*}}(%rip), %xmm0, %xmm0
1079 ; AVX512-NEXT: vmulps {{.*}}(%rip){1to4}, %xmm0, %xmm0
avx512-arith.ll
288 ; CHECK-NEXT: vmulps %zmm0, %zmm1, %zmm0
298 ; CHECK-NEXT: vmulps {{.*}}(%rip), %zmm0, %zmm0
649 ; CHECK-NEXT: vmulps %zmm2, %zmm1, %zmm0 {%k1}
avx512vl-arith.ll
191 ; CHECK: vmulps {{%ymm[0-9]{1,2}, %ymm[0-9]{1,2}, %ymm[0-9]{1,2} {%k[1-7]}}}
584 ; CHECK: vmulps {{%xmm[0-9]{1,2}, %xmm[0-9]{1,2}, %xmm[0-9]{1,2} {%k[1-7]}}}
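
The {%k1}..{%k7} operands in the AVX-512 matches above are write masks: lanes whose mask bit is clear keep the destination value (or are zeroed when {z} is present). A hedged intrinsic-level sketch of a masked multiply, added here only for illustration:

    #include <immintrin.h>

    /* Lane i gets a[i]*b[i] where bit i of k is set; otherwise it keeps the
       corresponding lane of src. Lowered to vmulps %zmm, %zmm, %zmm {%k},
       the form matched in the avx512 tests above. */
    __m512 masked_mul(__m512 src, __mmask16 k, __m512 a, __m512 b) {
        return _mm512_mask_mul_ps(src, k, a, b);
    }
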
/external/capstone/suite/MC/X86/
x86-32-avx.s.cs
20 0xc5,0xc8,0x59,0xd4 = vmulps %xmm4, %xmm6, %xmm2
28 0xc5,0xe8,0x59,0xac,0xcb,0xef,0xbe,0xad,0xde = vmulps 3735928559(%ebx, %ecx, 8), %xmm2, %xmm5
610 0xc5,0xdc,0x59,0xf2 = vmulps %ymm2, %ymm4, %ymm6
622 0xc5,0xdc,0x59,0x30 = vmulps (%eax), %ymm4, %ymm6
x86_64-avx-encoding.s.cs
20 0xc4,0x41,0x20,0x59,0xfa = vmulps %xmm10, %xmm11, %xmm15
28 0xc5,0x28,0x59,0x5c,0xd9,0xfc = vmulps -4(%rcx, %rbx, 8), %xmm10, %xmm11
795 0xc4,0xc1,0x5c,0x59,0xf4 = vmulps %ymm12, %ymm4, %ymm6
807 0xc5,0xdc,0x59,0x30 = vmulps (%rax), %ymm4, %ymm6
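
The .s.cs fixtures pair raw instruction bytes with the disassembly Capstone is expected to produce. As an illustration (an annotation added here, not part of the fixture), the first 128-bit case breaks down roughly as follows:

    /* 0xc5,0xc8,0x59,0xd4  =  vmulps %xmm4, %xmm6, %xmm2
         c5  two-byte VEX prefix
         c8  vvvv = ~1001 = 0110 -> xmm6 (first source); L=0 -> 128-bit; pp=00 -> packed single
         59  opcode shared by MULPS/VMULPS
         d4  ModRM 11.010.100: reg -> xmm2 (destination), rm -> xmm4 (second source) */
    static const unsigned char vmulps_xmm_bytes[] = { 0xc5, 0xc8, 0x59, 0xd4 };
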
/external/llvm/test/MC/X86/
x86-64-avx512f_vl.s
1913 vmulps %xmm21, %xmm17, %xmm29
1917 vmulps %xmm21, %xmm17, %xmm29 {%k2}
1921 vmulps %xmm21, %xmm17, %xmm29 {%k2} {z}
1925 vmulps (%rcx), %xmm17, %xmm29
1929 vmulps 291(%rax,%r14,8), %xmm17, %xmm29
1933 vmulps (%rcx){1to4}, %xmm17, %xmm29
1937 vmulps 2032(%rdx), %xmm17, %xmm29
1941 vmulps 2048(%rdx), %xmm17, %xmm29
1945 vmulps -2048(%rdx), %xmm17, %xmm29
1949 vmulps -2064(%rdx), %xmm17, %xmm29
[all …]
x86-32-avx.s
77 vmulps %xmm4, %xmm6, %xmm2
109 vmulps 3735928559(%ebx,%ecx,8), %xmm2, %xmm5
2436 vmulps %ymm2, %ymm4, %ymm6
2484 vmulps (%eax), %ymm4, %ymm6
x86_64-avx-encoding.s
77 vmulps %xmm10, %xmm11, %xmm15
109 vmulps -4(%rcx,%rbx,8), %xmm10, %xmm11
3177 vmulps %ymm12, %ymm4, %ymm6
3225 vmulps (%rax), %ymm4, %ymm6
avx512-encodings.s
981 vmulps %zmm24, %zmm6, %zmm3
985 vmulps %zmm24, %zmm6, %zmm3 {%k4}
989 vmulps %zmm24, %zmm6, %zmm3 {%k4} {z}
993 vmulps (%rcx), %zmm6, %zmm3
997 vmulps 291(%rax,%r14,8), %zmm6, %zmm3
1001 vmulps (%rcx){1to16}, %zmm6, %zmm3
1005 vmulps 8128(%rdx), %zmm6, %zmm3
1009 vmulps 8192(%rdx), %zmm6, %zmm3
1013 vmulps -8192(%rdx), %zmm6, %zmm3
1017 vmulps -8256(%rdx), %zmm6, %zmm3
[all …]
intel-syntax-avx512.s
9021 vmulps zmm3, zmm6, zmm24
9025 vmulps zmm3 {k4}, zmm6, zmm24
9029 vmulps zmm3 {k4} {z}, zmm6, zmm24
9033 vmulps zmm3, zmm6, zmm24, {rn-sae}
9037 vmulps zmm3, zmm6, zmm24, {ru-sae}
9041 vmulps zmm3, zmm6, zmm24, {rd-sae}
9045 vmulps zmm3, zmm6, zmm24, {rz-sae}
9049 vmulps zmm3, zmm6, zmmword ptr [rcx]
9053 vmulps zmm3, zmm6, zmmword ptr [rax + 8*r14 + 291]
9057 vmulps zmm3, zmm6, dword ptr [rcx]{1to16}
[all …]
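
The {rn-sae}, {ru-sae}, {rd-sae}, and {rz-sae} suffixes in the Intel-syntax AVX-512 matches select a static rounding mode (to nearest, up, down, toward zero) with exceptions suppressed, overriding MXCSR for that single instruction. A hedged intrinsic-level counterpart, shown only for illustration:

    #include <immintrin.h>

    /* Multiply with an embedded rounding override; roughly corresponds to
       vmulps zmm, zmm, zmm, {rn-sae} in the matches above. */
    __m512 mul_rn(__m512 a, __m512 b) {
        return _mm512_mul_round_ps(a, b,
                   _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
    }
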
/external/libvpx/third_party/libyuv/source/
row_win.cc
5933 vmulps ymm2, ymm0, ymm0 // X * X
5934 vmulps ymm3, ymm0, ymm7 // C3 * X
6014 vmulps ymm3, ymm3, ymm4 // scale to adjust exponent for 5 bit range.
6015 vmulps ymm2, ymm2, ymm4
6047 vmulps ymm2, ymm2, ymm4 // scale to normalized range 0 to 1
6048 vmulps ymm3, ymm3, ymm4
/external/libaom/third_party/libyuv/source/
row_win.cc
5936 vmulps ymm2, ymm0, ymm0 // X * X
5937 vmulps ymm3, ymm0, ymm7 // C3 * X
6017 vmulps ymm3, ymm3, ymm4 // scale to adjust exponent for 5 bit range.
6018 vmulps ymm2, ymm2, ymm4
6050 vmulps ymm2, ymm2, ymm4 // scale to normalized range 0 to 1
6051 vmulps ymm3, ymm3, ymm4
/external/libyuv/files/source/
row_win.cc
6102 vmulps ymm2, ymm0, ymm0 // X * X
6103 vmulps ymm3, ymm0, ymm7 // C3 * X
6183 vmulps ymm3, ymm3, ymm4 // scale to adjust exponent for 5 bit range.
6184 vmulps ymm2, ymm2, ymm4
6216 vmulps ymm2, ymm2, ymm4 // scale to normalized range 0 to 1
6217 vmulps ymm3, ymm3, ymm4
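
The three row_win.cc hits are the same vendored libyuv source carried by libvpx, libaom, and the standalone libyuv tree. The matches come from hand-written AVX2 assembly that evaluates a per-channel polynomial ("X * X", "C3 * X") plus separate scaling steps. A rough intrinsic rendering of just the two commented multiplies, as a sketch only (the real routine is inline assembly, and the adds/FMAs that combine the terms are not shown):

    #include <immintrin.h>

    /* Two of the polynomial terms: x^2 and c3*x. The surrounding libyuv code
       appears to combine these into c0 + c1*x + c2*x^2 + c3*x^3. */
    static inline void poly_terms(__m256 x, __m256 c3, __m256 *xx, __m256 *c3x) {
        *xx  = _mm256_mul_ps(x, x);    /* vmulps ymm2, ymm0, ymm0  ; X * X  */
        *c3x = _mm256_mul_ps(x, c3);   /* vmulps ymm3, ymm0, ymm7  ; C3 * X */
    }
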
