; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+fma < %s | FileCheck %s
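; These tests check that the constrained FMA intrinsics are lowered to FMA3
; instructions (vfmadd213ss/sd/ps/pd) under strictfp, covering single-width,
; scalar, illegal, and double-width vector types (see the declaration groups
; at the end of the file).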

define <1 x float> @constrained_vector_fma_v1f32() #0 {
; CHECK-LABEL: constrained_vector_fma_v1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + mem
; CHECK-NEXT:    retq
entry:
  %fma = call <1 x float> @llvm.experimental.constrained.fma.v1f32(
           <1 x float> <float 0.5>,
           <1 x float> <float 2.5>,
           <1 x float> <float 4.5>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict") #0
  ret <1 x float> %fma
}

define <2 x double> @constrained_vector_fma_v2f64() #0 {
; CHECK-LABEL: constrained_vector_fma_v2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmovapd {{.*#+}} xmm1 = [1.5E+0,5.0E-1]
; CHECK-NEXT:    vmovapd {{.*#+}} xmm0 = [3.5E+0,2.5E+0]
; CHECK-NEXT:    vfmadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) + mem
; CHECK-NEXT:    retq
entry:
  %fma = call <2 x double> @llvm.experimental.constrained.fma.v2f64(
           <2 x double> <double 1.5, double 0.5>,
           <2 x double> <double 3.5, double 2.5>,
           <2 x double> <double 5.5, double 4.5>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict") #0
  ret <2 x double> %fma
}

define <3 x float> @constrained_vector_fma_v3f32() #0 {
; CHECK-LABEL: constrained_vector_fma_v3f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT:    vfmadd213ss {{.*#+}} xmm1 = (xmm0 * xmm1) + mem
; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; CHECK-NEXT:    vfmadd213ss {{.*#+}} xmm2 = (xmm0 * xmm2) + mem
; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; CHECK-NEXT:    vfmadd213ss {{.*#+}} xmm3 = (xmm0 * xmm3) + mem
; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm2[0],xmm3[0],xmm2[2,3]
; CHECK-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
; CHECK-NEXT:    retq
entry:
  %fma = call <3 x float> @llvm.experimental.constrained.fma.v3f32(
           <3 x float> <float 2.5, float 1.5, float 0.5>,
           <3 x float> <float 5.5, float 4.5, float 3.5>,
           <3 x float> <float 8.5, float 7.5, float 6.5>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict") #0
  ret <3 x float> %fma
}

define <3 x double> @constrained_vector_fma_v3f64() #0 {
; CHECK-LABEL: constrained_vector_fma_v3f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT:    vfmadd213sd {{.*#+}} xmm1 = (xmm0 * xmm1) + mem
; CHECK-NEXT:    vmovapd {{.*#+}} xmm0 = [2.5E+0,1.5E+0]
; CHECK-NEXT:    vmovapd {{.*#+}} xmm2 = [5.5E+0,4.5E+0]
; CHECK-NEXT:    vfmadd213pd {{.*#+}} xmm2 = (xmm0 * xmm2) + mem
; CHECK-NEXT:    vinsertf128 $1, %xmm1, %ymm2, %ymm0
; CHECK-NEXT:    retq
entry:
  %fma = call <3 x double> @llvm.experimental.constrained.fma.v3f64(
           <3 x double> <double 2.5, double 1.5, double 0.5>,
           <3 x double> <double 5.5, double 4.5, double 3.5>,
           <3 x double> <double 8.5, double 7.5, double 6.5>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict") #0
  ret <3 x double> %fma
}

define <4 x double> @constrained_vector_fma_v4f64() #0 {
; CHECK-LABEL: constrained_vector_fma_v4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmovapd {{.*#+}} ymm1 = [3.5E+0,2.5E+0,1.5E+0,5.0E-1]
; CHECK-NEXT:    vmovapd {{.*#+}} ymm0 = [7.5E+0,6.5E+0,5.5E+0,4.5E+0]
; CHECK-NEXT:    vfmadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) + mem
; CHECK-NEXT:    retq
entry:
  %fma = call <4 x double> @llvm.experimental.constrained.fma.v4f64(
           <4 x double> <double 3.5, double 2.5, double 1.5, double 0.5>,
           <4 x double> <double 7.5, double 6.5, double 5.5, double 4.5>,
           <4 x double> <double 11.5, double 10.5, double 9.5, double 8.5>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict") #0
  ret <4 x double> %fma
}

define <4 x float> @constrained_vector_fma_v4f32() #0 {
; CHECK-LABEL: constrained_vector_fma_v4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmovaps {{.*#+}} xmm1 = [3.5E+0,2.5E+0,1.5E+0,5.0E-1]
; CHECK-NEXT:    vmovaps {{.*#+}} xmm0 = [7.5E+0,6.5E+0,5.5E+0,4.5E+0]
; CHECK-NEXT:    vfmadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) + mem
; CHECK-NEXT:    retq
entry:
  %fma = call <4 x float> @llvm.experimental.constrained.fma.v4f32(
           <4 x float> <float 3.5, float 2.5, float 1.5, float 0.5>,
           <4 x float> <float 7.5, float 6.5, float 5.5, float 4.5>,
           <4 x float> <float 11.5, float 10.5, float 9.5, float 8.5>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict") #0
  ret <4 x float> %fma
}

define <8 x float> @constrained_vector_fma_v8f32() #0 {
; CHECK-LABEL: constrained_vector_fma_v8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmovaps {{.*#+}} ymm1 = [3.5E+0,2.5E+0,1.5E+0,5.0E-1,7.5E+0,6.5E+0,5.5E+0,4.5E+0]
; CHECK-NEXT:    vmovaps {{.*#+}} ymm0 = [7.5E+0,6.5E+0,5.5E+0,4.5E+0,1.15E+1,1.05E+1,9.5E+0,8.5E+0]
; CHECK-NEXT:    vfmadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) + mem
; CHECK-NEXT:    retq
entry:
  %fma = call <8 x float> @llvm.experimental.constrained.fma.v8f32(
           <8 x float> <float 3.5, float 2.5, float 1.5, float 0.5,
                        float 7.5, float 6.5, float 5.5, float 4.5>,
           <8 x float> <float 7.5, float 6.5, float 5.5, float 4.5,
                        float 11.5, float 10.5, float 9.5, float 8.5>,
           <8 x float> <float 11.5, float 10.5, float 9.5, float 8.5,
                        float 15.5, float 14.5, float 13.5, float 12.5>,
           metadata !"round.dynamic",
           metadata !"fpexcept.strict") #0
  ret <8 x float> %fma
}

attributes #0 = { strictfp }

; Single width declarations
declare <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double>, <2 x double>, <2 x double>, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, metadata, metadata)

; Scalar width declarations
declare <1 x float> @llvm.experimental.constrained.fma.v1f32(<1 x float>, <1 x float>, <1 x float>, metadata, metadata)

; Illegal width declarations
declare <3 x float> @llvm.experimental.constrained.fma.v3f32(<3 x float>, <3 x float>, <3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.fma.v3f64(<3 x double>, <3 x double>, <3 x double>, metadata, metadata)

; Double width declarations
declare <4 x double> @llvm.experimental.constrained.fma.v4f64(<4 x double>, <4 x double>, <4 x double>, metadata, metadata)
declare <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float>, <8 x float>, <8 x float>, metadata, metadata)