; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon -aarch64-simd-scalar| FileCheck %s

; Integer vector adds: each must select a single NEON add whose
; arrangement specifier matches the element type and vector width.

define <8 x i8> @add8xi8(<8 x i8> %A, <8 x i8> %B) {
;CHECK: add {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
  %sum = add <8 x i8> %A, %B
  ret <8 x i8> %sum
}

define <16 x i8> @add16xi8(<16 x i8> %A, <16 x i8> %B) {
;CHECK: add {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
  %sum = add <16 x i8> %A, %B
  ret <16 x i8> %sum
}

define <4 x i16> @add4xi16(<4 x i16> %A, <4 x i16> %B) {
;CHECK: add {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
  %sum = add <4 x i16> %A, %B
  ret <4 x i16> %sum
}

define <8 x i16> @add8xi16(<8 x i16> %A, <8 x i16> %B) {
;CHECK: add {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
  %sum = add <8 x i16> %A, %B
  ret <8 x i16> %sum
}

define <2 x i32> @add2xi32(<2 x i32> %A, <2 x i32> %B) {
;CHECK: add {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
  %sum = add <2 x i32> %A, %B
  ret <2 x i32> %sum
}

define <4 x i32> @add4x32(<4 x i32> %A, <4 x i32> %B) {
;CHECK: add {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
  %sum = add <4 x i32> %A, %B
  ret <4 x i32> %sum
}

define <2 x i64> @add2xi64(<2 x i64> %A, <2 x i64> %B) {
;CHECK: add {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
  %sum = add <2 x i64> %A, %B
  ret <2 x i64> %sum
}

; Single-precision vector adds must select fadd with a .2s/.4s arrangement.

define <2 x float> @add2xfloat(<2 x float> %A, <2 x float> %B) {
;CHECK: fadd {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
  %sum = fadd <2 x float> %A, %B
  ret <2 x float> %sum
}

define <4 x float> @add4xfloat(<4 x float> %A, <4 x float> %B) {
;CHECK: fadd {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
  %sum = fadd <4 x float> %A, %B
  ret <4 x float> %sum
}
define <2 x double> @add2xdouble(<2 x double> %A, <2 x double> %B) {
; The IR performs a floating-point add, so the selected instruction is
; fadd, not integer add. (The previous CHECK expected "add v.2d" and
; could never match the fadd that llc emits for fsub/fadd on FP vectors.)
;CHECK: fadd {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
	%tmp3 = fadd <2 x double> %A, %B
	ret <2 x double> %tmp3
}

; Integer vector subtracts: each must select a single NEON sub whose
; arrangement specifier matches the element type and vector width.

define <8 x i8> @sub8xi8(<8 x i8> %A, <8 x i8> %B) {
;CHECK: sub {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
  %diff = sub <8 x i8> %A, %B
  ret <8 x i8> %diff
}

define <16 x i8> @sub16xi8(<16 x i8> %A, <16 x i8> %B) {
;CHECK: sub {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
  %diff = sub <16 x i8> %A, %B
  ret <16 x i8> %diff
}

define <4 x i16> @sub4xi16(<4 x i16> %A, <4 x i16> %B) {
;CHECK: sub {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
  %diff = sub <4 x i16> %A, %B
  ret <4 x i16> %diff
}

define <8 x i16> @sub8xi16(<8 x i16> %A, <8 x i16> %B) {
;CHECK: sub {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
  %diff = sub <8 x i16> %A, %B
  ret <8 x i16> %diff
}

define <2 x i32> @sub2xi32(<2 x i32> %A, <2 x i32> %B) {
;CHECK: sub {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
  %diff = sub <2 x i32> %A, %B
  ret <2 x i32> %diff
}

define <4 x i32> @sub4x32(<4 x i32> %A, <4 x i32> %B) {
;CHECK: sub {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
  %diff = sub <4 x i32> %A, %B
  ret <4 x i32> %diff
}

define <2 x i64> @sub2xi64(<2 x i64> %A, <2 x i64> %B) {
;CHECK: sub {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
  %diff = sub <2 x i64> %A, %B
  ret <2 x i64> %diff
}

; Single-precision vector subtracts must select fsub with a .2s/.4s arrangement.

define <2 x float> @sub2xfloat(<2 x float> %A, <2 x float> %B) {
;CHECK: fsub {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
  %diff = fsub <2 x float> %A, %B
  ret <2 x float> %diff
}

define <4 x float> @sub4xfloat(<4 x float> %A, <4 x float> %B) {
;CHECK: fsub {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
  %diff = fsub <4 x float> %A, %B
  ret <4 x float> %diff
}
define <2 x double> @sub2xdouble(<2 x double> %A, <2 x double> %B) {
; The IR performs a floating-point subtract, so the selected instruction
; is fsub, not integer sub. (The previous CHECK expected "sub v.2d" and
; could never match the fsub that llc emits for FP vectors.)
;CHECK: fsub {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
	%tmp3 = fsub <2 x double> %A, %B
	ret <2 x double> %tmp3
}

; Scalar <1 x double> arithmetic must use the scalar FP instructions on
; d registers rather than the vector forms.

define <1 x double> @test_vadd_f64(<1 x double> %a, <1 x double> %b) {
; CHECK-LABEL: test_vadd_f64
; CHECK: fadd d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
  %sum = fadd <1 x double> %a, %b
  ret <1 x double> %sum
}

define <1 x double> @test_vmul_f64(<1 x double> %a, <1 x double> %b) {
; CHECK-LABEL: test_vmul_f64
; CHECK: fmul d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
  %prod = fmul <1 x double> %a, %b
  ret <1 x double> %prod
}

define <1 x double> @test_vdiv_f64(<1 x double> %a, <1 x double> %b) {
; CHECK-LABEL: test_vdiv_f64
; CHECK: fdiv d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
  %quot = fdiv <1 x double> %a, %b
  ret <1 x double> %quot
}

; Unfused multiply-accumulate: the separate fmul/fadd (fsub) pair must
; stay as two scalar instructions, not be fused.

define <1 x double> @test_vmla_f64(<1 x double> %a, <1 x double> %b, <1 x double> %c) {
; CHECK-LABEL: test_vmla_f64
; CHECK: fmul d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
; CHECK: fadd d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
  %prod = fmul <1 x double> %b, %c
  %acc = fadd <1 x double> %prod, %a
  ret <1 x double> %acc
}

define <1 x double> @test_vmls_f64(<1 x double> %a, <1 x double> %b, <1 x double> %c) {
; CHECK-LABEL: test_vmls_f64
; CHECK: fmul d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
; CHECK: fsub d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
  %prod = fmul <1 x double> %b, %c
  %acc = fsub <1 x double> %a, %prod
  ret <1 x double> %acc
}

; Fused multiply-add via llvm.fma: fneg folded into the multiplicand must
; select fmsub; the plain intrinsic must select fmadd.

define <1 x double> @test_vfms_f64(<1 x double> %a, <1 x double> %b, <1 x double> %c) {
; CHECK-LABEL: test_vfms_f64
; CHECK: fmsub d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
  %negb = fsub <1 x double> <double -0.000000e+00>, %b
  %fms = tail call <1 x double> @llvm.fma.v1f64(<1 x double> %negb, <1 x double> %c, <1 x double> %a)
  ret <1 x double> %fms
}

define <1 x double> @test_vfma_f64(<1 x double> %a, <1 x double> %b, <1 x double> %c) {
; CHECK-LABEL: test_vfma_f64
; CHECK: fmadd d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
  %fma = tail call <1 x double> @llvm.fma.v1f64(<1 x double> %b, <1 x double> %c, <1 x double> %a)
  ret <1 x double> %fma
}

define <1 x double> @test_vsub_f64(<1 x double> %a, <1 x double> %b) {
; CHECK-LABEL: test_vsub_f64
; CHECK: fsub d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
  %diff = fsub <1 x double> %a, %b
  ret <1 x double> %diff
}

; The scalar fabd intrinsic must map to the scalar fabd instruction.
define <1 x double> @test_vabd_f64(<1 x double> %a, <1 x double> %b) {
; CHECK-LABEL: test_vabd_f64
; CHECK: fabd d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
  %res = tail call <1 x double> @llvm.aarch64.neon.fabd.v1f64(<1 x double> %a, <1 x double> %b)
  ret <1 x double> %res
}

; Scalar min/max intrinsics (plain and "NM" numeric variants) must each
; select the corresponding scalar d-register instruction.

define <1 x double> @test_vmax_f64(<1 x double> %a, <1 x double> %b) {
; CHECK-LABEL: test_vmax_f64
; CHECK: fmax d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
  %res = tail call <1 x double> @llvm.aarch64.neon.fmax.v1f64(<1 x double> %a, <1 x double> %b)
  ret <1 x double> %res
}

define <1 x double> @test_vmin_f64(<1 x double> %a, <1 x double> %b) {
; CHECK-LABEL: test_vmin_f64
; CHECK: fmin d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
  %res = tail call <1 x double> @llvm.aarch64.neon.fmin.v1f64(<1 x double> %a, <1 x double> %b)
  ret <1 x double> %res
}

define <1 x double> @test_vmaxnm_f64(<1 x double> %a, <1 x double> %b) {
; CHECK-LABEL: test_vmaxnm_f64
; CHECK: fmaxnm d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
  %res = tail call <1 x double> @llvm.aarch64.neon.fmaxnm.v1f64(<1 x double> %a, <1 x double> %b)
  ret <1 x double> %res
}

define <1 x double> @test_vminnm_f64(<1 x double> %a, <1 x double> %b) {
; CHECK-LABEL: test_vminnm_f64
; CHECK: fminnm d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
  %res = tail call <1 x double> @llvm.aarch64.neon.fminnm.v1f64(<1 x double> %a, <1 x double> %b)
  ret <1 x double> %res
}

define <1 x double> @test_vabs_f64(<1 x double> %a) {
; CHECK-LABEL: test_vabs_f64
; CHECK: fabs d{{[0-9]+}}, d{{[0-9]+}}
  %res = tail call <1 x double> @llvm.fabs.v1f64(<1 x double> %a)
  ret <1 x double> %res
}

; Subtraction from -0.0 is a floating-point negation and must select fneg.
define <1 x double> @test_vneg_f64(<1 x double> %a) {
; CHECK-LABEL: test_vneg_f64
; CHECK: fneg d{{[0-9]+}}, d{{[0-9]+}}
  %neg = fsub <1 x double> <double -0.000000e+00>, %a
  ret <1 x double> %neg
}

; Declarations for the intrinsics exercised by the scalar <1 x double>
; tests above.
declare <1 x double> @llvm.fabs.v1f64(<1 x double>)
declare <1 x double> @llvm.aarch64.neon.fminnm.v1f64(<1 x double>, <1 x double>)
declare <1 x double> @llvm.aarch64.neon.fmaxnm.v1f64(<1 x double>, <1 x double>)
declare <1 x double> @llvm.aarch64.neon.fmin.v1f64(<1 x double>, <1 x double>)
declare <1 x double> @llvm.aarch64.neon.fmax.v1f64(<1 x double>, <1 x double>)
declare <1 x double> @llvm.aarch64.neon.fabd.v1f64(<1 x double>, <1 x double>)
declare <1 x double> @llvm.fma.v1f64(<1 x double>, <1 x double>, <1 x double>)