; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon -fp-contract=fast | FileCheck %s

; FIXME: We should not generate ld/st for this kind of register spill/fill,
; because the test case is very simple and register pressure is not high. If
; the spill/fill algorithm is improved, this test case may no longer be
; triggered, and then we can delete it.
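;
; Each test below loads a multi-vector tuple with ld2/ld3/ld4, keeps the
; result live across a call to @foo (which clobbers the caller-saved vector
; registers), and then extracts a lane, so the tuple has to be spilled with
; st1 and reloaded with ld1 around the call.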
define i32 @spill.DPairReg(i32* %arg1, i32 %arg2) {
; CHECK-LABEL: spill.DPairReg:
; CHECK: ld2 { v{{[0-9]+}}.2s, v{{[0-9]+}}.2s }, [{{x[0-9]+|sp}}]
; CHECK: st1 { v{{[0-9]+}}.2d, v{{[0-9]+}}.2d }, [{{x[0-9]+|sp}}]
; CHECK: ld1 { v{{[0-9]+}}.2d, v{{[0-9]+}}.2d }, [{{x[0-9]+|sp}}]
entry:
  %vld = tail call { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2.v2i32.p0i32(i32* %arg1)
  %cmp = icmp eq i32 %arg2, 0
  br i1 %cmp, label %if.then, label %if.end

if.then:
  tail call void @foo()
  br label %if.end

if.end:
  %vld.extract = extractvalue { <2 x i32>, <2 x i32> } %vld, 0
  %res = extractelement <2 x i32> %vld.extract, i32 1
  ret i32 %res
}

define i16 @spill.DTripleReg(i16* %arg1, i32 %arg2) {
; CHECK-LABEL: spill.DTripleReg:
; CHECK: ld3 { v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h }, [{{x[0-9]+|sp}}]
; CHECK: st1 { v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d }, [{{x[0-9]+|sp}}]
; CHECK: ld1 { v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d }, [{{x[0-9]+|sp}}]
entry:
  %vld = tail call { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0i16(i16* %arg1)
  %cmp = icmp eq i32 %arg2, 0
  br i1 %cmp, label %if.then, label %if.end

if.then:
  tail call void @foo()
  br label %if.end

if.end:
  %vld.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16> } %vld, 0
  %res = extractelement <4 x i16> %vld.extract, i32 1
  ret i16 %res
}

define i16 @spill.DQuadReg(i16* %arg1, i32 %arg2) {
; CHECK-LABEL: spill.DQuadReg:
; CHECK: ld4 { v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h }, [{{x[0-9]+|sp}}]
; CHECK: st1 { v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d }, [{{x[0-9]+|sp}}]
; CHECK: ld1 { v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d }, [{{x[0-9]+|sp}}]
entry:
  %vld = tail call { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4.v4i16.p0i16(i16* %arg1)
  %cmp = icmp eq i32 %arg2, 0
  br i1 %cmp, label %if.then, label %if.end

if.then:
  tail call void @foo()
  br label %if.end

if.end:
  %vld.extract = extractvalue { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } %vld, 0
  %res = extractelement <4 x i16> %vld.extract, i32 0
  ret i16 %res
}

define i32 @spill.QPairReg(i32* %arg1, i32 %arg2) {
; CHECK-LABEL: spill.QPairReg:
; CHECK: ld2 { v{{[0-9]+}}.4s, v{{[0-9]+}}.4s }, [{{x[0-9]+|sp}}]
; CHECK: st1 { v{{[0-9]+}}.2d, v{{[0-9]+}}.2d }, [{{x[0-9]+|sp}}]
; CHECK: ld1 { v{{[0-9]+}}.2d, v{{[0-9]+}}.2d }, [{{x[0-9]+|sp}}]
entry:
  %vld = tail call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i32(i32* %arg1)
  %cmp = icmp eq i32 %arg2, 0
  br i1 %cmp, label %if.then, label %if.end

if.then:
  tail call void @foo()
  br label %if.end

if.end:
  %vld.extract = extractvalue { <4 x i32>, <4 x i32> } %vld, 0
  %res = extractelement <4 x i32> %vld.extract, i32 1
  ret i32 %res
}

define float @spill.QTripleReg(float* %arg1, i32 %arg2) {
; CHECK-LABEL: spill.QTripleReg:
; CHECK: ld3 { v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s }, [{{x[0-9]+|sp}}]
; CHECK: st1 { v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d }, [{{x[0-9]+|sp}}]
; CHECK: ld1 { v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d }, [{{x[0-9]+|sp}}]
entry:
  %vld3 = tail call { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3.v4f32.p0f32(float* %arg1)
  %cmp = icmp eq i32 %arg2, 0
  br i1 %cmp, label %if.then, label %if.end

if.then:
  tail call void @foo()
  br label %if.end

if.end:
  %vld3.extract = extractvalue { <4 x float>, <4 x float>, <4 x float> } %vld3, 0
  %res = extractelement <4 x float> %vld3.extract, i32 1
  ret float %res
}

define i8 @spill.QQuadReg(i8* %arg1, i32 %arg2) {
; CHECK-LABEL: spill.QQuadReg:
; CHECK: ld4 { v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b }, [{{x[0-9]+|sp}}]
; CHECK: st1 { v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d }, [{{x[0-9]+|sp}}]
; CHECK: ld1 { v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d }, [{{x[0-9]+|sp}}]
entry:
  %vld = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0i8(i8* %arg1)
  %cmp = icmp eq i32 %arg2, 0
  br i1 %cmp, label %if.then, label %if.end

if.then:
  tail call void @foo()
  br label %if.end

if.end:
  %vld.extract = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %vld, 0
  %res = extractelement <16 x i8> %vld.extract, i32 1
  ret i8 %res
}

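; Declarations for the overloaded NEON load intrinsics used above; the
; .v2i32.p0i32-style suffixes encode the overloaded result vector and pointer
; types.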
declare { <2 x i32>, <2 x i32> } @llvm.aarch64.neon.ld2.v2i32.p0i32(i32*)
declare { <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld3.v4i16.p0i16(i16*)
declare { <4 x i16>, <4 x i16>, <4 x i16>, <4 x i16> } @llvm.aarch64.neon.ld4.v4i16.p0i16(i16*)
declare { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32.p0i32(i32*)
declare { <4 x float>, <4 x float>, <4 x float> } @llvm.aarch64.neon.ld3.v4f32.p0f32(float*)
declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.aarch64.neon.ld4.v16i8.p0i8(i8*)

declare void @foo()

; FIXME: We should not generate ld/st for this kind of register spill/fill,
; because the test case is very simple and register pressure is not high. If
; the spill/fill algorithm is improved, this test case may no longer be
; triggered, and then we can delete it.
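;
; The tests below combine a multi-vector lane store with a 16-bit
; multiply-by-element, whose element operand can only be encoded in v0-v15
; (the FPR128Lo subset), so the spilled tuple should be constrained to the
; *_in_FPR128Lo register classes named in the comments below.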
; Check the spill for the register class QPair_with_qsub_0_in_FPR128Lo.
define <8 x i16> @test_2xFPR128Lo(i64 %got, i64* %ptr, <1 x i64> %a) {
  tail call void @llvm.aarch64.neon.st2lane.v1i64.p0i64(<1 x i64> zeroinitializer, <1 x i64> zeroinitializer, i64 0, i64* %ptr)
  tail call void @foo()
  %sv = shufflevector <1 x i64> zeroinitializer, <1 x i64> %a, <2 x i32> <i32 0, i32 1>
  %1 = bitcast <2 x i64> %sv to <8 x i16>
  %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
  %3 = mul <8 x i16> %2, %2
  ret <8 x i16> %3
}

; Check the spill for the register class QTriple_with_qsub_0_in_FPR128Lo.
define <8 x i16> @test_3xFPR128Lo(i64 %got, i64* %ptr, <1 x i64> %a) {
  tail call void @llvm.aarch64.neon.st3lane.v1i64.p0i64(<1 x i64> zeroinitializer, <1 x i64> zeroinitializer, <1 x i64> zeroinitializer, i64 0, i64* %ptr)
  tail call void @foo()
  %sv = shufflevector <1 x i64> zeroinitializer, <1 x i64> %a, <2 x i32> <i32 0, i32 1>
  %1 = bitcast <2 x i64> %sv to <8 x i16>
  %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
  %3 = mul <8 x i16> %2, %2
  ret <8 x i16> %3
}

; Check the spill for the register class QQuad_with_qsub_0_in_FPR128Lo.
define <8 x i16> @test_4xFPR128Lo(i64 %got, i64* %ptr, <1 x i64> %a) {
  tail call void @llvm.aarch64.neon.st4lane.v1i64.p0i64(<1 x i64> zeroinitializer, <1 x i64> zeroinitializer, <1 x i64> zeroinitializer, <1 x i64> zeroinitializer, i64 0, i64* %ptr)
  tail call void @foo()
  %sv = shufflevector <1 x i64> zeroinitializer, <1 x i64> %a, <2 x i32> <i32 0, i32 1>
  %1 = bitcast <2 x i64> %sv to <8 x i16>
  %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
  %3 = mul <8 x i16> %2, %2
  ret <8 x i16> %3
}

declare void @llvm.aarch64.neon.st2lane.v1i64.p0i64(<1 x i64>, <1 x i64>, i64, i64*)
declare void @llvm.aarch64.neon.st3lane.v1i64.p0i64(<1 x i64>, <1 x i64>, <1 x i64>, i64, i64*)
declare void @llvm.aarch64.neon.st4lane.v1i64.p0i64(<1 x i64>, <1 x i64>, <1 x i64>, <1 x i64>, i64, i64*)