; RUN: llc -march=mips -mattr=+msa,+fp64,+mips32r5 < %s | FileCheck %s
; RUN: llc -march=mipsel -mattr=+msa,+fp64,+mips32r5 < %s | FileCheck %s

; The vector lives at 0($sp), so ld.b/st.b address it directly with no
; extra base-register setup.
define void @loadstore_v16i8_near() nounwind {
  ; CHECK: loadstore_v16i8_near:

  %1 = alloca <16 x i8>
  %2 = load volatile <16 x i8>, <16 x i8>* %1
  ; CHECK: ld.b [[R1:\$w[0-9]+]], 0($sp)
  store volatile <16 x i8> %2, <16 x i8>* %1
  ; CHECK: st.b [[R1]], 0($sp)

  ret void
  ; CHECK: .size loadstore_v16i8_near
}

; The vector ends up at 496($sp); that still fits in ld.b/st.b's
; immediate field, so no separate base register is needed.
define void @loadstore_v16i8_just_under_simm10() nounwind {
  ; CHECK: loadstore_v16i8_just_under_simm10:

  %1 = alloca <16 x i8>
  %2 = alloca [492 x i8] ; Push the frame--accounting for the emergency spill
                         ; slot--right up to 512 bytes

  %3 = load volatile <16 x i8>, <16 x i8>* %1
  ; CHECK: ld.b [[R1:\$w[0-9]+]], 496($sp)
  store volatile <16 x i8> %3, <16 x i8>* %1
  ; CHECK: st.b [[R1]], 496($sp)

  ret void
  ; CHECK: .size loadstore_v16i8_just_under_simm10
}

; A 512-byte offset no longer fits ld.b/st.b's immediate field, so the
; base address is materialized with addiu first.
define void @loadstore_v16i8_just_over_simm10() nounwind {
  ; CHECK: loadstore_v16i8_just_over_simm10:

  %1 = alloca <16 x i8>
  %2 = alloca [497 x i8] ; Push the frame--accounting for the emergency spill
                         ; slot--just over 512 bytes

  %3 = load volatile <16 x i8>, <16 x i8>* %1
  ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 512
  ; CHECK: ld.b [[R1:\$w[0-9]+]], 0([[BASE]])
  store volatile <16 x i8> %3, <16 x i8>* %1
  ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 512
  ; CHECK: st.b [[R1]], 0([[BASE]])

  ret void
  ; CHECK: .size loadstore_v16i8_just_over_simm10
}

; With a 32768-byte offset the address is built in a register (ori +
; addu) before the MSA access.
define void @loadstore_v16i8_just_under_simm16() nounwind {
  ; CHECK: loadstore_v16i8_just_under_simm16:

  %1 = alloca <16 x i8>
  %2 = alloca [32752 x i8] ; Push the frame--accounting for the emergency spill
                           ; slot--right up to 32768 bytes

  %3 = load volatile <16 x i8>, <16 x i8>* %1
  ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
  ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
  ; CHECK: ld.b [[R1:\$w[0-9]+]], 0([[BASE]])
  store volatile <16 x i8> %3, <16 x i8>* %1
  ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
  ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
  ; CHECK: st.b [[R1]], 0([[BASE]])

  ret void
  ; CHECK: .size loadstore_v16i8_just_under_simm16
}

; Past the 16-bit immediate range the offset is likewise built with
; ori + addu before the MSA access.
define void @loadstore_v16i8_just_over_simm16() nounwind {
  ; CHECK: loadstore_v16i8_just_over_simm16:

  %1 = alloca <16 x i8>
  %2 = alloca [32753 x i8] ; Push the frame--accounting for the emergency spill
                           ; slot--just over 32768 bytes

  %3 = load volatile <16 x i8>, <16 x i8>* %1
  ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
  ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
  ; CHECK: ld.b [[R1:\$w[0-9]+]], 0([[BASE]])
  store volatile <16 x i8> %3, <16 x i8>* %1
  ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
  ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
  ; CHECK: st.b [[R1]], 0([[BASE]])

  ret void
  ; CHECK: .size loadstore_v16i8_just_over_simm16
}

; The vector lives at 0($sp), so ld.h/st.h address it directly.
define void @loadstore_v8i16_near() nounwind {
  ; CHECK: loadstore_v8i16_near:

  %1 = alloca <8 x i16>
  %2 = load volatile <8 x i16>, <8 x i16>* %1
  ; CHECK: ld.h [[R1:\$w[0-9]+]], 0($sp)
  store volatile <8 x i16> %2, <8 x i16>* %1
  ; CHECK: st.h [[R1]], 0($sp)

  ret void
  ; CHECK: .size loadstore_v8i16_near
}

; The access is 1 byte off $sp, so the address must be formed with
; addiu before the MSA load/store.
define void @loadstore_v8i16_unaligned() nounwind {
  ; CHECK: loadstore_v8i16_unaligned:

  %1 = alloca [2 x <8 x i16>]
  %2 = bitcast [2 x <8 x i16>]* %1 to i8*
  %3 = getelementptr i8, i8* %2, i32 1
  %4 = bitcast i8* %3 to [2 x <8 x i16>]*
  %5 = getelementptr [2 x <8 x i16>], [2 x <8 x i16>]* %4, i32 0, i32 0

  %6 = load volatile <8 x i16>, <8 x i16>* %5
  ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1
  ; CHECK: ld.h [[R1:\$w[0-9]+]], 0([[BASE]])
  store volatile <8 x i16> %6, <8 x i16>* %5
  ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1
  ; CHECK: st.h [[R1]], 0([[BASE]])

  ret void
  ; CHECK: .size loadstore_v8i16_unaligned
}

; The vector ends up at 1008($sp); that still fits in ld.h/st.h's
; immediate field, so no separate base register is needed.
define void @loadstore_v8i16_just_under_simm10() nounwind {
  ; CHECK: loadstore_v8i16_just_under_simm10:

  %1 = alloca <8 x i16>
  %2 = alloca [1004 x i8] ; Push the frame--accounting for the emergency spill
                          ; slot--right up to 1024 bytes

  %3 = load volatile <8 x i16>, <8 x i16>* %1
  ; CHECK: ld.h [[R1:\$w[0-9]+]], 1008($sp)
  store volatile <8 x i16> %3, <8 x i16>* %1
  ; CHECK: st.h [[R1]], 1008($sp)

  ret void
  ; CHECK: .size loadstore_v8i16_just_under_simm10
}

; A 1024-byte offset no longer fits ld.h/st.h's immediate field, so the
; base address is materialized with addiu first.
define void @loadstore_v8i16_just_over_simm10() nounwind {
  ; CHECK: loadstore_v8i16_just_over_simm10:

  %1 = alloca <8 x i16>
  %2 = alloca [1009 x i8] ; Push the frame--accounting for the emergency spill
                          ; slot--just over 1024 bytes

  %3 = load volatile <8 x i16>, <8 x i16>* %1
  ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1024
  ; CHECK: ld.h [[R1:\$w[0-9]+]], 0([[BASE]])
  store volatile <8 x i16> %3, <8 x i16>* %1
  ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1024
  ; CHECK: st.h [[R1]], 0([[BASE]])

  ret void
  ; CHECK: .size loadstore_v8i16_just_over_simm10
}

; With a 32768-byte offset the address is built in a register (ori +
; addu) before the MSA access.
define void @loadstore_v8i16_just_under_simm16() nounwind {
  ; CHECK: loadstore_v8i16_just_under_simm16:

  %1 = alloca <8 x i16>
  %2 = alloca [32752 x i8] ; Push the frame--accounting for the emergency spill
                           ; slot--right up to 32768 bytes

  %3 = load volatile <8 x i16>, <8 x i16>* %1
  ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
  ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
  ; CHECK: ld.h [[R1:\$w[0-9]+]], 0([[BASE]])
  store volatile <8 x i16> %3, <8 x i16>* %1
  ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
  ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
  ; CHECK: st.h [[R1]], 0([[BASE]])

  ret void
  ; CHECK: .size loadstore_v8i16_just_under_simm16
}

; Past the 16-bit immediate range the offset is likewise built with
; ori + addu before the MSA access.
define void @loadstore_v8i16_just_over_simm16() nounwind {
  ; CHECK: loadstore_v8i16_just_over_simm16:

  %1 = alloca <8 x i16>
  %2 = alloca [32753 x i8] ; Push the frame--accounting for the emergency spill
                           ; slot--just over 32768 bytes

  %3 = load volatile <8 x i16>, <8 x i16>* %1
  ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
  ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
  ; CHECK: ld.h [[R1:\$w[0-9]+]], 0([[BASE]])
  store volatile <8 x i16> %3, <8 x i16>* %1
  ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
  ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
  ; CHECK: st.h [[R1]], 0([[BASE]])

  ret void
  ; CHECK: .size loadstore_v8i16_just_over_simm16
}

; The vector lives at 0($sp), so ld.w/st.w address it directly.
define void @loadstore_v4i32_near() nounwind {
  ; CHECK: loadstore_v4i32_near:

  %1 = alloca <4 x i32>
  %2 = load volatile <4 x i32>, <4 x i32>* %1
  ; CHECK: ld.w [[R1:\$w[0-9]+]], 0($sp)
  store volatile <4 x i32> %2, <4 x i32>* %1
  ; CHECK: st.w [[R1]], 0($sp)

  ret void
  ; CHECK: .size loadstore_v4i32_near
}

; The access is 1 byte off $sp, so the address must be formed with
; addiu before the MSA load/store.
define void @loadstore_v4i32_unaligned() nounwind {
  ; CHECK: loadstore_v4i32_unaligned:

  %1 = alloca [2 x <4 x i32>]
  %2 = bitcast [2 x <4 x i32>]* %1 to i8*
  %3 = getelementptr i8, i8* %2, i32 1
  %4 = bitcast i8* %3 to [2 x <4 x i32>]*
  %5 = getelementptr [2 x <4 x i32>], [2 x <4 x i32>]* %4, i32 0, i32 0

  %6 = load volatile <4 x i32>, <4 x i32>* %5
  ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1
  ; CHECK: ld.w [[R1:\$w[0-9]+]], 0([[BASE]])
  store volatile <4 x i32> %6, <4 x i32>* %5
  ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1
  ; CHECK: st.w [[R1]], 0([[BASE]])

  ret void
  ; CHECK: .size loadstore_v4i32_unaligned
}

; The vector ends up at 2032($sp); that still fits in ld.w/st.w's
; immediate field, so no separate base register is needed.
define void @loadstore_v4i32_just_under_simm10() nounwind {
  ; CHECK: loadstore_v4i32_just_under_simm10:

  %1 = alloca <4 x i32>
  %2 = alloca [2028 x i8] ; Push the frame--accounting for the emergency spill
                          ; slot--right up to 2048 bytes

  %3 = load volatile <4 x i32>, <4 x i32>* %1
  ; CHECK: ld.w [[R1:\$w[0-9]+]], 2032($sp)
  store volatile <4 x i32> %3, <4 x i32>* %1
  ; CHECK: st.w [[R1]], 2032($sp)

  ret void
  ; CHECK: .size loadstore_v4i32_just_under_simm10
}

; A 2048-byte offset no longer fits ld.w/st.w's immediate field, so the
; base address is materialized with addiu first.
define void @loadstore_v4i32_just_over_simm10() nounwind {
  ; CHECK: loadstore_v4i32_just_over_simm10:

  %1 = alloca <4 x i32>
  %2 = alloca [2033 x i8] ; Push the frame--accounting for the emergency spill
                          ; slot--just over 2048 bytes

  %3 = load volatile <4 x i32>, <4 x i32>* %1
  ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 2048
  ; CHECK: ld.w [[R1:\$w[0-9]+]], 0([[BASE]])
  store volatile <4 x i32> %3, <4 x i32>* %1
  ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 2048
  ; CHECK: st.w [[R1]], 0([[BASE]])

  ret void
  ; CHECK: .size loadstore_v4i32_just_over_simm10
}

; With a 32768-byte offset the address is built in a register (ori +
; addu) before the MSA access.
define void @loadstore_v4i32_just_under_simm16() nounwind {
  ; CHECK: loadstore_v4i32_just_under_simm16:

  %1 = alloca <4 x i32>
  %2 = alloca [32752 x i8] ; Push the frame--accounting for the emergency spill
                           ; slot--right up to 32768 bytes

  %3 = load volatile <4 x i32>, <4 x i32>* %1
  ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
  ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
  ; CHECK: ld.w [[R1:\$w[0-9]+]], 0([[BASE]])
  store volatile <4 x i32> %3, <4 x i32>* %1
  ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
  ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
  ; CHECK: st.w [[R1]], 0([[BASE]])

  ret void
  ; CHECK: .size loadstore_v4i32_just_under_simm16
}

; Past the 16-bit immediate range the offset is likewise built with
; ori + addu before the MSA access.
define void @loadstore_v4i32_just_over_simm16() nounwind {
  ; CHECK: loadstore_v4i32_just_over_simm16:

  %1 = alloca <4 x i32>
  %2 = alloca [32753 x i8] ; Push the frame--accounting for the emergency spill
                           ; slot--just over 32768 bytes

  %3 = load volatile <4 x i32>, <4 x i32>* %1
  ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
  ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
  ; CHECK: ld.w [[R1:\$w[0-9]+]], 0([[BASE]])
  store volatile <4 x i32> %3, <4 x i32>* %1
  ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
  ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
  ; CHECK: st.w [[R1]], 0([[BASE]])

  ret void
  ; CHECK: .size loadstore_v4i32_just_over_simm16
}

; The vector lives at 0($sp), so ld.d/st.d address it directly.
define void @loadstore_v2i64_near() nounwind {
  ; CHECK: loadstore_v2i64_near:

  %1 = alloca <2 x i64>
  %2 = load volatile <2 x i64>, <2 x i64>* %1
  ; CHECK: ld.d [[R1:\$w[0-9]+]], 0($sp)
  store volatile <2 x i64> %2, <2 x i64>* %1
  ; CHECK: st.d [[R1]], 0($sp)

  ret void
  ; CHECK: .size loadstore_v2i64_near
}

; The access is 1 byte off $sp, so the address must be formed with
; addiu before the MSA load/store.
define void @loadstore_v2i64_unaligned() nounwind {
  ; CHECK: loadstore_v2i64_unaligned:

  %1 = alloca [2 x <2 x i64>]
  %2 = bitcast [2 x <2 x i64>]* %1 to i8*
  %3 = getelementptr i8, i8* %2, i32 1
  %4 = bitcast i8* %3 to [2 x <2 x i64>]*
  %5 = getelementptr [2 x <2 x i64>], [2 x <2 x i64>]* %4, i32 0, i32 0

  %6 = load volatile <2 x i64>, <2 x i64>* %5
  ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1
  ; CHECK: ld.d [[R1:\$w[0-9]+]], 0([[BASE]])
  store volatile <2 x i64> %6, <2 x i64>* %5
  ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 1
  ; CHECK: st.d [[R1]], 0([[BASE]])

  ret void
  ; CHECK: .size loadstore_v2i64_unaligned
}

; The vector ends up at 4080($sp); that still fits in ld.d/st.d's
; immediate field, so no separate base register is needed.
define void @loadstore_v2i64_just_under_simm10() nounwind {
  ; CHECK: loadstore_v2i64_just_under_simm10:

  %1 = alloca <2 x i64>
  %2 = alloca [4076 x i8] ; Push the frame--accounting for the emergency spill
                          ; slot--right up to 4096 bytes
  %3 = load volatile <2 x i64>, <2 x i64>* %1
  ; CHECK: ld.d [[R1:\$w[0-9]+]], 4080($sp)
  store volatile <2 x i64> %3, <2 x i64>* %1
  ; CHECK: st.d [[R1]], 4080($sp)

  ret void
  ; CHECK: .size loadstore_v2i64_just_under_simm10
}

; A 4096-byte offset no longer fits ld.d/st.d's immediate field, so the
; base address is materialized with addiu first.
define void @loadstore_v2i64_just_over_simm10() nounwind {
  ; CHECK: loadstore_v2i64_just_over_simm10:

  %1 = alloca <2 x i64>
  %2 = alloca [4081 x i8] ; Push the frame--accounting for the emergency spill
                          ; slot--just over 4096 bytes

  %3 = load volatile <2 x i64>, <2 x i64>* %1
  ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 4096
  ; CHECK: ld.d [[R1:\$w[0-9]+]], 0([[BASE]])
  store volatile <2 x i64> %3, <2 x i64>* %1
  ; CHECK: addiu [[BASE:\$([0-9]+|gp)]], $sp, 4096
  ; CHECK: st.d [[R1]], 0([[BASE]])

  ret void
  ; CHECK: .size loadstore_v2i64_just_over_simm10
}

; With a 32768-byte offset the address is built in a register (ori +
; addu) before the MSA access.
define void @loadstore_v2i64_just_under_simm16() nounwind {
  ; CHECK: loadstore_v2i64_just_under_simm16:

  %1 = alloca <2 x i64>
  %2 = alloca [32752 x i8] ; Push the frame--accounting for the emergency spill
                           ; slot--right up to 32768 bytes

  %3 = load volatile <2 x i64>, <2 x i64>* %1
  ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
  ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
  ; CHECK: ld.d [[R1:\$w[0-9]+]], 0([[BASE]])
  store volatile <2 x i64> %3, <2 x i64>* %1
  ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
  ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
  ; CHECK: st.d [[R1]], 0([[BASE]])

  ret void
  ; CHECK: .size loadstore_v2i64_just_under_simm16
}

; Past the 16-bit immediate range the offset is likewise built with
; ori + addu before the MSA access.
define void @loadstore_v2i64_just_over_simm16() nounwind {
  ; CHECK: loadstore_v2i64_just_over_simm16:

  %1 = alloca <2 x i64>
  %2 = alloca [32753 x i8] ; Push the frame--accounting for the emergency spill
                           ; slot--just over 32768 bytes

  %3 = load volatile <2 x i64>, <2 x i64>* %1
  ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
  ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
  ; CHECK: ld.d [[R1:\$w[0-9]+]], 0([[BASE]])
  store volatile <2 x i64> %3, <2 x i64>* %1
  ; CHECK: ori [[R2:\$([0-9]+|gp)]], $zero, 32768
  ; CHECK: addu [[BASE:\$([0-9]+|gp)]], $sp, [[R2]]
  ; CHECK: st.d [[R1]], 0([[BASE]])

  ret void
  ; CHECK: .size loadstore_v2i64_just_over_simm16
}
