
; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - | FileCheck %s
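
; The llvm.arm.neon.vshifts/vshiftu intrinsics take a per-element shift count in
; the second operand: positive counts shift left and negative counts shift right,
; so the same intrinsics cover both the VSHL and VSHR patterns tested below.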

define <8 x i8> @vshls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vshls8:
%tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp2 = load <8 x i8>, <8 x i8>* %B
%tmp3 = call <8 x i8> @llvm.arm.neon.vshifts.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
ret <8 x i8> %tmp3
}

define <4 x i16> @vshls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: vshls16:
%tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp2 = load <4 x i16>, <4 x i16>* %B
%tmp3 = call <4 x i16> @llvm.arm.neon.vshifts.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
ret <4 x i16> %tmp3
}

define <2 x i32> @vshls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK-LABEL: vshls32:
%tmp1 = load <2 x i32>, <2 x i32>* %A
%tmp2 = load <2 x i32>, <2 x i32>* %B
%tmp3 = call <2 x i32> @llvm.arm.neon.vshifts.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
ret <2 x i32> %tmp3
}

define <1 x i64> @vshls64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
;CHECK-LABEL: vshls64:
%tmp1 = load <1 x i64>, <1 x i64>* %A
%tmp2 = load <1 x i64>, <1 x i64>* %B
%tmp3 = call <1 x i64> @llvm.arm.neon.vshifts.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
ret <1 x i64> %tmp3
}

define <8 x i8> @vshlu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vshlu8:
%tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp2 = load <8 x i8>, <8 x i8>* %B
%tmp3 = call <8 x i8> @llvm.arm.neon.vshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
ret <8 x i8> %tmp3
}

define <4 x i16> @vshlu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: vshlu16:
%tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp2 = load <4 x i16>, <4 x i16>* %B
%tmp3 = call <4 x i16> @llvm.arm.neon.vshiftu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
ret <4 x i16> %tmp3
}

define <2 x i32> @vshlu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK-LABEL: vshlu32:
%tmp1 = load <2 x i32>, <2 x i32>* %A
%tmp2 = load <2 x i32>, <2 x i32>* %B
%tmp3 = call <2 x i32> @llvm.arm.neon.vshiftu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
ret <2 x i32> %tmp3
}

define <1 x i64> @vshlu64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
;CHECK-LABEL: vshlu64:
%tmp1 = load <1 x i64>, <1 x i64>* %A
%tmp2 = load <1 x i64>, <1 x i64>* %B
%tmp3 = call <1 x i64> @llvm.arm.neon.vshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
ret <1 x i64> %tmp3
}

define <16 x i8> @vshlQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
;CHECK-LABEL: vshlQs8:
%tmp1 = load <16 x i8>, <16 x i8>* %A
%tmp2 = load <16 x i8>, <16 x i8>* %B
%tmp3 = call <16 x i8> @llvm.arm.neon.vshifts.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
ret <16 x i8> %tmp3
}

define <8 x i16> @vshlQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK-LABEL: vshlQs16:
%tmp1 = load <8 x i16>, <8 x i16>* %A
%tmp2 = load <8 x i16>, <8 x i16>* %B
%tmp3 = call <8 x i16> @llvm.arm.neon.vshifts.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
ret <8 x i16> %tmp3
}

define <4 x i32> @vshlQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK-LABEL: vshlQs32:
%tmp1 = load <4 x i32>, <4 x i32>* %A
%tmp2 = load <4 x i32>, <4 x i32>* %B
%tmp3 = call <4 x i32> @llvm.arm.neon.vshifts.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
ret <4 x i32> %tmp3
}

define <2 x i64> @vshlQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
;CHECK-LABEL: vshlQs64:
%tmp1 = load <2 x i64>, <2 x i64>* %A
%tmp2 = load <2 x i64>, <2 x i64>* %B
%tmp3 = call <2 x i64> @llvm.arm.neon.vshifts.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
ret <2 x i64> %tmp3
}

define <16 x i8> @vshlQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
;CHECK-LABEL: vshlQu8:
%tmp1 = load <16 x i8>, <16 x i8>* %A
%tmp2 = load <16 x i8>, <16 x i8>* %B
%tmp3 = call <16 x i8> @llvm.arm.neon.vshiftu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
ret <16 x i8> %tmp3
}

define <8 x i16> @vshlQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK-LABEL: vshlQu16:
%tmp1 = load <8 x i16>, <8 x i16>* %A
%tmp2 = load <8 x i16>, <8 x i16>* %B
%tmp3 = call <8 x i16> @llvm.arm.neon.vshiftu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
ret <8 x i16> %tmp3
}

define <4 x i32> @vshlQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK-LABEL: vshlQu32:
%tmp1 = load <4 x i32>, <4 x i32>* %A
%tmp2 = load <4 x i32>, <4 x i32>* %B
%tmp3 = call <4 x i32> @llvm.arm.neon.vshiftu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
ret <4 x i32> %tmp3
}

define <2 x i64> @vshlQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
;CHECK-LABEL: vshlQu64:
%tmp1 = load <2 x i64>, <2 x i64>* %A
%tmp2 = load <2 x i64>, <2 x i64>* %B
%tmp3 = call <2 x i64> @llvm.arm.neon.vshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
ret <2 x i64> %tmp3
}
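
; Left shifts by a constant splat count. These are expected to select the
; immediate form of VSHL; the counts used (7, 15, 31, 63) are the largest
; immediates allowed for each element size.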

define <8 x i8> @vshli8(<8 x i8>* %A) nounwind {
;CHECK-LABEL: vshli8:
%tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp2 = call <8 x i8> @llvm.arm.neon.vshifts.v8i8(<8 x i8> %tmp1, <8 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
ret <8 x i8> %tmp2
}

define <4 x i16> @vshli16(<4 x i16>* %A) nounwind {
;CHECK-LABEL: vshli16:
%tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp2 = call <4 x i16> @llvm.arm.neon.vshiftu.v4i16(<4 x i16> %tmp1, <4 x i16> < i16 15, i16 15, i16 15, i16 15 >)
ret <4 x i16> %tmp2
}

define <2 x i32> @vshli32(<2 x i32>* %A) nounwind {
;CHECK-LABEL: vshli32:
%tmp1 = load <2 x i32>, <2 x i32>* %A
%tmp2 = call <2 x i32> @llvm.arm.neon.vshifts.v2i32(<2 x i32> %tmp1, <2 x i32> < i32 31, i32 31 >)
ret <2 x i32> %tmp2
}

define <1 x i64> @vshli64(<1 x i64>* %A) nounwind {
;CHECK-LABEL: vshli64:
%tmp1 = load <1 x i64>, <1 x i64>* %A
%tmp2 = call <1 x i64> @llvm.arm.neon.vshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 63 >)
ret <1 x i64> %tmp2
}

define <16 x i8> @vshlQi8(<16 x i8>* %A) nounwind {
;CHECK-LABEL: vshlQi8:
%tmp1 = load <16 x i8>, <16 x i8>* %A
%tmp2 = call <16 x i8> @llvm.arm.neon.vshifts.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
ret <16 x i8> %tmp2
}

define <8 x i16> @vshlQi16(<8 x i16>* %A) nounwind {
;CHECK-LABEL: vshlQi16:
%tmp1 = load <8 x i16>, <8 x i16>* %A
%tmp2 = call <8 x i16> @llvm.arm.neon.vshiftu.v8i16(<8 x i16> %tmp1, <8 x i16> < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >)
ret <8 x i16> %tmp2
}

define <4 x i32> @vshlQi32(<4 x i32>* %A) nounwind {
;CHECK-LABEL: vshlQi32:
%tmp1 = load <4 x i32>, <4 x i32>* %A
%tmp2 = call <4 x i32> @llvm.arm.neon.vshifts.v4i32(<4 x i32> %tmp1, <4 x i32> < i32 31, i32 31, i32 31, i32 31 >)
ret <4 x i32> %tmp2
}

define <2 x i64> @vshlQi64(<2 x i64>* %A) nounwind {
;CHECK-LABEL: vshlQi64:
%tmp1 = load <2 x i64>, <2 x i64>* %A
%tmp2 = call <2 x i64> @llvm.arm.neon.vshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 63, i64 63 >)
ret <2 x i64> %tmp2
}
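
; Right shifts by immediate are expressed with negative splat counts equal to
; minus the element width (-8, -16, -32, -64); these are expected to select the
; immediate form of VSHR (signed or unsigned according to the intrinsic).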

define <8 x i8> @vshrs8(<8 x i8>* %A) nounwind {
;CHECK-LABEL: vshrs8:
%tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp2 = call <8 x i8> @llvm.arm.neon.vshifts.v8i8(<8 x i8> %tmp1, <8 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
ret <8 x i8> %tmp2
}

define <4 x i16> @vshrs16(<4 x i16>* %A) nounwind {
;CHECK-LABEL: vshrs16:
%tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp2 = call <4 x i16> @llvm.arm.neon.vshifts.v4i16(<4 x i16> %tmp1, <4 x i16> < i16 -16, i16 -16, i16 -16, i16 -16 >)
ret <4 x i16> %tmp2
}

define <2 x i32> @vshrs32(<2 x i32>* %A) nounwind {
;CHECK-LABEL: vshrs32:
%tmp1 = load <2 x i32>, <2 x i32>* %A
%tmp2 = call <2 x i32> @llvm.arm.neon.vshifts.v2i32(<2 x i32> %tmp1, <2 x i32> < i32 -32, i32 -32 >)
ret <2 x i32> %tmp2
}

define <1 x i64> @vshrs64(<1 x i64>* %A) nounwind {
;CHECK-LABEL: vshrs64:
%tmp1 = load <1 x i64>, <1 x i64>* %A
%tmp2 = call <1 x i64> @llvm.arm.neon.vshifts.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 -64 >)
ret <1 x i64> %tmp2
}

define <8 x i8> @vshru8(<8 x i8>* %A) nounwind {
;CHECK-LABEL: vshru8:
%tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp2 = call <8 x i8> @llvm.arm.neon.vshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
ret <8 x i8> %tmp2
}

define <4 x i16> @vshru16(<4 x i16>* %A) nounwind {
;CHECK-LABEL: vshru16:
%tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp2 = call <4 x i16> @llvm.arm.neon.vshiftu.v4i16(<4 x i16> %tmp1, <4 x i16> < i16 -16, i16 -16, i16 -16, i16 -16 >)
ret <4 x i16> %tmp2
}

define <2 x i32> @vshru32(<2 x i32>* %A) nounwind {
;CHECK-LABEL: vshru32:
%tmp1 = load <2 x i32>, <2 x i32>* %A
%tmp2 = call <2 x i32> @llvm.arm.neon.vshiftu.v2i32(<2 x i32> %tmp1, <2 x i32> < i32 -32, i32 -32 >)
ret <2 x i32> %tmp2
}

define <1 x i64> @vshru64(<1 x i64>* %A) nounwind {
;CHECK-LABEL: vshru64:
%tmp1 = load <1 x i64>, <1 x i64>* %A
%tmp2 = call <1 x i64> @llvm.arm.neon.vshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 -64 >)
ret <1 x i64> %tmp2
}

define <16 x i8> @vshrQs8(<16 x i8>* %A) nounwind {
;CHECK-LABEL: vshrQs8:
%tmp1 = load <16 x i8>, <16 x i8>* %A
%tmp2 = call <16 x i8> @llvm.arm.neon.vshifts.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
ret <16 x i8> %tmp2
}

define <8 x i16> @vshrQs16(<8 x i16>* %A) nounwind {
;CHECK-LABEL: vshrQs16:
%tmp1 = load <8 x i16>, <8 x i16>* %A
%tmp2 = call <8 x i16> @llvm.arm.neon.vshifts.v8i16(<8 x i16> %tmp1, <8 x i16> < i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16 >)
ret <8 x i16> %tmp2
}

define <4 x i32> @vshrQs32(<4 x i32>* %A) nounwind {
;CHECK-LABEL: vshrQs32:
%tmp1 = load <4 x i32>, <4 x i32>* %A
%tmp2 = call <4 x i32> @llvm.arm.neon.vshifts.v4i32(<4 x i32> %tmp1, <4 x i32> < i32 -32, i32 -32, i32 -32, i32 -32 >)
ret <4 x i32> %tmp2
}

define <2 x i64> @vshrQs64(<2 x i64>* %A) nounwind {
;CHECK-LABEL: vshrQs64:
%tmp1 = load <2 x i64>, <2 x i64>* %A
%tmp2 = call <2 x i64> @llvm.arm.neon.vshifts.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 -64, i64 -64 >)
ret <2 x i64> %tmp2
}

define <16 x i8> @vshrQu8(<16 x i8>* %A) nounwind {
;CHECK-LABEL: vshrQu8:
%tmp1 = load <16 x i8>, <16 x i8>* %A
%tmp2 = call <16 x i8> @llvm.arm.neon.vshiftu.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
ret <16 x i8> %tmp2
}

define <8 x i16> @vshrQu16(<8 x i16>* %A) nounwind {
;CHECK-LABEL: vshrQu16:
%tmp1 = load <8 x i16>, <8 x i16>* %A
%tmp2 = call <8 x i16> @llvm.arm.neon.vshiftu.v8i16(<8 x i16> %tmp1, <8 x i16> < i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16 >)
ret <8 x i16> %tmp2
}

define <4 x i32> @vshrQu32(<4 x i32>* %A) nounwind {
;CHECK-LABEL: vshrQu32:
%tmp1 = load <4 x i32>, <4 x i32>* %A
%tmp2 = call <4 x i32> @llvm.arm.neon.vshiftu.v4i32(<4 x i32> %tmp1, <4 x i32> < i32 -32, i32 -32, i32 -32, i32 -32 >)
ret <4 x i32> %tmp2
}

define <2 x i64> @vshrQu64(<2 x i64>* %A) nounwind {
;CHECK-LABEL: vshrQu64:
%tmp1 = load <2 x i64>, <2 x i64>* %A
%tmp2 = call <2 x i64> @llvm.arm.neon.vshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 -64, i64 -64 >)
ret <2 x i64> %tmp2
}

declare <8 x i8> @llvm.arm.neon.vshifts.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vshifts.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vshifts.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
declare <1 x i64> @llvm.arm.neon.vshifts.v1i64(<1 x i64>, <1 x i64>) nounwind readnone

declare <8 x i8> @llvm.arm.neon.vshiftu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vshiftu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vshiftu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
declare <1 x i64> @llvm.arm.neon.vshiftu.v1i64(<1 x i64>, <1 x i64>) nounwind readnone

declare <16 x i8> @llvm.arm.neon.vshifts.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
declare <8 x i16> @llvm.arm.neon.vshifts.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vshifts.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vshifts.v2i64(<2 x i64>, <2 x i64>) nounwind readnone

declare <16 x i8> @llvm.arm.neon.vshiftu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
declare <8 x i16> @llvm.arm.neon.vshiftu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vshiftu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vshiftu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
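
; The vrshifts/vrshiftu intrinsics follow the same convention but round the
; result when shifting right, mapping onto the VRSHL instruction.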

define <8 x i8> @vrshls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vrshls8:
%tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp2 = load <8 x i8>, <8 x i8>* %B
%tmp3 = call <8 x i8> @llvm.arm.neon.vrshifts.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
ret <8 x i8> %tmp3
}

define <4 x i16> @vrshls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: vrshls16:
%tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp2 = load <4 x i16>, <4 x i16>* %B
%tmp3 = call <4 x i16> @llvm.arm.neon.vrshifts.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
ret <4 x i16> %tmp3
}

define <2 x i32> @vrshls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK-LABEL: vrshls32:
%tmp1 = load <2 x i32>, <2 x i32>* %A
%tmp2 = load <2 x i32>, <2 x i32>* %B
%tmp3 = call <2 x i32> @llvm.arm.neon.vrshifts.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
ret <2 x i32> %tmp3
}

define <1 x i64> @vrshls64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
;CHECK-LABEL: vrshls64:
%tmp1 = load <1 x i64>, <1 x i64>* %A
%tmp2 = load <1 x i64>, <1 x i64>* %B
%tmp3 = call <1 x i64> @llvm.arm.neon.vrshifts.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
ret <1 x i64> %tmp3
}

define <8 x i8> @vrshlu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: vrshlu8:
%tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp2 = load <8 x i8>, <8 x i8>* %B
%tmp3 = call <8 x i8> @llvm.arm.neon.vrshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
ret <8 x i8> %tmp3
}

define <4 x i16> @vrshlu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: vrshlu16:
%tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp2 = load <4 x i16>, <4 x i16>* %B
%tmp3 = call <4 x i16> @llvm.arm.neon.vrshiftu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
ret <4 x i16> %tmp3
}

define <2 x i32> @vrshlu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK-LABEL: vrshlu32:
%tmp1 = load <2 x i32>, <2 x i32>* %A
%tmp2 = load <2 x i32>, <2 x i32>* %B
%tmp3 = call <2 x i32> @llvm.arm.neon.vrshiftu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
ret <2 x i32> %tmp3
}

define <1 x i64> @vrshlu64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
;CHECK-LABEL: vrshlu64:
%tmp1 = load <1 x i64>, <1 x i64>* %A
%tmp2 = load <1 x i64>, <1 x i64>* %B
%tmp3 = call <1 x i64> @llvm.arm.neon.vrshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
ret <1 x i64> %tmp3
}

define <16 x i8> @vrshlQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
;CHECK-LABEL: vrshlQs8:
%tmp1 = load <16 x i8>, <16 x i8>* %A
%tmp2 = load <16 x i8>, <16 x i8>* %B
%tmp3 = call <16 x i8> @llvm.arm.neon.vrshifts.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
ret <16 x i8> %tmp3
}

define <8 x i16> @vrshlQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK-LABEL: vrshlQs16:
%tmp1 = load <8 x i16>, <8 x i16>* %A
%tmp2 = load <8 x i16>, <8 x i16>* %B
%tmp3 = call <8 x i16> @llvm.arm.neon.vrshifts.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
ret <8 x i16> %tmp3
}

define <4 x i32> @vrshlQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK-LABEL: vrshlQs32:
%tmp1 = load <4 x i32>, <4 x i32>* %A
%tmp2 = load <4 x i32>, <4 x i32>* %B
%tmp3 = call <4 x i32> @llvm.arm.neon.vrshifts.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
ret <4 x i32> %tmp3
}

define <2 x i64> @vrshlQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
;CHECK-LABEL: vrshlQs64:
%tmp1 = load <2 x i64>, <2 x i64>* %A
%tmp2 = load <2 x i64>, <2 x i64>* %B
%tmp3 = call <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
ret <2 x i64> %tmp3
}

define <16 x i8> @vrshlQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
;CHECK-LABEL: vrshlQu8:
%tmp1 = load <16 x i8>, <16 x i8>* %A
%tmp2 = load <16 x i8>, <16 x i8>* %B
%tmp3 = call <16 x i8> @llvm.arm.neon.vrshiftu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
ret <16 x i8> %tmp3
}

define <8 x i16> @vrshlQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK-LABEL: vrshlQu16:
%tmp1 = load <8 x i16>, <8 x i16>* %A
%tmp2 = load <8 x i16>, <8 x i16>* %B
%tmp3 = call <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
ret <8 x i16> %tmp3
}

define <4 x i32> @vrshlQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK-LABEL: vrshlQu32:
%tmp1 = load <4 x i32>, <4 x i32>* %A
%tmp2 = load <4 x i32>, <4 x i32>* %B
%tmp3 = call <4 x i32> @llvm.arm.neon.vrshiftu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
ret <4 x i32> %tmp3
}

define <2 x i64> @vrshlQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
;CHECK-LABEL: vrshlQu64:
%tmp1 = load <2 x i64>, <2 x i64>* %A
%tmp2 = load <2 x i64>, <2 x i64>* %B
%tmp3 = call <2 x i64> @llvm.arm.neon.vrshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
ret <2 x i64> %tmp3
}
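
; Rounding right shifts by immediate, again via negative splat counts; these are
; expected to select VRSHR.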

define <8 x i8> @vrshrs8(<8 x i8>* %A) nounwind {
;CHECK-LABEL: vrshrs8:
%tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp2 = call <8 x i8> @llvm.arm.neon.vrshifts.v8i8(<8 x i8> %tmp1, <8 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
ret <8 x i8> %tmp2
}

define <4 x i16> @vrshrs16(<4 x i16>* %A) nounwind {
;CHECK-LABEL: vrshrs16:
%tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp2 = call <4 x i16> @llvm.arm.neon.vrshifts.v4i16(<4 x i16> %tmp1, <4 x i16> < i16 -16, i16 -16, i16 -16, i16 -16 >)
ret <4 x i16> %tmp2
}

define <2 x i32> @vrshrs32(<2 x i32>* %A) nounwind {
;CHECK-LABEL: vrshrs32:
%tmp1 = load <2 x i32>, <2 x i32>* %A
%tmp2 = call <2 x i32> @llvm.arm.neon.vrshifts.v2i32(<2 x i32> %tmp1, <2 x i32> < i32 -32, i32 -32 >)
ret <2 x i32> %tmp2
}

define <1 x i64> @vrshrs64(<1 x i64>* %A) nounwind {
;CHECK-LABEL: vrshrs64:
%tmp1 = load <1 x i64>, <1 x i64>* %A
%tmp2 = call <1 x i64> @llvm.arm.neon.vrshifts.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 -64 >)
ret <1 x i64> %tmp2
}

define <8 x i8> @vrshru8(<8 x i8>* %A) nounwind {
;CHECK-LABEL: vrshru8:
%tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp2 = call <8 x i8> @llvm.arm.neon.vrshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
ret <8 x i8> %tmp2
}

define <4 x i16> @vrshru16(<4 x i16>* %A) nounwind {
;CHECK-LABEL: vrshru16:
%tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp2 = call <4 x i16> @llvm.arm.neon.vrshiftu.v4i16(<4 x i16> %tmp1, <4 x i16> < i16 -16, i16 -16, i16 -16, i16 -16 >)
ret <4 x i16> %tmp2
}

define <2 x i32> @vrshru32(<2 x i32>* %A) nounwind {
;CHECK-LABEL: vrshru32:
%tmp1 = load <2 x i32>, <2 x i32>* %A
%tmp2 = call <2 x i32> @llvm.arm.neon.vrshiftu.v2i32(<2 x i32> %tmp1, <2 x i32> < i32 -32, i32 -32 >)
ret <2 x i32> %tmp2
}

define <1 x i64> @vrshru64(<1 x i64>* %A) nounwind {
;CHECK-LABEL: vrshru64:
%tmp1 = load <1 x i64>, <1 x i64>* %A
%tmp2 = call <1 x i64> @llvm.arm.neon.vrshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 -64 >)
ret <1 x i64> %tmp2
}

define <16 x i8> @vrshrQs8(<16 x i8>* %A) nounwind {
;CHECK-LABEL: vrshrQs8:
%tmp1 = load <16 x i8>, <16 x i8>* %A
%tmp2 = call <16 x i8> @llvm.arm.neon.vrshifts.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
ret <16 x i8> %tmp2
}

define <8 x i16> @vrshrQs16(<8 x i16>* %A) nounwind {
;CHECK-LABEL: vrshrQs16:
%tmp1 = load <8 x i16>, <8 x i16>* %A
%tmp2 = call <8 x i16> @llvm.arm.neon.vrshifts.v8i16(<8 x i16> %tmp1, <8 x i16> < i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16 >)
ret <8 x i16> %tmp2
}

define <4 x i32> @vrshrQs32(<4 x i32>* %A) nounwind {
;CHECK-LABEL: vrshrQs32:
%tmp1 = load <4 x i32>, <4 x i32>* %A
%tmp2 = call <4 x i32> @llvm.arm.neon.vrshifts.v4i32(<4 x i32> %tmp1, <4 x i32> < i32 -32, i32 -32, i32 -32, i32 -32 >)
ret <4 x i32> %tmp2
}

define <2 x i64> @vrshrQs64(<2 x i64>* %A) nounwind {
;CHECK-LABEL: vrshrQs64:
%tmp1 = load <2 x i64>, <2 x i64>* %A
%tmp2 = call <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 -64, i64 -64 >)
ret <2 x i64> %tmp2
}

define <16 x i8> @vrshrQu8(<16 x i8>* %A) nounwind {
;CHECK-LABEL: vrshrQu8:
%tmp1 = load <16 x i8>, <16 x i8>* %A
%tmp2 = call <16 x i8> @llvm.arm.neon.vrshiftu.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
ret <16 x i8> %tmp2
}

define <8 x i16> @vrshrQu16(<8 x i16>* %A) nounwind {
;CHECK-LABEL: vrshrQu16:
%tmp1 = load <8 x i16>, <8 x i16>* %A
%tmp2 = call <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16> %tmp1, <8 x i16> < i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16 >)
ret <8 x i16> %tmp2
}

define <4 x i32> @vrshrQu32(<4 x i32>* %A) nounwind {
;CHECK-LABEL: vrshrQu32:
%tmp1 = load <4 x i32>, <4 x i32>* %A
%tmp2 = call <4 x i32> @llvm.arm.neon.vrshiftu.v4i32(<4 x i32> %tmp1, <4 x i32> < i32 -32, i32 -32, i32 -32, i32 -32 >)
ret <4 x i32> %tmp2
}

define <2 x i64> @vrshrQu64(<2 x i64>* %A) nounwind {
;CHECK-LABEL: vrshrQu64:
%tmp1 = load <2 x i64>, <2 x i64>* %A
%tmp2 = call <2 x i64> @llvm.arm.neon.vrshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 -64, i64 -64 >)
ret <2 x i64> %tmp2
}

declare <8 x i8> @llvm.arm.neon.vrshifts.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vrshifts.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vrshifts.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
declare <1 x i64> @llvm.arm.neon.vrshifts.v1i64(<1 x i64>, <1 x i64>) nounwind readnone

declare <8 x i8> @llvm.arm.neon.vrshiftu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vrshiftu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vrshiftu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
declare <1 x i64> @llvm.arm.neon.vrshiftu.v1i64(<1 x i64>, <1 x i64>) nounwind readnone

declare <16 x i8> @llvm.arm.neon.vrshifts.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
declare <8 x i16> @llvm.arm.neon.vrshifts.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vrshifts.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64>, <2 x i64>) nounwind readnone

declare <16 x i8> @llvm.arm.neon.vrshiftu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
declare <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vrshiftu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vrshiftu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone