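; Excerpt of an LLVM AArch64 CodeGen test for fp16 (<N x half>) vector load
; and store selection, filtered to the lines matching "+full:4 +full:x";
; non-matching lines (RUN/CHECK-LABEL lines, entry labels, trailing scalar
; loads/stores, rets, and closing braces) are omitted, so the functions below
; appear incomplete. An assumed RUN line for a test of this kind (a
; reconstruction, not taken from the source) would be:
; RUN: llc < %s -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s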

define <4 x half> @load_64(<4 x half>* nocapture readonly %a) #0 {
  %0 = load <4 x half>, <4 x half>* %a, align 8
  ret <4 x half> %0

define <8 x half> @load_128(<8 x half>* nocapture readonly %a) #0 {
  %0 = load <8 x half>, <8 x half>* %a, align 16
  ret <8 x half> %0
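; Whole-vector loads like the two above involve no lane manipulation, so they
; are expected to select plain 64-bit and 128-bit loads (ldr d0 / ldr q0)
; rather than NEON structure loads.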
define <4 x half> @load_dup_64(half* nocapture readonly %a) #0 {
; CHECK: ld1r { v0.4h }, [x0]
  %1 = insertelement <4 x half> undef, half %0, i32 0
  %2 = shufflevector <4 x half> %1, <4 x half> undef, <4 x i32> zeroinitializer
  ret <4 x half> %2
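; The insertelement-into-undef at lane 0 followed by a shufflevector with a
; zeroinitializer mask is LLVM's canonical splat idiom; combined with the
; scalar load of %0 (omitted by the line filter), it matches the ld1r
; load-and-replicate instruction checked above.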
define <8 x half> @load_dup_128(half* nocapture readonly %a) #0 {
  %1 = insertelement <8 x half> undef, half %0, i32 0
  %2 = shufflevector <8 x half> %1, <8 x half> undef, <8 x i32> zeroinitializer
  ret <8 x half> %2

define <4 x half> @load_lane_64(half* nocapture readonly %a, <4 x half> %b) #0 {
  %1 = insertelement <4 x half> %b, half %0, i32 2
  ret <4 x half> %1

define <8 x half> @load_lane_128(half* nocapture readonly %a, <8 x half> %b) #0 {
  %1 = insertelement <8 x half> %b, half %0, i32 5
  ret <8 x half> %1

define void @store_64(<4 x half>* nocapture %a, <4 x half> %b) #1 {
  store <4 x half> %b, <4 x half>* %a, align 8

define void @store_128(<8 x half>* nocapture %a, <8 x half> %b) #1 {
  store <8 x half> %b, <8 x half>* %a, align 16

define void @store_lane_64(half* nocapture %a, <4 x half> %b) #1 {
  %0 = extractelement <4 x half> %b, i32 2

define void @store_lane_128(half* nocapture %a, <8 x half> %b) #1 {
  %0 = extractelement <8 x half> %b, i32 5
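; For the two lane stores above, extracting one element and storing it is
; expected to select the NEON "st1 { v0.h }[lane], [x0]" single-lane store
; form rather than a register move plus a scalar store.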
declare { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld2.v4f16.p0v4f16(<4 x half>*)
declare { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld3.v4f16.p0v4f16(<4 x half>*)
declare { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld4.v4f16.p0v4f16(<4 x half>*)
declare void @llvm.aarch64.neon.st2.v4f16.p0v4f16(<4 x half>, <4 x half>, <4 x half>*)
declare void @llvm.aarch64.neon.st3.v4f16.p0v4f16(<4 x half>, <4 x half>, <4 x half>, <4 x half>*)
declare void @llvm.aarch64.neon.st4.v4f16.p0v4f16(<4 x half>, <4 x half>, <4 x half>, <4 x half>, <4 x half>*)
declare { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld2.v8f16.p0v8f16(<8 x half>*)
declare { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld3.v8f16.p0v8f16(<8 x half>*)
declare { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld4.v8f16.p0v8f16(<8 x half>*)
declare void @llvm.aarch64.neon.st2.v8f16.p0v8f16(<8 x half>, <8 x half>, <8 x half>*)
declare void @llvm.aarch64.neon.st3.v8f16.p0v8f16(<8 x half>, <8 x half>, <8 x half>, <8 x half>*)
declare void @llvm.aarch64.neon.st4.v8f16.p0v8f16(<8 x half>, <8 x half>, <8 x half>, <8 x half>, <8 x half>*)
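; The ldN/stN intrinsics declared above model the NEON LD2/LD3/LD4 and
; ST2/ST3/ST4 structure instructions: ldN loads N interleaved structures and
; de-interleaves them into N vector registers (returned as a struct of
; vectors), and stN interleaves N vectors back into memory.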
; Load 2 x v4f16 with de-interleaving
define { <4 x half>, <4 x half> } @load_interleave_64_2(<4 x half>* %a) #0 {
; CHECK: ld2 { v0.4h, v1.4h }, [x0]
  %0 = tail call { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld2.v4f16.p0v4f16(<4 x half>* %a)
  ret { <4 x half>, <4 x half> } %0

; Load 3 x v4f16 with de-interleaving
define { <4 x half>, <4 x half>, <4 x half> } @load_interleave_64_3(<4 x half>* %a) #0 {
; CHECK: ld3 { v0.4h, v1.4h, v2.4h }, [x0]
  %0 = tail call { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld3.v4f16.p0v4f16(<4 x half>* %a)
  ret { <4 x half>, <4 x half>, <4 x half> } %0

; Load 4 x v4f16 with de-interleaving
define { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @load_interleave_64_4(<4 x half>* %a) #0 {
; CHECK: ld4 { v0.4h, v1.4h, v2.4h, v3.4h }, [x0]
  %0 = tail call { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld4.v4f16.p0v4f16(<4 x half>* %a)
  ret { <4 x half>, <4 x half>, <4 x half>, <4 x half> } %0
; Store 2 x v4f16 with interleaving
define void @store_interleave_64_2(<4 x half>* %a, <4 x half> %b, <4 x half> %c) #0 {
; CHECK: st2 { v0.4h, v1.4h }, [x0]
  tail call void @llvm.aarch64.neon.st2.v4f16.p0v4f16(<4 x half> %b, <4 x half> %c, <4 x half>* %a)

; Store 3 x v4f16 with interleaving
define void @store_interleave_64_3(<4 x half>* %a, <4 x half> %b, <4 x half> %c, <4 x half> %d) #0 {
; CHECK: st3 { v0.4h, v1.4h, v2.4h }, [x0]
  tail call void @llvm.aarch64.neon.st3.v4f16.p0v4f16(<4 x half> %b, <4 x half> %c, <4 x half> %d, <4 x half>* %a)

; Store 4 x v4f16 with interleaving
define void @store_interleave_64_4(<4 x half>* %a, <4 x half> %b, <4 x half> %c, <4 x half> %d, <4 x half> %e) #0 {
; CHECK: st4 { v0.4h, v1.4h, v2.4h, v3.4h }, [x0]
  tail call void @llvm.aarch64.neon.st4.v4f16.p0v4f16(<4 x half> %b, <4 x half> %c, <4 x half> %d, <4 x half> %e, <4 x half>* %a)
; Load 2 x v8f16 with de-interleaving
define { <8 x half>, <8 x half> } @load_interleave_128_2(<8 x half>* %a) #0 {
  %0 = tail call { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld2.v8f16.p0v8f16(<8 x half>* %a)
  ret { <8 x half>, <8 x half> } %0

; Load 3 x v8f16 with de-interleaving
define { <8 x half>, <8 x half>, <8 x half> } @load_interleave_128_3(<8 x half>* %a) #0 {
  %0 = tail call { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld3.v8f16.p0v8f16(<8 x half>* %a)
  ret { <8 x half>, <8 x half>, <8 x half> } %0

; Load 4 x v8f16 with de-interleaving
define { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @load_interleave_128_4(<8 x half>* %a) #0 {
  %0 = tail call { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld4.v8f16.p0v8f16(<8 x half>* %a)
  ret { <8 x half>, <8 x half>, <8 x half>, <8 x half> } %0

; Store 2 x v8f16 with interleaving
define void @store_interleave_128_2(<8 x half>* %a, <8 x half> %b, <8 x half> %c) #0 {
  tail call void @llvm.aarch64.neon.st2.v8f16.p0v8f16(<8 x half> %b, <8 x half> %c, <8 x half>* %a)

; Store 3 x v8f16 with interleaving
define void @store_interleave_128_3(<8 x half>* %a, <8 x half> %b, <8 x half> %c, <8 x half> %d) #0 {
  tail call void @llvm.aarch64.neon.st3.v8f16.p0v8f16(<8 x half> %b, <8 x half> %c, <8 x half> %d, <8 x half>* %a)

; Store 4 x v8f16 with interleaving
define void @store_interleave_128_4(<8 x half>* %a, <8 x half> %b, <8 x half> %c, <8 x half> %d, <8 x half> %e) #0 {
  tail call void @llvm.aarch64.neon.st4.v8f16.p0v8f16(<8 x half> %b, <8 x half> %c, <8 x half> %d, <8 x half> %e, <8 x half>* %a)
declare { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld2r.v4f16.p0f16(half*)
declare { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld3r.v4f16.p0f16(half*)
declare { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld4r.v4f16.p0f16(half*)
declare { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld2r.v8f16.p0f16(half*)
declare { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld3r.v8f16.p0f16(half*)
declare { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld4r.v8f16.p0f16(half*)
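; The ldNr intrinsics declared above model LD2R/LD3R/LD4R: each loads one
; N-element structure from memory and replicates element i into every lane of
; result vector i, which is why they take a scalar half* rather than a
; vector pointer.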
; Load 2 x v4f16 with duplication
define { <4 x half>, <4 x half> } @load_dup_64_2(half* %a) #0 {
; CHECK: ld2r { v0.4h, v1.4h }, [x0]
  %0 = tail call { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld2r.v4f16.p0f16(half* %a)
  ret { <4 x half>, <4 x half> } %0

; Load 3 x v4f16 with duplication
define { <4 x half>, <4 x half>, <4 x half> } @load_dup_64_3(half* %a) #0 {
; CHECK: ld3r { v0.4h, v1.4h, v2.4h }, [x0]
  %0 = tail call { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld3r.v4f16.p0f16(half* %a)
  ret { <4 x half>, <4 x half>, <4 x half> } %0

; Load 4 x v4f16 with duplication
define { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @load_dup_64_4(half* %a) #0 {
; CHECK: ld4r { v0.4h, v1.4h, v2.4h, v3.4h }, [x0]
  %0 = tail call { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld4r.v4f16.p0f16(half* %a)
  ret { <4 x half>, <4 x half>, <4 x half>, <4 x half> } %0

; Load 2 x v8f16 with duplication
define { <8 x half>, <8 x half> } @load_dup_128_2(half* %a) #0 {
  %0 = tail call { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld2r.v8f16.p0f16(half* %a)
  ret { <8 x half>, <8 x half> } %0

; Load 3 x v8f16 with duplication
define { <8 x half>, <8 x half>, <8 x half> } @load_dup_128_3(half* %a) #0 {
  %0 = tail call { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld3r.v8f16.p0f16(half* %a)
  ret { <8 x half>, <8 x half>, <8 x half> } %0

; Load 4 x v8f16 with duplication
define { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @load_dup_128_4(half* %a) #0 {
  %0 = tail call { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld4r.v8f16.p0f16(half* %a)
  ret { <8 x half>, <8 x half>, <8 x half>, <8 x half> } %0
declare { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld2lane.v4f16.p0f16(<4 x half>, <4 x half>, i64, half*)
declare { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld3lane.v4f16.p0f16(<4 x half>, <4 x half>, <4 x half>, i64, half*)
declare { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld4lane.v4f16.p0f16(<4 x half>, <4 x half>, <4 x half>, <4 x half>, i64, half*)
declare void @llvm.aarch64.neon.st2lane.v4f16.p0f16(<4 x half>, <4 x half>, i64, half*)
declare void @llvm.aarch64.neon.st3lane.v4f16.p0f16(<4 x half>, <4 x half>, <4 x half>, i64, half*)
declare void @llvm.aarch64.neon.st4lane.v4f16.p0f16(<4 x half>, <4 x half>, <4 x half>, <4 x half>, i64, half*)
declare { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld2lane.v8f16.p0f16(<8 x half>, <8 x half>, i64, half*)
declare { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld3lane.v8f16.p0f16(<8 x half>, <8 x half>, <8 x half>, i64, half*)
declare { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld4lane.v8f16.p0f16(<8 x half>, <8 x half>, <8 x half>, <8 x half>, i64, half*)
declare void @llvm.aarch64.neon.st2lane.v8f16.p0f16(<8 x half>, <8 x half>, i64, half*)
declare void @llvm.aarch64.neon.st3lane.v8f16.p0f16(<8 x half>, <8 x half>, <8 x half>, i64, half*)
declare void @llvm.aarch64.neon.st4lane.v8f16.p0f16(<8 x half>, <8 x half>, <8 x half>, <8 x half>, i64, half*)
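; The ldNlane/stNlane intrinsics declared above model the single-structure,
; single-lane LD2..LD4/ST2..ST4 forms: the i64 operand selects the lane;
; ldNlane loads one element into that lane of each input vector (passing the
; other lanes through unchanged), and stNlane stores that lane of each vector.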
; Load one lane of 2 x v4f16
define { <4 x half>, <4 x half> } @load_lane_64_2(half* %a, <4 x half> %b, <4 x half> %c) #0 {
  %0 = tail call { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld2lane.v4f16.p0f16(<4 x half> %b, <4 x half> %c, i64 2, half* %a)
  ret { <4 x half>, <4 x half> } %0

; Load one lane of 3 x v4f16
define { <4 x half>, <4 x half>, <4 x half> } @load_lane_64_3(half* %a, <4 x half> %b, <4 x half> %c, <4 x half> %d) #0 {
  %0 = tail call { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld3lane.v4f16.p0f16(<4 x half> %b, <4 x half> %c, <4 x half> %d, i64 2, half* %a)
  ret { <4 x half>, <4 x half>, <4 x half> } %0

; Load one lane of 4 x v4f16
define { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @load_lane_64_4(half* %a, <4 x half> %b, <4 x half> %c, <4 x half> %d, <4 x half> %e) #0 {
  %0 = tail call { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld4lane.v4f16.p0f16(<4 x half> %b, <4 x half> %c, <4 x half> %d, <4 x half> %e, i64 2, half* %a)
  ret { <4 x half>, <4 x half>, <4 x half>, <4 x half> } %0

; Store one lane of 2 x v4f16
define void @store_lane_64_2(half* %a, <4 x half> %b, <4 x half> %c) #0 {
  tail call void @llvm.aarch64.neon.st2lane.v4f16.p0f16(<4 x half> %b, <4 x half> %c, i64 2, half* %a)

; Store one lane of 3 x v4f16
define void @store_lane_64_3(half* %a, <4 x half> %b, <4 x half> %c, <4 x half> %d) #0 {
  tail call void @llvm.aarch64.neon.st3lane.v4f16.p0f16(<4 x half> %b, <4 x half> %c, <4 x half> %d, i64 2, half* %a)

; Store one lane of 4 x v4f16
define void @store_lane_64_4(half* %a, <4 x half> %b, <4 x half> %c, <4 x half> %d, <4 x half> %e) #0 {
  tail call void @llvm.aarch64.neon.st4lane.v4f16.p0f16(<4 x half> %b, <4 x half> %c, <4 x half> %d, <4 x half> %e, i64 2, half* %a)
; Load one lane of 2 x v8f16
define { <8 x half>, <8 x half> } @load_lane_128_2(half* %a, <8 x half> %b, <8 x half> %c) #0 {
  %0 = tail call { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld2lane.v8f16.p0f16(<8 x half> %b, <8 x half> %c, i64 2, half* %a)
  ret { <8 x half>, <8 x half> } %0

; Load one lane of 3 x v8f16
define { <8 x half>, <8 x half>, <8 x half> } @load_lane_128_3(half* %a, <8 x half> %b, <8 x half> %c, <8 x half> %d) #0 {
  %0 = tail call { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld3lane.v8f16.p0f16(<8 x half> %b, <8 x half> %c, <8 x half> %d, i64 2, half* %a)
  ret { <8 x half>, <8 x half>, <8 x half> } %0

; Load one lane of 4 x v8f16
define { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @load_lane_128_4(half* %a, <8 x half> %b, <8 x half> %c, <8 x half> %d, <8 x half> %e) #0 {
  %0 = tail call { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld4lane.v8f16.p0f16(<8 x half> %b, <8 x half> %c, <8 x half> %d, <8 x half> %e, i64 2, half* %a)
  ret { <8 x half>, <8 x half>, <8 x half>, <8 x half> } %0

; Store one lane of 2 x v8f16
define void @store_lane_128_2(half* %a, <8 x half> %b, <8 x half> %c) #0 {
  tail call void @llvm.aarch64.neon.st2lane.v8f16.p0f16(<8 x half> %b, <8 x half> %c, i64 2, half* %a)

; Store one lane of 3 x v8f16
define void @store_lane_128_3(half* %a, <8 x half> %b, <8 x half> %c, <8 x half> %d) #0 {
  tail call void @llvm.aarch64.neon.st3lane.v8f16.p0f16(<8 x half> %b, <8 x half> %c, <8 x half> %d, i64 2, half* %a)

; Store one lane of 4 x v8f16
define void @store_lane_128_4(half* %a, <8 x half> %b, <8 x half> %c, <8 x half> %d, <8 x half> %e) #0 {
  tail call void @llvm.aarch64.neon.st4lane.v8f16.p0f16(<8 x half> %b, <8 x half> %c, <8 x half> %d, <8 x half> %e, i64 2, half* %a)
declare { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld1x2.v4f16.p0v4f16(<4 x half>*)
declare { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld1x3.v4f16.p0v4f16(<4 x half>*)
declare { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld1x4.v4f16.p0v4f16(<4 x half>*)
declare void @llvm.aarch64.neon.st1x2.v4f16.p0v4f16(<4 x half>, <4 x half>, <4 x half>*)
declare void @llvm.aarch64.neon.st1x3.v4f16.p0v4f16(<4 x half>, <4 x half>, <4 x half>, <4 x half>*)
declare void @llvm.aarch64.neon.st1x4.v4f16.p0v4f16(<4 x half>, <4 x half>, <4 x half>, <4 x half>, <4 x half>*)
declare { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld1x2.v8f16.p0v8f16(<8 x half>*)
declare { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld1x3.v8f16.p0v8f16(<8 x half>*)
declare { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld1x4.v8f16.p0v8f16(<8 x half>*)
declare void @llvm.aarch64.neon.st1x2.v8f16.p0v8f16(<8 x half>, <8 x half>, <8 x half>*)
declare void @llvm.aarch64.neon.st1x3.v8f16.p0v8f16(<8 x half>, <8 x half>, <8 x half>, <8 x half>*)
declare void @llvm.aarch64.neon.st1x4.v8f16.p0v8f16(<8 x half>, <8 x half>, <8 x half>, <8 x half>, <8 x half>*)
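; The ld1xN/st1xN intrinsics declared above model the multi-register LD1/ST1
; forms: N whole vectors are transferred to or from consecutive memory with
; no interleaving, as the "without (de-)interleaving" tests below verify.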
; Load 2 x v4f16 without de-interleaving
define { <4 x half>, <4 x half> } @load_64_2(<4 x half>* %a) #0 {
; CHECK: ld1 { v0.4h, v1.4h }, [x0]
  %0 = tail call { <4 x half>, <4 x half> } @llvm.aarch64.neon.ld1x2.v4f16.p0v4f16(<4 x half>* %a)
  ret { <4 x half>, <4 x half> } %0

; Load 3 x v4f16 without de-interleaving
define { <4 x half>, <4 x half>, <4 x half> } @load_64_3(<4 x half>* %a) #0 {
; CHECK: ld1 { v0.4h, v1.4h, v2.4h }, [x0]
  %0 = tail call { <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld1x3.v4f16.p0v4f16(<4 x half>* %a)
  ret { <4 x half>, <4 x half>, <4 x half> } %0

; Load 4 x v4f16 without de-interleaving
define { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @load_64_4(<4 x half>* %a) #0 {
; CHECK: ld1 { v0.4h, v1.4h, v2.4h, v3.4h }, [x0]
  %0 = tail call { <4 x half>, <4 x half>, <4 x half>, <4 x half> } @llvm.aarch64.neon.ld1x4.v4f16.p0v4f16(<4 x half>* %a)
  ret { <4 x half>, <4 x half>, <4 x half>, <4 x half> } %0

; Store 2 x v4f16 without interleaving
define void @store_64_2(<4 x half>* %a, <4 x half> %b, <4 x half> %c) #0 {
; CHECK: st1 { v0.4h, v1.4h }, [x0]
  tail call void @llvm.aarch64.neon.st1x2.v4f16.p0v4f16(<4 x half> %b, <4 x half> %c, <4 x half>* %a)

; Store 3 x v4f16 without interleaving
define void @store_64_3(<4 x half>* %a, <4 x half> %b, <4 x half> %c, <4 x half> %d) #0 {
; CHECK: st1 { v0.4h, v1.4h, v2.4h }, [x0]
  tail call void @llvm.aarch64.neon.st1x3.v4f16.p0v4f16(<4 x half> %b, <4 x half> %c, <4 x half> %d, <4 x half>* %a)

; Store 4 x v4f16 without interleaving
define void @store_64_4(<4 x half>* %a, <4 x half> %b, <4 x half> %c, <4 x half> %d, <4 x half> %e) #0 {
; CHECK: st1 { v0.4h, v1.4h, v2.4h, v3.4h }, [x0]
  tail call void @llvm.aarch64.neon.st1x4.v4f16.p0v4f16(<4 x half> %b, <4 x half> %c, <4 x half> %d, <4 x half> %e, <4 x half>* %a)
; Load 2 x v8f16 without de-interleaving
define { <8 x half>, <8 x half> } @load_128_2(<8 x half>* %a) #0 {
  %0 = tail call { <8 x half>, <8 x half> } @llvm.aarch64.neon.ld1x2.v8f16.p0v8f16(<8 x half>* %a)
  ret { <8 x half>, <8 x half> } %0

; Load 3 x v8f16 without de-interleaving
define { <8 x half>, <8 x half>, <8 x half> } @load_128_3(<8 x half>* %a) #0 {
  %0 = tail call { <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld1x3.v8f16.p0v8f16(<8 x half>* %a)
  ret { <8 x half>, <8 x half>, <8 x half> } %0

; Load 4 x v8f16 without de-interleaving
define { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @load_128_4(<8 x half>* %a) #0 {
  %0 = tail call { <8 x half>, <8 x half>, <8 x half>, <8 x half> } @llvm.aarch64.neon.ld1x4.v8f16.p0v8f16(<8 x half>* %a)
  ret { <8 x half>, <8 x half>, <8 x half>, <8 x half> } %0

; Store 2 x v8f16 without interleaving
define void @store_128_2(<8 x half>* %a, <8 x half> %b, <8 x half> %c) #0 {
  tail call void @llvm.aarch64.neon.st1x2.v8f16.p0v8f16(<8 x half> %b, <8 x half> %c, <8 x half>* %a)

; Store 3 x v8f16 without interleaving
define void @store_128_3(<8 x half>* %a, <8 x half> %b, <8 x half> %c, <8 x half> %d) #0 {
  tail call void @llvm.aarch64.neon.st1x3.v8f16.p0v8f16(<8 x half> %b, <8 x half> %c, <8 x half> %d, <8 x half>* %a)

; Store 4 x v8f16 without interleaving
define void @store_128_4(<8 x half>* %a, <8 x half> %b, <8 x half> %c, <8 x half> %d, <8 x half> %e) #0 {
  tail call void @llvm.aarch64.neon.st1x4.v8f16.p0v8f16(<8 x half> %b, <8 x half> %c, <8 x half> %d, <8 x half> %e, <8 x half>* %a)