Lines Matching full:align

6 %xS8x8 = alloca <8 x i8>, align 8
7 %__a = alloca <8 x i8>, align 8
8 %__b = alloca <8 x i8>, align 8
9 %tmp = load <8 x i8>, <8 x i8>* %xS8x8, align 8
10 store <8 x i8> %tmp, <8 x i8>* %__a, align 8
11 %tmp1 = load <8 x i8>, <8 x i8>* %xS8x8, align 8
12 store <8 x i8> %tmp1, <8 x i8>* %__b, align 8
13 %tmp2 = load <8 x i8>, <8 x i8>* %__a, align 8
14 %tmp3 = load <8 x i8>, <8 x i8>* %__b, align 8
16 store <8 x i8> %vext, <8 x i8>* %xS8x8, align 8
23 %xU8x8 = alloca <8 x i8>, align 8
24 %__a = alloca <8 x i8>, align 8
25 %__b = alloca <8 x i8>, align 8
26 %tmp = load <8 x i8>, <8 x i8>* %xU8x8, align 8
27 store <8 x i8> %tmp, <8 x i8>* %__a, align 8
28 %tmp1 = load <8 x i8>, <8 x i8>* %xU8x8, align 8
29 store <8 x i8> %tmp1, <8 x i8>* %__b, align 8
30 %tmp2 = load <8 x i8>, <8 x i8>* %__a, align 8
31 %tmp3 = load <8 x i8>, <8 x i8>* %__b, align 8
33 store <8 x i8> %vext, <8 x i8>* %xU8x8, align 8
40 %xP8x8 = alloca <8 x i8>, align 8
41 %__a = alloca <8 x i8>, align 8
42 %__b = alloca <8 x i8>, align 8
43 %tmp = load <8 x i8>, <8 x i8>* %xP8x8, align 8
44 store <8 x i8> %tmp, <8 x i8>* %__a, align 8
45 %tmp1 = load <8 x i8>, <8 x i8>* %xP8x8, align 8
46 store <8 x i8> %tmp1, <8 x i8>* %__b, align 8
47 %tmp2 = load <8 x i8>, <8 x i8>* %__a, align 8
48 %tmp3 = load <8 x i8>, <8 x i8>* %__b, align 8
50 store <8 x i8> %vext, <8 x i8>* %xP8x8, align 8
57 %xS16x4 = alloca <4 x i16>, align 8
58 %__a = alloca <4 x i16>, align 8
59 %__b = alloca <4 x i16>, align 8
60 %tmp = load <4 x i16>, <4 x i16>* %xS16x4, align 8
61 store <4 x i16> %tmp, <4 x i16>* %__a, align 8
62 %tmp1 = load <4 x i16>, <4 x i16>* %xS16x4, align 8
63 store <4 x i16> %tmp1, <4 x i16>* %__b, align 8
64 %tmp2 = load <4 x i16>, <4 x i16>* %__a, align 8
66 %tmp4 = load <4 x i16>, <4 x i16>* %__b, align 8
71 store <4 x i16> %vext, <4 x i16>* %xS16x4, align 8
78 %xU16x4 = alloca <4 x i16>, align 8
79 %__a = alloca <4 x i16>, align 8
80 %__b = alloca <4 x i16>, align 8
81 %tmp = load <4 x i16>, <4 x i16>* %xU16x4, align 8
82 store <4 x i16> %tmp, <4 x i16>* %__a, align 8
83 %tmp1 = load <4 x i16>, <4 x i16>* %xU16x4, align 8
84 store <4 x i16> %tmp1, <4 x i16>* %__b, align 8
85 %tmp2 = load <4 x i16>, <4 x i16>* %__a, align 8
87 %tmp4 = load <4 x i16>, <4 x i16>* %__b, align 8
92 store <4 x i16> %vext, <4 x i16>* %xU16x4, align 8
99 %xP16x4 = alloca <4 x i16>, align 8
100 %__a = alloca <4 x i16>, align 8
101 %__b = alloca <4 x i16>, align 8
102 %tmp = load <4 x i16>, <4 x i16>* %xP16x4, align 8
103 store <4 x i16> %tmp, <4 x i16>* %__a, align 8
104 %tmp1 = load <4 x i16>, <4 x i16>* %xP16x4, align 8
105 store <4 x i16> %tmp1, <4 x i16>* %__b, align 8
106 %tmp2 = load <4 x i16>, <4 x i16>* %__a, align 8
108 %tmp4 = load <4 x i16>, <4 x i16>* %__b, align 8
113 store <4 x i16> %vext, <4 x i16>* %xP16x4, align 8
120 %xS32x2 = alloca <2 x i32>, align 8
121 %__a = alloca <2 x i32>, align 8
122 %__b = alloca <2 x i32>, align 8
123 %tmp = load <2 x i32>, <2 x i32>* %xS32x2, align 8
124 store <2 x i32> %tmp, <2 x i32>* %__a, align 8
125 %tmp1 = load <2 x i32>, <2 x i32>* %xS32x2, align 8
126 store <2 x i32> %tmp1, <2 x i32>* %__b, align 8
127 %tmp2 = load <2 x i32>, <2 x i32>* %__a, align 8
129 %tmp4 = load <2 x i32>, <2 x i32>* %__b, align 8
134 store <2 x i32> %vext, <2 x i32>* %xS32x2, align 8
141 %xU32x2 = alloca <2 x i32>, align 8
142 %__a = alloca <2 x i32>, align 8
143 %__b = alloca <2 x i32>, align 8
144 %tmp = load <2 x i32>, <2 x i32>* %xU32x2, align 8
145 store <2 x i32> %tmp, <2 x i32>* %__a, align 8
146 %tmp1 = load <2 x i32>, <2 x i32>* %xU32x2, align 8
147 store <2 x i32> %tmp1, <2 x i32>* %__b, align 8
148 %tmp2 = load <2 x i32>, <2 x i32>* %__a, align 8
150 %tmp4 = load <2 x i32>, <2 x i32>* %__b, align 8
155 store <2 x i32> %vext, <2 x i32>* %xU32x2, align 8
162 %xF32x2 = alloca <2 x float>, align 8
163 %__a = alloca <2 x float>, align 8
164 %__b = alloca <2 x float>, align 8
165 %tmp = load <2 x float>, <2 x float>* %xF32x2, align 8
166 store <2 x float> %tmp, <2 x float>* %__a, align 8
167 %tmp1 = load <2 x float>, <2 x float>* %xF32x2, align 8
168 store <2 x float> %tmp1, <2 x float>* %__b, align 8
169 %tmp2 = load <2 x float>, <2 x float>* %__a, align 8
171 %tmp4 = load <2 x float>, <2 x float>* %__b, align 8
176 store <2 x float> %vext, <2 x float>* %xF32x2, align 8
184 %xS64x1 = alloca <1 x i64>, align 8
185 %__a = alloca <1 x i64>, align 8
186 %__b = alloca <1 x i64>, align 8
187 %tmp = load <1 x i64>, <1 x i64>* %xS64x1, align 8
188 store <1 x i64> %tmp, <1 x i64>* %__a, align 8
189 %tmp1 = load <1 x i64>, <1 x i64>* %xS64x1, align 8
190 store <1 x i64> %tmp1, <1 x i64>* %__b, align 8
191 %tmp2 = load <1 x i64>, <1 x i64>* %__a, align 8
193 %tmp4 = load <1 x i64>, <1 x i64>* %__b, align 8
198 store <1 x i64> %vext, <1 x i64>* %xS64x1, align 8
206 %xU64x1 = alloca <1 x i64>, align 8
207 %__a = alloca <1 x i64>, align 8
208 %__b = alloca <1 x i64>, align 8
209 %tmp = load <1 x i64>, <1 x i64>* %xU64x1, align 8
210 store <1 x i64> %tmp, <1 x i64>* %__a, align 8
211 %tmp1 = load <1 x i64>, <1 x i64>* %xU64x1, align 8
212 store <1 x i64> %tmp1, <1 x i64>* %__b, align 8
213 %tmp2 = load <1 x i64>, <1 x i64>* %__a, align 8
215 %tmp4 = load <1 x i64>, <1 x i64>* %__b, align 8
220 store <1 x i64> %vext, <1 x i64>* %xU64x1, align 8
227 %xS8x16 = alloca <16 x i8>, align 16
228 %__a = alloca <16 x i8>, align 16
229 %__b = alloca <16 x i8>, align 16
230 %tmp = load <16 x i8>, <16 x i8>* %xS8x16, align 16
231 store <16 x i8> %tmp, <16 x i8>* %__a, align 16
232 %tmp1 = load <16 x i8>, <16 x i8>* %xS8x16, align 16
233 store <16 x i8> %tmp1, <16 x i8>* %__b, align 16
234 %tmp2 = load <16 x i8>, <16 x i8>* %__a, align 16
235 %tmp3 = load <16 x i8>, <16 x i8>* %__b, align 16
237 store <16 x i8> %vext, <16 x i8>* %xS8x16, align 16
244 %xU8x16 = alloca <16 x i8>, align 16
245 %__a = alloca <16 x i8>, align 16
246 %__b = alloca <16 x i8>, align 16
247 %tmp = load <16 x i8>, <16 x i8>* %xU8x16, align 16
248 store <16 x i8> %tmp, <16 x i8>* %__a, align 16
249 %tmp1 = load <16 x i8>, <16 x i8>* %xU8x16, align 16
250 store <16 x i8> %tmp1, <16 x i8>* %__b, align 16
251 %tmp2 = load <16 x i8>, <16 x i8>* %__a, align 16
252 %tmp3 = load <16 x i8>, <16 x i8>* %__b, align 16
254 store <16 x i8> %vext, <16 x i8>* %xU8x16, align 16
261 %xP8x16 = alloca <16 x i8>, align 16
262 %__a = alloca <16 x i8>, align 16
263 %__b = alloca <16 x i8>, align 16
264 %tmp = load <16 x i8>, <16 x i8>* %xP8x16, align 16
265 store <16 x i8> %tmp, <16 x i8>* %__a, align 16
266 %tmp1 = load <16 x i8>, <16 x i8>* %xP8x16, align 16
267 store <16 x i8> %tmp1, <16 x i8>* %__b, align 16
268 %tmp2 = load <16 x i8>, <16 x i8>* %__a, align 16
269 %tmp3 = load <16 x i8>, <16 x i8>* %__b, align 16
271 store <16 x i8> %vext, <16 x i8>* %xP8x16, align 16
278 %xS16x8 = alloca <8 x i16>, align 16
279 %__a = alloca <8 x i16>, align 16
280 %__b = alloca <8 x i16>, align 16
281 %tmp = load <8 x i16>, <8 x i16>* %xS16x8, align 16
282 store <8 x i16> %tmp, <8 x i16>* %__a, align 16
283 %tmp1 = load <8 x i16>, <8 x i16>* %xS16x8, align 16
284 store <8 x i16> %tmp1, <8 x i16>* %__b, align 16
285 %tmp2 = load <8 x i16>, <8 x i16>* %__a, align 16
287 %tmp4 = load <8 x i16>, <8 x i16>* %__b, align 16
292 store <8 x i16> %vext, <8 x i16>* %xS16x8, align 16
299 %xU16x8 = alloca <8 x i16>, align 16
300 %__a = alloca <8 x i16>, align 16
301 %__b = alloca <8 x i16>, align 16
302 %tmp = load <8 x i16>, <8 x i16>* %xU16x8, align 16
303 store <8 x i16> %tmp, <8 x i16>* %__a, align 16
304 %tmp1 = load <8 x i16>, <8 x i16>* %xU16x8, align 16
305 store <8 x i16> %tmp1, <8 x i16>* %__b, align 16
306 %tmp2 = load <8 x i16>, <8 x i16>* %__a, align 16
308 %tmp4 = load <8 x i16>, <8 x i16>* %__b, align 16
313 store <8 x i16> %vext, <8 x i16>* %xU16x8, align 16
320 %xP16x8 = alloca <8 x i16>, align 16
321 %__a = alloca <8 x i16>, align 16
322 %__b = alloca <8 x i16>, align 16
323 %tmp = load <8 x i16>, <8 x i16>* %xP16x8, align 16
324 store <8 x i16> %tmp, <8 x i16>* %__a, align 16
325 %tmp1 = load <8 x i16>, <8 x i16>* %xP16x8, align 16
326 store <8 x i16> %tmp1, <8 x i16>* %__b, align 16
327 %tmp2 = load <8 x i16>, <8 x i16>* %__a, align 16
329 %tmp4 = load <8 x i16>, <8 x i16>* %__b, align 16
334 store <8 x i16> %vext, <8 x i16>* %xP16x8, align 16
341 %xS32x4 = alloca <4 x i32>, align 16
342 %__a = alloca <4 x i32>, align 16
343 %__b = alloca <4 x i32>, align 16
344 %tmp = load <4 x i32>, <4 x i32>* %xS32x4, align 16
345 store <4 x i32> %tmp, <4 x i32>* %__a, align 16
346 %tmp1 = load <4 x i32>, <4 x i32>* %xS32x4, align 16
347 store <4 x i32> %tmp1, <4 x i32>* %__b, align 16
348 %tmp2 = load <4 x i32>, <4 x i32>* %__a, align 16
350 %tmp4 = load <4 x i32>, <4 x i32>* %__b, align 16
355 store <4 x i32> %vext, <4 x i32>* %xS32x4, align 16
362 %xU32x4 = alloca <4 x i32>, align 16
363 %__a = alloca <4 x i32>, align 16
364 %__b = alloca <4 x i32>, align 16
365 %tmp = load <4 x i32>, <4 x i32>* %xU32x4, align 16
366 store <4 x i32> %tmp, <4 x i32>* %__a, align 16
367 %tmp1 = load <4 x i32>, <4 x i32>* %xU32x4, align 16
368 store <4 x i32> %tmp1, <4 x i32>* %__b, align 16
369 %tmp2 = load <4 x i32>, <4 x i32>* %__a, align 16
371 %tmp4 = load <4 x i32>, <4 x i32>* %__b, align 16
376 store <4 x i32> %vext, <4 x i32>* %xU32x4, align 16
383 %xF32x4 = alloca <4 x float>, align 16
384 %__a = alloca <4 x float>, align 16
385 %__b = alloca <4 x float>, align 16
386 %tmp = load <4 x float>, <4 x float>* %xF32x4, align 16
387 store <4 x float> %tmp, <4 x float>* %__a, align 16
388 %tmp1 = load <4 x float>, <4 x float>* %xF32x4, align 16
389 store <4 x float> %tmp1, <4 x float>* %__b, align 16
390 %tmp2 = load <4 x float>, <4 x float>* %__a, align 16
392 %tmp4 = load <4 x float>, <4 x float>* %__b, align 16
397 store <4 x float> %vext, <4 x float>* %xF32x4, align 16
404 %xS64x2 = alloca <2 x i64>, align 16
405 %__a = alloca <2 x i64>, align 16
406 %__b = alloca <2 x i64>, align 16
407 %tmp = load <2 x i64>, <2 x i64>* %xS64x2, align 16
408 store <2 x i64> %tmp, <2 x i64>* %__a, align 16
409 %tmp1 = load <2 x i64>, <2 x i64>* %xS64x2, align 16
410 store <2 x i64> %tmp1, <2 x i64>* %__b, align 16
411 %tmp2 = load <2 x i64>, <2 x i64>* %__a, align 16
413 %tmp4 = load <2 x i64>, <2 x i64>* %__b, align 16
418 store <2 x i64> %vext, <2 x i64>* %xS64x2, align 16
425 %xU64x2 = alloca <2 x i64>, align 16
426 %__a = alloca <2 x i64>, align 16
427 %__b = alloca <2 x i64>, align 16
428 %tmp = load <2 x i64>, <2 x i64>* %xU64x2, align 16
429 store <2 x i64> %tmp, <2 x i64>* %__a, align 16
430 %tmp1 = load <2 x i64>, <2 x i64>* %xU64x2, align 16
431 store <2 x i64> %tmp1, <2 x i64>* %__b, align 16
432 %tmp2 = load <2 x i64>, <2 x i64>* %__a, align 16
434 %tmp4 = load <2 x i64>, <2 x i64>* %__b, align 16
439 store <2 x i64> %vext, <2 x i64>* %xU64x2, align 16
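
Every matched block above follows the same unoptimized (-O0) shape: the test spills its operands to stack slots, reloads them, performs the extract, and stores the result back, and only the alloca/load/store instructions carry an align attribute, so the shufflevector that does the actual vext work is filtered out of this match list. As a hypothetical illustration only (the function name and the lane-1 shuffle mask below are assumptions, not taken from the file), the first 64-bit block could correspond to IR of roughly this form:

; Minimal sketch of the surrounding function for the xS8x8 case.
; Assumptions: the function name and the shuffle mask (a vext with #1)
; are illustrative; the actual mask lines are not shown in the match list
; because they contain no "align".
define void @vext_s8_sketch() {
entry:
  %xS8x8 = alloca <8 x i8>, align 8
  %__a = alloca <8 x i8>, align 8
  %__b = alloca <8 x i8>, align 8
  %tmp = load <8 x i8>, <8 x i8>* %xS8x8, align 8
  store <8 x i8> %tmp, <8 x i8>* %__a, align 8
  %tmp1 = load <8 x i8>, <8 x i8>* %xS8x8, align 8
  store <8 x i8> %tmp1, <8 x i8>* %__b, align 8
  %tmp2 = load <8 x i8>, <8 x i8>* %__a, align 8
  %tmp3 = load <8 x i8>, <8 x i8>* %__b, align 8
  ; the extract itself: take lanes 1..7 of %tmp2 followed by lane 0 of %tmp3
  %vext = shufflevector <8 x i8> %tmp2, <8 x i8> %tmp3, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
  store <8 x i8> %vext, <8 x i8>* %xS8x8, align 8
  ret void
}

The same pattern repeats for every element type and width in the listing; only the vector type, the variable name, and the alignment change (align 8 for the 64-bit vectors, align 16 for the 128-bit ones).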