Lines Matching refs:x (every line in the source file that references the coordinate parameter %x; the number at the start of each entry is that line's position in the file)
4 declare i8* @rsOffset([1 x i32] %a.coerce, i32 %sizeOf, i32 %x, i32 %y, i32 %z)
5 declare i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z)
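
Both helpers above are only declared here; their definitions live elsewhere in the driver runtime. The rs_allocation argument arrives as [1 x i32] %a.coerce, the opaque allocation handle coerced to a single 32-bit word on this 32-bit target. @rsOffset takes an explicit element size plus the cell coordinates (x, y, z) and returns a raw i8* to that cell; @rsOffsetNs does the same without a size argument. Every function below is a thin wrapper around one of these two calls: compute the address, bitcast it to the element's pointer type, then issue a single load or store tagged with a per-type !tbaa node. The following is a minimal C sketch of the addressing the helpers are assumed to perform; the struct layout and field names (basePtr, yStride, zStride) are hypothetical, not the real runtime types.

    /* Minimal sketch only: the real rsOffset is defined elsewhere in the
     * driver runtime. This shows the addressing it is assumed to perform,
     * using hypothetical field names (basePtr, yStride, zStride). */
    #include <stddef.h>
    #include <stdint.h>

    typedef struct {
        uint8_t *basePtr;   /* start of the element data        (assumed) */
        size_t   yStride;   /* bytes between consecutive rows   (assumed) */
        size_t   zStride;   /* bytes between consecutive slices (assumed) */
    } FakeAllocation;

    /* Counterpart of @rsOffset: explicit element size, cell (x, y, z). */
    static uint8_t *fake_rsOffset(const FakeAllocation *a, uint32_t sizeOf,
                                  uint32_t x, uint32_t y, uint32_t z) {
        return a->basePtr + (size_t)x * sizeOf
                          + (size_t)y * a->yStride
                          + (size_t)z * a->zStride;
    }
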
36 define void @rsSetElementAtImpl_char([1 x i32] %a.coerce, i8 signext %val, i32 %x, i32 %y, i32 %z) …
37 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 1, i32 %x, i32 %y, i32 %z) #2
42 define signext i8 @rsGetElementAtImpl_char([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
43 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 1, i32 %x, i32 %y, i32 %z) #2
49 define void @rsSetElementAtImpl_char2([1 x i32] %a.coerce, <2 x i8> %val, i32 %x, i32 %y, i32 %z) #…
50 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 2, i32 %x, i32 %y, i32 %z) #2
51 %2 = bitcast i8* %1 to <2 x i8>*
52 store <2 x i8> %val, <2 x i8>* %2, align 2, !tbaa !22
56 define <2 x i8> @rsGetElementAtImpl_char2([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
57 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 2, i32 %x, i32 %y, i32 %z) #2
58 %2 = bitcast i8* %1 to <2 x i8>*
59 %3 = load <2 x i8>, <2 x i8>* %2, align 2, !tbaa !22
60 ret <2 x i8> %3
64 define void @rsSetElementAtImpl_char3([1 x i32] %a.coerce, <3 x i8> %val, i32 %x, i32 %y, i32 %z) #…
65 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 4, i32 %x, i32 %y, i32 %z) #2
66 %2 = shufflevector <3 x i8> %val, <3 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
67 %3 = bitcast i8* %1 to <4 x i8>*
68 store <4 x i8> %2, <4 x i8>* %3, align 4, !tbaa !23
72 define <3 x i8> @rsGetElementAtImpl_char3([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
73 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 4, i32 %x, i32 %y, i32 %z) #2
74 %2 = bitcast i8* %1 to <4 x i8>*
75 %3 = load <4 x i8>, <4 x i8>* %2, align 4, !tbaa !23
76 %4 = shufflevector <4 x i8> %3, <4 x i8> undef, <3 x i32> <i32 0, i32 1, i32 2>
77 ret <3 x i8> %4
81 define void @rsSetElementAtImpl_char4([1 x i32] %a.coerce, <4 x i8> %val, i32 %x, i32 %y, i32 %z) #…
82 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 4, i32 %x, i32 %y, i32 %z) #2
83 %2 = bitcast i8* %1 to <4 x i8>*
84 store <4 x i8> %val, <4 x i8>* %2, align 4, !tbaa !24
88 define <4 x i8> @rsGetElementAtImpl_char4([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
89 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 4, i32 %x, i32 %y, i32 %z) #2
90 %2 = bitcast i8* %1 to <4 x i8>*
91 %3 = load <4 x i8>, <4 x i8>* %2, align 4, !tbaa !24
92 ret <4 x i8> %3
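
The char family above is the template that every other element type repeats. The scalar setter and getter pass the value with a signext attribute (zeroext for the unsigned types further down); only their define and rsOffset lines appear in this listing, since the scalar store and load themselves do not reference %x. The 2- and 4-element vectors bitcast the offset pointer to the vector type and perform one aligned store or load. The 3-element vectors are padded to a 4-element slot: the setter widens <3 x i8> to <4 x i8> with a shufflevector whose last lane is undef and stores four bytes, and the getter loads four bytes and shuffles back down to three. Below is a minimal C sketch of that round trip, using clang's ext_vector_type extension rather than the actual RenderScript headers; the function names are illustrative.

    /* Minimal sketch of the char3 padding convention seen above. The cell
     * pointer is assumed 4-byte aligned, matching the align 4 in the IR. */
    typedef char char3 __attribute__((ext_vector_type(3)));
    typedef char char4 __attribute__((ext_vector_type(4)));

    static void set_char3(char *cell, char3 v) {
        /* Widen to 4 lanes (last lane is a don't-care) and store one 4-byte
         * slot, mirroring the shufflevector + <4 x i8> store. */
        char4 padded = { v.x, v.y, v.z, 0 };
        *(char4 *)cell = padded;
    }

    static char3 get_char3(const char *cell) {
        /* Load the full 4-byte slot, then drop the padding lane. */
        char4 padded = *(const char4 *)cell;
        char3 out = { padded.x, padded.y, padded.z };
        return out;
    }
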
96 define void @rsSetElementAtImpl_uchar([1 x i32] %a.coerce, i8 zeroext %val, i32 %x, i32 %y, i32 %z)…
97 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 1, i32 %x, i32 %y, i32 %z) #2
102 define zeroext i8 @rsGetElementAtImpl_uchar([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
103 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 1, i32 %x, i32 %y, i32 %z) #2
109 define void @rsSetElementAtImpl_uchar2([1 x i32] %a.coerce, <2 x i8> %val, i32 %x, i32 %y, i32 %z) …
110 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 2, i32 %x, i32 %y, i32 %z) #2
111 %2 = bitcast i8* %1 to <2 x i8>*
112 store <2 x i8> %val, <2 x i8>* %2, align 2, !tbaa !26
116 define <2 x i8> @rsGetElementAtImpl_uchar2([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
117 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 2, i32 %x, i32 %y, i32 %z) #2
118 %2 = bitcast i8* %1 to <2 x i8>*
119 %3 = load <2 x i8>, <2 x i8>* %2, align 2, !tbaa !26
120 ret <2 x i8> %3
124 define void @rsSetElementAtImpl_uchar3([1 x i32] %a.coerce, <3 x i8> %val, i32 %x, i32 %y, i32 %z) …
125 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 4, i32 %x, i32 %y, i32 %z) #2
126 %2 = shufflevector <3 x i8> %val, <3 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
127 %3 = bitcast i8* %1 to <4 x i8>*
128 store <4 x i8> %2, <4 x i8>* %3, align 4, !tbaa !27
132 define <3 x i8> @rsGetElementAtImpl_uchar3([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
133 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 4, i32 %x, i32 %y, i32 %z) #2
134 %2 = bitcast i8* %1 to <4 x i8>*
135 %3 = load <4 x i8>, <4 x i8>* %2, align 4, !tbaa !27
136 %4 = shufflevector <4 x i8> %3, <4 x i8> undef, <3 x i32> <i32 0, i32 1, i32 2>
137 ret <3 x i8> %4
141 define void @rsSetElementAtImpl_uchar4([1 x i32] %a.coerce, <4 x i8> %val, i32 %x, i32 %y, i32 %z) …
142 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 4, i32 %x, i32 %y, i32 %z) #2
143 %2 = bitcast i8* %1 to <4 x i8>*
144 store <4 x i8> %val, <4 x i8>* %2, align 4, !tbaa !28
148 define <4 x i8> @rsGetElementAtImpl_uchar4([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
149 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 4, i32 %x, i32 %y, i32 %z) #2
150 %2 = bitcast i8* %1 to <4 x i8>*
151 %3 = load <4 x i8>, <4 x i8>* %2, align 4, !tbaa !28
152 ret <4 x i8> %3
156 define void @rsSetElementAtImpl_short([1 x i32] %a.coerce, i16 signext %val, i32 %x, i32 %y, i32 %z…
157 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 2, i32 %x, i32 %y, i32 %z) #2
163 define signext i16 @rsGetElementAtImpl_short([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
164 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 2, i32 %x, i32 %y, i32 %z) #2
171 define void @rsSetElementAtImpl_short2([1 x i32] %a.coerce, <2 x i16> %val, i32 %x, i32 %y, i32 %z)…
172 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 4, i32 %x, i32 %y, i32 %z) #2
173 %2 = bitcast i8* %1 to <2 x i16>*
174 store <2 x i16> %val, <2 x i16>* %2, align 4, !tbaa !30
178 define <2 x i16> @rsGetElementAtImpl_short2([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
179 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 4, i32 %x, i32 %y, i32 %z) #2
180 %2 = bitcast i8* %1 to <2 x i16>*
181 %3 = load <2 x i16>, <2 x i16>* %2, align 4, !tbaa !30
182 ret <2 x i16> %3
186 define void @rsSetElementAtImpl_short3([1 x i32] %a.coerce, <3 x i16> %val, i32 %x, i32 %y, i32 %z)…
187 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 8, i32 %x, i32 %y, i32 %z) #2
188 %2 = shufflevector <3 x i16> %val, <3 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
189 %3 = bitcast i8* %1 to <4 x i16>*
190 store <4 x i16> %2, <4 x i16>* %3, align 8, !tbaa !31
194 define <3 x i16> @rsGetElementAtImpl_short3([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
195 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 8, i32 %x, i32 %y, i32 %z) #2
196 %2 = bitcast i8* %1 to <4 x i16>*
197 %3 = load <4 x i16>, <4 x i16>* %2, align 8, !tbaa !31
198 %4 = shufflevector <4 x i16> %3, <4 x i16> undef, <3 x i32> <i32 0, i32 1, i32 2>
199 ret <3 x i16> %4
203 define void @rsSetElementAtImpl_short4([1 x i32] %a.coerce, <4 x i16> %val, i32 %x, i32 %y, i32 %z)…
204 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 8, i32 %x, i32 %y, i32 %z) #2
205 %2 = bitcast i8* %1 to <4 x i16>*
206 store <4 x i16> %val, <4 x i16>* %2, align 8, !tbaa !32
210 define <4 x i16> @rsGetElementAtImpl_short4([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
211 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 8, i32 %x, i32 %y, i32 %z) #2
212 %2 = bitcast i8* %1 to <4 x i16>*
213 %3 = load <4 x i16>, <4 x i16>* %2, align 8, !tbaa !32
214 ret <4 x i16> %3
218 define void @rsSetElementAtImpl_ushort([1 x i32] %a.coerce, i16 zeroext %val, i32 %x, i32 %y, i32 %…
219 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 2, i32 %x, i32 %y, i32 %z) #2
225 define zeroext i16 @rsGetElementAtImpl_ushort([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
226 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 2, i32 %x, i32 %y, i32 %z) #2
233 define void @rsSetElementAtImpl_ushort2([1 x i32] %a.coerce, <2 x i16> %val, i32 %x, i32 %y, i32 %z…
234 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 4, i32 %x, i32 %y, i32 %z) #2
235 %2 = bitcast i8* %1 to <2 x i16>*
236 store <2 x i16> %val, <2 x i16>* %2, align 4, !tbaa !34
240 define <2 x i16> @rsGetElementAtImpl_ushort2([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
241 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 4, i32 %x, i32 %y, i32 %z) #2
242 %2 = bitcast i8* %1 to <2 x i16>*
243 %3 = load <2 x i16>, <2 x i16>* %2, align 4, !tbaa !34
244 ret <2 x i16> %3
248 define void @rsSetElementAtImpl_ushort3([1 x i32] %a.coerce, <3 x i16> %val, i32 %x, i32 %y, i32 %z…
249 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 8, i32 %x, i32 %y, i32 %z) #2
250 %2 = shufflevector <3 x i16> %val, <3 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
251 %3 = bitcast i8* %1 to <4 x i16>*
252 store <4 x i16> %2, <4 x i16>* %3, align 8, !tbaa !35
256 define <3 x i16> @rsGetElementAtImpl_ushort3([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
257 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 8, i32 %x, i32 %y, i32 %z) #2
258 %2 = bitcast i8* %1 to <4 x i16>*
259 %3 = load <4 x i16>, <4 x i16>* %2, align 8, !tbaa !35
260 %4 = shufflevector <4 x i16> %3, <4 x i16> undef, <3 x i32> <i32 0, i32 1, i32 2>
261 ret <3 x i16> %4
265 define void @rsSetElementAtImpl_ushort4([1 x i32] %a.coerce, <4 x i16> %val, i32 %x, i32 %y, i32 %z…
266 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 8, i32 %x, i32 %y, i32 %z) #2
267 %2 = bitcast i8* %1 to <4 x i16>*
268 store <4 x i16> %val, <4 x i16>* %2, align 8, !tbaa !36
272 define <4 x i16> @rsGetElementAtImpl_ushort4([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
273 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 8, i32 %x, i32 %y, i32 %z) #2
274 %2 = bitcast i8* %1 to <4 x i16>*
275 %3 = load <4 x i16>, <4 x i16>* %2, align 8, !tbaa !36
276 ret <4 x i16> %3
280 define void @rsSetElementAtImpl_int([1 x i32] %a.coerce, i32 %val, i32 %x, i32 %y, i32 %z) #1 {
281 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 4, i32 %x, i32 %y, i32 %z) #2
287 define i32 @rsGetElementAtImpl_int([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
288 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 4, i32 %x, i32 %y, i32 %z) #2
295 define void @rsSetElementAtImpl_int2([1 x i32] %a.coerce, <2 x i32> %val, i32 %x, i32 %y, i32 %z) #…
296 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 8, i32 %x, i32 %y, i32 %z) #2
297 %2 = bitcast i8* %1 to <2 x i32>*
298 store <2 x i32> %val, <2 x i32>* %2, align 8, !tbaa !38
302 define <2 x i32> @rsGetElementAtImpl_int2([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
303 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 8, i32 %x, i32 %y, i32 %z) #2
304 %2 = bitcast i8* %1 to <2 x i32>*
305 %3 = load <2 x i32>, <2 x i32>* %2, align 8, !tbaa !38
306 ret <2 x i32> %3
310 define void @rsSetElementAtImpl_int3([1 x i32] %a.coerce, <3 x i32> %val, i32 %x, i32 %y, i32 %z) #…
311 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 16, i32 %x, i32 %y, i32 %z) #2
312 %2 = shufflevector <3 x i32> %val, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
313 %3 = bitcast i8* %1 to <4 x i32>*
314 store <4 x i32> %2, <4 x i32>* %3, align 16, !tbaa !39
318 define <3 x i32> @rsGetElementAtImpl_int3([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
319 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 16, i32 %x, i32 %y, i32 %z) #2
320 %2 = bitcast i8* %1 to <4 x i32>*
321 %3 = load <4 x i32>, <4 x i32>* %2, align 8, !tbaa !39
322 %4 = shufflevector <4 x i32> %3, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
323 ret <3 x i32> %4
327 define void @rsSetElementAtImpl_int4([1 x i32] %a.coerce, <4 x i32> %val, i32 %x, i32 %y, i32 %z) #…
328 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 16, i32 %x, i32 %y, i32 %z) #2
329 %2 = bitcast i8* %1 to <4 x i32>*
330 store <4 x i32> %val, <4 x i32>* %2, align 16, !tbaa !40
334 define <4 x i32> @rsGetElementAtImpl_int4([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
335 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 16, i32 %x, i32 %y, i32 %z) #2
336 %2 = bitcast i8* %1 to <4 x i32>*
337 %3 = load <4 x i32>, <4 x i32>* %2, align 16, !tbaa !40
338 ret <4 x i32> %3
342 define void @rsSetElementAtImpl_uint([1 x i32] %a.coerce, i32 %val, i32 %x, i32 %y, i32 %z) #1 {
343 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 4, i32 %x, i32 %y, i32 %z) #2
349 define i32 @rsGetElementAtImpl_uint([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
350 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 4, i32 %x, i32 %y, i32 %z) #2
357 define void @rsSetElementAtImpl_uint2([1 x i32] %a.coerce, <2 x i32> %val, i32 %x, i32 %y, i32 %z) …
358 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 8, i32 %x, i32 %y, i32 %z) #2
359 %2 = bitcast i8* %1 to <2 x i32>*
360 store <2 x i32> %val, <2 x i32>* %2, align 8, !tbaa !42
364 define <2 x i32> @rsGetElementAtImpl_uint2([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
365 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 8, i32 %x, i32 %y, i32 %z) #2
366 %2 = bitcast i8* %1 to <2 x i32>*
367 %3 = load <2 x i32>, <2 x i32>* %2, align 8, !tbaa !42
368 ret <2 x i32> %3
372 define void @rsSetElementAtImpl_uint3([1 x i32] %a.coerce, <3 x i32> %val, i32 %x, i32 %y, i32 %z) …
373 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 16, i32 %x, i32 %y, i32 %z) #2
374 %2 = shufflevector <3 x i32> %val, <3 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
375 %3 = bitcast i8* %1 to <4 x i32>*
376 store <4 x i32> %2, <4 x i32>* %3, align 16, !tbaa !43
380 define <3 x i32> @rsGetElementAtImpl_uint3([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
381 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 16, i32 %x, i32 %y, i32 %z) #2
382 %2 = bitcast i8* %1 to <4 x i32>*
383 %3 = load <4 x i32>, <4 x i32>* %2, align 8, !tbaa !43
384 %4 = shufflevector <4 x i32> %3, <4 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
385 ret <3 x i32> %4
389 define void @rsSetElementAtImpl_uint4([1 x i32] %a.coerce, <4 x i32> %val, i32 %x, i32 %y, i32 %z) …
390 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 16, i32 %x, i32 %y, i32 %z) #2
391 %2 = bitcast i8* %1 to <4 x i32>*
392 store <4 x i32> %val, <4 x i32>* %2, align 16, !tbaa !44
396 define <4 x i32> @rsGetElementAtImpl_uint4([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
397 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 16, i32 %x, i32 %y, i32 %z) #2
398 %2 = bitcast i8* %1 to <4 x i32>*
399 %3 = load <4 x i32>, <4 x i32>* %2, align 16, !tbaa !44
400 ret <4 x i32> %3
404 define void @rsSetElementAtImpl_long([1 x i32] %a.coerce, i64 %val, i32 %x, i32 %y, i32 %z) #1 {
405 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 8, i32 %x, i32 %y, i32 %z) #2
411 define i64 @rsGetElementAtImpl_long([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
412 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 8, i32 %x, i32 %y, i32 %z) #2
419 define void @rsSetElementAtImpl_long2([1 x i32] %a.coerce, <2 x i64> %val, i32 %x, i32 %y, i32 %z) …
420 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 16, i32 %x, i32 %y, i32 %z) #2
421 %2 = bitcast i8* %1 to <2 x i64>*
422 store <2 x i64> %val, <2 x i64>* %2, align 16, !tbaa !46
426 define <2 x i64> @rsGetElementAtImpl_long2([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
427 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 16, i32 %x, i32 %y, i32 %z) #2
428 %2 = bitcast i8* %1 to <2 x i64>*
429 %3 = load <2 x i64>, <2 x i64>* %2, align 16, !tbaa !46
430 ret <2 x i64> %3
434 define void @rsSetElementAtImpl_long3([1 x i32] %a.coerce, <3 x i64> %val, i32 %x, i32 %y, i32 %z) …
435 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 32, i32 %x, i32 %y, i32 %z) #2
436 %2 = shufflevector <3 x i64> %val, <3 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
437 %3 = bitcast i8* %1 to <4 x i64>*
438 store <4 x i64> %2, <4 x i64>* %3, align 32, !tbaa !47
442 define void @rsGetElementAtImpl_long3(<3 x i64>* noalias nocapture sret %agg.result, [1 x i32] %a.c…
443 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 32, i32 %x, i32 %y, i32 %z) #2
444 %2 = bitcast i8* %1 to <4 x i64>*
445 %3 = load <4 x i64>, <4 x i64>* %2, align 32
446 %4 = bitcast <3 x i64>* %agg.result to <4 x i64>*
447 store <4 x i64> %3, <4 x i64>* %4, align 32, !tbaa !47
452 define void @rsSetElementAtImpl_long4([1 x i32] %a.coerce, <4 x i64> %val, i32 %x, i32 %y, i32 %z) …
453 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 32, i32 %x, i32 %y, i32 %z) #2
454 %2 = bitcast i8* %1 to <4 x i64>*
455 store <4 x i64> %val, <4 x i64>* %2, align 32, !tbaa !48
459 define void @rsGetElementAtImpl_long4(<4 x i64>* noalias nocapture sret %agg.result, [1 x i32] %a.c…
460 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 32, i32 %x, i32 %y, i32 %z) #2
461 %2 = bitcast i8* %1 to <4 x i64>*
462 %3 = load <4 x i64>, <4 x i64>* %2, align 32, !tbaa !15
463 store <4 x i64> %3, <4 x i64>* %agg.result, align 32, !tbaa !48
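
The long family introduces the one structural difference in this section: a 3- or 4-element vector of i64 is 256 bits, evidently too wide to be returned by value under this ABI, so rsGetElementAtImpl_long3 and _long4 (file lines 442 and 459) return through an sret out-pointer, %agg.result. The body loads the padded <4 x i64> slot and stores it into the caller's buffer; in the 3-element case the caller's <3 x i64>* is first bitcast to <4 x i64>* so the padding lane is copied along. The ulong and double 3/4-element getters below (file lines 506, 523, 633, 649) use the same shape. A minimal C sketch of the out-parameter convention; the function and type names are illustrative:

    #include <stdint.h>

    typedef int64_t long4 __attribute__((ext_vector_type(4)));

    /* Mirrors rsGetElementAtImpl_long4: instead of returning a 32-byte vector
     * by value, write it through the caller-provided result pointer
     * (%agg.result in the IR). */
    static void get_long4(long4 *agg_result, const long4 *cell) {
        *agg_result = *cell;   /* one 32-byte aligned load, one store */
    }
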
468 define void @rsSetElementAtImpl_ulong([1 x i32] %a.coerce, i64 %val, i32 %x, i32 %y, i32 %z) #1 {
469 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 8, i32 %x, i32 %y, i32 %z) #2
475 define i64 @rsGetElementAtImpl_ulong([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
476 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 8, i32 %x, i32 %y, i32 %z) #2
483 define void @rsSetElementAtImpl_ulong2([1 x i32] %a.coerce, <2 x i64> %val, i32 %x, i32 %y, i32 %z)…
484 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 16, i32 %x, i32 %y, i32 %z) #2
485 %2 = bitcast i8* %1 to <2 x i64>*
486 store <2 x i64> %val, <2 x i64>* %2, align 16, !tbaa !50
490 define <2 x i64> @rsGetElementAtImpl_ulong2([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
491 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 16, i32 %x, i32 %y, i32 %z) #2
492 %2 = bitcast i8* %1 to <2 x i64>*
493 %3 = load <2 x i64>, <2 x i64>* %2, align 16, !tbaa !50
494 ret <2 x i64> %3
498 define void @rsSetElementAtImpl_ulong3([1 x i32] %a.coerce, <3 x i64> %val, i32 %x, i32 %y, i32 %z)…
499 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 32, i32 %x, i32 %y, i32 %z) #2
500 %2 = shufflevector <3 x i64> %val, <3 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
501 %3 = bitcast i8* %1 to <4 x i64>*
502 store <4 x i64> %2, <4 x i64>* %3, align 32, !tbaa !51
506 define void @rsGetElementAtImpl_ulong3(<3 x i64>* noalias nocapture sret %agg.result, [1 x i32] %a.…
507 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 32, i32 %x, i32 %y, i32 %z) #2
508 %2 = bitcast i8* %1 to <4 x i64>*
509 %3 = load <4 x i64>, <4 x i64>* %2, align 32
510 %4 = bitcast <3 x i64>* %agg.result to <4 x i64>*
511 store <4 x i64> %3, <4 x i64>* %4, align 32, !tbaa !51
516 define void @rsSetElementAtImpl_ulong4([1 x i32] %a.coerce, <4 x i64> %val, i32 %x, i32 %y, i32 %z)…
517 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 32, i32 %x, i32 %y, i32 %z) #2
518 %2 = bitcast i8* %1 to <4 x i64>*
519 store <4 x i64> %val, <4 x i64>* %2, align 32, !tbaa !52
523 define void @rsGetElementAtImpl_ulong4(<4 x i64>* noalias nocapture sret %agg.result, [1 x i32] %a.…
524 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 32, i32 %x, i32 %y, i32 %z) #2
525 %2 = bitcast i8* %1 to <4 x i64>*
526 %3 = load <4 x i64>, <4 x i64>* %2, align 32, !tbaa !15
527 store <4 x i64> %3, <4 x i64>* %agg.result, align 32, !tbaa !52
532 define void @rsSetElementAtImpl_float([1 x i32] %a.coerce, float %val, i32 %x, i32 %y, i32 %z) #1 {
533 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 4, i32 %x, i32 %y, i32 %z) #2
539 define float @rsGetElementAtImpl_float([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
540 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 4, i32 %x, i32 %y, i32 %z) #2
547 define void @rsSetElementAtImpl_float2([1 x i32] %a.coerce, <2 x float> %val, i32 %x, i32 %y, i32 %…
548 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 8, i32 %x, i32 %y, i32 %z) #2
549 %2 = bitcast i8* %1 to <2 x float>*
550 store <2 x float> %val, <2 x float>* %2, align 8, !tbaa !54
554 define <2 x float> @rsGetElementAtImpl_float2([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
555 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 8, i32 %x, i32 %y, i32 %z) #2
556 %2 = bitcast i8* %1 to <2 x float>*
557 %3 = load <2 x float>, <2 x float>* %2, align 8, !tbaa !54
558 ret <2 x float> %3
562 define void @rsSetElementAtImpl_float3([1 x i32] %a.coerce, <3 x float> %val, i32 %x, i32 %y, i32 %…
563 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 16, i32 %x, i32 %y, i32 %z) #2
564 %2 = shufflevector <3 x float> %val, <3 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
565 %3 = bitcast i8* %1 to <4 x float>*
566 store <4 x float> %2, <4 x float>* %3, align 16, !tbaa !55
570 define <3 x float> @rsGetElementAtImpl_float3([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
571 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 16, i32 %x, i32 %y, i32 %z) #2
572 %2 = bitcast i8* %1 to <4 x float>*
573 %3 = load <4 x float>, <4 x float>* %2, align 8, !tbaa !55
574 %4 = shufflevector <4 x float> %3, <4 x float> undef, <3 x i32> <i32 0, i32 1, i32 2>
575 ret <3 x float> %4
579 define void @rsSetElementAtImpl_float4([1 x i32] %a.coerce, <4 x float> %val, i32 %x, i32 %y, i32 %…
580 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 16, i32 %x, i32 %y, i32 %z) #2
581 %2 = bitcast i8* %1 to <4 x float>*
582 store <4 x float> %val, <4 x float>* %2, align 16, !tbaa !56
586 define <4 x float> @rsGetElementAtImpl_float4([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
587 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 16, i32 %x, i32 %y, i32 %z) #2
588 %2 = bitcast i8* %1 to <4 x float>*
589 %3 = load <4 x float>, <4 x float>* %2, align 16, !tbaa !56
590 ret <4 x float> %3
594 define void @rsSetElementAtImpl_double([1 x i32] %a.coerce, double %val, i32 %x, i32 %y, i32 %z) #1…
595 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 8, i32 %x, i32 %y, i32 %z) #2
601 define double @rsGetElementAtImpl_double([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
602 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 8, i32 %x, i32 %y, i32 %z) #2
609 define void @rsSetElementAtImpl_double2([1 x i32] %a.coerce, <2 x double> %val, i32 %x, i32 %y, i32…
610 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 16, i32 %x, i32 %y, i32 %z) #2
611 %2 = bitcast i8* %1 to <2 x double>*
612 store <2 x double> %val, <2 x double>* %2, align 16, !tbaa !58
616 define <2 x double> @rsGetElementAtImpl_double2([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
617 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 16, i32 %x, i32 %y, i32 %z) #2
618 %2 = bitcast i8* %1 to <2 x double>*
619 %3 = load <2 x double>, <2 x double>* %2, align 16, !tbaa !58
620 ret <2 x double> %3
624 define void @rsSetElementAtImpl_double3([1 x i32] %a.coerce, <3 x double> %val, i32 %x, i32 %y, i32…
625 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 32, i32 %x, i32 %y, i32 %z) #2
626 %2 = shufflevector <3 x double> %val, <3 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 unde…
627 %3 = bitcast i8* %1 to <4 x double>*
628 store <4 x double> %2, <4 x double>* %3, align 32, !tbaa !59
633 define void @rsGetElementAtImpl_double3(<3 x double>* noalias nocapture sret %agg.result, [1 x i32] %a…
634 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 32, i32 %x, i32 %y, i32 %z) #2
635 %2 = bitcast i8* %1 to <4 x double>*
636 %3 = load <4 x double>, <4 x double>* %2, align 32
637 %4 = bitcast <3 x double>* %agg.result to <4 x double>*
638 store <4 x double> %3, <4 x double>* %4, align 32, !tbaa !59
643 define void @rsSetElementAtImpl_double4([1 x i32] %a.coerce, <4 x double> %val, i32 %x, i32 %y, i32…
644 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 32, i32 %x, i32 %y, i32 %z) #2
645 %2 = bitcast i8* %1 to <4 x double>*
646 store <4 x double> %val, <4 x double>* %2, align 32, !tbaa !60
649 define void @rsGetElementAtImpl_double4(<4 x double>* noalias nocapture sret %agg.result, [1 x i32] %a…
650 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 32, i32 %x, i32 %y, i32 %z) #2
651 %2 = bitcast i8* %1 to <4 x double>*
652 %3 = load <4 x double>, <4 x double>* %2, align 32, !tbaa !15
653 store <4 x double> %3, <4 x double>* %agg.result, align 32, !tbaa !60
658 define void @rsSetElementAtImpl_half([1 x i32] %a.coerce, half %val, i32 %x, i32 %y, i32 %z) #1 {
659 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 2, i32 %x, i32 %y, i32 %z) #2
665 define half @rsGetElementAtImpl_half([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
666 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 2, i32 %x, i32 %y, i32 %z) #2
673 define void @rsSetElementAtImpl_half2([1 x i32] %a.coerce, <2 x half> %val, i32 %x, i32 %y, i32 %z)…
674 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 4, i32 %x, i32 %y, i32 %z) #2
675 %2 = bitcast i8* %1 to <2 x half>*
676 store <2 x half> %val, <2 x half>* %2, align 4, !tbaa !62
680 define <2 x half> @rsGetElementAtImpl_half2([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
681 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 4, i32 %x, i32 %y, i32 %z) #2
682 %2 = bitcast i8* %1 to <2 x half>*
683 %3 = load <2 x half>, <2 x half>* %2, align 4, !tbaa !62
684 ret <2 x half> %3
688 define void @rsSetElementAtImpl_half3([1 x i32] %a.coerce, <3 x half> %val, i32 %x, i32 %y, i32 %z)…
689 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 8, i32 %x, i32 %y, i32 %z) #2
690 %2 = shufflevector <3 x half> %val, <3 x half> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
691 %3 = bitcast i8* %1 to <4 x half>*
692 store <4 x half> %2, <4 x half>* %3, align 8, !tbaa !63
696 define <3 x half> @rsGetElementAtImpl_half3([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #1 {
697 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 8, i32 %x, i32 %y, i32 %z) #2
698 %2 = bitcast i8* %1 to <4 x half>*
699 %3 = load <4 x half>, <4 x half>* %2, align 8, !tbaa !63
700 %4 = shufflevector <4 x half> %3, <4 x half> undef, <3 x i32> <i32 0, i32 1, i32 2>
701 ret <3 x half> %4
705 define void @rsSetElementAtImpl_half4([1 x i32] %a.coerce, <4 x half> %val, i32 %x, i32 %y, i32 %z)…
706 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 8, i32 %x, i32 %y, i32 %z) #2
707 %2 = bitcast i8* %1 to <4 x half>*
708 store <4 x half> %val, <4 x half>* %2, align 8, !tbaa !64
712 define <4 x half> @rsGetElementAtImpl_half4([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
713 %1 = tail call i8* @rsOffset([1 x i32] %a.coerce, i32 8, i32 %x, i32 %y, i32 %z) #2
714 %2 = bitcast i8* %1 to <4 x half>*
715 %3 = load <4 x half>, <4 x half>* %2, align 8, !tbaa !64
716 ret <4 x half> %3
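
That closes the rsSetElementAtImpl/rsGetElementAtImpl section. Across all of it the element size passed to rsOffset and the access alignment simply scale with the type: char uses 1/2/4/4 bytes for the scalar, 2-, 3- and 4-element variants, short and half use 2/4/8/8, int and float 4/8/16/16, long and double 8/16/32/32, with 3-element vectors always occupying the 4-element slot. One quirk visible in the listing: a few 3-element getters load with a weaker alignment than their setters store with (int3 at file line 321, uint3 at 383 and float3 at 573 load with align 8 against an align 16 store); the weaker hint is harmless, just conservative.

The definitions that follow are the __rsAllocationVLoadXImpl family: the vector is read from consecutive scalar cells starting at (x, y, z), so the address comes from rsOffsetNs (no size argument) and each load uses only the scalar element's alignment (align 1, 2, 4 or 8), meaning the access may be unaligned with respect to the vector type. The 64-bit 3- and 4-element results still come back through sret as above, while the narrower 3-element loads use true <3 x ...> vectors. A minimal C sketch of the load side, with memcpy standing in for the under-aligned load; the function name is illustrative:

    #include <string.h>

    typedef int int4 __attribute__((ext_vector_type(4)));

    /* Mirrors __rsAllocationVLoadXImpl_int4: read 16 bytes starting at the
     * addressed scalar cell; the source is only guaranteed 4-byte aligned,
     * which the IR expresses as a <4 x i32> load with align 4. */
    static int4 vload_int4(const void *cell /* from rsOffsetNs */) {
        int4 v;
        memcpy(&v, cell, sizeof v);
        return v;
    }
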
719 define void @__rsAllocationVLoadXImpl_long4(<4 x i64>* noalias nocapture sret %agg.result, [1 x i32] %a.…
720 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
721 %2 = bitcast i8* %1 to <4 x i64>*
722 %3 = load <4 x i64>, <4 x i64>* %2, align 8
723 store <4 x i64> %3, <4 x i64>* %agg.result, align 32, !tbaa !52
726 define void @__rsAllocationVLoadXImpl_long3(<3 x i64>* noalias nocapture sret %agg.result, [1 x i32] %a.…
727 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
728 %2 = bitcast i8* %1 to <4 x i64>*
729 %3 = load <4 x i64>, <4 x i64>* %2, align 8
730 %4 = bitcast <3 x i64>* %agg.result to <4 x i64>*
731 store <4 x i64> %3, <4 x i64>* %4, align 32, !tbaa !47
734 define <2 x i64> @__rsAllocationVLoadXImpl_long2([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
735 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
736 %2 = bitcast i8* %1 to <2 x i64>*
737 %3 = load <2 x i64>, <2 x i64>* %2, align 8
738 ret <2 x i64> %3
741 define void @__rsAllocationVLoadXImpl_ulong4(<4 x i64>* noalias nocapture sret %agg.result, [1 x i32] %a.…
742 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
743 %2 = bitcast i8* %1 to <4 x i64>*
744 %3 = load <4 x i64>, <4 x i64>* %2, align 8
745 store <4 x i64> %3, <4 x i64>* %agg.result, align 32, !tbaa !48
748 define void @__rsAllocationVLoadXImpl_ulong3(<3 x i64>* noalias nocapture sret %agg.result, [1 x i32] %a.…
749 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
750 %2 = bitcast i8* %1 to <4 x i64>*
751 %3 = load <4 x i64>, <4 x i64>* %2, align 8
752 %4 = bitcast <3 x i64>* %agg.result to <4 x i64>*
753 store <4 x i64> %3, <4 x i64>* %4, align 32, !tbaa !51
756 define <2 x i64> @__rsAllocationVLoadXImpl_ulong2([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
757 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
758 %2 = bitcast i8* %1 to <2 x i64>*
759 %3 = load <2 x i64>, <2 x i64>* %2, align 8
760 ret <2 x i64> %3
763 define <4 x i32> @__rsAllocationVLoadXImpl_int4([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
764 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
765 %2 = bitcast i8* %1 to <4 x i32>*
766 %3 = load <4 x i32>, <4 x i32>* %2, align 4
767 ret <4 x i32> %3
769 define <3 x i32> @__rsAllocationVLoadXImpl_int3([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
770 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
771 %2 = bitcast i8* %1 to <3 x i32>*
772 %3 = load <3 x i32>, <3 x i32>* %2, align 4
773 ret <3 x i32> %3
775 define <2 x i32> @__rsAllocationVLoadXImpl_int2([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
776 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
777 %2 = bitcast i8* %1 to <2 x i32>*
778 %3 = load <2 x i32>, <2 x i32>* %2, align 4
779 ret <2 x i32> %3
782 define <4 x i32> @__rsAllocationVLoadXImpl_uint4([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
783 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
784 %2 = bitcast i8* %1 to <4 x i32>*
785 %3 = load <4 x i32>, <4 x i32>* %2, align 4
786 ret <4 x i32> %3
788 define <3 x i32> @__rsAllocationVLoadXImpl_uint3([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
789 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
790 %2 = bitcast i8* %1 to <3 x i32>*
791 %3 = load <3 x i32>, <3 x i32>* %2, align 4
792 ret <3 x i32> %3
794 define <2 x i32> @__rsAllocationVLoadXImpl_uint2([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
795 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
796 %2 = bitcast i8* %1 to <2 x i32>*
797 %3 = load <2 x i32>, <2 x i32>* %2, align 4
798 ret <2 x i32> %3
801 define <4 x i16> @__rsAllocationVLoadXImpl_short4([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
802 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
803 %2 = bitcast i8* %1 to <4 x i16>*
804 %3 = load <4 x i16>, <4 x i16>* %2, align 2
805 ret <4 x i16> %3
807 define <3 x i16> @__rsAllocationVLoadXImpl_short3([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
808 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
809 %2 = bitcast i8* %1 to <3 x i16>*
810 %3 = load <3 x i16>, <3 x i16>* %2, align 2
811 ret <3 x i16> %3
813 define <2 x i16> @__rsAllocationVLoadXImpl_short2([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
814 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
815 %2 = bitcast i8* %1 to <2 x i16>*
816 %3 = load <2 x i16>, <2 x i16>* %2, align 2
817 ret <2 x i16> %3
820 define <4 x i16> @__rsAllocationVLoadXImpl_ushort4([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
821 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
822 %2 = bitcast i8* %1 to <4 x i16>*
823 %3 = load <4 x i16>, <4 x i16>* %2, align 2
824 ret <4 x i16> %3
826 define <3 x i16> @__rsAllocationVLoadXImpl_ushort3([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
827 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
828 %2 = bitcast i8* %1 to <3 x i16>*
829 %3 = load <3 x i16>, <3 x i16>* %2, align 2
830 ret <3 x i16> %3
832 define <2 x i16> @__rsAllocationVLoadXImpl_ushort2([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
833 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
834 %2 = bitcast i8* %1 to <2 x i16>*
835 %3 = load <2 x i16>, <2 x i16>* %2, align 2
836 ret <2 x i16> %3
839 define <4 x i8> @__rsAllocationVLoadXImpl_char4([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
840 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
841 %2 = bitcast i8* %1 to <4 x i8>*
842 %3 = load <4 x i8>, <4 x i8>* %2, align 1
843 ret <4 x i8> %3
845 define <3 x i8> @__rsAllocationVLoadXImpl_char3([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
846 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
847 %2 = bitcast i8* %1 to <3 x i8>*
848 %3 = load <3 x i8>, <3 x i8>* %2, align 1
849 ret <3 x i8> %3
851 define <2 x i8> @__rsAllocationVLoadXImpl_char2([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
852 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
853 %2 = bitcast i8* %1 to <2 x i8>*
854 %3 = load <2 x i8>, <2 x i8>* %2, align 1
855 ret <2 x i8> %3
858 define <4 x i8> @__rsAllocationVLoadXImpl_uchar4([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
859 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
860 %2 = bitcast i8* %1 to <4 x i8>*
861 %3 = load <4 x i8>, <4 x i8>* %2, align 1
862 ret <4 x i8> %3
864 define <3 x i8> @__rsAllocationVLoadXImpl_uchar3([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
865 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
866 %2 = bitcast i8* %1 to <3 x i8>*
867 %3 = load <3 x i8>, <3 x i8>* %2, align 1
868 ret <3 x i8> %3
870 define <2 x i8> @__rsAllocationVLoadXImpl_uchar2([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0 {
871 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
872 %2 = bitcast i8* %1 to <2 x i8>*
873 %3 = load <2 x i8>, <2 x i8>* %2, align 1
874 ret <2 x i8> %3
877 define <4 x float> @__rsAllocationVLoadXImpl_float4([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0…
878 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
879 %2 = bitcast i8* %1 to <4 x float>*
880 %3 = load <4 x float>, <4 x float>* %2, align 4
881 ret <4 x float> %3
883 define <3 x float> @__rsAllocationVLoadXImpl_float3([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0…
884 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
885 %2 = bitcast i8* %1 to <3 x float>*
886 %3 = load <3 x float>, <3 x float>* %2, align 4
887 ret <3 x float> %3
889 define <2 x float> @__rsAllocationVLoadXImpl_float2([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #0…
890 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
891 %2 = bitcast i8* %1 to <2 x float>*
892 %3 = load <2 x float>, <2 x float>* %2, align 4
893 ret <2 x float> %3
896 define void @__rsAllocationVLoadXImpl_double4(<4 x double>* noalias nocapture sret %agg.result, [1 x i32] %a…
897 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
898 %2 = bitcast i8* %1 to <4 x double>*
899 %3 = load <4 x double>, <4 x double>* %2, align 8
900 store <4 x double> %3, <4 x double>* %agg.result, align 32, !tbaa !60
903 define void @__rsAllocationVLoadXImpl_double3(<3 x double>* noalias nocapture sret %agg.result, [1 x i32] %a…
904 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
905 %2 = bitcast i8* %1 to <4 x double>*
906 %3 = load <4 x double>, <4 x double>* %2, align 8
907 %4 = bitcast <3 x double>* %agg.result to <4 x double>*
908 store <4 x double> %3, <4 x double>* %4, align 32, !tbaa !59
911 define <2 x double> @__rsAllocationVLoadXImpl_double2([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) …
912 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
913 %2 = bitcast i8* %1 to <2 x double>*
914 %3 = load <2 x double>, <2 x double>* %2, align 8
915 ret <2 x double> %3
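
The remaining definitions are the matching __rsAllocationVStoreXImpl family: the same rsOffsetNs addressing, one store per call at the scalar element's alignment, and here even the 64-bit 3-element vectors are stored as true <3 x i64> and <3 x double> values rather than padded 4-element slots. A minimal C sketch of the store side, again with memcpy standing in for the under-aligned store; the function name is illustrative:

    #include <string.h>

    typedef float float2 __attribute__((ext_vector_type(2)));

    /* Mirrors __rsAllocationVStoreXImpl_float2: write 8 bytes to consecutive
     * float cells; the destination is only guaranteed 4-byte aligned. */
    static void vstore_float2(void *cell /* from rsOffsetNs */, float2 v) {
        memcpy(cell, &v, sizeof v);
    }
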
919 define void @__rsAllocationVStoreXImpl_long4([1 x i32] %a.coerce, <4 x i64> %val, i32 %x, i32 %y, i…
920 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
921 %2 = bitcast i8* %1 to <4 x i64>*
922 store <4 x i64> %val, <4 x i64>* %2, align 8
925 define void @__rsAllocationVStoreXImpl_long3([1 x i32] %a.coerce, <3 x i64> %val, i32 %x, i32 %y, i…
926 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
927 %2 = bitcast i8* %1 to <3 x i64>*
928 store <3 x i64> %val, <3 x i64>* %2, align 8
931 define void @__rsAllocationVStoreXImpl_long2([1 x i32] %a.coerce, <2 x i64> %val, i32 %x, i32 %y, i…
932 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
933 %2 = bitcast i8* %1 to <2 x i64>*
934 store <2 x i64> %val, <2 x i64>* %2, align 8
938 define void @__rsAllocationVStoreXImpl_ulong4([1 x i32] %a.coerce, <4 x i64> %val, i32 %x, i32 %y, …
939 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
940 %2 = bitcast i8* %1 to <4 x i64>*
941 store <4 x i64> %val, <4 x i64>* %2, align 8
944 define void @__rsAllocationVStoreXImpl_ulong3([1 x i32] %a.coerce, <3 x i64> %val, i32 %x, i32 %y, …
945 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
946 %2 = bitcast i8* %1 to <3 x i64>*
947 store <3 x i64> %val, <3 x i64>* %2, align 8
950 define void @__rsAllocationVStoreXImpl_ulong2([1 x i32] %a.coerce, <2 x i64> %val, i32 %x, i32 %y, …
951 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
952 %2 = bitcast i8* %1 to <2 x i64>*
953 store <2 x i64> %val, <2 x i64>* %2, align 8
957 define void @__rsAllocationVStoreXImpl_int4([1 x i32] %a.coerce, <4 x i32> %val, i32 %x, i32 %y, i3…
958 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
959 %2 = bitcast i8* %1 to <4 x i32>*
960 store <4 x i32> %val, <4 x i32>* %2, align 4
963 define void @__rsAllocationVStoreXImpl_int3([1 x i32] %a.coerce, <3 x i32> %val, i32 %x, i32 %y, i3…
964 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
965 %2 = bitcast i8* %1 to <3 x i32>*
966 store <3 x i32> %val, <3 x i32>* %2, align 4
969 define void @__rsAllocationVStoreXImpl_int2([1 x i32] %a.coerce, <2 x i32> %val, i32 %x, i32 %y, i3…
970 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
971 %2 = bitcast i8* %1 to <2 x i32>*
972 store <2 x i32> %val, <2 x i32>* %2, align 4
976 define void @__rsAllocationVStoreXImpl_uint4([1 x i32] %a.coerce, <4 x i32> %val, i32 %x, i32 %y, i…
977 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
978 %2 = bitcast i8* %1 to <4 x i32>*
979 store <4 x i32> %val, <4 x i32>* %2, align 4
982 define void @__rsAllocationVStoreXImpl_uint3([1 x i32] %a.coerce, <3 x i32> %val, i32 %x, i32 %y, i…
983 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
984 %2 = bitcast i8* %1 to <3 x i32>*
985 store <3 x i32> %val, <3 x i32>* %2, align 4
988 define void @__rsAllocationVStoreXImpl_uint2([1 x i32] %a.coerce, <2 x i32> %val, i32 %x, i32 %y, i…
989 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
990 %2 = bitcast i8* %1 to <2 x i32>*
991 store <2 x i32> %val, <2 x i32>* %2, align 4
995 define void @__rsAllocationVStoreXImpl_short4([1 x i32] %a.coerce, <4 x i16> %val, i32 %x, i32 %y, …
996 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
997 %2 = bitcast i8* %1 to <4 x i16>*
998 store <4 x i16> %val, <4 x i16>* %2, align 2
1001 define void @__rsAllocationVStoreXImpl_short3([1 x i32] %a.coerce, <3 x i16> %val, i32 %x, i32 %y, …
1002 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
1003 %2 = bitcast i8* %1 to <3 x i16>*
1004 store <3 x i16> %val, <3 x i16>* %2, align 2
1007 define void @__rsAllocationVStoreXImpl_short2([1 x i32] %a.coerce, <2 x i16> %val, i32 %x, i32 %y, …
1008 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
1009 %2 = bitcast i8* %1 to <2 x i16>*
1010 store <2 x i16> %val, <2 x i16>* %2, align 2
1014 define void @__rsAllocationVStoreXImpl_ushort4([1 x i32] %a.coerce, <4 x i16> %val, i32 %x, i32 %y,…
1015 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
1016 %2 = bitcast i8* %1 to <4 x i16>*
1017 store <4 x i16> %val, <4 x i16>* %2, align 2
1020 define void @__rsAllocationVStoreXImpl_ushort3([1 x i32] %a.coerce, <3 x i16> %val, i32 %x, i32 %y,…
1021 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
1022 %2 = bitcast i8* %1 to <3 x i16>*
1023 store <3 x i16> %val, <3 x i16>* %2, align 2
1026 define void @__rsAllocationVStoreXImpl_ushort2([1 x i32] %a.coerce, <2 x i16> %val, i32 %x, i32 %y,…
1027 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
1028 %2 = bitcast i8* %1 to <2 x i16>*
1029 store <2 x i16> %val, <2 x i16>* %2, align 2
1033 define void @__rsAllocationVStoreXImpl_char4([1 x i32] %a.coerce, <4 x i8> %val, i32 %x, i32 %y, i3…
1034 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
1035 %2 = bitcast i8* %1 to <4 x i8>*
1036 store <4 x i8> %val, <4 x i8>* %2, align 1
1039 define void @__rsAllocationVStoreXImpl_char3([1 x i32] %a.coerce, <3 x i8> %val, i32 %x, i32 %y, i3…
1040 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
1041 %2 = bitcast i8* %1 to <3 x i8>*
1042 store <3 x i8> %val, <3 x i8>* %2, align 1
1045 define void @__rsAllocationVStoreXImpl_char2([1 x i32] %a.coerce, <2 x i8> %val, i32 %x, i32 %y, i3…
1046 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
1047 %2 = bitcast i8* %1 to <2 x i8>*
1048 store <2 x i8> %val, <2 x i8>* %2, align 1
1052 define void @__rsAllocationVStoreXImpl_uchar4([1 x i32] %a.coerce, <4 x i8> %val, i32 %x, i32 %y, i…
1053 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
1054 %2 = bitcast i8* %1 to <4 x i8>*
1055 store <4 x i8> %val, <4 x i8>* %2, align 1
1058 define void @__rsAllocationVStoreXImpl_uchar3([1 x i32] %a.coerce, <3 x i8> %val, i32 %x, i32 %y, i…
1059 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
1060 %2 = bitcast i8* %1 to <3 x i8>*
1061 store <3 x i8> %val, <3 x i8>* %2, align 1
1064 define void @__rsAllocationVStoreXImpl_uchar2([1 x i32] %a.coerce, <2 x i8> %val, i32 %x, i32 %y, i…
1065 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
1066 %2 = bitcast i8* %1 to <2 x i8>*
1067 store <2 x i8> %val, <2 x i8>* %2, align 1
1071 define void @__rsAllocationVStoreXImpl_float4([1 x i32] %a.coerce, <4 x float> %val, i32 %x, i32 %y…
1072 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
1073 %2 = bitcast i8* %1 to <4 x float>*
1074 store <4 x float> %val, <4 x float>* %2, align 4
1077 define void @__rsAllocationVStoreXImpl_float3([1 x i32] %a.coerce, <3 x float> %val, i32 %x, i32 %y…
1078 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
1079 %2 = bitcast i8* %1 to <3 x float>*
1080 store <3 x float> %val, <3 x float>* %2, align 4
1083 define void @__rsAllocationVStoreXImpl_float2([1 x i32] %a.coerce, <2 x float> %val, i32 %x, i32 %y…
1084 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
1085 %2 = bitcast i8* %1 to <2 x float>*
1086 store <2 x float> %val, <2 x float>* %2, align 4
1090 define void @__rsAllocationVStoreXImpl_double4([1 x i32] %a.coerce, <4 x double> %val, i32 %x, i32 …
1091 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
1092 %2 = bitcast i8* %1 to <4 x double>*
1093 store <4 x double> %val, <4 x double>* %2, align 8
1096 define void @__rsAllocationVStoreXImpl_double3([1 x i32] %a.coerce, <3 x double> %val, i32 %x, i32 …
1097 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
1098 %2 = bitcast i8* %1 to <3 x double>*
1099 store <3 x double> %val, <3 x double>* %2, align 8
1102 define void @__rsAllocationVStoreXImpl_double2([1 x i32] %a.coerce, <2 x double> %val, i32 %x, i32 …
1103 %1 = tail call i8* @rsOffsetNs([1 x i32] %a.coerce, i32 %x, i32 %y, i32 %z) #2
1104 %2 = bitcast i8* %1 to <2 x double>*
1105 store <2 x double> %val, <2 x double>* %2, align 8