Lines Matching refs:i8

The listing below is a filtered view of an LLVM IR test file: only lines containing the token i8 are shown, each prefixed with its line number in the source file, so gaps in the numbering mark omitted non-matching lines. The IR uses the older typed-pointer syntax (i8* pointers, load i8* @uc, and the single-result form of cmpxchg), which apparently predates the LLVM 3.5-era cmpxchg and load syntax changes.

3 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v…
6 @sc = common global i8 0
7 @uc = common global i8 0
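
The two i8 globals above, plus the wider globals reached below through bitcast addresses (@ss, @us, @si, @ui, @sl, @ul, @sll, @ull), amount to one global of each basic integer width. A hypothetical C reconstruction, assuming an LP64 target as the datalayout suggests; the later sketches in this page assume these declarations:

    /* Hypothetical C counterparts; names mirror the IR globals. */
    signed char        sc;   /* @sc  : i8  */
    unsigned char      uc;   /* @uc  : i8  */
    short              ss;   /* @ss  : i16 */
    unsigned short     us;   /* @us  : i16 */
    int                si;   /* @si  : i32 */
    unsigned int       ui;   /* @ui  : i32 */
    long               sl;   /* @sl  : i64 */
    unsigned long      ul;   /* @ul  : i64 */
    long long          sll;  /* @sll : i64 */
    unsigned long long ull;  /* @ull : i64 */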
19 %0 = atomicrmw add i8* @sc, i8 1 monotonic
20 %1 = atomicrmw add i8* @uc, i8 1 monotonic
21 %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
23 %4 = bitcast i8* bitcast (i16* @us to i8*) to i16*
25 %6 = bitcast i8* bitcast (i32* @si to i8*) to i32*
27 %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
29 %10 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
31 %12 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
33 %14 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
35 %16 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
37 %18 = atomicrmw sub i8* @sc, i8 1 monotonic
38 %19 = atomicrmw sub i8* @uc, i8 1 monotonic
39 %20 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
41 %22 = bitcast i8* bitcast (i16* @us to i8*) to i16*
43 %24 = bitcast i8* bitcast (i32* @si to i8*) to i32*
45 %26 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
47 %28 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
49 %30 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
51 %32 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
53 %34 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
55 %36 = atomicrmw or i8* @sc, i8 1 monotonic
56 %37 = atomicrmw or i8* @uc, i8 1 monotonic
57 %38 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
59 %40 = bitcast i8* bitcast (i16* @us to i8*) to i16*
61 %42 = bitcast i8* bitcast (i32* @si to i8*) to i32*
63 %44 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
65 %46 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
67 %48 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
69 %50 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
71 %52 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
73 %54 = atomicrmw xor i8* @sc, i8 1 monotonic
74 %55 = atomicrmw xor i8* @uc, i8 1 monotonic
75 %56 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
77 %58 = bitcast i8* bitcast (i16* @us to i8*) to i16*
79 %60 = bitcast i8* bitcast (i32* @si to i8*) to i32*
81 %62 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
83 %64 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
85 %66 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
87 %68 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
89 %70 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
91 %72 = atomicrmw and i8* @sc, i8 1 monotonic
92 %73 = atomicrmw and i8* @uc, i8 1 monotonic
93 %74 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
95 %76 = bitcast i8* bitcast (i16* @us to i8*) to i16*
97 %78 = bitcast i8* bitcast (i32* @si to i8*) to i32*
99 %80 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
101 %82 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
103 %84 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
105 %86 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
107 %88 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
109 %90 = atomicrmw nand i8* @sc, i8 1 monotonic
110 %91 = atomicrmw nand i8* @uc, i8 1 monotonic
111 %92 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
113 %94 = bitcast i8* bitcast (i16* @us to i8*) to i16*
115 %96 = bitcast i8* bitcast (i32* @si to i8*) to i32*
117 %98 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
119 %100 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
121 %102 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
123 %104 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
125 %106 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
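
Lines 19-125 form one function that applies each atomicrmw operation (add, sub, or, xor, and, nand) to every global with the constant 1 and ignores the result; the bitcast lines are just the recomputed addresses of the wider globals, whose own atomicrmw instructions contain no i8 and are therefore filtered out. A minimal C sketch of the likely source shape, assuming GCC-style __sync builtins (the function name is illustrative):

    void test_op_ignore(void) {
      (void)__sync_fetch_and_add(&sc, 1);   /* -> atomicrmw add  i8* @sc, i8 1 monotonic */
      (void)__sync_fetch_and_add(&uc, 1);   /* -> atomicrmw add  i8* @uc, i8 1 monotonic */
      (void)__sync_fetch_and_sub(&sc, 1);   /* -> atomicrmw sub  ... */
      (void)__sync_fetch_and_or(&sc, 1);    /* -> atomicrmw or   ... */
      (void)__sync_fetch_and_xor(&sc, 1);   /* -> atomicrmw xor  ... */
      (void)__sync_fetch_and_and(&sc, 1);   /* -> atomicrmw and  ... */
      (void)__sync_fetch_and_nand(&sc, 1);  /* -> atomicrmw nand ... */
      /* ...and the same six operations on ss, us, si, ui, sl, ul, sll, ull */
    }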
135 %0 = atomicrmw add i8* @sc, i8 11 monotonic
136 store i8 %0, i8* @sc, align 1
137 %1 = atomicrmw add i8* @uc, i8 11 monotonic
138 store i8 %1, i8* @uc, align 1
139 %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
142 %4 = bitcast i8* bitcast (i16* @us to i8*) to i16*
145 %6 = bitcast i8* bitcast (i32* @si to i8*) to i32*
148 %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
151 %10 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
154 %12 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
157 %14 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
160 %16 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
163 %18 = atomicrmw sub i8* @sc, i8 11 monotonic
164 store i8 %18, i8* @sc, align 1
165 %19 = atomicrmw sub i8* @uc, i8 11 monotonic
166 store i8 %19, i8* @uc, align 1
167 %20 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
170 %22 = bitcast i8* bitcast (i16* @us to i8*) to i16*
173 %24 = bitcast i8* bitcast (i32* @si to i8*) to i32*
176 %26 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
179 %28 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
182 %30 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
185 %32 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
188 %34 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
191 %36 = atomicrmw or i8* @sc, i8 11 monotonic
192 store i8 %36, i8* @sc, align 1
193 %37 = atomicrmw or i8* @uc, i8 11 monotonic
194 store i8 %37, i8* @uc, align 1
195 %38 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
198 %40 = bitcast i8* bitcast (i16* @us to i8*) to i16*
201 %42 = bitcast i8* bitcast (i32* @si to i8*) to i32*
204 %44 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
207 %46 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
210 %48 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
213 %50 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
216 %52 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
219 %54 = atomicrmw xor i8* @sc, i8 11 monotonic
220 store i8 %54, i8* @sc, align 1
221 %55 = atomicrmw xor i8* @uc, i8 11 monotonic
222 store i8 %55, i8* @uc, align 1
223 %56 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
226 %58 = bitcast i8* bitcast (i16* @us to i8*) to i16*
229 %60 = bitcast i8* bitcast (i32* @si to i8*) to i32*
232 %62 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
235 %64 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
238 %66 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
241 %68 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
244 %70 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
247 %72 = atomicrmw and i8* @sc, i8 11 monotonic
248 store i8 %72, i8* @sc, align 1
249 %73 = atomicrmw and i8* @uc, i8 11 monotonic
250 store i8 %73, i8* @uc, align 1
251 %74 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
254 %76 = bitcast i8* bitcast (i16* @us to i8*) to i16*
257 %78 = bitcast i8* bitcast (i32* @si to i8*) to i32*
260 %80 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
263 %82 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
266 %84 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
269 %86 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
272 %88 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
275 %90 = atomicrmw nand i8* @sc, i8 11 monotonic
276 store i8 %90, i8* @sc, align 1
277 %91 = atomicrmw nand i8* @uc, i8 11 monotonic
278 store i8 %91, i8* @uc, align 1
279 %92 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
282 %94 = bitcast i8* bitcast (i16* @us to i8*) to i16*
285 %96 = bitcast i8* bitcast (i32* @si to i8*) to i32*
288 %98 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
291 %100 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
294 %102 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
297 %104 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
300 %106 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
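
In the second function (lines 135-300) the value returned by each atomicrmw, i.e. the old value, is stored straight back into the same global. That matches a __sync_fetch_and_OP whose result is used; a sketch under the same assumptions:

    void test_fetch_and_op(void) {
      sc = __sync_fetch_and_add(&sc, 11);   /* stores the PREVIOUS value of sc */
      uc = __sync_fetch_and_add(&uc, 11);
      sc = __sync_fetch_and_sub(&sc, 11);
      uc = __sync_fetch_and_sub(&uc, 11);
      /* or, xor, and, nand follow the same pattern, here and for the
         wider globals reached through the bitcast addresses */
    }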
311 %0 = load i8* @uc, align 1
312 %1 = zext i8 %0 to i32
313 %2 = trunc i32 %1 to i8
314 %3 = atomicrmw add i8* @sc, i8 %2 monotonic
315 %4 = add i8 %3, %2
316 store i8 %4, i8* @sc, align 1
317 %5 = load i8* @uc, align 1
318 %6 = zext i8 %5 to i32
319 %7 = trunc i32 %6 to i8
320 %8 = atomicrmw add i8* @uc, i8 %7 monotonic
321 %9 = add i8 %8, %7
322 store i8 %9, i8* @uc, align 1
323 %10 = load i8* @uc, align 1
324 %11 = zext i8 %10 to i32
325 %12 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
330 %16 = load i8* @uc, align 1
331 %17 = zext i8 %16 to i32
332 %18 = bitcast i8* bitcast (i16* @us to i8*) to i16*
337 %22 = load i8* @uc, align 1
338 %23 = zext i8 %22 to i32
339 %24 = bitcast i8* bitcast (i32* @si to i8*) to i32*
343 %27 = load i8* @uc, align 1
344 %28 = zext i8 %27 to i32
345 %29 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
349 %32 = load i8* @uc, align 1
350 %33 = zext i8 %32 to i64
351 %34 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
355 %37 = load i8* @uc, align 1
356 %38 = zext i8 %37 to i64
357 %39 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
361 %42 = load i8* @uc, align 1
362 %43 = zext i8 %42 to i64
363 %44 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
367 %47 = load i8* @uc, align 1
368 %48 = zext i8 %47 to i64
369 %49 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
373 %52 = load i8* @uc, align 1
374 %53 = zext i8 %52 to i32
375 %54 = trunc i32 %53 to i8
376 %55 = atomicrmw sub i8* @sc, i8 %54 monotonic
377 %56 = sub i8 %55, %54
378 store i8 %56, i8* @sc, align 1
379 %57 = load i8* @uc, align 1
380 %58 = zext i8 %57 to i32
381 %59 = trunc i32 %58 to i8
382 %60 = atomicrmw sub i8* @uc, i8 %59 monotonic
383 %61 = sub i8 %60, %59
384 store i8 %61, i8* @uc, align 1
385 %62 = load i8* @uc, align 1
386 %63 = zext i8 %62 to i32
387 %64 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
392 %68 = load i8* @uc, align 1
393 %69 = zext i8 %68 to i32
394 %70 = bitcast i8* bitcast (i16* @us to i8*) to i16*
399 %74 = load i8* @uc, align 1
400 %75 = zext i8 %74 to i32
401 %76 = bitcast i8* bitcast (i32* @si to i8*) to i32*
405 %79 = load i8* @uc, align 1
406 %80 = zext i8 %79 to i32
407 %81 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
411 %84 = load i8* @uc, align 1
412 %85 = zext i8 %84 to i64
413 %86 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
417 %89 = load i8* @uc, align 1
418 %90 = zext i8 %89 to i64
419 %91 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
423 %94 = load i8* @uc, align 1
424 %95 = zext i8 %94 to i64
425 %96 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
429 %99 = load i8* @uc, align 1
430 %100 = zext i8 %99 to i64
431 %101 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
435 %104 = load i8* @uc, align 1
436 %105 = zext i8 %104 to i32
437 %106 = trunc i32 %105 to i8
438 %107 = atomicrmw or i8* @sc, i8 %106 monotonic
439 %108 = or i8 %107, %106
440 store i8 %108, i8* @sc, align 1
441 %109 = load i8* @uc, align 1
442 %110 = zext i8 %109 to i32
443 %111 = trunc i32 %110 to i8
444 %112 = atomicrmw or i8* @uc, i8 %111 monotonic
445 %113 = or i8 %112, %111
446 store i8 %113, i8* @uc, align 1
447 %114 = load i8* @uc, align 1
448 %115 = zext i8 %114 to i32
449 %116 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
454 %120 = load i8* @uc, align 1
455 %121 = zext i8 %120 to i32
456 %122 = bitcast i8* bitcast (i16* @us to i8*) to i16*
461 %126 = load i8* @uc, align 1
462 %127 = zext i8 %126 to i32
463 %128 = bitcast i8* bitcast (i32* @si to i8*) to i32*
467 %131 = load i8* @uc, align 1
468 %132 = zext i8 %131 to i32
469 %133 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
473 %136 = load i8* @uc, align 1
474 %137 = zext i8 %136 to i64
475 %138 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
479 %141 = load i8* @uc, align 1
480 %142 = zext i8 %141 to i64
481 %143 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
485 %146 = load i8* @uc, align 1
486 %147 = zext i8 %146 to i64
487 %148 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
491 %151 = load i8* @uc, align 1
492 %152 = zext i8 %151 to i64
493 %153 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
497 %156 = load i8* @uc, align 1
498 %157 = zext i8 %156 to i32
499 %158 = trunc i32 %157 to i8
500 %159 = atomicrmw xor i8* @sc, i8 %158 monotonic
501 %160 = xor i8 %159, %158
502 store i8 %160, i8* @sc, align 1
503 %161 = load i8* @uc, align 1
504 %162 = zext i8 %161 to i32
505 %163 = trunc i32 %162 to i8
506 %164 = atomicrmw xor i8* @uc, i8 %163 monotonic
507 %165 = xor i8 %164, %163
508 store i8 %165, i8* @uc, align 1
509 %166 = load i8* @uc, align 1
510 %167 = zext i8 %166 to i32
511 %168 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
516 %172 = load i8* @uc, align 1
517 %173 = zext i8 %172 to i32
518 %174 = bitcast i8* bitcast (i16* @us to i8*) to i16*
523 %178 = load i8* @uc, align 1
524 %179 = zext i8 %178 to i32
525 %180 = bitcast i8* bitcast (i32* @si to i8*) to i32*
529 %183 = load i8* @uc, align 1
530 %184 = zext i8 %183 to i32
531 %185 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
535 %188 = load i8* @uc, align 1
536 %189 = zext i8 %188 to i64
537 %190 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
541 %193 = load i8* @uc, align 1
542 %194 = zext i8 %193 to i64
543 %195 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
547 %198 = load i8* @uc, align 1
548 %199 = zext i8 %198 to i64
549 %200 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
553 %203 = load i8* @uc, align 1
554 %204 = zext i8 %203 to i64
555 %205 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
559 %208 = load i8* @uc, align 1
560 %209 = zext i8 %208 to i32
561 %210 = trunc i32 %209 to i8
562 %211 = atomicrmw and i8* @sc, i8 %210 monotonic
563 %212 = and i8 %211, %210
564 store i8 %212, i8* @sc, align 1
565 %213 = load i8* @uc, align 1
566 %214 = zext i8 %213 to i32
567 %215 = trunc i32 %214 to i8
568 %216 = atomicrmw and i8* @uc, i8 %215 monotonic
569 %217 = and i8 %216, %215
570 store i8 %217, i8* @uc, align 1
571 %218 = load i8* @uc, align 1
572 %219 = zext i8 %218 to i32
573 %220 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
578 %224 = load i8* @uc, align 1
579 %225 = zext i8 %224 to i32
580 %226 = bitcast i8* bitcast (i16* @us to i8*) to i16*
585 %230 = load i8* @uc, align 1
586 %231 = zext i8 %230 to i32
587 %232 = bitcast i8* bitcast (i32* @si to i8*) to i32*
591 %235 = load i8* @uc, align 1
592 %236 = zext i8 %235 to i32
593 %237 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
597 %240 = load i8* @uc, align 1
598 %241 = zext i8 %240 to i64
599 %242 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
603 %245 = load i8* @uc, align 1
604 %246 = zext i8 %245 to i64
605 %247 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
609 %250 = load i8* @uc, align 1
610 %251 = zext i8 %250 to i64
611 %252 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
615 %255 = load i8* @uc, align 1
616 %256 = zext i8 %255 to i64
617 %257 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
621 %260 = load i8* @uc, align 1
622 %261 = zext i8 %260 to i32
623 %262 = trunc i32 %261 to i8
624 %263 = atomicrmw nand i8* @sc, i8 %262 monotonic
625 %264 = xor i8 %263, -1
626 %265 = and i8 %264, %262
627 store i8 %265, i8* @sc, align 1
628 %266 = load i8* @uc, align 1
629 %267 = zext i8 %266 to i32
630 %268 = trunc i32 %267 to i8
631 %269 = atomicrmw nand i8* @uc, i8 %268 monotonic
632 %270 = xor i8 %269, -1
633 %271 = and i8 %270, %268
634 store i8 %271, i8* @uc, align 1
635 %272 = load i8* @uc, align 1
636 %273 = zext i8 %272 to i32
637 %274 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
643 %279 = load i8* @uc, align 1
644 %280 = zext i8 %279 to i32
645 %281 = bitcast i8* bitcast (i16* @us to i8*) to i16*
651 %286 = load i8* @uc, align 1
652 %287 = zext i8 %286 to i32
653 %288 = bitcast i8* bitcast (i32* @si to i8*) to i32*
658 %292 = load i8* @uc, align 1
659 %293 = zext i8 %292 to i32
660 %294 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
665 %298 = load i8* @uc, align 1
666 %299 = zext i8 %298 to i64
667 %300 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
672 %304 = load i8* @uc, align 1
673 %305 = zext i8 %304 to i64
674 %306 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
679 %310 = load i8* @uc, align 1
680 %311 = zext i8 %310 to i64
681 %312 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
686 %316 = load i8* @uc, align 1
687 %317 = zext i8 %316 to i64
688 %318 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
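
The third function (lines 311-688) uses a runtime operand: uc is loaded, widened with zext to the operation's width, truncated back where the destination is i8, and after the atomicrmw the new value is recomputed in registers (add, sub, or, xor, and of the old value with the operand) before being stored. For nand the new value is rebuilt as (~old) & operand (lines 625-626), which matches the pre-GCC-4.4 definition of __sync_nand_and_fetch. A sketch:

    void test_op_and_fetch(void) {
      sc = __sync_add_and_fetch(&sc, uc);   /* atomicrmw add, then old + uc in registers */
      uc = __sync_add_and_fetch(&uc, uc);
      sc = __sync_sub_and_fetch(&sc, uc);
      uc = __sync_sub_and_fetch(&uc, uc);
      /* or, xor, and continue the pattern for every global */
      sc = __sync_nand_and_fetch(&sc, uc);  /* new value computed as (~old) & uc */
    }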
701 %0 = load i8* @sc, align 1
702 %1 = zext i8 %0 to i32
703 %2 = load i8* @uc, align 1
704 %3 = zext i8 %2 to i32
705 %4 = trunc i32 %3 to i8
706 %5 = trunc i32 %1 to i8
707 %6 = cmpxchg i8* @sc, i8 %4, i8 %5 monotonic
708 store i8 %6, i8* @sc, align 1
709 %7 = load i8* @sc, align 1
710 %8 = zext i8 %7 to i32
711 %9 = load i8* @uc, align 1
712 %10 = zext i8 %9 to i32
713 %11 = trunc i32 %10 to i8
714 %12 = trunc i32 %8 to i8
715 %13 = cmpxchg i8* @uc, i8 %11, i8 %12 monotonic
716 store i8 %13, i8* @uc, align 1
717 %14 = load i8* @sc, align 1
718 %15 = sext i8 %14 to i16
720 %17 = load i8* @uc, align 1
721 %18 = zext i8 %17 to i32
722 %19 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
727 %23 = load i8* @sc, align 1
728 %24 = sext i8 %23 to i16
730 %26 = load i8* @uc, align 1
731 %27 = zext i8 %26 to i32
732 %28 = bitcast i8* bitcast (i16* @us to i8*) to i16*
737 %32 = load i8* @sc, align 1
738 %33 = sext i8 %32 to i32
739 %34 = load i8* @uc, align 1
740 %35 = zext i8 %34 to i32
741 %36 = bitcast i8* bitcast (i32* @si to i8*) to i32*
744 %38 = load i8* @sc, align 1
745 %39 = sext i8 %38 to i32
746 %40 = load i8* @uc, align 1
747 %41 = zext i8 %40 to i32
748 %42 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
751 %44 = load i8* @sc, align 1
752 %45 = sext i8 %44 to i64
753 %46 = load i8* @uc, align 1
754 %47 = zext i8 %46 to i64
755 %48 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
758 %50 = load i8* @sc, align 1
759 %51 = sext i8 %50 to i64
760 %52 = load i8* @uc, align 1
761 %53 = zext i8 %52 to i64
762 %54 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
765 %56 = load i8* @sc, align 1
766 %57 = sext i8 %56 to i64
767 %58 = load i8* @uc, align 1
768 %59 = zext i8 %58 to i64
769 %60 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
772 %62 = load i8* @sc, align 1
773 %63 = sext i8 %62 to i64
774 %64 = load i8* @uc, align 1
775 %65 = zext i8 %64 to i64
776 %66 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
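
Lines 701-776 are a value-returning compare-and-swap: the expected value is loaded from uc, the replacement from sc, and the old value returned by cmpxchg is stored back. Note the single-result cmpxchg form here (no failure ordering, no {value, success} pair), pre-LLVM-3.5 syntax. A sketch:

    void test_val_compare_and_swap(void) {
      sc = __sync_val_compare_and_swap(&sc, uc, sc);  /* -> cmpxchg i8* @sc, i8 %4, i8 %5 monotonic */
      uc = __sync_val_compare_and_swap(&uc, uc, sc);
      /* ...and the same for each wider global */
    }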
779 %68 = load i8* @sc, align 1
780 %69 = zext i8 %68 to i32
781 %70 = load i8* @uc, align 1
782 %71 = zext i8 %70 to i32
783 %72 = trunc i32 %71 to i8
784 %73 = trunc i32 %69 to i8
785 %74 = cmpxchg i8* @sc, i8 %72, i8 %73 monotonic
786 %75 = icmp eq i8 %74, %72
787 %76 = zext i1 %75 to i8
788 %77 = zext i8 %76 to i32
790 %78 = load i8* @sc, align 1
791 %79 = zext i8 %78 to i32
792 %80 = load i8* @uc, align 1
793 %81 = zext i8 %80 to i32
794 %82 = trunc i32 %81 to i8
795 %83 = trunc i32 %79 to i8
796 %84 = cmpxchg i8* @uc, i8 %82, i8 %83 monotonic
797 %85 = icmp eq i8 %84, %82
798 %86 = zext i1 %85 to i8
799 %87 = zext i8 %86 to i32
801 %88 = load i8* @sc, align 1
802 %89 = sext i8 %88 to i16
804 %91 = load i8* @uc, align 1
805 %92 = zext i8 %91 to i32
806 %93 = trunc i32 %92 to i8
807 %94 = trunc i32 %90 to i8
808 %95 = cmpxchg i8* bitcast (i16* @ss to i8*), i8 %93, i8 %94 monotonic
809 %96 = icmp eq i8 %95, %93
810 %97 = zext i1 %96 to i8
811 %98 = zext i8 %97 to i32
813 %99 = load i8* @sc, align 1
814 %100 = sext i8 %99 to i16
816 %102 = load i8* @uc, align 1
817 %103 = zext i8 %102 to i32
818 %104 = trunc i32 %103 to i8
819 %105 = trunc i32 %101 to i8
820 %106 = cmpxchg i8* bitcast (i16* @us to i8*), i8 %104, i8 %105 monotonic
821 %107 = icmp eq i8 %106, %104
822 %108 = zext i1 %107 to i8
823 %109 = zext i8 %108 to i32
825 %110 = load i8* @sc, align 1
826 %111 = sext i8 %110 to i32
827 %112 = load i8* @uc, align 1
828 %113 = zext i8 %112 to i32
829 %114 = trunc i32 %113 to i8
830 %115 = trunc i32 %111 to i8
831 %116 = cmpxchg i8* bitcast (i32* @si to i8*), i8 %114, i8 %115 monotonic
832 %117 = icmp eq i8 %116, %114
833 %118 = zext i1 %117 to i8
834 %119 = zext i8 %118 to i32
836 %120 = load i8* @sc, align 1
837 %121 = sext i8 %120 to i32
838 %122 = load i8* @uc, align 1
839 %123 = zext i8 %122 to i32
840 %124 = trunc i32 %123 to i8
841 %125 = trunc i32 %121 to i8
842 %126 = cmpxchg i8* bitcast (i32* @ui to i8*), i8 %124, i8 %125 monotonic
843 %127 = icmp eq i8 %126, %124
844 %128 = zext i1 %127 to i8
845 %129 = zext i8 %128 to i32
847 %130 = load i8* @sc, align 1
848 %131 = sext i8 %130 to i64
849 %132 = load i8* @uc, align 1
850 %133 = zext i8 %132 to i64
851 %134 = trunc i64 %133 to i8
852 %135 = trunc i64 %131 to i8
853 %136 = cmpxchg i8* bitcast (i64* @sl to i8*), i8 %134, i8 %135 monotonic
854 %137 = icmp eq i8 %136, %134
855 %138 = zext i1 %137 to i8
856 %139 = zext i8 %138 to i32
858 %140 = load i8* @sc, align 1
859 %141 = sext i8 %140 to i64
860 %142 = load i8* @uc, align 1
861 %143 = zext i8 %142 to i64
862 %144 = trunc i64 %143 to i8
863 %145 = trunc i64 %141 to i8
864 %146 = cmpxchg i8* bitcast (i64* @ul to i8*), i8 %144, i8 %145 monotonic
865 %147 = icmp eq i8 %146, %144
866 %148 = zext i1 %147 to i8
867 %149 = zext i8 %148 to i32
869 %150 = load i8* @sc, align 1
870 %151 = sext i8 %150 to i64
871 %152 = load i8* @uc, align 1
872 %153 = zext i8 %152 to i64
873 %154 = trunc i64 %153 to i8
874 %155 = trunc i64 %151 to i8
875 %156 = cmpxchg i8* bitcast (i64* @sll to i8*), i8 %154, i8 %155 monotonic
876 %157 = icmp eq i8 %156, %154
877 %158 = zext i1 %157 to i8
878 %159 = zext i8 %158 to i32
880 %160 = load i8* @sc, align 1
881 %161 = sext i8 %160 to i64
882 %162 = load i8* @uc, align 1
883 %163 = zext i8 %162 to i64
884 %164 = trunc i64 %163 to i8
885 %165 = trunc i64 %161 to i8
886 %166 = cmpxchg i8* bitcast (i64* @ull to i8*), i8 %164, i8 %165 monotonic
887 %167 = icmp eq i8 %166, %164
888 %168 = zext i1 %167 to i8
889 %169 = zext i8 %168 to i32
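
Lines 779-889 are the boolean variant: after the cmpxchg, icmp eq compares the returned old value against the expected value, and the i1 result is widened (zext to i8, then to i32), presumably to be stored into an int flag on the omitted lines. Notably, the wider globals are compared-and-swapped through their first byte here, via the bitcast (iN* ... to i8*) addresses. A sketch of the two fully visible cases:

    void test_bool_compare_and_swap(void) {
      ui = __sync_bool_compare_and_swap(&sc, uc, sc);  /* icmp eq %74, %72; zext i1 -> i32 */
      ui = __sync_bool_compare_and_swap(&uc, uc, sc);
    }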
899 %0 = atomicrmw xchg i8* @sc, i8 1 monotonic
900 store i8 %0, i8* @sc, align 1
901 %1 = atomicrmw xchg i8* @uc, i8 1 monotonic
902 store i8 %1, i8* @uc, align 1
903 %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
906 %4 = bitcast i8* bitcast (i16* @us to i8*) to i16*
909 %6 = bitcast i8* bitcast (i32* @si to i8*) to i32*
912 %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
915 %10 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
918 %12 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
921 %14 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
924 %16 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
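
Lines 899-924 exchange each global with the constant 1 and store the old value back, the classic lock-test-and-set shape:

    void test_lock_acquire(void) {              /* hypothetical name */
      sc = __sync_lock_test_and_set(&sc, 1);    /* -> atomicrmw xchg i8* @sc, i8 1 monotonic */
      uc = __sync_lock_test_and_set(&uc, 1);
      /* ...likewise for the wider globals */
    }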
928 store volatile i8 0, i8* @sc, align 1
929 store volatile i8 0, i8* @uc, align 1
930 %18 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
932 %19 = bitcast i8* bitcast (i16* @us to i8*) to i16*
934 %20 = bitcast i8* bitcast (i32* @si to i8*) to i32*
936 %21 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
938 %22 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
940 %23 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
942 %24 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
944 %25 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
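
Finally, lines 928-944 clear each global with a volatile store of 0, consistent with how __sync_lock_release was commonly lowered in this era of LLVM:

    void test_lock_release(void) {      /* hypothetical name */
      __sync_lock_release(&sc);         /* -> store volatile i8 0, i8* @sc, align 1 */
      __sync_lock_release(&uc);
      /* ...likewise for the wider globals via the bitcast addresses */
    }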