; RUN: llc < %s -march=x86-64 > %t
; ModuleID = 'Atomics.c'
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
target triple = "x86_64-apple-darwin8"
@sc = common global i8 0    ; <i8*> [#uses=56]
@uc = common global i8 0    ; <i8*> [#uses=116]
@ss = common global i16 0    ; <i16*> [#uses=15]
@us = common global i16 0    ; <i16*> [#uses=15]
@si = common global i32 0    ; <i32*> [#uses=15]
@ui = common global i32 0    ; <i32*> [#uses=25]
@sl = common global i64 0    ; <i64*> [#uses=15]
@ul = common global i64 0    ; <i64*> [#uses=15]
@sll = common global i64 0    ; <i64*> [#uses=15]
@ull = common global i64 0    ; <i64*> [#uses=15]

; Every result is ignored: each atomic read-modify-write intrinsic is
; invoked purely for its side effect on memory.
define void @test_op_ignore() nounwind {
entry:
  call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 1 )    ; <i8>:0 [#uses=0]
  call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 1 )    ; <i8>:1 [#uses=0]
  bitcast i8* bitcast (i16* @ss to i8*) to i16*    ; <i16*>:2 [#uses=1]
  call i16 @llvm.atomic.load.add.i16.p0i16( i16* %2, i16 1 )    ; <i16>:3 [#uses=0]
  bitcast i8* bitcast (i16* @us to i8*) to i16*    ; <i16*>:4 [#uses=1]
  call i16 @llvm.atomic.load.add.i16.p0i16( i16* %4, i16 1 )    ; <i16>:5 [#uses=0]
  bitcast i8* bitcast (i32* @si to i8*) to i32*    ; <i32*>:6 [#uses=1]
  call i32 @llvm.atomic.load.add.i32.p0i32( i32* %6, i32 1 )    ; <i32>:7 [#uses=0]
  bitcast i8* bitcast (i32* @ui to i8*) to i32*    ; <i32*>:8 [#uses=1]
  call i32 @llvm.atomic.load.add.i32.p0i32( i32* %8, i32 1 )    ; <i32>:9 [#uses=0]
  bitcast i8* bitcast (i64* @sl to i8*) to i64*    ; <i64*>:10 [#uses=1]
  call i64 @llvm.atomic.load.add.i64.p0i64( i64* %10, i64 1 )    ; <i64>:11 [#uses=0]
  bitcast i8* bitcast (i64* @ul to i8*) to i64*    ; <i64*>:12 [#uses=1]
  call i64 @llvm.atomic.load.add.i64.p0i64( i64* %12, i64 1 )    ; <i64>:13 [#uses=0]
  bitcast i8* bitcast (i64* @sll to i8*) to i64*    ; <i64*>:14 [#uses=1]
  call i64 @llvm.atomic.load.add.i64.p0i64( i64* %14, i64 1 )    ; <i64>:15 [#uses=0]
  bitcast i8* bitcast (i64* @ull to i8*) to i64*    ; <i64*>:16 [#uses=1]
  call i64 @llvm.atomic.load.add.i64.p0i64( i64* %16, i64 1 )    ; <i64>:17 [#uses=0]
  call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 1 )    ; <i8>:18 [#uses=0]
  call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 1 )    ; <i8>:19 [#uses=0]
  bitcast i8* bitcast (i16* @ss to i8*) to i16*    ; <i16*>:20 [#uses=1]
  call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %20, i16 1 )    ; <i16>:21 [#uses=0]
  bitcast i8* bitcast (i16* @us to i8*) to i16*    ; <i16*>:22 [#uses=1]
  call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %22, i16 1 )    ; <i16>:23 [#uses=0]
  bitcast i8* bitcast (i32* @si to i8*) to i32*    ; <i32*>:24 [#uses=1]
  call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %24, i32 1 )    ; <i32>:25 [#uses=0]
  bitcast i8* bitcast (i32* @ui to i8*) to i32*    ; <i32*>:26 [#uses=1]
  call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %26, i32 1 )    ; <i32>:27 [#uses=0]
  bitcast i8* bitcast (i64* @sl to i8*) to i64*    ; <i64*>:28 [#uses=1]
  call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %28, i64 1 )    ; <i64>:29 [#uses=0]
  bitcast i8* bitcast (i64* @ul to i8*) to i64*    ; <i64*>:30 [#uses=1]
  call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %30, i64 1 )    ; <i64>:31 [#uses=0]
  bitcast i8* bitcast (i64* @sll to i8*) to i64*    ; <i64*>:32 [#uses=1]
  call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %32, i64 1 )    ; <i64>:33 [#uses=0]
  bitcast i8* bitcast (i64* @ull to i8*) to i64*    ; <i64*>:34 [#uses=1]
  call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %34, i64 1 )    ; <i64>:35 [#uses=0]
  call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 1 )    ; <i8>:36 [#uses=0]
  call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 1 )    ; <i8>:37 [#uses=0]
  bitcast i8* bitcast (i16* @ss to i8*) to i16*    ; <i16*>:38 [#uses=1]
  call i16 @llvm.atomic.load.or.i16.p0i16( i16* %38, i16 1 )    ; <i16>:39 [#uses=0]
  bitcast i8* bitcast (i16* @us to i8*) to i16*    ; <i16*>:40 [#uses=1]
  call i16 @llvm.atomic.load.or.i16.p0i16( i16* %40, i16 1 )    ; <i16>:41 [#uses=0]
  bitcast i8* bitcast (i32* @si to i8*) to i32*    ; <i32*>:42 [#uses=1]
  call i32 @llvm.atomic.load.or.i32.p0i32( i32* %42, i32 1 )    ; <i32>:43 [#uses=0]
  bitcast i8* bitcast (i32* @ui to i8*) to i32*    ; <i32*>:44 [#uses=1]
  call i32 @llvm.atomic.load.or.i32.p0i32( i32* %44, i32 1 )    ; <i32>:45 [#uses=0]
  bitcast i8* bitcast (i64* @sl to i8*) to i64*    ; <i64*>:46 [#uses=1]
  call i64 @llvm.atomic.load.or.i64.p0i64( i64* %46, i64 1 )    ; <i64>:47 [#uses=0]
  bitcast i8* bitcast (i64* @ul to i8*) to i64*    ; <i64*>:48 [#uses=1]
  call i64 @llvm.atomic.load.or.i64.p0i64( i64* %48, i64 1 )    ; <i64>:49 [#uses=0]
  bitcast i8* bitcast (i64* @sll to i8*) to i64*    ; <i64*>:50 [#uses=1]
  call i64 @llvm.atomic.load.or.i64.p0i64( i64* %50, i64 1 )    ; <i64>:51 [#uses=0]
  bitcast i8* bitcast (i64* @ull to i8*) to i64*    ; <i64*>:52 [#uses=1]
  call i64 @llvm.atomic.load.or.i64.p0i64( i64* %52, i64 1 )    ; <i64>:53 [#uses=0]
  call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 1 )    ; <i8>:54 [#uses=0]
  call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 1 )    ; <i8>:55 [#uses=0]
  bitcast i8* bitcast (i16* @ss to i8*) to i16*    ; <i16*>:56 [#uses=1]
  call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %56, i16 1 )    ; <i16>:57 [#uses=0]
  bitcast i8* bitcast (i16* @us to i8*) to i16*    ; <i16*>:58 [#uses=1]
  call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %58, i16 1 )    ; <i16>:59 [#uses=0]
  bitcast i8* bitcast (i32* @si to i8*) to i32*    ; <i32*>:60 [#uses=1]
  call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %60, i32 1 )    ; <i32>:61 [#uses=0]
  bitcast i8* bitcast (i32* @ui to i8*) to i32*    ; <i32*>:62 [#uses=1]
  call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %62, i32 1 )    ; <i32>:63 [#uses=0]
  bitcast i8* bitcast (i64* @sl to i8*) to i64*    ; <i64*>:64 [#uses=1]
  call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %64, i64 1 )    ; <i64>:65 [#uses=0]
  bitcast i8* bitcast (i64* @ul to i8*) to i64*    ; <i64*>:66 [#uses=1]
  call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %66, i64 1 )    ; <i64>:67 [#uses=0]
  bitcast i8* bitcast (i64* @sll to i8*) to i64*    ; <i64*>:68 [#uses=1]
  call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %68, i64 1 )    ; <i64>:69 [#uses=0]
  bitcast i8* bitcast (i64* @ull to i8*) to i64*    ; <i64*>:70 [#uses=1]
  call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %70, i64 1 )    ; <i64>:71 [#uses=0]
  call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 1 )    ; <i8>:72 [#uses=0]
  call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 1 )    ; <i8>:73 [#uses=0]
  bitcast i8* bitcast (i16* @ss to i8*) to i16*    ; <i16*>:74 [#uses=1]
  call i16 @llvm.atomic.load.and.i16.p0i16( i16* %74, i16 1 )    ; <i16>:75 [#uses=0]
  bitcast i8* bitcast (i16* @us to i8*) to i16*    ; <i16*>:76 [#uses=1]
  call i16 @llvm.atomic.load.and.i16.p0i16( i16* %76, i16 1 )    ; <i16>:77 [#uses=0]
  bitcast i8* bitcast (i32* @si to i8*) to i32*    ; <i32*>:78 [#uses=1]
  call i32 @llvm.atomic.load.and.i32.p0i32( i32* %78, i32 1 )    ; <i32>:79 [#uses=0]
  bitcast i8* bitcast (i32* @ui to i8*) to i32*    ; <i32*>:80 [#uses=1]
  call i32 @llvm.atomic.load.and.i32.p0i32( i32* %80, i32 1 )    ; <i32>:81 [#uses=0]
  bitcast i8* bitcast (i64* @sl to i8*) to i64*    ; <i64*>:82 [#uses=1]
  call i64 @llvm.atomic.load.and.i64.p0i64( i64* %82, i64 1 )    ; <i64>:83 [#uses=0]
  bitcast i8* bitcast (i64* @ul to i8*) to i64*    ; <i64*>:84 [#uses=1]
  call i64 @llvm.atomic.load.and.i64.p0i64( i64* %84, i64 1 )    ; <i64>:85 [#uses=0]
  bitcast i8* bitcast (i64* @sll to i8*) to i64*    ; <i64*>:86 [#uses=1]
  call i64 @llvm.atomic.load.and.i64.p0i64( i64* %86, i64 1 )    ; <i64>:87 [#uses=0]
  bitcast i8* bitcast (i64* @ull to i8*) to i64*    ; <i64*>:88 [#uses=1]
  call i64 @llvm.atomic.load.and.i64.p0i64( i64* %88, i64 1 )    ; <i64>:89 [#uses=0]
  call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 1 )    ; <i8>:90 [#uses=0]
  call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 1 )    ; <i8>:91 [#uses=0]
  bitcast i8* bitcast (i16* @ss to i8*) to i16*    ; <i16*>:92 [#uses=1]
  call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %92, i16 1 )    ; <i16>:93 [#uses=0]
  bitcast i8* bitcast (i16* @us to i8*) to i16*    ; <i16*>:94 [#uses=1]
  call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %94, i16 1 )    ; <i16>:95 [#uses=0]
  bitcast i8* bitcast (i32* @si to i8*) to i32*    ; <i32*>:96 [#uses=1]
  call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %96, i32 1 )    ; <i32>:97 [#uses=0]
  bitcast i8* bitcast (i32* @ui to i8*) to i32*    ; <i32*>:98 [#uses=1]
  call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %98, i32 1 )    ; <i32>:99 [#uses=0]
  bitcast i8* bitcast (i64* @sl to i8*) to i64*    ; <i64*>:100 [#uses=1]
  call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %100, i64 1 )    ; <i64>:101 [#uses=0]
  bitcast i8* bitcast (i64* @ul to i8*) to i64*    ; <i64*>:102 [#uses=1]
  call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %102, i64 1 )    ; <i64>:103 [#uses=0]
  bitcast i8* bitcast (i64* @sll to i8*) to i64*    ; <i64*>:104 [#uses=1]
  call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %104, i64 1 )    ; <i64>:105 [#uses=0]
  bitcast i8* bitcast (i64* @ull to i8*) to i64*    ; <i64*>:106 [#uses=1]
  call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %106, i64 1 )    ; <i64>:107 [#uses=0]
  br label %return

return:    ; preds = %entry
  ret void
}

declare i8 @llvm.atomic.load.add.i8.p0i8(i8*, i8) nounwind

declare i16 @llvm.atomic.load.add.i16.p0i16(i16*, i16) nounwind

declare i32 @llvm.atomic.load.add.i32.p0i32(i32*, i32) nounwind

declare i64 @llvm.atomic.load.add.i64.p0i64(i64*, i64) nounwind

declare i8 @llvm.atomic.load.sub.i8.p0i8(i8*, i8) nounwind

declare i16 @llvm.atomic.load.sub.i16.p0i16(i16*, i16) nounwind

declare i32 @llvm.atomic.load.sub.i32.p0i32(i32*, i32) nounwind

declare i64 @llvm.atomic.load.sub.i64.p0i64(i64*, i64) nounwind

declare i8 @llvm.atomic.load.or.i8.p0i8(i8*, i8) nounwind

declare i16 @llvm.atomic.load.or.i16.p0i16(i16*, i16) nounwind

declare i32 @llvm.atomic.load.or.i32.p0i32(i32*, i32) nounwind

declare i64 @llvm.atomic.load.or.i64.p0i64(i64*, i64) nounwind

declare i8 @llvm.atomic.load.xor.i8.p0i8(i8*, i8) nounwind

declare i16 @llvm.atomic.load.xor.i16.p0i16(i16*, i16) nounwind

declare i32 @llvm.atomic.load.xor.i32.p0i32(i32*, i32) nounwind

declare i64 @llvm.atomic.load.xor.i64.p0i64(i64*, i64) nounwind

declare i8 @llvm.atomic.load.and.i8.p0i8(i8*, i8) nounwind

declare i16 @llvm.atomic.load.and.i16.p0i16(i16*, i16) nounwind

declare i32 @llvm.atomic.load.and.i32.p0i32(i32*, i32) nounwind

declare i64 @llvm.atomic.load.and.i64.p0i64(i64*, i64) nounwind
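; The nand declarations below complete the set of read-modify-write
; intrinsics used above; like the others, each returns the value the
; memory location held before the operation.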
declare i8 @llvm.atomic.load.nand.i8.p0i8(i8*, i8) nounwind

declare i16 @llvm.atomic.load.nand.i16.p0i16(i16*, i16) nounwind

declare i32 @llvm.atomic.load.nand.i32.p0i32(i32*, i32) nounwind

declare i64 @llvm.atomic.load.nand.i64.p0i64(i64*, i64) nounwind

; fetch-and-op: each intrinsic returns the value the location held before
; the operation, and that old value is stored back to the matching global.
define void @test_fetch_and_op() nounwind {
entry:
  call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 11 )    ; <i8>:0 [#uses=1]
  store i8 %0, i8* @sc, align 1
  call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 11 )    ; <i8>:1 [#uses=1]
  store i8 %1, i8* @uc, align 1
  bitcast i8* bitcast (i16* @ss to i8*) to i16*    ; <i16*>:2 [#uses=1]
  call i16 @llvm.atomic.load.add.i16.p0i16( i16* %2, i16 11 )    ; <i16>:3 [#uses=1]
  store i16 %3, i16* @ss, align 2
  bitcast i8* bitcast (i16* @us to i8*) to i16*    ; <i16*>:4 [#uses=1]
  call i16 @llvm.atomic.load.add.i16.p0i16( i16* %4, i16 11 )    ; <i16>:5 [#uses=1]
  store i16 %5, i16* @us, align 2
  bitcast i8* bitcast (i32* @si to i8*) to i32*    ; <i32*>:6 [#uses=1]
  call i32 @llvm.atomic.load.add.i32.p0i32( i32* %6, i32 11 )    ; <i32>:7 [#uses=1]
  store i32 %7, i32* @si, align 4
  bitcast i8* bitcast (i32* @ui to i8*) to i32*    ; <i32*>:8 [#uses=1]
  call i32 @llvm.atomic.load.add.i32.p0i32( i32* %8, i32 11 )    ; <i32>:9 [#uses=1]
  store i32 %9, i32* @ui, align 4
  bitcast i8* bitcast (i64* @sl to i8*) to i64*    ; <i64*>:10 [#uses=1]
  call i64 @llvm.atomic.load.add.i64.p0i64( i64* %10, i64 11 )    ; <i64>:11 [#uses=1]
  store i64 %11, i64* @sl, align 8
  bitcast i8* bitcast (i64* @ul to i8*) to i64*    ; <i64*>:12 [#uses=1]
  call i64 @llvm.atomic.load.add.i64.p0i64( i64* %12, i64 11 )    ; <i64>:13 [#uses=1]
  store i64 %13, i64* @ul, align 8
  bitcast i8* bitcast (i64* @sll to i8*) to i64*    ; <i64*>:14 [#uses=1]
  call i64 @llvm.atomic.load.add.i64.p0i64( i64* %14, i64 11 )    ; <i64>:15 [#uses=1]
  store i64 %15, i64* @sll, align 8
  bitcast i8* bitcast (i64* @ull to i8*) to i64*    ; <i64*>:16 [#uses=1]
  call i64 @llvm.atomic.load.add.i64.p0i64( i64* %16, i64 11 )    ; <i64>:17 [#uses=1]
  store i64 %17, i64* @ull, align 8
  call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 11 )    ; <i8>:18 [#uses=1]
  store i8 %18, i8* @sc, align 1
  call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 11 )    ; <i8>:19 [#uses=1]
  store i8 %19, i8* @uc, align 1
  bitcast i8* bitcast (i16* @ss to i8*) to i16*    ; <i16*>:20 [#uses=1]
  call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %20, i16 11 )    ; <i16>:21 [#uses=1]
  store i16 %21, i16* @ss, align 2
  bitcast i8* bitcast (i16* @us to i8*) to i16*    ; <i16*>:22 [#uses=1]
  call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %22, i16 11 )    ; <i16>:23 [#uses=1]
  store i16 %23, i16* @us, align 2
  bitcast i8* bitcast (i32* @si to i8*) to i32*    ; <i32*>:24 [#uses=1]
  call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %24, i32 11 )    ; <i32>:25 [#uses=1]
  store i32 %25, i32* @si, align 4
  bitcast i8* bitcast (i32* @ui to i8*) to i32*    ; <i32*>:26 [#uses=1]
  call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %26, i32 11 )    ; <i32>:27 [#uses=1]
  store i32 %27, i32* @ui, align 4
  bitcast i8* bitcast (i64* @sl to i8*) to i64*    ; <i64*>:28 [#uses=1]
  call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %28, i64 11 )    ; <i64>:29 [#uses=1]
  store i64 %29, i64* @sl, align 8
  bitcast i8* bitcast (i64* @ul to i8*) to i64*    ; <i64*>:30 [#uses=1]
  call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %30, i64 11 )    ; <i64>:31 [#uses=1]
  store i64 %31, i64* @ul, align 8
  bitcast i8* bitcast (i64* @sll to i8*) to i64*    ; <i64*>:32 [#uses=1]
  call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %32, i64 11 )    ; <i64>:33 [#uses=1]
  store i64 %33, i64* @sll, align 8
  bitcast i8* bitcast (i64* @ull to i8*) to i64*    ; <i64*>:34 [#uses=1]
  call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %34, i64 11 )    ; <i64>:35 [#uses=1]
  store i64 %35, i64* @ull, align 8
  call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 11 )    ; <i8>:36 [#uses=1]
  store i8 %36, i8* @sc, align 1
  call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 11 )    ; <i8>:37 [#uses=1]
  store i8 %37, i8* @uc, align 1
  bitcast i8* bitcast (i16* @ss to i8*) to i16*    ; <i16*>:38 [#uses=1]
  call i16 @llvm.atomic.load.or.i16.p0i16( i16* %38, i16 11 )    ; <i16>:39 [#uses=1]
  store i16 %39, i16* @ss, align 2
  bitcast i8* bitcast (i16* @us to i8*) to i16*    ; <i16*>:40 [#uses=1]
  call i16 @llvm.atomic.load.or.i16.p0i16( i16* %40, i16 11 )    ; <i16>:41 [#uses=1]
  store i16 %41, i16* @us, align 2
  bitcast i8* bitcast (i32* @si to i8*) to i32*    ; <i32*>:42 [#uses=1]
  call i32 @llvm.atomic.load.or.i32.p0i32( i32* %42, i32 11 )    ; <i32>:43 [#uses=1]
  store i32 %43, i32* @si, align 4
  bitcast i8* bitcast (i32* @ui to i8*) to i32*    ; <i32*>:44 [#uses=1]
  call i32 @llvm.atomic.load.or.i32.p0i32( i32* %44, i32 11 )    ; <i32>:45 [#uses=1]
  store i32 %45, i32* @ui, align 4
  bitcast i8* bitcast (i64* @sl to i8*) to i64*    ; <i64*>:46 [#uses=1]
  call i64 @llvm.atomic.load.or.i64.p0i64( i64* %46, i64 11 )    ; <i64>:47 [#uses=1]
  store i64 %47, i64* @sl, align 8
  bitcast i8* bitcast (i64* @ul to i8*) to i64*    ; <i64*>:48 [#uses=1]
  call i64 @llvm.atomic.load.or.i64.p0i64( i64* %48, i64 11 )    ; <i64>:49 [#uses=1]
  store i64 %49, i64* @ul, align 8
  bitcast i8* bitcast (i64* @sll to i8*) to i64*    ; <i64*>:50 [#uses=1]
  call i64 @llvm.atomic.load.or.i64.p0i64( i64* %50, i64 11 )    ; <i64>:51 [#uses=1]
  store i64 %51, i64* @sll, align 8
  bitcast i8* bitcast (i64* @ull to i8*) to i64*    ; <i64*>:52 [#uses=1]
  call i64 @llvm.atomic.load.or.i64.p0i64( i64* %52, i64 11 )    ; <i64>:53 [#uses=1]
  store i64 %53, i64* @ull, align 8
  call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 11 )    ; <i8>:54 [#uses=1]
  store i8 %54, i8* @sc, align 1
  call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 11 )    ; <i8>:55 [#uses=1]
  store i8 %55, i8* @uc, align 1
  bitcast i8* bitcast (i16* @ss to i8*) to i16*    ; <i16*>:56 [#uses=1]
  call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %56, i16 11 )    ; <i16>:57 [#uses=1]
  store i16 %57, i16* @ss, align 2
  bitcast i8* bitcast (i16* @us to i8*) to i16*    ; <i16*>:58 [#uses=1]
  call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %58, i16 11 )    ; <i16>:59 [#uses=1]
  store i16 %59, i16* @us, align 2
  bitcast i8* bitcast (i32* @si to i8*) to i32*    ; <i32*>:60 [#uses=1]
  call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %60, i32 11 )    ; <i32>:61 [#uses=1]
  store i32 %61, i32* @si, align 4
  bitcast i8* bitcast (i32* @ui to i8*) to i32*    ; <i32*>:62 [#uses=1]
  call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %62, i32 11 )    ; <i32>:63 [#uses=1]
  store i32 %63, i32* @ui, align 4
  bitcast i8* bitcast (i64* @sl to i8*) to i64*    ; <i64*>:64 [#uses=1]
  call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %64, i64 11 )    ; <i64>:65 [#uses=1]
  store i64 %65, i64* @sl, align 8
  bitcast i8* bitcast (i64* @ul to i8*) to i64*    ; <i64*>:66 [#uses=1]
  call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %66, i64 11 )    ; <i64>:67 [#uses=1]
  store i64 %67, i64* @ul, align 8
  bitcast i8* bitcast (i64* @sll to i8*) to i64*    ; <i64*>:68 [#uses=1]
  call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %68, i64 11 )    ; <i64>:69 [#uses=1]
  store i64 %69, i64* @sll, align 8
  bitcast i8* bitcast (i64* @ull to i8*) to i64*    ; <i64*>:70 [#uses=1]
  call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %70, i64 11 )    ; <i64>:71 [#uses=1]
  store i64 %71, i64* @ull, align 8
  call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 11 )    ; <i8>:72 [#uses=1]
  store i8 %72, i8* @sc, align 1
  call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 11 )    ; <i8>:73 [#uses=1]
  store i8 %73, i8* @uc, align 1
  bitcast i8* bitcast (i16* @ss to i8*) to i16*    ; <i16*>:74 [#uses=1]
  call i16 @llvm.atomic.load.and.i16.p0i16( i16* %74, i16 11 )    ; <i16>:75 [#uses=1]
  store i16 %75, i16* @ss, align 2
  bitcast i8* bitcast (i16* @us to i8*) to i16*    ; <i16*>:76 [#uses=1]
  call i16 @llvm.atomic.load.and.i16.p0i16( i16* %76, i16 11 )    ; <i16>:77 [#uses=1]
  store i16 %77, i16* @us, align 2
  bitcast i8* bitcast (i32* @si to i8*) to i32*    ; <i32*>:78 [#uses=1]
  call i32 @llvm.atomic.load.and.i32.p0i32( i32* %78, i32 11 )    ; <i32>:79 [#uses=1]
  store i32 %79, i32* @si, align 4
  bitcast i8* bitcast (i32* @ui to i8*) to i32*    ; <i32*>:80 [#uses=1]
  call i32 @llvm.atomic.load.and.i32.p0i32( i32* %80, i32 11 )    ; <i32>:81 [#uses=1]
  store i32 %81, i32* @ui, align 4
  bitcast i8* bitcast (i64* @sl to i8*) to i64*    ; <i64*>:82 [#uses=1]
  call i64 @llvm.atomic.load.and.i64.p0i64( i64* %82, i64 11 )    ; <i64>:83 [#uses=1]
  store i64 %83, i64* @sl, align 8
  bitcast i8* bitcast (i64* @ul to i8*) to i64*    ; <i64*>:84 [#uses=1]
  call i64 @llvm.atomic.load.and.i64.p0i64( i64* %84, i64 11 )    ; <i64>:85 [#uses=1]
  store i64 %85, i64* @ul, align 8
  bitcast i8* bitcast (i64* @sll to i8*) to i64*    ; <i64*>:86 [#uses=1]
  call i64 @llvm.atomic.load.and.i64.p0i64( i64* %86, i64 11 )    ; <i64>:87 [#uses=1]
  store i64 %87, i64* @sll, align 8
  bitcast i8* bitcast (i64* @ull to i8*) to i64*    ; <i64*>:88 [#uses=1]
  call i64 @llvm.atomic.load.and.i64.p0i64( i64* %88, i64 11 )    ; <i64>:89 [#uses=1]
  store i64 %89, i64* @ull, align 8
  call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 11 )    ; <i8>:90 [#uses=1]
  store i8 %90, i8* @sc, align 1
  call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 11 )    ; <i8>:91 [#uses=1]
  store i8 %91, i8* @uc, align 1
  bitcast i8* bitcast (i16* @ss to i8*) to i16*    ; <i16*>:92 [#uses=1]
  call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %92, i16 11 )    ; <i16>:93 [#uses=1]
  store i16 %93, i16* @ss, align 2
  bitcast i8* bitcast (i16* @us to i8*) to i16*    ; <i16*>:94 [#uses=1]
  call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %94, i16 11 )    ; <i16>:95 [#uses=1]
  store i16 %95, i16* @us, align 2
  bitcast i8* bitcast (i32* @si to i8*) to i32*    ; <i32*>:96 [#uses=1]
  call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %96, i32 11 )    ; <i32>:97 [#uses=1]
  store i32 %97, i32* @si, align 4
  bitcast i8* bitcast (i32* @ui to i8*) to i32*    ; <i32*>:98 [#uses=1]
  call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %98, i32 11 )    ; <i32>:99 [#uses=1]
  store i32 %99, i32* @ui, align 4
  bitcast i8* bitcast (i64* @sl to i8*) to i64*    ; <i64*>:100 [#uses=1]
  call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %100, i64 11 )    ; <i64>:101 [#uses=1]
  store i64 %101, i64* @sl, align 8
  bitcast i8* bitcast (i64* @ul to i8*) to i64*    ; <i64*>:102 [#uses=1]
  call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %102, i64 11 )    ; <i64>:103 [#uses=1]
  store i64 %103, i64* @ul, align 8
  bitcast i8* bitcast (i64* @sll to i8*) to i64*    ; <i64*>:104 [#uses=1]
  call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %104, i64 11 )    ; <i64>:105 [#uses=1]
  store i64 %105, i64* @sll, align 8
  bitcast i8* bitcast (i64* @ull to i8*) to i64*    ; <i64*>:106 [#uses=1]
  call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %106, i64 11 )    ; <i64>:107 [#uses=1]
  store i64 %107, i64* @ull, align 8
  br label %return

return:    ; preds = %entry
  ret void
}

; op-and-fetch: the post-operation value is reconstructed by reapplying
; the operation to the returned old value, then stored back.
define void @test_op_and_fetch() nounwind {
entry:
  load i8* @uc, align 1    ; <i8>:0 [#uses=1]
  zext i8 %0 to i32    ; <i32>:1 [#uses=1]
  trunc i32 %1 to i8    ; <i8>:2 [#uses=2]
  call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 %2 )    ; <i8>:3 [#uses=1]
  add i8 %3, %2    ; <i8>:4 [#uses=1]
  store i8 %4, i8* @sc, align 1
  load i8* @uc, align 1    ; <i8>:5 [#uses=1]
  zext i8 %5 to i32    ; <i32>:6 [#uses=1]
  trunc i32 %6 to i8    ; <i8>:7 [#uses=2]
  call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 %7 )    ; <i8>:8 [#uses=1]
  add i8 %8, %7    ; <i8>:9 [#uses=1]
  store i8 %9, i8* @uc, align 1
  load i8* @uc, align 1    ; <i8>:10 [#uses=1]
  zext i8 %10 to i32    ; <i32>:11 [#uses=1]
  bitcast i8* bitcast (i16* @ss to i8*) to i16*    ; <i16*>:12 [#uses=1]
  trunc i32 %11 to i16    ; <i16>:13 [#uses=2]
  call i16 @llvm.atomic.load.add.i16.p0i16( i16* %12, i16 %13 )    ; <i16>:14 [#uses=1]
  add i16 %14, %13    ; <i16>:15 [#uses=1]
  store i16 %15, i16* @ss, align 2
  load i8* @uc, align 1    ; <i8>:16 [#uses=1]
  zext i8 %16 to i32    ; <i32>:17 [#uses=1]
  bitcast i8* bitcast (i16* @us to i8*) to i16*    ; <i16*>:18 [#uses=1]
  trunc i32 %17 to i16    ; <i16>:19 [#uses=2]
  call i16 @llvm.atomic.load.add.i16.p0i16( i16* %18, i16 %19 )    ; <i16>:20 [#uses=1]
  add i16 %20, %19    ; <i16>:21 [#uses=1]
  store i16 %21, i16* @us, align 2
  load i8* @uc, align 1    ; <i8>:22 [#uses=1]
  zext i8 %22 to i32    ; <i32>:23 [#uses=2]
  bitcast i8* bitcast (i32* @si to i8*) to i32*    ; <i32*>:24 [#uses=1]
  call i32 @llvm.atomic.load.add.i32.p0i32( i32* %24, i32 %23 )    ; <i32>:25 [#uses=1]
  add i32 %25, %23    ; <i32>:26 [#uses=1]
  store i32 %26, i32* @si, align 4
  load i8* @uc, align 1    ; <i8>:27 [#uses=1]
  zext i8 %27 to i32    ; <i32>:28 [#uses=2]
  bitcast i8* bitcast (i32* @ui to i8*) to i32*    ; <i32*>:29 [#uses=1]
  call i32 @llvm.atomic.load.add.i32.p0i32( i32* %29, i32 %28 )    ; <i32>:30 [#uses=1]
  add i32 %30, %28    ; <i32>:31 [#uses=1]
  store i32 %31, i32* @ui, align 4
  load i8* @uc, align 1    ; <i8>:32 [#uses=1]
  zext i8 %32 to i64    ; <i64>:33 [#uses=2]
  bitcast i8* bitcast (i64* @sl to i8*) to i64*    ; <i64*>:34 [#uses=1]
  call i64 @llvm.atomic.load.add.i64.p0i64( i64* %34, i64 %33 )    ; <i64>:35 [#uses=1]
  add i64 %35, %33    ; <i64>:36 [#uses=1]
  store i64 %36, i64* @sl, align 8
  load i8* @uc, align 1    ; <i8>:37 [#uses=1]
  zext i8 %37 to i64    ; <i64>:38 [#uses=2]
  bitcast i8* bitcast (i64* @ul to i8*) to i64*    ; <i64*>:39 [#uses=1]
  call i64 @llvm.atomic.load.add.i64.p0i64( i64* %39, i64 %38 )    ; <i64>:40 [#uses=1]
  add i64 %40, %38    ; <i64>:41 [#uses=1]
  store i64 %41, i64* @ul, align 8
  load i8* @uc, align 1    ; <i8>:42 [#uses=1]
  zext i8 %42 to i64    ; <i64>:43 [#uses=2]
  bitcast i8* bitcast (i64* @sll to i8*) to i64*    ; <i64*>:44 [#uses=1]
  call i64 @llvm.atomic.load.add.i64.p0i64( i64* %44, i64 %43 )    ; <i64>:45 [#uses=1]
  add i64 %45, %43    ; <i64>:46 [#uses=1]
  store i64 %46, i64* @sll, align 8
  load i8* @uc, align 1    ; <i8>:47 [#uses=1]
  zext i8 %47 to i64    ; <i64>:48 [#uses=2]
  bitcast i8* bitcast (i64* @ull to i8*) to i64*    ; <i64*>:49 [#uses=1]
  call i64 @llvm.atomic.load.add.i64.p0i64( i64* %49, i64 %48 )    ; <i64>:50 [#uses=1]
  add i64 %50, %48    ; <i64>:51 [#uses=1]
  store i64 %51, i64* @ull, align 8
  load i8* @uc, align 1    ; <i8>:52 [#uses=1]
  zext i8 %52 to i32    ; <i32>:53 [#uses=1]
  trunc i32 %53 to i8    ; <i8>:54 [#uses=2]
  call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 %54 )    ; <i8>:55 [#uses=1]
  sub i8 %55, %54    ; <i8>:56 [#uses=1]
  store i8 %56, i8* @sc, align 1
  load i8* @uc, align 1    ; <i8>:57 [#uses=1]
  zext i8 %57 to i32    ; <i32>:58 [#uses=1]
  trunc i32 %58 to i8    ; <i8>:59 [#uses=2]
  call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 %59 )    ; <i8>:60 [#uses=1]
  sub i8 %60, %59    ; <i8>:61 [#uses=1]
  store i8 %61, i8* @uc, align 1
  load i8* @uc, align 1    ; <i8>:62 [#uses=1]
  zext i8 %62 to i32    ; <i32>:63 [#uses=1]
  bitcast i8* bitcast (i16* @ss to i8*) to i16*    ; <i16*>:64 [#uses=1]
  trunc i32 %63 to i16    ; <i16>:65 [#uses=2]
  call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %64, i16 %65 )    ; <i16>:66 [#uses=1]
  sub i16 %66, %65    ; <i16>:67 [#uses=1]
  store i16 %67, i16* @ss, align 2
  load i8* @uc, align 1    ; <i8>:68 [#uses=1]
  zext i8 %68 to i32    ; <i32>:69 [#uses=1]
  bitcast i8* bitcast (i16* @us to i8*) to i16*    ; <i16*>:70 [#uses=1]
  trunc i32 %69 to i16    ; <i16>:71 [#uses=2]
  call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %70, i16 %71 )    ; <i16>:72 [#uses=1]
  sub i16 %72, %71    ; <i16>:73 [#uses=1]
  store i16 %73, i16* @us, align 2
  load i8* @uc, align 1    ; <i8>:74 [#uses=1]
  zext i8 %74 to i32    ; <i32>:75 [#uses=2]
  bitcast i8* bitcast (i32* @si to i8*) to i32*    ; <i32*>:76 [#uses=1]
  call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %76, i32 %75 )    ; <i32>:77 [#uses=1]
  sub i32 %77, %75    ; <i32>:78 [#uses=1]
  store i32 %78, i32* @si, align 4
  load i8* @uc, align 1    ; <i8>:79 [#uses=1]
  zext i8 %79 to i32    ; <i32>:80 [#uses=2]
  bitcast i8* bitcast (i32* @ui to i8*) to i32*    ; <i32*>:81 [#uses=1]
  call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %81, i32 %80 )    ; <i32>:82 [#uses=1]
  sub i32 %82, %80    ; <i32>:83 [#uses=1]
  store i32 %83, i32* @ui, align 4
  load i8* @uc, align 1    ; <i8>:84 [#uses=1]
  zext i8 %84 to i64    ; <i64>:85 [#uses=2]
  bitcast i8* bitcast (i64* @sl to i8*) to i64*    ; <i64*>:86 [#uses=1]
  call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %86, i64 %85 )    ; <i64>:87 [#uses=1]
  sub i64 %87, %85    ; <i64>:88 [#uses=1]
  store i64 %88, i64* @sl, align 8
  load i8* @uc, align 1    ; <i8>:89 [#uses=1]
  zext i8 %89 to i64    ; <i64>:90 [#uses=2]
  bitcast i8* bitcast (i64* @ul to i8*) to i64*    ; <i64*>:91 [#uses=1]
  call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %91, i64 %90 )    ; <i64>:92 [#uses=1]
  sub i64 %92, %90    ; <i64>:93 [#uses=1]
  store i64 %93, i64* @ul, align 8
  load i8* @uc, align 1    ; <i8>:94 [#uses=1]
  zext i8 %94 to i64    ; <i64>:95 [#uses=2]
  bitcast i8* bitcast (i64* @sll to i8*) to i64*    ; <i64*>:96 [#uses=1]
  call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %96, i64 %95 )    ; <i64>:97 [#uses=1]
  sub i64 %97, %95    ; <i64>:98 [#uses=1]
  store i64 %98, i64* @sll, align 8
  load i8* @uc, align 1    ; <i8>:99 [#uses=1]
  zext i8 %99 to i64    ; <i64>:100 [#uses=2]
  bitcast i8* bitcast (i64* @ull to i8*) to i64*    ; <i64*>:101 [#uses=1]
  call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %101, i64 %100 )    ; <i64>:102 [#uses=1]
  sub i64 %102, %100    ; <i64>:103 [#uses=1]
  store i64 %103, i64* @ull, align 8
  load i8* @uc, align 1    ; <i8>:104 [#uses=1]
  zext i8 %104 to i32    ; <i32>:105 [#uses=1]
  trunc i32 %105 to i8    ; <i8>:106 [#uses=2]
  call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 %106 )    ; <i8>:107 [#uses=1]
  or i8 %107, %106    ; <i8>:108 [#uses=1]
  store i8 %108, i8* @sc, align 1
  load i8* @uc, align 1    ; <i8>:109 [#uses=1]
  zext i8 %109 to i32    ; <i32>:110 [#uses=1]
  trunc i32 %110 to i8    ; <i8>:111 [#uses=2]
  call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 %111 )    ; <i8>:112 [#uses=1]
  or i8 %112, %111    ; <i8>:113 [#uses=1]
  store i8 %113, i8* @uc, align 1
  load i8* @uc, align 1    ; <i8>:114 [#uses=1]
  zext i8 %114 to i32    ; <i32>:115 [#uses=1]
  bitcast i8* bitcast (i16* @ss to i8*) to i16*    ; <i16*>:116 [#uses=1]
  trunc i32 %115 to i16    ; <i16>:117 [#uses=2]
  call i16 @llvm.atomic.load.or.i16.p0i16( i16* %116, i16 %117 )    ; <i16>:118 [#uses=1]
  or i16 %118, %117    ; <i16>:119 [#uses=1]
  store i16 %119, i16* @ss, align 2
  load i8* @uc, align 1    ; <i8>:120 [#uses=1]
  zext i8 %120 to i32    ; <i32>:121 [#uses=1]
  bitcast i8* bitcast (i16* @us to i8*) to i16*    ; <i16*>:122 [#uses=1]
  trunc i32 %121 to i16    ; <i16>:123 [#uses=2]
  call i16 @llvm.atomic.load.or.i16.p0i16( i16* %122, i16 %123 )    ; <i16>:124 [#uses=1]
  or i16 %124, %123    ; <i16>:125 [#uses=1]
  store i16 %125, i16* @us, align 2
  load i8* @uc, align 1    ; <i8>:126 [#uses=1]
  zext i8 %126 to i32    ; <i32>:127 [#uses=2]
  bitcast i8* bitcast (i32* @si to i8*) to i32*    ; <i32*>:128 [#uses=1]
  call i32 @llvm.atomic.load.or.i32.p0i32( i32* %128, i32 %127 )    ; <i32>:129 [#uses=1]
  or i32 %129, %127    ; <i32>:130 [#uses=1]
  store i32 %130, i32* @si, align 4
  load i8* @uc, align 1    ; <i8>:131 [#uses=1]
  zext i8 %131 to i32    ; <i32>:132 [#uses=2]
  bitcast i8* bitcast (i32* @ui to i8*) to i32*    ; <i32*>:133 [#uses=1]
  call i32 @llvm.atomic.load.or.i32.p0i32( i32* %133, i32 %132 )    ; <i32>:134 [#uses=1]
  or i32 %134, %132    ; <i32>:135 [#uses=1]
  store i32 %135, i32* @ui, align 4
  load i8* @uc, align 1    ; <i8>:136 [#uses=1]
  zext i8 %136 to i64    ; <i64>:137 [#uses=2]
  bitcast i8* bitcast (i64* @sl to i8*) to i64*    ; <i64*>:138 [#uses=1]
  call i64 @llvm.atomic.load.or.i64.p0i64( i64* %138, i64 %137 )    ; <i64>:139 [#uses=1]
  or i64 %139, %137    ; <i64>:140 [#uses=1]
  store i64 %140, i64* @sl, align 8
  load i8* @uc, align 1    ; <i8>:141 [#uses=1]
  zext i8 %141 to i64    ; <i64>:142 [#uses=2]
  bitcast i8* bitcast (i64* @ul to i8*) to i64*    ; <i64*>:143 [#uses=1]
  call i64 @llvm.atomic.load.or.i64.p0i64( i64* %143, i64 %142 )    ; <i64>:144 [#uses=1]
  or i64 %144, %142    ; <i64>:145 [#uses=1]
  store i64 %145, i64* @ul, align 8
  load i8* @uc, align 1    ; <i8>:146 [#uses=1]
  zext i8 %146 to i64    ; <i64>:147 [#uses=2]
  bitcast i8* bitcast (i64* @sll to i8*) to i64*    ; <i64*>:148 [#uses=1]
  call i64 @llvm.atomic.load.or.i64.p0i64( i64* %148, i64 %147 )    ; <i64>:149 [#uses=1]
  or i64 %149, %147    ; <i64>:150 [#uses=1]
  store i64 %150, i64* @sll, align 8
  load i8* @uc, align 1    ; <i8>:151 [#uses=1]
  zext i8 %151 to i64    ; <i64>:152 [#uses=2]
  bitcast i8* bitcast (i64* @ull to i8*) to i64*    ; <i64*>:153 [#uses=1]
  call i64 @llvm.atomic.load.or.i64.p0i64( i64* %153, i64 %152 )    ; <i64>:154 [#uses=1]
  or i64 %154, %152    ; <i64>:155 [#uses=1]
  store i64 %155, i64* @ull, align 8
  load i8* @uc, align 1    ; <i8>:156 [#uses=1]
  zext i8 %156 to i32    ; <i32>:157 [#uses=1]
  trunc i32 %157 to i8    ; <i8>:158 [#uses=2]
  call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 %158 )    ; <i8>:159 [#uses=1]
  xor i8 %159, %158    ; <i8>:160 [#uses=1]
  store i8 %160, i8* @sc, align 1
  load i8* @uc, align 1    ; <i8>:161 [#uses=1]
  zext i8 %161 to i32    ; <i32>:162 [#uses=1]
  trunc i32 %162 to i8    ; <i8>:163 [#uses=2]
  call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 %163 )    ; <i8>:164 [#uses=1]
  xor i8 %164, %163    ; <i8>:165 [#uses=1]
  store i8 %165, i8* @uc, align 1
  load i8* @uc, align 1    ; <i8>:166 [#uses=1]
  zext i8 %166 to i32    ; <i32>:167 [#uses=1]
  bitcast i8* bitcast (i16* @ss to i8*) to i16*    ; <i16*>:168 [#uses=1]
  trunc i32 %167 to i16    ; <i16>:169 [#uses=2]
  call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %168, i16 %169 )    ; <i16>:170 [#uses=1]
  xor i16 %170, %169    ; <i16>:171 [#uses=1]
  store i16 %171, i16* @ss, align 2
  load i8* @uc, align 1    ; <i8>:172 [#uses=1]
  zext i8 %172 to i32    ; <i32>:173 [#uses=1]
  bitcast i8* bitcast (i16* @us to i8*) to i16*    ; <i16*>:174 [#uses=1]
  trunc i32 %173 to i16    ; <i16>:175 [#uses=2]
  call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %174, i16 %175 )    ; <i16>:176 [#uses=1]
  xor i16 %176, %175    ; <i16>:177 [#uses=1]
  store i16 %177, i16* @us, align 2
  load i8* @uc, align 1    ; <i8>:178 [#uses=1]
  zext i8 %178 to i32    ; <i32>:179 [#uses=2]
  bitcast i8* bitcast (i32* @si to i8*) to i32*    ; <i32*>:180 [#uses=1]
  call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %180, i32 %179 )    ; <i32>:181 [#uses=1]
  xor i32 %181, %179    ; <i32>:182 [#uses=1]
  store i32 %182, i32* @si, align 4
  load i8* @uc, align 1    ; <i8>:183 [#uses=1]
  zext i8 %183 to i32    ; <i32>:184 [#uses=2]
  bitcast i8* bitcast (i32* @ui to i8*) to i32*    ; <i32*>:185 [#uses=1]
  call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %185, i32 %184 )    ; <i32>:186 [#uses=1]
  xor i32 %186, %184    ; <i32>:187 [#uses=1]
  store i32 %187, i32* @ui, align 4
  load i8* @uc, align 1    ; <i8>:188 [#uses=1]
  zext i8 %188 to i64    ; <i64>:189 [#uses=2]
  bitcast i8* bitcast (i64* @sl to i8*) to i64*    ; <i64*>:190 [#uses=1]
  call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %190, i64 %189 )    ; <i64>:191 [#uses=1]
  xor i64 %191, %189    ; <i64>:192 [#uses=1]
  store i64 %192, i64* @sl, align 8
  load i8* @uc, align 1    ; <i8>:193 [#uses=1]
  zext i8 %193 to i64    ; <i64>:194 [#uses=2]
  bitcast i8* bitcast (i64* @ul to i8*) to i64*    ; <i64*>:195 [#uses=1]
  call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %195, i64 %194 )    ; <i64>:196 [#uses=1]
  xor i64 %196, %194    ; <i64>:197 [#uses=1]
  store i64 %197, i64* @ul, align 8
  load i8* @uc, align 1    ; <i8>:198 [#uses=1]
  zext i8 %198 to i64    ; <i64>:199 [#uses=2]
  bitcast i8* bitcast (i64* @sll to i8*) to i64*    ; <i64*>:200 [#uses=1]
  call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %200, i64 %199 )    ; <i64>:201 [#uses=1]
  xor i64 %201, %199    ; <i64>:202 [#uses=1]
  store i64 %202, i64* @sll, align 8
  load i8* @uc, align 1    ; <i8>:203 [#uses=1]
  zext i8 %203 to i64    ; <i64>:204 [#uses=2]
  bitcast i8* bitcast (i64* @ull to i8*) to i64*    ; <i64*>:205 [#uses=1]
  call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %205, i64 %204 )    ; <i64>:206 [#uses=1]
  xor i64 %206, %204    ; <i64>:207 [#uses=1]
  store i64 %207, i64* @ull, align 8
  load i8* @uc, align 1    ; <i8>:208 [#uses=1]
  zext i8 %208 to i32    ; <i32>:209 [#uses=1]
  trunc i32 %209 to i8    ; <i8>:210 [#uses=2]
  call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 %210 )    ; <i8>:211 [#uses=1]
  and i8 %211, %210    ; <i8>:212 [#uses=1]
  store i8 %212, i8* @sc, align 1
  load i8* @uc, align 1    ; <i8>:213 [#uses=1]
  zext i8 %213 to i32    ; <i32>:214 [#uses=1]
  trunc i32 %214 to i8    ; <i8>:215 [#uses=2]
  call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 %215 )    ; <i8>:216 [#uses=1]
  and i8 %216, %215    ; <i8>:217 [#uses=1]
  store i8 %217, i8* @uc, align 1
  load i8* @uc, align 1    ; <i8>:218 [#uses=1]
  zext i8 %218 to i32    ; <i32>:219 [#uses=1]
  bitcast i8* bitcast (i16* @ss to i8*) to i16*    ; <i16*>:220 [#uses=1]
  trunc i32 %219 to i16    ; <i16>:221 [#uses=2]
  call i16 @llvm.atomic.load.and.i16.p0i16( i16* %220, i16 %221 )    ; <i16>:222 [#uses=1]
  and i16 %222, %221    ; <i16>:223 [#uses=1]
  store i16 %223, i16* @ss, align 2
  load i8* @uc, align 1    ; <i8>:224 [#uses=1]
  zext i8 %224 to i32    ; <i32>:225 [#uses=1]
  bitcast i8* bitcast (i16* @us to i8*) to i16*    ; <i16*>:226 [#uses=1]
  trunc i32 %225 to i16    ; <i16>:227 [#uses=2]
  call i16 @llvm.atomic.load.and.i16.p0i16( i16* %226, i16 %227 )    ; <i16>:228 [#uses=1]
  and i16 %228, %227    ; <i16>:229 [#uses=1]
  store i16 %229, i16* @us, align 2
  load i8* @uc, align 1    ; <i8>:230 [#uses=1]
  zext i8 %230 to i32    ; <i32>:231 [#uses=2]
  bitcast i8* bitcast (i32* @si to i8*) to i32*    ; <i32*>:232 [#uses=1]
  call i32 @llvm.atomic.load.and.i32.p0i32( i32* %232, i32 %231 )    ; <i32>:233 [#uses=1]
  and i32 %233, %231    ; <i32>:234 [#uses=1]
  store i32 %234, i32* @si, align 4
  load i8* @uc, align 1    ; <i8>:235 [#uses=1]
  zext i8 %235 to i32    ; <i32>:236 [#uses=2]
  bitcast i8* bitcast (i32* @ui to i8*) to i32*    ; <i32*>:237 [#uses=1]
  call i32 @llvm.atomic.load.and.i32.p0i32( i32* %237, i32 %236 )    ; <i32>:238 [#uses=1]
  and i32 %238, %236    ; <i32>:239 [#uses=1]
  store i32 %239, i32* @ui, align 4
  load i8* @uc, align 1    ; <i8>:240 [#uses=1]
  zext i8 %240 to i64    ; <i64>:241 [#uses=2]
  bitcast i8* bitcast (i64* @sl to i8*) to i64*    ; <i64*>:242 [#uses=1]
  call i64 @llvm.atomic.load.and.i64.p0i64( i64* %242, i64 %241 )    ; <i64>:243 [#uses=1]
  and i64 %243, %241    ; <i64>:244 [#uses=1]
  store i64 %244, i64* @sl, align 8
  load i8* @uc, align 1    ; <i8>:245 [#uses=1]
  zext i8 %245 to i64    ; <i64>:246 [#uses=2]
  bitcast i8* bitcast (i64* @ul to i8*) to i64*    ; <i64*>:247 [#uses=1]
  call i64 @llvm.atomic.load.and.i64.p0i64( i64* %247, i64 %246 )    ; <i64>:248 [#uses=1]
  and i64 %248, %246    ; <i64>:249 [#uses=1]
  store i64 %249, i64* @ul, align 8
  load i8* @uc, align 1    ; <i8>:250 [#uses=1]
  zext i8 %250 to i64    ; <i64>:251 [#uses=2]
  bitcast i8* bitcast (i64* @sll to i8*) to i64*    ; <i64*>:252 [#uses=1]
  call i64 @llvm.atomic.load.and.i64.p0i64( i64* %252, i64 %251 )    ; <i64>:253 [#uses=1]
  and i64 %253, %251    ; <i64>:254 [#uses=1]
  store i64 %254, i64* @sll, align 8
  load i8* @uc, align 1    ; <i8>:255 [#uses=1]
  zext i8 %255 to i64    ; <i64>:256 [#uses=2]
  bitcast i8* bitcast (i64* @ull to i8*) to i64*    ; <i64*>:257 [#uses=1]
  call i64 @llvm.atomic.load.and.i64.p0i64( i64* %257, i64 %256 )    ; <i64>:258 [#uses=1]
  and i64 %258, %256    ; <i64>:259 [#uses=1]
  store i64 %259, i64* @ull, align 8
  load i8* @uc, align 1    ; <i8>:260 [#uses=1]
  zext i8 %260 to i32    ; <i32>:261 [#uses=1]
  trunc i32 %261 to i8    ; <i8>:262 [#uses=2]
  call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 %262 )    ; <i8>:263 [#uses=1]
  xor i8 %263, -1    ; <i8>:264 [#uses=1]
  and i8 %264, %262    ; <i8>:265 [#uses=1]
  store i8 %265, i8* @sc, align 1
  load i8* @uc, align 1    ; <i8>:266 [#uses=1]
  zext i8 %266 to i32    ; <i32>:267 [#uses=1]
  trunc i32 %267 to i8    ; <i8>:268 [#uses=2]
  call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 %268 )    ; <i8>:269 [#uses=1]
  xor i8 %269, -1    ; <i8>:270 [#uses=1]
  and i8 %270, %268    ; <i8>:271 [#uses=1]
  store i8 %271, i8* @uc, align 1
  load i8* @uc, align 1    ; <i8>:272 [#uses=1]
  zext i8 %272 to i32    ; <i32>:273 [#uses=1]
  bitcast i8* bitcast (i16* @ss to i8*) to i16*    ; <i16*>:274 [#uses=1]
  trunc i32 %273 to i16    ; <i16>:275 [#uses=2]
  call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %274, i16 %275 )    ; <i16>:276 [#uses=1]
  xor i16 %276, -1    ; <i16>:277 [#uses=1]
  and i16 %277, %275    ; <i16>:278 [#uses=1]
  store i16 %278, i16* @ss, align 2
  load i8* @uc, align 1    ; <i8>:279 [#uses=1]
  zext i8 %279 to i32    ; <i32>:280 [#uses=1]
  bitcast i8* bitcast (i16* @us to i8*) to i16*    ; <i16*>:281 [#uses=1]
  trunc i32 %280 to i16    ; <i16>:282 [#uses=2]
  call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %281, i16 %282 )    ; <i16>:283 [#uses=1]
  xor i16 %283, -1    ; <i16>:284 [#uses=1]
  and i16 %284, %282    ; <i16>:285 [#uses=1]
  store i16 %285, i16* @us, align 2
  load i8* @uc, align 1    ; <i8>:286 [#uses=1]
  zext i8 %286 to i32    ; <i32>:287 [#uses=2]
  bitcast i8* bitcast (i32* @si to i8*) to i32*    ; <i32*>:288 [#uses=1]
  call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %288, i32 %287 )    ; <i32>:289 [#uses=1]
  xor i32 %289, -1    ; <i32>:290 [#uses=1]
  and i32 %290, %287    ; <i32>:291 [#uses=1]
  store i32 %291, i32* @si, align 4
  load i8* @uc, align 1    ; <i8>:292 [#uses=1]
  zext i8 %292 to i32    ; <i32>:293 [#uses=2]
  bitcast i8* bitcast (i32* @ui to i8*) to i32*    ; <i32*>:294 [#uses=1]
  call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %294, i32 %293 )    ; <i32>:295 [#uses=1]
  xor i32 %295, -1    ; <i32>:296 [#uses=1]
  and i32 %296, %293    ; <i32>:297 [#uses=1]
  store i32 %297, i32* @ui, align 4
  load i8* @uc, align 1    ; <i8>:298 [#uses=1]
  zext i8 %298 to i64    ; <i64>:299 [#uses=2]
  bitcast i8* bitcast (i64* @sl to i8*) to i64*    ; <i64*>:300 [#uses=1]
  call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %300, i64 %299 )    ; <i64>:301 [#uses=1]
  xor i64 %301, -1    ; <i64>:302 [#uses=1]
  and i64 %302, %299    ; <i64>:303 [#uses=1]
  store i64 %303, i64* @sl, align 8
  load i8* @uc, align 1    ; <i8>:304 [#uses=1]
  zext i8 %304 to i64    ; <i64>:305 [#uses=2]
  bitcast i8* bitcast (i64* @ul to i8*) to i64*    ; <i64*>:306 [#uses=1]
  call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %306, i64 %305 )    ; <i64>:307 [#uses=1]
  xor i64 %307, -1    ; <i64>:308 [#uses=1]
  and i64 %308, %305    ; <i64>:309 [#uses=1]
  store i64 %309, i64* @ul, align 8
  load i8* @uc, align 1    ; <i8>:310 [#uses=1]
  zext i8 %310 to i64    ; <i64>:311 [#uses=2]
  bitcast i8* bitcast (i64* @sll to i8*) to i64*    ; <i64*>:312 [#uses=1]
  call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %312, i64 %311 )    ; <i64>:313 [#uses=1]
  xor i64 %313, -1    ; <i64>:314 [#uses=1]
  and i64 %314, %311    ; <i64>:315 [#uses=1]
  store i64 %315, i64* @sll, align 8
  load i8* @uc, align 1    ; <i8>:316 [#uses=1]
  zext i8 %316 to i64    ; <i64>:317 [#uses=2]
  bitcast i8* bitcast (i64* @ull to i8*) to i64*    ; <i64*>:318 [#uses=1]
  call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %318, i64 %317 )    ; <i64>:319 [#uses=1]
  xor i64 %319, -1    ; <i64>:320 [#uses=1]
  and i64 %320, %317    ; <i64>:321 [#uses=1]
  store i64 %321, i64* @ull, align 8
  br label %return

return:    ; preds = %entry
  ret void
}
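; @test_compare_and_swap (below) exercises llvm.atomic.cmp.swap.* in two
; ways: first the returned prior value is stored back directly, then it
; is compared (icmp eq) against the expected operand to recover the
; boolean success result, which is stored to @ui.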
define void @test_compare_and_swap() nounwind {
entry:
  load i8* @sc, align 1    ; <i8>:0 [#uses=1]
  zext i8 %0 to i32    ; <i32>:1 [#uses=1]
  load i8* @uc, align 1    ; <i8>:2 [#uses=1]
  zext i8 %2 to i32    ; <i32>:3 [#uses=1]
  trunc i32 %3 to i8    ; <i8>:4 [#uses=1]
  trunc i32 %1 to i8    ; <i8>:5 [#uses=1]
  call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @sc, i8 %4, i8 %5 )    ; <i8>:6 [#uses=1]
  store i8 %6, i8* @sc, align 1
  load i8* @sc, align 1    ; <i8>:7 [#uses=1]
  zext i8 %7 to i32    ; <i32>:8 [#uses=1]
  load i8* @uc, align 1    ; <i8>:9 [#uses=1]
  zext i8 %9 to i32    ; <i32>:10 [#uses=1]
  trunc i32 %10 to i8    ; <i8>:11 [#uses=1]
  trunc i32 %8 to i8    ; <i8>:12 [#uses=1]
  call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @uc, i8 %11, i8 %12 )    ; <i8>:13 [#uses=1]
  store i8 %13, i8* @uc, align 1
  load i8* @sc, align 1    ; <i8>:14 [#uses=1]
  sext i8 %14 to i16    ; <i16>:15 [#uses=1]
  zext i16 %15 to i32    ; <i32>:16 [#uses=1]
  load i8* @uc, align 1    ; <i8>:17 [#uses=1]
  zext i8 %17 to i32    ; <i32>:18 [#uses=1]
  bitcast i8* bitcast (i16* @ss to i8*) to i16*    ; <i16*>:19 [#uses=1]
  trunc i32 %18 to i16    ; <i16>:20 [#uses=1]
  trunc i32 %16 to i16    ; <i16>:21 [#uses=1]
  call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %19, i16 %20, i16 %21 )    ; <i16>:22 [#uses=1]
  store i16 %22, i16* @ss, align 2
  load i8* @sc, align 1    ; <i8>:23 [#uses=1]
  sext i8 %23 to i16    ; <i16>:24 [#uses=1]
  zext i16 %24 to i32    ; <i32>:25 [#uses=1]
  load i8* @uc, align 1    ; <i8>:26 [#uses=1]
  zext i8 %26 to i32    ; <i32>:27 [#uses=1]
  bitcast i8* bitcast (i16* @us to i8*) to i16*    ; <i16*>:28 [#uses=1]
  trunc i32 %27 to i16    ; <i16>:29 [#uses=1]
  trunc i32 %25 to i16    ; <i16>:30 [#uses=1]
  call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %28, i16 %29, i16 %30 )    ; <i16>:31 [#uses=1]
  store i16 %31, i16* @us, align 2
  load i8* @sc, align 1    ; <i8>:32 [#uses=1]
  sext i8 %32 to i32    ; <i32>:33 [#uses=1]
  load i8* @uc, align 1    ; <i8>:34 [#uses=1]
  zext i8 %34 to i32    ; <i32>:35 [#uses=1]
  bitcast i8* bitcast (i32* @si to i8*) to i32*    ; <i32*>:36 [#uses=1]
  call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %36, i32 %35, i32 %33 )    ; <i32>:37 [#uses=1]
  store i32 %37, i32* @si, align 4
  load i8* @sc, align 1    ; <i8>:38 [#uses=1]
  sext i8 %38 to i32    ; <i32>:39 [#uses=1]
  load i8* @uc, align 1    ; <i8>:40 [#uses=1]
  zext i8 %40 to i32    ; <i32>:41 [#uses=1]
  bitcast i8* bitcast (i32* @ui to i8*) to i32*    ; <i32*>:42 [#uses=1]
  call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %42, i32 %41, i32 %39 )    ; <i32>:43 [#uses=1]
  store i32 %43, i32* @ui, align 4
  load i8* @sc, align 1    ; <i8>:44 [#uses=1]
  sext i8 %44 to i64    ; <i64>:45 [#uses=1]
  load i8* @uc, align 1    ; <i8>:46 [#uses=1]
  zext i8 %46 to i64    ; <i64>:47 [#uses=1]
  bitcast i8* bitcast (i64* @sl to i8*) to i64*    ; <i64*>:48 [#uses=1]
  call i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* %48, i64 %47, i64 %45 )    ; <i64>:49 [#uses=1]
  store i64 %49, i64* @sl, align 8
  load i8* @sc, align 1    ; <i8>:50 [#uses=1]
  sext i8 %50 to i64    ; <i64>:51 [#uses=1]
  load i8* @uc, align 1    ; <i8>:52 [#uses=1]
  zext i8 %52 to i64    ; <i64>:53 [#uses=1]
  bitcast i8* bitcast (i64* @ul to i8*) to i64*    ; <i64*>:54 [#uses=1]
  call i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* %54, i64 %53, i64 %51 )    ; <i64>:55 [#uses=1]
  store i64 %55, i64* @ul, align 8
  load i8* @sc, align 1    ; <i8>:56 [#uses=1]
  sext i8 %56 to i64    ; <i64>:57 [#uses=1]
  load i8* @uc, align 1    ; <i8>:58 [#uses=1]
  zext i8 %58 to i64    ; <i64>:59 [#uses=1]
  bitcast i8* bitcast (i64* @sll to i8*) to i64*    ; <i64*>:60 [#uses=1]
  call i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* %60, i64 %59, i64 %57 )    ; <i64>:61 [#uses=1]
  store i64 %61, i64* @sll, align 8
  load i8* @sc, align 1    ; <i8>:62 [#uses=1]
  sext i8 %62 to i64    ; <i64>:63 [#uses=1]
  load i8* @uc, align 1    ; <i8>:64 [#uses=1]
  zext i8 %64 to i64    ; <i64>:65 [#uses=1]
  bitcast i8* bitcast (i64* @ull to i8*) to i64*    ; <i64*>:66 [#uses=1]
  call i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* %66, i64 %65, i64 %63 )    ; <i64>:67 [#uses=1]
  store i64 %67, i64* @ull, align 8
  load i8* @sc, align 1    ; <i8>:68 [#uses=1]
  zext i8 %68 to i32    ; <i32>:69 [#uses=1]
  load i8* @uc, align 1    ; <i8>:70 [#uses=1]
  zext i8 %70 to i32    ; <i32>:71 [#uses=1]
  trunc i32 %71 to i8    ; <i8>:72 [#uses=2]
  trunc i32 %69 to i8    ; <i8>:73 [#uses=1]
  call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @sc, i8 %72, i8 %73 )    ; <i8>:74 [#uses=1]
  icmp eq i8 %74, %72    ; <i1>:75 [#uses=1]
  zext i1 %75 to i8    ; <i8>:76 [#uses=1]
  zext i8 %76 to i32    ; <i32>:77 [#uses=1]
  store i32 %77, i32* @ui, align 4
  load i8* @sc, align 1    ; <i8>:78 [#uses=1]
  zext i8 %78 to i32    ; <i32>:79 [#uses=1]
  load i8* @uc, align 1    ; <i8>:80 [#uses=1]
  zext i8 %80 to i32    ; <i32>:81 [#uses=1]
  trunc i32 %81 to i8    ; <i8>:82 [#uses=2]
  trunc i32 %79 to i8    ; <i8>:83 [#uses=1]
  call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @uc, i8 %82, i8 %83 )    ; <i8>:84 [#uses=1]
  icmp eq i8 %84, %82    ; <i1>:85 [#uses=1]
  zext i1 %85 to i8    ; <i8>:86 [#uses=1]
  zext i8 %86 to i32    ; <i32>:87 [#uses=1]
  store i32 %87, i32* @ui, align 4
  load i8* @sc, align 1    ; <i8>:88 [#uses=1]
  sext i8 %88 to i16    ; <i16>:89 [#uses=1]
  zext i16 %89 to i32    ; <i32>:90 [#uses=1]
  load i8* @uc, align 1    ; <i8>:91 [#uses=1]
  zext i8 %91 to i32    ; <i32>:92 [#uses=1]
  trunc i32 %92 to i8    ; <i8>:93 [#uses=2]
  trunc i32 %90 to i8    ; <i8>:94 [#uses=1]
  call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i16* @ss to i8*), i8 %93, i8 %94 )    ; <i8>:95 [#uses=1]
  icmp eq i8 %95, %93    ; <i1>:96 [#uses=1]
  zext i1 %96 to i8    ; <i8>:97 [#uses=1]
  zext i8 %97 to i32    ; <i32>:98 [#uses=1]
  store i32 %98, i32* @ui, align 4
  load i8* @sc, align 1    ; <i8>:99 [#uses=1]
  sext i8 %99 to i16    ; <i16>:100 [#uses=1]
  zext i16 %100 to i32    ; <i32>:101 [#uses=1]
  load i8* @uc, align 1    ; <i8>:102 [#uses=1]
  zext i8 %102 to i32    ; <i32>:103 [#uses=1]
  trunc i32 %103 to i8    ; <i8>:104 [#uses=2]
  trunc i32 %101 to i8    ; <i8>:105 [#uses=1]
  call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i16* @us to i8*), i8 %104, i8 %105 )    ; <i8>:106 [#uses=1]
  icmp eq i8 %106, %104    ; <i1>:107 [#uses=1]
  zext i1 %107 to i8    ; <i8>:108 [#uses=1]
  zext i8 %108 to i32    ; <i32>:109 [#uses=1]
  store i32 %109, i32* @ui, align 4
  load i8* @sc, align 1    ; <i8>:110 [#uses=1]
  sext i8 %110 to i32    ; <i32>:111 [#uses=1]
  load i8* @uc, align 1    ; <i8>:112 [#uses=1]
  zext i8 %112 to i32    ; <i32>:113 [#uses=1]
  trunc i32 %113 to i8    ; <i8>:114 [#uses=2]
  trunc i32 %111 to i8    ; <i8>:115 [#uses=1]
  call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i32* @si to i8*), i8 %114, i8 %115 )    ; <i8>:116 [#uses=1]
  icmp eq i8 %116, %114    ; <i1>:117 [#uses=1]
  zext i1 %117 to i8    ; <i8>:118 [#uses=1]
  zext i8 %118 to i32    ; <i32>:119 [#uses=1]
  store i32 %119, i32* @ui, align 4
  load i8* @sc, align 1    ; <i8>:120 [#uses=1]
  sext i8 %120 to i32    ; <i32>:121 [#uses=1]
  load i8* @uc, align 1    ; <i8>:122 [#uses=1]
  zext i8 %122 to i32    ; <i32>:123 [#uses=1]
  trunc i32 %123 to i8    ; <i8>:124 [#uses=2]
  trunc i32 %121 to i8    ; <i8>:125 [#uses=1]
  call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i32* @ui to i8*), i8 %124, i8 %125 )    ; <i8>:126 [#uses=1]
  icmp eq i8 %126, %124    ; <i1>:127 [#uses=1]
  zext i1 %127 to i8    ; <i8>:128 [#uses=1]
  zext i8 %128 to i32    ; <i32>:129 [#uses=1]
  store i32 %129, i32* @ui, align 4
  load i8* @sc, align 1    ; <i8>:130 [#uses=1]
  sext i8 %130 to i64    ; <i64>:131 [#uses=1]
  load i8* @uc, align 1    ; <i8>:132 [#uses=1]
  zext i8 %132 to i64    ; <i64>:133 [#uses=1]
  trunc i64 %133 to i8    ; <i8>:134 [#uses=2]
  trunc i64 %131 to i8    ; <i8>:135 [#uses=1]
  call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i64* @sl to i8*), i8 %134, i8 %135 )    ; <i8>:136 [#uses=1]
  icmp eq i8 %136, %134    ; <i1>:137 [#uses=1]
  zext i1 %137 to i8    ; <i8>:138 [#uses=1]
  zext i8 %138 to i32    ; <i32>:139 [#uses=1]
  store i32 %139, i32* @ui, align 4
  load i8* @sc, align 1    ; <i8>:140 [#uses=1]
  sext i8 %140 to i64    ; <i64>:141 [#uses=1]
  load i8* @uc, align 1    ; <i8>:142 [#uses=1]
  zext i8 %142 to i64    ; <i64>:143 [#uses=1]
  trunc i64 %143 to i8    ; <i8>:144 [#uses=2]
  trunc i64 %141 to i8    ; <i8>:145 [#uses=1]
  call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i64* @ul to i8*), i8 %144, i8 %145 )    ; <i8>:146 [#uses=1]
  icmp eq i8 %146, %144    ; <i1>:147 [#uses=1]
  zext i1 %147 to i8    ; <i8>:148 [#uses=1]
  zext i8 %148 to i32    ; <i32>:149 [#uses=1]
  store i32 %149, i32* @ui, align 4
  load i8* @sc, align 1    ; <i8>:150 [#uses=1]
  sext i8 %150 to i64    ; <i64>:151 [#uses=1]
  load i8* @uc, align 1    ; <i8>:152 [#uses=1]
  zext i8 %152 to i64    ; <i64>:153 [#uses=1]
  trunc i64 %153 to i8    ; <i8>:154 [#uses=2]
  trunc i64 %151 to i8    ; <i8>:155 [#uses=1]
  call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i64* @sll to i8*), i8 %154, i8 %155 )    ; <i8>:156 [#uses=1]
  icmp eq i8 %156, %154    ; <i1>:157 [#uses=1]
  zext i1 %157 to i8    ; <i8>:158 [#uses=1]
  zext i8 %158 to i32    ; <i32>:159 [#uses=1]
  store i32 %159, i32* @ui, align 4
  load i8* @sc, align 1    ; <i8>:160 [#uses=1]
  sext i8 %160 to i64    ; <i64>:161 [#uses=1]
  load i8* @uc, align 1    ; <i8>:162 [#uses=1]
  zext i8 %162 to i64    ; <i64>:163 [#uses=1]
  trunc i64 %163 to i8    ; <i8>:164 [#uses=2]
  trunc i64 %161 to i8    ; <i8>:165 [#uses=1]
  call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* bitcast (i64* @ull to i8*), i8 %164, i8 %165 )    ; <i8>:166 [#uses=1]
  icmp eq i8 %166, %164    ; <i1>:167 [#uses=1]
  zext i1 %167 to i8    ; <i8>:168 [#uses=1]
  zext i8 %168 to i32    ; <i32>:169 [#uses=1]
  store i32 %169, i32* @ui, align 4
  br label %return

return:    ; preds = %entry
  ret void
}

declare i8 @llvm.atomic.cmp.swap.i8.p0i8(i8*, i8, i8) nounwind

declare i16 @llvm.atomic.cmp.swap.i16.p0i16(i16*, i16, i16) nounwind

declare i32 @llvm.atomic.cmp.swap.i32.p0i32(i32*, i32, i32) nounwind

declare i64 @llvm.atomic.cmp.swap.i64.p0i64(i64*, i64, i64) nounwind

; test-and-set then release: atomic swap with 1 at each width, followed
; by a full memory barrier and volatile stores of 0.
define void @test_lock() nounwind {
entry:
  call i8 @llvm.atomic.swap.i8.p0i8( i8* @sc, i8 1 )    ; <i8>:0 [#uses=1]
  store i8 %0, i8* @sc, align 1
  call i8 @llvm.atomic.swap.i8.p0i8( i8* @uc, i8 1 )    ; <i8>:1 [#uses=1]
  store i8 %1, i8* @uc, align 1
  bitcast i8* bitcast (i16* @ss to i8*) to i16*    ; <i16*>:2 [#uses=1]
  call i16 @llvm.atomic.swap.i16.p0i16( i16* %2, i16 1 )    ; <i16>:3 [#uses=1]
  store i16 %3, i16* @ss, align 2
  bitcast i8* bitcast (i16* @us to i8*) to i16*    ; <i16*>:4 [#uses=1]
  call i16 @llvm.atomic.swap.i16.p0i16( i16* %4, i16 1 )    ; <i16>:5 [#uses=1]
  store i16 %5, i16* @us, align 2
  bitcast i8* bitcast (i32* @si to i8*) to i32*    ; <i32*>:6 [#uses=1]
  call i32 @llvm.atomic.swap.i32.p0i32( i32* %6, i32 1 )    ; <i32>:7 [#uses=1]
  store i32 %7, i32* @si, align 4
  bitcast i8* bitcast (i32* @ui to i8*) to i32*    ; <i32*>:8 [#uses=1]
  call i32 @llvm.atomic.swap.i32.p0i32( i32* %8, i32 1 )    ; <i32>:9 [#uses=1]
  store i32 %9, i32* @ui, align 4
  bitcast i8* bitcast (i64* @sl to i8*) to i64*    ; <i64*>:10 [#uses=1]
  call i64 @llvm.atomic.swap.i64.p0i64( i64* %10, i64 1 )    ; <i64>:11 [#uses=1]
  store i64 %11, i64* @sl, align 8
  bitcast i8* bitcast (i64* @ul to i8*) to i64*    ; <i64*>:12 [#uses=1]
  call i64 @llvm.atomic.swap.i64.p0i64( i64* %12, i64 1 )    ; <i64>:13 [#uses=1]
  store i64 %13, i64* @ul, align 8
  bitcast i8* bitcast (i64* @sll to i8*) to i64*    ; <i64*>:14 [#uses=1]
  call i64 @llvm.atomic.swap.i64.p0i64( i64* %14, i64 1 )    ; <i64>:15 [#uses=1]
  store i64 %15, i64* @sll, align 8
  bitcast i8* bitcast (i64* @ull to i8*) to i64*    ; <i64*>:16 [#uses=1]
  call i64 @llvm.atomic.swap.i64.p0i64( i64* %16, i64 1 )    ; <i64>:17 [#uses=1]
  store i64 %17, i64* @ull, align 8
  call void @llvm.memory.barrier( i1 true, i1 true, i1 true, i1 true, i1 false )
  volatile store i8 0, i8* @sc, align 1
  volatile store i8 0, i8* @uc, align 1
  bitcast i8* bitcast (i16* @ss to i8*) to i16*    ; <i16*>:18 [#uses=1]
  volatile store i16 0, i16* %18, align 2
  bitcast i8* bitcast (i16* @us to i8*) to i16*    ; <i16*>:19 [#uses=1]
  volatile store i16 0, i16* %19, align 2
  bitcast i8* bitcast (i32* @si to i8*) to i32*    ; <i32*>:20 [#uses=1]
  volatile store i32 0, i32* %20, align 4
  bitcast i8* bitcast (i32* @ui to i8*) to i32*    ; <i32*>:21 [#uses=1]
  volatile store i32 0, i32* %21, align 4
  bitcast i8* bitcast (i64* @sl to i8*) to i64*    ; <i64*>:22 [#uses=1]
  volatile store i64 0, i64* %22, align 8
  bitcast i8* bitcast (i64* @ul to i8*) to i64*    ; <i64*>:23 [#uses=1]
  volatile store i64 0, i64* %23, align 8
  bitcast i8* bitcast (i64* @sll to i8*) to i64*    ; <i64*>:24 [#uses=1]
  volatile store i64 0, i64* %24, align 8
  bitcast i8* bitcast (i64* @ull to i8*) to i64*    ; <i64*>:25 [#uses=1]
  volatile store i64 0, i64* %25, align 8
  br label %return

return:    ; preds = %entry
  ret void
}

declare i8 @llvm.atomic.swap.i8.p0i8(i8*, i8) nounwind

declare i16 @llvm.atomic.swap.i16.p0i16(i16*, i16) nounwind

declare i32 @llvm.atomic.swap.i32.p0i32(i32*, i32) nounwind

declare i64 @llvm.atomic.swap.i64.p0i64(i64*, i64) nounwind

declare void @llvm.memory.barrier(i1, i1, i1, i1, i1) nounwind