; RUN: llc < %s -march=x86 > %t
;; This version includes 64-bit versions of the binary operators (in 32-bit mode).
;; Swap and compare-and-swap are not yet supported for 64-bit operands in this mode.
; ModuleID = 'Atomics.c'
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i386-apple-darwin8"
@sc = common global i8 0 ; <i8*> [#uses=52]
@uc = common global i8 0 ; <i8*> [#uses=112]
@ss = common global i16 0 ; <i16*> [#uses=15]
@us = common global i16 0 ; <i16*> [#uses=15]
@si = common global i32 0 ; <i32*> [#uses=15]
@ui = common global i32 0 ; <i32*> [#uses=23]
@sl = common global i32 0 ; <i32*> [#uses=15]
@ul = common global i32 0 ; <i32*> [#uses=15]
@sll = common global i64 0, align 8 ; <i64*> [#uses=13]
@ull = common global i64 0, align 8 ; <i64*> [#uses=13]
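
; The ten globals above model the C objects from 'Atomics.c': signed and
; unsigned char, short, int, long, and long long (long is 32-bit on this
; target). As a rough, hypothetical sketch, the test functions below
; correspond to C of the following shape, built with the GCC __sync
; builtins; the exact original source is not part of this test, so this is
; only a commented illustration:
;
;   signed char sc;        unsigned char uc;
;   short ss;              unsigned short us;
;   int si;                unsigned int ui;
;   long sl;               unsigned long ul;
;   long long sll;         unsigned long long ull;
;
;   void test_op_ignore(void) {
;     (void)__sync_fetch_and_add(&sc, 1);  /* result discarded */
;     (void)__sync_fetch_and_add(&uc, 1);
;     /* ...and likewise sub/or/xor/and/nand over every global */
;   }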

define void @test_op_ignore() nounwind {
entry:
  %0 = call i8 @llvm.atomic.load.add.i8.p0i8(i8* @sc, i8 1) ; <i8> [#uses=0]
  %1 = call i8 @llvm.atomic.load.add.i8.p0i8(i8* @uc, i8 1) ; <i8> [#uses=0]
  %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
  %3 = call i16 @llvm.atomic.load.add.i16.p0i16(i16* %2, i16 1) ; <i16> [#uses=0]
  %4 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
  %5 = call i16 @llvm.atomic.load.add.i16.p0i16(i16* %4, i16 1) ; <i16> [#uses=0]
  %6 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
  %7 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %6, i32 1) ; <i32> [#uses=0]
  %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
  %9 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %8, i32 1) ; <i32> [#uses=0]
  %10 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
  %11 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %10, i32 1) ; <i32> [#uses=0]
  %12 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
  %13 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %12, i32 1) ; <i32> [#uses=0]
  %14 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
  %15 = call i64 @llvm.atomic.load.add.i64.p0i64(i64* %14, i64 1) ; <i64> [#uses=0]
  %16 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
  %17 = call i64 @llvm.atomic.load.add.i64.p0i64(i64* %16, i64 1) ; <i64> [#uses=0]
  %18 = call i8 @llvm.atomic.load.sub.i8.p0i8(i8* @sc, i8 1) ; <i8> [#uses=0]
  %19 = call i8 @llvm.atomic.load.sub.i8.p0i8(i8* @uc, i8 1) ; <i8> [#uses=0]
  %20 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
  %21 = call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %20, i16 1) ; <i16> [#uses=0]
  %22 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
  %23 = call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %22, i16 1) ; <i16> [#uses=0]
  %24 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
  %25 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %24, i32 1) ; <i32> [#uses=0]
  %26 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
  %27 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %26, i32 1) ; <i32> [#uses=0]
  %28 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
  %29 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %28, i32 1) ; <i32> [#uses=0]
  %30 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
  %31 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %30, i32 1) ; <i32> [#uses=0]
  %32 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
  %33 = call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %32, i64 1) ; <i64> [#uses=0]
  %34 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
  %35 = call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %34, i64 1) ; <i64> [#uses=0]
  %36 = call i8 @llvm.atomic.load.or.i8.p0i8(i8* @sc, i8 1) ; <i8> [#uses=0]
  %37 = call i8 @llvm.atomic.load.or.i8.p0i8(i8* @uc, i8 1) ; <i8> [#uses=0]
  %38 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
  %39 = call i16 @llvm.atomic.load.or.i16.p0i16(i16* %38, i16 1) ; <i16> [#uses=0]
  %40 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
  %41 = call i16 @llvm.atomic.load.or.i16.p0i16(i16* %40, i16 1) ; <i16> [#uses=0]
  %42 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
  %43 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %42, i32 1) ; <i32> [#uses=0]
  %44 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
  %45 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %44, i32 1) ; <i32> [#uses=0]
  %46 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
  %47 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %46, i32 1) ; <i32> [#uses=0]
  %48 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
  %49 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %48, i32 1) ; <i32> [#uses=0]
  %50 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
  %51 = call i64 @llvm.atomic.load.or.i64.p0i64(i64* %50, i64 1) ; <i64> [#uses=0]
  %52 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
  %53 = call i64 @llvm.atomic.load.or.i64.p0i64(i64* %52, i64 1) ; <i64> [#uses=0]
  %54 = call i8 @llvm.atomic.load.xor.i8.p0i8(i8* @sc, i8 1) ; <i8> [#uses=0]
  %55 = call i8 @llvm.atomic.load.xor.i8.p0i8(i8* @uc, i8 1) ; <i8> [#uses=0]
  %56 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
  %57 = call i16 @llvm.atomic.load.xor.i16.p0i16(i16* %56, i16 1) ; <i16> [#uses=0]
  %58 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
  %59 = call i16 @llvm.atomic.load.xor.i16.p0i16(i16* %58, i16 1) ; <i16> [#uses=0]
  %60 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
  %61 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %60, i32 1) ; <i32> [#uses=0]
  %62 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
  %63 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %62, i32 1) ; <i32> [#uses=0]
  %64 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
  %65 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %64, i32 1) ; <i32> [#uses=0]
  %66 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
  %67 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %66, i32 1) ; <i32> [#uses=0]
  %68 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
  %69 = call i64 @llvm.atomic.load.xor.i64.p0i64(i64* %68, i64 1) ; <i64> [#uses=0]
  %70 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
  %71 = call i64 @llvm.atomic.load.xor.i64.p0i64(i64* %70, i64 1) ; <i64> [#uses=0]
  %72 = call i8 @llvm.atomic.load.and.i8.p0i8(i8* @sc, i8 1) ; <i8> [#uses=0]
  %73 = call i8 @llvm.atomic.load.and.i8.p0i8(i8* @uc, i8 1) ; <i8> [#uses=0]
  %74 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
  %75 = call i16 @llvm.atomic.load.and.i16.p0i16(i16* %74, i16 1) ; <i16> [#uses=0]
  %76 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
  %77 = call i16 @llvm.atomic.load.and.i16.p0i16(i16* %76, i16 1) ; <i16> [#uses=0]
  %78 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
  %79 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %78, i32 1) ; <i32> [#uses=0]
  %80 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
  %81 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %80, i32 1) ; <i32> [#uses=0]
  %82 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
  %83 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %82, i32 1) ; <i32> [#uses=0]
  %84 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
  %85 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %84, i32 1) ; <i32> [#uses=0]
  %86 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
  %87 = call i64 @llvm.atomic.load.and.i64.p0i64(i64* %86, i64 1) ; <i64> [#uses=0]
  %88 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
  %89 = call i64 @llvm.atomic.load.and.i64.p0i64(i64* %88, i64 1) ; <i64> [#uses=0]
  %90 = call i8 @llvm.atomic.load.nand.i8.p0i8(i8* @sc, i8 1) ; <i8> [#uses=0]
  %91 = call i8 @llvm.atomic.load.nand.i8.p0i8(i8* @uc, i8 1) ; <i8> [#uses=0]
  %92 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
  %93 = call i16 @llvm.atomic.load.nand.i16.p0i16(i16* %92, i16 1) ; <i16> [#uses=0]
  %94 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
  %95 = call i16 @llvm.atomic.load.nand.i16.p0i16(i16* %94, i16 1) ; <i16> [#uses=0]
  %96 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
  %97 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %96, i32 1) ; <i32> [#uses=0]
  %98 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
  %99 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %98, i32 1) ; <i32> [#uses=0]
  %100 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
  %101 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %100, i32 1) ; <i32> [#uses=0]
  %102 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
  %103 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %102, i32 1) ; <i32> [#uses=0]
  %104 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
  %105 = call i64 @llvm.atomic.load.nand.i64.p0i64(i64* %104, i64 1) ; <i64> [#uses=0]
  %106 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
  %107 = call i64 @llvm.atomic.load.nand.i64.p0i64(i64* %106, i64 1) ; <i64> [#uses=0]
  br label %return

return: ; preds = %entry
  ret void
}

declare i8 @llvm.atomic.load.add.i8.p0i8(i8*, i8) nounwind

declare i16 @llvm.atomic.load.add.i16.p0i16(i16*, i16) nounwind

declare i32 @llvm.atomic.load.add.i32.p0i32(i32*, i32) nounwind

declare i64 @llvm.atomic.load.add.i64.p0i64(i64*, i64) nounwind

declare i8 @llvm.atomic.load.sub.i8.p0i8(i8*, i8) nounwind

declare i16 @llvm.atomic.load.sub.i16.p0i16(i16*, i16) nounwind

declare i32 @llvm.atomic.load.sub.i32.p0i32(i32*, i32) nounwind

declare i64 @llvm.atomic.load.sub.i64.p0i64(i64*, i64) nounwind

declare i8 @llvm.atomic.load.or.i8.p0i8(i8*, i8) nounwind

declare i16 @llvm.atomic.load.or.i16.p0i16(i16*, i16) nounwind

declare i32 @llvm.atomic.load.or.i32.p0i32(i32*, i32) nounwind

declare i64 @llvm.atomic.load.or.i64.p0i64(i64*, i64) nounwind

declare i8 @llvm.atomic.load.xor.i8.p0i8(i8*, i8) nounwind

declare i16 @llvm.atomic.load.xor.i16.p0i16(i16*, i16) nounwind

declare i32 @llvm.atomic.load.xor.i32.p0i32(i32*, i32) nounwind

declare i64 @llvm.atomic.load.xor.i64.p0i64(i64*, i64) nounwind

declare i8 @llvm.atomic.load.and.i8.p0i8(i8*, i8) nounwind

declare i16 @llvm.atomic.load.and.i16.p0i16(i16*, i16) nounwind

declare i32 @llvm.atomic.load.and.i32.p0i32(i32*, i32) nounwind

declare i64 @llvm.atomic.load.and.i64.p0i64(i64*, i64) nounwind

declare i8 @llvm.atomic.load.nand.i8.p0i8(i8*, i8) nounwind

declare i16 @llvm.atomic.load.nand.i16.p0i16(i16*, i16) nounwind

declare i32 @llvm.atomic.load.nand.i32.p0i32(i32*, i32) nounwind

declare i64 @llvm.atomic.load.nand.i64.p0i64(i64*, i64) nounwind
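
; test_fetch_and_op exercises the fetch-and-op form: each intrinsic returns
; the value the location held *before* the operation, and that old value is
; stored back into the same global. Roughly, in C (a hypothetical
; illustration, not the original source):
;
;   void test_fetch_and_op(void) {
;     sc = __sync_fetch_and_add(&sc, 11);   /* old value is kept */
;     ull = __sync_fetch_and_nand(&ull, 11);
;   }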

define void @test_fetch_and_op() nounwind {
entry:
  %0 = call i8 @llvm.atomic.load.add.i8.p0i8(i8* @sc, i8 11) ; <i8> [#uses=1]
  store i8 %0, i8* @sc, align 1
  %1 = call i8 @llvm.atomic.load.add.i8.p0i8(i8* @uc, i8 11) ; <i8> [#uses=1]
  store i8 %1, i8* @uc, align 1
  %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
  %3 = call i16 @llvm.atomic.load.add.i16.p0i16(i16* %2, i16 11) ; <i16> [#uses=1]
  store i16 %3, i16* @ss, align 2
  %4 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
  %5 = call i16 @llvm.atomic.load.add.i16.p0i16(i16* %4, i16 11) ; <i16> [#uses=1]
  store i16 %5, i16* @us, align 2
  %6 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
  %7 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %6, i32 11) ; <i32> [#uses=1]
  store i32 %7, i32* @si, align 4
  %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
  %9 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %8, i32 11) ; <i32> [#uses=1]
  store i32 %9, i32* @ui, align 4
  %10 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
  %11 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %10, i32 11) ; <i32> [#uses=1]
  store i32 %11, i32* @sl, align 4
  %12 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
  %13 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %12, i32 11) ; <i32> [#uses=1]
  store i32 %13, i32* @ul, align 4
  %14 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
  %15 = call i64 @llvm.atomic.load.add.i64.p0i64(i64* %14, i64 11) ; <i64> [#uses=1]
  store i64 %15, i64* @sll, align 8
  %16 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
  %17 = call i64 @llvm.atomic.load.add.i64.p0i64(i64* %16, i64 11) ; <i64> [#uses=1]
  store i64 %17, i64* @ull, align 8
  %18 = call i8 @llvm.atomic.load.sub.i8.p0i8(i8* @sc, i8 11) ; <i8> [#uses=1]
  store i8 %18, i8* @sc, align 1
  %19 = call i8 @llvm.atomic.load.sub.i8.p0i8(i8* @uc, i8 11) ; <i8> [#uses=1]
  store i8 %19, i8* @uc, align 1
  %20 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
  %21 = call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %20, i16 11) ; <i16> [#uses=1]
  store i16 %21, i16* @ss, align 2
  %22 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
  %23 = call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %22, i16 11) ; <i16> [#uses=1]
  store i16 %23, i16* @us, align 2
  %24 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
  %25 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %24, i32 11) ; <i32> [#uses=1]
  store i32 %25, i32* @si, align 4
  %26 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
  %27 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %26, i32 11) ; <i32> [#uses=1]
  store i32 %27, i32* @ui, align 4
  %28 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
  %29 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %28, i32 11) ; <i32> [#uses=1]
  store i32 %29, i32* @sl, align 4
  %30 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
  %31 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %30, i32 11) ; <i32> [#uses=1]
  store i32 %31, i32* @ul, align 4
  %32 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
  %33 = call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %32, i64 11) ; <i64> [#uses=1]
  store i64 %33, i64* @sll, align 8
  %34 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
  %35 = call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %34, i64 11) ; <i64> [#uses=1]
  store i64 %35, i64* @ull, align 8
  %36 = call i8 @llvm.atomic.load.or.i8.p0i8(i8* @sc, i8 11) ; <i8> [#uses=1]
  store i8 %36, i8* @sc, align 1
  %37 = call i8 @llvm.atomic.load.or.i8.p0i8(i8* @uc, i8 11) ; <i8> [#uses=1]
  store i8 %37, i8* @uc, align 1
  %38 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
  %39 = call i16 @llvm.atomic.load.or.i16.p0i16(i16* %38, i16 11) ; <i16> [#uses=1]
  store i16 %39, i16* @ss, align 2
  %40 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
  %41 = call i16 @llvm.atomic.load.or.i16.p0i16(i16* %40, i16 11) ; <i16> [#uses=1]
  store i16 %41, i16* @us, align 2
  %42 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
  %43 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %42, i32 11) ; <i32> [#uses=1]
  store i32 %43, i32* @si, align 4
  %44 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
  %45 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %44, i32 11) ; <i32> [#uses=1]
  store i32 %45, i32* @ui, align 4
  %46 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
  %47 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %46, i32 11) ; <i32> [#uses=1]
  store i32 %47, i32* @sl, align 4
  %48 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
  %49 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %48, i32 11) ; <i32> [#uses=1]
  store i32 %49, i32* @ul, align 4
  %50 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
  %51 = call i64 @llvm.atomic.load.or.i64.p0i64(i64* %50, i64 11) ; <i64> [#uses=1]
  store i64 %51, i64* @sll, align 8
  %52 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
  %53 = call i64 @llvm.atomic.load.or.i64.p0i64(i64* %52, i64 11) ; <i64> [#uses=1]
  store i64 %53, i64* @ull, align 8
  %54 = call i8 @llvm.atomic.load.xor.i8.p0i8(i8* @sc, i8 11) ; <i8> [#uses=1]
  store i8 %54, i8* @sc, align 1
  %55 = call i8 @llvm.atomic.load.xor.i8.p0i8(i8* @uc, i8 11) ; <i8> [#uses=1]
  store i8 %55, i8* @uc, align 1
  %56 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
  %57 = call i16 @llvm.atomic.load.xor.i16.p0i16(i16* %56, i16 11) ; <i16> [#uses=1]
  store i16 %57, i16* @ss, align 2
  %58 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
  %59 = call i16 @llvm.atomic.load.xor.i16.p0i16(i16* %58, i16 11) ; <i16> [#uses=1]
  store i16 %59, i16* @us, align 2
  %60 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
  %61 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %60, i32 11) ; <i32> [#uses=1]
  store i32 %61, i32* @si, align 4
  %62 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
  %63 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %62, i32 11) ; <i32> [#uses=1]
  store i32 %63, i32* @ui, align 4
  %64 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
  %65 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %64, i32 11) ; <i32> [#uses=1]
  store i32 %65, i32* @sl, align 4
  %66 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
  %67 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %66, i32 11) ; <i32> [#uses=1]
  store i32 %67, i32* @ul, align 4
  %68 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
  %69 = call i64 @llvm.atomic.load.xor.i64.p0i64(i64* %68, i64 11) ; <i64> [#uses=1]
  store i64 %69, i64* @sll, align 8
  %70 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
  %71 = call i64 @llvm.atomic.load.xor.i64.p0i64(i64* %70, i64 11) ; <i64> [#uses=1]
  store i64 %71, i64* @ull, align 8
  %72 = call i8 @llvm.atomic.load.and.i8.p0i8(i8* @sc, i8 11) ; <i8> [#uses=1]
  store i8 %72, i8* @sc, align 1
  %73 = call i8 @llvm.atomic.load.and.i8.p0i8(i8* @uc, i8 11) ; <i8> [#uses=1]
  store i8 %73, i8* @uc, align 1
  %74 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
  %75 = call i16 @llvm.atomic.load.and.i16.p0i16(i16* %74, i16 11) ; <i16> [#uses=1]
  store i16 %75, i16* @ss, align 2
  %76 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
  %77 = call i16 @llvm.atomic.load.and.i16.p0i16(i16* %76, i16 11) ; <i16> [#uses=1]
  store i16 %77, i16* @us, align 2
  %78 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
  %79 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %78, i32 11) ; <i32> [#uses=1]
  store i32 %79, i32* @si, align 4
  %80 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
  %81 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %80, i32 11) ; <i32> [#uses=1]
  store i32 %81, i32* @ui, align 4
  %82 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
  %83 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %82, i32 11) ; <i32> [#uses=1]
  store i32 %83, i32* @sl, align 4
  %84 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
  %85 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %84, i32 11) ; <i32> [#uses=1]
  store i32 %85, i32* @ul, align 4
  %86 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
  %87 = call i64 @llvm.atomic.load.and.i64.p0i64(i64* %86, i64 11) ; <i64> [#uses=1]
  store i64 %87, i64* @sll, align 8
  %88 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
  %89 = call i64 @llvm.atomic.load.and.i64.p0i64(i64* %88, i64 11) ; <i64> [#uses=1]
  store i64 %89, i64* @ull, align 8
  %90 = call i8 @llvm.atomic.load.nand.i8.p0i8(i8* @sc, i8 11) ; <i8> [#uses=1]
  store i8 %90, i8* @sc, align 1
  %91 = call i8 @llvm.atomic.load.nand.i8.p0i8(i8* @uc, i8 11) ; <i8> [#uses=1]
  store i8 %91, i8* @uc, align 1
  %92 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
  %93 = call i16 @llvm.atomic.load.nand.i16.p0i16(i16* %92, i16 11) ; <i16> [#uses=1]
  store i16 %93, i16* @ss, align 2
  %94 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
  %95 = call i16 @llvm.atomic.load.nand.i16.p0i16(i16* %94, i16 11) ; <i16> [#uses=1]
  store i16 %95, i16* @us, align 2
  %96 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
  %97 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %96, i32 11) ; <i32> [#uses=1]
  store i32 %97, i32* @si, align 4
  %98 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
  %99 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %98, i32 11) ; <i32> [#uses=1]
  store i32 %99, i32* @ui, align 4
  %100 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
  %101 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %100, i32 11) ; <i32> [#uses=1]
  store i32 %101, i32* @sl, align 4
  %102 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
  %103 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %102, i32 11) ; <i32> [#uses=1]
  store i32 %103, i32* @ul, align 4
  %104 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
  %105 = call i64 @llvm.atomic.load.nand.i64.p0i64(i64* %104, i64 11) ; <i64> [#uses=1]
  store i64 %105, i64* @sll, align 8
  %106 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
  %107 = call i64 @llvm.atomic.load.nand.i64.p0i64(i64* %106, i64 11) ; <i64> [#uses=1]
  store i64 %107, i64* @ull, align 8
  br label %return

return: ; preds = %entry
  ret void
}
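
; test_op_and_fetch below is the op-and-fetch form: the intrinsics still
; return the old value, so the IR recomputes the new value (add/sub/or/xor/
; and applied to the returned value and the operand loaded from @uc) before
; storing it. For nand the new value is rebuilt as and(xor(old, -1), operand).
; Roughly, in C (hypothetical illustration): sc = __sync_add_and_fetch(&sc, uc);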

define void @test_op_and_fetch() nounwind {
entry:
  %0 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %1 = zext i8 %0 to i32 ; <i32> [#uses=1]
  %2 = trunc i32 %1 to i8 ; <i8> [#uses=2]
  %3 = call i8 @llvm.atomic.load.add.i8.p0i8(i8* @sc, i8 %2) ; <i8> [#uses=1]
  %4 = add i8 %3, %2 ; <i8> [#uses=1]
  store i8 %4, i8* @sc, align 1
  %5 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %6 = zext i8 %5 to i32 ; <i32> [#uses=1]
  %7 = trunc i32 %6 to i8 ; <i8> [#uses=2]
  %8 = call i8 @llvm.atomic.load.add.i8.p0i8(i8* @uc, i8 %7) ; <i8> [#uses=1]
  %9 = add i8 %8, %7 ; <i8> [#uses=1]
  store i8 %9, i8* @uc, align 1
  %10 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %11 = zext i8 %10 to i32 ; <i32> [#uses=1]
  %12 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
  %13 = trunc i32 %11 to i16 ; <i16> [#uses=2]
  %14 = call i16 @llvm.atomic.load.add.i16.p0i16(i16* %12, i16 %13) ; <i16> [#uses=1]
  %15 = add i16 %14, %13 ; <i16> [#uses=1]
  store i16 %15, i16* @ss, align 2
  %16 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %17 = zext i8 %16 to i32 ; <i32> [#uses=1]
  %18 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
  %19 = trunc i32 %17 to i16 ; <i16> [#uses=2]
  %20 = call i16 @llvm.atomic.load.add.i16.p0i16(i16* %18, i16 %19) ; <i16> [#uses=1]
  %21 = add i16 %20, %19 ; <i16> [#uses=1]
  store i16 %21, i16* @us, align 2
  %22 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %23 = zext i8 %22 to i32 ; <i32> [#uses=2]
  %24 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
  %25 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %24, i32 %23) ; <i32> [#uses=1]
  %26 = add i32 %25, %23 ; <i32> [#uses=1]
  store i32 %26, i32* @si, align 4
  %27 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %28 = zext i8 %27 to i32 ; <i32> [#uses=2]
  %29 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
  %30 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %29, i32 %28) ; <i32> [#uses=1]
  %31 = add i32 %30, %28 ; <i32> [#uses=1]
  store i32 %31, i32* @ui, align 4
  %32 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %33 = zext i8 %32 to i32 ; <i32> [#uses=2]
  %34 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
  %35 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %34, i32 %33) ; <i32> [#uses=1]
  %36 = add i32 %35, %33 ; <i32> [#uses=1]
  store i32 %36, i32* @sl, align 4
  %37 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %38 = zext i8 %37 to i32 ; <i32> [#uses=2]
  %39 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
  %40 = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %39, i32 %38) ; <i32> [#uses=1]
  %41 = add i32 %40, %38 ; <i32> [#uses=1]
  store i32 %41, i32* @ul, align 4
  %42 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %43 = zext i8 %42 to i64 ; <i64> [#uses=2]
  %44 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
  %45 = call i64 @llvm.atomic.load.add.i64.p0i64(i64* %44, i64 %43) ; <i64> [#uses=1]
  %46 = add i64 %45, %43 ; <i64> [#uses=1]
  store i64 %46, i64* @sll, align 8
  %47 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %48 = zext i8 %47 to i64 ; <i64> [#uses=2]
  %49 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
  %50 = call i64 @llvm.atomic.load.add.i64.p0i64(i64* %49, i64 %48) ; <i64> [#uses=1]
  %51 = add i64 %50, %48 ; <i64> [#uses=1]
  store i64 %51, i64* @ull, align 8
  %52 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %53 = zext i8 %52 to i32 ; <i32> [#uses=1]
  %54 = trunc i32 %53 to i8 ; <i8> [#uses=2]
  %55 = call i8 @llvm.atomic.load.sub.i8.p0i8(i8* @sc, i8 %54) ; <i8> [#uses=1]
  %56 = sub i8 %55, %54 ; <i8> [#uses=1]
  store i8 %56, i8* @sc, align 1
  %57 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %58 = zext i8 %57 to i32 ; <i32> [#uses=1]
  %59 = trunc i32 %58 to i8 ; <i8> [#uses=2]
  %60 = call i8 @llvm.atomic.load.sub.i8.p0i8(i8* @uc, i8 %59) ; <i8> [#uses=1]
  %61 = sub i8 %60, %59 ; <i8> [#uses=1]
  store i8 %61, i8* @uc, align 1
  %62 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %63 = zext i8 %62 to i32 ; <i32> [#uses=1]
  %64 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
  %65 = trunc i32 %63 to i16 ; <i16> [#uses=2]
  %66 = call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %64, i16 %65) ; <i16> [#uses=1]
  %67 = sub i16 %66, %65 ; <i16> [#uses=1]
  store i16 %67, i16* @ss, align 2
  %68 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %69 = zext i8 %68 to i32 ; <i32> [#uses=1]
  %70 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
  %71 = trunc i32 %69 to i16 ; <i16> [#uses=2]
  %72 = call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %70, i16 %71) ; <i16> [#uses=1]
  %73 = sub i16 %72, %71 ; <i16> [#uses=1]
  store i16 %73, i16* @us, align 2
  %74 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %75 = zext i8 %74 to i32 ; <i32> [#uses=2]
  %76 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
  %77 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %76, i32 %75) ; <i32> [#uses=1]
  %78 = sub i32 %77, %75 ; <i32> [#uses=1]
  store i32 %78, i32* @si, align 4
  %79 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %80 = zext i8 %79 to i32 ; <i32> [#uses=2]
  %81 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
  %82 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %81, i32 %80) ; <i32> [#uses=1]
  %83 = sub i32 %82, %80 ; <i32> [#uses=1]
  store i32 %83, i32* @ui, align 4
  %84 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %85 = zext i8 %84 to i32 ; <i32> [#uses=2]
  %86 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
  %87 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %86, i32 %85) ; <i32> [#uses=1]
  %88 = sub i32 %87, %85 ; <i32> [#uses=1]
  store i32 %88, i32* @sl, align 4
  %89 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %90 = zext i8 %89 to i32 ; <i32> [#uses=2]
  %91 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
  %92 = call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %91, i32 %90) ; <i32> [#uses=1]
  %93 = sub i32 %92, %90 ; <i32> [#uses=1]
  store i32 %93, i32* @ul, align 4
  %94 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %95 = zext i8 %94 to i64 ; <i64> [#uses=2]
  %96 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
  %97 = call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %96, i64 %95) ; <i64> [#uses=1]
  %98 = sub i64 %97, %95 ; <i64> [#uses=1]
  store i64 %98, i64* @sll, align 8
  %99 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %100 = zext i8 %99 to i64 ; <i64> [#uses=2]
  %101 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
  %102 = call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %101, i64 %100) ; <i64> [#uses=1]
  %103 = sub i64 %102, %100 ; <i64> [#uses=1]
  store i64 %103, i64* @ull, align 8
  %104 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %105 = zext i8 %104 to i32 ; <i32> [#uses=1]
  %106 = trunc i32 %105 to i8 ; <i8> [#uses=2]
  %107 = call i8 @llvm.atomic.load.or.i8.p0i8(i8* @sc, i8 %106) ; <i8> [#uses=1]
  %108 = or i8 %107, %106 ; <i8> [#uses=1]
  store i8 %108, i8* @sc, align 1
  %109 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %110 = zext i8 %109 to i32 ; <i32> [#uses=1]
  %111 = trunc i32 %110 to i8 ; <i8> [#uses=2]
  %112 = call i8 @llvm.atomic.load.or.i8.p0i8(i8* @uc, i8 %111) ; <i8> [#uses=1]
  %113 = or i8 %112, %111 ; <i8> [#uses=1]
  store i8 %113, i8* @uc, align 1
  %114 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %115 = zext i8 %114 to i32 ; <i32> [#uses=1]
  %116 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
  %117 = trunc i32 %115 to i16 ; <i16> [#uses=2]
  %118 = call i16 @llvm.atomic.load.or.i16.p0i16(i16* %116, i16 %117) ; <i16> [#uses=1]
  %119 = or i16 %118, %117 ; <i16> [#uses=1]
  store i16 %119, i16* @ss, align 2
  %120 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %121 = zext i8 %120 to i32 ; <i32> [#uses=1]
  %122 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
  %123 = trunc i32 %121 to i16 ; <i16> [#uses=2]
  %124 = call i16 @llvm.atomic.load.or.i16.p0i16(i16* %122, i16 %123) ; <i16> [#uses=1]
  %125 = or i16 %124, %123 ; <i16> [#uses=1]
  store i16 %125, i16* @us, align 2
  %126 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %127 = zext i8 %126 to i32 ; <i32> [#uses=2]
  %128 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
  %129 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %128, i32 %127) ; <i32> [#uses=1]
  %130 = or i32 %129, %127 ; <i32> [#uses=1]
  store i32 %130, i32* @si, align 4
  %131 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %132 = zext i8 %131 to i32 ; <i32> [#uses=2]
  %133 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
  %134 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %133, i32 %132) ; <i32> [#uses=1]
  %135 = or i32 %134, %132 ; <i32> [#uses=1]
  store i32 %135, i32* @ui, align 4
  %136 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %137 = zext i8 %136 to i32 ; <i32> [#uses=2]
  %138 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
  %139 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %138, i32 %137) ; <i32> [#uses=1]
  %140 = or i32 %139, %137 ; <i32> [#uses=1]
  store i32 %140, i32* @sl, align 4
  %141 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %142 = zext i8 %141 to i32 ; <i32> [#uses=2]
  %143 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
  %144 = call i32 @llvm.atomic.load.or.i32.p0i32(i32* %143, i32 %142) ; <i32> [#uses=1]
  %145 = or i32 %144, %142 ; <i32> [#uses=1]
  store i32 %145, i32* @ul, align 4
  %146 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %147 = zext i8 %146 to i64 ; <i64> [#uses=2]
  %148 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
  %149 = call i64 @llvm.atomic.load.or.i64.p0i64(i64* %148, i64 %147) ; <i64> [#uses=1]
  %150 = or i64 %149, %147 ; <i64> [#uses=1]
  store i64 %150, i64* @sll, align 8
  %151 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %152 = zext i8 %151 to i64 ; <i64> [#uses=2]
  %153 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
  %154 = call i64 @llvm.atomic.load.or.i64.p0i64(i64* %153, i64 %152) ; <i64> [#uses=1]
  %155 = or i64 %154, %152 ; <i64> [#uses=1]
  store i64 %155, i64* @ull, align 8
  %156 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %157 = zext i8 %156 to i32 ; <i32> [#uses=1]
  %158 = trunc i32 %157 to i8 ; <i8> [#uses=2]
  %159 = call i8 @llvm.atomic.load.xor.i8.p0i8(i8* @sc, i8 %158) ; <i8> [#uses=1]
  %160 = xor i8 %159, %158 ; <i8> [#uses=1]
  store i8 %160, i8* @sc, align 1
  %161 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %162 = zext i8 %161 to i32 ; <i32> [#uses=1]
  %163 = trunc i32 %162 to i8 ; <i8> [#uses=2]
  %164 = call i8 @llvm.atomic.load.xor.i8.p0i8(i8* @uc, i8 %163) ; <i8> [#uses=1]
  %165 = xor i8 %164, %163 ; <i8> [#uses=1]
  store i8 %165, i8* @uc, align 1
  %166 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %167 = zext i8 %166 to i32 ; <i32> [#uses=1]
  %168 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
  %169 = trunc i32 %167 to i16 ; <i16> [#uses=2]
  %170 = call i16 @llvm.atomic.load.xor.i16.p0i16(i16* %168, i16 %169) ; <i16> [#uses=1]
  %171 = xor i16 %170, %169 ; <i16> [#uses=1]
  store i16 %171, i16* @ss, align 2
  %172 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %173 = zext i8 %172 to i32 ; <i32> [#uses=1]
  %174 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
  %175 = trunc i32 %173 to i16 ; <i16> [#uses=2]
  %176 = call i16 @llvm.atomic.load.xor.i16.p0i16(i16* %174, i16 %175) ; <i16> [#uses=1]
  %177 = xor i16 %176, %175 ; <i16> [#uses=1]
  store i16 %177, i16* @us, align 2
  %178 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %179 = zext i8 %178 to i32 ; <i32> [#uses=2]
  %180 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
  %181 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %180, i32 %179) ; <i32> [#uses=1]
  %182 = xor i32 %181, %179 ; <i32> [#uses=1]
  store i32 %182, i32* @si, align 4
  %183 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %184 = zext i8 %183 to i32 ; <i32> [#uses=2]
  %185 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
  %186 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %185, i32 %184) ; <i32> [#uses=1]
  %187 = xor i32 %186, %184 ; <i32> [#uses=1]
  store i32 %187, i32* @ui, align 4
  %188 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %189 = zext i8 %188 to i32 ; <i32> [#uses=2]
  %190 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
  %191 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %190, i32 %189) ; <i32> [#uses=1]
  %192 = xor i32 %191, %189 ; <i32> [#uses=1]
  store i32 %192, i32* @sl, align 4
  %193 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %194 = zext i8 %193 to i32 ; <i32> [#uses=2]
  %195 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
  %196 = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %195, i32 %194) ; <i32> [#uses=1]
  %197 = xor i32 %196, %194 ; <i32> [#uses=1]
  store i32 %197, i32* @ul, align 4
  %198 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %199 = zext i8 %198 to i64 ; <i64> [#uses=2]
  %200 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
  %201 = call i64 @llvm.atomic.load.xor.i64.p0i64(i64* %200, i64 %199) ; <i64> [#uses=1]
  %202 = xor i64 %201, %199 ; <i64> [#uses=1]
  store i64 %202, i64* @sll, align 8
  %203 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %204 = zext i8 %203 to i64 ; <i64> [#uses=2]
  %205 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
  %206 = call i64 @llvm.atomic.load.xor.i64.p0i64(i64* %205, i64 %204) ; <i64> [#uses=1]
  %207 = xor i64 %206, %204 ; <i64> [#uses=1]
  store i64 %207, i64* @ull, align 8
  %208 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %209 = zext i8 %208 to i32 ; <i32> [#uses=1]
  %210 = trunc i32 %209 to i8 ; <i8> [#uses=2]
  %211 = call i8 @llvm.atomic.load.and.i8.p0i8(i8* @sc, i8 %210) ; <i8> [#uses=1]
  %212 = and i8 %211, %210 ; <i8> [#uses=1]
  store i8 %212, i8* @sc, align 1
  %213 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %214 = zext i8 %213 to i32 ; <i32> [#uses=1]
  %215 = trunc i32 %214 to i8 ; <i8> [#uses=2]
  %216 = call i8 @llvm.atomic.load.and.i8.p0i8(i8* @uc, i8 %215) ; <i8> [#uses=1]
  %217 = and i8 %216, %215 ; <i8> [#uses=1]
  store i8 %217, i8* @uc, align 1
  %218 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %219 = zext i8 %218 to i32 ; <i32> [#uses=1]
  %220 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
  %221 = trunc i32 %219 to i16 ; <i16> [#uses=2]
  %222 = call i16 @llvm.atomic.load.and.i16.p0i16(i16* %220, i16 %221) ; <i16> [#uses=1]
  %223 = and i16 %222, %221 ; <i16> [#uses=1]
  store i16 %223, i16* @ss, align 2
  %224 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %225 = zext i8 %224 to i32 ; <i32> [#uses=1]
  %226 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
  %227 = trunc i32 %225 to i16 ; <i16> [#uses=2]
  %228 = call i16 @llvm.atomic.load.and.i16.p0i16(i16* %226, i16 %227) ; <i16> [#uses=1]
  %229 = and i16 %228, %227 ; <i16> [#uses=1]
  store i16 %229, i16* @us, align 2
  %230 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %231 = zext i8 %230 to i32 ; <i32> [#uses=2]
  %232 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
  %233 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %232, i32 %231) ; <i32> [#uses=1]
  %234 = and i32 %233, %231 ; <i32> [#uses=1]
  store i32 %234, i32* @si, align 4
  %235 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %236 = zext i8 %235 to i32 ; <i32> [#uses=2]
  %237 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
  %238 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %237, i32 %236) ; <i32> [#uses=1]
  %239 = and i32 %238, %236 ; <i32> [#uses=1]
  store i32 %239, i32* @ui, align 4
  %240 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %241 = zext i8 %240 to i32 ; <i32> [#uses=2]
  %242 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
  %243 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %242, i32 %241) ; <i32> [#uses=1]
  %244 = and i32 %243, %241 ; <i32> [#uses=1]
  store i32 %244, i32* @sl, align 4
  %245 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %246 = zext i8 %245 to i32 ; <i32> [#uses=2]
  %247 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
  %248 = call i32 @llvm.atomic.load.and.i32.p0i32(i32* %247, i32 %246) ; <i32> [#uses=1]
  %249 = and i32 %248, %246 ; <i32> [#uses=1]
  store i32 %249, i32* @ul, align 4
  %250 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %251 = zext i8 %250 to i64 ; <i64> [#uses=2]
  %252 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
  %253 = call i64 @llvm.atomic.load.and.i64.p0i64(i64* %252, i64 %251) ; <i64> [#uses=1]
  %254 = and i64 %253, %251 ; <i64> [#uses=1]
  store i64 %254, i64* @sll, align 8
  %255 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %256 = zext i8 %255 to i64 ; <i64> [#uses=2]
  %257 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
  %258 = call i64 @llvm.atomic.load.and.i64.p0i64(i64* %257, i64 %256) ; <i64> [#uses=1]
  %259 = and i64 %258, %256 ; <i64> [#uses=1]
  store i64 %259, i64* @ull, align 8
  %260 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %261 = zext i8 %260 to i32 ; <i32> [#uses=1]
  %262 = trunc i32 %261 to i8 ; <i8> [#uses=2]
  %263 = call i8 @llvm.atomic.load.nand.i8.p0i8(i8* @sc, i8 %262) ; <i8> [#uses=1]
  %264 = xor i8 %263, -1 ; <i8> [#uses=1]
  %265 = and i8 %264, %262 ; <i8> [#uses=1]
  store i8 %265, i8* @sc, align 1
  %266 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %267 = zext i8 %266 to i32 ; <i32> [#uses=1]
  %268 = trunc i32 %267 to i8 ; <i8> [#uses=2]
  %269 = call i8 @llvm.atomic.load.nand.i8.p0i8(i8* @uc, i8 %268) ; <i8> [#uses=1]
  %270 = xor i8 %269, -1 ; <i8> [#uses=1]
  %271 = and i8 %270, %268 ; <i8> [#uses=1]
  store i8 %271, i8* @uc, align 1
  %272 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %273 = zext i8 %272 to i32 ; <i32> [#uses=1]
  %274 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
  %275 = trunc i32 %273 to i16 ; <i16> [#uses=2]
  %276 = call i16 @llvm.atomic.load.nand.i16.p0i16(i16* %274, i16 %275) ; <i16> [#uses=1]
  %277 = xor i16 %276, -1 ; <i16> [#uses=1]
  %278 = and i16 %277, %275 ; <i16> [#uses=1]
  store i16 %278, i16* @ss, align 2
  %279 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %280 = zext i8 %279 to i32 ; <i32> [#uses=1]
  %281 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
  %282 = trunc i32 %280 to i16 ; <i16> [#uses=2]
  %283 = call i16 @llvm.atomic.load.nand.i16.p0i16(i16* %281, i16 %282) ; <i16> [#uses=1]
  %284 = xor i16 %283, -1 ; <i16> [#uses=1]
  %285 = and i16 %284, %282 ; <i16> [#uses=1]
  store i16 %285, i16* @us, align 2
  %286 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %287 = zext i8 %286 to i32 ; <i32> [#uses=2]
  %288 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
  %289 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %288, i32 %287) ; <i32> [#uses=1]
  %290 = xor i32 %289, -1 ; <i32> [#uses=1]
  %291 = and i32 %290, %287 ; <i32> [#uses=1]
  store i32 %291, i32* @si, align 4
  %292 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %293 = zext i8 %292 to i32 ; <i32> [#uses=2]
  %294 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
  %295 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %294, i32 %293) ; <i32> [#uses=1]
  %296 = xor i32 %295, -1 ; <i32> [#uses=1]
  %297 = and i32 %296, %293 ; <i32> [#uses=1]
  store i32 %297, i32* @ui, align 4
  %298 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %299 = zext i8 %298 to i32 ; <i32> [#uses=2]
  %300 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
  %301 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %300, i32 %299) ; <i32> [#uses=1]
  %302 = xor i32 %301, -1 ; <i32> [#uses=1]
  %303 = and i32 %302, %299 ; <i32> [#uses=1]
  store i32 %303, i32* @sl, align 4
  %304 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %305 = zext i8 %304 to i32 ; <i32> [#uses=2]
  %306 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
  %307 = call i32 @llvm.atomic.load.nand.i32.p0i32(i32* %306, i32 %305) ; <i32> [#uses=1]
  %308 = xor i32 %307, -1 ; <i32> [#uses=1]
  %309 = and i32 %308, %305 ; <i32> [#uses=1]
  store i32 %309, i32* @ul, align 4
  %310 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %311 = zext i8 %310 to i64 ; <i64> [#uses=2]
  %312 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
  %313 = call i64 @llvm.atomic.load.nand.i64.p0i64(i64* %312, i64 %311) ; <i64> [#uses=1]
  %314 = xor i64 %313, -1 ; <i64> [#uses=1]
  %315 = and i64 %314, %311 ; <i64> [#uses=1]
  store i64 %315, i64* @sll, align 8
  %316 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %317 = zext i8 %316 to i64 ; <i64> [#uses=2]
  %318 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
  %319 = call i64 @llvm.atomic.load.nand.i64.p0i64(i64* %318, i64 %317) ; <i64> [#uses=1]
  %320 = xor i64 %319, -1 ; <i64> [#uses=1]
  %321 = and i64 %320, %317 ; <i64> [#uses=1]
  store i64 %321, i64* @ull, align 8
  br label %return

return: ; preds = %entry
  ret void
}
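
; test_compare_and_swap below covers both compare-and-swap styles: the "val"
; form stores the intrinsic's result (the old memory contents) back into the
; global, while the "bool" form compares the result against the expected
; operand and stores the zero-extended i1 outcome into @ui. Roughly, in C
; (hypothetical illustration):
;
;   sc = __sync_val_compare_and_swap(&sc, uc, sc);
;   ui = __sync_bool_compare_and_swap(&sc, uc, sc);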

define void @test_compare_and_swap() nounwind {
entry:
  %0 = load i8* @sc, align 1 ; <i8> [#uses=1]
  %1 = zext i8 %0 to i32 ; <i32> [#uses=1]
  %2 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %3 = zext i8 %2 to i32 ; <i32> [#uses=1]
  %4 = trunc i32 %3 to i8 ; <i8> [#uses=1]
  %5 = trunc i32 %1 to i8 ; <i8> [#uses=1]
  %6 = call i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* @sc, i8 %4, i8 %5) ; <i8> [#uses=1]
  store i8 %6, i8* @sc, align 1
  %7 = load i8* @sc, align 1 ; <i8> [#uses=1]
  %8 = zext i8 %7 to i32 ; <i32> [#uses=1]
  %9 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %10 = zext i8 %9 to i32 ; <i32> [#uses=1]
  %11 = trunc i32 %10 to i8 ; <i8> [#uses=1]
  %12 = trunc i32 %8 to i8 ; <i8> [#uses=1]
  %13 = call i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* @uc, i8 %11, i8 %12) ; <i8> [#uses=1]
  store i8 %13, i8* @uc, align 1
  %14 = load i8* @sc, align 1 ; <i8> [#uses=1]
  %15 = sext i8 %14 to i16 ; <i16> [#uses=1]
  %16 = zext i16 %15 to i32 ; <i32> [#uses=1]
  %17 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %18 = zext i8 %17 to i32 ; <i32> [#uses=1]
  %19 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
  %20 = trunc i32 %18 to i16 ; <i16> [#uses=1]
  %21 = trunc i32 %16 to i16 ; <i16> [#uses=1]
  %22 = call i16 @llvm.atomic.cmp.swap.i16.p0i16(i16* %19, i16 %20, i16 %21) ; <i16> [#uses=1]
  store i16 %22, i16* @ss, align 2
  %23 = load i8* @sc, align 1 ; <i8> [#uses=1]
  %24 = sext i8 %23 to i16 ; <i16> [#uses=1]
  %25 = zext i16 %24 to i32 ; <i32> [#uses=1]
  %26 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %27 = zext i8 %26 to i32 ; <i32> [#uses=1]
  %28 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
  %29 = trunc i32 %27 to i16 ; <i16> [#uses=1]
  %30 = trunc i32 %25 to i16 ; <i16> [#uses=1]
  %31 = call i16 @llvm.atomic.cmp.swap.i16.p0i16(i16* %28, i16 %29, i16 %30) ; <i16> [#uses=1]
  store i16 %31, i16* @us, align 2
  %32 = load i8* @sc, align 1 ; <i8> [#uses=1]
  %33 = sext i8 %32 to i32 ; <i32> [#uses=1]
  %34 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %35 = zext i8 %34 to i32 ; <i32> [#uses=1]
  %36 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
  %37 = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %36, i32 %35, i32 %33) ; <i32> [#uses=1]
  store i32 %37, i32* @si, align 4
  %38 = load i8* @sc, align 1 ; <i8> [#uses=1]
  %39 = sext i8 %38 to i32 ; <i32> [#uses=1]
  %40 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %41 = zext i8 %40 to i32 ; <i32> [#uses=1]
  %42 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
  %43 = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %42, i32 %41, i32 %39) ; <i32> [#uses=1]
  store i32 %43, i32* @ui, align 4
  %44 = load i8* @sc, align 1 ; <i8> [#uses=1]
  %45 = sext i8 %44 to i32 ; <i32> [#uses=1]
  %46 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %47 = zext i8 %46 to i32 ; <i32> [#uses=1]
  %48 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
  %49 = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %48, i32 %47, i32 %45) ; <i32> [#uses=1]
  store i32 %49, i32* @sl, align 4
  %50 = load i8* @sc, align 1 ; <i8> [#uses=1]
  %51 = sext i8 %50 to i32 ; <i32> [#uses=1]
  %52 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %53 = zext i8 %52 to i32 ; <i32> [#uses=1]
  %54 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
  %55 = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %54, i32 %53, i32 %51) ; <i32> [#uses=1]
  store i32 %55, i32* @ul, align 4
  %56 = load i8* @sc, align 1 ; <i8> [#uses=1]
  %57 = zext i8 %56 to i32 ; <i32> [#uses=1]
  %58 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %59 = zext i8 %58 to i32 ; <i32> [#uses=1]
  %60 = trunc i32 %59 to i8 ; <i8> [#uses=2]
  %61 = trunc i32 %57 to i8 ; <i8> [#uses=1]
  %62 = call i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* @sc, i8 %60, i8 %61) ; <i8> [#uses=1]
  %63 = icmp eq i8 %62, %60 ; <i1> [#uses=1]
  %64 = zext i1 %63 to i8 ; <i8> [#uses=1]
  %65 = zext i8 %64 to i32 ; <i32> [#uses=1]
  store i32 %65, i32* @ui, align 4
  %66 = load i8* @sc, align 1 ; <i8> [#uses=1]
  %67 = zext i8 %66 to i32 ; <i32> [#uses=1]
  %68 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %69 = zext i8 %68 to i32 ; <i32> [#uses=1]
  %70 = trunc i32 %69 to i8 ; <i8> [#uses=2]
  %71 = trunc i32 %67 to i8 ; <i8> [#uses=1]
  %72 = call i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* @uc, i8 %70, i8 %71) ; <i8> [#uses=1]
  %73 = icmp eq i8 %72, %70 ; <i1> [#uses=1]
  %74 = zext i1 %73 to i8 ; <i8> [#uses=1]
  %75 = zext i8 %74 to i32 ; <i32> [#uses=1]
  store i32 %75, i32* @ui, align 4
  %76 = load i8* @sc, align 1 ; <i8> [#uses=1]
  %77 = sext i8 %76 to i16 ; <i16> [#uses=1]
  %78 = zext i16 %77 to i32 ; <i32> [#uses=1]
  %79 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %80 = zext i8 %79 to i32 ; <i32> [#uses=1]
  %81 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
  %82 = trunc i32 %80 to i16 ; <i16> [#uses=2]
  %83 = trunc i32 %78 to i16 ; <i16> [#uses=1]
  %84 = call i16 @llvm.atomic.cmp.swap.i16.p0i16(i16* %81, i16 %82, i16 %83) ; <i16> [#uses=1]
  %85 = icmp eq i16 %84, %82 ; <i1> [#uses=1]
  %86 = zext i1 %85 to i8 ; <i8> [#uses=1]
  %87 = zext i8 %86 to i32 ; <i32> [#uses=1]
  store i32 %87, i32* @ui, align 4
  %88 = load i8* @sc, align 1 ; <i8> [#uses=1]
  %89 = sext i8 %88 to i16 ; <i16> [#uses=1]
  %90 = zext i16 %89 to i32 ; <i32> [#uses=1]
  %91 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %92 = zext i8 %91 to i32 ; <i32> [#uses=1]
  %93 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
  %94 = trunc i32 %92 to i16 ; <i16> [#uses=2]
  %95 = trunc i32 %90 to i16 ; <i16> [#uses=1]
  %96 = call i16 @llvm.atomic.cmp.swap.i16.p0i16(i16* %93, i16 %94, i16 %95) ; <i16> [#uses=1]
  %97 = icmp eq i16 %96, %94 ; <i1> [#uses=1]
  %98 = zext i1 %97 to i8 ; <i8> [#uses=1]
  %99 = zext i8 %98 to i32 ; <i32> [#uses=1]
  store i32 %99, i32* @ui, align 4
  %100 = load i8* @sc, align 1 ; <i8> [#uses=1]
  %101 = sext i8 %100 to i32 ; <i32> [#uses=1]
  %102 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %103 = zext i8 %102 to i32 ; <i32> [#uses=2]
  %104 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
  %105 = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %104, i32 %103, i32 %101) ; <i32> [#uses=1]
  %106 = icmp eq i32 %105, %103 ; <i1> [#uses=1]
  %107 = zext i1 %106 to i8 ; <i8> [#uses=1]
  %108 = zext i8 %107 to i32 ; <i32> [#uses=1]
  store i32 %108, i32* @ui, align 4
  %109 = load i8* @sc, align 1 ; <i8> [#uses=1]
  %110 = sext i8 %109 to i32 ; <i32> [#uses=1]
  %111 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %112 = zext i8 %111 to i32 ; <i32> [#uses=2]
  %113 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
  %114 = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %113, i32 %112, i32 %110) ; <i32> [#uses=1]
  %115 = icmp eq i32 %114, %112 ; <i1> [#uses=1]
  %116 = zext i1 %115 to i8 ; <i8> [#uses=1]
  %117 = zext i8 %116 to i32 ; <i32> [#uses=1]
  store i32 %117, i32* @ui, align 4
  %118 = load i8* @sc, align 1 ; <i8> [#uses=1]
  %119 = sext i8 %118 to i32 ; <i32> [#uses=1]
  %120 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %121 = zext i8 %120 to i32 ; <i32> [#uses=2]
  %122 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
  %123 = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %122, i32 %121, i32 %119) ; <i32> [#uses=1]
  %124 = icmp eq i32 %123, %121 ; <i1> [#uses=1]
  %125 = zext i1 %124 to i8 ; <i8> [#uses=1]
  %126 = zext i8 %125 to i32 ; <i32> [#uses=1]
  store i32 %126, i32* @ui, align 4
  %127 = load i8* @sc, align 1 ; <i8> [#uses=1]
  %128 = sext i8 %127 to i32 ; <i32> [#uses=1]
  %129 = load i8* @uc, align 1 ; <i8> [#uses=1]
  %130 = zext i8 %129 to i32 ; <i32> [#uses=2]
  %131 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
  %132 = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %131, i32 %130, i32 %128) ; <i32> [#uses=1]
  %133 = icmp eq i32 %132, %130 ; <i1> [#uses=1]
  %134 = zext i1 %133 to i8 ; <i8> [#uses=1]
  %135 = zext i8 %134 to i32 ; <i32> [#uses=1]
  store i32 %135, i32* @ui, align 4
  br label %return

return: ; preds = %entry
  ret void
}

declare i8 @llvm.atomic.cmp.swap.i8.p0i8(i8*, i8, i8) nounwind

declare i16 @llvm.atomic.cmp.swap.i16.p0i16(i16*, i16, i16) nounwind

declare i32 @llvm.atomic.cmp.swap.i32.p0i32(i32*, i32, i32) nounwind
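
; test_lock below exercises the lock primitives: an atomic swap of 1 into
; each location (the __sync_lock_test_and_set pattern), then a full memory
; barrier, then plain volatile stores of 0 (the __sync_lock_release pattern).
; Consistent with the header comment, there is no i64 swap here: the 64-bit
; globals are only released with volatile stores.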

define void @test_lock() nounwind {
entry:
  %0 = call i8 @llvm.atomic.swap.i8.p0i8(i8* @sc, i8 1) ; <i8> [#uses=1]
  store i8 %0, i8* @sc, align 1
  %1 = call i8 @llvm.atomic.swap.i8.p0i8(i8* @uc, i8 1) ; <i8> [#uses=1]
  store i8 %1, i8* @uc, align 1
  %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
  %3 = call i16 @llvm.atomic.swap.i16.p0i16(i16* %2, i16 1) ; <i16> [#uses=1]
  store i16 %3, i16* @ss, align 2
  %4 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
  %5 = call i16 @llvm.atomic.swap.i16.p0i16(i16* %4, i16 1) ; <i16> [#uses=1]
  store i16 %5, i16* @us, align 2
  %6 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
  %7 = call i32 @llvm.atomic.swap.i32.p0i32(i32* %6, i32 1) ; <i32> [#uses=1]
  store i32 %7, i32* @si, align 4
  %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
  %9 = call i32 @llvm.atomic.swap.i32.p0i32(i32* %8, i32 1) ; <i32> [#uses=1]
  store i32 %9, i32* @ui, align 4
  %10 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
  %11 = call i32 @llvm.atomic.swap.i32.p0i32(i32* %10, i32 1) ; <i32> [#uses=1]
  store i32 %11, i32* @sl, align 4
  %12 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
  %13 = call i32 @llvm.atomic.swap.i32.p0i32(i32* %12, i32 1) ; <i32> [#uses=1]
  store i32 %13, i32* @ul, align 4
  call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 false)
  volatile store i8 0, i8* @sc, align 1
  volatile store i8 0, i8* @uc, align 1
  %14 = bitcast i8* bitcast (i16* @ss to i8*) to i16* ; <i16*> [#uses=1]
  volatile store i16 0, i16* %14, align 2
  %15 = bitcast i8* bitcast (i16* @us to i8*) to i16* ; <i16*> [#uses=1]
  volatile store i16 0, i16* %15, align 2
  %16 = bitcast i8* bitcast (i32* @si to i8*) to i32* ; <i32*> [#uses=1]
  volatile store i32 0, i32* %16, align 4
  %17 = bitcast i8* bitcast (i32* @ui to i8*) to i32* ; <i32*> [#uses=1]
  volatile store i32 0, i32* %17, align 4
  %18 = bitcast i8* bitcast (i32* @sl to i8*) to i32* ; <i32*> [#uses=1]
  volatile store i32 0, i32* %18, align 4
  %19 = bitcast i8* bitcast (i32* @ul to i8*) to i32* ; <i32*> [#uses=1]
  volatile store i32 0, i32* %19, align 4
  %20 = bitcast i8* bitcast (i64* @sll to i8*) to i64* ; <i64*> [#uses=1]
  volatile store i64 0, i64* %20, align 8
  %21 = bitcast i8* bitcast (i64* @ull to i8*) to i64* ; <i64*> [#uses=1]
  volatile store i64 0, i64* %21, align 8
  br label %return

return: ; preds = %entry
  ret void
}

declare i8 @llvm.atomic.swap.i8.p0i8(i8*, i8) nounwind

declare i16 @llvm.atomic.swap.i16.p0i16(i16*, i16) nounwind

declare i32 @llvm.atomic.swap.i32.p0i32(i32*, i32) nounwind

declare void @llvm.memory.barrier(i1, i1, i1, i1, i1) nounwind