; This tests parsing NaCl intrinsics not related to atomic operations.

; RUN: %p2i -i %s --insts --args -allow-externally-defined-symbols \
; RUN:   | FileCheck %s
; RUN: %p2i -i %s --args -notranslate -timing \
; RUN:   -allow-externally-defined-symbols | \
; RUN:   FileCheck --check-prefix=NOIR %s

; Declarations of the non-atomic intrinsics exercised by the tests below.
; Note that pointer-typed parameters/results (i8*) appear as i32 in the
; CHECK lines: the parsed output models pointers as i32, and the explicit
; ptrtoint/inttoptr/bitcast casts in the input are elided.
declare i8* @llvm.nacl.read.tp()
declare void @llvm.memcpy.p0i8.p0i8.i32(i8*, i8*, i32, i32, i1)
declare void @llvm.memmove.p0i8.p0i8.i32(i8*, i8*, i32, i32, i1)
declare void @llvm.memset.p0i8.i32(i8*, i8, i32, i32, i1)
declare void @llvm.nacl.longjmp(i8*, i32)
declare i32 @llvm.nacl.setjmp(i8*)
declare float @llvm.sqrt.f32(float)
declare double @llvm.sqrt.f64(double)
declare float @llvm.fabs.f32(float)
declare double @llvm.fabs.f64(double)
declare <4 x float> @llvm.fabs.v4f32(<4 x float>)
declare void @llvm.trap()
declare i16 @llvm.bswap.i16(i16)
declare i32 @llvm.bswap.i32(i32)
declare i64 @llvm.bswap.i64(i64)
declare i32 @llvm.ctlz.i32(i32, i1)
declare i64 @llvm.ctlz.i64(i64, i1)
declare i32 @llvm.cttz.i32(i32, i1)
declare i64 @llvm.cttz.i64(i64, i1)
declare i32 @llvm.ctpop.i32(i32)
declare i64 @llvm.ctpop.i64(i64)
declare i8* @llvm.stacksave()
declare void @llvm.stackrestore(i8*)

define internal i32 @test_nacl_read_tp() {
entry:
  %ptr = call i8* @llvm.nacl.read.tp()
  %__1 = ptrtoint i8* %ptr to i32
  ret i32 %__1
}

; Note: the ptrtoint disappears; %ptr is directly an i32 in the output.
; CHECK:      define internal i32 @test_nacl_read_tp() {
; CHECK-NEXT: entry:
; CHECK-NEXT:   %ptr = call i32 @llvm.nacl.read.tp()
; CHECK-NEXT:   ret i32 %ptr
; CHECK-NEXT: }

define internal void @test_memcpy(i32 %iptr_dst, i32 %iptr_src, i32 %len) {
entry:
  %dst = inttoptr i32 %iptr_dst to i8*
  %src = inttoptr i32 %iptr_src to i8*
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* %src,
                                       i32 %len, i32 1, i1 false)
  ret void
}

; CHECK-NEXT: define internal void @test_memcpy(i32 %iptr_dst, i32 %iptr_src, i32 %len) {
; CHECK-NEXT: entry:
; CHECK-NEXT:   call void @llvm.memcpy.p0i8.p0i8.i32(i32 %iptr_dst, i32 %iptr_src, i32 %len, i32 1, i1 false)
; CHECK-NEXT:   ret void
; CHECK-NEXT: }

define internal void @test_memmove(i32 %iptr_dst, i32 %iptr_src, i32 %len) {
entry:
  %dst = inttoptr i32 %iptr_dst to i8*
  %src = inttoptr i32 %iptr_src to i8*
  call void @llvm.memmove.p0i8.p0i8.i32(i8* %dst, i8* %src,
                                        i32 %len, i32 1, i1 false)
  ret void
}

; CHECK-NEXT: define internal void @test_memmove(i32 %iptr_dst, i32 %iptr_src, i32 %len) {
; CHECK-NEXT: entry:
; CHECK-NEXT:   call void @llvm.memmove.p0i8.p0i8.i32(i32 %iptr_dst, i32 %iptr_src, i32 %len, i32 1, i1 false)
; CHECK-NEXT:   ret void
; CHECK-NEXT: }

define internal void @test_memset(i32 %iptr_dst, i32 %wide_val, i32 %len) {
entry:
  %val = trunc i32 %wide_val to i8
  %dst = inttoptr i32 %iptr_dst to i8*
  call void @llvm.memset.p0i8.i32(i8* %dst, i8 %val,
                                  i32 %len, i32 1, i1 false)
  ret void
}

; CHECK-NEXT: define internal void @test_memset(i32 %iptr_dst, i32 %wide_val, i32 %len) {
; CHECK-NEXT: entry:
; CHECK-NEXT:   %val = trunc i32 %wide_val to i8
; CHECK-NEXT:   call void @llvm.memset.p0i8.i32(i32 %iptr_dst, i8 %val, i32 %len, i32 1, i1 false)
; CHECK-NEXT:   ret void
; CHECK-NEXT: }

define internal i32 @test_setjmplongjmp(i32 %iptr_env) {
entry:
  %env = inttoptr i32 %iptr_env to i8*
  %i = call i32 @llvm.nacl.setjmp(i8* %env)
  %r1 = icmp eq i32 %i, 0
  br i1 %r1, label %Zero, label %NonZero
Zero:
  ; Redundant inttoptr, to make --pnacl cast-eliding/re-insertion happy.
  %env2 = inttoptr i32 %iptr_env to i8*
  call void @llvm.nacl.longjmp(i8* %env2, i32 1)
  ret i32 0
NonZero:
  ret i32 1
}

; CHECK-NEXT: define internal i32 @test_setjmplongjmp(i32 %iptr_env) {
; CHECK-NEXT: entry:
; CHECK-NEXT:   %i = call i32 @llvm.nacl.setjmp(i32 %iptr_env)
; CHECK-NEXT:   %r1 = icmp eq i32 %i, 0
; CHECK-NEXT:   br i1 %r1, label %Zero, label %NonZero
; CHECK-NEXT: Zero:
; CHECK-NEXT:   call void @llvm.nacl.longjmp(i32 %iptr_env, i32 1)
; CHECK-NEXT:   ret i32 0
; CHECK-NEXT: NonZero:
; CHECK-NEXT:   ret i32 1
; CHECK-NEXT: }

define internal float @test_sqrt_float(float %x, i32 %iptr) {
entry:
  %r = call float @llvm.sqrt.f32(float %x)
  %r2 = call float @llvm.sqrt.f32(float %r)
  %r3 = call float @llvm.sqrt.f32(float -0.0)
  %r4 = fadd float %r2, %r3
  ret float %r4
}

; Note: FP immediates are printed in exponent form (-0.0 -> -0.000000e+00).
; CHECK-NEXT: define internal float @test_sqrt_float(float %x, i32 %iptr) {
; CHECK-NEXT: entry:
; CHECK-NEXT:   %r = call float @llvm.sqrt.f32(float %x)
; CHECK-NEXT:   %r2 = call float @llvm.sqrt.f32(float %r)
; CHECK-NEXT:   %r3 = call float @llvm.sqrt.f32(float -0.000000e+00)
; CHECK-NEXT:   %r4 = fadd float %r2, %r3
; CHECK-NEXT:   ret float %r4
; CHECK-NEXT: }

define internal double @test_sqrt_double(double %x, i32 %iptr) {
entry:
  %r = call double @llvm.sqrt.f64(double %x)
  %r2 = call double @llvm.sqrt.f64(double %r)
  %r3 = call double @llvm.sqrt.f64(double -0.0)
  %r4 = fadd double %r2, %r3
  ret double %r4
}

; CHECK-NEXT: define internal double @test_sqrt_double(double %x, i32 %iptr) {
; CHECK-NEXT: entry:
; CHECK-NEXT:   %r = call double @llvm.sqrt.f64(double %x)
; CHECK-NEXT:   %r2 = call double @llvm.sqrt.f64(double %r)
; CHECK-NEXT:   %r3 = call double @llvm.sqrt.f64(double -0.000000e+00)
; CHECK-NEXT:   %r4 = fadd double %r2, %r3
; CHECK-NEXT:   ret double %r4
; CHECK-NEXT: }

define internal float @test_fabs_float(float %x) {
entry:
  %r = call float @llvm.fabs.f32(float %x)
  %r2 = call float @llvm.fabs.f32(float %r)
  %r3 = call float @llvm.fabs.f32(float -0.0)
  %r4 = fadd float %r2, %r3
  ret float %r4
}

; CHECK-NEXT: define internal float @test_fabs_float(float %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT:   %r = call float @llvm.fabs.f32(float %x)
; CHECK-NEXT:   %r2 = call float @llvm.fabs.f32(float %r)
; CHECK-NEXT:   %r3 = call float @llvm.fabs.f32(float -0.000000e+00)
; CHECK-NEXT:   %r4 = fadd float %r2, %r3
; CHECK-NEXT:   ret float %r4
; CHECK-NEXT: }

define internal double @test_fabs_double(double %x) {
entry:
  %r = call double @llvm.fabs.f64(double %x)
  %r2 = call double @llvm.fabs.f64(double %r)
  %r3 = call double @llvm.fabs.f64(double -0.0)
  %r4 = fadd double %r2, %r3
  ret double %r4
}

; CHECK-NEXT: define internal double @test_fabs_double(double %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT:   %r = call double @llvm.fabs.f64(double %x)
; CHECK-NEXT:   %r2 = call double @llvm.fabs.f64(double %r)
; CHECK-NEXT:   %r3 = call double @llvm.fabs.f64(double -0.000000e+00)
; CHECK-NEXT:   %r4 = fadd double %r2, %r3
; CHECK-NEXT:   ret double %r4
; CHECK-NEXT: }

define internal <4 x float> @test_fabs_v4f32(<4 x float> %x) {
entry:
  %r = call <4 x float> @llvm.fabs.v4f32(<4 x float> %x)
  %r2 = call <4 x float> @llvm.fabs.v4f32(<4 x float> %r)
  %r3 = call <4 x float> @llvm.fabs.v4f32(<4 x float> undef)
  %r4 = fadd <4 x float> %r2, %r3
  ret <4 x float> %r4
}

; CHECK-NEXT: define internal <4 x float> @test_fabs_v4f32(<4 x float> %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT:   %r = call <4 x float> @llvm.fabs.v4f32(<4 x float> %x)
; CHECK-NEXT:   %r2 = call <4 x float> @llvm.fabs.v4f32(<4 x float> %r)
; CHECK-NEXT:   %r3 = call <4 x float> @llvm.fabs.v4f32(<4 x float> undef)
; CHECK-NEXT:   %r4 = fadd <4 x float> %r2, %r3
; CHECK-NEXT:   ret <4 x float> %r4
; CHECK-NEXT: }

define internal i32 @test_trap(i32 %br) {
entry:
  %r1 = icmp eq i32 %br, 0
  br i1 %r1, label %Zero, label %NonZero
Zero:
  call void @llvm.trap()
  unreachable
NonZero:
  ret i32 1
}

; CHECK-NEXT: define internal i32 @test_trap(i32 %br) {
; CHECK-NEXT: entry:
; CHECK-NEXT:   %r1 = icmp eq i32 %br, 0
; CHECK-NEXT:   br i1 %r1, label %Zero, label %NonZero
; CHECK-NEXT: Zero:
; CHECK-NEXT:   call void @llvm.trap()
; CHECK-NEXT:   unreachable
; CHECK-NEXT: NonZero:
; CHECK-NEXT:   ret i32 1
; CHECK-NEXT: }

define internal i32 @test_bswap_16(i32 %x) {
entry:
  %x_trunc = trunc i32 %x to i16
  %r = call i16 @llvm.bswap.i16(i16 %x_trunc)
  %r_zext = zext i16 %r to i32
  ret i32 %r_zext
}

; CHECK-NEXT: define internal i32 @test_bswap_16(i32 %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT:   %x_trunc = trunc i32 %x to i16
; CHECK-NEXT:   %r = call i16 @llvm.bswap.i16(i16 %x_trunc)
; CHECK-NEXT:   %r_zext = zext i16 %r to i32
; CHECK-NEXT:   ret i32 %r_zext
; CHECK-NEXT: }

define internal i32 @test_bswap_32(i32 %x) {
entry:
  %r = call i32 @llvm.bswap.i32(i32 %x)
  ret i32 %r
}

; CHECK-NEXT: define internal i32 @test_bswap_32(i32 %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT:   %r = call i32 @llvm.bswap.i32(i32 %x)
; CHECK-NEXT:   ret i32 %r
; CHECK-NEXT: }

define internal i64 @test_bswap_64(i64 %x) {
entry:
  %r = call i64 @llvm.bswap.i64(i64 %x)
  ret i64 %r
}

; CHECK-NEXT: define internal i64 @test_bswap_64(i64 %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT:   %r = call i64 @llvm.bswap.i64(i64 %x)
; CHECK-NEXT:   ret i64 %r
; CHECK-NEXT: }

define internal i32 @test_ctlz_32(i32 %x) {
entry:
  %r = call i32 @llvm.ctlz.i32(i32 %x, i1 false)
  ret i32 %r
}

; CHECK-NEXT: define internal i32 @test_ctlz_32(i32 %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT:   %r = call i32 @llvm.ctlz.i32(i32 %x, i1 false)
; CHECK-NEXT:   ret i32 %r
; CHECK-NEXT: }

define internal i64 @test_ctlz_64(i64 %x) {
entry:
  %r = call i64 @llvm.ctlz.i64(i64 %x, i1 false)
  ret i64 %r
}

; CHECK-NEXT: define internal i64 @test_ctlz_64(i64 %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT:   %r = call i64 @llvm.ctlz.i64(i64 %x, i1 false)
; CHECK-NEXT:   ret i64 %r
; CHECK-NEXT: }

define internal i32 @test_cttz_32(i32 %x) {
entry:
  %r = call i32 @llvm.cttz.i32(i32 %x, i1 false)
  ret i32 %r
}

; CHECK-NEXT: define internal i32 @test_cttz_32(i32 %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT:   %r = call i32 @llvm.cttz.i32(i32 %x, i1 false)
; CHECK-NEXT:   ret i32 %r
; CHECK-NEXT: }

define internal i64 @test_cttz_64(i64 %x) {
entry:
  %r = call i64 @llvm.cttz.i64(i64 %x, i1 false)
  ret i64 %r
}

; CHECK-NEXT: define internal i64 @test_cttz_64(i64 %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT:   %r = call i64 @llvm.cttz.i64(i64 %x, i1 false)
; CHECK-NEXT:   ret i64 %r
; CHECK-NEXT: }

define internal i32 @test_popcount_32(i32 %x) {
entry:
  %r = call i32 @llvm.ctpop.i32(i32 %x)
  ret i32 %r
}

; CHECK-NEXT: define internal i32 @test_popcount_32(i32 %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT:   %r = call i32 @llvm.ctpop.i32(i32 %x)
; CHECK-NEXT:   ret i32 %r
; CHECK-NEXT: }

define internal i64 @test_popcount_64(i64 %x) {
entry:
  %r = call i64 @llvm.ctpop.i64(i64 %x)
  ret i64 %r
}

; CHECK-NEXT: define internal i64 @test_popcount_64(i64 %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT:   %r = call i64 @llvm.ctpop.i64(i64 %x)
; CHECK-NEXT:   ret i64 %r
; CHECK-NEXT: }

define internal void @test_stacksave_noalloca() {
entry:
  %sp = call i8* @llvm.stacksave()
  call void @llvm.stackrestore(i8* %sp)
  ret void
}

; Note: as with the other pointer-typed intrinsics, i8* appears as i32.
; CHECK-NEXT: define internal void @test_stacksave_noalloca() {
; CHECK-NEXT: entry:
; CHECK-NEXT:   %sp = call i32 @llvm.stacksave()
; CHECK-NEXT:   call void @llvm.stackrestore(i32 %sp)
; CHECK-NEXT:   ret void
; CHECK-NEXT: }

declare i32 @foo(i32 %x)

define internal void @test_stacksave_multiple(i32 %x) {
entry:
  %x_4 = mul i32 %x, 4
  %sp1 = call i8* @llvm.stacksave()
  %tmp1 = alloca i8, i32 %x_4, align 4

  %sp2 = call i8* @llvm.stacksave()
  %tmp2 = alloca i8, i32 %x_4, align 4

  %y = call i32 @foo(i32 %x)

  %sp3 = call i8* @llvm.stacksave()
  %tmp3 = alloca i8, i32 %x_4, align 4

  %__9 = bitcast i8* %tmp1 to i32*
  store i32 %y, i32* %__9, align 1

  %__10 = bitcast i8* %tmp2 to i32*
  store i32 %x, i32* %__10, align 1

  %__11 = bitcast i8* %tmp3 to i32*
  store i32 %x, i32* %__11, align 1

  call void @llvm.stackrestore(i8* %sp1)
  ret void
}

; Note: the bitcasts are elided; the stores reference the allocas directly.
; CHECK-NEXT: define internal void @test_stacksave_multiple(i32 %x) {
; CHECK-NEXT: entry:
; CHECK-NEXT:   %x_4 = mul i32 %x, 4
; CHECK-NEXT:   %sp1 = call i32 @llvm.stacksave()
; CHECK-NEXT:   %tmp1 = alloca i8, i32 %x_4, align 4
; CHECK-NEXT:   %sp2 = call i32 @llvm.stacksave()
; CHECK-NEXT:   %tmp2 = alloca i8, i32 %x_4, align 4
; CHECK-NEXT:   %y = call i32 @foo(i32 %x)
; CHECK-NEXT:   %sp3 = call i32 @llvm.stacksave()
; CHECK-NEXT:   %tmp3 = alloca i8, i32 %x_4, align 4
; CHECK-NEXT:   store i32 %y, i32* %tmp1, align 1
; CHECK-NEXT:   store i32 %x, i32* %tmp2, align 1
; CHECK-NEXT:   store i32 %x, i32* %tmp3, align 1
; CHECK-NEXT:   call void @llvm.stackrestore(i32 %sp1)
; CHECK-NEXT:   ret void
; CHECK-NEXT: }

; NOIR: Total across all functions