; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instsimplify -S | FileCheck %s
; RUN: opt < %s -passes=instsimplify -S | FileCheck %s

declare {i8, i1} @llvm.uadd.with.overflow.i8(i8 %a, i8 %b)
declare {i8, i1} @llvm.sadd.with.overflow.i8(i8 %a, i8 %b)
declare {i8, i1} @llvm.usub.with.overflow.i8(i8 %a, i8 %b)
declare {i8, i1} @llvm.ssub.with.overflow.i8(i8 %a, i8 %b)
declare {i8, i1} @llvm.umul.with.overflow.i8(i8 %a, i8 %b)
declare {i8, i1} @llvm.smul.with.overflow.i8(i8 %a, i8 %b)

define i1 @test_uadd1() {
; CHECK-LABEL: @test_uadd1(
; CHECK-NEXT: ret i1 true
;
  %x = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 254, i8 3)
  %overflow = extractvalue {i8, i1} %x, 1
  ret i1 %overflow
}

define i8 @test_uadd2() {
; CHECK-LABEL: @test_uadd2(
; CHECK-NEXT: ret i8 42
;
  %x = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 254, i8 44)
  %result = extractvalue {i8, i1} %x, 0
  ret i8 %result
}

define {i8, i1} @test_uadd3(i8 %v) {
; CHECK-LABEL: @test_uadd3(
; CHECK-NEXT: ret { i8, i1 } { i8 undef, i1 false }
;
  %result = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 %v, i8 undef)
  ret {i8, i1} %result
}

define {i8, i1} @test_uadd3_poison(i8 %v) {
; CHECK-LABEL: @test_uadd3_poison(
; CHECK-NEXT: ret { i8, i1 } { i8 undef, i1 false }
;
  %result = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 %v, i8 poison)
  ret {i8, i1} %result
}

define {i8, i1} @test_uadd4(i8 %v) {
; CHECK-LABEL: @test_uadd4(
; CHECK-NEXT: ret { i8, i1 } { i8 undef, i1 false }
;
  %result = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 undef, i8 %v)
  ret {i8, i1} %result
}

define {i8, i1} @test_uadd4_poison(i8 %v) {
; CHECK-LABEL: @test_uadd4_poison(
; CHECK-NEXT: ret { i8, i1 } { i8 undef, i1 false }
;
  %result = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 poison, i8 %v)
  ret {i8, i1} %result
}

define i1 @test_sadd1() {
; CHECK-LABEL: @test_sadd1(
; CHECK-NEXT: ret i1 true
;
  %x = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 126, i8 3)
  %overflow = extractvalue {i8, i1} %x, 1
  ret i1 %overflow
}

define i8 @test_sadd2() {
; CHECK-LABEL: @test_sadd2(
; CHECK-NEXT: ret i8 -86
;
  %x = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 126, i8 44)
  %result = extractvalue {i8, i1} %x, 0
  ret i8 %result
}

define {i8, i1} @test_sadd3(i8 %v) {
; CHECK-LABEL: @test_sadd3(
; CHECK-NEXT: ret { i8, i1 } { i8 undef, i1 false }
;
  %result = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %v, i8 undef)
  ret {i8, i1} %result
}

define {i8, i1} @test_sadd3_poison(i8 %v) {
; CHECK-LABEL: @test_sadd3_poison(
; CHECK-NEXT: ret { i8, i1 } { i8 undef, i1 false }
;
  %result = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 %v, i8 poison)
  ret {i8, i1} %result
}

define {i8, i1} @test_sadd4(i8 %v) {
; CHECK-LABEL: @test_sadd4(
; CHECK-NEXT: ret { i8, i1 } { i8 undef, i1 false }
;
  %result = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 undef, i8 %v)
  ret {i8, i1} %result
}

define {i8, i1} @test_sadd4_poison(i8 %v) {
; CHECK-LABEL: @test_sadd4_poison(
; CHECK-NEXT: ret { i8, i1 } { i8 undef, i1 false }
;
  %result = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 poison, i8 %v)
  ret {i8, i1} %result
}

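; Subtracting a value from itself can never overflow and the difference is 0,
; so usub/ssub with identical operands folds to zeroinitializer. As the CHECK
; lines below show, an undef or poison operand likewise lets the subtraction
; be treated as non-overflowing.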
define {i8, i1} @test_usub1(i8 %V) {
; CHECK-LABEL: @test_usub1(
; CHECK-NEXT: ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.usub.with.overflow.i8(i8 %V, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_usub2(i8 %V) {
; CHECK-LABEL: @test_usub2(
; CHECK-NEXT: ret { i8, i1 } { i8 undef, i1 false }
;
  %x = call {i8, i1} @llvm.usub.with.overflow.i8(i8 %V, i8 undef)
  ret {i8, i1} %x
}

define {i8, i1} @test_usub2_poison(i8 %V) {
; CHECK-LABEL: @test_usub2_poison(
; CHECK-NEXT: ret { i8, i1 } { i8 undef, i1 false }
;
  %x = call {i8, i1} @llvm.usub.with.overflow.i8(i8 %V, i8 poison)
  ret {i8, i1} %x
}

define {i8, i1} @test_usub3(i8 %V) {
; CHECK-LABEL: @test_usub3(
; CHECK-NEXT: ret { i8, i1 } { i8 undef, i1 false }
;
  %x = call {i8, i1} @llvm.usub.with.overflow.i8(i8 undef, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_usub3_poison(i8 %V) {
; CHECK-LABEL: @test_usub3_poison(
; CHECK-NEXT: ret { i8, i1 } { i8 undef, i1 false }
;
  %x = call {i8, i1} @llvm.usub.with.overflow.i8(i8 poison, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_ssub1(i8 %V) {
; CHECK-LABEL: @test_ssub1(
; CHECK-NEXT: ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 %V, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_ssub2(i8 %V) {
; CHECK-LABEL: @test_ssub2(
; CHECK-NEXT: ret { i8, i1 } { i8 undef, i1 false }
;
  %x = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 %V, i8 undef)
  ret {i8, i1} %x
}

define {i8, i1} @test_ssub2_poison(i8 %V) {
; CHECK-LABEL: @test_ssub2_poison(
; CHECK-NEXT: ret { i8, i1 } { i8 undef, i1 false }
;
  %x = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 %V, i8 poison)
  ret {i8, i1} %x
}

define {i8, i1} @test_ssub3(i8 %V) {
; CHECK-LABEL: @test_ssub3(
; CHECK-NEXT: ret { i8, i1 } { i8 undef, i1 false }
;
  %x = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 undef, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_ssub3_poison(i8 %V) {
; CHECK-LABEL: @test_ssub3_poison(
; CHECK-NEXT: ret { i8, i1 } { i8 undef, i1 false }
;
  %x = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 poison, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_umul1(i8 %V) {
; CHECK-LABEL: @test_umul1(
; CHECK-NEXT: ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.umul.with.overflow.i8(i8 %V, i8 0)
  ret {i8, i1} %x
}

define {i8, i1} @test_umul2(i8 %V) {
; CHECK-LABEL: @test_umul2(
; CHECK-NEXT: ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.umul.with.overflow.i8(i8 %V, i8 undef)
  ret {i8, i1} %x
}

define {i8, i1} @test_umul2_poison(i8 %V) {
; CHECK-LABEL: @test_umul2_poison(
; CHECK-NEXT: ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.umul.with.overflow.i8(i8 %V, i8 poison)
  ret {i8, i1} %x
}

define {i8, i1} @test_umul3(i8 %V) {
; CHECK-LABEL: @test_umul3(
; CHECK-NEXT: ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.umul.with.overflow.i8(i8 0, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_umul4(i8 %V) {
; CHECK-LABEL: @test_umul4(
; CHECK-NEXT: ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.umul.with.overflow.i8(i8 undef, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_umul4_poison(i8 %V) {
; CHECK-LABEL: @test_umul4_poison(
; CHECK-NEXT: ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.umul.with.overflow.i8(i8 poison, i8 %V)
  ret {i8, i1} %x
}

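; The signed multiply folds mirror the unsigned ones above: a zero operand, or
; an undef/poison operand that can be chosen as zero, gives { i8 0, i1 false }.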
define {i8, i1} @test_smul1(i8 %V) {
; CHECK-LABEL: @test_smul1(
; CHECK-NEXT: ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.smul.with.overflow.i8(i8 %V, i8 0)
  ret {i8, i1} %x
}

define {i8, i1} @test_smul2(i8 %V) {
; CHECK-LABEL: @test_smul2(
; CHECK-NEXT: ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.smul.with.overflow.i8(i8 %V, i8 undef)
  ret {i8, i1} %x
}

define {i8, i1} @test_smul2_poison(i8 %V) {
; CHECK-LABEL: @test_smul2_poison(
; CHECK-NEXT: ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.smul.with.overflow.i8(i8 %V, i8 poison)
  ret {i8, i1} %x
}

define {i8, i1} @test_smul3(i8 %V) {
; CHECK-LABEL: @test_smul3(
; CHECK-NEXT: ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.smul.with.overflow.i8(i8 0, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_smul4(i8 %V) {
; CHECK-LABEL: @test_smul4(
; CHECK-NEXT: ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.smul.with.overflow.i8(i8 undef, i8 %V)
  ret {i8, i1} %x
}

define {i8, i1} @test_smul4_poison(i8 %V) {
; CHECK-LABEL: @test_smul4_poison(
; CHECK-NEXT: ret { i8, i1 } zeroinitializer
;
  %x = call {i8, i1} @llvm.smul.with.overflow.i8(i8 poison, i8 %V)
  ret {i8, i1} %x
}

; Test a non-intrinsic that we know about as a library call.
declare float @fabs(float %x)

define float @test_fabs_libcall() {
; CHECK-LABEL: @test_fabs_libcall(
; CHECK-NEXT: [[X:%.*]] = call float @fabs(float -4.200000e+01)
; CHECK-NEXT: ret float 4.200000e+01
;

  %x = call float @fabs(float -42.0)
; This is still a real function call, so instsimplify won't nuke it -- other
; passes have to do that.

  ret float %x
}


declare float @llvm.fabs.f32(float) nounwind readnone
declare float @llvm.floor.f32(float) nounwind readnone
declare float @llvm.ceil.f32(float) nounwind readnone
declare float @llvm.trunc.f32(float) nounwind readnone
declare float @llvm.rint.f32(float) nounwind readnone
declare float @llvm.nearbyint.f32(float) nounwind readnone
declare float @llvm.canonicalize.f32(float) nounwind readnone

; Test idempotent intrinsics
define float @test_idempotence(float %a) {
; CHECK-LABEL: @test_idempotence(
; CHECK-NEXT: [[A0:%.*]] = call float @llvm.fabs.f32(float [[A:%.*]])
; CHECK-NEXT: [[B0:%.*]] = call float @llvm.floor.f32(float [[A]])
; CHECK-NEXT: [[C0:%.*]] = call float @llvm.ceil.f32(float [[A]])
; CHECK-NEXT: [[D0:%.*]] = call float @llvm.trunc.f32(float [[A]])
; CHECK-NEXT: [[E0:%.*]] = call float @llvm.rint.f32(float [[A]])
; CHECK-NEXT: [[F0:%.*]] = call float @llvm.nearbyint.f32(float [[A]])
; CHECK-NEXT: [[G0:%.*]] = call float @llvm.canonicalize.f32(float [[A]])
; CHECK-NEXT: [[R0:%.*]] = fadd float [[A0]], [[B0]]
; CHECK-NEXT: [[R1:%.*]] = fadd float [[R0]], [[C0]]
; CHECK-NEXT: [[R2:%.*]] = fadd float [[R1]], [[D0]]
; CHECK-NEXT: [[R3:%.*]] = fadd float [[R2]], [[E0]]
; CHECK-NEXT: [[R4:%.*]] = fadd float [[R3]], [[F0]]
; CHECK-NEXT: [[R5:%.*]] = fadd float [[R4]], [[G0]]
; CHECK-NEXT: ret float [[R5]]
;

  %a0 = call float @llvm.fabs.f32(float %a)
  %a1 = call float @llvm.fabs.f32(float %a0)

  %b0 = call float @llvm.floor.f32(float %a)
  %b1 = call float @llvm.floor.f32(float %b0)

  %c0 = call float @llvm.ceil.f32(float %a)
  %c1 = call float @llvm.ceil.f32(float %c0)

  %d0 = call float @llvm.trunc.f32(float %a)
  %d1 = call float @llvm.trunc.f32(float %d0)

  %e0 = call float @llvm.rint.f32(float %a)
  %e1 = call float @llvm.rint.f32(float %e0)

  %f0 = call float @llvm.nearbyint.f32(float %a)
  %f1 = call float @llvm.nearbyint.f32(float %f0)

  %g0 = call float @llvm.canonicalize.f32(float %a)
  %g1 = call float @llvm.canonicalize.f32(float %g0)

  %r0 = fadd float %a1, %b1
  %r1 = fadd float %r0, %c1
  %r2 = fadd float %r1, %d1
  %r3 = fadd float %r2, %e1
  %r4 = fadd float %r3, %f1
  %r5 = fadd float %r4, %g1

  ret float %r5
}

define i8* @operator_new() {
; CHECK-LABEL: @operator_new(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CALL:%.*]] = tail call noalias i8* @_Znwm(i64 8)
; CHECK-NEXT: br i1 false, label [[CAST_END:%.*]], label [[CAST_NOTNULL:%.*]]
; CHECK: cast.notnull:
; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i8, i8* [[CALL]], i64 4
; CHECK-NEXT: br label [[CAST_END]]
; CHECK: cast.end:
; CHECK-NEXT: [[CAST_RESULT:%.*]] = phi i8* [ [[ADD_PTR]], [[CAST_NOTNULL]] ], [ null, [[ENTRY:%.*]] ]
; CHECK-NEXT: ret i8* [[CAST_RESULT]]
;
entry:
  %call = tail call noalias i8* @_Znwm(i64 8)
  %cmp = icmp eq i8* %call, null
  br i1 %cmp, label %cast.end, label %cast.notnull

cast.notnull: ; preds = %entry
  %add.ptr = getelementptr inbounds i8, i8* %call, i64 4
  br label %cast.end

cast.end: ; preds = %cast.notnull, %entry
  %cast.result = phi i8* [ %add.ptr, %cast.notnull ], [ null, %entry ]
  ret i8* %cast.result

}

declare nonnull noalias i8* @_Znwm(i64)

%"struct.std::nothrow_t" = type { i8 }
@_ZSt7nothrow = external global %"struct.std::nothrow_t"

define i8* @operator_new_nothrow_t() {
; CHECK-LABEL: @operator_new_nothrow_t(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CALL:%.*]] = tail call noalias i8* @_ZnamRKSt9nothrow_t(i64 8, %"struct.std::nothrow_t"* @_ZSt7nothrow)
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8* [[CALL]], null
; CHECK-NEXT: br i1 [[CMP]], label [[CAST_END:%.*]], label [[CAST_NOTNULL:%.*]]
; CHECK: cast.notnull:
; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i8, i8* [[CALL]], i64 4
; CHECK-NEXT: br label [[CAST_END]]
; CHECK: cast.end:
; CHECK-NEXT: [[CAST_RESULT:%.*]] = phi i8* [ [[ADD_PTR]], [[CAST_NOTNULL]] ], [ null, [[ENTRY:%.*]] ]
; CHECK-NEXT: ret i8* [[CAST_RESULT]]
;
entry:
  %call = tail call noalias i8* @_ZnamRKSt9nothrow_t(i64 8, %"struct.std::nothrow_t"* @_ZSt7nothrow)
  %cmp = icmp eq i8* %call, null
  br i1 %cmp, label %cast.end, label %cast.notnull

cast.notnull: ; preds = %entry
  %add.ptr = getelementptr inbounds i8, i8* %call, i64 4
  br label %cast.end

cast.end: ; preds = %cast.notnull, %entry
  %cast.result = phi i8* [ %add.ptr, %cast.notnull ], [ null, %entry ]
  ret i8* %cast.result

}

declare i8* @_ZnamRKSt9nothrow_t(i64, %"struct.std::nothrow_t"*) nounwind

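; Unlike the nonnull operator new above, malloc may return null, so the null
; check and branch are not folded away.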
define i8* @malloc_can_return_null() {
; CHECK-LABEL: @malloc_can_return_null(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CALL:%.*]] = tail call noalias i8* @malloc(i64 8)
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8* [[CALL]], null
; CHECK-NEXT: br i1 [[CMP]], label [[CAST_END:%.*]], label [[CAST_NOTNULL:%.*]]
; CHECK: cast.notnull:
; CHECK-NEXT: [[ADD_PTR:%.*]] = getelementptr inbounds i8, i8* [[CALL]], i64 4
; CHECK-NEXT: br label [[CAST_END]]
; CHECK: cast.end:
; CHECK-NEXT: [[CAST_RESULT:%.*]] = phi i8* [ [[ADD_PTR]], [[CAST_NOTNULL]] ], [ null, [[ENTRY:%.*]] ]
; CHECK-NEXT: ret i8* [[CAST_RESULT]]
;
entry:
  %call = tail call noalias i8* @malloc(i64 8)
  %cmp = icmp eq i8* %call, null
  br i1 %cmp, label %cast.end, label %cast.notnull

cast.notnull: ; preds = %entry
  %add.ptr = getelementptr inbounds i8, i8* %call, i64 4
  br label %cast.end

cast.end: ; preds = %cast.notnull, %entry
  %cast.result = phi i8* [ %add.ptr, %cast.notnull ], [ null, %entry ]
  ret i8* %cast.result

}

define i32 @call_null() {
; CHECK-LABEL: @call_null(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CALL:%.*]] = call i32 null()
; CHECK-NEXT: ret i32 undef
;
entry:
  %call = call i32 null()
  ret i32 %call
}

define i32 @call_undef() {
; CHECK-LABEL: @call_undef(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CALL:%.*]] = call i32 undef()
; CHECK-NEXT: ret i32 undef
;
entry:
  %call = call i32 undef()
  ret i32 %call
}

@GV = private constant [8 x i32] [i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49]

define <8 x i32> @partial_masked_load() {
; CHECK-LABEL: @partial_masked_load(
; CHECK-NEXT: ret <8 x i32> <i32 undef, i32 undef, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
;
  %masked.load = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>* bitcast (i32* getelementptr ([8 x i32], [8 x i32]* @GV, i64 0, i64 -2) to <8 x i32>*), i32 4, <8 x i1> <i1 false, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef)
  ret <8 x i32> %masked.load
}

define <8 x i32> @masked_load_undef_mask(<8 x i32>* %V) {
; CHECK-LABEL: @masked_load_undef_mask(
; CHECK-NEXT: ret <8 x i32> <i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0>
;
  %masked.load = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>* %V, i32 4, <8 x i1> undef, <8 x i32> <i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0>)
  ret <8 x i32> %masked.load
}

declare noalias i8* @malloc(i64)

declare <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>*, i32, <8 x i1>, <8 x i32>)

declare double @llvm.powi.f64(double, i32)
declare <2 x double> @llvm.powi.v2f64(<2 x double>, i32)

define double @constant_fold_powi() {
; CHECK-LABEL: @constant_fold_powi(
; CHECK-NEXT: ret double 9.000000e+00
;
  %t0 = call double @llvm.powi.f64(double 3.00000e+00, i32 2)
  ret double %t0
}

define <2 x double> @constant_fold_powi_vec() {
; CHECK-LABEL: @constant_fold_powi_vec(
; CHECK-NEXT: ret <2 x double> <double 9.000000e+00, double 2.500000e+01>
;
  %t0 = call <2 x double> @llvm.powi.v2f64(<2 x double> <double 3.00000e+00, double 5.00000e+00>, i32 2)
  ret <2 x double> %t0
}

declare i8 @llvm.fshl.i8(i8, i8, i8)
declare i9 @llvm.fshr.i9(i9, i9, i9)
declare <2 x i7> @llvm.fshl.v2i7(<2 x i7>, <2 x i7>, <2 x i7>)
declare <2 x i8> @llvm.fshr.v2i8(<2 x i8>, <2 x i8>, <2 x i8>)

define i8 @fshl_no_shift(i8 %x, i8 %y) {
; CHECK-LABEL: @fshl_no_shift(
; CHECK-NEXT: ret i8 [[X:%.*]]
;
  %z = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 0)
  ret i8 %z
}

define i9 @fshr_no_shift(i9 %x, i9 %y) {
; CHECK-LABEL: @fshr_no_shift(
; CHECK-NEXT: ret i9 [[Y:%.*]]
;
  %z = call i9 @llvm.fshr.i9(i9 %x, i9 %y, i9 0)
  ret i9 %z
}

define i8 @fshl_no_shift_modulo_bitwidth(i8 %x, i8 %y) {
; CHECK-LABEL: @fshl_no_shift_modulo_bitwidth(
; CHECK-NEXT: ret i8 [[X:%.*]]
;
  %z = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 40)
  ret i8 %z
}

define i9 @fshr_no_shift_modulo_bitwidth(i9 %x, i9 %y) {
; CHECK-LABEL: @fshr_no_shift_modulo_bitwidth(
; CHECK-NEXT: ret i9 [[Y:%.*]]
;
  %z = call i9 @llvm.fshr.i9(i9 %x, i9 %y, i9 189)
  ret i9 %z
}

define <2 x i7> @fshl_no_shift_modulo_bitwidth_splat(<2 x i7> %x, <2 x i7> %y) {
; CHECK-LABEL: @fshl_no_shift_modulo_bitwidth_splat(
; CHECK-NEXT: ret <2 x i7> [[X:%.*]]
;
  %z = call <2 x i7> @llvm.fshl.v2i7(<2 x i7> %x, <2 x i7> %y, <2 x i7> <i7 21, i7 21>)
  ret <2 x i7> %z
}

define <2 x i8> @fshr_no_shift_modulo_bitwidth_splat(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @fshr_no_shift_modulo_bitwidth_splat(
; CHECK-NEXT: ret <2 x i8> [[Y:%.*]]
;
  %z = call <2 x i8> @llvm.fshr.v2i8(<2 x i8> %x, <2 x i8> %y, <2 x i8> <i8 72, i8 72>)
  ret <2 x i8> %z
}

; If y is poison, eliminating the guard is not safe.

define i8 @fshl_zero_shift_guard(i8 %x, i8 %y, i8 %sh) {
; CHECK-LABEL: @fshl_zero_shift_guard(
; CHECK-NEXT: [[C:%.*]] = icmp eq i8 [[SH:%.*]], 0
; CHECK-NEXT: [[F:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[Y:%.*]], i8 [[SH]])
; CHECK-NEXT: [[S:%.*]] = select i1 [[C]], i8 [[X]], i8 [[F]]
; CHECK-NEXT: ret i8 [[S]]
;
  %c = icmp eq i8 %sh, 0
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 %sh)
  %s = select i1 %c, i8 %x, i8 %f
  ret i8 %s
}

; If y is poison, eliminating the guard is not safe.

define i8 @fshl_zero_shift_guard_swapped(i8 %x, i8 %y, i8 %sh) {
; CHECK-LABEL: @fshl_zero_shift_guard_swapped(
; CHECK-NEXT: [[C:%.*]] = icmp ne i8 [[SH:%.*]], 0
; CHECK-NEXT: [[F:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[Y:%.*]], i8 [[SH]])
; CHECK-NEXT: [[S:%.*]] = select i1 [[C]], i8 [[F]], i8 [[X]]
; CHECK-NEXT: ret i8 [[S]]
;
  %c = icmp ne i8 %sh, 0
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 %sh)
  %s = select i1 %c, i8 %f, i8 %x
  ret i8 %s
}

; When the shift amount is 0, fshl returns its 1st parameter (x), so everything is deleted.

define i8 @fshl_zero_shift_guard_inverted(i8 %x, i8 %y, i8 %sh) {
; CHECK-LABEL: @fshl_zero_shift_guard_inverted(
; CHECK-NEXT: ret i8 [[X:%.*]]
;
  %c = icmp eq i8 %sh, 0
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 %sh)
  %s = select i1 %c, i8 %f, i8 %x
  ret i8 %s
}

; When the shift amount is 0, fshl returns its 1st parameter (x), so everything is deleted.

define i8 @fshl_zero_shift_guard_inverted_swapped(i8 %x, i8 %y, i8 %sh) {
; CHECK-LABEL: @fshl_zero_shift_guard_inverted_swapped(
; CHECK-NEXT: ret i8 [[X:%.*]]
;
  %c = icmp ne i8 %sh, 0
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 %sh)
  %s = select i1 %c, i8 %x, i8 %f
  ret i8 %s
}

; If x is poison, eliminating the guard is not safe.

define i9 @fshr_zero_shift_guard(i9 %x, i9 %y, i9 %sh) {
; CHECK-LABEL: @fshr_zero_shift_guard(
; CHECK-NEXT: [[C:%.*]] = icmp eq i9 [[SH:%.*]], 0
; CHECK-NEXT: [[F:%.*]] = call i9 @llvm.fshr.i9(i9 [[X:%.*]], i9 [[Y:%.*]], i9 [[SH]])
; CHECK-NEXT: [[S:%.*]] = select i1 [[C]], i9 [[Y]], i9 [[F]]
; CHECK-NEXT: ret i9 [[S]]
;
  %c = icmp eq i9 %sh, 0
  %f = call i9 @llvm.fshr.i9(i9 %x, i9 %y, i9 %sh)
  %s = select i1 %c, i9 %y, i9 %f
  ret i9 %s
}

; If x is poison, eliminating the guard is not safe.

define i9 @fshr_zero_shift_guard_swapped(i9 %x, i9 %y, i9 %sh) {
; CHECK-LABEL: @fshr_zero_shift_guard_swapped(
; CHECK-NEXT: [[C:%.*]] = icmp ne i9 [[SH:%.*]], 0
; CHECK-NEXT: [[F:%.*]] = call i9 @llvm.fshr.i9(i9 [[X:%.*]], i9 [[Y:%.*]], i9 [[SH]])
; CHECK-NEXT: [[S:%.*]] = select i1 [[C]], i9 [[F]], i9 [[Y]]
; CHECK-NEXT: ret i9 [[S]]
;
  %c = icmp ne i9 %sh, 0
  %f = call i9 @llvm.fshr.i9(i9 %x, i9 %y, i9 %sh)
  %s = select i1 %c, i9 %f, i9 %y
  ret i9 %s
}

; When the shift amount is 0, fshr returns its 2nd parameter (y), so everything is deleted.

define i9 @fshr_zero_shift_guard_inverted(i9 %x, i9 %y, i9 %sh) {
; CHECK-LABEL: @fshr_zero_shift_guard_inverted(
; CHECK-NEXT: ret i9 [[Y:%.*]]
;
  %c = icmp eq i9 %sh, 0
  %f = call i9 @llvm.fshr.i9(i9 %x, i9 %y, i9 %sh)
  %s = select i1 %c, i9 %f, i9 %y
  ret i9 %s
}

; When the shift amount is 0, fshr returns its 2nd parameter (y), so everything is deleted.

define i9 @fshr_zero_shift_guard_inverted_swapped(i9 %x, i9 %y, i9 %sh) {
; CHECK-LABEL: @fshr_zero_shift_guard_inverted_swapped(
; CHECK-NEXT: ret i9 [[Y:%.*]]
;
  %c = icmp ne i9 %sh, 0
  %f = call i9 @llvm.fshr.i9(i9 %x, i9 %y, i9 %sh)
  %s = select i1 %c, i9 %y, i9 %f
  ret i9 %s
}

; When the shift amount is 0, fshl returns its 1st parameter (x), so the guard is not needed.

define i8 @rotl_zero_shift_guard(i8 %x, i8 %sh) {
; CHECK-LABEL: @rotl_zero_shift_guard(
; CHECK-NEXT: [[F:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[X]], i8 [[SH:%.*]])
; CHECK-NEXT: ret i8 [[F]]
;
  %c = icmp eq i8 %sh, 0
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 %sh)
  %s = select i1 %c, i8 %x, i8 %f
  ret i8 %s
}

; When the shift amount is 0, fshl returns its 1st parameter (x), so the guard is not needed.

define i8 @rotl_zero_shift_guard_swapped(i8 %x, i8 %sh) {
; CHECK-LABEL: @rotl_zero_shift_guard_swapped(
; CHECK-NEXT: [[F:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[X]], i8 [[SH:%.*]])
; CHECK-NEXT: ret i8 [[F]]
;
  %c = icmp ne i8 %sh, 0
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 %sh)
  %s = select i1 %c, i8 %f, i8 %x
  ret i8 %s
}

; When the shift amount is 0, fshl returns its 1st parameter (x), so everything is deleted.

define i8 @rotl_zero_shift_guard_inverted(i8 %x, i8 %sh) {
; CHECK-LABEL: @rotl_zero_shift_guard_inverted(
; CHECK-NEXT: ret i8 [[X:%.*]]
;
  %c = icmp eq i8 %sh, 0
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 %sh)
  %s = select i1 %c, i8 %f, i8 %x
  ret i8 %s
}

; When the shift amount is 0, fshl returns its 1st parameter (x), so everything is deleted.

define i8 @rotl_zero_shift_guard_inverted_swapped(i8 %x, i8 %sh) {
; CHECK-LABEL: @rotl_zero_shift_guard_inverted_swapped(
; CHECK-NEXT: ret i8 [[X:%.*]]
;
  %c = icmp ne i8 %sh, 0
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 %sh)
  %s = select i1 %c, i8 %x, i8 %f
  ret i8 %s
}

; When the shift amount is 0, fshr returns its 2nd parameter (x), so the guard is not needed.

define i9 @rotr_zero_shift_guard(i9 %x, i9 %sh) {
; CHECK-LABEL: @rotr_zero_shift_guard(
; CHECK-NEXT: [[F:%.*]] = call i9 @llvm.fshr.i9(i9 [[X:%.*]], i9 [[X]], i9 [[SH:%.*]])
; CHECK-NEXT: ret i9 [[F]]
;
  %c = icmp eq i9 %sh, 0
  %f = call i9 @llvm.fshr.i9(i9 %x, i9 %x, i9 %sh)
  %s = select i1 %c, i9 %x, i9 %f
  ret i9 %s
}

; When the shift amount is 0, fshr returns its 2nd parameter (x), so the guard is not needed.

define i9 @rotr_zero_shift_guard_swapped(i9 %x, i9 %sh) {
; CHECK-LABEL: @rotr_zero_shift_guard_swapped(
; CHECK-NEXT: [[F:%.*]] = call i9 @llvm.fshr.i9(i9 [[X:%.*]], i9 [[X]], i9 [[SH:%.*]])
; CHECK-NEXT: ret i9 [[F]]
;
  %c = icmp ne i9 %sh, 0
  %f = call i9 @llvm.fshr.i9(i9 %x, i9 %x, i9 %sh)
  %s = select i1 %c, i9 %f, i9 %x
  ret i9 %s
}

; When the shift amount is 0, fshr returns its 2nd parameter (x), so everything is deleted.

define i9 @rotr_zero_shift_guard_inverted(i9 %x, i9 %sh) {
; CHECK-LABEL: @rotr_zero_shift_guard_inverted(
; CHECK-NEXT: ret i9 [[X:%.*]]
;
  %c = icmp eq i9 %sh, 0
  %f = call i9 @llvm.fshr.i9(i9 %x, i9 %x, i9 %sh)
  %s = select i1 %c, i9 %f, i9 %x
  ret i9 %s
}

; When the shift amount is 0, fshr returns its 2nd parameter (x), so everything is deleted.

define i9 @rotr_zero_shift_guard_inverted_swapped(i9 %x, i9 %sh) {
; CHECK-LABEL: @rotr_zero_shift_guard_inverted_swapped(
; CHECK-NEXT: ret i9 [[X:%.*]]
;
  %c = icmp ne i9 %sh, 0
  %f = call i9 @llvm.fshr.i9(i9 %x, i9 %x, i9 %sh)
  %s = select i1 %c, i9 %x, i9 %f
  ret i9 %s
}

; Negative test - make sure we're matching the correct parameter of fshl.

define i8 @fshl_zero_shift_guard_wrong_select_op(i8 %x, i8 %y, i8 %sh) {
; CHECK-LABEL: @fshl_zero_shift_guard_wrong_select_op(
; CHECK-NEXT: [[C:%.*]] = icmp eq i8 [[SH:%.*]], 0
; CHECK-NEXT: [[F:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[Y:%.*]], i8 [[SH]])
; CHECK-NEXT: [[S:%.*]] = select i1 [[C]], i8 [[Y]], i8 [[F]]
; CHECK-NEXT: ret i8 [[S]]
;
  %c = icmp eq i8 %sh, 0
  %f = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 %sh)
  %s = select i1 %c, i8 %y, i8 %f
  ret i8 %s
}

; Vector types work too.

define <2 x i8> @rotr_zero_shift_guard_splat(<2 x i8> %x, <2 x i8> %sh) {
; CHECK-LABEL: @rotr_zero_shift_guard_splat(
; CHECK-NEXT: [[F:%.*]] = call <2 x i8> @llvm.fshr.v2i8(<2 x i8> [[X:%.*]], <2 x i8> [[X]], <2 x i8> [[SH:%.*]])
; CHECK-NEXT: ret <2 x i8> [[F]]
;
  %c = icmp eq <2 x i8> %sh, zeroinitializer
  %f = call <2 x i8> @llvm.fshr.v2i8(<2 x i8> %x, <2 x i8> %x, <2 x i8> %sh)
  %s = select <2 x i1> %c, <2 x i8> %x, <2 x i8> %f
  ret <2 x i8> %s
}

; If the first two operands of a funnel shift are undef, the result is undef.

define i8 @fshl_ops_undef(i8 %shamt) {
; CHECK-LABEL: @fshl_ops_undef(
; CHECK-NEXT: ret i8 undef
;
  %r = call i8 @llvm.fshl.i8(i8 undef, i8 undef, i8 %shamt)
  ret i8 %r
}

define i9 @fshr_ops_undef(i9 %shamt) {
; CHECK-LABEL: @fshr_ops_undef(
; CHECK-NEXT: ret i9 undef
;
  %r = call i9 @llvm.fshr.i9(i9 undef, i9 undef, i9 %shamt)
  ret i9 %r
}

; If the shift amount is undef, treat it as zero, returning operand 0 or 1.

define i8 @fshl_shift_undef(i8 %x, i8 %y) {
; CHECK-LABEL: @fshl_shift_undef(
; CHECK-NEXT: ret i8 [[X:%.*]]
;
  %r = call i8 @llvm.fshl.i8(i8 %x, i8 %y, i8 undef)
  ret i8 %r
}

define i9 @fshr_shift_undef(i9 %x, i9 %y) {
; CHECK-LABEL: @fshr_shift_undef(
; CHECK-NEXT: ret i9 [[Y:%.*]]
;
  %r = call i9 @llvm.fshr.i9(i9 %x, i9 %y, i9 undef)
  ret i9 %r
}

; If one of the operands is poison, the result is poison.
; TODO: these should be poison
define i8 @fshl_ops_poison(i8 %b, i8 %shamt) {
; CHECK-LABEL: @fshl_ops_poison(
; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.fshl.i8(i8 poison, i8 [[B:%.*]], i8 [[SHAMT:%.*]])
; CHECK-NEXT: ret i8 [[R]]
;
  %r = call i8 @llvm.fshl.i8(i8 poison, i8 %b, i8 %shamt)
  ret i8 %r
}

define i8 @fshl_ops_poison2(i8 %shamt) {
; CHECK-LABEL: @fshl_ops_poison2(
; CHECK-NEXT: ret i8 undef
;
  %r = call i8 @llvm.fshl.i8(i8 poison, i8 undef, i8 %shamt)
  ret i8 %r
}

define i8 @fshl_ops_poison3(i8 %a, i8 %shamt) {
; CHECK-LABEL: @fshl_ops_poison3(
; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.fshl.i8(i8 [[A:%.*]], i8 poison, i8 [[SHAMT:%.*]])
; CHECK-NEXT: ret i8 [[R]]
;
  %r = call i8 @llvm.fshl.i8(i8 %a, i8 poison, i8 %shamt)
  ret i8 %r
}

define i8 @fshl_ops_poison4(i8 %shamt) {
; CHECK-LABEL: @fshl_ops_poison4(
; CHECK-NEXT: ret i8 undef
;
  %r = call i8 @llvm.fshl.i8(i8 undef, i8 poison, i8 %shamt)
  ret i8 %r
}

define i8 @fshl_ops_poison5(i8 %a, i8 %b) {
; CHECK-LABEL: @fshl_ops_poison5(
; CHECK-NEXT: ret i8 [[A:%.*]]
;
  %r = call i8 @llvm.fshl.i8(i8 %a, i8 %b, i8 poison)
  ret i8 %r
}

define i8 @fshl_ops_poison6() {
; CHECK-LABEL: @fshl_ops_poison6(
; CHECK-NEXT: ret i8 undef
;
  %r = call i8 @llvm.fshl.i8(i8 undef, i8 undef, i8 poison)
  ret i8 %r
}

define i9 @fshr_ops_poison(i9 %b, i9 %shamt) {
; CHECK-LABEL: @fshr_ops_poison(
; CHECK-NEXT: [[R:%.*]] = call i9 @llvm.fshr.i9(i9 poison, i9 [[B:%.*]], i9 [[SHAMT:%.*]])
; CHECK-NEXT: ret i9 [[R]]
;
  %r = call i9 @llvm.fshr.i9(i9 poison, i9 %b, i9 %shamt)
  ret i9 %r
}

define i9 @fshr_ops_poison2(i9 %shamt) {
; CHECK-LABEL: @fshr_ops_poison2(
; CHECK-NEXT: ret i9 undef
;
  %r = call i9 @llvm.fshr.i9(i9 poison, i9 undef, i9 %shamt)
  ret i9 %r
}

define i9 @fshr_ops_poison3(i9 %a, i9 %shamt) {
; CHECK-LABEL: @fshr_ops_poison3(
; CHECK-NEXT: [[R:%.*]] = call i9 @llvm.fshr.i9(i9 [[A:%.*]], i9 poison, i9 [[SHAMT:%.*]])
; CHECK-NEXT: ret i9 [[R]]
;
  %r = call i9 @llvm.fshr.i9(i9 %a, i9 poison, i9 %shamt)
  ret i9 %r
}

define i9 @fshr_ops_poison4(i9 %shamt) {
; CHECK-LABEL: @fshr_ops_poison4(
; CHECK-NEXT: ret i9 undef
;
  %r = call i9 @llvm.fshr.i9(i9 undef, i9 poison, i9 %shamt)
  ret i9 %r
}

define i9 @fshr_ops_poison5(i9 %a, i9 %b) {
; CHECK-LABEL: @fshr_ops_poison5(
; CHECK-NEXT: ret i9 [[B:%.*]]
;
  %r = call i9 @llvm.fshr.i9(i9 %a, i9 %b, i9 poison)
  ret i9 %r
}

define i9 @fshr_ops_poison6() {
; CHECK-LABEL: @fshr_ops_poison6(
; CHECK-NEXT: ret i9 undef
;
  %r = call i9 @llvm.fshr.i9(i9 undef, i9 undef, i9 poison)
  ret i9 %r
}

declare double @llvm.fma.f64(double,double,double)
declare double @llvm.fmuladd.f64(double,double,double)

define double @fma_undef_op0(double %x, double %y) {
; CHECK-LABEL: @fma_undef_op0(
; CHECK-NEXT: ret double 0x7FF8000000000000
;
  %r = call double @llvm.fma.f64(double undef, double %x, double %y)
  ret double %r
}

define double @fma_poison_op0(double %x, double %y) {
; CHECK-LABEL: @fma_poison_op0(
; CHECK-NEXT: ret double 0x7FF8000000000000
;
  %r = call double @llvm.fma.f64(double poison, double %x, double %y)
  ret double %r
}

define double @fma_undef_op1(double %x, double %y) {
; CHECK-LABEL: @fma_undef_op1(
; CHECK-NEXT: ret double 0x7FF8000000000000
;
  %r = call double @llvm.fma.f64(double %x, double undef, double %y)
  ret double %r
}

define double @fma_poison_op1(double %x, double %y) {
; CHECK-LABEL: @fma_poison_op1(
; CHECK-NEXT: ret double 0x7FF8000000000000
;
  %r = call double @llvm.fma.f64(double %x, double poison, double %y)
  ret double %r
}

define double @fma_undef_op2(double %x, double %y) {
; CHECK-LABEL: @fma_undef_op2(
; CHECK-NEXT: ret double 0x7FF8000000000000
;
  %r = call double @llvm.fma.f64(double %x, double %y, double undef)
  ret double %r
}

define double @fma_poison_op2(double %x, double %y) {
; CHECK-LABEL: @fma_poison_op2(
; CHECK-NEXT: ret double 0x7FF8000000000000
;
  %r = call double @llvm.fma.f64(double %x, double %y, double poison)
  ret double %r
}

define double @fmuladd_undef_op0(double %x, double %y) {
; CHECK-LABEL: @fmuladd_undef_op0(
; CHECK-NEXT: ret double 0x7FF8000000000000
;
  %r = call double @llvm.fmuladd.f64(double undef, double %x, double %y)
  ret double %r
}

define double @fmuladd_poison_op0(double %x, double %y) {
; CHECK-LABEL: @fmuladd_poison_op0(
; CHECK-NEXT: ret double 0x7FF8000000000000
;
  %r = call double @llvm.fmuladd.f64(double poison, double %x, double %y)
  ret double %r
}

define double @fmuladd_undef_op1(double %x, double %y) {
; CHECK-LABEL: @fmuladd_undef_op1(
; CHECK-NEXT: ret double 0x7FF8000000000000
;
  %r = call double @llvm.fmuladd.f64(double %x, double undef, double %y)
  ret double %r
}

define double @fmuladd_poison_op1(double %x, double %y) {
; CHECK-LABEL: @fmuladd_poison_op1(
; CHECK-NEXT: ret double 0x7FF8000000000000
;
  %r = call double @llvm.fmuladd.f64(double %x, double poison, double %y)
  ret double %r
}

define double @fmuladd_undef_op2(double %x, double %y) {
; CHECK-LABEL: @fmuladd_undef_op2(
; CHECK-NEXT: ret double 0x7FF8000000000000
;
  %r = call double @llvm.fmuladd.f64(double %x, double %y, double undef)
  ret double %r
}

define double @fmuladd_poison_op2(double %x, double %y) {
; CHECK-LABEL: @fmuladd_poison_op2(
; CHECK-NEXT: ret double 0x7FF8000000000000
;
  %r = call double @llvm.fmuladd.f64(double %x, double %y, double poison)
  ret double %r
}

define double @fma_nan_op0(double %x, double %y) {
; CHECK-LABEL: @fma_nan_op0(
; CHECK-NEXT: ret double 0x7FF8000000000000
;
  %r = call double @llvm.fma.f64(double 0x7ff8000000000000, double %x, double %y)
  ret double %r
}

define double @fma_nan_op1(double %x, double %y) {
; CHECK-LABEL: @fma_nan_op1(
; CHECK-NEXT: ret double 0x7FF8000000000001
;
  %r = call double @llvm.fma.f64(double %x, double 0x7ff8000000000001, double %y)
  ret double %r
}

define double @fma_nan_op2(double %x, double %y) {
; CHECK-LABEL: @fma_nan_op2(
; CHECK-NEXT: ret double 0x7FF8000000000002
;
  %r = call double @llvm.fma.f64(double %x, double %y, double 0x7ff8000000000002)
  ret double %r
}

define double @fmuladd_nan_op0_op1(double %x) {
; CHECK-LABEL: @fmuladd_nan_op0_op1(
; CHECK-NEXT: ret double 0x7FF8000000001234
;
  %r = call double @llvm.fmuladd.f64(double 0x7ff8000000001234, double 0x7ff800000000dead, double %x)
  ret double %r
}

define double @fmuladd_nan_op0_op2(double %x) {
; CHECK-LABEL: @fmuladd_nan_op0_op2(
; CHECK-NEXT: ret double 0x7FF8000000005678
;
  %r = call double @llvm.fmuladd.f64(double 0x7ff8000000005678, double %x, double 0x7ff800000000dead)
  ret double %r
}

define double @fmuladd_nan_op1_op2(double %x) {
; CHECK-LABEL: @fmuladd_nan_op1_op2(
; CHECK-NEXT: ret double 0x7FF80000AAAAAAAA
;
  %r = call double @llvm.fmuladd.f64(double %x, double 0x7ff80000aaaaaaaa, double 0x7ff800000000dead)
  ret double %r
}

define double @fma_nan_multiplicand_inf_zero(double %x) {
; CHECK-LABEL: @fma_nan_multiplicand_inf_zero(
; CHECK-NEXT: [[R:%.*]] = call double @llvm.fma.f64(double 0x7FF0000000000000, double 0.000000e+00, double [[X:%.*]])
; CHECK-NEXT: ret double [[R]]
;
  %r = call double @llvm.fma.f64(double 0x7ff0000000000000, double 0.0, double %x)
  ret double %r
}

define double @fma_nan_multiplicand_zero_inf(double %x) {
; CHECK-LABEL: @fma_nan_multiplicand_zero_inf(
; CHECK-NEXT: [[R:%.*]] = call double @llvm.fma.f64(double 0.000000e+00, double 0x7FF0000000000000, double [[X:%.*]])
; CHECK-NEXT: ret double [[R]]
;
  %r = call double @llvm.fma.f64(double 0.0, double 0x7ff0000000000000, double %x)
  ret double %r
}

define double @fma_nan_addend_inf_neginf(double %x, i32 %y) {
; CHECK-LABEL: @fma_nan_addend_inf_neginf(
; CHECK-NEXT: [[NOTNAN:%.*]] = uitofp i32 [[Y:%.*]] to double
; CHECK-NEXT: [[R:%.*]] = call double @llvm.fma.f64(double 0x7FF0000000000000, double [[NOTNAN]], double 0xFFF0000000000000)
; CHECK-NEXT: ret double [[R]]
;
  %notnan = uitofp i32 %y to double
  %r = call double @llvm.fma.f64(double 0x7ff0000000000000, double %notnan, double 0xfff0000000000000)
  ret double %r
}

define double @fma_nan_addend_neginf_inf(double %x, i1 %y) {
; CHECK-LABEL: @fma_nan_addend_neginf_inf(
; CHECK-NEXT: [[NOTNAN:%.*]] = select i1 [[Y:%.*]], double 4.200000e+01, double -1.000000e-01
; CHECK-NEXT: [[R:%.*]] = call double @llvm.fma.f64(double [[NOTNAN]], double 0xFFF0000000000000, double 0x7FF0000000000000)
; CHECK-NEXT: ret double [[R]]
;
  %notnan = select i1 %y, double 42.0, double -0.1
  %r = call double @llvm.fma.f64(double %notnan, double 0xfff0000000000000, double 0x7ff0000000000000)
  ret double %r
}

define double @fmuladd_nan_multiplicand_neginf_zero(double %x) {
; CHECK-LABEL: @fmuladd_nan_multiplicand_neginf_zero(
; CHECK-NEXT: [[R:%.*]] = call double @llvm.fmuladd.f64(double 0xFFF0000000000000, double 0.000000e+00, double [[X:%.*]])
; CHECK-NEXT: ret double [[R]]
;
  %r = call double @llvm.fmuladd.f64(double 0xfff0000000000000, double 0.0, double %x)
  ret double %r
}

define double @fmuladd_nan_multiplicand_negzero_inf(double %x) {
; CHECK-LABEL: @fmuladd_nan_multiplicand_negzero_inf(
; CHECK-NEXT: [[R:%.*]] = call double @llvm.fmuladd.f64(double -0.000000e+00, double 0x7FF0000000000000, double [[X:%.*]])
; CHECK-NEXT: ret double [[R]]
;
  %r = call double @llvm.fmuladd.f64(double -0.0, double 0x7ff0000000000000, double %x)
  ret double %r
}

define double @fmuladd_nan_addend_inf_neginf(double %x, i32 %y) {
; CHECK-LABEL: @fmuladd_nan_addend_inf_neginf(
; CHECK-NEXT: [[NOTNAN:%.*]] = sitofp i32 [[Y:%.*]] to double
; CHECK-NEXT: [[R:%.*]] = call double @llvm.fmuladd.f64(double 0x7FF0000000000000, double [[NOTNAN]], double 0xFFF0000000000000)
; CHECK-NEXT: ret double [[R]]
;
  %notnan = sitofp i32 %y to double
  %r = call double @llvm.fmuladd.f64(double 0x7ff0000000000000, double %notnan, double 0xfff0000000000000)
  ret double %r
}

define double @fmuladd_nan_addend_neginf_inf(double %x, i1 %y) {
; CHECK-LABEL: @fmuladd_nan_addend_neginf_inf(
; CHECK-NEXT: [[NOTNAN:%.*]] = select i1 [[Y:%.*]], double 4.200000e+01, double -1.000000e-01
; CHECK-NEXT: [[R:%.*]] = call double @llvm.fmuladd.f64(double [[NOTNAN]], double 0xFFF0000000000000, double 0x7FF0000000000000)
; CHECK-NEXT: ret double [[R]]
;
  %notnan = select i1 %y, double 42.0, double -0.1
  %r = call double @llvm.fmuladd.f64(double %notnan, double 0xfff0000000000000, double 0x7ff0000000000000)
  ret double %r
}

declare float @llvm.copysign.f32(float, float)
declare <2 x double> @llvm.copysign.v2f64(<2 x double>, <2 x double>)

define float @copysign_same_operand(float %x) {
; CHECK-LABEL: @copysign_same_operand(
; CHECK-NEXT: ret float [[X:%.*]]
;
  %r = call float @llvm.copysign.f32(float %x, float %x)
  ret float %r
}

define <2 x double> @copysign_same_operand_vec(<2 x double> %x) {
; CHECK-LABEL: @copysign_same_operand_vec(
; CHECK-NEXT: ret <2 x double> [[X:%.*]]
;
  %r = call <2 x double> @llvm.copysign.v2f64(<2 x double> %x, <2 x double> %x)
  ret <2 x double> %r
}

define float @negated_sign_arg(float %x) {
; CHECK-LABEL: @negated_sign_arg(
; CHECK-NEXT: [[NEGX:%.*]] = fsub ninf float -0.000000e+00, [[X:%.*]]
; CHECK-NEXT: ret float [[NEGX]]
;
  %negx = fsub ninf float -0.0, %x
  %r = call arcp float @llvm.copysign.f32(float %x, float %negx)
  ret float %r
}

define <2 x double> @negated_sign_arg_vec(<2 x double> %x) {
; CHECK-LABEL: @negated_sign_arg_vec(
; CHECK-NEXT: [[NEGX:%.*]] = fneg afn <2 x double> [[X:%.*]]
; CHECK-NEXT: ret <2 x double> [[NEGX]]
;
  %negx = fneg afn <2 x double> %x
  %r = call arcp <2 x double> @llvm.copysign.v2f64(<2 x double> %x, <2 x double> %negx)
  ret <2 x double> %r
}

define float @negated_mag_arg(float %x) {
; CHECK-LABEL: @negated_mag_arg(
; CHECK-NEXT: ret float [[X:%.*]]
;
  %negx = fneg nnan float %x
  %r = call ninf float @llvm.copysign.f32(float %negx, float %x)
  ret float %r
}

define <2 x double> @negated_mag_arg_vec(<2 x double> %x) {
; CHECK-LABEL: @negated_mag_arg_vec(
; CHECK-NEXT: ret <2 x double> [[X:%.*]]
;
  %negx = fneg afn <2 x double> %x
  %r = call arcp <2 x double> @llvm.copysign.v2f64(<2 x double> %negx, <2 x double> %x)
  ret <2 x double> %r
}

; We handle the "returned" attribute only in InstCombine, because the fact
; that this simplification may replace one call with another may cause issues
; for call graph passes.

declare i32 @passthru_i32(i32 returned)
declare i8* @passthru_p8(i8* returned)

define i32 @returned_const_int_arg() {
; CHECK-LABEL: @returned_const_int_arg(
; CHECK-NEXT: [[X:%.*]] = call i32 @passthru_i32(i32 42)
; CHECK-NEXT: ret i32 [[X]]
;
  %x = call i32 @passthru_i32(i32 42)
  ret i32 %x
}

define i8* @returned_const_ptr_arg() {
; CHECK-LABEL: @returned_const_ptr_arg(
; CHECK-NEXT: [[X:%.*]] = call i8* @passthru_p8(i8* null)
; CHECK-NEXT: ret i8* [[X]]
;
  %x = call i8* @passthru_p8(i8* null)
  ret i8* %x
}

define i32 @returned_var_arg(i32 %arg) {
; CHECK-LABEL: @returned_var_arg(
; CHECK-NEXT: [[X:%.*]] = call i32 @passthru_i32(i32 [[ARG:%.*]])
; CHECK-NEXT: ret i32 [[X]]
;
  %x = call i32 @passthru_i32(i32 %arg)
  ret i32 %x
}

define i32 @returned_const_int_arg_musttail(i32 %arg) {
; CHECK-LABEL: @returned_const_int_arg_musttail(
; CHECK-NEXT: [[X:%.*]] = musttail call i32 @passthru_i32(i32 42)
; CHECK-NEXT: ret i32 [[X]]
;
  %x = musttail call i32 @passthru_i32(i32 42)
  ret i32 %x
}

define i32 @returned_var_arg_musttail(i32 %arg) {
; CHECK-LABEL: @returned_var_arg_musttail(
; CHECK-NEXT: [[X:%.*]] = musttail call i32 @passthru_i32(i32 [[ARG:%.*]])
; CHECK-NEXT: ret i32 [[X]]
;
  %x = musttail call i32 @passthru_i32(i32 %arg)
  ret i32 %x
}

define i32 @call_undef_musttail() {
; CHECK-LABEL: @call_undef_musttail(
; CHECK-NEXT: [[X:%.*]] = musttail call i32 undef()
; CHECK-NEXT: ret i32 [[X]]
;
  %x = musttail call i32 undef()
  ret i32 %x
}

; This is not the builtin fmax, so we don't know anything about its behavior.

define float @nobuiltin_fmax() {
; CHECK-LABEL: @nobuiltin_fmax(
; CHECK-NEXT: [[M:%.*]] = call float @fmaxf(float 0.000000e+00, float 1.000000e+00) [[ATTR3:#.*]]
; CHECK-NEXT: [[R:%.*]] = call float @llvm.fabs.f32(float [[M]])
; CHECK-NEXT: ret float [[R]]
;
  %m = call float @fmaxf(float 0.0, float 1.0) #0
  %r = call float @llvm.fabs.f32(float %m)
  ret float %r
}

declare float @fmaxf(float, float)

attributes #0 = { nobuiltin readnone }