; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -asm-verbose=false -post-RA-scheduler=true | FileCheck %s

declare void @bar(i32)
declare void @car(i32)
declare void @dar(i32)
declare void @ear(i32)
declare void @far(i32)
declare i1 @qux()

@GHJK = global i32 0
@HABC = global i32 0

; BranchFolding should tail-merge the stores since they all precede
; direct branches to the same place.

; CHECK-LABEL: tail_merge_me:
; CHECK-NOT: GHJK
; CHECK: movl $0, GHJK(%rip)
; CHECK-NEXT: movl $1, HABC(%rip)
; CHECK-NOT: GHJK

define void @tail_merge_me() nounwind {
entry:
  %a = call i1 @qux()
  br i1 %a, label %A, label %next
next:
  %b = call i1 @qux()
  br i1 %b, label %B, label %C

A:
  call void @bar(i32 0)
  store i32 0, i32* @GHJK
  br label %M

B:
  call void @car(i32 1)
  store i32 0, i32* @GHJK
  br label %M

C:
  call void @dar(i32 2)
  store i32 0, i32* @GHJK
  br label %M

M:
  store i32 1, i32* @HABC
  %c = call i1 @qux()
  br i1 %c, label %return, label %altret

return:
  call void @ear(i32 1000)
  ret void
altret:
  call void @far(i32 1001)
  ret void
}

declare i8* @choose(i8*, i8*)

; BranchFolding should tail-duplicate the indirect jump to avoid
; redundant branching.
; CHECK-LABEL: tail_duplicate_me:
; CHECK: movl $0, GHJK(%rip)
; CHECK-NEXT: jmpq *%r
; CHECK: movl $0, GHJK(%rip)
; CHECK-NEXT: jmpq *%r
; CHECK: movl $0, GHJK(%rip)
; CHECK-NEXT: jmpq *%r

define void @tail_duplicate_me() nounwind {
entry:
  %a = call i1 @qux()
  %c = call i8* @choose(i8* blockaddress(@tail_duplicate_me, %return),
                        i8* blockaddress(@tail_duplicate_me, %altret))
  br i1 %a, label %A, label %next
next:
  %b = call i1 @qux()
  br i1 %b, label %B, label %C

A:
  call void @bar(i32 0)
  store i32 0, i32* @GHJK
  br label %M

B:
  call void @car(i32 1)
  store i32 0, i32* @GHJK
  br label %M

C:
  call void @dar(i32 2)
  store i32 0, i32* @GHJK
  br label %M

M:
  indirectbr i8* %c, [label %return, label %altret]

return:
  call void @ear(i32 1000)
  ret void
altret:
  call void @far(i32 1001)
  ret void
}

; BranchFolding shouldn't try to merge the tails of two blocks
; with only a branch in common, regardless of the fallthrough situation.
; CHECK-LABEL: dont_merge_oddly:
; CHECK-NOT: ret
; CHECK: ucomiss %xmm{{[0-2]}}, %xmm{{[0-2]}}
; CHECK-NEXT: jbe .LBB2_3
; CHECK-NEXT: ucomiss %xmm{{[0-2]}}, %xmm{{[0-2]}}
; CHECK-NEXT: ja .LBB2_4
; CHECK-NEXT: .LBB2_2:
; CHECK-NEXT: movb $1, %al
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB2_3:
; CHECK-NEXT: ucomiss %xmm{{[0-2]}}, %xmm{{[0-2]}}
; CHECK-NEXT: jbe .LBB2_2
; CHECK-NEXT: .LBB2_4:
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: ret

define i1 @dont_merge_oddly(float* %result) nounwind {
entry:
  %tmp4 = getelementptr float, float* %result, i32 2
  %tmp5 = load float, float* %tmp4, align 4
  %tmp7 = getelementptr float, float* %result, i32 4
  %tmp8 = load float, float* %tmp7, align 4
  %tmp10 = getelementptr float, float* %result, i32 6
  %tmp11 = load float, float* %tmp10, align 4
  %tmp12 = fcmp olt float %tmp8, %tmp11
  br i1 %tmp12, label %bb, label %bb21

bb:
  %tmp23469 = fcmp olt float %tmp5, %tmp8
  br i1 %tmp23469, label %bb26, label %bb30

bb21:
  %tmp23 = fcmp olt float %tmp5, %tmp11
  br i1 %tmp23, label %bb26, label %bb30

bb26:
  ret i1 0

bb30:
  ret i1 1
}

; Do any-size tail-merging when two candidate blocks will both require
; an unconditional jump to complete a two-way conditional branch.

; CHECK-LABEL: c_expand_expr_stmt:
;
; This test only works when register allocation happens to use %rax for both
; load addresses.
;
; CHE: jmp .LBB3_11
; CHE-NEXT: .LBB3_9:
; CHE-NEXT: movq 8(%rax), %rax
; CHE-NEXT: xorl %edx, %edx
; CHE-NEXT: movb 16(%rax), %al
; CHE-NEXT: cmpb $16, %al
; CHE-NEXT: je .LBB3_11
; CHE-NEXT: cmpb $23, %al
; CHE-NEXT: jne .LBB3_14
; CHE-NEXT: .LBB3_11:

%0 = type { %struct.rtx_def* }
%struct.lang_decl = type opaque
%struct.rtx_def = type { i16, i8, i8, [1 x %union.rtunion] }
%struct.tree_decl = type { [24 x i8], i8*, i32, %union.tree_node*, i32, i8, i8, i8, i8, %union.tree_node*, %union.tree_node*, %union.tree_node*, %union.tree_node*, %union.tree_node*, %union.tree_node*, %union.tree_node*, %union.tree_node*, %union.tree_node*, %struct.rtx_def*, %union..2anon, %0, %union.tree_node*, %struct.lang_decl* }
%union..2anon = type { i32 }
%union.rtunion = type { i8* }
%union.tree_node = type { %struct.tree_decl }

define fastcc void @c_expand_expr_stmt(%union.tree_node* %expr) nounwind {
entry:
  %tmp4 = load i8, i8* null, align 8              ; <i8> [#uses=3]
  switch i8 %tmp4, label %bb3 [
    i8 18, label %bb
  ]

bb:                                               ; preds = %entry
  switch i32 undef, label %bb1 [
    i32 0, label %bb2.i
    i32 37, label %bb.i
  ]

bb.i:                                             ; preds = %bb
  switch i32 undef, label %bb1 [
    i32 0, label %lvalue_p.exit
  ]

bb2.i:                                            ; preds = %bb
  br label %bb3

lvalue_p.exit:                                    ; preds = %bb.i
  %tmp21 = load %union.tree_node*, %union.tree_node** null, align 8 ; <%union.tree_node*> [#uses=3]
  %tmp22 = getelementptr inbounds %union.tree_node, %union.tree_node* %tmp21, i64 0, i32 0, i32 0, i64 0 ; <i8*> [#uses=1]
  %tmp23 = load i8, i8* %tmp22, align 8           ; <i8> [#uses=1]
  %tmp24 = zext i8 %tmp23 to i32                  ; <i32> [#uses=1]
  switch i32 %tmp24, label %lvalue_p.exit4 [
    i32 0, label %bb2.i3
    i32 2, label %bb.i1
  ]

bb.i1:                                            ; preds = %lvalue_p.exit
  %tmp25 = getelementptr inbounds %union.tree_node, %union.tree_node* %tmp21, i64 0, i32 0, i32 2 ; <i32*> [#uses=1]
  %tmp26 = bitcast i32* %tmp25 to %union.tree_node** ; <%union.tree_node**> [#uses=1]
  %tmp27 = load %union.tree_node*, %union.tree_node** %tmp26, align 8 ; <%union.tree_node*> [#uses=2]
  %tmp28 = getelementptr inbounds %union.tree_node, %union.tree_node* %tmp27, i64 0, i32 0, i32 0, i64 16 ; <i8*> [#uses=1]
  %tmp29 = load i8, i8* %tmp28, align 8           ; <i8> [#uses=1]
  %tmp30 = zext i8 %tmp29 to i32                  ; <i32> [#uses=1]
  switch i32 %tmp30, label %lvalue_p.exit4 [
    i32 0, label %bb2.i.i2
    i32 2, label %bb.i.i
  ]

bb.i.i:                                           ; preds = %bb.i1
  %tmp34 = tail call fastcc i32 @lvalue_p(%union.tree_node* null) nounwind ; <i32> [#uses=1]
  %phitmp = icmp ne i32 %tmp34, 0                 ; <i1> [#uses=1]
  br label %lvalue_p.exit4

bb2.i.i2:                                         ; preds = %bb.i1
  %tmp35 = getelementptr inbounds %union.tree_node, %union.tree_node* %tmp27, i64 0, i32 0, i32 0, i64 8 ; <i8*> [#uses=1]
  %tmp36 = bitcast i8* %tmp35 to %union.tree_node** ; <%union.tree_node**> [#uses=1]
  %tmp37 = load %union.tree_node*, %union.tree_node** %tmp36, align 8 ; <%union.tree_node*> [#uses=1]
  %tmp38 = getelementptr inbounds %union.tree_node, %union.tree_node* %tmp37, i64 0, i32 0, i32 0, i64 16 ; <i8*> [#uses=1]
  %tmp39 = load i8, i8* %tmp38, align 8           ; <i8> [#uses=1]
  switch i8 %tmp39, label %bb2 [
    i8 16, label %lvalue_p.exit4
    i8 23, label %lvalue_p.exit4
  ]

bb2.i3:                                           ; preds = %lvalue_p.exit
  %tmp40 = getelementptr inbounds %union.tree_node, %union.tree_node* %tmp21, i64 0, i32 0, i32 0, i64 8 ; <i8*> [#uses=1]
  %tmp41 = bitcast i8* %tmp40 to %union.tree_node** ; <%union.tree_node**> [#uses=1]
  %tmp42 = load %union.tree_node*, %union.tree_node** %tmp41, align 8 ; <%union.tree_node*> [#uses=1]
  %tmp43 = getelementptr inbounds %union.tree_node, %union.tree_node* %tmp42, i64 0, i32 0, i32 0, i64 16 ; <i8*> [#uses=1]
  %tmp44 = load i8, i8* %tmp43, align 8           ; <i8> [#uses=1]
  switch i8 %tmp44, label %bb2 [
    i8 16, label %lvalue_p.exit4
    i8 23, label %lvalue_p.exit4
  ]

lvalue_p.exit4:                                   ; preds = %bb2.i3, %bb2.i3, %bb2.i.i2, %bb2.i.i2, %bb.i.i, %bb.i1, %lvalue_p.exit
  %tmp45 = phi i1 [ %phitmp, %bb.i.i ], [ false, %bb2.i.i2 ], [ false, %bb2.i.i2 ], [ false, %bb.i1 ], [ false, %bb2.i3 ], [ false, %bb2.i3 ], [ false, %lvalue_p.exit ] ; <i1> [#uses=1]
  %tmp46 = icmp eq i8 %tmp4, 0                    ; <i1> [#uses=1]
  %or.cond = or i1 %tmp45, %tmp46                 ; <i1> [#uses=1]
  br i1 %or.cond, label %bb2, label %bb3

bb1:                                              ; preds = %bb2.i.i, %bb.i, %bb
  %.old = icmp eq i8 %tmp4, 23                    ; <i1> [#uses=1]
  br i1 %.old, label %bb2, label %bb3

bb2:                                              ; preds = %bb1, %lvalue_p.exit4, %bb2.i3, %bb2.i.i2
  br label %bb3

bb3:                                              ; preds = %bb2, %bb1, %lvalue_p.exit4, %bb2.i, %entry
  %expr_addr.0 = phi %union.tree_node* [ null, %bb2 ], [ %expr, %bb2.i ], [ %expr, %entry ], [ %expr, %bb1 ], [ %expr, %lvalue_p.exit4 ] ; <%union.tree_node*> [#uses=0]
  unreachable
}

declare fastcc i32 @lvalue_p(%union.tree_node* nocapture) nounwind readonly

declare fastcc %union.tree_node* @default_conversion(%union.tree_node*) nounwind


; If one tail merging candidate falls through into the other,
; tail merging is likely profitable regardless of how few
; instructions are involved. This function should have only
; one ret instruction.

; CHECK-LABEL: foo:
; CHECK: callq func
; CHECK-NEXT: popq
; CHECK-NEXT: .LBB4_2:
; CHECK-NEXT: ret

define void @foo(i1* %V) nounwind {
entry:
  %t0 = icmp eq i1* %V, null
  br i1 %t0, label %return, label %bb

bb:
  call void @func()
  ret void

return:
  ret void
}

declare void @func()

; one - One instruction may be tail-duplicated even with optsize.
; CHECK-LABEL: one:
; CHECK: j{{.*}} tail_call_me
; CHECK: j{{.*}} tail_call_me

@XYZ = external global i32

declare void @tail_call_me()

define void @one(i32 %v) nounwind optsize {
entry:
  %0 = icmp eq i32 %v, 0
  br i1 %0, label %bbx, label %bby

bby:
  switch i32 %v, label %bb7 [
    i32 16, label %return
  ]

bb7:
  tail call void @tail_call_me()
  ret void

bbx:
  switch i32 %v, label %bb12 [
    i32 128, label %return
  ]

bb12:
  tail call void @tail_call_me()
  ret void

return:
  ret void
}

; two - Same as one, but with two instructions in the common
; tail instead of one. This is too much to be merged, given
; the optsize attribute.

; CHECK-LABEL: two:
; CHECK-NOT: XYZ
; CHECK: ret
; CHECK: movl $0, XYZ(%rip)
; CHECK: movl $1, XYZ(%rip)
; CHECK-NOT: XYZ

define void @two() nounwind optsize {
entry:
  %0 = icmp eq i32 undef, 0
  br i1 %0, label %bbx, label %bby

bby:
  switch i32 undef, label %bb7 [
    i32 16, label %return
  ]

bb7:
  store volatile i32 0, i32* @XYZ
  store volatile i32 1, i32* @XYZ
  unreachable

bbx:
  switch i32 undef, label %bb12 [
    i32 128, label %return
  ]

bb12:
  store volatile i32 0, i32* @XYZ
  store volatile i32 1, i32* @XYZ
  unreachable

return:
  ret void
}

; two_minsize - Same as two, but with minsize instead of optsize.
; CHECK-LABEL: two_minsize:
; CHECK-NOT: XYZ
; CHECK: ret
; CHECK: andl $0, XYZ(%rip)
; CHECK: movl $1, XYZ(%rip)
; CHECK-NOT: XYZ

define void @two_minsize() nounwind minsize {
entry:
  %0 = icmp eq i32 undef, 0
  br i1 %0, label %bbx, label %bby

bby:
  switch i32 undef, label %bb7 [
    i32 16, label %return
  ]

bb7:
  store volatile i32 0, i32* @XYZ
  store volatile i32 1, i32* @XYZ
  unreachable

bbx:
  switch i32 undef, label %bb12 [
    i32 128, label %return
  ]

bb12:
  store volatile i32 0, i32* @XYZ
  store volatile i32 1, i32* @XYZ
  unreachable

return:
  ret void
}

; two_nosize - Same as two, but without the optsize attribute.
; Now two instructions are enough to be tail-duplicated.

; CHECK-LABEL: two_nosize:
; CHECK: movl $0, XYZ(%rip)
; CHECK: jmp tail_call_me
; CHECK: movl $0, XYZ(%rip)
; CHECK: jmp tail_call_me

define void @two_nosize() nounwind {
entry:
  %0 = icmp eq i32 undef, 0
  br i1 %0, label %bbx, label %bby

bby:
  switch i32 undef, label %bb7 [
    i32 16, label %return
  ]

bb7:
  store volatile i32 0, i32* @XYZ
  tail call void @tail_call_me()
  ret void

bbx:
  switch i32 undef, label %bb12 [
    i32 128, label %return
  ]

bb12:
  store volatile i32 0, i32* @XYZ
  tail call void @tail_call_me()
  ret void

return:
  ret void
}

; Tail-merging should merge the two ret instructions since one side
; can fall-through into the ret and the other side has to branch anyway.
; CHECK-LABEL: TESTE:
; CHECK: ret
; CHECK-NOT: ret
; CHECK: size TESTE

define i64 @TESTE(i64 %parami, i64 %paraml) nounwind readnone {
entry:
  %cmp = icmp slt i64 %parami, 1                  ; <i1> [#uses=1]
  %varx.0 = select i1 %cmp, i64 1, i64 %parami    ; <i64> [#uses=1]
  %cmp410 = icmp slt i64 %paraml, 1               ; <i1> [#uses=1]
  br i1 %cmp410, label %for.end, label %bb.nph

bb.nph:                                           ; preds = %entry
  %tmp15 = mul i64 %paraml, %parami               ; <i64> [#uses=1]
  ret i64 %tmp15

for.end:                                          ; preds = %entry
  ret i64 %varx.0
}

; We should tail merge small blocks that don't end in a tail call or return
; instruction. Those blocks are typically unreachable and will be placed
; out-of-line after the main return, so we should try to eliminate as many of
; them as possible.

; CHECK-LABEL: merge_aborts:
; CHECK-NOT: callq abort
; CHECK: ret
; CHECK: callq abort
; CHECK-NOT: callq abort
; CHECK: .Lfunc_end{{.*}}:

declare void @abort()
define void @merge_aborts() {
entry:
  %c1 = call i1 @qux()
  br i1 %c1, label %cont1, label %abort1
abort1:
  call void @abort()
  unreachable
cont1:
  %c2 = call i1 @qux()
  br i1 %c2, label %cont2, label %abort2
abort2:
  call void @abort()
  unreachable
cont2:
  %c3 = call i1 @qux()
  br i1 %c3, label %cont3, label %abort3
abort3:
  call void @abort()
  unreachable
cont3:
  %c4 = call i1 @qux()
  br i1 %c4, label %cont4, label %abort4
abort4:
  call void @abort()
  unreachable
cont4:
  ret void
}

; Use alternating abort functions so that the blocks we wish to merge are not
; layout successors during branch folding.
; CHECK-LABEL: merge_alternating_aborts:
; CHECK-NOT: callq abort
; CHECK: ret
; CHECK: callq abort
; CHECK: callq alt_abort
; CHECK-NOT: callq abort
; CHECK-NOT: callq alt_abort
; CHECK: .Lfunc_end{{.*}}:

declare void @alt_abort()

define void @merge_alternating_aborts() {
entry:
  %c1 = call i1 @qux()
  br i1 %c1, label %cont1, label %abort1
abort1:
  call void @abort()
  unreachable
cont1:
  %c2 = call i1 @qux()
  br i1 %c2, label %cont2, label %abort2
abort2:
  call void @alt_abort()
  unreachable
cont2:
  %c3 = call i1 @qux()
  br i1 %c3, label %cont3, label %abort3
abort3:
  call void @abort()
  unreachable
cont3:
  %c4 = call i1 @qux()
  br i1 %c4, label %cont4, label %abort4
abort4:
  call void @alt_abort()
  unreachable
cont4:
  ret void
}