; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s

; This should end with a no-op sequence of exec mask manipulations.
; The exec mask should be in its original state after the unreachable block executes.

; GCN-LABEL: {{^}}uniform_br_trivial_ret_divergent_br_trivial_unreachable:
; GCN: s_cbranch_scc1 [[RET_BB:BB[0-9]+_[0-9]+]]

; GCN-NEXT: ; %else

; GCN: s_and_saveexec_b64 [[SAVE_EXEC:s\[[0-9]+:[0-9]+\]]], vcc
; GCN-NEXT: ; mask branch [[FLOW:BB[0-9]+_[0-9]+]]

; GCN: BB{{[0-9]+_[0-9]+}}: ; %unreachable.bb
; GCN-NEXT: ; divergent unreachable

; GCN-NEXT: {{^}}[[FLOW]]: ; %Flow
; GCN-NEXT: s_or_b64 exec, exec

; GCN-NEXT: [[RET_BB]]:
; GCN-NEXT: ; return
; GCN-NEXT: .Lfunc_end0
define amdgpu_ps <{ i32, i32, i32, i32, i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> @uniform_br_trivial_ret_divergent_br_trivial_unreachable([9 x <4 x i32>] addrspace(2)* byval %arg, [17 x <4 x i32>] addrspace(2)* byval %arg1, [17 x <8 x i32>] addrspace(2)* byval %arg2, i32 addrspace(2)* byval %arg3, float inreg %arg4, i32 inreg %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <2 x i32> %arg8, <3 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, <2 x i32> %arg12, float %arg13, float %arg14, float %arg15, float %arg16, i32 inreg %arg17, i32 %arg18, i32 %arg19, float %arg20, i32 %arg21) #0 {
entry:
  %i.i = extractelement <2 x i32> %arg7, i32 0
  %j.i = extractelement <2 x i32> %arg7, i32 1
  %i.f.i = bitcast i32 %i.i to float
  %j.f.i = bitcast i32 %j.i to float
  %p1.i = call float @llvm.amdgcn.interp.p1(float %i.f.i, i32 1, i32 0, i32 %arg5) #2
  %p2 = call float @llvm.amdgcn.interp.p2(float %p1.i, float %j.f.i, i32 1, i32 0, i32 %arg5) #2
  %p87 = fmul float %p2, %p2
  %p88 = fadd float %p87, %p87
  %p93 = fadd float %p88, %p88
  %p97 = fmul float %p93, %p93
  %p102 = fsub float %p97, %p97
  %p104 = fmul float %p102, %p102
  %p106 = fadd float 0.000000e+00, %p104
  %p108 = fadd float %p106, %p106
  %uniform.cond = icmp slt i32 %arg17, 0
  br i1 %uniform.cond, label %ret.bb, label %else

else: ; preds = %entry
  %p124 = fmul float %p108, %p108
  %p125 = fsub float %p124, %p124
  %divergent.cond = fcmp olt float %p125, 0.000000e+00
  br i1 %divergent.cond, label %ret.bb, label %unreachable.bb

unreachable.bb: ; preds = %else
  unreachable

ret.bb: ; preds = %else, %entry
  ret <{ i32, i32, i32, i32, i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> undef
}

; GCN-LABEL: {{^}}uniform_br_nontrivial_ret_divergent_br_nontrivial_unreachable:
; GCN: s_cbranch_vccnz [[RET_BB:BB[0-9]+_[0-9]+]]

; GCN: ; %bb.{{[0-9]+}}: ; %else
; GCN: s_and_saveexec_b64 [[SAVE_EXEC:s\[[0-9]+:[0-9]+\]]], vcc
; GCN-NEXT: ; mask branch [[FLOW1:BB[0-9]+_[0-9]+]]

; GCN-NEXT: ; %unreachable.bb
; GCN: ds_write_b32
; GCN: ; divergent unreachable

; GCN: ; %ret.bb
; GCN: store_dword

; GCN: ; %UnifiedReturnBlock
; GCN-NEXT: s_or_b64 exec, exec
; GCN-NEXT: s_waitcnt
; GCN-NEXT: ; return
; GCN-NEXT: .Lfunc_end
define amdgpu_ps <{ i32, i32, i32, i32, i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> @uniform_br_nontrivial_ret_divergent_br_nontrivial_unreachable([9 x <4 x i32>] addrspace(2)* byval %arg, [17 x <4 x i32>] addrspace(2)* byval %arg1, [17 x <8 x i32>] addrspace(2)* byval %arg2, i32 addrspace(2)* byval %arg3, float inreg %arg4, i32 inreg %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <2 x i32> %arg8, <3 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, <2 x i32> %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, i32 inreg %arg18, i32 %arg19, float %arg20, i32 %arg21) #0 {
main_body:
  %i.i = extractelement <2 x i32> %arg7, i32 0
  %j.i = extractelement <2 x i32> %arg7, i32 1
  %i.f.i = bitcast i32 %i.i to float
  %j.f.i = bitcast i32 %j.i to float
  %p1.i = call float @llvm.amdgcn.interp.p1(float %i.f.i, i32 1, i32 0, i32 %arg5) #2
  %p2 = call float @llvm.amdgcn.interp.p2(float %p1.i, float %j.f.i, i32 1, i32 0, i32 %arg5) #2
  %p87 = fmul float %p2, %p2
  %p88 = fadd float %p87, %p87
  %p93 = fadd float %p88, %p88
  %p97 = fmul float %p93, %p93
  %p102 = fsub float %p97, %p97
  %p104 = fmul float %p102, %p102
  %p106 = fadd float 0.000000e+00, %p104
  %p108 = fadd float %p106, %p106
  %uniform.cond = icmp slt i32 %arg18, 0
  br i1 %uniform.cond, label %ret.bb, label %else

else: ; preds = %main_body
  %p124 = fmul float %p108, %p108
  %p125 = fsub float %p124, %p124
  %divergent.cond = fcmp olt float %p125, 0.000000e+00
  br i1 %divergent.cond, label %ret.bb, label %unreachable.bb

unreachable.bb: ; preds = %else
  store volatile i32 8, i32 addrspace(3)* undef
  unreachable

ret.bb: ; preds = %else, %main_body
  store volatile i32 11, i32 addrspace(1)* undef
  ret <{ i32, i32, i32, i32, i32, i32, i32, i32, i32, float, float, float, float, float, float, float, float, float, float, float, float, float, float }> undef
}

; Function Attrs: nounwind readnone
declare float @llvm.amdgcn.interp.p1(float, i32, i32, i32) #1

; Function Attrs: nounwind readnone
declare float @llvm.amdgcn.interp.p2(float, float, i32, i32, i32) #1

; Function Attrs: nounwind readnone
declare float @llvm.amdgcn.interp.mov(i32, i32, i32, i32) #1

; Function Attrs: nounwind readnone
declare float @llvm.fabs.f32(float) #1

; Function Attrs: nounwind readnone
declare float @llvm.sqrt.f32(float) #1

; Function Attrs: nounwind readnone
declare float @llvm.floor.f32(float) #1

attributes #0 = { "InitialPSInputAddr"="36983" }
attributes #1 = { nounwind readnone }
attributes #2 = { nounwind }