; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -structurizecfg %s | FileCheck %s
;
; StructurizeCFG::orderNodes uses a reverse post-order (RPO) traversal of the region
; list to compute the order. The only problem with it is that sometimes backedges
; for outer loops will be visited before backedges for inner loops. To solve this problem,
; a loop-depth-based approach is used to make sure all blocks of an inner loop have been
; visited before moving on to an outer loop.
;
; However, there is a problem for a SubRegion that is itself a loop:
;
;                   _
;                  | |
;                  V |
;      --> BB1 --> BB2 --> BB3 -->
;
; In this case, BB2 is a SubRegion (a loop), so its loop depth differs from that of
; BB1 and BB3, which leads to BB2 being placed in the wrong order.
;
; To handle this, the SubRegion is treated as a special case: its exit block is used to
; determine the loop it belongs to and the loop depth that guards the sorting (see the
; sketch at the end of this file).
define amdgpu_kernel void @loop_subregion_misordered(i32 addrspace(1)* %arg0) #0 {
; CHECK-LABEL: @loop_subregion_misordered(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = load volatile <2 x i32>, <2 x i32> addrspace(1)* undef, align 16
; CHECK-NEXT:    [[LOAD1:%.*]] = load volatile <2 x float>, <2 x float> addrspace(1)* undef
; CHECK-NEXT:    [[TID:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds i32, i32 addrspace(1)* [[ARG0:%.*]], i32 [[TID]]
; CHECK-NEXT:    [[I_INITIAL:%.*]] = load volatile i32, i32 addrspace(1)* [[GEP]], align 4
; CHECK-NEXT:    br label [[LOOP_HEADER:%.*]]
; CHECK:       LOOP.HEADER:
; CHECK-NEXT:    [[I:%.*]] = phi i32 [ [[I_INITIAL]], [[ENTRY:%.*]] ], [ [[TMP4:%.*]], [[FLOW3:%.*]] ]
; CHECK-NEXT:    call void asm sideeffect "s_nop 0x100b
; CHECK-NEXT:    [[TMP12:%.*]] = zext i32 [[I]] to i64
; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* null, i64 [[TMP12]]
; CHECK-NEXT:    [[TMP14:%.*]] = load <4 x i32>, <4 x i32> addrspace(1)* [[TMP13]], align 16
; CHECK-NEXT:    [[TMP15:%.*]] = extractelement <4 x i32> [[TMP14]], i64 0
; CHECK-NEXT:    [[TMP16:%.*]] = and i32 [[TMP15]], 65535
; CHECK-NEXT:    [[TMP17:%.*]] = icmp eq i32 [[TMP16]], 1
; CHECK-NEXT:    [[TMP0:%.*]] = xor i1 [[TMP17]], true
; CHECK-NEXT:    br i1 [[TMP0]], label [[BB62:%.*]], label [[FLOW:%.*]]
; CHECK:       Flow1:
; CHECK-NEXT:    [[TMP1:%.*]] = phi i32 [ [[INC_I:%.*]], [[INCREMENT_I:%.*]] ], [ undef, [[BB62]] ]
; CHECK-NEXT:    [[TMP2:%.*]] = phi i1 [ false, [[INCREMENT_I]] ], [ true, [[BB62]] ]
; CHECK-NEXT:    [[TMP3:%.*]] = phi i1 [ true, [[INCREMENT_I]] ], [ false, [[BB62]] ]
; CHECK-NEXT:    br label [[FLOW]]
; CHECK:       bb18:
; CHECK-NEXT:    [[TMP19:%.*]] = extractelement <2 x i32> [[TMP]], i64 0
; CHECK-NEXT:    [[TMP22:%.*]] = lshr i32 [[TMP19]], 16
; CHECK-NEXT:    [[TMP24:%.*]] = urem i32 [[TMP22]], 52
; CHECK-NEXT:    [[TMP25:%.*]] = mul nuw nsw i32 [[TMP24]], 52
; CHECK-NEXT:    br label [[INNER_LOOP:%.*]]
; CHECK:       Flow2:
; CHECK-NEXT:    [[TMP4]] = phi i32 [ [[TMP59:%.*]], [[INNER_LOOP_BREAK:%.*]] ], [ [[TMP8:%.*]], [[FLOW]] ]
; CHECK-NEXT:    [[TMP5:%.*]] = phi i1 [ true, [[INNER_LOOP_BREAK]] ], [ [[TMP10:%.*]], [[FLOW]] ]
; CHECK-NEXT:    br i1 [[TMP5]], label [[END_ELSE_BLOCK:%.*]], label [[FLOW3]]
; CHECK:       INNER_LOOP:
; CHECK-NEXT:    [[INNER_LOOP_J:%.*]] = phi i32 [ [[INNER_LOOP_J_INC:%.*]], [[INNER_LOOP]] ], [ [[TMP25]], [[BB18:%.*]] ]
; CHECK-NEXT:    call void asm sideeffect "
; CHECK-NEXT:    [[INNER_LOOP_J_INC]] = add nsw i32 [[INNER_LOOP_J]], 1
; CHECK-NEXT:    [[INNER_LOOP_CMP:%.*]] = icmp eq i32 [[INNER_LOOP_J]], 0
; CHECK-NEXT:    br i1 [[INNER_LOOP_CMP]], label [[INNER_LOOP_BREAK]], label [[INNER_LOOP]]
; CHECK:       INNER_LOOP_BREAK:
; CHECK-NEXT:    [[TMP59]] = extractelement <4 x i32> [[TMP14]], i64 2
; CHECK-NEXT:    call void asm sideeffect "s_nop 23 ", "~{memory}"() #0
; CHECK-NEXT:    br label [[FLOW2:%.*]]
; CHECK:       bb62:
; CHECK-NEXT:    [[LOAD13:%.*]] = icmp ult i32 [[TMP16]], 271
; CHECK-NEXT:    [[TMP6:%.*]] = xor i1 [[LOAD13]], true
; CHECK-NEXT:    br i1 [[TMP6]], label [[INCREMENT_I]], label [[FLOW1:%.*]]
; CHECK:       Flow3:
; CHECK-NEXT:    [[TMP7:%.*]] = phi i1 [ [[CMP_END_ELSE_BLOCK:%.*]], [[END_ELSE_BLOCK]] ], [ true, [[FLOW2]] ]
; CHECK-NEXT:    br i1 [[TMP7]], label [[FLOW4:%.*]], label [[LOOP_HEADER]]
; CHECK:       Flow4:
; CHECK-NEXT:    br i1 [[TMP9:%.*]], label [[BB64:%.*]], label [[RETURN:%.*]]
; CHECK:       bb64:
; CHECK-NEXT:    call void asm sideeffect "s_nop 42", "~{memory}"() #0
; CHECK-NEXT:    br label [[RETURN]]
; CHECK:       Flow:
; CHECK-NEXT:    [[TMP8]] = phi i32 [ [[TMP1]], [[FLOW1]] ], [ undef, [[LOOP_HEADER]] ]
; CHECK-NEXT:    [[TMP9]] = phi i1 [ [[TMP2]], [[FLOW1]] ], [ false, [[LOOP_HEADER]] ]
; CHECK-NEXT:    [[TMP10]] = phi i1 [ [[TMP3]], [[FLOW1]] ], [ false, [[LOOP_HEADER]] ]
; CHECK-NEXT:    [[TMP11:%.*]] = phi i1 [ false, [[FLOW1]] ], [ true, [[LOOP_HEADER]] ]
; CHECK-NEXT:    br i1 [[TMP11]], label [[BB18]], label [[FLOW2]]
; CHECK:       INCREMENT_I:
; CHECK-NEXT:    [[INC_I]] = add i32 [[I]], 1
; CHECK-NEXT:    call void asm sideeffect "s_nop 0x1336
; CHECK-NEXT:    br label [[FLOW1]]
; CHECK:       END_ELSE_BLOCK:
; CHECK-NEXT:    call void asm sideeffect "s_nop 0x1337
; CHECK-NEXT:    [[CMP_END_ELSE_BLOCK]] = icmp eq i32 [[TMP4]], -1
; CHECK-NEXT:    br label [[FLOW3]]
; CHECK:       RETURN:
; CHECK-NEXT:    call void asm sideeffect "s_nop 0x99
; CHECK-NEXT:    store volatile <2 x float> [[LOAD1]], <2 x float> addrspace(1)* undef, align 8
; CHECK-NEXT:    ret void
;
entry:
  %tmp = load volatile <2 x i32>, <2 x i32> addrspace(1)* undef, align 16
  %load1 = load volatile <2 x float>, <2 x float> addrspace(1)* undef
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg0, i32 %tid
  %i.initial = load volatile i32, i32 addrspace(1)* %gep, align 4
  br label %LOOP.HEADER

LOOP.HEADER:
  %i = phi i32 [ %i.final, %END_ELSE_BLOCK ], [ %i.initial, %entry ]
  call void asm sideeffect "s_nop 0x100b ; loop $0 ", "r,~{memory}"(i32 %i) #0
  %tmp12 = zext i32 %i to i64
  %tmp13 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* null, i64 %tmp12
  %tmp14 = load <4 x i32>, <4 x i32> addrspace(1)* %tmp13, align 16
  %tmp15 = extractelement <4 x i32> %tmp14, i64 0
  %tmp16 = and i32 %tmp15, 65535
  %tmp17 = icmp eq i32 %tmp16, 1
  br i1 %tmp17, label %bb18, label %bb62

bb18:
  %tmp19 = extractelement <2 x i32> %tmp, i64 0
  %tmp22 = lshr i32 %tmp19, 16
  %tmp24 = urem i32 %tmp22, 52
  %tmp25 = mul nuw nsw i32 %tmp24, 52
  br label %INNER_LOOP

INNER_LOOP:
  %inner.loop.j = phi i32 [ %tmp25, %bb18 ], [ %inner.loop.j.inc, %INNER_LOOP ]
  call void asm sideeffect "; inner loop body", ""() #0
  %inner.loop.j.inc = add nsw i32 %inner.loop.j, 1
  %inner.loop.cmp = icmp eq i32 %inner.loop.j, 0
  br i1 %inner.loop.cmp, label %INNER_LOOP_BREAK, label %INNER_LOOP

INNER_LOOP_BREAK:
  %tmp59 = extractelement <4 x i32> %tmp14, i64 2
  call void asm sideeffect "s_nop 23 ", "~{memory}"() #0
  br label %END_ELSE_BLOCK

bb62:
  %load13 = icmp ult i32 %tmp16, 271
  br i1 %load13, label %bb64, label %INCREMENT_I

bb64:
  call void asm sideeffect "s_nop 42", "~{memory}"() #0
  br label %RETURN

INCREMENT_I:
  %inc.i = add i32 %i, 1
  call void asm sideeffect "s_nop 0x1336 ; increment $0", "v,~{memory}"(i32 %inc.i) #0
  br label %END_ELSE_BLOCK

END_ELSE_BLOCK:
  %i.final = phi i32 [ %tmp59, %INNER_LOOP_BREAK ], [ %inc.i, %INCREMENT_I ]
  call void asm sideeffect "s_nop 0x1337 ; end else block $0", "v,~{memory}"(i32 %i.final) #0
  %cmp.end.else.block = icmp eq i32 %i.final, -1
  br i1 %cmp.end.else.block, label %RETURN, label %LOOP.HEADER

RETURN:
  call void asm sideeffect "s_nop 0x99 ; ClosureEval return", "~{memory}"() #0
  store volatile <2 x float> %load1, <2 x float> addrspace(1)* undef, align 8
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #1

attributes #0 = { convergent nounwind }
attributes #1 = { convergent nounwind readnone }
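; A minimal C++ sketch of the SubRegion special case described in the header,
; assuming LLVM's RegionInfo/LoopInfo APIs. The helper names below are
; illustrative; the actual code in StructurizeCFG::orderNodes may differ in
; detail.
;
;   #include "llvm/Analysis/LoopInfo.h"
;   #include "llvm/Analysis/RegionInfo.h"
;   using namespace llvm;
;
;   // For a plain basic-block node, take the loop and depth of the block
;   // itself. For a SubRegion node, the entry block may sit in a deeper
;   // (inner) loop, so query the region's exit block instead: it reflects
;   // the loop that contains the sub-region as a whole.
;   static Loop *getAdjustedLoop(RegionNode *RN, LoopInfo &LI) {
;     if (RN->isSubRegion())
;       return LI.getLoopFor(RN->getNodeAs<Region>()->getExit());
;     return LI.getLoopFor(RN->getEntry());
;   }
;
;   static unsigned getAdjustedLoopDepth(RegionNode *RN, LoopInfo &LI) {
;     if (RN->isSubRegion())
;       return LI.getLoopDepth(RN->getNodeAs<Region>()->getExit());
;     return LI.getLoopDepth(RN->getEntry());
;   }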