; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s

target datalayout = "e-p:40:64:64:32-p1:16:16:16-p2:32:32:32-p3:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"

declare i32 @test58_d(i64 )

define i1 @test59(i8* %foo) {
; CHECK-LABEL: @test59(
; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr inbounds i8, i8* [[FOO:%.*]], i32 8
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint i8* [[GEP1]] to i40
; CHECK-NEXT:    [[USE:%.*]] = zext i40 [[TMP1]] to i64
; CHECK-NEXT:    [[CALL:%.*]] = call i32 @test58_d(i64 [[USE]])
; CHECK-NEXT:    ret i1 true
;
  %bit = bitcast i8* %foo to i32*
  %gep1 = getelementptr inbounds i32, i32* %bit, i64 2
  %gep2 = getelementptr inbounds i8, i8* %foo, i64 10
  %cast1 = bitcast i32* %gep1 to i8*
  %cmp = icmp ult i8* %cast1, %gep2
  %use = ptrtoint i8* %cast1 to i64
  %call = call i32 @test58_d(i64 %use)
  ret i1 %cmp
}

define i1 @test59_as1(i8 addrspace(1)* %foo) {
; CHECK-LABEL: @test59_as1(
; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr inbounds i8, i8 addrspace(1)* [[FOO:%.*]], i16 8
; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint i8 addrspace(1)* [[GEP1]] to i16
; CHECK-NEXT:    [[USE:%.*]] = zext i16 [[TMP1]] to i64
; CHECK-NEXT:    [[CALL:%.*]] = call i32 @test58_d(i64 [[USE]])
; CHECK-NEXT:    ret i1 true
;
  %bit = bitcast i8 addrspace(1)* %foo to i32 addrspace(1)*
  %gep1 = getelementptr inbounds i32, i32 addrspace(1)* %bit, i64 2
  %gep2 = getelementptr inbounds i8, i8 addrspace(1)* %foo, i64 10
  %cast1 = bitcast i32 addrspace(1)* %gep1 to i8 addrspace(1)*
  %cmp = icmp ult i8 addrspace(1)* %cast1, %gep2
  %use = ptrtoint i8 addrspace(1)* %cast1 to i64
  %call = call i32 @test58_d(i64 %use)
  ret i1 %cmp
}

define i1 @test60(i8* %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test60(
; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[I:%.*]] to i32
; CHECK-NEXT:    [[TMP2:%.*]] = trunc i64 [[J:%.*]] to i32
; CHECK-NEXT:    [[GEP1_IDX:%.*]] = shl nsw i32 [[TMP1]], 2
; CHECK-NEXT:    [[TMP3:%.*]] = icmp slt i32 [[GEP1_IDX]], [[TMP2]]
; CHECK-NEXT:    ret i1 [[TMP3]]
;
  %bit = bitcast i8* %foo to i32*
  %gep1 = getelementptr inbounds i32, i32* %bit, i64 %i
  %gep2 = getelementptr inbounds i8, i8* %foo, i64 %j
  %cast1 = bitcast i32* %gep1 to i8*
  %cmp = icmp ult i8* %cast1, %gep2
  ret i1 %cmp
}

define i1 @test60_as1(i8 addrspace(1)* %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test60_as1(
; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[I:%.*]] to i16
; CHECK-NEXT:    [[TMP2:%.*]] = trunc i64 [[J:%.*]] to i16
; CHECK-NEXT:    [[GEP1_IDX:%.*]] = shl nsw i16 [[TMP1]], 2
; CHECK-NEXT:    [[TMP3:%.*]] = icmp slt i16 [[GEP1_IDX]], [[TMP2]]
; CHECK-NEXT:    ret i1 [[TMP3]]
;
  %bit = bitcast i8 addrspace(1)* %foo to i32 addrspace(1)*
  %gep1 = getelementptr inbounds i32, i32 addrspace(1)* %bit, i64 %i
  %gep2 = getelementptr inbounds i8, i8 addrspace(1)* %foo, i64 %j
  %cast1 = bitcast i32 addrspace(1)* %gep1 to i8 addrspace(1)*
  %cmp = icmp ult i8 addrspace(1)* %cast1, %gep2
  ret i1 %cmp
}

; Same as test60, but look through an addrspacecast instead of a
; bitcast. This uses the same sized addrspace.
define i1 @test60_addrspacecast(i8* %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test60_addrspacecast(
; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[J:%.*]] to i32
; CHECK-NEXT:    [[I_TR:%.*]] = trunc i64 [[I:%.*]] to i32
; CHECK-NEXT:    [[TMP2:%.*]] = shl i32 [[I_TR]], 2
; CHECK-NEXT:    [[TMP3:%.*]] = icmp slt i32 [[TMP2]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[TMP3]]
;
  %bit = addrspacecast i8* %foo to i32 addrspace(3)*
  %gep1 = getelementptr inbounds i32, i32 addrspace(3)* %bit, i64 %i
  %gep2 = getelementptr inbounds i8, i8* %foo, i64 %j
  %cast1 = addrspacecast i32 addrspace(3)* %gep1 to i8*
  %cmp = icmp ult i8* %cast1, %gep2
  ret i1 %cmp
}

define i1 @test60_addrspacecast_smaller(i8* %foo, i16 %i, i64 %j) {
; CHECK-LABEL: @test60_addrspacecast_smaller(
; CHECK-NEXT:    [[GEP1_IDX:%.*]] = shl nsw i16 [[I:%.*]], 2
; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[J:%.*]] to i16
; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt i16 [[GEP1_IDX]], [[TMP1]]
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %bit = addrspacecast i8* %foo to i32 addrspace(1)*
  %gep1 = getelementptr inbounds i32, i32 addrspace(1)* %bit, i16 %i
  %gep2 = getelementptr inbounds i8, i8* %foo, i64 %j
  %cast1 = addrspacecast i32 addrspace(1)* %gep1 to i8*
  %cmp = icmp ult i8* %cast1, %gep2
  ret i1 %cmp
}

define i1 @test60_addrspacecast_larger(i8 addrspace(1)* %foo, i32 %i, i16 %j) {
; CHECK-LABEL: @test60_addrspacecast_larger(
; CHECK-NEXT:    [[I_TR:%.*]] = trunc i32 [[I:%.*]] to i16
; CHECK-NEXT:    [[TMP1:%.*]] = shl i16 [[I_TR]], 2
; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt i16 [[TMP1]], [[J:%.*]]
; CHECK-NEXT:    ret i1 [[TMP2]]
;
  %bit = addrspacecast i8 addrspace(1)* %foo to i32 addrspace(2)*
  %gep1 = getelementptr inbounds i32, i32 addrspace(2)* %bit, i32 %i
  %gep2 = getelementptr inbounds i8, i8 addrspace(1)* %foo, i16 %j
  %cast1 = addrspacecast i32 addrspace(2)* %gep1 to i8 addrspace(1)*
  %cmp = icmp ult i8 addrspace(1)* %cast1, %gep2
  ret i1 %cmp
}

define i1 @test61(i8* %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test61(
; CHECK-NEXT:    [[BIT:%.*]] = bitcast i8* [[FOO:%.*]] to i32*
; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[I:%.*]] to i32
; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr i32, i32* [[BIT]], i32 [[TMP1]]
; CHECK-NEXT:    [[TMP2:%.*]] = trunc i64 [[J:%.*]] to i32
; CHECK-NEXT:    [[GEP2:%.*]] = getelementptr i8, i8* [[FOO]], i32 [[TMP2]]
; CHECK-NEXT:    [[CAST1:%.*]] = bitcast i32* [[GEP1]] to i8*
; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i8* [[GEP2]], [[CAST1]]
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %bit = bitcast i8* %foo to i32*
  %gep1 = getelementptr i32, i32* %bit, i64 %i
  %gep2 = getelementptr i8, i8* %foo, i64 %j
  %cast1 = bitcast i32* %gep1 to i8*
  %cmp = icmp ult i8* %cast1, %gep2
  ret i1 %cmp
; Don't transform non-inbounds GEPs.
}

define i1 @test61_as1(i8 addrspace(1)* %foo, i16 %i, i16 %j) {
; CHECK-LABEL: @test61_as1(
; CHECK-NEXT:    [[BIT:%.*]] = bitcast i8 addrspace(1)* [[FOO:%.*]] to i32 addrspace(1)*
; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr i32, i32 addrspace(1)* [[BIT]], i16 [[I:%.*]]
; CHECK-NEXT:    [[GEP2:%.*]] = getelementptr i8, i8 addrspace(1)* [[FOO]], i16 [[J:%.*]]
; CHECK-NEXT:    [[CAST1:%.*]] = bitcast i32 addrspace(1)* [[GEP1]] to i8 addrspace(1)*
; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt i8 addrspace(1)* [[GEP2]], [[CAST1]]
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %bit = bitcast i8 addrspace(1)* %foo to i32 addrspace(1)*
  %gep1 = getelementptr i32, i32 addrspace(1)* %bit, i16 %i
  %gep2 = getelementptr i8, i8 addrspace(1)* %foo, i16 %j
  %cast1 = bitcast i32 addrspace(1)* %gep1 to i8 addrspace(1)*
  %cmp = icmp ult i8 addrspace(1)* %cast1, %gep2
  ret i1 %cmp
; Don't transform non-inbounds GEPs.
}

define i1 @test62(i8* %a) {
; CHECK-LABEL: @test62(
; CHECK-NEXT:    ret i1 true
;
  %arrayidx1 = getelementptr inbounds i8, i8* %a, i64 1
  %arrayidx2 = getelementptr inbounds i8, i8* %a, i64 10
  %cmp = icmp slt i8* %arrayidx1, %arrayidx2
  ret i1 %cmp
}

define i1 @test62_as1(i8 addrspace(1)* %a) {
; CHECK-LABEL: @test62_as1(
; CHECK-NEXT:    ret i1 true
;
  %arrayidx1 = getelementptr inbounds i8, i8 addrspace(1)* %a, i64 1
  %arrayidx2 = getelementptr inbounds i8, i8 addrspace(1)* %a, i64 10
  %cmp = icmp slt i8 addrspace(1)* %arrayidx1, %arrayidx2
  ret i1 %cmp
}


; Variation of the above with an ashr
define i1 @icmp_and_ashr_multiuse(i32 %X) {
; CHECK-LABEL: @icmp_and_ashr_multiuse(
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], 240
; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 224
; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[X]], 496
; CHECK-NEXT:    [[TOBOOL2:%.*]] = icmp ne i32 [[TMP2]], 432
; CHECK-NEXT:    [[AND3:%.*]] = and i1 [[TOBOOL]], [[TOBOOL2]]
; CHECK-NEXT:    ret i1 [[AND3]]
;
  %shr = ashr i32 %X, 4
  %and = and i32 %shr, 15
  %and2 = and i32 %shr, 31 ; second use of the shift
  %tobool = icmp ne i32 %and, 14
  %tobool2 = icmp ne i32 %and2, 27
  %and3 = and i1 %tobool, %tobool2
  ret i1 %and3
}

define i1 @icmp_lshr_and_overshift(i8 %X) {
; CHECK-LABEL: @icmp_lshr_and_overshift(
; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp ugt i8 [[X:%.*]], 31
; CHECK-NEXT:    ret i1 [[TOBOOL]]
;
  %shr = lshr i8 %X, 5
  %and = and i8 %shr, 15
  %tobool = icmp ne i8 %and, 0
  ret i1 %tobool
}

; We shouldn't simplify this because the and uses bits that are shifted in.
define i1 @icmp_ashr_and_overshift(i8 %X) {
; CHECK-LABEL: @icmp_ashr_and_overshift(
; CHECK-NEXT:    [[SHR:%.*]] = ashr i8 [[X:%.*]], 5
; CHECK-NEXT:    [[AND:%.*]] = and i8 [[SHR]], 15
; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp ne i8 [[AND]], 0
; CHECK-NEXT:    ret i1 [[TOBOOL]]
;
  %shr = ashr i8 %X, 5
  %and = and i8 %shr, 15
  %tobool = icmp ne i8 %and, 0
  ret i1 %tobool
}

; PR16244
define i1 @test71(i8* %x) {
; CHECK-LABEL: @test71(
; CHECK-NEXT:    ret i1 false
;
  %a = getelementptr i8, i8* %x, i64 8
  %b = getelementptr inbounds i8, i8* %x, i64 8
  %c = icmp ugt i8* %a, %b
  ret i1 %c
}

define i1 @test71_as1(i8 addrspace(1)* %x) {
; CHECK-LABEL: @test71_as1(
; CHECK-NEXT:    ret i1 false
;
  %a = getelementptr i8, i8 addrspace(1)* %x, i64 8
  %b = getelementptr inbounds i8, i8 addrspace(1)* %x, i64 8
  %c = icmp ugt i8 addrspace(1)* %a, %b
  ret i1 %c
}