; RUN: opt < %s -mtriple=nvptx64-nvidia-cuda -separate-const-offset-from-gep \
; RUN:   -reassociate-geps-verify-no-dead-code -S | FileCheck %s

; Several unit tests for -separate-const-offset-from-gep. The transformation
; heavily relies on TargetTransformInfo, so we put these tests under
; target-specific folders.

%struct.S = type { float, double }

@struct_array = global [1024 x %struct.S] zeroinitializer, align 16
@float_2d_array = global [32 x [32 x float]] zeroinitializer, align 4

; We should not extract any struct field indices, because fields in a struct
; may have different types.
define double* @struct(i32 %i) {
entry:
  %add = add nsw i32 %i, 5
  %idxprom = sext i32 %add to i64
  %p = getelementptr inbounds [1024 x %struct.S], [1024 x %struct.S]* @struct_array, i64 0, i64 %idxprom, i32 1
  ret double* %p
}
; CHECK-LABEL: @struct(
; CHECK: getelementptr [1024 x %struct.S], [1024 x %struct.S]* @struct_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i32 1

; We should be able to trace into sext(a + b) if a + b is non-negative
; (e.g., used as an index of an inbounds GEP) and one of a and b is
; non-negative.
define float* @sext_add(i32 %i, i32 %j) {
entry:
  %0 = add i32 %i, 1
  %1 = sext i32 %0 to i64 ; inbound sext(i + 1) = sext(i) + 1
  %2 = add i32 %j, -2
  ; However, inbound sext(j + -2) != sext(j) + -2, e.g., j = INT_MIN
  %3 = sext i32 %2 to i64
  %p = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %1, i64 %3
  ret float* %p
}
; CHECK-LABEL: @sext_add(
; CHECK-NOT: = add
; CHECK: add i32 %j, -2
; CHECK: sext
; CHECK: getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
; CHECK: getelementptr inbounds float, float* %{{[a-zA-Z0-9]+}}, i64 32

; We should be able to trace into sext/zext if it can be distributed to both
; operands, e.g., sext (add nsw a, b) == add nsw (sext a), (sext b)
;
; This test verifies we can transform
; gep base, a + sext(b +nsw 1), c + zext(d +nuw 1)
; to
; gep base, a + sext(b), c + zext(d); gep ..., 1 * 32 + 1
define float* @ext_add_no_overflow(i64 %a, i32 %b, i64 %c, i32 %d) {
  %b1 = add nsw i32 %b, 1
  %b2 = sext i32 %b1 to i64
  %i = add i64 %a, %b2 ; i = a + sext(b +nsw 1)
  %d1 = add nuw i32 %d, 1
  %d2 = zext i32 %d1 to i64
  %j = add i64 %c, %d2 ; j = c + zext(d +nuw 1)
  %p = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %j
  ret float* %p
}
; CHECK-LABEL: @ext_add_no_overflow(
; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
; CHECK: getelementptr inbounds float, float* [[BASE_PTR]], i64 33

; Verifies we handle nested sext/zext correctly.
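; As the inline comments below spell out, only the 1 in (%a + 1) and the 3 in
; (%a + 3) survive the nested casts. Both sit in the row index of the 32 x 32
; array, so the trailing GEPs checked below should carry offsets of
; 1 * 32 = 32 and 3 * 32 = 96 float elements, respectively.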
define void @sext_zext(i32 %a, i32 %b, float** %out1, float** %out2) {
entry:
  %0 = add nsw nuw i32 %a, 1
  %1 = sext i32 %0 to i48
  %2 = zext i48 %1 to i64 ; zext(sext(a +nsw nuw 1)) = zext(sext(a)) + 1
  %3 = add nsw i32 %b, 2
  %4 = sext i32 %3 to i48
  %5 = zext i48 %4 to i64 ; zext(sext(b +nsw 2)) != zext(sext(b)) + 2
  %p1 = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %2, i64 %5
  store float* %p1, float** %out1
  %6 = add nuw i32 %a, 3
  %7 = zext i32 %6 to i48
  %8 = sext i48 %7 to i64 ; sext(zext(a +nuw 3)) = zext(a +nuw 3) = zext(a) + 3
  %9 = add nsw i32 %b, 4
  %10 = zext i32 %9 to i48
  %11 = sext i48 %10 to i64 ; sext(zext(b +nsw 4)) != zext(b) + 4
  %p2 = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %8, i64 %11
  store float* %p2, float** %out2
  ret void
}
; CHECK-LABEL: @sext_zext(
; CHECK: [[BASE_PTR_1:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
; CHECK: getelementptr float, float* [[BASE_PTR_1]], i64 32
; CHECK: [[BASE_PTR_2:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
; CHECK: getelementptr float, float* [[BASE_PTR_2]], i64 96

; Similar to @ext_add_no_overflow, we should be able to trace into s/zext if
; its operand is an OR and the two operands of the OR have no common bits.
define float* @sext_or(i64 %a, i32 %b) {
entry:
  %b1 = shl i32 %b, 2
  %b2 = or i32 %b1, 1 ; (b << 2) and 1 have no common bits
  %b3 = or i32 %b1, 4 ; (b << 2) and 4 may have common bits
  %b2.ext = zext i32 %b2 to i64
  %b3.ext = sext i32 %b3 to i64
  %i = add i64 %a, %b2.ext
  %j = add i64 %a, %b3.ext
  %p = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %j
  ret float* %p
}
; CHECK-LABEL: @sext_or(
; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
; CHECK: getelementptr inbounds float, float* [[BASE_PTR]], i64 32

; The subexpression (b + 5) is used in both "i = a + (b + 5)" and "*out = b +
; 5". When extracting the constant offset 5, make sure "*out = b + 5" isn't
; affected.
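; The extracted 5 sits in the row index of the 32 x 32 array, so the trailing
; GEP checked below should carry an offset of 5 * 32 = 160 float elements.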
define float* @expr(i64 %a, i64 %b, i64* %out) {
entry:
  %b5 = add i64 %b, 5
  %i = add i64 %b5, %a
  %p = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 0
  store i64 %b5, i64* %out
  ret float* %p
}
; CHECK-LABEL: @expr(
; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 0
; CHECK: getelementptr inbounds float, float* [[BASE_PTR]], i64 160
; CHECK: store i64 %b5, i64* %out

; d + sext(a +nsw (b +nsw (c +nsw 8))) => (d + sext(a) + sext(b) + sext(c)) + 8
define float* @sext_expr(i32 %a, i32 %b, i32 %c, i64 %d) {
entry:
  %0 = add nsw i32 %c, 8
  %1 = add nsw i32 %b, %0
  %2 = add nsw i32 %a, %1
  %3 = sext i32 %2 to i64
  %i = add i64 %d, %3
  %p = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %i
  ret float* %p
}
; CHECK-LABEL: @sext_expr(
; CHECK: sext i32
; CHECK: sext i32
; CHECK: sext i32
; CHECK: getelementptr inbounds float, float* %{{[a-zA-Z0-9]+}}, i64 8

; Verifies we handle "sub" correctly.
define float* @sub(i64 %i, i64 %j) {
  %i2 = sub i64 %i, 5 ; i - 5
  %j2 = sub i64 5, %j ; 5 - j
  %p = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i2, i64 %j2
  ret float* %p
}
; CHECK-LABEL: @sub(
; CHECK: %[[j2:[a-zA-Z0-9]+]] = sub i64 0, %j
; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %[[j2]]
; CHECK: getelementptr inbounds float, float* [[BASE_PTR]], i64 -155

%struct.Packed = type <{ [3 x i32], [8 x i64] }> ; <> means packed

; Verifies we can emit a correct uglygep if the address is not naturally
; aligned.
define i64* @packed_struct(i32 %i, i32 %j) {
entry:
  %s = alloca [1024 x %struct.Packed], align 16
  %add = add nsw i32 %j, 3
  %idxprom = sext i32 %add to i64
  %add1 = add nsw i32 %i, 1
  %idxprom2 = sext i32 %add1 to i64
  %arrayidx3 = getelementptr inbounds [1024 x %struct.Packed], [1024 x %struct.Packed]* %s, i64 0, i64 %idxprom2, i32 1, i64 %idxprom
  ret i64* %arrayidx3
}
; CHECK-LABEL: @packed_struct(
; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [1024 x %struct.Packed], [1024 x %struct.Packed]* %s, i64 0, i64 %{{[a-zA-Z0-9]+}}, i32 1, i64 %{{[a-zA-Z0-9]+}}
; CHECK: [[CASTED_PTR:%[a-zA-Z0-9]+]] = bitcast i64* [[BASE_PTR]] to i8*
; CHECK: %uglygep = getelementptr inbounds i8, i8* [[CASTED_PTR]], i64 100
; CHECK: bitcast i8* %uglygep to i64*

; We shouldn't be able to extract the 8 from "zext(a +nuw (b + 8))",
; because "zext(b + 8) != zext(b) + 8"
define float* @zext_expr(i32 %a, i32 %b) {
entry:
  %0 = add i32 %b, 8
  %1 = add nuw i32 %a, %0
  %i = zext i32 %1 to i64
  %p = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %i
  ret float* %p
}
; CHECK-LABEL: @zext_expr(
; CHECK: getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %i

; Per http://llvm.org/docs/LangRef.html#id181, the indices of an off-bound GEP
; should be considered sign-extended to the pointer size. Therefore,
; gep base, (add i32 a, b) != gep (gep base, i32 a), i32 b
; because
; sext(a + b) != sext(a) + sext(b)
;
; This test verifies we do not illegitimately extract the 8 from
; gep base, (i32 a + 8)
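; Note that the index is still canonicalized to the pointer width (hence the
; i64 index in the CHECK line below), but no constant offset may be split off.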
define float* @i32_add(i32 %a) {
entry:
  %i = add i32 %a, 8
  %p = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i32 %i
  ret float* %p
}
; CHECK-LABEL: @i32_add(
; CHECK: getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %{{[a-zA-Z0-9]+}}
; CHECK-NOT: getelementptr

; Verifies that we compute the correct constant offset when the index is
; sign-extended and then zero-extended. The old version of our code failed to
; handle this case because it simply computed the constant offset as the
; sign-extended value of the constant part of the GEP index.
define float* @apint(i1 %a) {
entry:
  %0 = add nsw nuw i1 %a, 1
  %1 = sext i1 %0 to i4
  %2 = zext i4 %1 to i64 ; zext (sext i1 1 to i4) to i64 = 15
  %p = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %2
  ret float* %p
}
; CHECK-LABEL: @apint(
; CHECK: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %{{[a-zA-Z0-9]+}}
; CHECK: getelementptr float, float* [[BASE_PTR]], i64 15

; Do not trace into binary operators other than ADD, SUB, and OR.
define float* @and(i64 %a) {
entry:
  %0 = shl i64 %a, 2
  %1 = and i64 %0, 1
  %p = getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array, i64 0, i64 0, i64 %1
  ret float* %p
}
; CHECK-LABEL: @and(
; CHECK: getelementptr [32 x [32 x float]], [32 x [32 x float]]* @float_2d_array
; CHECK-NOT: getelementptr

; The code that rebuilds an OR expression used to be buggy, and failed on this
; test.
define float* @shl_add_or(i64 %a, float* %ptr) {
; CHECK-LABEL: @shl_add_or(
entry:
  %shl = shl i64 %a, 2
  %add = add i64 %shl, 12
  %or = or i64 %add, 1
; CHECK: [[OR:%or[0-9]*]] = add i64 %shl, 1
  ; ((a << 2) + 12) and 1 have no common bits. Therefore,
  ; SeparateConstOffsetFromGEP is able to extract the 12.
  ; TODO(jingyue): We could reassociate the expression to combine 12 and 1.
  %p = getelementptr float, float* %ptr, i64 %or
; CHECK: [[PTR:%[a-zA-Z0-9]+]] = getelementptr float, float* %ptr, i64 [[OR]]
; CHECK: getelementptr float, float* [[PTR]], i64 12
  ret float* %p
; CHECK-NEXT: ret
}

; The source code used to be buggy in checking
; (AccumulativeByteOffset % ElementTypeSizeOfGEP == 0)
; where AccumulativeByteOffset is signed but ElementTypeSizeOfGEP is unsigned.
; The compiler would promote AccumulativeByteOffset to unsigned, causing
; unexpected results. For example, while -64 % (int64_t)24 != 0,
; -64 % (uint64_t)24 == 0.
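; A minimal C sketch of the promotion at issue (the values are illustrative
; only and are not derived from the struct sizes below):
;   int64_t off = -64;
;   off % (int64_t)24;   /* == -16: correctly reported as not divisible */
;   off % (uint64_t)24;  /* == 0: off is converted to uint64_t before the % */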
%struct3 = type { i64, i32 }
%struct2 = type { %struct3, i32 }
%struct1 = type { i64, %struct2 }
%struct0 = type { i32, i32, i64*, [100 x %struct1] }
define %struct2* @sign_mod_unsign(%struct0* %ptr, i64 %idx) {
; CHECK-LABEL: @sign_mod_unsign(
entry:
  %arrayidx = add nsw i64 %idx, -2
; CHECK-NOT: add
  %ptr2 = getelementptr inbounds %struct0, %struct0* %ptr, i64 0, i32 3, i64 %arrayidx, i32 1
; CHECK: [[PTR:%[a-zA-Z0-9]+]] = getelementptr %struct0, %struct0* %ptr, i64 0, i32 3, i64 %idx, i32 1
; CHECK: getelementptr inbounds %struct2, %struct2* [[PTR]], i64 -3
  ret %struct2* %ptr2
; CHECK-NEXT: ret
}

; Check that we can see through an explicit trunc() instruction.
define %struct2* @trunk_explicit(%struct0* %ptr, i64 %idx) {
; CHECK-LABEL: @trunk_explicit(
entry:
  %idx0 = trunc i64 1 to i32
  %ptr2 = getelementptr inbounds %struct0, %struct0* %ptr, i32 %idx0, i32 3, i64 %idx, i32 1
; CHECK-NOT: trunc
; CHECK: [[PTR:%[a-zA-Z0-9]+]] = getelementptr %struct0, %struct0* %ptr, i64 0, i32 3, i64 %idx, i32 1
; CHECK: getelementptr inbounds %struct2, %struct2* [[PTR]], i64 151
  ret %struct2* %ptr2
; CHECK-NEXT: ret
}

; Check that we can deal with a trunc inserted by
; canonicalizeArrayIndicesToPointerSize() when the size of an index is larger
; than that of the pointer.
define %struct2* @trunk_long_idx(%struct0* %ptr, i64 %idx) {
; CHECK-LABEL: @trunk_long_idx(
entry:
  %ptr2 = getelementptr inbounds %struct0, %struct0* %ptr, i65 1, i32 3, i64 %idx, i32 1
; CHECK-NOT: trunc
; CHECK: [[PTR:%[a-zA-Z0-9]+]] = getelementptr %struct0, %struct0* %ptr, i64 0, i32 3, i64 %idx, i32 1
; CHECK: getelementptr inbounds %struct2, %struct2* [[PTR]], i64 151
  ret %struct2* %ptr2
; CHECK-NEXT: ret
}