; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_20 \
; RUN:     | FileCheck %s --check-prefix=PTX
; RUN: opt < %s -mtriple=nvptx64-nvidia-cuda -S -separate-const-offset-from-gep \
; RUN:       -reassociate-geps-verify-no-dead-code -gvn \
; RUN:     | FileCheck %s --check-prefix=IR

; Verifies the SeparateConstOffsetFromGEP pass.
; The following code computes
; *output = array[x][y] + array[x][y+1] + array[x+1][y] + array[x+1][y+1]
;
; We expect SeparateConstOffsetFromGEP to transform it to
;
; float *base = &array[x][y];
; *output = base[0] + base[1] + base[32] + base[33];
;
; so the backend can emit PTX that uses fewer virtual registers.

; 32x32 float table in shared memory (addrspace(3)), indexed by all test functions.
@array = internal addrspace(3) constant [32 x [32 x float]] zeroinitializer, align 4

; Sums array[x][y], array[x][y+1], array[x+1][y], array[x+1][y+1];
; each index is formed as sext(i32 add), i.e. the add happens before the sext.
define void @sum_of_array(i32 %x, i32 %y, float* nocapture %output) {
.preheader:
  %0 = sext i32 %y to i64
  %1 = sext i32 %x to i64
  %2 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
  %3 = addrspacecast float addrspace(3)* %2 to float*
  %4 = load float, float* %3, align 4
  %5 = fadd float %4, 0.000000e+00
  %6 = add i32 %y, 1
  %7 = sext i32 %6 to i64
  %8 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %7
  %9 = addrspacecast float addrspace(3)* %8 to float*
  %10 = load float, float* %9, align 4
  %11 = fadd float %5, %10
  %12 = add i32 %x, 1
  %13 = sext i32 %12 to i64
  %14 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %0
  %15 = addrspacecast float addrspace(3)* %14 to float*
  %16 = load float, float* %15, align 4
  %17 = fadd float %11, %16
  %18 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %7
  %19 = addrspacecast float addrspace(3)* %18 to float*
  %20 = load float, float* %19, align 4
  %21 = fadd float %17, %20
  store float %21, float* %output, align 4
  ret void
}
; PTX-LABEL: sum_of_array(
; PTX-DAG: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG:%(rd|r)[0-9]+]]{{\]}}
; PTX-DAG: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+4{{\]}}
; PTX-DAG: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+128{{\]}}
; PTX-DAG: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+132{{\]}}

; IR-LABEL: @sum_of_array(
; TODO: GVN is unable to preserve the "inbounds" keyword on the first GEP. Need
; some infrastructure changes to enable such optimizations.
; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i64 1
; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i64 32
; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i64 33

; @sum_of_array2 is very similar to @sum_of_array. The only difference is in
; the order of "sext" and "add" when computing the array indices. @sum_of_array
; computes add before sext, e.g., array[sext(x + 1)][sext(y + 1)], while
; @sum_of_array2 computes sext before add,
; e.g., array[sext(x) + 1][sext(y) + 1]. SeparateConstOffsetFromGEP should be
; able to extract constant offsets from both forms.
; Same sum as @sum_of_array, but the +1 is added after the sext (i64 add).
define void @sum_of_array2(i32 %x, i32 %y, float* nocapture %output) {
.preheader:
  %0 = sext i32 %y to i64
  %1 = sext i32 %x to i64
  %2 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
  %3 = addrspacecast float addrspace(3)* %2 to float*
  %4 = load float, float* %3, align 4
  %5 = fadd float %4, 0.000000e+00
  %6 = add i64 %0, 1
  %7 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %6
  %8 = addrspacecast float addrspace(3)* %7 to float*
  %9 = load float, float* %8, align 4
  %10 = fadd float %5, %9
  %11 = add i64 %1, 1
  %12 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %0
  %13 = addrspacecast float addrspace(3)* %12 to float*
  %14 = load float, float* %13, align 4
  %15 = fadd float %10, %14
  %16 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %6
  %17 = addrspacecast float addrspace(3)* %16 to float*
  %18 = load float, float* %17, align 4
  %19 = fadd float %15, %18
  store float %19, float* %output, align 4
  ret void
}
; PTX-LABEL: sum_of_array2(
; PTX-DAG: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG:%(rd|r)[0-9]+]]{{\]}}
; PTX-DAG: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+4{{\]}}
; PTX-DAG: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+128{{\]}}
; PTX-DAG: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+132{{\]}}

; IR-LABEL: @sum_of_array2(
; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i64 1
; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i64 32
; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i64 33


; This function loads
;   array[zext(x)][zext(y)]
;   array[zext(x)][zext(y +nuw 1)]
;   array[zext(x +nuw 1)][zext(y)]
;   array[zext(x +nuw 1)][zext(y +nuw 1)].
;
; This function is similar to @sum_of_array, but it
; 1) extends array indices using zext instead of sext;
; 2) annotates the addition with "nuw"; otherwise, zext(x + 1) => zext(x) + 1
;    may be invalid.
; Same sum, but indices use zext extension with "add nuw" on the i32 increments.
define void @sum_of_array3(i32 %x, i32 %y, float* nocapture %output) {
.preheader:
  %0 = zext i32 %y to i64
  %1 = zext i32 %x to i64
  %2 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
  %3 = addrspacecast float addrspace(3)* %2 to float*
  %4 = load float, float* %3, align 4
  %5 = fadd float %4, 0.000000e+00
  %6 = add nuw i32 %y, 1
  %7 = zext i32 %6 to i64
  %8 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %7
  %9 = addrspacecast float addrspace(3)* %8 to float*
  %10 = load float, float* %9, align 4
  %11 = fadd float %5, %10
  %12 = add nuw i32 %x, 1
  %13 = zext i32 %12 to i64
  %14 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %0
  %15 = addrspacecast float addrspace(3)* %14 to float*
  %16 = load float, float* %15, align 4
  %17 = fadd float %11, %16
  %18 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %7
  %19 = addrspacecast float addrspace(3)* %18 to float*
  %20 = load float, float* %19, align 4
  %21 = fadd float %17, %20
  store float %21, float* %output, align 4
  ret void
}
; PTX-LABEL: sum_of_array3(
; PTX-DAG: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG:%(rd|r)[0-9]+]]{{\]}}
; PTX-DAG: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+4{{\]}}
; PTX-DAG: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+128{{\]}}
; PTX-DAG: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+132{{\]}}

; IR-LABEL: @sum_of_array3(
; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i64 1
; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i64 32
; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i64 33


; This function loads
;   array[zext(x)][zext(y)]
;   array[zext(x)][zext(y) + 1]
;   array[zext(x) + 1][zext(y)]
;   array[zext(x) + 1][zext(y) + 1].
;
; We expect the generated code to reuse the computation of
; &array[zext(x)][zext(y)]. See the expected IR and PTX for details.
; Same sum, but with zext index extension and the +1 added after the zext (i64 add).
define void @sum_of_array4(i32 %x, i32 %y, float* nocapture %output) {
.preheader:
  %0 = zext i32 %y to i64
  %1 = zext i32 %x to i64
  %2 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0
  %3 = addrspacecast float addrspace(3)* %2 to float*
  %4 = load float, float* %3, align 4
  %5 = fadd float %4, 0.000000e+00
  %6 = add i64 %0, 1
  %7 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %6
  %8 = addrspacecast float addrspace(3)* %7 to float*
  %9 = load float, float* %8, align 4
  %10 = fadd float %5, %9
  %11 = add i64 %1, 1
  %12 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %0
  %13 = addrspacecast float addrspace(3)* %12 to float*
  %14 = load float, float* %13, align 4
  %15 = fadd float %10, %14
  %16 = getelementptr inbounds [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %11, i64 %6
  %17 = addrspacecast float addrspace(3)* %16 to float*
  %18 = load float, float* %17, align 4
  %19 = fadd float %15, %18
  store float %19, float* %output, align 4
  ret void
}
; PTX-LABEL: sum_of_array4(
; PTX-DAG: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG:%(rd|r)[0-9]+]]{{\]}}
; PTX-DAG: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+4{{\]}}
; PTX-DAG: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+128{{\]}}
; PTX-DAG: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+132{{\]}}

; IR-LABEL: @sum_of_array4(
; IR: [[BASE_PTR:%[a-zA-Z0-9]+]] = getelementptr [32 x [32 x float]], [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %{{[a-zA-Z0-9]+}}, i64 %{{[a-zA-Z0-9]+}}
; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i64 1
; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i64 32
; IR: getelementptr inbounds float, float addrspace(3)* [[BASE_PTR]], i64 33


; The source code is:
;   p0 = &input[sext(x + y)];
;   p1 = &input[sext(x + (y + 5))];
;
; Without reuniting extensions, SeparateConstOffsetFromGEP would emit
;   p0 = &input[sext(x + y)];
;   t1 = &input[sext(x) + sext(y)];
;   p1 = &t1[5];
;
; With reuniting extensions, it merges p0 and t1 and thus emits
;   p0 = &input[sext(x + y)];
;   p1 = &p0[5];
; Checks that sext(x + (y + 5)) is reunited with sext(x + y) so %p1 becomes a
; constant-offset GEP off %p0 (see the comment block above for the derivation).
define void @reunion(i32 %x, i32 %y, float* %input) {
; IR-LABEL: @reunion(
; PTX-LABEL: reunion(
entry:
  %xy = add nsw i32 %x, %y
  %0 = sext i32 %xy to i64
  %p0 = getelementptr inbounds float, float* %input, i64 %0
  %v0 = load float, float* %p0, align 4
; PTX: ld.f32 %f{{[0-9]+}}, {{\[}}[[p0:%rd[0-9]+]]{{\]}}
  call void @use(float %v0)

  %y5 = add nsw i32 %y, 5
  %xy5 = add nsw i32 %x, %y5
  %1 = sext i32 %xy5 to i64
  %p1 = getelementptr inbounds float, float* %input, i64 %1
; IR: getelementptr inbounds float, float* %p0, i64 5
  %v1 = load float, float* %p1, align 4
; PTX: ld.f32 %f{{[0-9]+}}, {{\[}}[[p0]]+20{{\]}}
  call void @use(float %v1)

  ret void
}

; External sink for loaded values; keeps the loads in @reunion from being DCE'd.
declare void @use(float)