• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Test target codegen - host bc file has to be created first.
2 // RUN: %clang_cc1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
3 // RUN: %clang_cc1 -verify -fopenmp -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - -disable-llvm-optzns | FileCheck %s --check-prefix CHECK --check-prefix CHECK-64
4 // RUN: %clang_cc1 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm-bc %s -o %t-x86-host.bc
5 // RUN: %clang_cc1 -verify -fopenmp -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - -disable-llvm-optzns | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32
6 // RUN: %clang_cc1 -verify -fopenmp -fexceptions -fcxx-exceptions -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - -disable-llvm-optzns | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32
7 // expected-no-diagnostics
8 
9 #ifndef HEADER
10 #define HEADER
11 
12 // Check that the execution mode of all 7 target regions is set to Generic Mode.
13 // CHECK-DAG: [[NONSPMD:@.+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 0, i8* getelementptr inbounds
14 // CHECK-DAG: [[UNKNOWN:@.+]] = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 2, i32 0, i8* getelementptr inbounds
15 // CHECK-DAG: {{@__omp_offloading_.+l45}}_exec_mode = weak constant i8 0
16 // CHECK-DAG: {{@__omp_offloading_.+l123}}_exec_mode = weak constant i8 1
17 // CHECK-DAG: {{@__omp_offloading_.+l200}}_exec_mode = weak constant i8 1
18 // CHECK-DAG: {{@__omp_offloading_.+l310}}_exec_mode = weak constant i8 1
19 // CHECK-DAG: {{@__omp_offloading_.+l347}}_exec_mode = weak constant i8 1
20 // CHECK-DAG: {{@__omp_offloading_.+l365}}_exec_mode = weak constant i8 1
21 // CHECK-DAG: {{@__omp_offloading_.+l331}}_exec_mode = weak constant i8 1
22 
23 __thread int id;
24 
25 int baz(int f, double &a);
26 
27 template <typename tx, typename ty>
28 struct TT {
29   tx X;
30   ty Y;
operator []TT31   tx &operator[](int i) { return X; }
32 };
33 
34 // CHECK: define weak void @__omp_offloading_{{.+}}_{{.+}}targetBar{{.+}}_l45(i32* [[PTR1:%.+]], i32** nonnull align {{[0-9]+}} dereferenceable{{.*}} [[PTR2_REF:%.+]])
35 // CHECK: store i32* [[PTR1]], i32** [[PTR1_ADDR:%.+]],
36 // CHECK: store i32** [[PTR2_REF]], i32*** [[PTR2_REF_PTR:%.+]],
37 // CHECK: [[PTR2_REF:%.+]] = load i32**, i32*** [[PTR2_REF_PTR]],
38 // CHECK: call void @__kmpc_spmd_kernel_init(
39 // CHECK: call void @__kmpc_data_sharing_init_stack_spmd()
40 // CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @{{.+}})
41 // CHECK: store i32 [[GTID]], i32* [[THREADID:%.+]],
42 // CHECK: call void @{{.+}}(i32* [[THREADID]], i32* %{{.+}}, i32** [[PTR1_ADDR]], i32** [[PTR2_REF]])
43 // CHECK: call void @__kmpc_spmd_kernel_deinit_v2(i16 1)
targetBar(int * Ptr1,int * Ptr2)44 void targetBar(int *Ptr1, int *Ptr2) {
45 #pragma omp target map(Ptr1[:0], Ptr2)
46 #pragma omp parallel num_threads(2)
47   *Ptr1 = *Ptr2;
48 }
49 
foo(int n)50 int foo(int n) {
51   int a = 0;
52   short aa = 0;
53   float b[10];
54   float bn[n];
55   double c[5][10];
56   double cn[5][n];
57   TT<long long, char> d;
58 
59 // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l123}}_worker()
60 // CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
61 // CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
62 // CHECK: store i8* null, i8** [[OMP_WORK_FN]],
63 // CHECK: store i8 0, i8* [[OMP_EXEC_STATUS]],
64 // CHECK: br label {{%?}}[[AWAIT_WORK:.+]]
65 //
66 // CHECK: [[AWAIT_WORK]]
67 // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
68 // CHECK: [[WORK:%.+]] = load i8*, i8** [[OMP_WORK_FN]],
69 // CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i8* [[WORK]], null
70 // CHECK: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]]
71 //
72 // CHECK: [[SEL_WORKERS]]
73 // CHECK: [[ST:%.+]] = load i8, i8* [[OMP_EXEC_STATUS]],
74 // CHECK: [[IS_ACTIVE:%.+]] = icmp ne i8 [[ST]], 0
75 // CHECK: br i1 [[IS_ACTIVE]], label {{%?}}[[EXEC_PARALLEL:.+]], label {{%?}}[[BAR_PARALLEL:.+]]
76 //
77 // CHECK: [[EXEC_PARALLEL]]
78 // CHECK: br label {{%?}}[[TERM_PARALLEL:.+]]
79 //
80 // CHECK: [[TERM_PARALLEL]]
81 // CHECK: br label {{%?}}[[BAR_PARALLEL]]
82 //
83 // CHECK: [[BAR_PARALLEL]]
84 // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
85 // CHECK: br label {{%?}}[[AWAIT_WORK]]
86 //
87 // CHECK: [[EXIT]]
88 // CHECK: ret void
89 
90 // CHECK: define {{.*}}void [[T1:@__omp_offloading_.+foo.+l123]]()
91 // CHECK-DAG: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
92 // CHECK-DAG: [[NTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
93 // CHECK-DAG: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
94 // CHECK-DAG: [[TH_LIMIT:%.+]] = sub nuw i32 [[NTH]], [[WS]]
95 // CHECK: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[TH_LIMIT]]
96 // CHECK: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[CHECK_MASTER:.+]]
97 //
98 // CHECK: [[WORKER]]
99 // CHECK: {{call|invoke}} void [[T1]]_worker()
100 // CHECK: br label {{%?}}[[EXIT:.+]]
101 //
102 // CHECK: [[CHECK_MASTER]]
103 // CHECK-DAG: [[CMTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
104 // CHECK-DAG: [[CMNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
105 // CHECK-DAG: [[CMWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
106 // CHECK: [[IS_MASTER:%.+]] = icmp eq i32 [[CMTID]],
107 // CHECK: br i1 [[IS_MASTER]], label {{%?}}[[MASTER:.+]], label {{%?}}[[EXIT]]
108 //
109 // CHECK: [[MASTER]]
110 // CHECK-DAG: [[MNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
111 // CHECK-DAG: [[MWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
112 // CHECK: [[MTMP1:%.+]] = sub nuw i32 [[MNTH]], [[MWS]]
113 // CHECK: call void @__kmpc_kernel_init(i32 [[MTMP1]]
114 // CHECK: br label {{%?}}[[TERMINATE:.+]]
115 //
116 // CHECK: [[TERMINATE]]
117 // CHECK: call void @__kmpc_kernel_deinit(
118 // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
119 // CHECK: br label {{%?}}[[EXIT]]
120 //
121 // CHECK: [[EXIT]]
122 // CHECK: ret void
123 #pragma omp target
124   {
125   }
126 
127 // CHECK-NOT: define {{.*}}void [[T2:@__omp_offloading_.+foo.+]]_worker()
128 #pragma omp target if (0)
129   {
130   }
131 
132 // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l200}}_worker()
133 // CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
134 // CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
135 // CHECK: store i8* null, i8** [[OMP_WORK_FN]],
136 // CHECK: store i8 0, i8* [[OMP_EXEC_STATUS]],
137 // CHECK: br label {{%?}}[[AWAIT_WORK:.+]]
138 //
139 // CHECK: [[AWAIT_WORK]]
140 // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
141 // CHECK: [[WORK:%.+]] = load i8*, i8** [[OMP_WORK_FN]],
142 // CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i8* [[WORK]], null
143 // CHECK: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]]
144 //
145 // CHECK: [[SEL_WORKERS]]
146 // CHECK: [[ST:%.+]] = load i8, i8* [[OMP_EXEC_STATUS]],
147 // CHECK: [[IS_ACTIVE:%.+]] = icmp ne i8 [[ST]], 0
148 // CHECK: br i1 [[IS_ACTIVE]], label {{%?}}[[EXEC_PARALLEL:.+]], label {{%?}}[[BAR_PARALLEL:.+]]
149 //
150 // CHECK: [[EXEC_PARALLEL]]
151 // CHECK: br label {{%?}}[[TERM_PARALLEL:.+]]
152 //
153 // CHECK: [[TERM_PARALLEL]]
154 // CHECK: br label {{%?}}[[BAR_PARALLEL]]
155 //
156 // CHECK: [[BAR_PARALLEL]]
157 // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
158 // CHECK: br label {{%?}}[[AWAIT_WORK]]
159 //
160 // CHECK: [[EXIT]]
161 // CHECK: ret void
162 
163 // CHECK: define {{.*}}void [[T2:@__omp_offloading_.+foo.+l200]](i[[SZ:32|64]] [[ARG1:%[a-zA-Z_]+]])
164 // CHECK: [[AA_ADDR:%.+]] = alloca i[[SZ]],
165 // CHECK: store i[[SZ]] [[ARG1]], i[[SZ]]* [[AA_ADDR]],
166 // CHECK: [[AA_CADDR:%.+]] = bitcast i[[SZ]]* [[AA_ADDR]] to i16*
167 // CHECK-DAG: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
168 // CHECK-DAG: [[NTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
169 // CHECK-DAG: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
170 // CHECK-DAG: [[TH_LIMIT:%.+]] = sub nuw i32 [[NTH]], [[WS]]
171 // CHECK: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[TH_LIMIT]]
172 // CHECK: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[CHECK_MASTER:.+]]
173 //
174 // CHECK: [[WORKER]]
175 // CHECK: {{call|invoke}} void [[T2]]_worker()
176 // CHECK: br label {{%?}}[[EXIT:.+]]
177 //
178 // CHECK: [[CHECK_MASTER]]
179 // CHECK-DAG: [[CMTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
180 // CHECK-DAG: [[CMNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
181 // CHECK-DAG: [[CMWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
182 // CHECK: [[IS_MASTER:%.+]] = icmp eq i32 [[CMTID]],
183 // CHECK: br i1 [[IS_MASTER]], label {{%?}}[[MASTER:.+]], label {{%?}}[[EXIT]]
184 //
185 // CHECK: [[MASTER]]
186 // CHECK-DAG: [[MNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
187 // CHECK-DAG: [[MWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
188 // CHECK: [[MTMP1:%.+]] = sub nuw i32 [[MNTH]], [[MWS]]
189 // CHECK: call void @__kmpc_kernel_init(i32 [[MTMP1]]
190 // CHECK: load i16, i16* [[AA_CADDR]],
191 // CHECK: br label {{%?}}[[TERMINATE:.+]]
192 //
193 // CHECK: [[TERMINATE]]
194 // CHECK: call void @__kmpc_kernel_deinit(
195 // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
196 // CHECK: br label {{%?}}[[EXIT]]
197 //
198 // CHECK: [[EXIT]]
199 // CHECK: ret void
200 #pragma omp target if (1)
201   {
202     aa += 1;
203     aa += 2;
204   }
205 
206 // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+foo.+l310}}_worker()
207 // CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
208 // CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
209 // CHECK: store i8* null, i8** [[OMP_WORK_FN]],
210 // CHECK: store i8 0, i8* [[OMP_EXEC_STATUS]],
211 // CHECK: br label {{%?}}[[AWAIT_WORK:.+]]
212 //
213 // CHECK: [[AWAIT_WORK]]
214 // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
215 // CHECK: [[WORK:%.+]] = load i8*, i8** [[OMP_WORK_FN]],
216 // CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i8* [[WORK]], null
217 // CHECK: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]]
218 //
219 // CHECK: [[SEL_WORKERS]]
220 // CHECK: [[ST:%.+]] = load i8, i8* [[OMP_EXEC_STATUS]],
221 // CHECK: [[IS_ACTIVE:%.+]] = icmp ne i8 [[ST]], 0
222 // CHECK: br i1 [[IS_ACTIVE]], label {{%?}}[[EXEC_PARALLEL:.+]], label {{%?}}[[BAR_PARALLEL:.+]]
223 //
224 // CHECK: [[EXEC_PARALLEL]]
225 // CHECK: br label {{%?}}[[TERM_PARALLEL:.+]]
226 //
227 // CHECK: [[TERM_PARALLEL]]
228 // CHECK: br label {{%?}}[[BAR_PARALLEL]]
229 //
230 // CHECK: [[BAR_PARALLEL]]
231 // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
232 // CHECK: br label {{%?}}[[AWAIT_WORK]]
233 //
234 // CHECK: [[EXIT]]
235 // CHECK: ret void
236 
237 // CHECK: define {{.*}}void [[T3:@__omp_offloading_.+foo.+l310]](i[[SZ]]
238 // Create local storage for each capture.
239 // CHECK:    [[LOCAL_A:%.+]] = alloca i[[SZ]]
240 // CHECK:    [[LOCAL_B:%.+]] = alloca [10 x float]*
241 // CHECK:    [[LOCAL_VLA1:%.+]] = alloca i[[SZ]]
242 // CHECK:    [[LOCAL_BN:%.+]] = alloca float*
243 // CHECK:    [[LOCAL_C:%.+]] = alloca [5 x [10 x double]]*
244 // CHECK:    [[LOCAL_VLA2:%.+]] = alloca i[[SZ]]
245 // CHECK:    [[LOCAL_VLA3:%.+]] = alloca i[[SZ]]
246 // CHECK:    [[LOCAL_CN:%.+]] = alloca double*
247 // CHECK:    [[LOCAL_D:%.+]] = alloca [[TT:%.+]]*
248 // CHECK-DAG: store i[[SZ]] [[ARG_A:%.+]], i[[SZ]]* [[LOCAL_A]]
249 // CHECK-DAG: store [10 x float]* [[ARG_B:%.+]], [10 x float]** [[LOCAL_B]]
250 // CHECK-DAG: store i[[SZ]] [[ARG_VLA1:%.+]], i[[SZ]]* [[LOCAL_VLA1]]
251 // CHECK-DAG: store float* [[ARG_BN:%.+]], float** [[LOCAL_BN]]
252 // CHECK-DAG: store [5 x [10 x double]]* [[ARG_C:%.+]], [5 x [10 x double]]** [[LOCAL_C]]
253 // CHECK-DAG: store i[[SZ]] [[ARG_VLA2:%.+]], i[[SZ]]* [[LOCAL_VLA2]]
254 // CHECK-DAG: store i[[SZ]] [[ARG_VLA3:%.+]], i[[SZ]]* [[LOCAL_VLA3]]
255 // CHECK-DAG: store double* [[ARG_CN:%.+]], double** [[LOCAL_CN]]
256 // CHECK-DAG: store [[TT]]* [[ARG_D:%.+]], [[TT]]** [[LOCAL_D]]
257 //
258 // CHECK-64-DAG: [[REF_A:%.+]] = bitcast i64* [[LOCAL_A]] to i32*
259 // CHECK-DAG:    [[REF_B:%.+]] = load [10 x float]*, [10 x float]** [[LOCAL_B]],
260 // CHECK-DAG:    [[VAL_VLA1:%.+]] = load i[[SZ]], i[[SZ]]* [[LOCAL_VLA1]],
261 // CHECK-DAG:    [[REF_BN:%.+]] = load float*, float** [[LOCAL_BN]],
262 // CHECK-DAG:    [[REF_C:%.+]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[LOCAL_C]],
263 // CHECK-DAG:    [[VAL_VLA2:%.+]] = load i[[SZ]], i[[SZ]]* [[LOCAL_VLA2]],
264 // CHECK-DAG:    [[VAL_VLA3:%.+]] = load i[[SZ]], i[[SZ]]* [[LOCAL_VLA3]],
265 // CHECK-DAG:    [[REF_CN:%.+]] = load double*, double** [[LOCAL_CN]],
266 // CHECK-DAG:    [[REF_D:%.+]] = load [[TT]]*, [[TT]]** [[LOCAL_D]],
267 //
268 // CHECK-DAG: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
269 // CHECK-DAG: [[NTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
270 // CHECK-DAG: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
271 // CHECK-DAG: [[TH_LIMIT:%.+]] = sub nuw i32 [[NTH]], [[WS]]
272 // CHECK: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[TH_LIMIT]]
273 // CHECK: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[CHECK_MASTER:.+]]
274 //
275 // CHECK: [[WORKER]]
276 // CHECK: {{call|invoke}} void [[T3]]_worker()
277 // CHECK: br label {{%?}}[[EXIT:.+]]
278 //
279 // CHECK: [[CHECK_MASTER]]
280 // CHECK-DAG: [[CMTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
281 // CHECK-DAG: [[CMNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
282 // CHECK-DAG: [[CMWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
283 // CHECK: [[IS_MASTER:%.+]] = icmp eq i32 [[CMTID]],
284 // CHECK: br i1 [[IS_MASTER]], label {{%?}}[[MASTER:.+]], label {{%?}}[[EXIT]]
285 //
286 // CHECK: [[MASTER]]
287 // CHECK-DAG: [[MNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
288 // CHECK-DAG: [[MWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
289 // CHECK: [[MTMP1:%.+]] = sub nuw i32 [[MNTH]], [[MWS]]
290 // CHECK: call void @__kmpc_kernel_init(i32 [[MTMP1]]
291 //
292 // Use captures.
293 // CHECK-64-DAG:  load i32, i32* [[REF_A]]
294 // CHECK-32-DAG:  load i32, i32* [[LOCAL_A]]
295 // CHECK-DAG:  getelementptr inbounds [10 x float], [10 x float]* [[REF_B]], i[[SZ]] 0, i[[SZ]] 2
296 // CHECK-DAG:  getelementptr inbounds float, float* [[REF_BN]], i[[SZ]] 3
297 // CHECK-DAG:  getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[REF_C]], i[[SZ]] 0, i[[SZ]] 1
298 // CHECK-DAG:  getelementptr inbounds double, double* [[REF_CN]], i[[SZ]] %{{.+}}
299 // CHECK-DAG:     getelementptr inbounds [[TT]], [[TT]]* [[REF_D]], i32 0, i32 0
300 //
301 // CHECK: br label {{%?}}[[TERMINATE:.+]]
302 //
303 // CHECK: [[TERMINATE]]
304 // CHECK: call void @__kmpc_kernel_deinit(
305 // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
306 // CHECK: br label {{%?}}[[EXIT]]
307 //
308 // CHECK: [[EXIT]]
309 // CHECK: ret void
310 #pragma omp target if (n > 20)
311   {
312     a += 1;
313     b[2] += 1.0;
314     bn[3] += 1.0;
315     c[1][2] += 1.0;
316     cn[1][3] += 1.0;
317     d.X += 1;
318     d.Y += 1;
319     d[0] += 1;
320   }
321 
322   return a;
323 }
324 
325 template <typename tx>
ftemplate(int n)326 tx ftemplate(int n) {
327   tx a = 0;
328   short aa = 0;
329   tx b[10];
330 
331 #pragma omp target if (n > 40)
332   {
333     a += 1;
334     aa += 1;
335     b[2] += 1;
336   }
337 
338   return a;
339 }
340 
fstatic(int n)341 static int fstatic(int n) {
342   int a = 0;
343   short aa = 0;
344   char aaa = 0;
345   int b[10];
346 
347 #pragma omp target if (n > 50)
348   {
349     a += 1;
350     aa += 1;
351     aaa += 1;
352     b[2] += 1;
353   }
354 
355   return a;
356 }
357 
358 struct S1 {
359   double a;
360 
r1S1361   int r1(int n) {
362     int b = n + 1;
363     short int c[2][n];
364 
365 #pragma omp target if (n > 60)
366     {
367       this->a = (double)b + 1.5;
368       c[1][1] = ++a;
369       baz(a, a);
370     }
371 
372     return c[1][1] + (int)b;
373   }
374 };
375 
bar(int n)376 int bar(int n) {
377   int a = 0;
378 
379   a += foo(n);
380 
381   S1 S;
382   a += S.r1(n);
383 
384   a += fstatic(n);
385 
386   a += ftemplate<int>(n);
387 
388   return a;
389 }
390 
baz(int f,double & a)391 int baz(int f, double &a) {
392 #pragma omp parallel
393   f = 2 + a;
394   return f;
395 }
396 
397 // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+static.+347}}_worker()
398 // CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
399 // CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
400 // CHECK: store i8* null, i8** [[OMP_WORK_FN]],
401 // CHECK: store i8 0, i8* [[OMP_EXEC_STATUS]],
402 // CHECK: br label {{%?}}[[AWAIT_WORK:.+]]
403 //
404 // CHECK: [[AWAIT_WORK]]
405 // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
406 // CHECK: [[WORK:%.+]] = load i8*, i8** [[OMP_WORK_FN]],
407 // CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i8* [[WORK]], null
408 // CHECK: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]]
409 //
410 // CHECK: [[SEL_WORKERS]]
411 // CHECK: [[ST:%.+]] = load i8, i8* [[OMP_EXEC_STATUS]],
412 // CHECK: [[IS_ACTIVE:%.+]] = icmp ne i8 [[ST]], 0
413 // CHECK: br i1 [[IS_ACTIVE]], label {{%?}}[[EXEC_PARALLEL:.+]], label {{%?}}[[BAR_PARALLEL:.+]]
414 //
415 // CHECK: [[EXEC_PARALLEL]]
416 // CHECK: br label {{%?}}[[TERM_PARALLEL:.+]]
417 //
418 // CHECK: [[TERM_PARALLEL]]
419 // CHECK: br label {{%?}}[[BAR_PARALLEL]]
420 //
421 // CHECK: [[BAR_PARALLEL]]
422 // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
423 // CHECK: br label {{%?}}[[AWAIT_WORK]]
424 //
425 // CHECK: [[EXIT]]
426 // CHECK: ret void
427 
428 // CHECK: define {{.*}}void [[T4:@__omp_offloading_.+static.+l347]](i[[SZ]]
429 // Create local storage for each capture.
430 // CHECK:  [[LOCAL_A:%.+]] = alloca i[[SZ]]
431 // CHECK:  [[LOCAL_AA:%.+]] = alloca i[[SZ]]
432 // CHECK:  [[LOCAL_AAA:%.+]] = alloca i[[SZ]]
433 // CHECK:  [[LOCAL_B:%.+]] = alloca [10 x i32]*
434 // CHECK-DAG:  store i[[SZ]] [[ARG_A:%.+]], i[[SZ]]* [[LOCAL_A]]
435 // CHECK-DAG:  store i[[SZ]] [[ARG_AA:%.+]], i[[SZ]]* [[LOCAL_AA]]
436 // CHECK-DAG:  store i[[SZ]] [[ARG_AAA:%.+]], i[[SZ]]* [[LOCAL_AAA]]
437 // CHECK-DAG:  store [10 x i32]* [[ARG_B:%.+]], [10 x i32]** [[LOCAL_B]]
438 // Store captures in the context.
439 // CHECK-64-DAG:   [[REF_A:%.+]] = bitcast i[[SZ]]* [[LOCAL_A]] to i32*
440 // CHECK-DAG:      [[REF_AA:%.+]] = bitcast i[[SZ]]* [[LOCAL_AA]] to i16*
441 // CHECK-DAG:      [[REF_AAA:%.+]] = bitcast i[[SZ]]* [[LOCAL_AAA]] to i8*
442 // CHECK-DAG:      [[REF_B:%.+]] = load [10 x i32]*, [10 x i32]** [[LOCAL_B]],
443 //
444 // CHECK-DAG: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
445 // CHECK-DAG: [[NTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
446 // CHECK-DAG: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
447 // CHECK-DAG: [[TH_LIMIT:%.+]] = sub nuw i32 [[NTH]], [[WS]]
448 // CHECK: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[TH_LIMIT]]
449 // CHECK: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[CHECK_MASTER:.+]]
450 //
451 // CHECK: [[WORKER]]
452 // CHECK: {{call|invoke}} void [[T4]]_worker()
453 // CHECK: br label {{%?}}[[EXIT:.+]]
454 //
455 // CHECK: [[CHECK_MASTER]]
456 // CHECK-DAG: [[CMTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
457 // CHECK-DAG: [[CMNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
458 // CHECK-DAG: [[CMWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
459 // CHECK: [[IS_MASTER:%.+]] = icmp eq i32 [[CMTID]],
460 // CHECK: br i1 [[IS_MASTER]], label {{%?}}[[MASTER:.+]], label {{%?}}[[EXIT]]
461 //
462 // CHECK: [[MASTER]]
463 // CHECK-DAG: [[MNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
464 // CHECK-DAG: [[MWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
465 // CHECK: [[MTMP1:%.+]] = sub nuw i32 [[MNTH]], [[MWS]]
466 // CHECK: call void @__kmpc_kernel_init(i32 [[MTMP1]]
467 // CHECK-64-DAG: load i32, i32* [[REF_A]]
468 // CHECK-32-DAG: load i32, i32* [[LOCAL_A]]
469 // CHECK-DAG:    load i16, i16* [[REF_AA]]
470 // CHECK-DAG:    getelementptr inbounds [10 x i32], [10 x i32]* [[REF_B]], i[[SZ]] 0, i[[SZ]] 2
471 // CHECK: br label {{%?}}[[TERMINATE:.+]]
472 //
473 // CHECK: [[TERMINATE]]
474 // CHECK: call void @__kmpc_kernel_deinit(
475 // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
476 // CHECK: br label {{%?}}[[EXIT]]
477 //
478 // CHECK: [[EXIT]]
479 // CHECK: ret void
480 
481 // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+S1.+l365}}_worker()
482 // CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
483 // CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
484 // CHECK: store i8* null, i8** [[OMP_WORK_FN]],
485 // CHECK: store i8 0, i8* [[OMP_EXEC_STATUS]],
486 // CHECK: br label {{%?}}[[AWAIT_WORK:.+]]
487 //
488 // CHECK: [[AWAIT_WORK]]
489 // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
490 // CHECK: [[WORK:%.+]] = load i8*, i8** [[OMP_WORK_FN]],
491 // CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i8* [[WORK]], null
492 // CHECK: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]]
493 //
494 // CHECK: [[SEL_WORKERS]]
495 // CHECK: [[ST:%.+]] = load i8, i8* [[OMP_EXEC_STATUS]],
496 // CHECK: [[IS_ACTIVE:%.+]] = icmp ne i8 [[ST]], 0
497 // CHECK: br i1 [[IS_ACTIVE]], label {{%?}}[[EXEC_PARALLEL:.+]], label {{%?}}[[BAR_PARALLEL:.+]]
498 //
499 // CHECK: [[EXEC_PARALLEL]]
500 // CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* [[NONSPMD]]
501 // CHECK: [[WORK_FN:%.+]] = bitcast i8* [[WORK]] to void (i16, i32)*
502 // CHECK: call void [[WORK_FN]](i16 0, i32 [[GTID]])
503 // CHECK: br label {{%?}}[[TERM_PARALLEL:.+]]
504 //
505 // CHECK: [[TERM_PARALLEL]]
506 // CHECK: br label {{%?}}[[BAR_PARALLEL]]
507 //
508 // CHECK: [[BAR_PARALLEL]]
509 // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
510 // CHECK: br label {{%?}}[[AWAIT_WORK]]
511 //
512 // CHECK: [[EXIT]]
513 // CHECK: ret void
514 
515 // CHECK: define {{.*}}void [[T5:@__omp_offloading_.+S1.+l365]](
516 // Create local storage for each capture.
517 // CHECK:       [[LOCAL_THIS:%.+]] = alloca [[S1:%struct.*]]*
518 // CHECK:       [[LOCAL_B:%.+]] = alloca i[[SZ]]
519 // CHECK:       [[LOCAL_VLA1:%.+]] = alloca i[[SZ]]
520 // CHECK:       [[LOCAL_VLA2:%.+]] = alloca i[[SZ]]
521 // CHECK:       [[LOCAL_C:%.+]] = alloca i16*
522 // CHECK-DAG:   store [[S1]]* [[ARG_THIS:%.+]], [[S1]]** [[LOCAL_THIS]]
523 // CHECK-DAG:   store i[[SZ]] [[ARG_B:%.+]], i[[SZ]]* [[LOCAL_B]]
524 // CHECK-DAG:   store i[[SZ]] [[ARG_VLA1:%.+]], i[[SZ]]* [[LOCAL_VLA1]]
525 // CHECK-DAG:   store i[[SZ]] [[ARG_VLA2:%.+]], i[[SZ]]* [[LOCAL_VLA2]]
526 // CHECK-DAG:   store i16* [[ARG_C:%.+]], i16** [[LOCAL_C]]
527 // Store captures in the context.
528 // CHECK-DAG:   [[REF_THIS:%.+]] = load [[S1]]*, [[S1]]** [[LOCAL_THIS]],
529 // CHECK-64-DAG:[[REF_B:%.+]] = bitcast i[[SZ]]* [[LOCAL_B]] to i32*
530 // CHECK-DAG:   [[VAL_VLA1:%.+]] = load i[[SZ]], i[[SZ]]* [[LOCAL_VLA1]],
531 // CHECK-DAG:   [[VAL_VLA2:%.+]] = load i[[SZ]], i[[SZ]]* [[LOCAL_VLA2]],
532 // CHECK-DAG:   [[REF_C:%.+]] = load i16*, i16** [[LOCAL_C]],
533 //
534 // CHECK-DAG: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
535 // CHECK-DAG: [[NTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
536 // CHECK-DAG: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
537 // CHECK-DAG: [[TH_LIMIT:%.+]] = sub nuw i32 [[NTH]], [[WS]]
538 // CHECK: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[TH_LIMIT]]
539 // CHECK: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[CHECK_MASTER:.+]]
540 //
541 // CHECK: [[WORKER]]
542 // CHECK: {{call|invoke}} void [[T5]]_worker()
543 // CHECK: br label {{%?}}[[EXIT:.+]]
544 //
545 // CHECK: [[CHECK_MASTER]]
546 // CHECK-DAG: [[CMTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
547 // CHECK-DAG: [[CMNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
548 // CHECK-DAG: [[CMWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
549 // CHECK: [[IS_MASTER:%.+]] = icmp eq i32 [[CMTID]],
550 // CHECK: br i1 [[IS_MASTER]], label {{%?}}[[MASTER:.+]], label {{%?}}[[EXIT]]
551 //
552 // CHECK: [[MASTER]]
553 // CHECK-DAG: [[MNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
554 // CHECK-DAG: [[MWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
555 // CHECK: [[MTMP1:%.+]] = sub nuw i32 [[MNTH]], [[MWS]]
556 // CHECK: call void @__kmpc_kernel_init(i32 [[MTMP1]]
557 // Use captures.
558 // CHECK-DAG:   getelementptr inbounds [[S1]], [[S1]]* [[REF_THIS]], i32 0, i32 0
559 // CHECK-64-DAG:load i32, i32* [[REF_B]]
560 // CHECK-32-DAG:load i32, i32* [[LOCAL_B]]
561 // CHECK-DAG:   getelementptr inbounds i16, i16* [[REF_C]], i[[SZ]] %{{.+}}
562 // CHECK: call i32 [[BAZ:@.*baz.*]](i32 %
563 // CHECK: br label {{%?}}[[TERMINATE:.+]]
564 //
565 // CHECK: [[TERMINATE]]
566 // CHECK: call void @__kmpc_kernel_deinit(
567 // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
568 // CHECK: br label {{%?}}[[EXIT]]
569 //
570 // CHECK: [[EXIT]]
571 // CHECK: ret void
572 
573 // CHECK: define{{ hidden | }}i32 [[BAZ]](i32 [[F:%.*]], double* nonnull align {{[0-9]+}} dereferenceable{{.*}})
574 // CHECK: alloca i32,
575 // CHECK: [[LOCAL_F_PTR:%.+]] = alloca i32,
576 // CHECK: [[ZERO_ADDR:%.+]] = alloca i32,
577 // CHECK: [[BND_ZERO_ADDR:%.+]] = alloca i32,
578 // CHECK: store i32 0, i32* [[BND_ZERO_ADDR]]
579 // CHECK: store i32 0, i32* [[ZERO_ADDR]]
580 // CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* [[UNKNOWN]]
581 // CHECK: [[PAR_LEVEL:%.+]] = call i16 @__kmpc_parallel_level(%struct.ident_t* [[UNKNOWN]], i32 [[GTID]])
582 // CHECK: [[IS_TTD:%.+]] = icmp eq i16 %1, 0
583 // CHECK: [[RES:%.+]] = call i8 @__kmpc_is_spmd_exec_mode()
584 // CHECK: [[IS_SPMD:%.+]] = icmp ne i8 [[RES]], 0
585 // CHECK: br i1 [[IS_SPMD]], label
586 // CHECK: br label
587 // CHECK: [[SIZE:%.+]] = select i1 [[IS_TTD]], i{{64|32}} 4, i{{64|32}} 128
588 // CHECK: [[PTR:%.+]] = call i8* @__kmpc_data_sharing_coalesced_push_stack(i{{64|32}} [[SIZE]], i16 0)
589 // CHECK: [[REC_ADDR:%.+]] = bitcast i8* [[PTR]] to [[GLOBAL_ST:%.+]]*
590 // CHECK: br label
591 // CHECK: [[ITEMS:%.+]] = phi [[GLOBAL_ST]]* [ null, {{.+}} ], [ [[REC_ADDR]], {{.+}} ]
592 // CHECK: [[TTD_ITEMS:%.+]] = bitcast [[GLOBAL_ST]]* [[ITEMS]] to [[SEC_GLOBAL_ST:%.+]]*
593 // CHECK: [[F_PTR_ARR:%.+]] = getelementptr inbounds [[GLOBAL_ST]], [[GLOBAL_ST]]* [[ITEMS]], i32 0, i32 0
594 // CHECK: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
595 // CHECK: [[LID:%.+]] = and i32 [[TID]], 31
596 // CHECK: [[GLOBAL_F_PTR_PAR:%.+]] = getelementptr inbounds [32 x i32], [32 x i32]* [[F_PTR_ARR]], i32 0, i32 [[LID]]
597 // CHECK: [[GLOBAL_F_PTR_TTD:%.+]] = getelementptr inbounds [[SEC_GLOBAL_ST]], [[SEC_GLOBAL_ST]]* [[TTD_ITEMS]], i32 0, i32 0
598 // CHECK: [[GLOBAL_F_PTR:%.+]] = select i1 [[IS_TTD]], i32* [[GLOBAL_F_PTR_TTD]], i32* [[GLOBAL_F_PTR_PAR]]
599 // CHECK: [[F_PTR:%.+]] = select i1 [[IS_SPMD]], i32* [[LOCAL_F_PTR]], i32* [[GLOBAL_F_PTR]]
600 // CHECK: store i32 %{{.+}}, i32* [[F_PTR]],
601 
602 // CHECK: [[RES:%.+]] = call i8 @__kmpc_is_spmd_exec_mode()
603 // CHECK: icmp ne i8 [[RES]], 0
604 // CHECK: br i1
605 
606 // CHECK: [[RES:%.+]] = call i16 @__kmpc_parallel_level(%struct.ident_t* [[UNKNOWN]], i32 [[GTID]])
607 // CHECK: icmp ne i16 [[RES]], 0
608 // CHECK: br i1
609 
610 // CHECK: call void @__kmpc_serialized_parallel(%struct.ident_t* [[UNKNOWN]], i32 [[GTID]])
611 // CHECK: call void [[OUTLINED:@.+]](i32* [[ZERO_ADDR]], i32* [[BND_ZERO_ADDR]], i32* [[F_PTR]], double* %{{.+}})
612 // CHECK: call void @__kmpc_end_serialized_parallel(%struct.ident_t* [[UNKNOWN]], i32 [[GTID]])
613 // CHECK: br label
614 
615 // CHECK: call void @__kmpc_kernel_prepare_parallel(i8* bitcast (void (i16, i32)* @{{.+}} to i8*))
616 // CHECK: call void @__kmpc_begin_sharing_variables(i8*** [[SHARED_PTR:%.+]], i{{64|32}} 2)
617 // CHECK: [[SHARED:%.+]] = load i8**, i8*** [[SHARED_PTR]],
618 // CHECK: [[REF:%.+]] = getelementptr inbounds i8*, i8** [[SHARED]], i{{64|32}} 0
619 // CHECK: [[F_REF:%.+]] = bitcast i32* [[F_PTR]] to i8*
620 // CHECK: store i8* [[F_REF]], i8** [[REF]],
621 // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
622 // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
623 // CHECK: call void @__kmpc_end_sharing_variables()
624 // CHECK: br label
625 
626 // CHECK: [[RES:%.+]] = load i32, i32* [[F_PTR]],
627 // CHECK: store i32 [[RES]], i32* [[RET:%.+]],
628 // CHECK: br i1 [[IS_SPMD]], label
629 // CHECK: [[BC:%.+]] = bitcast [[GLOBAL_ST]]* [[ITEMS]] to i8*
630 // CHECK: call void @__kmpc_data_sharing_pop_stack(i8* [[BC]])
631 // CHECK: br label
632 // CHECK: [[RES:%.+]] = load i32, i32* [[RET]],
633 // CHECK: ret i32 [[RES]]
634 
635 // CHECK-LABEL: define {{.*}}void {{@__omp_offloading_.+template.+l331}}_worker()
636 // CHECK-DAG: [[OMP_EXEC_STATUS:%.+]] = alloca i8,
637 // CHECK-DAG: [[OMP_WORK_FN:%.+]] = alloca i8*,
638 // CHECK: store i8* null, i8** [[OMP_WORK_FN]],
639 // CHECK: store i8 0, i8* [[OMP_EXEC_STATUS]],
640 // CHECK: br label {{%?}}[[AWAIT_WORK:.+]]
641 //
642 // CHECK: [[AWAIT_WORK]]
643 // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
644 // CHECK: [[WORK:%.+]] = load i8*, i8** [[OMP_WORK_FN]],
645 // CHECK: [[SHOULD_EXIT:%.+]] = icmp eq i8* [[WORK]], null
646 // CHECK: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]]
647 //
648 // CHECK: [[SEL_WORKERS]]
649 // CHECK: [[ST:%.+]] = load i8, i8* [[OMP_EXEC_STATUS]],
650 // CHECK: [[IS_ACTIVE:%.+]] = icmp ne i8 [[ST]], 0
651 // CHECK: br i1 [[IS_ACTIVE]], label {{%?}}[[EXEC_PARALLEL:.+]], label {{%?}}[[BAR_PARALLEL:.+]]
652 //
653 // CHECK: [[EXEC_PARALLEL]]
654 // CHECK: br label {{%?}}[[TERM_PARALLEL:.+]]
655 //
656 // CHECK: [[TERM_PARALLEL]]
657 // CHECK: br label {{%?}}[[BAR_PARALLEL]]
658 //
659 // CHECK: [[BAR_PARALLEL]]
660 // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
661 // CHECK: br label {{%?}}[[AWAIT_WORK]]
662 //
663 // CHECK: [[EXIT]]
664 // CHECK: ret void
665 
666 // CHECK: define {{.*}}void [[T6:@__omp_offloading_.+template.+l331]](i[[SZ]]
667 // Create local storage for each capture.
668 // CHECK:  [[LOCAL_A:%.+]] = alloca i[[SZ]]
669 // CHECK:  [[LOCAL_AA:%.+]] = alloca i[[SZ]]
670 // CHECK:  [[LOCAL_B:%.+]] = alloca [10 x i32]*
671 // CHECK-DAG:  store i[[SZ]] [[ARG_A:%.+]], i[[SZ]]* [[LOCAL_A]]
672 // CHECK-DAG:  store i[[SZ]] [[ARG_AA:%.+]], i[[SZ]]* [[LOCAL_AA]]
673 // CHECK-DAG:   store [10 x i32]* [[ARG_B:%.+]], [10 x i32]** [[LOCAL_B]]
674 // Store captures in the context.
675 // CHECK-64-DAG:[[REF_A:%.+]] = bitcast i[[SZ]]* [[LOCAL_A]] to i32*
676 // CHECK-DAG:   [[REF_AA:%.+]] = bitcast i[[SZ]]* [[LOCAL_AA]] to i16*
677 // CHECK-DAG:   [[REF_B:%.+]] = load [10 x i32]*, [10 x i32]** [[LOCAL_B]],
678 //
679 // CHECK-DAG: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
680 // CHECK-DAG: [[NTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
681 // CHECK-DAG: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
682 // CHECK-DAG: [[TH_LIMIT:%.+]] = sub nuw i32 [[NTH]], [[WS]]
683 // CHECK: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[TH_LIMIT]]
684 // CHECK: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[CHECK_MASTER:.+]]
685 //
686 // CHECK: [[WORKER]]
687 // CHECK: {{call|invoke}} void [[T6]]_worker()
688 // CHECK: br label {{%?}}[[EXIT:.+]]
689 //
690 // CHECK: [[CHECK_MASTER]]
691 // CHECK-DAG: [[CMTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
692 // CHECK-DAG: [[CMNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
693 // CHECK-DAG: [[CMWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
694 // CHECK: [[IS_MASTER:%.+]] = icmp eq i32 [[CMTID]],
695 // CHECK: br i1 [[IS_MASTER]], label {{%?}}[[MASTER:.+]], label {{%?}}[[EXIT]]
696 //
697 // CHECK: [[MASTER]]
698 // CHECK-DAG: [[MNTH:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
699 // CHECK-DAG: [[MWS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
700 // CHECK: [[MTMP1:%.+]] = sub nuw i32 [[MNTH]], [[MWS]]
701 // CHECK: call void @__kmpc_kernel_init(i32 [[MTMP1]]
702 //
703 // CHECK-64-DAG: load i32, i32* [[REF_A]]
704 // CHECK-32-DAG: load i32, i32* [[LOCAL_A]]
705 // CHECK-DAG:    load i16, i16* [[REF_AA]]
706 // CHECK-DAG:    getelementptr inbounds [10 x i32], [10 x i32]* [[REF_B]], i[[SZ]] 0, i[[SZ]] 2
707 //
708 // CHECK: br label {{%?}}[[TERMINATE:.+]]
709 //
710 // CHECK: [[TERMINATE]]
711 // CHECK: call void @__kmpc_kernel_deinit(
712 // CHECK: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0)
713 // CHECK: br label {{%?}}[[EXIT]]
714 //
715 // CHECK: [[EXIT]]
716 // CHECK: ret void
717 
718 #endif
719