// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=LAMBDA %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=BLOCKS %s
// expected-no-diagnostics
// REQUIRES: x86-registered-target
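//
// This test checks the LLVM IR generated for the 'lastprivate' clause on
// '#pragma omp sections', in three variants: plain C++ (CHECK), lambdas
// (LAMBDA), and blocks (BLOCKS).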
#ifndef HEADER
#define HEADER

template <class T>
struct S {
  T f;
  S(T a) : f(a) {}
  S() : f() {}
  S<T> &operator=(const S<T> &);
  operator T() { return T(); }
  ~S() {}
};
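// S<T> is deliberately non-trivial: the checks below verify that lastprivate
// copies of class-type variables are default-constructed on entry and
// destroyed on exit, and that copy-back goes through the copy-assignment
// operator.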

volatile int g = 1212;

// CHECK: [[S_FLOAT_TY:%.+]] = type { float }
// CHECK [[CAP_MAIN_TY:%.+]] = type { i{{[0-9]+}}*, [2 x i{{[0-9]+}}]*, [2 x [[S_FLOAT_TY]]]*, [[S_FLOAT_TY]]*, i{{[0-9]+}}* }
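// NOTE: the CAP_MAIN_TY line above has no ':' after CHECK, so FileCheck
// ignores it; the captured variables are passed to the outlined functions as
// separate arguments instead (see the __kmpc_fork_call checks below).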
// CHECK: [[S_INT_TY:%.+]] = type { i32 }
// CHECK-DAG: [[SECTIONS_BARRIER_LOC:@.+]] = private unnamed_addr constant %{{.+}} { i32 0, i32 194, i32 0, i32 0, i8*
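// The flags value 194 in the ident_t above should decode as
// KMP_IDENT_KMPC | KMP_IDENT_BARRIER_IMPL_SECTIONS (2 | 0xC0), i.e. an
// implicit barrier emitted for a sections construct (assuming the usual
// kmp.h ident flag values).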
// CHECK-DAG: [[X:@.+]] = global double 0.0
template <typename T>
T tmain() {
  S<T> test;
  T t_var = T();
  T vec[] = {1, 2};
  S<T> s_arr[] = {1, 2};
  S<T> var(3);
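  // Each thread gets uninitialized (default-constructed) private copies of
  // the lastprivate variables; the thread that executes the lexically last
  // section copies its values back to the originals.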
#pragma omp parallel
#pragma omp sections lastprivate(t_var, vec, s_arr, var)
  {
    vec[0] = t_var;
#pragma omp section
    s_arr[0] = var;
  }
  return T();
}

namespace A {
double x;
}
namespace B {
using A::x;
}

int main() {
  static int sivar;
#ifdef LAMBDA
  // LAMBDA: [[G:@.+]] = global i{{[0-9]+}} 1212,
  // LAMBDA-LABEL: @main
  // LAMBDA: call void [[OUTER_LAMBDA:@.+]](
  [&]() {
  // LAMBDA: define{{.*}} internal{{.*}} void [[OUTER_LAMBDA]](
  // LAMBDA: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}})
#pragma omp parallel
#pragma omp sections lastprivate(g, sivar)
  {
    // LAMBDA: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* noalias [[GTID:%.+]], i32* noalias %{{.+}}, i32* dereferenceable(4) [[SIVAR_REF:%.+]])
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: alloca i{{[0-9]+}},
    // LAMBDA: [[G_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},
    // LAMBDA: [[SIVAR1_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},

    // LAMBDA: store i{{[0-9]+}}* [[SIVAR_REF]], i{{[0-9]+}}** %{{.+}},
    // LAMBDA: [[SIVAR_REF_ADDR:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %{{.+}},

    // LAMBDA: [[GTID_ADDR:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %{{.+}}, align 8
    // LAMBDA: [[GTID_ADDR_REF:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_ADDR]], align 4

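    // The sections region is lowered to a static worksharing loop over the
    // section indices: __kmpc_for_static_init_4 computes this thread's chunk
    // and sets [[IS_LAST_ADDR]] for the thread that owns the last iteration.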
    // LAMBDA: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID_ADDR_REF]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
    // LAMBDA: store i{{[0-9]+}} 1, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // LAMBDA: store i{{[0-9]+}} 13, i{{[0-9]+}}* [[SIVAR1_PRIVATE_ADDR]],
    // LAMBDA: [[G_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
    // LAMBDA: store i{{[0-9]+}}* [[G_PRIVATE_ADDR]], i{{[0-9]+}}** [[G_PRIVATE_ADDR_REF]]
    // LAMBDA: [[SIVAR_PRIVATE_ADDR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG:%.+]], i{{[0-9]+}} 0, i{{[0-9]+}} 1
    // LAMBDA: store i{{[0-9]+}}* [[SIVAR1_PRIVATE_ADDR]], i{{[0-9]+}}** [[SIVAR_PRIVATE_ADDR_REF]]
    // LAMBDA: call void [[INNER_LAMBDA:@.+]](%{{.+}}* [[ARG]])
    // LAMBDA: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID_ADDR_REF]])
    {
      g = 1;
      sivar = 13;
    }
    // Check for final copying of private values back to original vars.
    // LAMBDA: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
    // LAMBDA: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
    // LAMBDA: br i1 [[IS_LAST_ITER]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
    // LAMBDA: [[LAST_THEN]]
    // Actual copying.

    // original g=private_g;
    // LAMBDA: [[G_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // LAMBDA: store volatile i{{[0-9]+}} [[G_VAL]], i{{[0-9]+}}* [[G]],

    // original sivar = private sivar;
    // LAMBDA: [[SIVAR1_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[SIVAR1_PRIVATE_ADDR]],
    // LAMBDA: store i{{[0-9]+}} [[SIVAR1_VAL]], i{{[0-9]+}}* [[SIVAR_REF_ADDR]],
    // LAMBDA: br label %[[LAST_DONE]]
    // LAMBDA: [[LAST_DONE]]
    // LAMBDA: call void @__kmpc_barrier(%{{.+}}* @{{.+}}, i{{[0-9]+}} [[GTID_ADDR_REF]])
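    // The implicit barrier at the end of the sections region ensures the
    // copy-back above has completed before any thread can observe the
    // original variables.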
#pragma omp section
    [&]() {
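      // The inner lambda captures g and sivar by reference; within the
      // sections region those references are rebound to the private copies,
      // which is what the checks below verify.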
      // LAMBDA: define {{.+}} void [[INNER_LAMBDA]](%{{.+}}* [[ARG_PTR:%.+]])
      // LAMBDA: store %{{.+}}* [[ARG_PTR]], %{{.+}}** [[ARG_PTR_REF:%.+]],
      g = 2;
      sivar = 23;
      // LAMBDA: [[ARG_PTR:%.+]] = load %{{.+}}*, %{{.+}}** [[ARG_PTR_REF]]
      // LAMBDA: [[G_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
      // LAMBDA: [[G_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[G_PTR_REF]]
      // LAMBDA: store i{{[0-9]+}} 2, i{{[0-9]+}}* [[G_REF]]
      // LAMBDA: [[SIVAR_PTR_REF:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}}* [[ARG_PTR]], i{{[0-9]+}} 0, i{{[0-9]+}} 1
      // LAMBDA: [[SIVAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[SIVAR_PTR_REF]]
      // LAMBDA: store i{{[0-9]+}} 23, i{{[0-9]+}}* [[SIVAR_REF]]
    }();
  }
  }();
  return 0;
#elif defined(BLOCKS)
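  // The BLOCKS variant mirrors the lambda checks, using a block instead of a
  // lambda; distinct constants (17/29 instead of 13/23) keep the stores
  // apart.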
  // BLOCKS: [[G:@.+]] = global i{{[0-9]+}} 1212,
  // BLOCKS-LABEL: @main
  // BLOCKS: call void {{%.+}}(i8
  ^{
  // BLOCKS: define{{.*}} internal{{.*}} void {{.+}}(i8*
  // BLOCKS: call void {{.+}} @__kmpc_fork_call({{.+}}, i32 1, {{.+}}* [[OMP_REGION:@.+]] to {{.+}})
#pragma omp parallel
#pragma omp sections lastprivate(g, sivar)
  {
    // BLOCKS: define{{.*}} internal{{.*}} void [[OMP_REGION]](i32* noalias [[GTID:%.+]], i32* noalias %{{.+}}, i32* dereferenceable(4) [[SIVAR:%.+]])
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: alloca i{{[0-9]+}},
    // BLOCKS: [[G_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},
    // BLOCKS: [[SIVAR1_PRIVATE_ADDR:%.+]] = alloca i{{[0-9]+}},

    // BLOCKS: store i{{[0-9]+}}* [[SIVAR]], i{{[0-9]+}}** [[SIVAR_ADDR:%.+]],
    // BLOCKS: [[SIVAR_REF_ADDR:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[SIVAR_ADDR]],

    // BLOCKS: [[GTID_ADDR:%.+]] = load i32*, i32** [[GTID:%.+]], align 8
    // BLOCKS: [[GTID_ADDR_REF:%.+]] = load i32, i32* [[GTID_ADDR]], align 4
    // BLOCKS: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID_ADDR_REF]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
    // BLOCKS: store i{{[0-9]+}} 1, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // BLOCKS: store i{{[0-9]+}} 17, i{{[0-9]+}}* [[SIVAR1_PRIVATE_ADDR]],
    // BLOCKS-NOT: [[G]]{{[^[:alnum:]_]}}
    // BLOCKS: i{{[0-9]+}}* [[G_PRIVATE_ADDR]]
    // BLOCKS-NOT: [[G]]{{[^[:alnum:]_]}}
    // BLOCKS-NOT: [[SIVAR]]{{[^[:alnum:]_]}}
    // BLOCKS: i{{[0-9]+}}* [[SIVAR1_PRIVATE_ADDR]]
    // BLOCKS-NOT: [[SIVAR]]{{[^[:alnum:]_]}}
    // BLOCKS: call void {{%.+}}(i8
    // BLOCKS: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID_ADDR_REF]])
    {
      g = 1;
      sivar = 17;
    }
    // Check for final copying of private values back to original vars.
    // BLOCKS: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
    // BLOCKS: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
    // BLOCKS: br i1 [[IS_LAST_ITER]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
    // BLOCKS: [[LAST_THEN]]
    // Actual copying.

    // original g=private_g;
    // BLOCKS: [[G_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[G_PRIVATE_ADDR]],
    // BLOCKS: store volatile i{{[0-9]+}} [[G_VAL]], i{{[0-9]+}}* [[G]],

    // original sivar = private sivar;
    // BLOCKS: [[SIVAR1_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[SIVAR1_PRIVATE_ADDR]],
    // BLOCKS: store i{{[0-9]+}} [[SIVAR1_VAL]], i{{[0-9]+}}* [[SIVAR_REF_ADDR]],
    // BLOCKS: br label %[[LAST_DONE]]
    // BLOCKS: [[LAST_DONE]]
    // BLOCKS: call void @__kmpc_barrier(%{{.+}}* @{{.+}}, i{{[0-9]+}} [[GTID_ADDR_REF]])
#pragma omp section
    ^{
      // BLOCKS: define {{.+}} void {{@.+}}(i8*
      g = 2;
      sivar = 29;
      // BLOCKS-NOT: [[G]]{{[^[:alnum:]_]}}
      // BLOCKS: store i{{[0-9]+}} 2, i{{[0-9]+}}*
      // BLOCKS-NOT: [[G]]{{[^[:alnum:]_]}}
      // BLOCKS-NOT: [[SIVAR]]{{[^[:alnum:]_]}}
      // BLOCKS: store i{{[0-9]+}} 29, i{{[0-9]+}}*
      // BLOCKS-NOT: [[SIVAR]]{{[^[:alnum:]_]}}
      // BLOCKS: ret
    }();
  }
  }();
  return 0;
#else
  S<float> test;
  int t_var = 0;
  int vec[] = {1, 2};
  S<float> s_arr[] = {1, 2};
  S<float> var(3);
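  // This variant exercises lastprivate with a scalar (t_var), a POD array
  // (vec), an array of class type (s_arr), a class object (var), and a
  // function-local static (sivar).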
#pragma omp parallel
#pragma omp sections lastprivate(t_var, vec, s_arr, var, sivar)
  {
    {
      vec[0] = t_var;
      s_arr[0] = var;
      sivar = 31;
    }
  }
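  // B::x names the same variable as A::x (via using-declaration), so listing
  // both in lastprivate must produce a single private copy; the
  // MAIN_MICROTASK1 checks below verify that only one double alloca is
  // emitted.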
#pragma omp parallel
#pragma omp sections lastprivate(A::x, B::x)
  {
    A::x++;
#pragma omp section
    ;
  }
  return tmain<int>();
#endif
}

// CHECK: define i{{[0-9]+}} @main()
// CHECK: [[TEST:%.+]] = alloca [[S_FLOAT_TY]],
// CHECK: call {{.*}} [[S_FLOAT_TY_DEF_CONSTR:@.+]]([[S_FLOAT_TY]]* [[TEST]])

// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 5, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, i32*, [2 x i32]*, [2 x [[S_FLOAT_TY]]]*, [[S_FLOAT_TY]]*, i{{[0-9]+}}*)* [[MAIN_MICROTASK:@.+]] to void
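// The five lastprivate variables are passed to [[MAIN_MICROTASK]] as five
// separate pointer arguments (hence the argument count of 5 above) rather
// than through a capture struct.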

// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 0, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*)* [[MAIN_MICROTASK1:@.+]] to void
// CHECK: = call {{.+}} [[TMAIN_INT:@.+]]()
// CHECK: call void [[S_FLOAT_TY_DESTR:@.+]]([[S_FLOAT_TY]]*
// CHECK: ret

// CHECK: define internal void [[MAIN_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca [2 x i{{[0-9]+}}],
// CHECK: alloca [2 x [[S_FLOAT_TY]]],
// CHECK: alloca [[S_FLOAT_TY]],
// CHECK: alloca i{{[0-9]+}},
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]]

// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]

// CHECK: call void @__kmpc_for_static_init_4(
// <Skip loop body>
// CHECK: call void @__kmpc_for_static_fini(

// CHECK-DAG: call {{.*}} [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]*
// CHECK-DAG: call {{.*}} [[S_FLOAT_TY_DESTR]]([[S_FLOAT_TY]]*

// CHECK: call void @__kmpc_barrier(
// CHECK: ret void

//
// CHECK: define internal void [[MAIN_MICROTASK1]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}})
// CHECK: [[X_PRIV:%.+]] = alloca double,
// CHECK-NOT: alloca double
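// A single private copy serves both A::x and B::x, so no second double
// alloca may appear.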

// Check for default initialization.
// CHECK-NOT: [[X_PRIV]]

// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call void @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 [[GTID]], i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
// <Skip loop body>
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 [[GTID]])

// Check for final copying of private values back to original vars.
// CHECK: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
// CHECK: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_ITER]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
// CHECK: [[LAST_THEN]]
// Actual copying.

// original x=private_x;
// CHECK: [[X_VAL:%.+]] = load double, double* [[X_PRIV]],
// CHECK: store double [[X_VAL]], double* [[X]],
// CHECK-NEXT: br label %[[LAST_DONE]]
// CHECK: [[LAST_DONE]]

// CHECK: call void @__kmpc_barrier(%{{.+}}* [[SECTIONS_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: ret void

// CHECK: define {{.*}} i{{[0-9]+}} [[TMAIN_INT]]()
// CHECK: [[TEST:%.+]] = alloca [[S_INT_TY]],
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR:@.+]]([[S_INT_TY]]* [[TEST]])
// CHECK: call void (%{{.+}}*, i{{[0-9]+}}, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)*, ...) @__kmpc_fork_call(%{{.+}}* @{{.+}}, i{{[0-9]+}} 4, void (i{{[0-9]+}}*, i{{[0-9]+}}*, ...)* bitcast (void (i{{[0-9]+}}*, i{{[0-9]+}}*, i32*, [2 x i32]*, [2 x [[S_INT_TY]]]*, [[S_INT_TY]]*)* [[TMAIN_MICROTASK:@.+]] to void
// CHECK: call void [[S_INT_TY_DESTR:@.+]]([[S_INT_TY]]*
// CHECK: ret
//
// CHECK: define internal void [[TMAIN_MICROTASK]](i{{[0-9]+}}* noalias [[GTID_ADDR:%.+]], i{{[0-9]+}}* noalias %{{.+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: alloca i{{[0-9]+}},
// CHECK: [[T_VAR_PRIV:%.+]] = alloca i{{[0-9]+}},
// CHECK: [[VEC_PRIV:%.+]] = alloca [2 x i{{[0-9]+}}],
// CHECK: [[S_ARR_PRIV:%.+]] = alloca [2 x [[S_INT_TY]]],
// CHECK: [[VAR_PRIV:%.+]] = alloca [[S_INT_TY]],
// CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_REF:%.+]]

// CHECK: [[T_VAR_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** %
// CHECK: [[VEC_REF:%.+]] = load [2 x i{{[0-9]+}}]*, [2 x i{{[0-9]+}}]** %
// CHECK: [[S_ARR_REF:%.+]] = load [2 x [[S_INT_TY]]]*, [2 x [[S_INT_TY]]]** %
// CHECK: [[VAR_REF:%.+]] = load [[S_INT_TY]]*, [[S_INT_TY]]** %

// Check for default initialization.
// CHECK-NOT: [[T_VAR_PRIV]]
// CHECK-NOT: [[VEC_PRIV]]
// CHECK: [[S_ARR_PRIV_ITEM:%.+]] = phi [[S_INT_TY]]*
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR]]([[S_INT_TY]]* [[S_ARR_PRIV_ITEM]])
// CHECK: call {{.*}} [[S_INT_TY_DEF_CONSTR]]([[S_INT_TY]]* [[VAR_PRIV]])
// CHECK: call {{.+}} @__kmpc_for_static_init_4(%{{.+}}* @{{.+}}, i32 %{{.+}}, i32 34, i32* [[IS_LAST_ADDR:%.+]], i32* %{{.+}}, i32* %{{.+}}, i32* %{{.+}}, i32 1, i32 1)
// <Skip loop body>
// CHECK: call void @__kmpc_for_static_fini(%{{.+}}* @{{.+}}, i32 %{{.+}})

// Check for final copying of private values back to original vars.
// CHECK: [[IS_LAST_VAL:%.+]] = load i32, i32* [[IS_LAST_ADDR]],
// CHECK: [[IS_LAST_ITER:%.+]] = icmp ne i32 [[IS_LAST_VAL]], 0
// CHECK: br i1 [[IS_LAST_ITER]], label %[[LAST_THEN:.+]], label %[[LAST_DONE:.+]]
// CHECK: [[LAST_THEN]]
// Actual copying.

// original t_var=private_t_var;
// CHECK: [[T_VAR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[T_VAR_PRIV]],
// CHECK: store i{{[0-9]+}} [[T_VAR_VAL]], i{{[0-9]+}}* [[T_VAR_REF]],

// original vec[]=private_vec[];
// CHECK: [[VEC_DEST:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_REF]] to i8*
// CHECK: [[VEC_SRC:%.+]] = bitcast [2 x i{{[0-9]+}}]* [[VEC_PRIV]] to i8*
// CHECK: call void @llvm.memcpy.{{.+}}(i8* [[VEC_DEST]], i8* [[VEC_SRC]],
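// vec is trivially copyable, so its copy-back is a single memcpy.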

// original s_arr[]=private_s_arr[];
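// Class-type arrays are copied back element by element through the
// copy-assignment operator, using the begin/end pointer loop checked below.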
// CHECK: [[S_ARR_BEGIN:%.+]] = getelementptr inbounds [2 x [[S_INT_TY]]], [2 x [[S_INT_TY]]]* [[S_ARR_REF]], i{{[0-9]+}} 0, i{{[0-9]+}} 0
// CHECK: [[S_ARR_PRIV_BEGIN:%.+]] = bitcast [2 x [[S_INT_TY]]]* [[S_ARR_PRIV]] to [[S_INT_TY]]*
// CHECK: [[S_ARR_END:%.+]] = getelementptr [[S_INT_TY]], [[S_INT_TY]]* [[S_ARR_BEGIN]], i{{[0-9]+}} 2

// CHK: [[SIVAR_REF:%.+]] = getelementptr [[S_INT_TY]], [[S_INT_TY]]* [[S_ARR_BEGIN]], i{{[0-9]+}} 4
// CHK: store i{{[0-9]+}}* [[SIVAR]], i{{[0-9]+}} [[SIVAR_REF]]

// CHECK: [[IS_EMPTY:%.+]] = icmp eq [[S_INT_TY]]* [[S_ARR_BEGIN]], [[S_ARR_END]]
// CHECK: br i1 [[IS_EMPTY]], label %[[S_ARR_BODY_DONE:.+]], label %[[S_ARR_BODY:.+]]
// CHECK: [[S_ARR_BODY]]
// CHECK: call {{.*}} [[S_INT_TY_COPY_ASSIGN:@.+]]([[S_INT_TY]]* {{.+}}, [[S_INT_TY]]* {{.+}})
// CHECK: br i1 {{.+}}, label %[[S_ARR_BODY_DONE]], label %[[S_ARR_BODY]]
// CHECK: [[S_ARR_BODY_DONE]]

// original var=private_var;
// CHECK: call {{.*}} [[S_INT_TY_COPY_ASSIGN:@.+]]([[S_INT_TY]]* [[VAR_REF]], [[S_INT_TY]]* {{.*}} [[VAR_PRIV]])
// CHECK: br label %[[LAST_DONE]]
// CHECK: [[LAST_DONE]]
// CHECK-DAG: call void [[S_INT_TY_DESTR]]([[S_INT_TY]]* [[VAR_PRIV]])
// CHECK-DAG: call void [[S_INT_TY_DESTR]]([[S_INT_TY]]*
// CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_REF]]
// CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
// CHECK: call void @__kmpc_barrier(%{{.+}}* [[SECTIONS_BARRIER_LOC]], i{{[0-9]+}} [[GTID]])
// CHECK: ret void
#endif