// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --include-generated-funcs
// Check that the CHECK lines are generated for clang-generated functions
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fopenmp %s -emit-llvm -o - | FileCheck --check-prefix=OMP %s
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu %s -emit-llvm -o - | FileCheck --check-prefix=NOOMP %s
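//
// A minimal sketch of how these assertions might be regenerated from a built
// tree (the clang path and test path are illustrative, not part of this test;
// the UTC_ARGS recorded in the NOTE line above are normally reapplied by the
// script):
//   utils/update_cc_test_checks.py --clang=<build>/bin/clang <path/to/this/test>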

const int size = 1024 * 1024 * 32;

double A[size];

void foo(void);

int main() {
  int i = 0;

#pragma omp parallel for
  for (i = 0; i < size; ++i) {
    A[i] = 0.0;
  }

  foo();

  return 0;
}

void foo(void) {
  int i = 0;

#pragma omp parallel for
  for (i = 0; i < size; ++i) {
    A[i] = 1.0;
  }
}
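// The assertions with the OMP prefix below check that each parallel-for region
// is outlined into a compiler-generated function (@.omp_outlined. for foo,
// @.omp_outlined..1 for main) launched through @__kmpc_fork_call, and that
// --include-generated-funcs makes the script emit checks for those generated
// bodies as well. The assertions with the NOOMP prefix check that, without
// -fopenmp, the pragmas are ignored and foo/main keep their plain serial loops.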
// OMP-LABEL: @foo(
// OMP-NEXT:  entry:
// OMP-NEXT:    [[I:%.*]] = alloca i32, align 4
// OMP-NEXT:    store i32 0, i32* [[I]], align 4
// OMP-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* [[GLOB2:@.*]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// OMP-NEXT:    ret void
//
//
// OMP-LABEL: @.omp_outlined.(
// OMP-NEXT:  entry:
// OMP-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// OMP-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// OMP-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// OMP-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// OMP-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// OMP-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// OMP-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// OMP-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// OMP-NEXT:    [[I:%.*]] = alloca i32, align 4
// OMP-NEXT:    store i32* [[DOTGLOBAL_TID_:%.*]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// OMP-NEXT:    store i32* [[DOTBOUND_TID_:%.*]], i32** [[DOTBOUND_TID__ADDR]], align 8
// OMP-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// OMP-NEXT:    store i32 33554431, i32* [[DOTOMP_UB]], align 4
// OMP-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// OMP-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// OMP-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// OMP-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// OMP-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* [[GLOB1:@.*]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// OMP-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// OMP-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 33554431
// OMP-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// OMP:       cond.true:
// OMP-NEXT:    br label [[COND_END:%.*]]
// OMP:       cond.false:
// OMP-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// OMP-NEXT:    br label [[COND_END]]
// OMP:       cond.end:
// OMP-NEXT:    [[COND:%.*]] = phi i32 [ 33554431, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// OMP-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// OMP-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// OMP-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// OMP-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// OMP:       omp.inner.for.cond:
// OMP-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// OMP-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// OMP-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// OMP-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// OMP:       omp.inner.for.body:
// OMP-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// OMP-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// OMP-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// OMP-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
// OMP-NEXT:    [[TMP8:%.*]] = load i32, i32* [[I]], align 4
// OMP-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP8]] to i64
// OMP-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [33554432 x double], [33554432 x double]* @A, i64 0, i64 [[IDXPROM]]
// OMP-NEXT:    store double 1.000000e+00, double* [[ARRAYIDX]], align 8
// OMP-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// OMP:       omp.body.continue:
// OMP-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// OMP:       omp.inner.for.inc:
// OMP-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// OMP-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1
// OMP-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4
// OMP-NEXT:    br label [[OMP_INNER_FOR_COND]]
// OMP:       omp.inner.for.end:
// OMP-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// OMP:       omp.loop.exit:
// OMP-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* [[GLOB1]], i32 [[TMP1]])
// OMP-NEXT:    ret void
//
//
// OMP-LABEL: @main(
// OMP-NEXT:  entry:
// OMP-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
// OMP-NEXT:    [[I:%.*]] = alloca i32, align 4
// OMP-NEXT:    store i32 0, i32* [[RETVAL]], align 4
// OMP-NEXT:    store i32 0, i32* [[I]], align 4
// OMP-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* [[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*))
// OMP-NEXT:    call void @foo()
// OMP-NEXT:    ret i32 0
//
//
// OMP-LABEL: @.omp_outlined..1(
// OMP-NEXT:  entry:
// OMP-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// OMP-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// OMP-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// OMP-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// OMP-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// OMP-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// OMP-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// OMP-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// OMP-NEXT:    [[I:%.*]] = alloca i32, align 4
// OMP-NEXT:    store i32* [[DOTGLOBAL_TID_:%.*]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// OMP-NEXT:    store i32* [[DOTBOUND_TID_:%.*]], i32** [[DOTBOUND_TID__ADDR]], align 8
// OMP-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// OMP-NEXT:    store i32 33554431, i32* [[DOTOMP_UB]], align 4
// OMP-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// OMP-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// OMP-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// OMP-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// OMP-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* [[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// OMP-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// OMP-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 33554431
// OMP-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// OMP:       cond.true:
// OMP-NEXT:    br label [[COND_END:%.*]]
// OMP:       cond.false:
// OMP-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// OMP-NEXT:    br label [[COND_END]]
// OMP:       cond.end:
// OMP-NEXT:    [[COND:%.*]] = phi i32 [ 33554431, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// OMP-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// OMP-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// OMP-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// OMP-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// OMP:       omp.inner.for.cond:
// OMP-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// OMP-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// OMP-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// OMP-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// OMP:       omp.inner.for.body:
// OMP-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// OMP-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// OMP-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// OMP-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
// OMP-NEXT:    [[TMP8:%.*]] = load i32, i32* [[I]], align 4
// OMP-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP8]] to i64
// OMP-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [33554432 x double], [33554432 x double]* @A, i64 0, i64 [[IDXPROM]]
// OMP-NEXT:    store double 0.000000e+00, double* [[ARRAYIDX]], align 8
// OMP-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// OMP:       omp.body.continue:
// OMP-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// OMP:       omp.inner.for.inc:
// OMP-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// OMP-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1
// OMP-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4
// OMP-NEXT:    br label [[OMP_INNER_FOR_COND]]
// OMP:       omp.inner.for.end:
// OMP-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// OMP:       omp.loop.exit:
// OMP-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* [[GLOB1]], i32 [[TMP1]])
// OMP-NEXT:    ret void
//
//
// NOOMP-LABEL: @main(
// NOOMP-NEXT:  entry:
// NOOMP-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
// NOOMP-NEXT:    [[I:%.*]] = alloca i32, align 4
// NOOMP-NEXT:    store i32 0, i32* [[RETVAL]], align 4
// NOOMP-NEXT:    store i32 0, i32* [[I]], align 4
// NOOMP-NEXT:    store i32 0, i32* [[I]], align 4
// NOOMP-NEXT:    br label [[FOR_COND:%.*]]
// NOOMP:       for.cond:
// NOOMP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[I]], align 4
// NOOMP-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP0]], 33554432
// NOOMP-NEXT:    br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// NOOMP:       for.body:
// NOOMP-NEXT:    [[TMP1:%.*]] = load i32, i32* [[I]], align 4
// NOOMP-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP1]] to i64
// NOOMP-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [33554432 x double], [33554432 x double]* @A, i64 0, i64 [[IDXPROM]]
// NOOMP-NEXT:    store double 0.000000e+00, double* [[ARRAYIDX]], align 8
// NOOMP-NEXT:    br label [[FOR_INC:%.*]]
// NOOMP:       for.inc:
// NOOMP-NEXT:    [[TMP2:%.*]] = load i32, i32* [[I]], align 4
// NOOMP-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP2]], 1
// NOOMP-NEXT:    store i32 [[INC]], i32* [[I]], align 4
// NOOMP-NEXT:    br label [[FOR_COND]], [[LOOP2:!llvm.loop !.*]]
// NOOMP:       for.end:
// NOOMP-NEXT:    call void @foo()
// NOOMP-NEXT:    ret i32 0
//
//
// NOOMP-LABEL: @foo(
// NOOMP-NEXT:  entry:
// NOOMP-NEXT:    [[I:%.*]] = alloca i32, align 4
// NOOMP-NEXT:    store i32 0, i32* [[I]], align 4
// NOOMP-NEXT:    store i32 0, i32* [[I]], align 4
// NOOMP-NEXT:    br label [[FOR_COND:%.*]]
// NOOMP:       for.cond:
// NOOMP-NEXT:    [[TMP0:%.*]] = load i32, i32* [[I]], align 4
// NOOMP-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP0]], 33554432
// NOOMP-NEXT:    br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// NOOMP:       for.body:
// NOOMP-NEXT:    [[TMP1:%.*]] = load i32, i32* [[I]], align 4
// NOOMP-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP1]] to i64
// NOOMP-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [33554432 x double], [33554432 x double]* @A, i64 0, i64 [[IDXPROM]]
// NOOMP-NEXT:    store double 1.000000e+00, double* [[ARRAYIDX]], align 8
// NOOMP-NEXT:    br label [[FOR_INC:%.*]]
// NOOMP:       for.inc:
// NOOMP-NEXT:    [[TMP2:%.*]] = load i32, i32* [[I]], align 4
// NOOMP-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP2]], 1
// NOOMP-NEXT:    store i32 [[INC]], i32* [[I]], align 4
// NOOMP-NEXT:    br label [[FOR_COND]], [[LOOP4:!llvm.loop !.*]]
// NOOMP:       for.end:
// NOOMP-NEXT:    ret void
//