// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp -fopenmp-version=50 -x c++ -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -x c++ -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -x c++ -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
//
// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp-simd -fopenmp-version=50 -x c++ -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -x c++ -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -x c++ -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
// expected-no-diagnostics
#ifndef HEADER
#define HEADER

// CHECK-LABEL: @main
int main() {
  float *p;
  int a = 10;
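// The checks below assume the runtime's affinity descriptor is a plain
// two-field record: a base address and a length in bytes, both stored as
// i64 on this 64-bit target (field index 0 and field index 1 in the GEPs
// that follow). A minimal sketch of that assumed layout, kept as a comment
// so it does not change the generated IR:
//
//   struct kmp_task_affinity_info_t {
//     uintptr_t base_addr; // start of the affinity range (field 0)
//     size_t len;          // length of the range in bytes (field 1)
//   };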
// kmp_task_affinity_info_t affs[1];
// CHECK: [[AFFS_ADDR:%.+]] = alloca [1 x %struct.kmp_task_affinity_info_t],
// CHECK: [[TD:%.+]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @{{.+}}, i32 [[GTID:%.+]], i32 1, i64 40, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, %{{.+}}*)* @{{.+}} to i32 (i32, i8*)*))
// CHECK: [[AFFINE_LST_ADDR:%.+]] = getelementptr inbounds [1 x %struct.kmp_task_affinity_info_t], [1 x %struct.kmp_task_affinity_info_t]* [[AFFS_ADDR]], i64 0, i64 0
// CHECK: [[P:%.+]] = load float*, float** [[P_ADDR:%.+]],
// CHECK: [[A_VAL:%.+]] = load i32, i32* [[A_ADDR:%.+]],
// CHECK: [[A_SZ:%.+]] = sext i32 [[A_VAL]] to i64
// CHECK: [[BYTES:%.+]] = mul nuw i64 4, [[A_SZ]]
// CHECK: [[SZ:%.+]] = mul nuw i64 [[BYTES]], 10
// CHECK: [[A_VAL:%.+]] = load i32, i32* [[A_ADDR]],
// CHECK: [[A_SZ1:%.+]] = sext i32 [[A_VAL]] to i64
// CHECK: [[SIZE:%.+]] = mul nuw i64 [[SZ]], [[A_SZ1]]
// CHECK: [[AFFS_0_ADDR:%.+]] = getelementptr %struct.kmp_task_affinity_info_t, %struct.kmp_task_affinity_info_t* [[AFFINE_LST_ADDR]], i64 0

// affs[0].base = p;
// CHECK: [[AFFS_0_BASE_ADDR:%.+]] = getelementptr inbounds %struct.kmp_task_affinity_info_t, %struct.kmp_task_affinity_info_t* [[AFFS_0_ADDR]], i32 0, i32 0
// CHECK: [[P_INTPTR:%.+]] = ptrtoint float* [[P]] to i64
// CHECK: store i64 [[P_INTPTR]], i64* [[AFFS_0_BASE_ADDR]],

// affs[0].size = sizeof(*p) * a * 10 * a;
// CHECK: [[AFFS_0_SIZE_ADDR:%.+]] = getelementptr inbounds %struct.kmp_task_affinity_info_t, %struct.kmp_task_affinity_info_t* [[AFFS_0_ADDR]], i32 0, i32 1
// CHECK: store i64 [[SIZE]], i64* [[AFFS_0_SIZE_ADDR]],
// CHECK: [[BC:%.+]] = bitcast %struct.kmp_task_affinity_info_t* [[AFFINE_LST_ADDR]] to i8*
// CHECK: call i32 @__kmpc_omp_reg_task_with_affinity(%struct.ident_t* @{{.+}}, i32 [[GTID]], i8* [[TD]], i32 1, i8* [[BC]])
#pragma omp task affinity(([a][10][a])p)
  ;
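// The array-shaping expression ([a][10][a])p above treats p as an
// a x 10 x a array of float, so the registered range spans
// sizeof(float) * a * 10 * a bytes; with a = 10 as initialized above,
// that is 4 * 10 * 10 * 10 = 4000 bytes at runtime.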
// CHECK: [[TD:%.+]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @{{.+}}, i32 [[GTID]], i32 1, i64 40, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, %{{.+}}*)* @{{.+}} to i32 (i32, i8*)*))
// CHECK: [[A_VAL:%.+]] = load i32, i32* [[A_ADDR]],
// CHECK: [[SUB:%.+]] = sub nsw i32 [[A_VAL]], 0
// CHECK: [[CONV:%.+]] = zext i32 [[SUB]] to i64

// <num_elem> = <num_iters> + 1 (one extra entry for the constant affinity affinity(a))
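// For example, with a = 10 the iterator i=0:a yields 10 iterations, so
// <num_elem> = 10 + 1 = 11 entries and the runtime receives naffs = 11.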
// CHECK: [[NUM_ELEMS:%.+]] = add nuw i64 1, [[CONV]]
// CHECK: [[SV:%.+]] = call i8* @llvm.stacksave()
// CHECK: store i8* [[SV]], i8** [[SV_ADDR:%.+]],

// kmp_task_affinity_info_t affs[<num_elem>];
// CHECK: [[AFFS_ADDR:%.+]] = alloca %struct.kmp_task_affinity_info_t, i64 [[NUM_ELEMS]],
// store i64 %21, i64* %__vla_expr0, align 8
// CHECK: [[NAFFS:%.+]] = trunc i64 [[NUM_ELEMS]] to i32
// CHECK: [[AFFS_0_ADDR:%.+]] = getelementptr %struct.kmp_task_affinity_info_t, %struct.kmp_task_affinity_info_t* [[AFFS_ADDR]], i64 0

// affs[0].base = &a;
// CHECK: [[AFFS_0_BASE_ADDR:%.+]] = getelementptr inbounds %struct.kmp_task_affinity_info_t, %struct.kmp_task_affinity_info_t* [[AFFS_0_ADDR]], i32 0, i32 0
// CHECK: [[A_INTPTR:%.+]] = ptrtoint i32* [[A_ADDR]] to i64
// CHECK: store i64 [[A_INTPTR]], i64* [[AFFS_0_BASE_ADDR]],

// affs[0].size = sizeof(a);
// CHECK: [[AFFS_0_SIZE_ADDR:%.+]] = getelementptr inbounds %struct.kmp_task_affinity_info_t, %struct.kmp_task_affinity_info_t* [[AFFS_0_ADDR]], i32 0, i32 1
// CHECK: store i64 4, i64* [[AFFS_0_SIZE_ADDR]],

// affs_cnt = 1;
// CHECK: store i64 1, i64* [[AFFS_CNT_ADDR:%.+]],
// CHECK: [[A_VAL:%.+]] = load i32, i32* [[A_ADDR]],
// CHECK: [[NITERS:%.+]] = sub nsw i32 [[A_VAL]], 0
// CHECK: store i32 0, i32* [[CNT_ADDR:%.+]],
// CHECK: br label %[[CONT:[^,]+]]

// for (int cnt = 0; cnt < (a-0); ++cnt) {
//   int i = cnt + 0;
//   affs[affs_cnt].base = &p[i];
//   affs[affs_cnt].size = sizeof(p[i]);
//   ++affs_cnt;
// }

// CHECK: [[CONT]]:
// CHECK: [[CNT:%.+]] = load i32, i32* [[CNT_ADDR]],
// CHECK: [[CMP:%.+]] = icmp slt i32 [[CNT]], [[NITERS]]
// CHECK: br i1 [[CMP]], label %[[BODY:[^,]+]], label %[[DONE:[^,]+]]

// CHECK: [[BODY]]:
// i = cnt + 0;
// CHECK: [[CNT:%.+]] = load i32, i32* [[CNT_ADDR]],
// CHECK: [[VAL:%.+]] = add nsw i32 0, [[CNT]]
// CHECK: store i32 [[VAL]], i32* [[I_ADDR:%.+]],

// &p[i]
// CHECK: [[P:%.+]] = load float*, float** [[P_ADDR]],
// CHECK: [[I:%.+]] = load i32, i32* [[I_ADDR]],
// CHECK: [[IDX:%.+]] = sext i32 [[I]] to i64
// CHECK: [[P_I_ADDR:%.+]] = getelementptr inbounds float, float* [[P]], i64 [[IDX]]

// affs[affs_cnt]
// CHECK: [[AFFS_CNT:%.+]] = load i64, i64* [[AFFS_CNT_ADDR]],
// CHECK: [[AFFS_ELEM_ADDR:%.+]] = getelementptr %struct.kmp_task_affinity_info_t, %struct.kmp_task_affinity_info_t* [[AFFS_ADDR]], i64 [[AFFS_CNT]]

// affs[affs_cnt].base = &p[i];
// CHECK: [[AFFS_ELEM_BASE_ADDR:%.+]] = getelementptr inbounds %struct.kmp_task_affinity_info_t, %struct.kmp_task_affinity_info_t* [[AFFS_ELEM_ADDR]], i32 0, i32 0
// CHECK: [[CAST:%.+]] = ptrtoint float* [[P_I_ADDR]] to i64
// CHECK: store i64 [[CAST]], i64* [[AFFS_ELEM_BASE_ADDR]],

// affs[affs_cnt].size = sizeof(p[i]);
// CHECK: [[AFFS_ELEM_SIZE_ADDR:%.+]] = getelementptr inbounds %struct.kmp_task_affinity_info_t, %struct.kmp_task_affinity_info_t* [[AFFS_ELEM_ADDR]], i32 0, i32 1
// CHECK: store i64 4, i64* [[AFFS_ELEM_SIZE_ADDR]],

// ++affs_cnt;
// CHECK: [[AFFS_CNT_NEXT:%.+]] = add nuw i64 [[AFFS_CNT]], 1
// CHECK: store i64 [[AFFS_CNT_NEXT]], i64* [[AFFS_CNT_ADDR]],

// ++cnt;
// CHECK: [[CNT:%.+]] = load i32, i32* [[CNT_ADDR]],
// CHECK: [[CNT_NEXT:%.+]] = add nsw i32 [[CNT]], 1
// CHECK: store i32 [[CNT_NEXT]], i32* [[CNT_ADDR]],
// CHECK: br label %[[CONT]]

// CHECK: [[DONE]]:
// CHECK: [[BC:%.+]] = bitcast %struct.kmp_task_affinity_info_t* [[AFFS_ADDR]] to i8*
// CHECK: call i32 @__kmpc_omp_reg_task_with_affinity(%struct.ident_t* @{{.+}}, i32 [[GTID]], i8* [[TD]], i32 [[NAFFS]], i8* [[BC]])
// CHECK: [[SV:%.+]] = load i8*, i8** [[SV_ADDR]],
// CHECK: call void @llvm.stackrestore(i8* [[SV]])
#pragma omp task affinity(iterator(i=0:a): p[i]) affinity(a)
  ;
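// Both tasks are registered through the same runtime entry point. Judging
// only by the call sites checked above, it takes the location, the global
// thread id, the task descriptor returned by __kmpc_omp_task_alloc, the
// number of affinity entries, and a pointer to the entry array. A rough
// sketch of the assumed prototype (parameter names are illustrative, not
// quoted from the runtime headers):
//
//   kmp_int32 __kmpc_omp_reg_task_with_affinity(
//       ident_t *loc, kmp_int32 gtid, kmp_task_t *task,
//       kmp_int32 naffins, kmp_task_affinity_info_t *affin_list);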
  return 0;
}

#endif