//===------------ libcall.cu - OpenMP GPU user calls ------------- CUDA -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the OpenMP runtime functions that can be
// invoked by the user in an OpenMP region
//
//===----------------------------------------------------------------------===//

#include "common/omptarget.h"
#include "common/target_atomic.h"
#include "target_impl.h"

EXTERN double omp_get_wtick(void) {
  double rc = __kmpc_impl_get_wtick();
  PRINT(LD_IO, "omp_get_wtick() returns %g\n", rc);
  return rc;
}

EXTERN double omp_get_wtime(void) {
  double rc = __kmpc_impl_get_wtime();
  PRINT(LD_IO, "call omp_get_wtime() returns %g\n", rc);
  return rc;
}

EXTERN void omp_set_num_threads(int num) {
  // Ignore it for SPMD mode.
  if (isSPMDMode())
    return;
  ASSERT0(LT_FUSSY, isRuntimeInitialized(), "Expected initialized runtime.");
  PRINT(LD_IO, "call omp_set_num_threads(num %d)\n", num);
  if (num <= 0) {
    WARNING0(LW_INPUT, "expected positive num; ignore\n");
  } else if (parallelLevel[GetWarpId()] == 0) {
    nThreads = num;
  }
}

EXTERN int omp_get_num_threads(void) {
  int rc = GetNumberOfOmpThreads(isSPMDMode());
  PRINT(LD_IO, "call omp_get_num_threads() return %d\n", rc);
  return rc;
}

EXTERN int omp_get_max_threads(void) {
  if (parallelLevel[GetWarpId()] > 0)
    // We're already in a parallel region.
    return 1; // default is 1 thread available
  // Not currently in a parallel region, so return what was set.
  int rc = 1;
  if (parallelLevel[GetWarpId()] == 0)
    rc = nThreads;
  ASSERT0(LT_FUSSY, rc >= 0, "bad number of threads");
  PRINT(LD_IO, "call omp_get_max_threads() return %d\n", rc);
  return rc;
}

EXTERN int omp_get_thread_limit(void) {
  if (isSPMDMode())
    return GetNumberOfThreadsInBlock();
  int rc = threadLimit;
  PRINT(LD_IO, "call omp_get_thread_limit() return %d\n", rc);
  return rc;
}

EXTERN int omp_get_thread_num() {
  bool isSPMDExecutionMode = isSPMDMode();
  int tid = GetLogicalThreadIdInBlock(isSPMDExecutionMode);
  int rc = GetOmpThreadId(tid, isSPMDExecutionMode);
  PRINT(LD_IO, "call omp_get_thread_num() returns %d\n", rc);
  return rc;
}

EXTERN int omp_get_num_procs(void) {
  int rc = GetNumberOfProcsInDevice(isSPMDMode());
  PRINT(LD_IO, "call omp_get_num_procs() returns %d\n", rc);
  return rc;
}

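// Note on the level queries below: parallelLevel is tracked per warp. As used
// in this file, the low bits hold the parallel nesting depth (omp_get_level
// masks with OMP_ACTIVE_PARALLEL_LEVEL - 1), while a value above
// OMP_ACTIVE_PARALLEL_LEVEL indicates that an active, multi-threaded parallel
// region has been entered (see omp_in_parallel and omp_get_active_level).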
EXTERN int omp_in_parallel(void) {
  int rc = parallelLevel[GetWarpId()] > OMP_ACTIVE_PARALLEL_LEVEL ? 1 : 0;
  PRINT(LD_IO, "call omp_in_parallel() returns %d\n", rc);
  return rc;
}

EXTERN int omp_in_final(void) {
  // Treat all tasks as final. The spec may expect the runtime to track more
  // precisely whether a task was explicitly marked final by the user, but
  // since this is not explicitly specified, we act as if the runtime may
  // always choose to turn a non-final task into a final one.
  int rc = 1;
  PRINT(LD_IO, "call omp_in_final() returns %d\n", rc);
  return rc;
}

EXTERN void omp_set_dynamic(int flag) {
  PRINT(LD_IO, "call omp_set_dynamic(%d) is ignored (no support)\n", flag);
}

EXTERN int omp_get_dynamic(void) {
  int rc = 0;
  PRINT(LD_IO, "call omp_get_dynamic() returns %d\n", rc);
  return rc;
}

EXTERN void omp_set_nested(int flag) {
  PRINT(LD_IO, "call omp_set_nested(%d) is ignored (no nested support)\n",
        flag);
}

EXTERN int omp_get_nested(void) {
  int rc = 0;
  PRINT(LD_IO, "call omp_get_nested() returns %d\n", rc);
  return rc;
}

EXTERN void omp_set_max_active_levels(int level) {
  PRINT(LD_IO,
        "call omp_set_max_active_levels(%d) is ignored (no nested support)\n",
        level);
}

EXTERN int omp_get_max_active_levels(void) {
  int rc = 1;
  PRINT(LD_IO, "call omp_get_max_active_levels() returns %d\n", rc);
  return rc;
}

EXTERN int omp_get_level(void) {
  int level = parallelLevel[GetWarpId()] & (OMP_ACTIVE_PARALLEL_LEVEL - 1);
  PRINT(LD_IO, "call omp_get_level() returns %d\n", level);
  return level;
}

EXTERN int omp_get_active_level(void) {
  int level = parallelLevel[GetWarpId()] > OMP_ACTIVE_PARALLEL_LEVEL ? 1 : 0;
  PRINT(LD_IO, "call omp_get_active_level() returns %d\n", level);
  return level;
}

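// omp_get_ancestor_thread_num walks the chain of task descriptors back from
// the current task until it reaches the parallel construct at the requested
// nesting level, and returns the thread id recorded in that descriptor.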
EXTERN int omp_get_ancestor_thread_num(int level) {
  if (isSPMDMode())
    return level == 1 ? GetThreadIdInBlock() : 0;
  int rc = -1;
  // If level is 0, or none of the parallel regions is active, return 0.
  unsigned parLevel = parallelLevel[GetWarpId()];
  if (level == 1 && parLevel > OMP_ACTIVE_PARALLEL_LEVEL) {
    int totLevel = omp_get_level();
    if (level <= totLevel) {
      omptarget_nvptx_TaskDescr *currTaskDescr =
          getMyTopTaskDescriptor(/*isSPMDExecutionMode=*/false);
      int steps = totLevel - level;
      PRINT(LD_IO, "backtrack %d steps\n", steps);
      ASSERT0(LT_FUSSY, currTaskDescr,
              "do not expect this function to be called in a non-active thread");
      do {
        if (DON(LD_IOD)) {
          // Print the current state.
          omp_sched_t sched = currTaskDescr->GetRuntimeSched();
          PRINT(LD_ALL,
                "task descr %s %d: %s, in par %d, rt sched %d,"
                " chunk %" PRIu64 "; tid %d, tnum %d, nthreads %d\n",
                "ancestor", steps,
                (currTaskDescr->IsParallelConstruct() ? "par" : "task"),
                (int)currTaskDescr->InParallelRegion(), (int)sched,
                currTaskDescr->RuntimeChunkSize(),
                (int)currTaskDescr->ThreadId(), (int)threadsInTeam,
                (int)nThreads);
        }

        if (currTaskDescr->IsParallelConstruct()) {
          // Found the requested level.
          if (!steps) {
            rc = currTaskDescr->ThreadId();
            break;
          }
          steps--;
        }
        currTaskDescr = currTaskDescr->GetPrevTaskDescr();
      } while (currTaskDescr);
      ASSERT0(LT_FUSSY, !steps, "expected to find all steps");
    }
  } else if (level == 0 ||
             (level > 0 && parLevel < OMP_ACTIVE_PARALLEL_LEVEL &&
              level <= parLevel) ||
             (level > 1 && parLevel > OMP_ACTIVE_PARALLEL_LEVEL &&
              level <= (parLevel - OMP_ACTIVE_PARALLEL_LEVEL))) {
    rc = 0;
  }
  PRINT(LD_IO, "call omp_get_ancestor_thread_num(level %d) returns %d\n", level,
        rc);
  return rc;
}

EXTERN int omp_get_team_size(int level) {
  if (isSPMDMode())
    return level == 1 ? GetNumberOfThreadsInBlock() : 1;
  int rc = -1;
  unsigned parLevel = parallelLevel[GetWarpId()];
  // If level is 0, or none of the parallel regions is active, return 1.
  if (level == 1 && parLevel > OMP_ACTIVE_PARALLEL_LEVEL) {
    rc = threadsInTeam;
  } else if (level == 0 ||
             (level > 0 && parLevel < OMP_ACTIVE_PARALLEL_LEVEL &&
              level <= parLevel) ||
             (level > 1 && parLevel > OMP_ACTIVE_PARALLEL_LEVEL &&
              level <= (parLevel - OMP_ACTIVE_PARALLEL_LEVEL))) {
    rc = 1;
  }
  PRINT(LD_IO, "call omp_get_team_size(level %d) returns %d\n", level, rc);
  return rc;
}

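// The schedule kind and chunk size live in the current task descriptor; when
// the runtime is uninitialized (SPMD mode without a full runtime),
// omp_get_schedule reports static scheduling with a modifier of 1 and
// omp_set_schedule is a no-op.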
EXTERN void omp_get_schedule(omp_sched_t *kind, int *modifier) {
  if (isRuntimeUninitialized()) {
    ASSERT0(LT_FUSSY, isSPMDMode(),
            "Expected SPMD mode only with uninitialized runtime.");
    *kind = omp_sched_static;
    *modifier = 1;
  } else {
    omptarget_nvptx_TaskDescr *currTaskDescr =
        getMyTopTaskDescriptor(isSPMDMode());
    *kind = currTaskDescr->GetRuntimeSched();
    *modifier = currTaskDescr->RuntimeChunkSize();
  }
  PRINT(LD_IO, "call omp_get_schedule returns sched %d and modif %d\n",
        (int)*kind, *modifier);
}

EXTERN void omp_set_schedule(omp_sched_t kind, int modifier) {
  PRINT(LD_IO, "call omp_set_schedule(sched %d, modif %d)\n", (int)kind,
        modifier);
  if (isRuntimeUninitialized()) {
    ASSERT0(LT_FUSSY, isSPMDMode(),
            "Expected SPMD mode only with uninitialized runtime.");
    return;
  }
  if (kind >= omp_sched_static && kind < omp_sched_auto) {
    omptarget_nvptx_TaskDescr *currTaskDescr =
        getMyTopTaskDescriptor(isSPMDMode());
    currTaskDescr->SetRuntimeSched(kind);
    currTaskDescr->RuntimeChunkSize() = modifier;
    PRINT(LD_IOD, "omp_set_schedule did set sched %d & modif %" PRIu64 "\n",
          (int)currTaskDescr->GetRuntimeSched(),
          currTaskDescr->RuntimeChunkSize());
  }
}

EXTERN omp_proc_bind_t omp_get_proc_bind(void) {
  PRINT0(LD_IO, "call omp_get_proc_bind() is true, regardless of state\n");
  return omp_proc_bind_true;
}

EXTERN int omp_get_num_places(void) {
  PRINT0(LD_IO, "call omp_get_num_places() returns 0\n");
  return 0;
}

EXTERN int omp_get_place_num_procs(int place_num) {
  PRINT0(LD_IO, "call omp_get_place_num_procs() returns 0\n");
  return 0;
}

EXTERN void omp_get_place_proc_ids(int place_num, int *ids) {
  PRINT0(LD_IO, "call to omp_get_place_proc_ids()\n");
}

EXTERN int omp_get_place_num(void) {
  PRINT0(LD_IO, "call to omp_get_place_num() returns 0\n");
  return 0;
}

EXTERN int omp_get_partition_num_places(void) {
  PRINT0(LD_IO, "call to omp_get_partition_num_places() returns 0\n");
  return 0;
}

EXTERN void omp_get_partition_place_nums(int *place_nums) {
  PRINT0(LD_IO, "call to omp_get_partition_place_nums()\n");
}

EXTERN int omp_get_cancellation(void) {
  int rc = 0;
  PRINT(LD_IO, "call omp_get_cancellation() returns %d\n", rc);
  return rc;
}

EXTERN void omp_set_default_device(int deviceId) {
  PRINT0(LD_IO, "call omp_set_default_device() is undef on device\n");
}

EXTERN int omp_get_default_device(void) {
  PRINT0(LD_IO,
         "call omp_get_default_device() is undef on device, returns 0\n");
  return 0;
}

EXTERN int omp_get_num_devices(void) {
  PRINT0(LD_IO, "call omp_get_num_devices() is undef on device, returns 0\n");
  return 0;
}

EXTERN int omp_get_num_teams(void) {
  int rc = GetNumberOfOmpTeams();
  PRINT(LD_IO, "call omp_get_num_teams() returns %d\n", rc);
  return rc;
}

EXTERN int omp_get_team_num() {
  int rc = GetOmpTeamId();
  PRINT(LD_IO, "call omp_get_team_num() returns %d\n", rc);
  return rc;
}

EXTERN int omp_is_initial_device(void) {
  PRINT0(LD_IO, "call omp_is_initial_device() returns 0\n");
  return 0; // 0 by def on device
}

// Unspecified on the device.
EXTERN int omp_get_initial_device(void) {
  PRINT0(LD_IO, "call omp_get_initial_device() returns 0\n");
  return 0;
}

// Unused for now.
EXTERN int omp_get_max_task_priority(void) {
  PRINT0(LD_IO, "call omp_get_max_task_priority() returns 0\n");
  return 0;
}

////////////////////////////////////////////////////////////////////////////////
// locks
////////////////////////////////////////////////////////////////////////////////

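// These lock routines are thin wrappers that forward to the target-specific
// __kmpc_impl_* lock primitives (see target_impl.h).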
EXTERN void omp_init_lock(omp_lock_t *lock) {
  __kmpc_impl_init_lock(lock);
  PRINT0(LD_IO, "call omp_init_lock()\n");
}

EXTERN void omp_destroy_lock(omp_lock_t *lock) {
  __kmpc_impl_destroy_lock(lock);
  PRINT0(LD_IO, "call omp_destroy_lock()\n");
}

EXTERN void omp_set_lock(omp_lock_t *lock) {
  __kmpc_impl_set_lock(lock);
  PRINT0(LD_IO, "call omp_set_lock()\n");
}

EXTERN void omp_unset_lock(omp_lock_t *lock) {
  __kmpc_impl_unset_lock(lock);
  PRINT0(LD_IO, "call omp_unset_lock()\n");
}

EXTERN int omp_test_lock(omp_lock_t *lock) {
  int rc = __kmpc_impl_test_lock(lock);
  PRINT(LD_IO, "call omp_test_lock() return %d\n", rc);
  return rc;
}