/external/llvm-project/openmp/runtime/src/ |
D | kmp_os.h |
    127  typedef unsigned __int64 kmp_uint64;  typedef
    135  typedef struct kmp_struct64 kmp_uint64;  typedef
    160  typedef unsigned long long kmp_uint64;  typedef
    196  typedef kmp_uint64 kmp_uint;
    474  extern kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 v);
    475  extern kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 v);
    622  __atomic_fetch_add((volatile kmp_uint64 *)(p), (kmp_uint64)(v), \
    639  __atomic_fetch_or((volatile kmp_uint64 *)(p), (kmp_uint64)(v), \
    642  __atomic_fetch_and((volatile kmp_uint64 *)(p), (kmp_uint64)(v), \
    646  __sync_fetch_and_or((volatile kmp_uint64 *)(p), (kmp_uint64)(v))
    [all …]
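Note: the kmp_os.h lines above pick a platform-specific 64-bit typedef and map fetch-and-or/and onto compiler builtins. The following is a minimal, self-contained sketch of that mapping, not the header itself; the macro name KMP_SKETCH_TEST_THEN_OR64 and the memory order are assumptions made for illustration.

// Sketch: fetch-or on a 64-bit word via the GCC/Clang __atomic builtin,
// mirroring the __atomic_fetch_or usage shown at kmp_os.h line 639.
#include <cstdio>

typedef unsigned long long kmp_uint64; // one of the typedef variants above

#define KMP_SKETCH_TEST_THEN_OR64(p, v)                                       \
  __atomic_fetch_or((volatile kmp_uint64 *)(p), (kmp_uint64)(v),              \
                    __ATOMIC_SEQ_CST) // memory order chosen for the sketch

int main() {
  volatile kmp_uint64 flags = 0x1;
  kmp_uint64 old = KMP_SKETCH_TEST_THEN_OR64(&flags, 0x4); // returns old value
  std::printf("old=%llx new=%llx\n", old, (kmp_uint64)flags); // old=1 new=5
  return 0;
}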
|
D | z_Windows_NT-586_util.cpp |
    105  kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 d) {  in __kmp_test_then_or64()
    106  kmp_uint64 old_value, new_value;  in __kmp_test_then_or64()
    120  kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 d) {  in __kmp_test_then_and64()
    121  kmp_uint64 old_value, new_value;  in __kmp_test_then_and64()
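The old_value/new_value locals at lines 106 and 121 suggest the usual compare-exchange retry loop. A sketch of that shape, using the portable GCC/Clang builtin rather than whatever interlocked primitive the Windows file actually calls:

typedef unsigned long long kmp_uint64;

// Sketch: atomically OR d into *p and return the value *p held beforehand.
kmp_uint64 test_then_or64_sketch(volatile kmp_uint64 *p, kmp_uint64 d) {
  kmp_uint64 old_value, new_value;
  do {
    old_value = *p;            // snapshot the current value
    new_value = old_value | d; // value we want to install
    // Retry if another thread changed *p between the snapshot and the CAS.
  } while (!__atomic_compare_exchange_n(p, &old_value, new_value,
                                        /*weak=*/false, __ATOMIC_SEQ_CST,
                                        __ATOMIC_SEQ_CST));
  return old_value;
}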
|
D | kmp_dispatch_hier.h |
    140  volatile kmp_uint64 val[2];
    155  kmp_uint64 index) {  in set_next_hand_thread()
    161  void set_next(T nlb, T nub, ST nst, kmp_int32 nstatus, kmp_uint64 index) {  in set_next()
    169  kmp_int32 get_next_status(kmp_uint64 index) const {  in get_next_status()
    172  T get_next_lb(kmp_uint64 index) const { return lb[1 - index]; }  in get_next_lb()
    173  T get_next_ub(kmp_uint64 index) const { return ub[1 - index]; }  in get_next_ub()
    174  ST get_next_st(kmp_uint64 index) const { return st[1 - index]; }  in get_next_st()
    175  dispatch_shared_info_template<T> volatile *get_next_sh(kmp_uint64 index) {  in get_next_sh()
    179  kmp_int32 get_curr_status(kmp_uint64 index) const { return status[index]; }  in get_curr_status()
    180  T get_curr_lb(kmp_uint64 index) const { return lb[index]; }  in get_curr_lb()
    [all …]
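The get_next_*/get_curr_* accessors above read slot `index` for the current bounds and `1 - index` for the next ones, i.e. classic double buffering. A stripped-down sketch of that scheme, with field names simplified and all synchronization omitted:

typedef unsigned long long kmp_uint64;
typedef int kmp_int32;

// Sketch: two-slot buffer where `index` selects the bounds being consumed
// and `1 - index` the bounds being prepared for the next round.
template <typename T> struct hier_bounds_sketch {
  T lb[2];
  T ub[2];
  kmp_int32 status[2];

  void set_next(T nlb, T nub, kmp_int32 nstatus, kmp_uint64 index) {
    lb[1 - index] = nlb; // write the inactive slot
    ub[1 - index] = nub;
    status[1 - index] = nstatus;
  }
  T get_curr_lb(kmp_uint64 index) const { return lb[index]; } // active slot
  T get_curr_ub(kmp_uint64 index) const { return ub[index]; }
  kmp_int32 get_curr_status(kmp_uint64 index) const { return status[index]; }
  // Consumers flip index (index = 1 - index) once a round completes.
};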
|
D | kmp_wait_release.h |
    184  kmp_uint64 poll_count;  in __kmp_wait_template()
    185  kmp_uint64 hibernate_goal;  in __kmp_wait_template()
    512  void *cacheline = (void *)(kmp_uint64(spin) & ~(CACHE_LINE - 1));  in __kmp_mwait_template()
    625  template <> struct flag_traits<kmp_uint64> {
    626  typedef kmp_uint64 flag_t;
    879  class kmp_flag_64 : public kmp_basic_flag_native<kmp_uint64, Sleepable> {
    881  kmp_flag_64(volatile kmp_uint64 *p)
    882  : kmp_basic_flag_native<kmp_uint64, Sleepable>(p) {}
    883  kmp_flag_64(volatile kmp_uint64 *p, kmp_info_t *thr)
    884  : kmp_basic_flag_native<kmp_uint64, Sleepable>(p, thr) {}
    [all …]
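poll_count and hibernate_goal at lines 184-185 point at a bounded spin before the waiting thread goes to sleep. A sketch of that spin-then-back-off shape on a 64-bit flag; the poll budget and the yield fallback are stand-ins for the runtime's real suspend/mwait path:

#include <atomic>
#include <thread>

typedef unsigned long long kmp_uint64;

// Sketch: spin until the flag reaches `goal`, yielding once a poll budget
// is exhausted (the real runtime would suspend or mwait instead).
void wait_flag64_sketch(const std::atomic<kmp_uint64> &flag, kmp_uint64 goal) {
  kmp_uint64 poll_count = 0;
  const kmp_uint64 hibernate_goal = 100000; // illustrative budget
  while (flag.load(std::memory_order_acquire) < goal) {
    if (++poll_count >= hibernate_goal)
      std::this_thread::yield();
  }
}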
|
D | kmp_itt.h |
    71  __kmp_inline void __kmp_itt_metadata_imbalance(int gtid, kmp_uint64 begin,
    72  kmp_uint64 end,
    73  kmp_uint64 imbalance,
    74  kmp_uint64 reduction);
    77  __kmp_inline void __kmp_itt_metadata_loop(ident_t *loc, kmp_uint64 sched_type,
    78  kmp_uint64 iterations,
    79  kmp_uint64 chunk);
|
D | kmp_omp.h |
    34  kmp_uint64 addr;
    40  kmp_uint64 flags; // Flags for future extensions.
    41  kmp_uint64
    43  kmp_uint64 func; // Pointer to name of routine where the parallel region is.
    51  kmp_uint64 array; // Address of array of kmp_omp_num_threads_item_t.
    229  kmp_uint64 last_field;
|
D | kmp_tasking.cpp |
    1541  kmp_uint64 cur_time;  in __kmp_invoke_task()
    4039  kmp_taskloop_bounds_t(kmp_task_t *_task, kmp_uint64 *lb, kmp_uint64 *ub)  in kmp_taskloop_bounds_t()
    4051  kmp_uint64 get_lb() const {  in get_lb()
    4072  kmp_uint64 get_ub() const {  in get_ub()
    4093  void set_lb(kmp_uint64 lb) {  in set_lb()
    4097  *(kmp_uint64 *)((char *)task + lower_offset) = lb;  in set_lb()
    4104  kmp_uint64 *lower = RCAST(kmp_uint64 *, task->shareds);  in set_lb()
    4105  *lower = (kmp_uint64)lb;  in set_lb()
    4109  *(kmp_uint64 *)((char *)task + lower_offset) = lb;  in set_lb()
    4112  void set_ub(kmp_uint64 ub) {  in set_ub()
    [all …]
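set_lb at lines 4093-4109 writes the lower bound either at a byte offset inside the task or through task->shareds, depending on how the compiler laid the taskloop bounds out. A simplified sketch of those two paths, using a stand-in task struct rather than the real kmp_task_t:

#include <cstddef>

typedef unsigned long long kmp_uint64;

// Stand-in for kmp_task_t: real tasks carry more fields, and the bounds
// live at compiler-dependent offsets rather than fixed members.
struct task_sketch {
  void *shareds;     // compat layout: first shared variable is the lower bound
  kmp_uint64 lb, ub; // embedded layout
};

static void set_lb_sketch(task_sketch *task, bool bounds_in_shareds,
                          kmp_uint64 lb) {
  if (bounds_in_shareds) {
    kmp_uint64 *lower = (kmp_uint64 *)task->shareds;
    *lower = lb;
  } else {
    size_t lower_offset = offsetof(task_sketch, lb);
    *(kmp_uint64 *)((char *)task + lower_offset) = lb; // mirrors line 4097
  }
}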
|
D | kmp_atomic.h |
    520  void __kmpc_atomic_fixed8u_div(ident_t *id_ref, int gtid, kmp_uint64 *lhs,
    521  kmp_uint64 rhs);
    530  void __kmpc_atomic_fixed8u_shr(ident_t *id_ref, int gtid, kmp_uint64 *lhs,
    531  kmp_uint64 rhs);
    734  void __kmpc_atomic_fixed8u_div_rev(ident_t *id_ref, int gtid, kmp_uint64 *lhs,
    735  kmp_uint64 rhs);
    740  void __kmpc_atomic_fixed8u_shr_rev(ident_t *id_ref, int gtid, kmp_uint64 *lhs,
    741  kmp_uint64 rhs);
    878  void __kmpc_atomic_fixed8u_add_fp(ident_t *id_ref, int gtid, kmp_uint64 *lhs,
    882  void __kmpc_atomic_fixed8u_sub_fp(ident_t *id_ref, int gtid, kmp_uint64 *lhs,
    [all …]
|
D | kmp.h |
    271  ((kmp_int64)((((kmp_uint64)(HIGH_32)) << 32) | (kmp_uint64)(LOW_32)))
    958  kmp_uint64 pool_size;
    959  kmp_uint64 pool_used;
    983  (~((kmp_uint64)1 << ((sizeof(kmp_uint64) * (1 << 3)) - 1)))
    1061  extern kmp_uint64 __kmp_ticks_per_msec;
    1063  #define KMP_NOW() ((kmp_uint64)_rdtsc())
    1073  extern kmp_uint64 __kmp_now_nsec();
    1183  kmp_uint64 frequency; // Nominal CPU frequency in Hz.
    1615  kmp_uint64 index;
    1616  kmp_uint64 wait_val[2];
    [all …]
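Line 271 widens two 32-bit halves to kmp_uint64, shifts the high half into the top 32 bits, and ORs in the low half. A compilable example of that exact expression; the wrapper name PACK_64_SKETCH is made up here, since the listing does not show the macro's real name:

#include <cstdio>

typedef long long kmp_int64;
typedef unsigned long long kmp_uint64;

// Sketch of the 64-bit packing expression at kmp.h line 271.
#define PACK_64_SKETCH(HIGH_32, LOW_32)                                       \
  ((kmp_int64)((((kmp_uint64)(HIGH_32)) << 32) | (kmp_uint64)(LOW_32)))

int main() {
  kmp_int64 v = PACK_64_SKETCH(0x1u, 0x2u);
  std::printf("%llx\n", (kmp_uint64)v); // prints 100000002
  return 0;
}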
|
D | z_Linux_util.cpp |
    93  kmp_uint64 __kmp_ticks_per_msec = 1000000;
    422  kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 d) {  in __kmp_test_then_or64()
    423  kmp_uint64 old_value, new_value;  in __kmp_test_then_or64()
    435  kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 d) {  in __kmp_test_then_and64()
    436  kmp_uint64 old_value, new_value;  in __kmp_test_then_and64()
    1976  kmp_uint64 __kmp_now_nsec() {  in __kmp_now_nsec()
    1979  kmp_uint64 nsec = (kmp_uint64)KMP_NSEC_PER_SEC * (kmp_uint64)t.tv_sec +  in __kmp_now_nsec()
    1980  (kmp_uint64)1000 * (kmp_uint64)t.tv_usec;  in __kmp_now_nsec()
    1987  kmp_uint64 now, nsec2, diff;  in __kmp_initialize_system_tick()
    1988  kmp_uint64 delay = 100000; // 50~100 usec on most machines.  in __kmp_initialize_system_tick()
    [all …]
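Lines 1976-1980 compute wall-clock nanoseconds from gettimeofday: seconds scaled to nanoseconds plus microseconds scaled by 1000. A self-contained version of that arithmetic (POSIX only); the value of KMP_NSEC_PER_SEC is filled in as an assumption:

#include <sys/time.h>

typedef unsigned long long kmp_uint64;
#define KMP_NSEC_PER_SEC 1000000000ULL // assumed value of the runtime constant

// Sketch: current time in nanoseconds, mirroring __kmp_now_nsec's math.
kmp_uint64 now_nsec_sketch(void) {
  struct timeval t;
  gettimeofday(&t, NULL);
  return (kmp_uint64)KMP_NSEC_PER_SEC * (kmp_uint64)t.tv_sec +
         (kmp_uint64)1000 * (kmp_uint64)t.tv_usec;
}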
|
D | kmp_sched.cpp |
    390  kmp_uint64 cur_chunk = chunk;  in __kmp_for_static_init()
    855  kmp_uint64 *plower, kmp_uint64 *pupper,  in __kmpc_for_static_init_8u()
    858  __kmp_for_static_init<kmp_uint64>(loc, gtid, schedtype, plastiter, plower,  in __kmpc_for_static_init_8u()
    930  kmp_uint64 *plower, kmp_uint64 *pupper,  in __kmpc_dist_for_static_init_8u()
    931  kmp_uint64 *pupperD, kmp_int64 *pstride,  in __kmpc_dist_for_static_init_8u()
    933  __kmp_dist_for_static_init<kmp_uint64>(loc, gtid, schedule, plastiter, plower,  in __kmpc_dist_for_static_init_8u()
    1003  kmp_uint64 *p_lb, kmp_uint64 *p_ub,  in __kmpc_team_static_init_8u()
    1007  __kmp_team_static_init<kmp_uint64>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,  in __kmpc_team_static_init_8u()
|
D | kmp_dispatch.cpp |
    109  kmp_uint64 *cur_chunk,  in __kmp_dispatch_init_algorithm()
    741  __kmp_dispatch_init_hier_runtime<kmp_uint64>(ident_t *loc, kmp_uint64 lb,  in __kmp_dispatch_init_hier_runtime()
    742  kmp_uint64 ub, kmp_int64 st) {  in __kmp_dispatch_init_hier_runtime()
    743  __kmp_dispatch_init_hierarchy<kmp_uint64>(  in __kmp_dispatch_init_hier_runtime()
    854  kmp_uint64 cur_chunk = chunk;  in __kmp_dispatch_init()
    921  kmp_uint64 schedtype = 0;  in __kmp_dispatch_init()
    2340  enum sched_type schedule, kmp_uint64 lb,  in __kmpc_dispatch_init_8u()
    2341  kmp_uint64 ub, kmp_int64 st, kmp_int64 chunk) {  in __kmpc_dispatch_init_8u()
    2346  __kmp_dispatch_init<kmp_uint64>(loc, gtid, schedule, lb, ub, st, chunk, true);  in __kmpc_dispatch_init_8u()
    2396  kmp_uint64 lb, kmp_uint64 ub, kmp_int64 st,  in __kmpc_dist_dispatch_init_8u()
    [all …]
|
D | kmp_debugger.cpp |
    38  { (kmp_uint64)(&var), sizeof(var) }
    65  {(kmp_uint64)(__kmp_copyright) + KMP_VERSION_MAGIC_LEN,
    228  static inline void *__kmp_convert_to_ptr(kmp_uint64 addr) {  in __kmp_convert_to_ptr()
|
D | kmp_gsupport.cpp |
    872  KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \
    873  (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
    910  KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \
    911  (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
    937  KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \
    938  (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
    1035  KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \
    1036  (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
    1083  KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \
    1084  (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
    [all …]
|
D | kmp_atomic.cpp |
    1016  ATOMIC_CMPXCHG(fixed8u, div, kmp_uint64, 64, /, 8i, 7,
    1026  ATOMIC_CMPXCHG(fixed8u, shr, kmp_uint64, 64, >>, 8i, 7,
    1494  ATOMIC_CMPXCHG_REV(fixed8u, div, kmp_uint64, 64, /, 8i,
    1500  ATOMIC_CMPXCHG_REV(fixed8u, shr, kmp_uint64, 64, >>, 8i,
    1731  ATOMIC_CMPXCHG_MIX(fixed8u, kmp_uint64, add, 64, +, fp, _Quad, 8i, 7,
    1735  ATOMIC_CMPXCHG_MIX(fixed8u, kmp_uint64, sub, 64, -, fp, _Quad, 8i, 7,
    1739  ATOMIC_CMPXCHG_MIX(fixed8u, kmp_uint64, mul, 64, *, fp, _Quad, 8i, 7,
    1743  ATOMIC_CMPXCHG_MIX(fixed8u, kmp_uint64, div, 64, /, fp, _Quad, 8i, 7,
    1804  ATOMIC_CMPXCHG_REV_MIX(fixed8u, kmp_uint64, sub_rev, 64, -, fp, _Quad, 8i, 7,
    1808  ATOMIC_CMPXCHG_REV_MIX(fixed8u, kmp_uint64, div_rev, 64, /, fp, _Quad, 8i, 7,
    [all …]
|
D | kmp_lock.h |
    448  std::atomic<std::atomic<kmp_uint64> *> polls;
    449  std::atomic<kmp_uint64> mask; // is 2**num_polls-1 for mod op
    450  kmp_uint64 cleanup_ticket; // thread with cleanup ticket
    451  std::atomic<kmp_uint64> *old_polls; // will deallocate old_polls
    458  std::atomic<kmp_uint64> next_ticket;
    473  kmp_uint64 now_serving; // doesn't have to be volatile
|
D | kmp_itt.inl |
    327  LINKAGE void __kmp_itt_metadata_imbalance(int gtid, kmp_uint64 begin,
    328  kmp_uint64 end, kmp_uint64 imbalance,
    329  kmp_uint64 reduction) {
    344  kmp_uint64 imbalance_data[4];
    356  LINKAGE void __kmp_itt_metadata_loop(ident_t *loc, kmp_uint64 sched_type,
    357  kmp_uint64 iterations, kmp_uint64 chunk) {
    374  kmp_uint64 loop_data[5];
    406  kmp_uint64 single_data[2];
    492  kmp_uint64 counter =
|
D | kmp_lock.cpp |
    2237  kmp_uint64 ticket = KMP_ATOMIC_INC(&lck->lk.next_ticket);  in __kmp_acquire_drdpa_lock_timed_template()
    2238  kmp_uint64 mask = lck->lk.mask; // atomic load  in __kmp_acquire_drdpa_lock_timed_template()
    2239  std::atomic<kmp_uint64> *polls = lck->lk.polls;  in __kmp_acquire_drdpa_lock_timed_template()
    2292  std::atomic<kmp_uint64> *old_polls = polls;  in __kmp_acquire_drdpa_lock_timed_template()
    2304  polls = (std::atomic<kmp_uint64> *)__kmp_allocate(num_polls *  in __kmp_acquire_drdpa_lock_timed_template()
    2312  kmp_uint64 num_waiting = TCR_8(lck->lk.next_ticket) - ticket - 1;  in __kmp_acquire_drdpa_lock_timed_template()
    2325  polls = (std::atomic<kmp_uint64> *)__kmp_allocate(num_polls *  in __kmp_acquire_drdpa_lock_timed_template()
    2396  kmp_uint64 ticket = lck->lk.next_ticket; // atomic load  in __kmp_test_drdpa_lock()
    2397  std::atomic<kmp_uint64> *polls = lck->lk.polls;  in __kmp_test_drdpa_lock()
    2398  kmp_uint64 mask = lck->lk.mask; // atomic load  in __kmp_test_drdpa_lock()
    [all …]
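The acquire path at lines 2237-2239 takes a ticket with an atomic increment and then spins on polls[ticket & mask]; the lines around 2292-2325 grow the polls array under contention. A sketch of just the ticket/poll handshake, with the resizing and cleanup machinery deliberately left out:

#include <atomic>

typedef unsigned long long kmp_uint64;

// Sketch: fixed-size DRDPA-style ticket lock (the real lock resizes polls).
struct drdpa_lock_sketch {
  std::atomic<kmp_uint64> next_ticket{0};
  std::atomic<kmp_uint64> polls[1]{{0}}; // one slot, so mask is 0
  kmp_uint64 mask{0};                    // 2**num_polls - 1
  kmp_uint64 now_serving{0};
};

void acquire_sketch(drdpa_lock_sketch *lck) {
  kmp_uint64 ticket = lck->next_ticket.fetch_add(1); // cf. KMP_ATOMIC_INC
  // Spin until the releaser publishes our ticket in our poll slot.
  while (lck->polls[ticket & lck->mask].load(std::memory_order_acquire) !=
         ticket) {
  }
  lck->now_serving = ticket; // only the holder writes this
}

void release_sketch(drdpa_lock_sketch *lck) {
  kmp_uint64 next = lck->now_serving + 1;
  lck->polls[next & lck->mask].store(next, std::memory_order_release);
}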
|
D | kmp_utility.cpp |
    93  static kmp_uint64 __kmp_parse_frequency( // R: Frequency in Hz.  in __kmp_parse_frequency()
    99  kmp_uint64 result = 0; /* Zero is a better unknown value than all ones. */  in __kmp_parse_frequency()
|
D | kmp_barrier.cpp |
    88  kmp_uint64 new_state = team_bar->b_arrived + KMP_BARRIER_STATE_BUMP;  in __kmp_linear_barrier_gather_template()
    304  kmp_uint64 new_state;  in __kmp_tree_barrier_gather()
    519  kmp_uint64 new_state = KMP_BARRIER_UNUSED_STATE;  in __kmp_hyper_barrier_gather()
    876  kmp_uint64 new_state;  in __kmp_hierarchical_barrier_gather()
    906  (kmp_uint64)team->t.t_bar[bt].b_arrived + KMP_BARRIER_STATE_BUMP;  in __kmp_hierarchical_barrier_gather()
    911  kmp_uint64 leaf_state =  in __kmp_hierarchical_barrier_gather()
    1127  kmp_uint64 old_leaf_state = thr_bar->leaf_state;  in __kmp_hierarchical_barrier_release()
    1459  kmp_uint64 cur_time = __itt_get_timestamp();  in __kmp_barrier_template()
    1477  kmp_uint64 delta = cur_time - this_thr->th.th_bar_arrive_time;  in __kmp_barrier_template()
    1487  (kmp_uint64)(reduce != NULL));  in __kmp_barrier_template()
    [all …]
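`new_state = b_arrived + KMP_BARRIER_STATE_BUMP` at lines 88 and 906 shows how arrival is tracked: a monotonically increasing 64-bit counter bumped once per barrier episode, which waiters compare against. A sketch of that arrive/wait pairing; the bump constant and the exact roles of the two counters are assumptions, not the runtime's values:

#include <atomic>

typedef unsigned long long kmp_uint64;

const kmp_uint64 BARRIER_STATE_BUMP_SKETCH = 4; // assumed bump per episode

// Sketch: a thread computes the state the team will reach this episode,
// publishes its own arrival, then waits for the team counter to catch up.
void barrier_gather_sketch(std::atomic<kmp_uint64> &team_arrived,
                           std::atomic<kmp_uint64> &my_arrived) {
  kmp_uint64 new_state =
      team_arrived.load(std::memory_order_relaxed) + BARRIER_STATE_BUMP_SKETCH;
  my_arrived.store(new_state, std::memory_order_release); // signal arrival
  while (team_arrived.load(std::memory_order_acquire) < new_state) {
    // spin until the last arriving thread bumps the team counter
  }
}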
|
D | kmp_str.h | 118 void __kmp_str_to_uint(char const *str, kmp_uint64 *out, char const **error);
|
/external/llvm-project/openmp/runtime/test/tasking/ |
D | kmp_taskloop.c |
    53  typedef unsigned long long kmp_uint64;  typedef
    61  kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st,
|
D | kmp_detach_tasks_t1.c | 12 typedef unsigned long long kmp_uint64; typedef
|
D | kmp_detach_tasks_t2.c | 12 typedef unsigned long long kmp_uint64; typedef
|
D | kmp_detach_tasks_t3.c | 14 typedef unsigned long long kmp_uint64; typedef
|