/kernel/linux/linux-5.10/kernel/rcu/ |
D | tree_stall.h |
     30  int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);  in rcu_jiffies_till_stall_check()
     74  return !time_before(j, READ_ONCE(rcu_state.gp_start) + d);  in rcu_gp_might_be_stalled()
    144  rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);  in record_gp_stall_check_time()
    163  if (!READ_ONCE(rcu_kick_kthreads))  in rcu_stall_kick_kthreads()
    165  j = READ_ONCE(rcu_state.jiffies_kick_kthreads);  in rcu_stall_kick_kthreads()
    167  (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {  in rcu_stall_kick_kthreads()
    388  unsigned long j = jiffies - READ_ONCE(rcu_state.gp_activity);  in rcu_is_gp_kthread_starving()
    531  if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))  in print_other_cpu_stall()
    578  if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))  in print_cpu_stall()
    607  if ((rcu_stall_is_suppressed() && !READ_ONCE(rcu_kick_kthreads)) ||  in check_cpu_stall()
    [all …]
|
D | sync.c |
     78  WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);  in rcu_sync_func()
     79  WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);  in rcu_sync_func()
    155  wait_event(rsp->gp_wait, READ_ONCE(rsp->gp_state) >= GP_PASSED);  in rcu_sync_enter()
    170  WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);  in rcu_sync_exit()
    171  WARN_ON_ONCE(READ_ONCE(rsp->gp_count) == 0);  in rcu_sync_exit()
    193  WARN_ON_ONCE(READ_ONCE(rsp->gp_count));  in rcu_sync_dtor()
    194  WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);  in rcu_sync_dtor()
|
D | srcutiny.c |
    102  if (!newval && READ_ONCE(ssp->srcu_gp_waiting))  in __srcu_read_unlock()
    120  if (ssp->srcu_gp_running || USHORT_CMP_GE(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max)))  in srcu_drive_gp()
    133  swait_event_exclusive(ssp->srcu_wq, !READ_ONCE(ssp->srcu_lock_nesting[idx]));  in srcu_drive_gp()
    153  if (USHORT_CMP_LT(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max)))  in srcu_drive_gp()
    163  if (USHORT_CMP_GE(READ_ONCE(ssp->srcu_idx_max), cookie))  in srcu_gp_start_if_needed()
    166  if (!READ_ONCE(ssp->srcu_gp_running)) {  in srcu_gp_start_if_needed()
    216  ret = (READ_ONCE(ssp->srcu_idx) + 3) & ~0x1;  in get_state_synchronize_srcu()
    243  bool ret = USHORT_CMP_GE(READ_ONCE(ssp->srcu_idx), cookie);  in poll_state_synchronize_srcu()
|
D | rcu_segcblist.h |
     15  return READ_ONCE(rclp->len);  in rcu_cblist_n_cbs()
     40  return !READ_ONCE(rsclp->head);  in rcu_segcblist_empty()
     49  return READ_ONCE(rsclp->len);  in rcu_segcblist_n_cbs()
     75  return !READ_ONCE(*READ_ONCE(rsclp->tails[seg]));  in rcu_segcblist_restempty()
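
The four accessors above sample list state that other CPUs update concurrently; READ_ONCE() guarantees each call performs exactly one untorn load that the compiler can neither cache nor refetch. A minimal userspace sketch of the same idiom, assuming a simplified volatile-cast READ_ONCE() and an illustrative struct (not the kernel's types):

    #include <stdio.h>

    /* Simplified stand-in for the kernel macro: one volatile load. */
    #define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

    struct cblist {
        void *head;   /* first callback, written by other threads */
        long len;     /* element count, updated concurrently */
    };

    /* Analogue of rcu_segcblist_n_cbs(): sample len without a lock.
     * The value may be stale by the time it is used, but it is one
     * coherent load, never a torn or repeated one. */
    static long cblist_n_cbs(struct cblist *cl)
    {
        return READ_ONCE(cl->len);
    }

    /* Analogue of rcu_segcblist_empty(): lockless emptiness test. */
    static int cblist_empty(struct cblist *cl)
    {
        return !READ_ONCE(cl->head);
    }

    int main(void)
    {
        struct cblist cl = { .head = NULL, .len = 0 };

        printf("empty=%d len=%ld\n", cblist_empty(&cl), cblist_n_cbs(&cl));
        return 0;
    }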
|
/kernel/linux/linux-5.10/Documentation/translations/ko_KR/ |
D | memory-barriers.txt |
    259  Q = READ_ONCE(P); D = READ_ONCE(*Q);
    266  READ_ONCE() also emits a memory-barrier instruction, so that a DEC Alpha CPU
    271  whether running on DEC Alpha or not, READ_ONCE() also guards against ill effects from the compiler
    277  a = READ_ONCE(*X); WRITE_ONCE(*X, b);
    285  WRITE_ONCE(*X, c); d = READ_ONCE(*X);
    296  (*) the compiler may take memory accesses not protected by READ_ONCE() or WRITE_ONCE() and
    577  as of Linux kernel v4.15, an smp_mb() was added to the READ_ONCE() code for DEC Alpha,
    579  meaning the only people affected are those writing Alpha-specific code and those implementing READ_ONCE() itself.
    593  Q = READ_ONCE(P);
    620  Q = READ_ONCE(P);
    [all …]
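
The pair at line 259, Q = READ_ONCE(P); D = READ_ONCE(*Q);, is the document's address-dependency example: the second load depends on the value returned by the first, and on DEC Alpha READ_ONCE() also supplies the barrier that this ordering needs. A hedged userspace sketch of the pattern, with simplified volatile-cast macros and a C11 release fence standing in for smp_wmb() on the writer side:

    #include <stdatomic.h>

    /* Simplified volatile-cast stand-ins for the kernel macros. */
    #define READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
    #define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

    static int b;
    static int *P;

    static void writer(void)
    {
        b = 42;                                    /* fill the payload */
        atomic_thread_fence(memory_order_release); /* smp_wmb() stand-in */
        WRITE_ONCE(P, &b);                         /* then publish the pointer */
    }

    static void reader(void)
    {
        int *Q, D = 0;

        Q = READ_ONCE(P);         /* load the pointer exactly once */
        if (Q)
            D = READ_ONCE(*Q);    /* dependent load through it */
        (void)D;
    }

    int main(void)
    {
        writer();   /* in real use, writer and reader run on */
        reader();   /* different CPUs concurrently */
        return 0;
    }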
|
/kernel/linux/linux-5.10/net/mptcp/ |
D | mptcp_diag.c |
    128  info->mptcpi_subflows = READ_ONCE(msk->pm.subflows);  in mptcp_diag_get_info()
    129  info->mptcpi_add_addr_signal = READ_ONCE(msk->pm.add_addr_signaled);  in mptcp_diag_get_info()
    130  info->mptcpi_add_addr_accepted = READ_ONCE(msk->pm.add_addr_accepted);  in mptcp_diag_get_info()
    131  info->mptcpi_subflows_max = READ_ONCE(msk->pm.subflows_max);  in mptcp_diag_get_info()
    132  val = READ_ONCE(msk->pm.add_addr_signal_max);  in mptcp_diag_get_info()
    134  val = READ_ONCE(msk->pm.add_addr_accept_max);  in mptcp_diag_get_info()
    138  if (READ_ONCE(msk->can_ack))  in mptcp_diag_get_info()
    141  info->mptcpi_token = READ_ONCE(msk->token);  in mptcp_diag_get_info()
    142  info->mptcpi_write_seq = READ_ONCE(msk->write_seq);  in mptcp_diag_get_info()
    144  info->mptcpi_rcv_nxt = READ_ONCE(msk->ack_seq);  in mptcp_diag_get_info()
|
D | pm.c |
     63  pm->subflows_max, READ_ONCE(pm->accept_subflow));  in mptcp_pm_allow_new_subflow()
     66  if (!READ_ONCE(pm->accept_subflow))  in mptcp_pm_allow_new_subflow()
     70  if (READ_ONCE(pm->accept_subflow)) {  in mptcp_pm_allow_new_subflow()
    104  if (!READ_ONCE(pm->work_pending))  in mptcp_pm_fully_established()
    109  if (READ_ONCE(pm->work_pending))  in mptcp_pm_fully_established()
    127  if (!READ_ONCE(pm->work_pending))  in mptcp_pm_subflow_established()
    132  if (READ_ONCE(pm->work_pending))  in mptcp_pm_subflow_established()
    149  READ_ONCE(pm->accept_addr));  in mptcp_pm_add_addr_received()
    153  if (!READ_ONCE(pm->accept_addr))  in mptcp_pm_add_addr_received()
    186  *echo = READ_ONCE(msk->pm.add_addr_echo);  in mptcp_pm_add_addr_signal()
|
/kernel/linux/linux-5.10/arch/arm64/include/asm/ |
D | preempt.h |
     12  return READ_ONCE(current_thread_info()->preempt.count);  in preempt_count()
     46  u32 pc = READ_ONCE(current_thread_info()->preempt.count);  in __preempt_count_add()
     53  u32 pc = READ_ONCE(current_thread_info()->preempt.count);  in __preempt_count_sub()
     61  u64 pc = READ_ONCE(ti->preempt_count);  in __preempt_count_dec_and_test()
     73  return !pc || !READ_ONCE(ti->preempt_count);  in __preempt_count_dec_and_test()
     78  u64 pc = READ_ONCE(current_thread_info()->preempt_count);  in should_resched()
|
/kernel/linux/linux-5.10/include/net/ |
D | busy_poll.h |
     34  return READ_ONCE(sysctl_net_busy_poll);  in net_busy_loop_on()
     39  return READ_ONCE(sk->sk_ll_usec) && !signal_pending(current);  in sk_can_busy_loop()
     74  unsigned long bp_usec = READ_ONCE(sysctl_net_busy_poll);  in busy_loop_timeout()
     90  unsigned long bp_usec = READ_ONCE(sk->sk_ll_usec);  in sk_busy_loop_timeout()
    105  unsigned int napi_id = READ_ONCE(sk->sk_napi_id);  in sk_busy_loop()
    139  if (!READ_ONCE(sk->sk_napi_id))  in sk_mark_napi_id_once()
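
These helpers snapshot tunables (sysctl_net_busy_poll, sk->sk_ll_usec) that can be rewritten at any moment from another context; READ_ONCE() ensures each timeout decision is computed from one coherent value. A sketch of the idiom under those assumptions, with a stand-in tunable rather than the real sysctl:

    #include <stdbool.h>
    #include <time.h>

    #define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

    /* Stand-in for sysctl_net_busy_poll: another thread may rewrite it. */
    static unsigned long busy_poll_usec = 50;

    static unsigned long now_usec(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000UL + (unsigned long)ts.tv_nsec / 1000;
    }

    /* Analogue of busy_loop_timeout(): snapshot the tunable once so the
     * budget cannot change between the test and the subtraction. */
    static bool busy_loop_timeout(unsigned long start_usec)
    {
        unsigned long bp_usec = READ_ONCE(busy_poll_usec);

        if (bp_usec)
            return now_usec() - start_usec > bp_usec;
        return true;    /* polling disabled: time out immediately */
    }

    int main(void)
    {
        unsigned long start = now_usec();

        while (!busy_loop_timeout(start))
            ;   /* poll until the sampled budget expires */
        return 0;
    }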
|
/kernel/linux/linux-5.10/arch/arm64/mm/ |
D | mmu.c |
    161  pte_t old_pte = READ_ONCE(*ptep);  in init_pte()
    170  READ_ONCE(pte_val(*ptep))));  in init_pte()
    185  pmd_t pmd = READ_ONCE(*pmdp);  in alloc_init_cont_pte()
    193  pmd = READ_ONCE(*pmdp);  in alloc_init_cont_pte()
    222  pmd_t old_pmd = READ_ONCE(*pmdp);  in init_pmd()
    236  READ_ONCE(pmd_val(*pmdp))));  in init_pmd()
    242  pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));  in init_pmd()
    256  pud_t pud = READ_ONCE(*pudp);  in alloc_init_cont_pmd()
    267  pud = READ_ONCE(*pudp);  in alloc_init_cont_pmd()
    307  p4d_t p4d = READ_ONCE(*p4dp);  in alloc_init_pud()
    [all …]
|
/kernel/linux/linux-5.10/arch/s390/kernel/ |
D | idle.c |
     61  idle_count = READ_ONCE(idle->idle_count);  in show_idle_count()
     62  if (READ_ONCE(idle->clock_idle_enter))  in show_idle_count()
     78  idle_time = READ_ONCE(idle->idle_time);  in show_idle_time()
     79  idle_enter = READ_ONCE(idle->clock_idle_enter);  in show_idle_time()
     80  idle_exit = READ_ONCE(idle->clock_idle_exit);  in show_idle_time()
    104  idle_enter = READ_ONCE(idle->clock_idle_enter);  in arch_cpu_idle_time()
    105  idle_exit = READ_ONCE(idle->clock_idle_exit);  in arch_cpu_idle_time()
|
/kernel/linux/linux-5.10/drivers/powercap/ |
D | idle_inject.c |
    114  duration_us = READ_ONCE(ii_dev->run_duration_us);  in idle_inject_timer_fn()
    115  duration_us += READ_ONCE(ii_dev->idle_duration_us);  in idle_inject_timer_fn()
    144  play_idle_precise(READ_ONCE(ii_dev->idle_duration_us) * NSEC_PER_USEC,  in idle_inject_fn()
    145  READ_ONCE(ii_dev->latency_us) * NSEC_PER_USEC);  in idle_inject_fn()
    172  *run_duration_us = READ_ONCE(ii_dev->run_duration_us);  in idle_inject_get_duration()
    173  *idle_duration_us = READ_ONCE(ii_dev->idle_duration_us);  in idle_inject_get_duration()
    198  unsigned int idle_duration_us = READ_ONCE(ii_dev->idle_duration_us);  in idle_inject_start()
    199  unsigned int run_duration_us = READ_ONCE(ii_dev->run_duration_us);  in idle_inject_start()
|
/kernel/linux/linux-5.10/arch/s390/lib/ |
D | spinlock.c |
    131  old = READ_ONCE(lp->lock);  in arch_spin_lock_queued()
    168  while (READ_ONCE(node->prev) != NULL) {  in arch_spin_lock_queued()
    182  old = READ_ONCE(lp->lock);  in arch_spin_lock_queued()
    202  while ((next = READ_ONCE(node->next)) == NULL)  in arch_spin_lock_queued()
    218  owner = arch_spin_yield_target(READ_ONCE(lp->lock), NULL);  in arch_spin_lock_classic()
    258  owner = READ_ONCE(lp->lock);  in arch_spin_trylock_retry()
    272  while (READ_ONCE(rw->cnts) & 0x10000)  in arch_read_lock_wait()
    284  while (READ_ONCE(rw->cnts) & 0x10000)  in arch_read_lock_wait()
    301  old = READ_ONCE(rw->cnts);  in arch_write_lock_wait()
    317  cpu = READ_ONCE(lp->lock) & _Q_LOCK_CPU_MASK;  in arch_spin_relax()
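
The wait loops here reread the lock word with READ_ONCE() on every iteration; without it the compiler could hoist the load out of the loop and spin forever on a register copy. A userspace sketch of the arch_read_lock_wait() shape (formally a data race under plain C11 — the kernel's memory model permits the idiom; portable code would use relaxed atomics):

    #include <pthread.h>
    #include <stdio.h>

    #define READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
    #define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

    struct rwlock { unsigned int cnts; };   /* bit 16 = writer held */

    static struct rwlock rw = { .cnts = 0x10000 };

    /* Mirrors the arch_read_lock_wait() loop: every test is a fresh
     * load, so a store from another CPU is eventually observed. */
    static void read_lock_wait(struct rwlock *lock)
    {
        while (READ_ONCE(lock->cnts) & 0x10000)
            ;   /* the kernel inserts cpu_relax()/backoff here */
    }

    static void *writer_unlock(void *arg)
    {
        (void)arg;
        WRITE_ONCE(rw.cnts, 0);   /* drop the writer bit */
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, writer_unlock, NULL);
        read_lock_wait(&rw);      /* returns once the writer bit clears */
        pthread_join(t, NULL);
        puts("acquired");
        return 0;
    }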
|
/kernel/linux/linux-5.10/arch/s390/include/asm/ |
D | preempt.h |
     17  return READ_ONCE(S390_lowcore.preempt_count) & ~PREEMPT_NEED_RESCHED;  in preempt_count()
     25  old = READ_ONCE(S390_lowcore.preempt_count);  in preempt_count_set()
     44  return !(READ_ONCE(S390_lowcore.preempt_count) & PREEMPT_NEED_RESCHED);  in test_preempt_need_resched()
     74  return unlikely(READ_ONCE(S390_lowcore.preempt_count) ==  in should_resched()
     84  return READ_ONCE(S390_lowcore.preempt_count);  in preempt_count()
|
/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx4/ |
D | en_port.c |
    165  packets += READ_ONCE(ring->packets);  in mlx4_en_fold_software_stats()
    166  bytes += READ_ONCE(ring->bytes);  in mlx4_en_fold_software_stats()
    176  packets += READ_ONCE(ring->packets);  in mlx4_en_fold_software_stats()
    177  bytes += READ_ONCE(ring->bytes);  in mlx4_en_fold_software_stats()
    252  sw_rx_dropped += READ_ONCE(ring->dropped);  in mlx4_en_DUMP_ETH_STATS()
    253  priv->port_stats.rx_chksum_good += READ_ONCE(ring->csum_ok);  in mlx4_en_DUMP_ETH_STATS()
    254  priv->port_stats.rx_chksum_none += READ_ONCE(ring->csum_none);  in mlx4_en_DUMP_ETH_STATS()
    255  priv->port_stats.rx_chksum_complete += READ_ONCE(ring->csum_complete);  in mlx4_en_DUMP_ETH_STATS()
    256  priv->port_stats.rx_alloc_pages += READ_ONCE(ring->rx_alloc_pages);  in mlx4_en_DUMP_ETH_STATS()
    257  priv->xdp_stats.rx_xdp_drop += READ_ONCE(ring->xdp_drop);  in mlx4_en_DUMP_ETH_STATS()
    [all …]
|
/kernel/linux/linux-5.10/tools/memory-model/litmus-tests/ |
D | SB+rfionceonce-poonceonces.litmus |
     17  r1 = READ_ONCE(*x);
     18  r2 = READ_ONCE(*y);
     27  r3 = READ_ONCE(*y);
     28  r4 = READ_ONCE(*x);
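
This litmus test exercises the store-buffering (SB) shape: each thread writes one variable and then reads the other, and with only READ_ONCE()/WRITE_ONCE()-strength accesses the outcome where both cross reads return 0 is allowed. A runnable C11 reproduction of the core shape, dropping the same-address reads r1/r3 of the original test and using relaxed atomics in place of the kernel macros:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int x, y;
    static int r2, r4;

    static void *p0(void *arg)
    {
        (void)arg;
        atomic_store_explicit(&x, 1, memory_order_relaxed);  /* WRITE_ONCE(*x, 1) */
        r2 = atomic_load_explicit(&y, memory_order_relaxed); /* r2 = READ_ONCE(*y) */
        return NULL;
    }

    static void *p1(void *arg)
    {
        (void)arg;
        atomic_store_explicit(&y, 1, memory_order_relaxed);  /* WRITE_ONCE(*y, 1) */
        r4 = atomic_load_explicit(&x, memory_order_relaxed); /* r4 = READ_ONCE(*x) */
        return NULL;
    }

    int main(void)
    {
        for (int i = 0; i < 100000; i++) {
            pthread_t t0, t1;

            atomic_store(&x, 0);
            atomic_store(&y, 0);
            pthread_create(&t0, NULL, p0, NULL);
            pthread_create(&t1, NULL, p1, NULL);
            pthread_join(t0, NULL);
            pthread_join(t1, NULL);
            if (r2 == 0 && r4 == 0) {
                printf("non-SC outcome after %d runs\n", i);
                return 0;
            }
        }
        printf("non-SC outcome not observed (it remains allowed)\n");
        return 0;
    }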
|
D | ISA2+poonceonces.litmus |
      9  * of the smp_load_acquire() invocations are replaced by READ_ONCE()?
     24  r0 = READ_ONCE(*y);
     33  r0 = READ_ONCE(*z);
     34  r1 = READ_ONCE(*x);
|
D | IRIW+poonceonces+OnceOnce.litmus |
     25  r0 = READ_ONCE(*x);
     26  r1 = READ_ONCE(*y);
     39  r0 = READ_ONCE(*y);
     40  r1 = READ_ONCE(*x);
|
D | IRIW+fencembonceonces+OnceOnce.litmus |
     25  r0 = READ_ONCE(*x);
     27  r1 = READ_ONCE(*y);
     40  r0 = READ_ONCE(*y);
     42  r1 = READ_ONCE(*x);
|
/kernel/linux/linux-5.10/include/clocksource/ |
D | hyperv_timer.h |
     61  sequence = READ_ONCE(tsc_pg->tsc_sequence);  in hv_read_tsc_page_tsc()
     70  scale = READ_ONCE(tsc_pg->tsc_scale);  in hv_read_tsc_page_tsc()
     71  offset = READ_ONCE(tsc_pg->tsc_offset);  in hv_read_tsc_page_tsc()
     80  } while (READ_ONCE(tsc_pg->tsc_sequence) != sequence);  in hv_read_tsc_page_tsc()
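
hv_read_tsc_page_tsc() is a sequence-retry read: snapshot the sequence, load the payload, and retry if a concurrent update bumped the sequence meanwhile. A simplified sketch of that shape; note the real function also treats an invalid sequence specially and applies tsc_scale as a 64.64 fixed-point multiplier, and a seqcount reader on weakly ordered hardware would additionally need read barriers:

    #include <stdio.h>

    #define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

    struct tsc_page {
        unsigned int       tsc_sequence;
        unsigned long long tsc_scale;
        unsigned long long tsc_offset;
    };

    /* Retry loop: if the sequence changed while we read scale/offset,
     * a writer raced with us and the pair may be inconsistent. */
    static unsigned long long read_tsc_page(const struct tsc_page *pg,
                                            unsigned long long raw)
    {
        unsigned int seq;
        unsigned long long scale, offset;

        do {
            seq    = READ_ONCE(pg->tsc_sequence);  /* snapshot version */
            scale  = READ_ONCE(pg->tsc_scale);     /* read the payload */
            offset = READ_ONCE(pg->tsc_offset);
        } while (READ_ONCE(pg->tsc_sequence) != seq); /* retry on change */

        return raw * scale + offset;   /* plain multiply for illustration */
    }

    int main(void)
    {
        struct tsc_page pg = { .tsc_sequence = 1, .tsc_scale = 2,
                               .tsc_offset = 10 };

        printf("%llu\n", read_tsc_page(&pg, 100));  /* prints 210 */
        return 0;
    }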
|
/kernel/linux/linux-5.10/drivers/lightnvm/ |
D | pblk-rb.c |
    174  flags = READ_ONCE(w_ctx->flags);  in clean_wctx()
    194  unsigned int mem = READ_ONCE(rb->mem);  in pblk_rb_space()
    195  unsigned int sync = READ_ONCE(rb->sync);  in pblk_rb_space()
    212  unsigned int mem = READ_ONCE(rb->mem);  in pblk_rb_read_count()
    213  unsigned int subm = READ_ONCE(rb->subm);  in pblk_rb_read_count()
    220  unsigned int mem = READ_ONCE(rb->mem);  in pblk_rb_sync_count()
    221  unsigned int sync = READ_ONCE(rb->sync);  in pblk_rb_sync_count()
    230  subm = READ_ONCE(rb->subm);  in pblk_rb_read_commit()
    251  flags = READ_ONCE(entry->w_ctx.flags);  in __pblk_rb_update_l2p()
    345  flags = READ_ONCE(entry->w_ctx.flags);  in pblk_rb_write_entry_user()
    [all …]
|
/kernel/linux/linux-5.10/kernel/locking/ |
D | qspinlock_paravirt.h |
    121  return !READ_ONCE(lock->locked) &&  in trylock_clear_pending()
    246  if (READ_ONCE(he->lock) == lock) {  in pv_unhash()
    247  node = READ_ONCE(he->node);  in pv_unhash()
    272  return READ_ONCE(prev->state) != vcpu_running;  in pv_wait_early()
    302  if (READ_ONCE(node->locked))  in pv_wait_node()
    322  if (!READ_ONCE(node->locked)) {  in pv_wait_node()
    343  !READ_ONCE(node->locked));  in pv_wait_node()
    414  if (READ_ONCE(pn->state) == vcpu_hashed)  in pv_wait_head_or_lock()
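
pv_wait_node() polls READ_ONCE(node->locked) a bounded number of times before asking the hypervisor to halt the vCPU rather than burn cycles. A sketch of that spin-then-block shape, with sched_yield() standing in for the pv_wait() hypercall and an arbitrary spin bound of my own choosing:

    #include <pthread.h>
    #include <sched.h>
    #include <stdio.h>

    #define READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
    #define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))
    #define SPIN_THRESHOLD   512   /* arbitrary bound for this sketch */

    struct pv_node { int locked; };

    static struct pv_node node;

    /* Poll with fresh loads for a while, then stop burning the (virtual)
     * CPU; the kernel would pv_wait() until the lock holder kicks it. */
    static void pv_wait_node_sketch(struct pv_node *n)
    {
        for (;;) {
            for (int loop = 0; loop < SPIN_THRESHOLD; loop++)
                if (READ_ONCE(n->locked))
                    return;   /* predecessor handed us the lock */
            sched_yield();
        }
    }

    static void *handover(void *arg)
    {
        (void)arg;
        WRITE_ONCE(node.locked, 1);   /* predecessor passes the lock */
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, handover, NULL);
        pv_wait_node_sketch(&node);
        pthread_join(t, NULL);
        puts("lock handed over");
        return 0;
    }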
|
/kernel/linux/linux-5.10/include/linux/ |
D | srcutiny.h |
     63  idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;  in __srcu_read_lock()
     84  idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;  in srcu_torture_stats_print()
     87  READ_ONCE(ssp->srcu_lock_nesting[!idx]),  in srcu_torture_stats_print()
     88  READ_ONCE(ssp->srcu_lock_nesting[idx]));  in srcu_torture_stats_print()
|
/kernel/linux/linux-5.10/security/selinux/include/ |
D | security.h |
    128  return READ_ONCE(state->enforcing);  in enforcing_enabled()
    148  return READ_ONCE(state->checkreqprot);  in checkreqprot_get()
    159  return READ_ONCE(state->disabled);  in selinux_disabled()
    177  return READ_ONCE(state->policycap[POLICYDB_CAPABILITY_NETPEER]);  in selinux_policycap_netpeer()
    184  return READ_ONCE(state->policycap[POLICYDB_CAPABILITY_OPENPERM]);  in selinux_policycap_openperm()
    191  return READ_ONCE(state->policycap[POLICYDB_CAPABILITY_EXTSOCKCLASS]);  in selinux_policycap_extsockclass()
    198  return READ_ONCE(state->policycap[POLICYDB_CAPABILITY_ALWAYSNETWORK]);  in selinux_policycap_alwaysnetwork()
    205  return READ_ONCE(state->policycap[POLICYDB_CAPABILITY_CGROUPSECLABEL]);  in selinux_policycap_cgroupseclabel()
    212  return READ_ONCE(state->policycap[POLICYDB_CAPABILITY_NNP_NOSUID_TRANSITION]);  in selinux_policycap_nnp_nosuid_transition()
    219  return READ_ONCE(state->policycap[POLICYDB_CAPABILITY_GENFS_SECLABEL_SYMLINKS]);  in selinux_policycap_genfs_seclabel_symlinks()
    [all …]
|
/kernel/linux/linux-5.10/lib/ |
D | errseq.c |
     71  old = READ_ONCE(*eseq);  in errseq_set()
    124  errseq_t old = READ_ONCE(*eseq);  in errseq_sample()
    146  errseq_t cur = READ_ONCE(*eseq);  in errseq_check()
    184  old = READ_ONCE(*eseq);  in errseq_check_and_advance()
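
errseq_t packs an error code and a change counter into one word: writers record errors with errseq_set(), and each watcher keeps a sampled cursor and later checks whether anything newer arrived. A toy single-writer analogue of that sample/check protocol — the encoding below is invented for illustration; the kernel's real layout also carries a "seen" flag and errseq_set() uses cmpxchg() to tolerate concurrent writers:

    #include <stdio.h>

    #define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

    typedef unsigned int errseq_t;

    /* Toy encoding: low 12 bits hold the errno, upper bits a bump
     * counter.  Single writer assumed; the kernel version is not. */
    #define ERRSEQ_SHIFT 12
    #define ERRSEQ_MASK  ((1u << ERRSEQ_SHIFT) - 1)

    static void toy_errseq_set(errseq_t *eseq, int err)
    {
        errseq_t old = READ_ONCE(*eseq);

        /* bump the counter and record the new errno */
        *eseq = ((old >> ERRSEQ_SHIFT) + 1) << ERRSEQ_SHIFT | (-err & ERRSEQ_MASK);
    }

    /* Cursor meaning "errors up to this point are already seen". */
    static errseq_t toy_errseq_sample(const errseq_t *eseq)
    {
        return READ_ONCE(*eseq);
    }

    /* 0 if nothing new since the cursor, else the pending error. */
    static int toy_errseq_check(const errseq_t *eseq, errseq_t since)
    {
        errseq_t cur = READ_ONCE(*eseq);

        return cur == since ? 0 : -(int)(cur & ERRSEQ_MASK);
    }

    int main(void)
    {
        errseq_t es = 0;
        errseq_t cursor = toy_errseq_sample(&es);

        toy_errseq_set(&es, -5);   /* writer records -EIO */
        printf("pending error: %d\n", toy_errseq_check(&es, cursor));
        return 0;
    }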
|