/kernel/locking/ |
D | osq_lock.c |
    44  struct optimistic_spin_node *prev) in osq_wait_next() argument
    55  old = prev ? prev->cpu : OSQ_UNLOCKED_VAL; in osq_wait_next()
    93  struct optimistic_spin_node *prev, *next; in osq_lock() local
    111 prev = decode_cpu(old); in osq_lock()
    112 node->prev = prev; in osq_lock()
    126 WRITE_ONCE(prev->next, node); in osq_lock()
    144 vcpu_is_preempted(node_cpu(node->prev)))) in osq_lock()
    161 if (data_race(prev->next) == node && in osq_lock()
    162 cmpxchg(&prev->next, node, NULL) == node) in osq_lock()
    179 prev = READ_ONCE(node->prev); in osq_lock()
    [all …]
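The osq_lock() hits include its abort path: before a spinner bails out (for example because the predecessor's vCPU was preempted), it must atomically clear itself out of the predecessor's next pointer; only if that cmpxchg wins is the unlink safe, otherwise the predecessor is already handing the lock over. A hedged userspace sketch of just that step, not the kernel's osq code (struct spin_node and try_self_unlink are invented names):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct spin_node {
        struct spin_node *_Atomic next;
};

/* If prev->next still points at node, detach atomically and report success.
 * Failure means the predecessor already published the handoff, so the
 * caller must keep waiting for the lock instead of unlinking. */
static bool try_self_unlink(struct spin_node *prev, struct spin_node *node)
{
        struct spin_node *expected = node;

        return atomic_compare_exchange_strong(&prev->next, &expected, NULL);
}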
|
D | mcs_spinlock.h |
    67 struct mcs_spinlock *prev; in mcs_spin_lock() local
    79 prev = xchg(lock, node); in mcs_spin_lock()
    80 if (likely(prev == NULL)) { in mcs_spin_lock()
    91 WRITE_ONCE(prev->next, node); in mcs_spin_lock()
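These four hits are the core MCS handoff: the locker swaps its own node into the lock's tail pointer, a NULL previous tail means the lock was free, and otherwise the locker publishes itself through prev->next and spins on a flag in its own node. Below is a minimal userspace sketch of that pattern using C11 atomics; it is not the kernel's mcs_spinlock code, and the names mcs_lock/mcs_node plus the empty cpu_relax() stub are assumptions made for illustration.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct mcs_node {
        struct mcs_node *_Atomic next;
        atomic_bool locked;            /* set by the previous owner on handoff */
};

struct mcs_lock {
        struct mcs_node *_Atomic tail;
};

static inline void cpu_relax(void) { }  /* placeholder for a pause/yield hint */

static void mcs_lock_acquire(struct mcs_lock *lock, struct mcs_node *node)
{
        struct mcs_node *prev;

        atomic_store_explicit(&node->next, NULL, memory_order_relaxed);
        atomic_store_explicit(&node->locked, false, memory_order_relaxed);

        /* Swap ourselves in as the new tail; prev is the old tail (or NULL). */
        prev = atomic_exchange_explicit(&lock->tail, node, memory_order_acq_rel);
        if (prev == NULL)
                return;                 /* queue was empty: lock is ours */

        /* Link behind prev and spin on our own node until the owner hands over. */
        atomic_store_explicit(&prev->next, node, memory_order_release);
        while (!atomic_load_explicit(&node->locked, memory_order_acquire))
                cpu_relax();
}

static void mcs_lock_release(struct mcs_lock *lock, struct mcs_node *node)
{
        struct mcs_node *next = atomic_load_explicit(&node->next, memory_order_acquire);

        if (next == NULL) {
                /* No visible successor: try to reset the tail back to empty. */
                struct mcs_node *expected = node;
                if (atomic_compare_exchange_strong_explicit(&lock->tail, &expected, NULL,
                                memory_order_acq_rel, memory_order_acquire))
                        return;
                /* A successor is mid-enqueue; wait for it to link itself. */
                while ((next = atomic_load_explicit(&node->next, memory_order_acquire)) == NULL)
                        cpu_relax();
        }
        atomic_store_explicit(&next->locked, true, memory_order_release);
}

Each waiter spins only on its own node, which is the point of the design: contention stays off the shared lock word and off other waiters' cache lines.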
|
D | lockdep.c |
    1576 __calc_dep_bit(struct held_lock *prev, struct held_lock *next) in __calc_dep_bit() argument
    1578 return (prev->read == 0) + ((next->read != 2) << 1); in __calc_dep_bit()
    1581 static inline u8 calc_dep(struct held_lock *prev, struct held_lock *next) in calc_dep() argument
    1583 return 1U << __calc_dep_bit(prev, next); in calc_dep()
    1591 __calc_dep_bitb(struct held_lock *prev, struct held_lock *next) in __calc_dep_bitb() argument
    1593 return (next->read != 2) + ((prev->read == 0) << 1); in __calc_dep_bitb()
    1596 static inline u8 calc_depb(struct held_lock *prev, struct held_lock *next) in calc_depb() argument
    1598 return 1U << __calc_dep_bitb(prev, next); in calc_depb()
    2505 struct held_lock *prev, in print_bad_irq_dependency() argument
    2529 print_lock(prev); in print_bad_irq_dependency()
    [all …]
|
D | qspinlock.c |
    274 struct mcs_spinlock *prev) { } in __pv_wait_node() argument
    317 struct mcs_spinlock *prev, *next, *node; in queued_spin_lock_slowpath() local
    468 prev = decode_tail(old); in queued_spin_lock_slowpath()
    471 WRITE_ONCE(prev->next, node); in queued_spin_lock_slowpath()
    473 pv_wait_node(node, prev); in queued_spin_lock_slowpath()
|
D | qspinlock_paravirt.h |
    267 pv_wait_early(struct pv_node *prev, int loop) in pv_wait_early() argument
    272 return READ_ONCE(prev->state) != vcpu_running; in pv_wait_early()
    293 static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev) in pv_wait_node() argument
    296 struct pv_node *pp = (struct pv_node *)prev; in pv_wait_node()
|
/kernel/ |
D | cfi.c |
    111 static void prepare_next_shadow(const struct cfi_shadow __rcu *prev, in prepare_next_shadow() argument
    119 if (!prev) in prepare_next_shadow()
    123 if (prev->base == next->base) { in prepare_next_shadow()
    124 memcpy(next->shadow, prev->shadow, SHADOW_ARR_SIZE); in prepare_next_shadow()
    130 if (prev->shadow[i] == SHADOW_INVALID) in prepare_next_shadow()
    133 index = ptr_to_shadow(next, shadow_to_ptr(prev, i)); in prepare_next_shadow()
    138 shadow_to_check_fn(prev, prev->shadow[i])); in prepare_next_shadow()
    200 struct cfi_shadow *prev; in update_shadow() local
    207 prev = rcu_dereference_protected(cfi_shadow, in update_shadow()
    212 prepare_next_shadow(prev, next); in update_shadow()
    [all …]
|
D | scs.c |
    133 unsigned long *p, prev, curr = highest, used = 0; in scs_check_usage() local
    145 prev = cmpxchg_relaxed(&highest, curr, used); in scs_check_usage()
    147 if (prev == curr) { in scs_check_usage()
    153 curr = prev; in scs_check_usage()
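scs_check_usage() uses the standard cmpxchg retry loop for a monotonic high-water mark: whenever the exchange loses a race, the observed value replaces the expected one, and the loop stops as soon as the update lands or the recorded value is already at least as large. A small sketch of that idiom in portable C11 (record_high_water is a made-up name, not the kernel function):

#include <stdatomic.h>

/* Raise *highest to `used` if `used` is larger; lock-free and monotonic. */
static void record_high_water(_Atomic unsigned long *highest, unsigned long used)
{
        unsigned long curr = atomic_load_explicit(highest, memory_order_relaxed);

        while (used > curr) {
                /* On failure, curr is refreshed with the value that beat us. */
                if (atomic_compare_exchange_weak_explicit(highest, &curr, used,
                                memory_order_relaxed, memory_order_relaxed))
                        break;
        }
}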
|
D | smp.c |
    564 struct llist_node *entry, *prev; in flush_smp_call_function_queue() local
    614 prev = NULL; in flush_smp_call_function_queue()
    621 if (prev) { in flush_smp_call_function_queue()
    622 prev->next = &csd_next->node.llist; in flush_smp_call_function_queue()
    632 prev = &csd->node.llist; in flush_smp_call_function_queue()
    646 prev = NULL; in flush_smp_call_function_queue()
    651 if (prev) { in flush_smp_call_function_queue()
    652 prev->next = &csd_next->node.llist; in flush_smp_call_function_queue()
    670 prev = &csd->node.llist; in flush_smp_call_function_queue()
|
D | seccomp.c |
    225 struct seccomp_filter *prev; member
    418 for (; f; f = f->prev) { in seccomp_run_filters()
    468 for (; child; child = child->prev) in is_ancestor()
    529 orig = orig->prev; in __seccomp_filter_orphan()
    538 orig = orig->prev; in __put_seccomp_filter()
    837 sfilter->prev ? &sfilter->prev->cache : NULL; in seccomp_cache_prepare()
    875 for (walker = current->seccomp.filter; walker; walker = walker->prev) in seccomp_attach_filter()
    901 filter->prev = current->seccomp.filter; in seccomp_attach_filter()
    1787 for (cur = current->seccomp.filter; cur; cur = cur->prev) { in has_duplicate_listener()
    2033 for (filter = orig; filter; filter = filter->prev) in get_nth_filter()
    [all …]
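These hits all walk the same structure: each attached seccomp filter keeps a prev pointer to the filter installed before it, so the per-task chain is a singly linked list that grows at the head and is always traversed from newest to oldest; is_ancestor() simply checks whether one filter appears somewhere in another's chain. A sketch of the attach and ancestor-check pattern (filter_node, attach_filter and is_ancestor_of are illustrative names, not the kernel API):

#include <stdbool.h>
#include <stddef.h>

struct filter_node {
        struct filter_node *prev;       /* filter installed before this one */
        /* per-filter program state would live here */
};

/* New filters are pushed on the head; older ones stay reachable via prev. */
static void attach_filter(struct filter_node **head, struct filter_node *filter)
{
        filter->prev = *head;
        *head = filter;
}

/* A candidate is an "ancestor" if it shows up somewhere in child's chain. */
static bool is_ancestor_of(struct filter_node *candidate, struct filter_node *child)
{
        if (candidate == NULL)
                return true;            /* the empty chain is everyone's ancestor */
        for (; child; child = child->prev)
                if (child == candidate)
                        return true;
        return false;
}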
|
D | static_call_inline.c |
    398 struct static_call_mod *site_mod, **prev; in static_call_del_module() local
    408 for (prev = &key->mods, site_mod = key->mods; in static_call_del_module()
    410 prev = &site_mod->next, site_mod = site_mod->next) in static_call_del_module()
    416 *prev = site_mod->next; in static_call_del_module()
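static_call_del_module() removes one node from a singly linked list without special-casing the head, by walking a pointer-to-pointer: prev always holds the address of whichever next slot currently points at the node under inspection. A generic sketch of that unlink idiom, with hypothetical names (mod_site, del_module_entry) rather than the kernel's types:

#include <stddef.h>

struct mod_site {
        struct mod_site *next;
        const void *owner;              /* module this entry belongs to */
};

/* Unlink the first entry owned by `owner`; the head and interior nodes are
 * handled identically because prev always points at some `next` slot. */
static void del_module_entry(struct mod_site **head, const void *owner)
{
        struct mod_site **prev, *site;

        for (prev = head, site = *head;
             site && site->owner != owner;
             prev = &site->next, site = site->next)
                ;

        if (site)
                *prev = site->next;
}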
|
/kernel/sched/ |
D | cputime.c |
    431 void vtime_task_switch(struct task_struct *prev) in vtime_task_switch() argument
    433 if (is_idle_task(prev)) in vtime_task_switch()
    434 vtime_account_idle(prev); in vtime_task_switch()
    436 vtime_account_kernel(prev); in vtime_task_switch()
    438 vtime_flush(prev); in vtime_task_switch()
    439 arch_vtime_task_switch(prev); in vtime_task_switch()
    459 void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev, in cputime_adjust() argument
    563 void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev, in cputime_adjust() argument
    570 raw_spin_lock_irqsave(&prev->lock, flags); in cputime_adjust()
    581 if (prev->stime + prev->utime >= rtime) in cputime_adjust()
    [all …]
|
D | stats.h |
    155 static inline void psi_sched_switch(struct task_struct *prev, in psi_sched_switch() argument
    162 psi_task_switch(prev, next, sleep); in psi_sched_switch()
    169 static inline void psi_sched_switch(struct task_struct *prev, in psi_sched_switch() argument
    252 sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next) in sched_info_switch() argument
    259 if (prev != rq->idle) in sched_info_switch()
    260 sched_info_depart(rq, prev); in sched_info_switch()
|
D | core.c |
    4772 static inline void finish_task(struct task_struct *prev) in finish_task() argument
    4786 smp_store_release(&prev->on_cpu, 0); in finish_task()
    4963 prepare_task_switch(struct rq *rq, struct task_struct *prev, in prepare_task_switch() argument
    4966 kcov_prepare_switch(prev); in prepare_task_switch()
    4967 sched_info_switch(rq, prev, next); in prepare_task_switch()
    4968 perf_event_task_sched_out(prev, next); in prepare_task_switch()
    4969 rseq_preempt(prev); in prepare_task_switch()
    4970 fire_sched_out_preempt_notifiers(prev, next); in prepare_task_switch()
    4995 static struct rq *finish_task_switch(struct task_struct *prev) in finish_task_switch() argument
    5031 prev_state = READ_ONCE(prev->__state); in finish_task_switch()
    [all …]
|
D | autogroup.c |
    138 struct autogroup *prev; in autogroup_move_group() local
    144 prev = p->signal->autogroup; in autogroup_move_group()
    145 if (prev == ag) { in autogroup_move_group()
    166 autogroup_kref_put(prev); in autogroup_move_group()
|
D | psi.c |
    870 void psi_task_switch(struct task_struct *prev, struct task_struct *next, in psi_task_switch() argument
    874 int cpu = task_cpu(prev); in psi_task_switch()
    889 identical_state = prev->psi_flags == next->psi_flags; in psi_task_switch()
    902 if (prev->pid) { in psi_task_switch()
    913 if (prev->in_memstall) in psi_task_switch()
    915 if (prev->in_iowait) in psi_task_switch()
    919 psi_flags_change(prev, clear, set); in psi_task_switch()
    922 while ((group = iterate_groups(prev, &iter)) && group != common) in psi_task_switch()
    931 for (; group; group = iterate_groups(prev, &iter)) in psi_task_switch()
|
D | stop_task.c |
    20 balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in balance_stop() argument
    72 static void put_prev_task_stop(struct rq *rq, struct task_struct *prev) in put_prev_task_stop() argument
|
/kernel/gcov/ |
D | base.c |
    108 struct gcov_info *prev = NULL; in gcov_module_notifier() local
    117 gcov_info_unlink(prev, info); in gcov_module_notifier()
    121 prev = info; in gcov_module_notifier()
|
/kernel/dma/ |
D | pool.c |
    224 static inline struct gen_pool *dma_guess_pool(struct gen_pool *prev, gfp_t gfp) in dma_guess_pool() argument
    226 if (prev == NULL) { in dma_guess_pool()
    233 if (prev == atomic_pool_kernel) in dma_guess_pool()
    235 if (prev == atomic_pool_dma32) in dma_guess_pool()
|
/kernel/trace/ |
D | trace_sched_wakeup.c |
    375 struct task_struct *prev, in tracing_sched_switch_trace() argument
    389 entry->prev_pid = prev->pid; in tracing_sched_switch_trace()
    390 entry->prev_prio = prev->prio; in tracing_sched_switch_trace()
    391 entry->prev_state = task_state_index(prev); in tracing_sched_switch_trace()
    431 struct task_struct *prev, struct task_struct *next) in probe_wakeup_sched_switch() argument
    440 tracing_record_cmdline(prev); in probe_wakeup_sched_switch()
    476 tracing_sched_switch_trace(wakeup_trace, prev, next, trace_ctx); in probe_wakeup_sched_switch()
|
D | ring_buffer.c |
    1248 struct list_head *list = page->list.prev; in rb_is_reader_page()
    1279 rb_set_list_to_head(head->list.prev); in rb_head_page_activate()
    1306 struct buffer_page *prev, in rb_head_page_set() argument
    1313 list = &prev->list; in rb_head_page_set()
    1329 struct buffer_page *prev, in rb_head_page_set_update() argument
    1332 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_update()
    1338 struct buffer_page *prev, in rb_head_page_set_head() argument
    1341 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_head()
    1347 struct buffer_page *prev, in rb_head_page_set_normal() argument
    1350 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_normal()
    [all …]
|
D | trace_sched_switch.c |
    25 struct task_struct *prev, struct task_struct *next) in probe_sched_switch() argument
    34 tracing_record_taskinfo_sched_switch(prev, next, flags); in probe_sched_switch()
|
/kernel/time/ |
D | tick-sched.c |
    422 int prev; in tick_nohz_dep_set_all() local
    424 prev = atomic_fetch_or(BIT(bit), dep); in tick_nohz_dep_set_all()
    425 if (!prev) in tick_nohz_dep_set_all()
    449 int prev; in tick_nohz_dep_set_cpu() local
    454 prev = atomic_fetch_or(BIT(bit), &ts->tick_dep_mask); in tick_nohz_dep_set_cpu()
    455 if (!prev) { in tick_nohz_dep_set_cpu()
    502 int prev; in tick_nohz_dep_set_signal() local
    505 prev = atomic_fetch_or(BIT(bit), &sig->tick_dep_mask); in tick_nohz_dep_set_signal()
    506 if (!prev) { in tick_nohz_dep_set_signal()
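All three tick_nohz_dep_set_*() hits share one shape: set a dependency bit with an atomic fetch-or, and only when the mask was previously empty (prev == 0) do the expensive follow-up work, so concurrent setters of additional bits stay cheap. A compact C11 sketch of that "first setter does the work" pattern (set_dep_bit and kick_target are invented names, not the kernel interface):

#include <stdatomic.h>
#include <stdbool.h>

/* Placeholder for the expensive action only the first setter performs. */
static void kick_target(void) { }

/* Returns true if this call was the one that made the mask non-empty. */
static bool set_dep_bit(_Atomic unsigned int *mask, unsigned int bit)
{
        unsigned int prev = atomic_fetch_or(mask, 1u << bit);

        if (prev == 0) {
                kick_target();
                return true;
        }
        return false;
}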
|
/kernel/livepatch/ |
D | transition.c |
    224 struct klp_func *prev; in klp_check_stack_func() local
    226 prev = list_next_entry(func, stack_node); in klp_check_stack_func()
    227 func_addr = (unsigned long)prev->new_func; in klp_check_stack_func()
    228 func_size = prev->new_size; in klp_check_stack_func()
|
/kernel/events/ |
D | uprobes.c |
    970 struct map_info *prev = NULL; in build_map_info() local
    980 if (!prev && !more) { in build_map_info()
    985 prev = kmalloc(sizeof(struct map_info), in build_map_info()
    987 if (prev) in build_map_info()
    988 prev->next = NULL; in build_map_info()
    990 if (!prev) { in build_map_info()
    998 info = prev; in build_map_info()
    999 prev = prev->next; in build_map_info()
    1011 prev = curr; in build_map_info()
    1023 info->next = prev; in build_map_info()
    [all …]
|
/kernel/kcsan/ |
D | core.c |
    211 struct list_head *prev_save = ctx->scoped_accesses.prev; in kcsan_check_scoped_accesses()
    214 ctx->scoped_accesses.prev = NULL; /* Avoid recursion. */ in kcsan_check_scoped_accesses()
    217 ctx->scoped_accesses.prev = prev_save; in kcsan_check_scoped_accesses()
    637 else if (unlikely(ctx->scoped_accesses.prev)) in check_access()
    771 if (!ctx->scoped_accesses.prev) /* Lazy initialize list head. */ in kcsan_begin_scoped_access()
    784 if (WARN(!ctx->scoped_accesses.prev, "Unbalanced %s()?", __func__)) in kcsan_end_scoped_access()
    797 ctx->scoped_accesses.prev = NULL; in kcsan_end_scoped_access()
|