/kernel/locking/

  lockdep.c
    1927  struct task_struct *curr = current;   in print_circular_bug_header()  (local)
    1938  curr->comm, task_pid_nr(curr));   in print_circular_bug_header()
    2017  struct task_struct *curr = current;   in print_circular_bug()  (local)
    2045  lockdep_print_held_locks(curr);   in print_circular_bug()
    2537  print_bad_irq_dependency(struct task_struct *curr,   in print_bad_irq_dependency()  (argument)
    2558  curr->comm, task_pid_nr(curr),   in print_bad_irq_dependency()
    2560  curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,   in print_bad_irq_dependency()
    2562  curr->softirqs_enabled);   in print_bad_irq_dependency()
    2591  lockdep_print_held_locks(curr);   in print_bad_irq_dependency()
    2766  static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,   in check_irq_usage()  (argument)
    [all …]
  osq_lock.c
    47  int curr = encode_cpu(smp_processor_id());   in osq_wait_next()  (local)
    58  if (atomic_read(&lock->tail) == curr &&   in osq_wait_next()
    59  atomic_cmpxchg_acquire(&lock->tail, curr, old) == curr) {   in osq_wait_next()
    94  int curr = encode_cpu(smp_processor_id());   in osq_lock()  (local)
    99  node->cpu = curr;   in osq_lock()
    107  old = atomic_xchg(&lock->tail, curr);   in osq_lock()
    210  int curr = encode_cpu(smp_processor_id());   in osq_unlock()  (local)
    215  if (likely(atomic_cmpxchg_release(&lock->tail, curr,   in osq_unlock()
    216  OSQ_UNLOCKED_VAL) == curr))   in osq_unlock()
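The osq_lock.c hits outline an MCS-style queue-tail protocol: each CPU encodes its own id, publishes itself as the queue tail with an atomic exchange, and can unlock on the fast path only by swapping its own id back to the unlocked value. Below is a minimal userspace sketch of that tail discipline using C11 atomics; the names, the integer encoding and the single-word simplification are assumptions for illustration, not the kernel's osq implementation.

    /*
     * Userspace sketch of the tail-exchange discipline visible in the
     * osq_lock.c hits above: lock() publishes its own id with an atomic
     * exchange, unlock() clears the word only if nobody queued behind it.
     */
    #include <stdatomic.h>
    #include <stdio.h>

    #define UNLOCKED 0

    static atomic_int tail = UNLOCKED;

    static int try_fast_unlock(int curr)
    {
        int expected = curr;

        /* Succeeds only while we are still the last queued locker. */
        return atomic_compare_exchange_strong_explicit(&tail, &expected, UNLOCKED,
                                                       memory_order_release,
                                                       memory_order_relaxed);
    }

    int main(void)
    {
        int curr = 1;                            /* stand-in for encode_cpu(smp_processor_id()) */
        int old = atomic_exchange(&tail, curr);  /* lock: publish ourselves as the tail */

        printf("previous tail: %d\n", old);
        printf("fast unlock %s\n", try_fast_unlock(curr) ? "succeeded" : "failed");
        return 0;
    }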
  mutex.c
    109  unsigned long owner, curr = (unsigned long)current;   in __mutex_trylock_common()  (local)
    118  if (task != curr)   in __mutex_trylock_common()
    130  task = curr;   in __mutex_trylock_common()
    134  if (task == curr)   in __mutex_trylock_common()
    172  unsigned long curr = (unsigned long)current;   in __mutex_trylock_fast()  (local)
    175  if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr)) {   in __mutex_trylock_fast()
    185  unsigned long curr = (unsigned long)current;   in __mutex_unlock_fast()  (local)
    187  return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL);   in __mutex_unlock_fast()
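The mutex.c fast paths in these hits treat the owner word as 0 when the lock is free and as the current task pointer when held, so lock and unlock each collapse to one compare-and-swap. A rough userspace analogue with C11 atomics is sketched below; the curr value is an arbitrary nonzero id standing in for (unsigned long)current, and the real slow path, handoff and flag bits are omitted.

    /*
     * Minimal analogue of the mutex fast paths: owner == 0 means unlocked,
     * trylock is CAS(0 -> curr) with acquire, unlock is CAS(curr -> 0)
     * with release.
     */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static atomic_uintptr_t owner;   /* 0 == unlocked */

    static bool trylock_fast(uintptr_t curr)
    {
        uintptr_t zero = 0;

        return atomic_compare_exchange_strong_explicit(&owner, &zero, curr,
                                                       memory_order_acquire,
                                                       memory_order_relaxed);
    }

    static bool unlock_fast(uintptr_t curr)
    {
        return atomic_compare_exchange_strong_explicit(&owner, &curr, 0,
                                                       memory_order_release,
                                                       memory_order_relaxed);
    }

    int main(void)
    {
        uintptr_t curr = 0xC0FFEE;   /* arbitrary nonzero id, not a real task pointer */

        printf("trylock: %d\n", trylock_fast(curr));
        printf("trylock again (already held): %d\n", trylock_fast(curr));
        printf("unlock: %d\n", unlock_fast(curr));
        return 0;
    }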
/kernel/sched/

  swait.c
    23  struct swait_queue *curr;   in swake_up_locked()  (local)
    28  curr = list_first_entry(&q->task_list, typeof(*curr), task_list);   in swake_up_locked()
    29  wake_up_process(curr->task);   in swake_up_locked()
    30  list_del_init(&curr->task_list);   in swake_up_locked()
    63  struct swait_queue *curr;   in swake_up_all()  (local)
    69  curr = list_first_entry(&tmp, typeof(*curr), task_list);   in swake_up_all()
    71  wake_up_state(curr->task, TASK_NORMAL);   in swake_up_all()
    72  list_del_init(&curr->task_list);   in swake_up_all()
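swake_up_locked() in the swait.c hits wakes exactly one task: it takes the first entry off the queue's list, wakes the task recorded in it, and unlinks the entry. The standalone sketch below mimics that shape with a hand-rolled singly linked list and a printf in place of wake_up_process(); all names are illustrative.

    /* "Wake the first waiter" pattern: pop the head entry and wake its task. */
    #include <stdio.h>

    struct waiter {
        const char *name;        /* stands in for curr->task */
        struct waiter *next;
    };

    struct queue {
        struct waiter *head;     /* stands in for q->task_list */
    };

    static void wake_first(struct queue *q)
    {
        struct waiter *curr = q->head;

        if (!curr)                            /* list empty: nothing to wake */
            return;

        printf("waking %s\n", curr->name);    /* wake_up_process(curr->task) */
        q->head = curr->next;                 /* list_del_init(&curr->task_list) */
        curr->next = NULL;
    }

    int main(void)
    {
        struct waiter b = { "task-b", NULL };
        struct waiter a = { "task-a", &b };
        struct queue q = { &a };

        wake_first(&q);   /* wakes task-a, leaves task-b queued */
        wake_first(&q);   /* wakes task-b */
        wake_first(&q);   /* empty queue, no-op */
        return 0;
    }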
  rt.c
    150  rt_rq->highest_prio.curr = MAX_RT_PRIO-1;   in init_rt_rq()
    226  rt_rq->highest_prio.curr = MAX_RT_PRIO-1;   in init_tg_rt_entry()
    330  return rq->online && rq->rt.highest_prio.curr > prev->prio;   in need_pull_rt_task()
    583  struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;   in sched_rt_rq_enqueue()  (local)
    597  if (rt_rq->highest_prio.curr < curr->prio)   in sched_rt_rq_enqueue()
    967  if (rt_rq->rt_nr_running && rq->curr == rq->idle)   in do_sched_rt_period_timer()
    998  return rt_rq->highest_prio.curr;   in rt_se_prio()
    1060  struct task_struct *curr = rq->curr;   in update_curr_rt()  (local)
    1061  struct sched_rt_entity *rt_se = &curr->rt;   in update_curr_rt()
    1065  if (curr->sched_class != &rt_sched_class)   in update_curr_rt()
    [all …]
  stop_task.c
    73  struct task_struct *curr = rq->curr;   in put_prev_task_stop()  (local)
    77  delta_exec = now - curr->se.exec_start;   in put_prev_task_stop()
    81  schedstat_set(curr->stats.exec_max,   in put_prev_task_stop()
    82  max(curr->stats.exec_max, delta_exec));   in put_prev_task_stop()
    84  update_current_exec_runtime(curr, now, delta_exec);   in put_prev_task_stop()
    95  static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)   in task_tick_stop()  (argument)
  deadline.c
    517  dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;   in init_dl_rq()
    1184  if (dl_task(rq->curr))   in dl_task_timer()
    1311  struct task_struct *curr = rq->curr;   in update_curr_dl()  (local)
    1312  struct sched_dl_entity *dl_se = &curr->dl;   in update_curr_dl()
    1317  if (!dl_task(curr) || !on_dl_rq(dl_se))   in update_curr_dl()
    1329  delta_exec = now - curr->se.exec_start;   in update_curr_dl()
    1336  schedstat_set(curr->stats.exec_max,   in update_curr_dl()
    1337  max(curr->stats.exec_max, delta_exec));   in update_curr_dl()
    1339  trace_sched_stat_runtime(curr, delta_exec, 0);   in update_curr_dl()
    1341  update_current_exec_runtime(curr, now, delta_exec);   in update_curr_dl()
    [all …]
  cputime.c
    52  void irqtime_account_irq(struct task_struct *curr, unsigned int offset)   in irqtime_account_irq()  (argument)
    77  } else if ((pc & SOFTIRQ_OFFSET) && curr != this_cpu_ksoftirqd()) {   in irqtime_account_irq()
    82  trace_android_rvh_account_irq(curr, cpu, delta, irq_start);   in irqtime_account_irq()
    468  void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,   in cputime_adjust()  (argument)
    471  *ut = curr->utime;   in cputime_adjust()
    472  *st = curr->stime;   in cputime_adjust()
    577  void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,   in cputime_adjust()  (argument)
    585  rtime = curr->sum_exec_runtime;   in cputime_adjust()
    598  stime = curr->stime;   in cputime_adjust()
    599  utime = curr->utime;   in cputime_adjust()
    [all …]
  wait.c
    85  wait_queue_entry_t *curr, *next;   in __wake_up_common()  (local)
    91  curr = list_next_entry(bookmark, entry);   in __wake_up_common()
    96  curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);   in __wake_up_common()
    98  if (&curr->entry == &wq_head->head)   in __wake_up_common()
    101  list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {   in __wake_up_common()
    102  unsigned flags = curr->flags;   in __wake_up_common()
    108  ret = curr->func(curr, mode, wake_flags, key);   in __wake_up_common()
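__wake_up_common(), per these hits, walks the waitqueue with a removal-safe iterator and calls each entry's func callback, which for normal entries wakes the task and may unlink the entry. The sketch below reproduces just that callback-driven walk in plain C; the exclusive-waiter accounting, bookmark rescan logic and wake flags are left out, and the names are illustrative.

    /* Waitqueue walk: fetch next before invoking the entry's wake callback. */
    #include <stdio.h>

    struct wq_entry {
        unsigned flags;
        int (*func)(struct wq_entry *curr, int mode);
        struct wq_entry *next;
    };

    static int default_wake(struct wq_entry *curr, int mode)
    {
        printf("wake entry %p, mode %d, flags %#x\n", (void *)curr, mode, curr->flags);
        return 1;   /* report that a task was woken */
    }

    static void wake_up_common(struct wq_entry *head, int mode)
    {
        struct wq_entry *curr = head, *next;

        while (curr) {
            next = curr->next;      /* grab it before the callback can unlink curr */
            curr->func(curr, mode);
            curr = next;
        }
    }

    int main(void)
    {
        struct wq_entry b = { 0x1, default_wake, NULL };
        struct wq_entry a = { 0x0, default_wake, &b };

        wake_up_common(&a, 0);
        return 0;
    }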
  fair.c
    601  struct sched_entity *curr = cfs_rq->curr;   in update_min_vruntime()  (local)
    606  if (curr) {   in update_min_vruntime()
    607  if (curr->on_rq)   in update_min_vruntime()
    608  vruntime = curr->vruntime;   in update_min_vruntime()
    610  curr = NULL;   in update_min_vruntime()
    616  if (!curr)   in update_min_vruntime()
    896  struct sched_entity *curr = cfs_rq->curr;   in update_curr()  (local)
    900  if (unlikely(!curr))   in update_curr()
    903  delta_exec = now - curr->exec_start;   in update_curr()
    907  curr->exec_start = now;   in update_curr()
    [all …]
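update_curr() here, like its siblings in stop_task.c and deadline.c above, follows one accounting pattern: compute delta_exec as the clock minus the exec_start stamp taken when the entity last went on CPU, restamp exec_start, and add the delta to the accumulated runtime. The sketch below shows that pattern against CLOCK_MONOTONIC instead of the runqueue clock; the struct and field names are simplified stand-ins, not the kernel's.

    /* Runtime accounting: charge "now - exec_start" and restamp exec_start. */
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    struct entity {
        uint64_t exec_start;        /* ns stamp of the last update */
        uint64_t sum_exec_runtime;  /* accumulated on-CPU time in ns */
    };

    static uint64_t now_ns(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
    }

    static void update_curr(struct entity *curr)
    {
        uint64_t now = now_ns();
        uint64_t delta_exec = now - curr->exec_start;

        curr->exec_start = now;                 /* restamp for the next interval */
        curr->sum_exec_runtime += delta_exec;   /* charge the elapsed slice */
    }

    int main(void)
    {
        struct entity e = { now_ns(), 0 };

        for (volatile long i = 0; i < 10000000; i++)
            ;                                   /* burn a little CPU */
        update_curr(&e);
        printf("ran for ~%llu ns\n", (unsigned long long)e.sum_exec_runtime);
        return 0;
    }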
  membarrier.c
    289  p = rcu_dereference(cpu_rq(cpu)->curr);   in membarrier_global_expedited()
    362  p = rcu_dereference(cpu_rq(cpu_id)->curr);   in membarrier_private_expedited()
    375  p = rcu_dereference(cpu_rq(cpu)->curr);   in membarrier_private_expedited()
    474  p = rcu_dereference(rq->curr);   in sync_runqueues_membarrier_state()
  core.c
    273  rq->core->core_forceidle_count && rq->curr == rq->idle)   in sched_core_dequeue()
    724  psi_account_irqtime(rq->curr, irq_delta);   in update_rq_clock_task()
    795  rq->curr->sched_class->task_tick(rq, rq->curr, 1);   in hrtick()
    1045  struct task_struct *curr = rq->curr;   in resched_curr()  (local)
    1050  if (test_tsk_need_resched(curr))   in resched_curr()
    1056  set_tsk_need_resched(curr);   in resched_curr()
    1061  if (set_nr_and_not_polling(curr))   in resched_curr()
    2239  if (p->sched_class == rq->curr->sched_class)   in check_preempt_curr()
    2240  rq->curr->sched_class->check_preempt_curr(rq, p, flags);   in check_preempt_curr()
    2241  else if (sched_class_above(p->sched_class, rq->curr->sched_class))   in check_preempt_curr()
    [all …]
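resched_curr() in the core.c hits is a set-flag-then-maybe-kick sequence: if the running task already has a reschedule pending, do nothing; otherwise set the flag, and only send a cross-CPU kick when the target is not polling on it. The toy below models that control flow with a C11 atomic flag and a printf in place of the IPI; it illustrates the shape, not the kernel code.

    /* Reschedule request: set need_resched once, kick only non-polling CPUs. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct task {
        atomic_bool need_resched;
        bool polling;              /* true if the CPU will notice the flag on its own */
    };

    static void resched_curr(struct task *curr)
    {
        if (atomic_load(&curr->need_resched))
            return;                          /* already pending, nothing to do */

        atomic_store(&curr->need_resched, true);

        if (!curr->polling)
            printf("kick remote CPU\n");     /* stand-in for the reschedule IPI */
    }

    int main(void)
    {
        struct task t = { false, false };

        resched_curr(&t);   /* sets the flag and kicks */
        resched_curr(&t);   /* flag already set: no second kick */
        return 0;
    }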
  sched.h
    585  struct sched_entity *curr;   (member)
    680  int curr; /* highest queued rt task prio */   (member)
    729  u64 curr;   (member)
    1027  struct task_struct __rcu *curr;   (member)
    1233  #define cpu_curr(cpu) (cpu_rq(cpu)->curr)
    2102  return rq->curr == p;   in task_current()
    2260  WARN_ON_ONCE(rq->curr != prev);   in put_prev_task()
    2341  struct task_struct *p = rq->curr;   in get_push_task()
    3271  static inline void update_current_exec_runtime(struct task_struct *curr,   in update_current_exec_runtime()  (argument)
    3274  curr->se.sum_exec_runtime += delta_exec;   in update_current_exec_runtime()
    [all …]
  pelt.c
    316  cfs_rq->curr == se)) {   in __update_load_avg_se()
    332  cfs_rq->curr != NULL)) {   in __update_load_avg_cfs_rq()
  pelt.h
    105  if (unlikely(is_idle_task(rq->curr))) {   in update_rq_clock_pelt()
/kernel/futex/

  core.c
    651  static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,   in handle_futex_death()  (argument)
    709  if (owner != task_pid_vnr(curr))   in handle_futex_death()
    787  static void exit_robust_list(struct task_struct *curr)   in exit_robust_list()  (argument)
    789  struct robust_list_head __user *head = curr->robust_list;   in exit_robust_list()
    827  curr, pi, HANDLE_DEATH_LIST))   in exit_robust_list()
    845  curr, pip, HANDLE_DEATH_PENDING);   in exit_robust_list()
    881  static void compat_exit_robust_list(struct task_struct *curr)   in compat_exit_robust_list()  (argument)
    883  struct compat_robust_list_head __user *head = curr->compat_robust_list;   in compat_exit_robust_list()
    925  if (handle_futex_death(uaddr, curr, pi,   in compat_exit_robust_list()
    945  handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING);   in compat_exit_robust_list()
    [all …]
/kernel/

  scs.c
    133  unsigned long *p, prev, curr = highest, used = 0;   in scs_check_usage()  (local)
    144  while (used > curr) {   in scs_check_usage()
    145  prev = cmpxchg_relaxed(&highest, curr, used);   in scs_check_usage()
    147  if (prev == curr) {   in scs_check_usage()
    153  curr = prev;   in scs_check_usage()
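scs_check_usage() here maintains a lock-free high-water mark: re-read the published maximum and retry a compare-and-swap until either our sample is no longer the larger value or our store wins. The same loop in userspace C11 atomics, keeping the snippet's variable names, looks roughly like this:

    /* Lock-free maximum tracking with a CAS retry loop. */
    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned long highest;

    static void check_usage(unsigned long used)
    {
        unsigned long curr = atomic_load_explicit(&highest, memory_order_relaxed);

        while (used > curr) {
            /* On failure, curr is reloaded with the value another thread stored. */
            if (atomic_compare_exchange_weak_explicit(&highest, &curr, used,
                                                      memory_order_relaxed,
                                                      memory_order_relaxed)) {
                printf("new high watermark: %lu\n", used);
                break;
            }
        }
    }

    int main(void)
    {
        check_usage(128);   /* raises the watermark */
        check_usage(64);    /* lower than the current maximum: no update */
        check_usage(256);   /* raises it again */
        return 0;
    }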
/kernel/time/

  timer_list.c
    65  struct timerqueue_node *curr;   in print_active_timers()  (local)
    75  curr = timerqueue_getnext(&base->active);   in print_active_timers()
    80  while (curr && i < next) {   in print_active_timers()
    81  curr = timerqueue_iterate_next(curr);   in print_active_timers()
    85  if (curr) {   in print_active_timers()
    87  timer = container_of(curr, struct hrtimer, node);   in print_active_timers()
/kernel/power/

  process.c
    190  struct task_struct *curr = current;   in thaw_processes()  (local)
    210  WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK));   in thaw_processes()
    215  WARN_ON(!(curr->flags & PF_SUSPEND_TASK));   in thaw_processes()
    216  curr->flags &= ~PF_SUSPEND_TASK;   in thaw_processes()
/kernel/gcov/

  fs.c
    677  char *curr;   in add_node()  (local)
    687  for (curr = filename; (next = strchr(curr, '/')); curr = next + 1) {   in add_node()
    688  if (curr == next)   in add_node()
    691  if (strcmp(curr, ".") == 0)   in add_node()
    693  if (strcmp(curr, "..") == 0) {   in add_node()
    699  node = get_child_by_name(parent, curr);   in add_node()
    701  node = new_node(parent, NULL, curr);   in add_node()
    708  node = new_node(parent, info, curr);   in add_node()
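add_node() in the gcov fs.c hits walks a '/'-separated filename one component at a time with strchr, skipping empty and "." components and bailing out on "..", then creates or reuses a node for each piece. A self-contained walk of the same shape is sketched below; it only prints the components, uses length-based comparisons instead of the kernel's in-place handling, and its names are illustrative.

    /* Walk path components between '/' separators, skipping "" and ".". */
    #include <stdio.h>
    #include <string.h>

    static int walk_components(const char *filename)
    {
        const char *curr, *next;

        for (curr = filename; (next = strchr(curr, '/')); curr = next + 1) {
            size_t len = (size_t)(next - curr);

            if (len == 0)
                continue;                    /* consecutive slashes */
            if (len == 1 && curr[0] == '.')
                continue;                    /* "." adds nothing */
            if (len == 2 && curr[0] == '.' && curr[1] == '.')
                return -1;                   /* refuse to walk upward */
            printf("directory component: %.*s\n", (int)len, curr);
        }
        printf("leaf: %s\n", curr);          /* whatever follows the last '/' */
        return 0;
    }

    int main(void)
    {
        return walk_components("kernel/./gcov/fs.gcda") < 0;
    }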
/kernel/bpf/

  cgroup_iter.c
    103  struct cgroup_subsys_state *curr = (struct cgroup_subsys_state *)v;   in cgroup_iter_seq_next()  (local)
    111  return css_next_descendant_pre(curr, p->start_css);   in cgroup_iter_seq_next()
    113  return css_next_descendant_post(curr, p->start_css);   in cgroup_iter_seq_next()
    115  return curr->parent;   in cgroup_iter_seq_next()
  core.c
    348  s32 end_new, s32 curr, const bool probe_pass)   in bpf_adj_delta_to_imm()  (argument)
    354  if (curr < pos && curr + imm + 1 >= end_old)   in bpf_adj_delta_to_imm()
    356  else if (curr >= end_new && curr + imm + 1 < end_new)   in bpf_adj_delta_to_imm()
    366  s32 end_new, s32 curr, const bool probe_pass)   in bpf_adj_delta_to_off()  (argument)
    372  if (curr < pos && curr + off + 1 >= end_old)   in bpf_adj_delta_to_off()
    374  else if (curr >= end_new && curr + off + 1 < end_new)   in bpf_adj_delta_to_off()
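bpf_adj_delta_to_imm() and bpf_adj_delta_to_off() in these hits fix up relative branches after instructions are patched into a program: an instruction at index curr with relative offset off targets curr + off + 1, so branches that cross the grown region need the size delta folded into the offset. The model below reuses the snippet's index tests; the += / -= bodies and the example numbers are my inference from that arithmetic, so treat this as an illustration rather than the kernel's exact code.

    /* Adjust a relative branch offset when insns are inserted at index pos. */
    #include <stdint.h>
    #include <stdio.h>

    static int32_t adjust_offset(int32_t off, int32_t pos, int32_t end_old,
                                 int32_t end_new, int32_t curr)
    {
        int32_t delta = end_new - end_old;   /* how many instructions were inserted */

        if (curr < pos && curr + off + 1 >= end_old)
            off += delta;                    /* branch from before the patch to at/after its old end */
        else if (curr >= end_new && curr + off + 1 < end_new)
            off -= delta;                    /* branch from after the patch back before its new end */
        return off;
    }

    int main(void)
    {
        /* Branch at insn 2 targeting insn 10; 3 insns inserted at index 5. */
        int32_t off = adjust_offset(7, 5, 5, 8, 2);

        printf("adjusted target: %d\n", 2 + off + 1);   /* now lands on insn 13 */
        return 0;
    }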
/kernel/trace/

  trace_sched_wakeup.c
    404  struct task_struct *curr,   in tracing_sched_wakeup_trace()  (argument)
    417  entry->prev_pid = curr->pid;   in tracing_sched_wakeup_trace()
    418  entry->prev_prio = curr->prio;   in tracing_sched_wakeup_trace()
    419  entry->prev_state = task_state_index(curr);   in tracing_sched_wakeup_trace()
  trace_functions_graph.c
    419  struct ftrace_graph_ent_entry *curr)   in get_return_for_leaf()  (argument)
    431  curr = &data->ent;   in get_return_for_leaf()
    461  data->ent = *curr;   in get_return_for_leaf()
    477  if (curr->ent.pid != next->ent.pid ||   in get_return_for_leaf()
    478  curr->graph_ent.func != next->ret.func)   in get_return_for_leaf()
/kernel/events/

  uprobes.c
    966  struct map_info *curr = NULL;   in build_map_info()  (local)
    997  info->next = curr;   in build_map_info()
    998  curr = info;   in build_map_info()
    1008  prev = curr;   in build_map_info()
    1009  while (curr) {   in build_map_info()
    1010  mmput(curr->mm);   in build_map_info()
    1011  curr = curr->next;   in build_map_info()
    1017  curr = ERR_PTR(-ENOMEM);   in build_map_info()
    1028  return curr;   in build_map_info()
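build_map_info() in the uprobes.c hits accumulates results by pushing each new node onto the head of a singly linked list and, on the later walk, releases the reference each node holds before moving to the next. The snippet below shows that push-then-consume pattern with malloc'd nodes and a printf in place of mmput(); names are illustrative.

    /* Head-insertion list build followed by a consuming walk. */
    #include <stdio.h>
    #include <stdlib.h>

    struct map_info {
        int id;                  /* stands in for the resource each node pins */
        struct map_info *next;
    };

    int main(void)
    {
        struct map_info *curr = NULL, *info;
        int i;

        /* Build phase: push three nodes onto the head of the list. */
        for (i = 0; i < 3; i++) {
            info = malloc(sizeof(*info));
            if (!info)
                break;
            info->id = i;
            info->next = curr;
            curr = info;
        }

        /* Consume phase: walk the list, releasing each node as we go. */
        while (curr) {
            struct map_info *next = curr->next;

            printf("releasing node %d\n", curr->id);  /* mmput(curr->mm) in the original */
            free(curr);
            curr = next;
        }
        return 0;
    }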