/kernel/locking/ |
D | lockdep.c |
    1620  struct task_struct *curr = current;  in print_circular_bug_header() local
    1631  curr->comm, task_pid_nr(curr));  in print_circular_bug_header()
    1653  struct task_struct *curr = current;  in print_circular_bug() local
    1681  lockdep_print_held_locks(curr);  in print_circular_bug()
    2024  print_bad_irq_dependency(struct task_struct *curr,  in print_bad_irq_dependency() argument
    2045  curr->comm, task_pid_nr(curr),  in print_bad_irq_dependency()
    2046  curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,  in print_bad_irq_dependency()
    2047  curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,  in print_bad_irq_dependency()
    2048  curr->hardirqs_enabled,  in print_bad_irq_dependency()
    2049  curr->softirqs_enabled);  in print_bad_irq_dependency()
    [all …]
|
D | osq_lock.c |
    47  int curr = encode_cpu(smp_processor_id());  in osq_wait_next() local
    58  if (atomic_read(&lock->tail) == curr &&  in osq_wait_next()
    59  atomic_cmpxchg_acquire(&lock->tail, curr, old) == curr) {  in osq_wait_next()
    94  int curr = encode_cpu(smp_processor_id());  in osq_lock() local
    99  node->cpu = curr;  in osq_lock()
    107  old = atomic_xchg(&lock->tail, curr);  in osq_lock()
    209  int curr = encode_cpu(smp_processor_id());  in osq_unlock() local
    214  if (likely(atomic_cmpxchg_release(&lock->tail, curr,  in osq_unlock()
    215  OSQ_UNLOCKED_VAL) == curr))  in osq_unlock()
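
The hits above show the OSQ convention of encoding the current CPU into the lock's tail word. A minimal userspace sketch of that tail handoff, with C11 atomics standing in for the kernel's atomic_t API; encode_cpu() and OSQ_UNLOCKED_VAL mirror the kernel names, but the real osq_lock() unconditionally xchg's itself in as tail and queues behind the old tail, which is omitted here:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define OSQ_UNLOCKED_VAL 0

    struct osq_lock { atomic_int tail; };

    /* CPU numbers are encoded as cpu + 1 so that 0 can mean "unlocked". */
    static int encode_cpu(int cpu_nr) { return cpu_nr + 1; }

    static bool osq_trylock_empty(struct osq_lock *lock, int cpu_nr)
    {
        int curr = encode_cpu(cpu_nr);
        int expected = OSQ_UNLOCKED_VAL;

        /* Become the queue tail only if the queue was empty. */
        return atomic_compare_exchange_strong_explicit(&lock->tail,
                &expected, curr, memory_order_acquire, memory_order_relaxed);
    }

    static bool osq_unlock_fast(struct osq_lock *lock, int cpu_nr)
    {
        int curr = encode_cpu(cpu_nr);
        int expected = curr;

        /* Fast path: no successor queued, so tail still points at us. */
        return atomic_compare_exchange_strong_explicit(&lock->tail,
                &expected, OSQ_UNLOCKED_VAL,
                memory_order_release, memory_order_relaxed);
    }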
|
D | mutex.c |
    109  unsigned long owner, curr = (unsigned long)current;  in __mutex_trylock_or_owner() local
    117  if (likely(task != curr))  in __mutex_trylock_or_owner()
    137  old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);  in __mutex_trylock_or_owner()
    168  unsigned long curr = (unsigned long)current;  in __mutex_trylock_fast() local
    171  if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))  in __mutex_trylock_fast()
    179  unsigned long curr = (unsigned long)current;  in __mutex_unlock_fast() local
    181  if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)  in __mutex_unlock_fast()
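
A similar sketch of the mutex owner-word fast paths listed above: lock by swinging owner from 0 to the current task pointer with an acquire cmpxchg, unlock by swinging it back with a release cmpxchg. C11 atomics stand in for atomic_long_*(), and the flag bits the kernel keeps in the owner word's low bits are ignored:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct mutex { atomic_ulong owner; };

    static bool mutex_trylock_fast(struct mutex *lock, void *task)
    {
        unsigned long zero = 0UL;
        unsigned long curr = (unsigned long)task;

        /* Uncontended: swing owner from 0 to ourselves in one cmpxchg. */
        return atomic_compare_exchange_strong_explicit(&lock->owner,
                &zero, curr, memory_order_acquire, memory_order_relaxed);
    }

    static bool mutex_unlock_fast(struct mutex *lock, void *task)
    {
        unsigned long curr = (unsigned long)task;

        /* Succeeds only if we own the lock and no flag bits are set. */
        return atomic_compare_exchange_strong_explicit(&lock->owner,
                &curr, 0UL, memory_order_release, memory_order_relaxed);
    }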
|
/kernel/sched/ |
D | stop_task.c |
    68  struct task_struct *curr = rq->curr;  in put_prev_task_stop() local
    71  delta_exec = rq_clock_task(rq) - curr->se.exec_start;  in put_prev_task_stop()
    75  schedstat_set(curr->se.statistics.exec_max,  in put_prev_task_stop()
    76  max(curr->se.statistics.exec_max, delta_exec));  in put_prev_task_stop()
    78  curr->se.sum_exec_runtime += delta_exec;  in put_prev_task_stop()
    79  account_group_exec_runtime(curr, delta_exec);  in put_prev_task_stop()
    81  curr->se.exec_start = rq_clock_task(rq);  in put_prev_task_stop()
    82  cgroup_account_cputime(curr, delta_exec);  in put_prev_task_stop()
    93  static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)  in task_tick_stop() argument
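
The put_prev_task_stop() lines follow the scheduler's standard runtime-accounting shape: charge the time since exec_start, then restart the window. A standalone sketch of just that shape, with a caller-supplied ns clock standing in for rq_clock_task() and the schedstat/cgroup hooks omitted:

    #include <stdint.h>

    struct task_acct {
        uint64_t exec_start;       /* window start, in ns */
        uint64_t sum_exec_runtime; /* total time charged, in ns */
        uint64_t exec_max;         /* longest single window, in ns */
    };

    static void account_curr(struct task_acct *curr, uint64_t now)
    {
        int64_t delta_exec = now - curr->exec_start;

        if (delta_exec < 0)        /* clock went backwards: ignore */
            delta_exec = 0;

        if ((uint64_t)delta_exec > curr->exec_max)
            curr->exec_max = delta_exec;

        curr->sum_exec_runtime += delta_exec;
        curr->exec_start = now;    /* open the next accounting window */
    }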
|
D | swait.c |
    24  struct swait_queue *curr;  in swake_up_locked() local
    29  curr = list_first_entry(&q->task_list, typeof(*curr), task_list);  in swake_up_locked()
    30  wake_up_process(curr->task);  in swake_up_locked()
    31  list_del_init(&curr->task_list);  in swake_up_locked()
    51  struct swait_queue *curr;  in swake_up_all() local
    57  curr = list_first_entry(&tmp, typeof(*curr), task_list);  in swake_up_all()
    59  wake_up_state(curr->task, TASK_NORMAL);  in swake_up_all()
    60  list_del_init(&curr->task_list);  in swake_up_all()
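
swake_up_locked() above pops the first waiter off the queue and wakes it. A sketch of that pop-and-wake step, with a plain singly linked queue and a callback standing in for the kernel's list_head and wake_up_process():

    #include <stddef.h>

    struct swait_entry {
        struct swait_entry *next;
        void (*wake)(struct swait_entry *);  /* stands in for wake_up_process() */
    };

    struct swait_queue_head { struct swait_entry *first; };

    /* Caller holds the queue lock, as in swake_up_locked(). */
    static void swake_up_locked(struct swait_queue_head *q)
    {
        struct swait_entry *curr = q->first;

        if (!curr)                 /* empty queue: nothing to wake */
            return;

        q->first = curr->next;     /* unlink the first entry */
        curr->next = NULL;
        curr->wake(curr);
    }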
|
D | rt.c |
    90  rt_rq->highest_prio.curr = MAX_RT_PRIO;  in init_rt_rq()
    162  rt_rq->highest_prio.curr = MAX_RT_PRIO;  in init_tg_rt_entry()
    266  return rq->rt.highest_prio.curr > prev->prio;  in need_pull_rt_task()
    527  struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;  in sched_rt_rq_enqueue() local
    541  if (rt_rq->highest_prio.curr < curr->prio)  in sched_rt_rq_enqueue()
    910  if (rt_rq->rt_nr_running && rq->curr == rq->idle)  in do_sched_rt_period_timer()
    941  return rt_rq->highest_prio.curr;  in rt_se_prio()
    996  struct task_struct *curr = rq->curr;  in update_curr_rt() local
    997  struct sched_rt_entity *rt_se = &curr->rt;  in update_curr_rt()
    1001  if (curr->sched_class != &rt_sched_class)  in update_curr_rt()
    [all …]
|
D | deadline.c |
    362  dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;  in init_dl_rq()
    1052  if (dl_task(rq->curr))  in dl_task_timer()
    1181  struct task_struct *curr = rq->curr;  in update_curr_dl() local
    1182  struct sched_dl_entity *dl_se = &curr->dl;  in update_curr_dl()
    1187  if (!dl_task(curr) || !on_dl_rq(dl_se))  in update_curr_dl()
    1199  delta_exec = now - curr->se.exec_start;  in update_curr_dl()
    1206  schedstat_set(curr->se.statistics.exec_max,  in update_curr_dl()
    1207  max(curr->se.statistics.exec_max, delta_exec));  in update_curr_dl()
    1209  curr->se.sum_exec_runtime += delta_exec;  in update_curr_dl()
    1210  account_group_exec_runtime(curr, delta_exec);  in update_curr_dl()
    [all …]
|
D | wait.c |
    70  wait_queue_entry_t *curr, *next;  in __wake_up_common() local
    76  curr = list_next_entry(bookmark, entry);  in __wake_up_common()
    81  curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry);  in __wake_up_common()
    83  if (&curr->entry == &wq_head->head)  in __wake_up_common()
    86  list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) {  in __wake_up_common()
    87  unsigned flags = curr->flags;  in __wake_up_common()
    93  ret = curr->func(curr, mode, wake_flags, key);  in __wake_up_common()
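
The __wake_up_common() hits show the core wakeup walk: invoke each waiter's callback in order and stop once enough exclusive waiters are woken. A simplified sketch of that loop, with the bookmark logic omitted and an array standing in for the wait list:

    #include <stddef.h>

    #define WQ_FLAG_EXCLUSIVE 0x01

    struct wait_entry {
        unsigned int flags;
        /* returns non-zero if the waiter was actually woken */
        int (*func)(struct wait_entry *curr, unsigned int mode);
    };

    static void wake_up_common(struct wait_entry *entries, size_t nr,
                               unsigned int mode, int nr_exclusive)
    {
        for (size_t i = 0; i < nr; i++) {
            struct wait_entry *curr = &entries[i];
            unsigned int flags = curr->flags;

            if (curr->func(curr, mode) &&
                (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
                break;   /* enough exclusive waiters woken */
        }
    }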
|
D | fair.c |
    532  struct sched_entity *curr = cfs_rq->curr;  in update_min_vruntime() local
    537  if (curr) {  in update_min_vruntime()
    538  if (curr->on_rq)  in update_min_vruntime()
    539  vruntime = curr->vruntime;  in update_min_vruntime()
    541  curr = NULL;  in update_min_vruntime()
    548  if (!curr)  in update_min_vruntime()
    835  struct sched_entity *curr = cfs_rq->curr;  in update_curr() local
    839  if (unlikely(!curr))  in update_curr()
    842  delta_exec = now - curr->exec_start;  in update_curr()
    846  curr->exec_start = now;  in update_curr()
    [all …]
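
update_min_vruntime() above keeps cfs_rq->min_vruntime monotonic: it considers the running entity's vruntime and the leftmost queued vruntime, then only ever moves forward. A sketch of that computation, with the two optional vruntimes passed as pointers in place of cfs_rq->curr and the leftmost rbtree entry:

    #include <stdint.h>

    /* Signed deltas keep the comparisons correct across u64 wraparound. */
    static uint64_t max_vruntime(uint64_t max_vr, uint64_t vr)
    {
        if ((int64_t)(vr - max_vr) > 0)
            max_vr = vr;
        return max_vr;
    }

    static uint64_t min_vruntime_of(uint64_t min_vr, uint64_t vr)
    {
        if ((int64_t)(vr - min_vr) < 0)
            min_vr = vr;
        return min_vr;
    }

    static uint64_t update_min_vruntime(uint64_t min_vruntime,
                                        const uint64_t *curr,
                                        const uint64_t *leftmost)
    {
        uint64_t vruntime = min_vruntime;

        if (curr)                    /* running entity, if still on the rq */
            vruntime = *curr;
        if (leftmost)                /* smallest queued vruntime */
            vruntime = curr ? min_vruntime_of(vruntime, *leftmost)
                            : *leftmost;

        /* min_vruntime may only move forward */
        return max_vruntime(min_vruntime, vruntime);
    }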
|
D | cputime.c |
    51  void irqtime_account_irq(struct task_struct *curr)  in irqtime_account_irq() argument
    72  else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())  in irqtime_account_irq()
    451  void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,  in cputime_adjust() argument
    454  *ut = curr->utime;  in cputime_adjust()
    455  *st = curr->stime;  in cputime_adjust()
    598  void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,  in cputime_adjust() argument
    606  rtime = curr->sum_exec_runtime;  in cputime_adjust()
    619  stime = curr->stime;  in cputime_adjust()
    620  utime = curr->utime;  in cputime_adjust()
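
The second cputime_adjust() above rescales the sampled utime/stime so they sum to the scheduler's authoritative sum_exec_runtime. A sketch of that scaling step only; the kernel additionally keeps the previous values monotonic and short-circuits the utime==0 and stime==0 cases:

    #include <stdint.h>

    static void cputime_adjust(uint64_t rtime, uint64_t utime, uint64_t stime,
                               uint64_t *ut, uint64_t *st)
    {
        uint64_t total = utime + stime;

        if (total) {
            /* split rtime in the ratio the tick samples suggest;
             * 128-bit intermediate (GCC/Clang extension) avoids overflow */
            stime = (uint64_t)(((__uint128_t)rtime * stime) / total);
            utime = rtime - stime;
        } else {
            /* no samples at all: charge the whole runtime as system time */
            stime = rtime;
            utime = 0;
        }
        *ut = utime;
        *st = stime;
    }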
|
D | core.c |
    247  rq->curr->sched_class->task_tick(rq, rq->curr, 1);  in hrtick()
    510  struct task_struct *curr = rq->curr;  in resched_curr() local
    515  if (test_tsk_need_resched(curr))  in resched_curr()
    521  set_tsk_need_resched(curr);  in resched_curr()
    526  if (set_nr_and_not_polling(curr))  in resched_curr()
    1421  if (p->sched_class == rq->curr->sched_class) {  in check_preempt_curr()
    1422  rq->curr->sched_class->check_preempt_curr(rq, p, flags);  in check_preempt_curr()
    1425  if (class == rq->curr->sched_class)  in check_preempt_curr()
    1438  if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))  in check_preempt_curr()
    2372  if (!is_idle_task(rcu_dereference(rq->curr)))  in wake_up_if_idle()
    [all …]
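
resched_curr() above exits early when the reschedule flag is already set, and only otherwise marks the task and kicks the CPU. A sketch of that test-then-set pattern, with a C11 atomic flag word standing in for thread_info flags and the IPI/polling kick left as a comment:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define TIF_NEED_RESCHED (1u << 0)

    struct task_flags { atomic_uint flags; };

    static bool test_tsk_need_resched(struct task_flags *t)
    {
        return atomic_load_explicit(&t->flags, memory_order_relaxed)
               & TIF_NEED_RESCHED;
    }

    static void resched_curr(struct task_flags *curr)
    {
        if (test_tsk_need_resched(curr))
            return;   /* already marked; nothing more to do */

        atomic_fetch_or_explicit(&curr->flags, TIF_NEED_RESCHED,
                                 memory_order_relaxed);
        /* here the kernel kicks the CPU (IPI) unless it is polling */
    }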
|
D | membarrier.c |
    108  p = rcu_dereference(cpu_rq(cpu)->curr);  in membarrier_global_expedited()
    177  p = rcu_dereference(cpu_rq(cpu)->curr);  in membarrier_private_expedited()
    243  p = rcu_dereference(rq->curr);  in sync_runqueues_membarrier_state()
|
D | pelt.c |
    280  cfs_rq->curr == se)) {  in __update_load_avg_se()
    296  cfs_rq->curr != NULL)) {  in __update_load_avg_cfs_rq()
|
D | stats.h |
    134  if (unlikely(rq->curr->flags & PF_MEMSTALL))  in psi_task_tick()
    135  psi_memstall_tick(rq->curr, cpu_of(rq));  in psi_task_tick()
|
D | sched.h |
    511  struct sched_entity *curr;  member
    599  int curr; /* highest queued rt task prio */  member
    648  u64 curr;  member
    909  struct task_struct *curr;  member
    1058  #define cpu_curr(cpu) (cpu_rq(cpu)->curr)
    1629  return rq->curr == p;  in task_current()
    1786  WARN_ON_ONCE(rq->curr != prev);  in put_prev_task()
    1792  WARN_ON_ONCE(rq->curr != next);  in set_next_task()
|
D | pelt.h |
    60  if (unlikely(is_idle_task(rq->curr))) {  in update_rq_clock_pelt()
|
/kernel/gcov/ |
D | fs.c |
    567  char *curr;  in add_node() local
    577  for (curr = filename; (next = strchr(curr, '/')); curr = next + 1) {  in add_node()
    578  if (curr == next)  in add_node()
    581  if (strcmp(curr, ".") == 0)  in add_node()
    583  if (strcmp(curr, "..") == 0) {  in add_node()
    589  node = get_child_by_name(parent, curr);  in add_node()
    591  node = new_node(parent, NULL, curr);  in add_node()
    598  node = new_node(parent, info, curr);  in add_node()
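
add_node() above walks a '/'-separated filename with strchr(), skipping empty and "." components. A standalone sketch of that component walk; node creation is replaced by printf, and where the kernel steps up to the parent node on "..", this sketch simply errors out:

    #include <stdio.h>
    #include <string.h>

    /* Walk each '/'-separated component of filename, as add_node() does. */
    static int walk_components(char *filename)
    {
        char *curr, *next;

        for (curr = filename; (next = strchr(curr, '/')); curr = next + 1) {
            if (curr == next)                /* skip empty components */
                continue;
            *next = '\0';                    /* terminate component in place */
            if (strcmp(curr, "..") == 0)
                return -1;                   /* kernel walks up a level here */
            if (strcmp(curr, ".") != 0)      /* "." contributes nothing */
                printf("dir component: %s\n", curr);
            *next = '/';                     /* restore the path string */
        }
        printf("leaf: %s\n", curr);          /* final component (the file) */
        return 0;
    }

    int main(void)
    {
        char path[] = "kernel/./sched/fair.c";

        return walk_components(path) ? 1 : 0;
    }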
|
/kernel/time/ |
D | timer_list.c |
    78  struct timerqueue_node *curr;  in print_active_timers() local
    88  curr = timerqueue_getnext(&base->active);  in print_active_timers()
    93  while (curr && i < next) {  in print_active_timers()
    94  curr = timerqueue_iterate_next(curr);  in print_active_timers()
    98  if (curr) {  in print_active_timers()
    100  timer = container_of(curr, struct hrtimer, node);  in print_active_timers()
|
/kernel/power/ |
D | process.c |
    204  struct task_struct *curr = current;  in thaw_processes() local
    224  WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK));  in thaw_processes()
    229  WARN_ON(!(curr->flags & PF_SUSPEND_TASK));  in thaw_processes()
    230  curr->flags &= ~PF_SUSPEND_TASK;  in thaw_processes()
|
D | snapshot.c |
    710  struct mem_zone_bm_rtree *curr, *zone;  in memory_bm_find_bit() local
    722  list_for_each_entry(curr, &bm->zones, list) {  in memory_bm_find_bit()
    723  if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {  in memory_bm_find_bit()
    724  zone = curr;  in memory_bm_find_bit()
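
memory_bm_find_bit() above does a linear scan for the zone whose pfn range covers the lookup target. The same lookup as a sketch, with a plain array standing in for the kernel's list_for_each_entry():

    #include <stddef.h>

    struct mem_zone {
        unsigned long start_pfn;
        unsigned long end_pfn;   /* exclusive */
    };

    static struct mem_zone *find_zone(struct mem_zone *zones, size_t nr,
                                      unsigned long pfn)
    {
        for (size_t i = 0; i < nr; i++) {
            struct mem_zone *curr = &zones[i];

            if (pfn >= curr->start_pfn && pfn < curr->end_pfn)
                return curr;     /* pfn falls inside this zone */
        }
        return NULL;             /* no zone covers this pfn */
    }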
|
/kernel/ |
D | futex.c |
    329  static void compat_exit_robust_list(struct task_struct *curr);
    331  static inline void compat_exit_robust_list(struct task_struct *curr) { }  in compat_exit_robust_list() argument
    899  static void exit_pi_state_list(struct task_struct *curr)  in exit_pi_state_list() argument
    901  struct list_head *next, *head = &curr->pi_state_list;  in exit_pi_state_list()
    913  raw_spin_lock_irq(&curr->pi_lock);  in exit_pi_state_list()
    931  raw_spin_unlock_irq(&curr->pi_lock);  in exit_pi_state_list()
    933  raw_spin_lock_irq(&curr->pi_lock);  in exit_pi_state_list()
    936  raw_spin_unlock_irq(&curr->pi_lock);  in exit_pi_state_list()
    940  raw_spin_lock(&curr->pi_lock);  in exit_pi_state_list()
    953  WARN_ON(pi_state->owner != curr);  in exit_pi_state_list()
    [all …]
|
/kernel/events/ |
D | uprobes.c |
    980  struct map_info *curr = NULL;  in build_map_info() local
    1011  info->next = curr;  in build_map_info()
    1012  curr = info;  in build_map_info()
    1022  prev = curr;  in build_map_info()
    1023  while (curr) {  in build_map_info()
    1024  mmput(curr->mm);  in build_map_info()
    1025  curr = curr->next;  in build_map_info()
    1031  curr = ERR_PTR(-ENOMEM);  in build_map_info()
    1042  return curr;  in build_map_info()
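
build_map_info() above builds its result by prepending entries to a singly linked list and tears it down with the same walk on the error path. A userspace sketch of those two motions, with malloc/free standing in for the kernel's allocation and mmput():

    #include <stdlib.h>

    struct map_info {
        struct map_info *next;
        /* payload (the kernel stores an mm pointer and an offset here) */
    };

    static struct map_info *push_info(struct map_info *curr)
    {
        struct map_info *info = malloc(sizeof(*info));

        if (!info)
            return curr;      /* the kernel retries/fails; keep the list */
        info->next = curr;    /* prepend: newest entry becomes the head */
        return info;
    }

    static void free_infos(struct map_info *curr)
    {
        while (curr) {
            struct map_info *next = curr->next;

            free(curr);       /* stands in for mmput() plus kfree() */
            curr = next;
        }
    }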
|
/kernel/trace/ |
D | trace_sched_wakeup.c |
    405  struct task_struct *curr,  in tracing_sched_wakeup_trace() argument
    418  entry->prev_pid = curr->pid;  in tracing_sched_wakeup_trace()
    419  entry->prev_prio = curr->prio;  in tracing_sched_wakeup_trace()
    420  entry->prev_state = task_state_index(curr);  in tracing_sched_wakeup_trace()
|
D | trace_functions_graph.c |
    421  struct ftrace_graph_ent_entry *curr)  in get_return_for_leaf() argument
    433  curr = &data->ent;  in get_return_for_leaf()
    463  data->ent = *curr;  in get_return_for_leaf()
    479  if (curr->ent.pid != next->ent.pid ||  in get_return_for_leaf()
    480  curr->graph_ent.func != next->ret.func)  in get_return_for_leaf()
|
/kernel/bpf/ |
D | core.c |
    337  s32 end_new, s32 curr, const bool probe_pass)  in bpf_adj_delta_to_imm() argument
    343  if (curr < pos && curr + imm + 1 >= end_old)  in bpf_adj_delta_to_imm()
    345  else if (curr >= end_new && curr + imm + 1 < end_new)  in bpf_adj_delta_to_imm()
    355  s32 end_new, s32 curr, const bool probe_pass)  in bpf_adj_delta_to_off() argument
    361  if (curr < pos && curr + off + 1 >= end_old)  in bpf_adj_delta_to_off()
    363  else if (curr >= end_new && curr + off + 1 < end_new)  in bpf_adj_delta_to_off()
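
Both helpers above apply the same adjustment: when a patch changes the BPF program's length, branches that cross the patched region have their relative targets corrected by the length delta. A sketch of that arithmetic for the off field, mirroring the conditions at 361 and 363 (the imm variant at 337 to 345 is identical with imm in place of off):

    #include <stdint.h>

    typedef int32_t s32;
    typedef uint32_t u32;

    static s32 adj_delta_to_off(s32 off, u32 pos, s32 end_old, s32 end_new,
                                s32 curr)
    {
        s32 delta = end_new - end_old;   /* how much the patch grew */

        /* insn before the patch whose target was at/after the old end */
        if (curr < (s32)pos && curr + off + 1 >= end_old)
            off += delta;
        /* insn past the patch whose target stays before the new end */
        else if (curr >= end_new && curr + off + 1 < end_new)
            off -= delta;
        return off;
    }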
|