/kernel/
D | lockdep.c |
     538  static void lockdep_print_held_locks(struct task_struct *curr)  in lockdep_print_held_locks() argument
     540  int i, depth = curr->lockdep_depth;  in lockdep_print_held_locks()
     543  printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));  in lockdep_print_held_locks()
     547  depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));  in lockdep_print_held_locks()
     551  print_lock(curr->held_locks + i);  in lockdep_print_held_locks()
     934  struct task_struct *curr = current;  in print_circular_bug_header() local
     944  curr->comm, task_pid_nr(curr));  in print_circular_bug_header()
     958  struct task_struct *curr = current;  in print_circular_bug_tail() local
     971  lockdep_print_held_locks(curr);  in print_circular_bug_tail()
    1184  print_bad_irq_dependency(struct task_struct *curr,  in print_bad_irq_dependency() argument
    [all …]

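The lockdep fragments above sketch a simple reporting loop: print either a "no locks held" line or a count, then one line per `held_lock`. A user-space model of that shape; `task_model` and `held_lock_model` are hypothetical stand-ins for the kernel's `task_struct` and `held_lock`:

```c
#include <stdio.h>

/* Hypothetical stand-ins for task_struct / held_lock. */
struct held_lock_model { const char *name; };

struct task_model {
    const char *comm;
    int pid;
    int lockdep_depth;
    struct held_lock_model held_locks[8];
};

/* Same report shape as lockdep_print_held_locks(): a "no locks" line,
 * or a count followed by one line per held lock. */
static void print_held_locks_model(const struct task_model *curr)
{
    int i, depth = curr->lockdep_depth;

    if (depth == 0) {
        printf("no locks held by %s/%d.\n", curr->comm, curr->pid);
        return;
    }
    printf("%d lock%s held by %s/%d:\n",
           depth, depth > 1 ? "s" : "", curr->comm, curr->pid);
    for (i = 0; i < depth; i++)
        printf(" #%d: %s\n", i, curr->held_locks[i].name);
}

int main(void)
{
    struct task_model t = {
        .comm = "worker", .pid = 42, .lockdep_depth = 2,
        .held_locks = { { "&rq->lock" }, { "&p->pi_lock" } },
    };

    print_held_locks_model(&t);
    return 0;
}
```
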
D | sched_fair.c |
     278  if (cfs_rq->curr)  in update_min_vruntime()
     279  vruntime = cfs_rq->curr->vruntime;  in update_min_vruntime()
     286  if (!cfs_rq->curr)  in update_min_vruntime()
     463  __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,  in __update_curr() argument
     468  schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));  in __update_curr()
     470  curr->sum_exec_runtime += delta_exec;  in __update_curr()
     472  delta_exec_weighted = calc_delta_fair(delta_exec, curr);  in __update_curr()
     473  curr->vruntime += delta_exec_weighted;  in __update_curr()
     479  struct sched_entity *curr = cfs_rq->curr;  in update_curr() local
     483  if (unlikely(!curr))  in update_curr()
    [all …]

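The __update_curr() fragments show the heart of CFS accounting: the raw runtime delta is credited to `sum_exec_runtime`, then scaled by the entity's load weight (via calc_delta_fair()) before advancing `vruntime`, so heavier tasks accrue virtual time more slowly. A minimal user-space sketch of that arithmetic, with a plain 64-bit division standing in for the kernel's fixed-point inverse-weight calculation and `entity_model` as a made-up type:

```c
#include <stdint.h>
#include <stdio.h>

#define NICE_0_LOAD 1024u  /* weight of a nice-0 task in this era */

/* Minimal model of a sched_entity's accounting fields. */
struct entity_model {
    uint64_t sum_exec_runtime;  /* real runtime, ns */
    uint64_t vruntime;          /* weighted virtual runtime, ns */
    uint32_t load_weight;       /* bigger weight => slower vruntime */
};

/* Same arithmetic as the __update_curr() fragments: credit the raw
 * delta, then advance vruntime inversely to the entity's weight. */
static void update_curr_model(struct entity_model *curr, uint64_t delta_exec)
{
    curr->sum_exec_runtime += delta_exec;
    curr->vruntime += delta_exec * NICE_0_LOAD / curr->load_weight;
}

int main(void)
{
    struct entity_model heavy = { 0, 0, 2048 };  /* boosted (nice < 0) */
    struct entity_model light = { 0, 0, 512 };   /* niced   (nice > 0) */

    update_curr_model(&heavy, 1000000);  /* both ran 1 ms */
    update_curr_model(&light, 1000000);

    /* heavy advances 0.5 ms of vruntime, light advances 2 ms */
    printf("heavy: %llu\n", (unsigned long long)heavy.vruntime);
    printf("light: %llu\n", (unsigned long long)light.vruntime);
    return 0;
}
```
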
D | sched_rt.c |
     105  struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;  in sched_rt_rq_enqueue() local
     111  if (rt_rq->highest_prio < curr->prio)  in sched_rt_rq_enqueue()
     112  resched_task(curr);  in sched_rt_rq_enqueue()
     203  resched_task(rq_of_rt_rq(rt_rq)->curr);  in sched_rt_rq_enqueue()
     514  struct task_struct *curr = rq->curr;  in update_curr_rt() local
     515  struct sched_rt_entity *rt_se = &curr->rt;  in update_curr_rt()
     519  if (!task_has_rt_policy(curr))  in update_curr_rt()
     522  delta_exec = rq->clock - curr->se.exec_start;  in update_curr_rt()
     526  schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));  in update_curr_rt()
     528  curr->se.sum_exec_runtime += delta_exec;  in update_curr_rt()
    [all …]

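In the sched_rt.c fragments, sched_rt_rq_enqueue() reschedules the running task when `rt_rq->highest_prio < curr->prio`; the comparison reads backwards unless you know kernel priorities are inverted, with numerically lower values being more urgent. A tiny sketch of that predicate (the function name is illustrative):

```c
#include <stdbool.h>
#include <stdio.h>

/* Kernel convention: numerically lower priority values are more
 * urgent (RT prio 0 beats RT prio 50), hence the "<" in
 * "rt_rq->highest_prio < curr->prio". */
static bool queued_beats_running(int queued_highest_prio, int running_prio)
{
    return queued_highest_prio < running_prio;
}

int main(void)
{
    printf("%d\n", queued_beats_running(0, 50));   /* 1: reschedule */
    printf("%d\n", queued_beats_running(80, 50));  /* 0: keep running */
    return 0;
}
```
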
D | rcupreempt.c |
     575  long curr;  in rcu_try_flip_waitack_needed() local
     579  curr = rdssp->dynticks;  in rcu_try_flip_waitack_needed()
     592  if ((curr == snap) && ((curr & 0x1) == 0))  in rcu_try_flip_waitack_needed()
     601  if ((curr - snap) > 2 || (curr & 0x1) == 0)  in rcu_try_flip_waitack_needed()
     612  long curr;  in rcu_try_flip_waitmb_needed() local
     616  curr = rdssp->dynticks;  in rcu_try_flip_waitmb_needed()
     628  if ((curr == snap) && ((curr & 0x1) == 0))  in rcu_try_flip_waitmb_needed()
     636  if (curr != snap)  in rcu_try_flip_waitmb_needed()
     653  long curr;  in rcu_qsctr_inc_needed_dyntick() local
     657  curr = rdssp->dynticks;  in rcu_qsctr_inc_needed_dyntick()
    [all …]

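The rcupreempt.c fragments all test a per-CPU dynticks counter against a saved snapshot. The counter increments on every transition into or out of dynticks-idle, so an even value means the CPU is idle, and any movement since the snapshot implies it passed through a quiescent state; rcutree.c's rcu_implicit_dynticks_qs() (further down) applies the same idea. A hedged user-space restatement of the waitack predicate, without the memory barriers the real code needs:

```c
#include <stdbool.h>
#include <stdio.h>

/* Sketch of the rcu_try_flip_waitack_needed() checks, simplified:
 * the dynticks counter is even while the CPU is in dynticks-idle and
 * odd while it is active. */
static bool waitack_needed(long curr, long snap)
{
    /* Even and unchanged: the CPU slept in dynticks-idle the whole
     * time and cannot be inside an RCU read-side critical section. */
    if (curr == snap && (curr & 0x1) == 0)
        return false;

    /* Moved by more than two, or currently even: the CPU went
     * through (or into) idle since the snapshot, which already
     * implies the quiescent state we were waiting for. */
    if ((curr - snap) > 2 || (curr & 0x1) == 0)
        return false;

    return true;  /* still need an explicit acknowledgement */
}

int main(void)
{
    printf("%d\n", waitack_needed(8, 8));   /* 0: idle throughout */
    printf("%d\n", waitack_needed(9, 9));   /* 1: busy, must ack */
    printf("%d\n", waitack_needed(12, 9));  /* 0: idled since snap */
    return 0;
}
```
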
D | rtmutex-tester.c |
     368  char *curr = buf;  in sysfs_test_status() local
     376  curr += sprintf(curr,  in sysfs_test_status()
     384  curr += sprintf(curr, "%d", td->mutexes[i]);  in sysfs_test_status()
     388  curr += sprintf(curr, ", T: %p, R: %p\n", tsk,  in sysfs_test_status()
     391  return curr - buf;  in sysfs_test_status()

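sysfs_test_status() uses the classic cursor idiom: sprintf() returns the number of characters written, so `curr += sprintf(curr, ...)` walks a write pointer through the buffer and `curr - buf` yields the total length. A self-contained example of the same pattern; `render_status()` is a made-up stand-in, and like the original it trusts the caller to size the buffer:

```c
#include <stdio.h>

/* Cursor-advance idiom: each sprintf() returns the count of bytes it
 * wrote, moving "curr" forward through "buf". */
static int render_status(char *buf, const int *mutexes, int n)
{
    char *curr = buf;
    int i;

    curr += sprintf(curr, "mutexes:");
    for (i = 0; i < n; i++)
        curr += sprintf(curr, " %d", mutexes[i]);
    curr += sprintf(curr, "\n");

    return curr - buf;  /* bytes written, excluding the NUL */
}

int main(void)
{
    char buf[128];
    int state[] = { 0, 1, 4 };
    int len = render_status(buf, state, 3);

    printf("%s(len = %d)\n", buf, len);
    return 0;
}
```
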
D | futex.c |
     469  void exit_pi_state_list(struct task_struct *curr)  in exit_pi_state_list() argument
     471  struct list_head *next, *head = &curr->pi_state_list;  in exit_pi_state_list()
     483  spin_lock_irq(&curr->pi_lock);  in exit_pi_state_list()
     490  spin_unlock_irq(&curr->pi_lock);  in exit_pi_state_list()
     494  spin_lock_irq(&curr->pi_lock);  in exit_pi_state_list()
     504  WARN_ON(pi_state->owner != curr);  in exit_pi_state_list()
     508  spin_unlock_irq(&curr->pi_lock);  in exit_pi_state_list()
     514  spin_lock_irq(&curr->pi_lock);  in exit_pi_state_list()
     516  spin_unlock_irq(&curr->pi_lock);  in exit_pi_state_list()
    1199  struct task_struct *curr = current;  in futex_wait() local
    [all …]

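The exit_pi_state_list() fragments show `pi_lock` being dropped and retaken around each entry: the futex hash-bucket lock ranks before `pi_lock`, so the walk peeks at the list head under `pi_lock`, releases it, acquires the bucket lock, retakes `pi_lock`, and revalidates the head before detaching. A user-space pthread model of that ordering dance; the types and the single `bucket_lock` are simplifications, not the kernel's code:

```c
#include <pthread.h>
#include <stdio.h>

struct pi_state_model {
    struct pi_state_model *next;
    int id;
};

static pthread_mutex_t pi_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;
static struct pi_state_model *head;

static void exit_pi_state_list_model(void)
{
    pthread_mutex_lock(&pi_lock);
    while (head) {
        struct pi_state_model *entry = head;

        /* bucket_lock ranks before pi_lock: drop, lock, retake */
        pthread_mutex_unlock(&pi_lock);
        pthread_mutex_lock(&bucket_lock);
        pthread_mutex_lock(&pi_lock);

        if (head == entry) {        /* nothing raced us: detach */
            head = entry->next;
            printf("released pi_state %d\n", entry->id);
        }                           /* else: retry with the new head */

        pthread_mutex_unlock(&pi_lock);
        pthread_mutex_unlock(&bucket_lock);
        pthread_mutex_lock(&pi_lock);
    }
    pthread_mutex_unlock(&pi_lock);
}

int main(void)
{
    struct pi_state_model b = { NULL, 2 }, a = { &b, 1 };

    head = &a;
    exit_pi_state_list_model();
    return 0;
}
```
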
D | sched_idletask.c |
      64  static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)  in task_tick_idle() argument
      77  resched_task(rq->curr);  in switched_to_idle()
      94  resched_task(rq->curr);  in prio_changed_idle()

D | futex_compat.c |
      48  void compat_exit_robust_list(struct task_struct *curr)  in compat_exit_robust_list() argument
      50  struct compat_robust_list_head __user *head = curr->compat_robust_list;  in compat_exit_robust_list()
      94  if (handle_futex_death(uaddr, curr, pi))  in compat_exit_robust_list()
     113  handle_futex_death(uaddr, curr, pip);  in compat_exit_robust_list()

D | sched.c |
     420  struct sched_entity *curr, *next, *last;  member
     581  struct task_struct *curr, *idle;  member
     644  rq->curr->sched_class->check_preempt_curr(rq, p, sync);  in check_preempt_curr()
     669  #define cpu_curr(cpu) (cpu_rq(cpu)->curr)
     873  return rq->curr == p;  in task_current()
    1061  rq->curr->sched_class->task_tick(rq, rq->curr, 1);  in hrtick()
    1237  if (rq->curr != rq->idle)  in wake_up_idle_cpu()
    2515  static void fire_sched_in_preempt_notifiers(struct task_struct *curr)  in fire_sched_in_preempt_notifiers() argument
    2520  hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)  in fire_sched_in_preempt_notifiers()
    2525  fire_sched_out_preempt_notifiers(struct task_struct *curr,  in fire_sched_out_preempt_notifiers() argument
    [all …]

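Among the sched.c hits, fire_sched_in_preempt_notifiers() walks the task's hlist of preempt notifiers and invokes each registered callback. A minimal user-space analog of that observer walk, with a singly linked list standing in for the kernel's hlist and illustrative names throughout:

```c
#include <stdio.h>

/* Each registered notifier embeds its own link node, as with the
 * kernel's intrusive lists. */
struct preempt_notifier_model {
    struct preempt_notifier_model *next;
    void (*sched_in)(struct preempt_notifier_model *self, int cpu);
};

/* Walk the list and fire every callback, the shape of
 * fire_sched_in_preempt_notifiers(). */
static void fire_sched_in(struct preempt_notifier_model *head, int cpu)
{
    struct preempt_notifier_model *notifier;

    for (notifier = head; notifier; notifier = notifier->next)
        notifier->sched_in(notifier, cpu);
}

static void log_sched_in(struct preempt_notifier_model *self, int cpu)
{
    (void)self;
    printf("scheduled in on cpu %d\n", cpu);
}

int main(void)
{
    struct preempt_notifier_model n = { NULL, log_sched_in };

    fire_sched_in(&n, 0);
    return 0;
}
```
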
D | exit.c |
     369  struct task_struct *curr = current->group_leader;  in __set_special_pids() local
     372  if (task_session(curr) != pid) {  in __set_special_pids()
     373  change_pid(curr, PIDTYPE_SID, pid);  in __set_special_pids()
     374  set_task_session(curr, nr);  in __set_special_pids()
     376  if (task_pgrp(curr) != pid) {  in __set_special_pids()
     377  change_pid(curr, PIDTYPE_PGID, pid);  in __set_special_pids()
     378  set_task_pgrp(curr, nr);  in __set_special_pids()

D | rcutree.c |
     347  long curr;  in rcu_implicit_dynticks_qs() local
     352  curr = rdp->dynticks->dynticks;  in rcu_implicit_dynticks_qs()
     366  if ((curr != snap || (curr & 0x1) == 0) &&  in rcu_implicit_dynticks_qs()

D | sched_debug.c |
      93  if (rq->curr == p)  in print_task()
     277  P(curr->pid);  in print_cpu()

/kernel/time/
D | timer_stats.c |
     169  struct entry **head, *curr, *prev;  in tstat_lookup() local
     172  curr = *head;  in tstat_lookup()
     179  while (curr) {  in tstat_lookup()
     180  if (match_entries(curr, entry))  in tstat_lookup()
     181  return curr;  in tstat_lookup()
     183  curr = curr->next;  in tstat_lookup()
     189  curr = *head;  in tstat_lookup()
     195  while (curr) {  in tstat_lookup()
     196  if (match_entries(curr, entry))  in tstat_lookup()
     199  prev = curr;  in tstat_lookup()
    [all …]

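tstat_lookup() scans a hash chain twice: once for a plain lookup, and again, after allocating, to find the link point via a trailing `prev` pointer (the rescan exists because the real code drops a lock around the allocation). A simplified single-threaded sketch of that shape; the string key and modulo hash are placeholders for the kernel's pointer-based ones:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry {
    struct entry *next;
    const char *key;
    unsigned long count;
};

#define HASH_SIZE 64
static struct entry *hash_table[HASH_SIZE];

static struct entry *lookup_or_insert(const char *key)
{
    struct entry **head, *curr, *prev;

    head = &hash_table[strlen(key) % HASH_SIZE];

    /* first pass: plain lookup, as in the 172..183 fragments */
    for (curr = *head; curr; curr = curr->next)
        if (strcmp(curr->key, key) == 0)
            return curr;

    /* second pass: walk again tracking prev to find the link point */
    curr = *head;
    prev = NULL;
    while (curr) {
        prev = curr;
        curr = curr->next;
    }

    curr = calloc(1, sizeof(*curr));
    if (!curr)
        return NULL;
    curr->key = key;

    if (prev)
        prev->next = curr;
    else
        *head = curr;
    return curr;
}

int main(void)
{
    lookup_or_insert("hrtimer_wakeup")->count++;
    lookup_or_insert("hrtimer_wakeup")->count++;
    printf("hrtimer_wakeup fired %lu times\n",
           lookup_or_insert("hrtimer_wakeup")->count);
    return 0;
}
```
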
D | timer_list.c |
      82  struct rb_node *curr;  in print_active_timers() local
      89  curr = base->first;  in print_active_timers()
      94  while (curr && i < next) {  in print_active_timers()
      95  curr = rb_next(curr);  in print_active_timers()
      99  if (curr) {  in print_active_timers()
     101  timer = rb_entry(curr, struct hrtimer, node);  in print_active_timers()

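print_active_timers() iterates the hrtimer rbtree with rb_next(), which is an in-order successor walk over parent pointers. A plain-BST model of that helper, not the kernel's rbtree implementation (rb_node also carries the node's color, which traversal never needs):

```c
#include <stdio.h>

/* Parent-pointer BST node standing in for struct rb_node. */
struct node {
    struct node *left, *right, *parent;
    long expires;
};

/* In-order successor, the same walk rb_next() performs: leftmost node
 * of the right subtree, else climb until we arrive from a left child. */
static struct node *model_next(struct node *n)
{
    struct node *parent;

    if (n->right) {
        n = n->right;
        while (n->left)
            n = n->left;
        return n;
    }
    while ((parent = n->parent) && n == parent->right)
        n = parent;
    return parent;
}

int main(void)
{
    /* hand-built tree:    20
     *                    /  \
     *                  10    30    */
    struct node a = { 0 }, b = { 0 }, c = { 0 };
    struct node *curr;

    a.expires = 10; b.expires = 20; c.expires = 30;
    b.left = &a; b.right = &c;
    a.parent = &b; c.parent = &b;

    /* like print_active_timers(): start at the leftmost ("first")
     * node and advance with the successor helper */
    for (curr = &a; curr; curr = model_next(curr))
        printf("timer expires at %ld\n", curr->expires);
    return 0;
}
```
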
/kernel/trace/
D | trace_functions_graph.c |
     206  struct ftrace_graph_ent_entry *curr)  in trace_branch_is_leaf() argument
     227  if (curr->ent.pid != next->ent.pid ||  in trace_branch_is_leaf()
     228  curr->graph_ent.func != next->ret.func)  in trace_branch_is_leaf()

D | trace_irqsoff.c |
     304  inline void print_irqtrace_events(struct task_struct *curr)  in print_irqtrace_events() argument

D | trace.c |
    1114  struct task_struct *curr,  argument
    1128  entry->prev_pid = curr->pid;
    1129  entry->prev_prio = curr->prio;
    1130  entry->prev_state = curr->state;