
Searched refs:curr (Results 1 – 25 of 30) sorted by relevance

/kernel/locking/
lockdep.c
1881 struct task_struct *curr = current; in print_circular_bug_header() local
1892 curr->comm, task_pid_nr(curr)); in print_circular_bug_header()
1971 struct task_struct *curr = current; in print_circular_bug() local
1999 lockdep_print_held_locks(curr); in print_circular_bug()
2492 print_bad_irq_dependency(struct task_struct *curr, in print_bad_irq_dependency() argument
2513 curr->comm, task_pid_nr(curr), in print_bad_irq_dependency()
2515 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT, in print_bad_irq_dependency()
2517 curr->softirqs_enabled); in print_bad_irq_dependency()
2546 lockdep_print_held_locks(curr); in print_bad_irq_dependency()
2724 static int check_irq_usage(struct task_struct *curr, struct held_lock *prev, in check_irq_usage() argument
[all …]
osq_lock.c
47 int curr = encode_cpu(smp_processor_id()); in osq_wait_next() local
58 if (atomic_read(&lock->tail) == curr && in osq_wait_next()
59 atomic_cmpxchg_acquire(&lock->tail, curr, old) == curr) { in osq_wait_next()
94 int curr = encode_cpu(smp_processor_id()); in osq_lock() local
99 node->cpu = curr; in osq_lock()
107 old = atomic_xchg(&lock->tail, curr); in osq_lock()
210 int curr = encode_cpu(smp_processor_id()); in osq_unlock() local
215 if (likely(atomic_cmpxchg_release(&lock->tail, curr, in osq_unlock()
216 OSQ_UNLOCKED_VAL) == curr)) in osq_unlock()
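
The osq_lock.c hits above show the tail-handoff pattern: the queue tail holds an encoded CPU number, a locker publishes itself with an atomic exchange, and unlock succeeds on the fast path only if the tail still points at the unlocking CPU. A minimal userspace sketch of that pattern, using C11 atomics rather than the kernel's atomic_t API (osq_lock_fast/osq_unlock_fast are invented names):

    #include <stdatomic.h>
    #include <stdio.h>

    #define OSQ_UNLOCKED_VAL 0

    /* 0 means "no tail", so CPU numbers are stored off by one. */
    static int encode_cpu(int cpu)
    {
        return cpu + 1;
    }

    static _Atomic int tail = OSQ_UNLOCKED_VAL;

    static int osq_lock_fast(int cpu)
    {
        int curr = encode_cpu(cpu);
        /* Publish ourselves as the new tail; the old tail value tells
         * us whether anyone was queued ahead of us. */
        int old = atomic_exchange(&tail, curr);

        return old == OSQ_UNLOCKED_VAL; /* uncontended: lock taken */
    }

    static int osq_unlock_fast(int cpu)
    {
        int curr = encode_cpu(cpu);
        int expected = curr;

        /* Release only if nobody queued behind us in the meantime. */
        return atomic_compare_exchange_strong(&tail, &expected,
                                              OSQ_UNLOCKED_VAL);
    }

    int main(void)
    {
        if (osq_lock_fast(0))
            printf("cpu0 took the lock uncontended\n");
        if (osq_unlock_fast(0))
            printf("cpu0 released it on the fast path\n");
        return 0;
    }
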
mutex.c
111 unsigned long owner, curr = (unsigned long)current; in __mutex_trylock_or_owner() local
119 if (likely(task != curr)) in __mutex_trylock_or_owner()
139 old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags); in __mutex_trylock_or_owner()
170 unsigned long curr = (unsigned long)current; in __mutex_trylock_fast() local
173 if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr)) { in __mutex_trylock_fast()
183 unsigned long curr = (unsigned long)current; in __mutex_unlock_fast() local
185 if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr) in __mutex_unlock_fast()
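
The mutex.c hits are the lock fast paths: lock->owner is zero when the mutex is free, so locking is one acquire cmpxchg from 0 to the current task pointer and unlocking one release cmpxchg back to 0. A rough userspace sketch with C11 atomics, where a plain integer stands in for (unsigned long)current:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic uintptr_t owner = 0;

    static int mutex_trylock_fast(uintptr_t curr)
    {
        uintptr_t zero = 0;
        /* Acquire: succeed only if nobody owns the mutex. */
        return atomic_compare_exchange_strong_explicit(&owner, &zero, curr,
                                                       memory_order_acquire,
                                                       memory_order_relaxed);
    }

    static int mutex_unlock_fast(uintptr_t curr)
    {
        uintptr_t expected = curr;
        /* Release: succeed only if we are still the recorded owner. */
        return atomic_compare_exchange_strong_explicit(&owner, &expected, 0,
                                                       memory_order_release,
                                                       memory_order_relaxed);
    }

    int main(void)
    {
        uintptr_t me = 42; /* stand-in for (unsigned long)current */

        printf("lock: %d\n", mutex_trylock_fast(me));
        printf("relock: %d\n", mutex_trylock_fast(me)); /* fails: owned */
        printf("unlock: %d\n", mutex_unlock_fast(me));
        return 0;
    }
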
/kernel/sched/
stop_task.c
65 struct task_struct *curr = rq->curr; in put_prev_task_stop() local
68 delta_exec = rq_clock_task(rq) - curr->se.exec_start; in put_prev_task_stop()
72 schedstat_set(curr->se.statistics.exec_max, in put_prev_task_stop()
73 max(curr->se.statistics.exec_max, delta_exec)); in put_prev_task_stop()
75 curr->se.sum_exec_runtime += delta_exec; in put_prev_task_stop()
76 account_group_exec_runtime(curr, delta_exec); in put_prev_task_stop()
78 curr->se.exec_start = rq_clock_task(rq); in put_prev_task_stop()
79 cgroup_account_cputime(curr, delta_exec); in put_prev_task_stop()
90 static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued) in task_tick_stop() argument
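
put_prev_task_stop() above is a compact instance of the runtime-accounting idiom shared by the scheduling classes: take the clock delta since exec_start, fold it into sum_exec_runtime and the exec_max statistic, then restart the window. A self-contained sketch of the arithmetic (struct sched_stats and the clock values are invented):

    #include <stdint.h>
    #include <stdio.h>

    struct sched_stats {
        uint64_t exec_start;       /* when the current window began */
        uint64_t sum_exec_runtime; /* total runtime accounted so far */
        uint64_t exec_max;         /* longest single window seen */
    };

    static void account_runtime(struct sched_stats *se, uint64_t now)
    {
        uint64_t delta_exec = now - se->exec_start;

        /* Mirrors the (s64)delta_exec < 0 check: ignore clock skew. */
        if ((int64_t)delta_exec <= 0)
            return;

        if (delta_exec > se->exec_max)
            se->exec_max = delta_exec;

        se->sum_exec_runtime += delta_exec;
        se->exec_start = now; /* start the next window */
    }

    int main(void)
    {
        struct sched_stats se = { .exec_start = 1000 };

        account_runtime(&se, 1450); /* window of 450 */
        account_runtime(&se, 1700); /* window of 250 */
        printf("sum=%llu max=%llu\n",
               (unsigned long long)se.sum_exec_runtime,
               (unsigned long long)se.exec_max);
        return 0;
    }
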
swait.c
24 struct swait_queue *curr; in swake_up_locked() local
29 curr = list_first_entry(&q->task_list, typeof(*curr), task_list); in swake_up_locked()
30 wake_up_process(curr->task); in swake_up_locked()
31 list_del_init(&curr->task_list); in swake_up_locked()
64 struct swait_queue *curr; in swake_up_all() local
70 curr = list_first_entry(&tmp, typeof(*curr), task_list); in swake_up_all()
72 wake_up_state(curr->task, TASK_NORMAL); in swake_up_all()
73 list_del_init(&curr->task_list); in swake_up_all()
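
swake_up_locked() wakes the first waiter on the queue and unlinks it with list_del_init() so a second wakeup cannot see it again. A toy version using a singly linked list in place of the kernel's list_head (the struct layout and names here are illustrative):

    #include <stddef.h>
    #include <stdio.h>

    struct swait_queue {
        const char *task; /* stand-in for a task_struct pointer */
        struct swait_queue *next;
    };

    static void swake_up_locked(struct swait_queue **head)
    {
        struct swait_queue *curr = *head;

        if (!curr) /* list_empty(): nothing to wake */
            return;

        printf("waking %s\n", curr->task); /* wake_up_process() */
        *head = curr->next; /* list_del_init(&curr->task_list) */
        curr->next = NULL;
    }

    int main(void)
    {
        struct swait_queue b = { "task-b", NULL };
        struct swait_queue a = { "task-a", &b };
        struct swait_queue *q = &a;

        swake_up_locked(&q); /* wakes task-a */
        swake_up_locked(&q); /* wakes task-b */
        swake_up_locked(&q); /* empty queue: no-op */
        return 0;
    }
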
rt.c
99 rt_rq->highest_prio.curr = MAX_RT_PRIO; in init_rt_rq()
171 rt_rq->highest_prio.curr = MAX_RT_PRIO; in init_tg_rt_entry()
275 return rq->rt.highest_prio.curr > prev->prio; in need_pull_rt_task()
536 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr; in sched_rt_rq_enqueue() local
550 if (rt_rq->highest_prio.curr < curr->prio) in sched_rt_rq_enqueue()
919 if (rt_rq->rt_nr_running && rq->curr == rq->idle) in do_sched_rt_period_timer()
950 return rt_rq->highest_prio.curr; in rt_se_prio()
1012 struct task_struct *curr = rq->curr; in update_curr_rt() local
1013 struct sched_rt_entity *rt_se = &curr->rt; in update_curr_rt()
1017 if (curr->sched_class != &rt_sched_class) in update_curr_rt()
[all …]
deadline.c
426 dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0; in init_dl_rq()
1113 if (dl_task(rq->curr)) in dl_task_timer()
1242 struct task_struct *curr = rq->curr; in update_curr_dl() local
1243 struct sched_dl_entity *dl_se = &curr->dl; in update_curr_dl()
1248 if (!dl_task(curr) || !on_dl_rq(dl_se)) in update_curr_dl()
1260 delta_exec = now - curr->se.exec_start; in update_curr_dl()
1267 schedstat_set(curr->se.statistics.exec_max, in update_curr_dl()
1268 max(curr->se.statistics.exec_max, delta_exec)); in update_curr_dl()
1270 curr->se.sum_exec_runtime += delta_exec; in update_curr_dl()
1271 account_group_exec_runtime(curr, delta_exec); in update_curr_dl()
[all …]
cputime.c
53 void irqtime_account_irq(struct task_struct *curr) in irqtime_account_irq() argument
74 else if (in_serving_softirq() && curr != this_cpu_ksoftirqd()) in irqtime_account_irq()
77 trace_android_rvh_account_irq(curr, cpu, delta); in irqtime_account_irq()
452 void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev, in cputime_adjust() argument
455 *ut = curr->utime; in cputime_adjust()
456 *st = curr->stime; in cputime_adjust()
556 void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev, in cputime_adjust() argument
564 rtime = curr->sum_exec_runtime; in cputime_adjust()
577 stime = curr->stime; in cputime_adjust()
578 utime = curr->utime; in cputime_adjust()
[all …]
wait.c
71 wait_queue_entry_t *curr, *next; in __wake_up_common() local
77 curr = list_next_entry(bookmark, entry); in __wake_up_common()
82 curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry); in __wake_up_common()
84 if (&curr->entry == &wq_head->head) in __wake_up_common()
87 list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) { in __wake_up_common()
88 unsigned flags = curr->flags; in __wake_up_common()
94 ret = curr->func(curr, mode, wake_flags, key); in __wake_up_common()
fair.c
548 struct sched_entity *curr = cfs_rq->curr; in update_min_vruntime() local
553 if (curr) { in update_min_vruntime()
554 if (curr->on_rq) in update_min_vruntime()
555 vruntime = curr->vruntime; in update_min_vruntime()
557 curr = NULL; in update_min_vruntime()
564 if (!curr) in update_min_vruntime()
864 struct sched_entity *curr = cfs_rq->curr; in update_curr() local
868 if (unlikely(!curr)) in update_curr()
871 delta_exec = now - curr->exec_start; in update_curr()
875 curr->exec_start = now; in update_curr()
[all …]
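
In update_min_vruntime() above, cfs_rq->min_vruntime only ever moves forward, and the comparison is done on the signed difference so it stays correct across u64 wraparound. The helper below mirrors the kernel's max_vruntime(); main() is only a demonstration:

    #include <stdint.h>
    #include <stdio.h>

    /* Wraparound-safe "return the later vruntime". */
    static uint64_t max_vruntime(uint64_t max, uint64_t v)
    {
        if ((int64_t)(v - max) > 0) /* v is ahead of max */
            max = v;
        return max;
    }

    int main(void)
    {
        uint64_t min_vruntime = 100;

        min_vruntime = max_vruntime(min_vruntime, 150); /* advances */
        min_vruntime = max_vruntime(min_vruntime, 120); /* stays put */
        printf("min_vruntime=%llu\n", (unsigned long long)min_vruntime);
        return 0;
    }
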
core.c
372 rq->curr->sched_class->task_tick(rq, rq->curr, 1); in hrtick()
629 struct task_struct *curr = rq->curr; in resched_curr() local
634 if (test_tsk_need_resched(curr)) in resched_curr()
640 set_tsk_need_resched(curr); in resched_curr()
645 if (set_nr_and_not_polling(curr)) in resched_curr()
1783 if (p->sched_class == rq->curr->sched_class) in check_preempt_curr()
1784 rq->curr->sched_class->check_preempt_curr(rq, p, flags); in check_preempt_curr()
1785 else if (p->sched_class > rq->curr->sched_class) in check_preempt_curr()
1792 if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr)) in check_preempt_curr()
2831 if (!is_idle_task(rcu_dereference(rq->curr))) in wake_up_if_idle()
[all …]
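
resched_curr() first bails out if TIF_NEED_RESCHED is already pending, then sets the flag and uses set_nr_and_not_polling() to decide whether the target CPU needs an IPI. A loose single-flag approximation, with an atomic bool standing in for the thread-info flag word (the polling-idle handling is omitted):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static _Atomic bool need_resched_flag = false;

    static void resched_curr_sketch(void)
    {
        /* Already pending? Nothing more to do. */
        if (atomic_load(&need_resched_flag))
            return;

        /* The exchange tells us whether we were the one who set the
         * flag; if so, the CPU has to be notified. */
        if (!atomic_exchange(&need_resched_flag, true))
            printf("kick the CPU (smp_send_reschedule)\n");
    }

    int main(void)
    {
        resched_curr_sketch(); /* sets the flag and kicks */
        resched_curr_sketch(); /* flag already set: no-op */
        return 0;
    }
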
membarrier.c
150 p = rcu_dereference(cpu_rq(cpu)->curr); in membarrier_global_expedited()
225 p = rcu_dereference(cpu_rq(cpu_id)->curr); in membarrier_private_expedited()
238 p = rcu_dereference(cpu_rq(cpu)->curr); in membarrier_private_expedited()
338 p = rcu_dereference(rq->curr); in sync_runqueues_membarrier_state()
stats.h
152 if (unlikely(rq->curr->in_memstall)) in psi_task_tick()
153 psi_memstall_tick(rq->curr, cpu_of(rq)); in psi_task_tick()
pelt.c
373 cfs_rq->curr == se)) { in __update_load_avg_se()
389 cfs_rq->curr != NULL)) { in __update_load_avg_cfs_rq()
sched.h
549 struct sched_entity *curr; member
639 int curr; /* highest queued rt task prio */ member
688 u64 curr; member
971 struct task_struct __rcu *curr; member
1133 #define cpu_curr(cpu) (cpu_rq(cpu)->curr)
1750 return rq->curr == p; in task_current()
1898 WARN_ON_ONCE(rq->curr != prev); in put_prev_task()
1904 WARN_ON_ONCE(rq->curr != next); in set_next_task()
pelt.h
84 if (unlikely(is_idle_task(rq->curr))) { in update_rq_clock_pelt()
/kernel/
scs.c
119 unsigned long *p, prev, curr = highest, used = 0; in scs_check_usage() local
130 while (used > curr) { in scs_check_usage()
131 prev = cmpxchg_relaxed(&highest, curr, used); in scs_check_usage()
133 if (prev == curr) { in scs_check_usage()
139 curr = prev; in scs_check_usage()
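
scs_check_usage() publishes a new stack high-water mark with a retry loop: if the cmpxchg fails because another CPU raised the mark first, the value it observed becomes the new comparison point. The same loop in C11 form (update_highest is an invented wrapper):

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned long highest = 0;

    static void update_highest(unsigned long used)
    {
        unsigned long curr = atomic_load_explicit(&highest,
                                                  memory_order_relaxed);

        while (used > curr) {
            /* On failure, curr is reloaded with the value another CPU
             * stored, and the loop re-checks whether we still win. */
            if (atomic_compare_exchange_weak_explicit(&highest, &curr, used,
                                                      memory_order_relaxed,
                                                      memory_order_relaxed))
                break;
        }
    }

    int main(void)
    {
        update_highest(512);
        update_highest(256); /* lower: leaves the watermark alone */
        printf("highest=%lu\n", atomic_load(&highest));
        return 0;
    }
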
/kernel/gcov/
fs.c
567 char *curr; in add_node() local
577 for (curr = filename; (next = strchr(curr, '/')); curr = next + 1) { in add_node()
578 if (curr == next) in add_node()
581 if (strcmp(curr, ".") == 0) in add_node()
583 if (strcmp(curr, "..") == 0) { in add_node()
589 node = get_child_by_name(parent, curr); in add_node()
591 node = new_node(parent, NULL, curr); in add_node()
598 node = new_node(parent, info, curr); in add_node()
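
add_node() walks the gcda filename one '/'-separated component at a time with strchr, skipping empty components and "." (and popping one directory level for "..", not shown here). A standalone sketch of that walk; walk_path and its buffer handling are illustrative:

    #include <stdio.h>
    #include <string.h>

    static void walk_path(const char *filename)
    {
        char buf[256];
        char *curr, *next;

        strncpy(buf, filename, sizeof(buf) - 1);
        buf[sizeof(buf) - 1] = '\0';

        for (curr = buf; (next = strchr(curr, '/')); curr = next + 1) {
            *next = '\0';     /* terminate this component in place */
            if (curr == next) /* empty component: "//" or leading '/' */
                continue;
            if (strcmp(curr, ".") == 0)
                continue;
            printf("dir component: %s\n", curr);
        }
        printf("leaf: %s\n", curr); /* remainder after the last '/' */
    }

    int main(void)
    {
        walk_path("kernel/gcov/./fs.gcda");
        return 0;
    }
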
/kernel/time/
timer_list.c
78 struct timerqueue_node *curr; in print_active_timers() local
88 curr = timerqueue_getnext(&base->active); in print_active_timers()
93 while (curr && i < next) { in print_active_timers()
94 curr = timerqueue_iterate_next(curr); in print_active_timers()
98 if (curr) { in print_active_timers()
100 timer = container_of(curr, struct hrtimer, node); in print_active_timers()
/kernel/power/
process.c
201 struct task_struct *curr = current; in thaw_processes() local
221 WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK)); in thaw_processes()
226 WARN_ON(!(curr->flags & PF_SUSPEND_TASK)); in thaw_processes()
227 curr->flags &= ~PF_SUSPEND_TASK; in thaw_processes()
/kernel/futex/
core.c
312 static void compat_exit_robust_list(struct task_struct *curr);
314 static inline void compat_exit_robust_list(struct task_struct *curr) { } in compat_exit_robust_list() argument
852 static void exit_pi_state_list(struct task_struct *curr) in exit_pi_state_list() argument
854 struct list_head *next, *head = &curr->pi_state_list; in exit_pi_state_list()
866 raw_spin_lock_irq(&curr->pi_lock); in exit_pi_state_list()
884 raw_spin_unlock_irq(&curr->pi_lock); in exit_pi_state_list()
886 raw_spin_lock_irq(&curr->pi_lock); in exit_pi_state_list()
889 raw_spin_unlock_irq(&curr->pi_lock); in exit_pi_state_list()
893 raw_spin_lock(&curr->pi_lock); in exit_pi_state_list()
906 WARN_ON(pi_state->owner != curr); in exit_pi_state_list()
[all …]
/kernel/events/
uprobes.c
970 struct map_info *curr = NULL; in build_map_info() local
1001 info->next = curr; in build_map_info()
1002 curr = info; in build_map_info()
1012 prev = curr; in build_map_info()
1013 while (curr) { in build_map_info()
1014 mmput(curr->mm); in build_map_info()
1015 curr = curr->next; in build_map_info()
1021 curr = ERR_PTR(-ENOMEM); in build_map_info()
1032 return curr; in build_map_info()
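
build_map_info() accumulates its results by pushing each new map_info onto the front of a singly linked list (info->next = curr; curr = info), then walks the list to drop the mm references. A toy version of the build and teardown, with the struct reduced to an int tag (the kernel keeps the nodes alive through a prev pointer rather than freeing them during the walk):

    #include <stdio.h>
    #include <stdlib.h>

    struct map_info {
        int mm; /* stand-in for the mm_struct reference */
        struct map_info *next;
    };

    int main(void)
    {
        struct map_info *curr = NULL;

        /* Build: prepend three entries, as in info->next = curr. */
        for (int i = 0; i < 3; i++) {
            struct map_info *info = malloc(sizeof(*info));

            if (!info)
                break;
            info->mm = i;
            info->next = curr;
            curr = info;
        }

        /* Teardown: release each reference, as in the mmput() loop. */
        while (curr) {
            struct map_info *next = curr->next;

            printf("releasing mm %d\n", curr->mm);
            free(curr);
            curr = next;
        }
        return 0;
    }
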
/kernel/trace/
trace_sched_wakeup.c
407 struct task_struct *curr, in tracing_sched_wakeup_trace() argument
420 entry->prev_pid = curr->pid; in tracing_sched_wakeup_trace()
421 entry->prev_prio = curr->prio; in tracing_sched_wakeup_trace()
422 entry->prev_state = task_state_index(curr); in tracing_sched_wakeup_trace()
trace_functions_graph.c
421 struct ftrace_graph_ent_entry *curr) in get_return_for_leaf() argument
433 curr = &data->ent; in get_return_for_leaf()
463 data->ent = *curr; in get_return_for_leaf()
479 if (curr->ent.pid != next->ent.pid || in get_return_for_leaf()
480 curr->graph_ent.func != next->ret.func) in get_return_for_leaf()
/kernel/bpf/
core.c
348 s32 end_new, s32 curr, const bool probe_pass) in bpf_adj_delta_to_imm() argument
354 if (curr < pos && curr + imm + 1 >= end_old) in bpf_adj_delta_to_imm()
356 else if (curr >= end_new && curr + imm + 1 < end_new) in bpf_adj_delta_to_imm()
366 s32 end_new, s32 curr, const bool probe_pass) in bpf_adj_delta_to_off() argument
372 if (curr < pos && curr + off + 1 >= end_old) in bpf_adj_delta_to_off()
374 else if (curr >= end_new && curr + off + 1 < end_new) in bpf_adj_delta_to_off()
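
bpf_adj_delta_to_imm() and bpf_adj_delta_to_off() fix up branch targets after a patch changes the program length: a branch needs adjusting only when it crosses the patched region. A simplified scalar version of the off rule quoted above (adjust_off is an invented wrapper; the kernel also handles the probe pass and range checks):

    #include <stdio.h>

    static int adjust_off(int off, int pos, int end_old, int end_new,
                          int curr)
    {
        int delta = end_new - end_old;

        /* Forward branch from before the patch over it: the target
         * moved further away, so grow the offset. */
        if (curr < pos && curr + off + 1 >= end_old)
            off += delta;
        /* Backward branch from after the patch to before it: the
         * target is now further behind, so grow the negative offset. */
        else if (curr >= end_new && curr + off + 1 < end_new)
            off -= delta;
        return off;
    }

    int main(void)
    {
        /* One insn at position 5 replaced by three (end_old=6,
         * end_new=8): a branch at 2 that targeted 8 now targets 10,
         * so its offset grows from 5 to 7. */
        printf("new off = %d\n", adjust_off(5, 5, 6, 8, 2));
        return 0;
    }
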
