Searched refs:curr (Results 1 – 25 of 29) sorted by relevance

/kernel/locking/
lockdep.c 1890 struct task_struct *curr = current; in print_circular_bug_header() local
1901 curr->comm, task_pid_nr(curr)); in print_circular_bug_header()
1980 struct task_struct *curr = current; in print_circular_bug() local
2008 lockdep_print_held_locks(curr); in print_circular_bug()
2500 print_bad_irq_dependency(struct task_struct *curr, in print_bad_irq_dependency() argument
2521 curr->comm, task_pid_nr(curr), in print_bad_irq_dependency()
2523 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT, in print_bad_irq_dependency()
2525 curr->softirqs_enabled); in print_bad_irq_dependency()
2554 lockdep_print_held_locks(curr); in print_bad_irq_dependency()
2729 static int check_irq_usage(struct task_struct *curr, struct held_lock *prev, in check_irq_usage() argument
[all …]
osq_lock.c 47 int curr = encode_cpu(smp_processor_id()); in osq_wait_next() local
58 if (atomic_read(&lock->tail) == curr && in osq_wait_next()
59 atomic_cmpxchg_acquire(&lock->tail, curr, old) == curr) { in osq_wait_next()
94 int curr = encode_cpu(smp_processor_id()); in osq_lock() local
99 node->cpu = curr; in osq_lock()
107 old = atomic_xchg(&lock->tail, curr); in osq_lock()
210 int curr = encode_cpu(smp_processor_id()); in osq_unlock() local
215 if (likely(atomic_cmpxchg_release(&lock->tail, curr, in osq_unlock()
216 OSQ_UNLOCKED_VAL) == curr)) in osq_unlock()
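
The osq_lock.c hits above revolve around encoding the current CPU into the lock's tail word and releasing it only through a matching cmpxchg. A minimal userspace sketch of that idea using C11 atomics; my_osq, encode_id and the trylock/unlock helpers are illustrative names, not the kernel's API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define UNLOCKED_VAL 0

struct my_osq { atomic_int tail; };

/* The kernel encodes cpu_nr + 1 so that 0 can mean "unlocked"; mirror that. */
static int encode_id(int cpu) { return cpu + 1; }

static bool my_osq_trylock(struct my_osq *lock, int cpu)
{
    int curr = encode_id(cpu);
    int old = UNLOCKED_VAL;

    /* Claim the tail only if nobody else is queued. */
    return atomic_compare_exchange_strong(&lock->tail, &old, curr);
}

static bool my_osq_unlock(struct my_osq *lock, int cpu)
{
    int curr = encode_id(cpu);

    /* Fast path as in osq_unlock(): release only if we are still the tail. */
    return atomic_compare_exchange_strong(&lock->tail, &curr, UNLOCKED_VAL);
}

int main(void)
{
    struct my_osq lock = { .tail = UNLOCKED_VAL };

    printf("lock: %d\n", my_osq_trylock(&lock, 3));   /* 1 */
    printf("unlock: %d\n", my_osq_unlock(&lock, 3));  /* 1 */
    return 0;
}
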
mutex.c 102 unsigned long owner, curr = (unsigned long)current; in __mutex_trylock_common() local
111 if (task != curr) in __mutex_trylock_common()
123 task = curr; in __mutex_trylock_common()
127 if (task == curr) in __mutex_trylock_common()
165 unsigned long curr = (unsigned long)current; in __mutex_trylock_fast() local
168 if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr)) { in __mutex_trylock_fast()
178 unsigned long curr = (unsigned long)current; in __mutex_unlock_fast() local
180 return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL); in __mutex_unlock_fast()
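
The mutex.c fast paths above acquire by atomically swinging the owner word from zero to the current task and release by swinging it back. A hedged userspace sketch of that try_cmpxchg pattern with C11 atomics; my_mutex and self are illustrative stand-ins for the kernel's lock and current:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct my_mutex { atomic_uintptr_t owner; };   /* 0 == unlocked */

static bool my_mutex_trylock_fast(struct my_mutex *lock, uintptr_t self)
{
    uintptr_t zero = 0;

    /* Mirrors the shape of __mutex_trylock_fast(): acquire only if unowned. */
    return atomic_compare_exchange_strong_explicit(&lock->owner, &zero, self,
                                                   memory_order_acquire,
                                                   memory_order_relaxed);
}

static bool my_mutex_unlock_fast(struct my_mutex *lock, uintptr_t self)
{
    /* Mirrors __mutex_unlock_fast(): release only if we still own it. */
    return atomic_compare_exchange_strong_explicit(&lock->owner, &self, 0,
                                                   memory_order_release,
                                                   memory_order_relaxed);
}

int main(void)
{
    struct my_mutex m = { .owner = 0 };
    uintptr_t me = (uintptr_t)&m;              /* stand-in for "current" */

    printf("lock: %d unlock: %d\n",
           my_mutex_trylock_fast(&m, me), my_mutex_unlock_fast(&m, me));
    return 0;
}
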
/kernel/sched/
stop_task.c 74 struct task_struct *curr = rq->curr; in put_prev_task_stop() local
77 delta_exec = rq_clock_task(rq) - curr->se.exec_start; in put_prev_task_stop()
81 schedstat_set(curr->se.statistics.exec_max, in put_prev_task_stop()
82 max(curr->se.statistics.exec_max, delta_exec)); in put_prev_task_stop()
84 curr->se.sum_exec_runtime += delta_exec; in put_prev_task_stop()
85 account_group_exec_runtime(curr, delta_exec); in put_prev_task_stop()
87 curr->se.exec_start = rq_clock_task(rq); in put_prev_task_stop()
88 cgroup_account_cputime(curr, delta_exec); in put_prev_task_stop()
99 static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued) in task_tick_stop() argument
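
put_prev_task_stop() above shows the runtime-accounting step that rt.c, deadline.c and fair.c below repeat: sample a clock, charge now minus exec_start to the task, track the longest slice, and restart the window. A plain C sketch of that bookkeeping, assuming a monotonic nanosecond clock; struct my_task and task_clock_ns are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct my_task {
    uint64_t exec_start;        /* when the current window began */
    uint64_t sum_exec_runtime;  /* total time charged so far */
    uint64_t exec_max;          /* longest single window seen */
};

static uint64_t task_clock_ns(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static void account_runtime(struct my_task *curr)
{
    uint64_t now = task_clock_ns();
    uint64_t delta_exec = now - curr->exec_start;

    if (delta_exec > curr->exec_max)      /* schedstat exec_max update */
        curr->exec_max = delta_exec;
    curr->sum_exec_runtime += delta_exec; /* charge the elapsed window */
    curr->exec_start = now;               /* start a new window */
}

int main(void)
{
    struct my_task t = { .exec_start = task_clock_ns() };

    account_runtime(&t);
    printf("ran for %llu ns\n", (unsigned long long)t.sum_exec_runtime);
    return 0;
}
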
swait.c 24 struct swait_queue *curr; in swake_up_locked() local
29 curr = list_first_entry(&q->task_list, typeof(*curr), task_list); in swake_up_locked()
30 wake_up_process(curr->task); in swake_up_locked()
31 list_del_init(&curr->task_list); in swake_up_locked()
64 struct swait_queue *curr; in swake_up_all() local
70 curr = list_first_entry(&tmp, typeof(*curr), task_list); in swake_up_all()
72 wake_up_state(curr->task, TASK_NORMAL); in swake_up_all()
73 list_del_init(&curr->task_list); in swake_up_all()
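
swake_up_locked() above wakes only the first queued waiter and unlinks it. A sketch of that shape with an ordinary singly linked list; my_waiter, my_queue and wake_up_waiter stand in for the kernel's swait_queue and wake_up_process():

#include <stdio.h>

struct my_waiter {
    const char *name;
    struct my_waiter *next;
};

struct my_queue { struct my_waiter *head; };

static void wake_up_waiter(struct my_waiter *w)
{
    printf("waking %s\n", w->name);   /* stands in for wake_up_process() */
}

/* Wake only the first queued waiter, if any, and unlink it. */
static void wake_one(struct my_queue *q)
{
    struct my_waiter *curr = q->head;

    if (!curr)                /* the list_empty() check in swake_up_locked() */
        return;
    wake_up_waiter(curr);
    q->head = curr->next;     /* list_del_init() equivalent */
    curr->next = NULL;
}

int main(void)
{
    struct my_waiter b = { "B", NULL }, a = { "A", &b };
    struct my_queue q = { &a };

    wake_one(&q);             /* prints "waking A", leaves B queued */
    return 0;
}
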
rt.c 99 rt_rq->highest_prio.curr = MAX_RT_PRIO-1; in init_rt_rq()
175 rt_rq->highest_prio.curr = MAX_RT_PRIO-1; in init_tg_rt_entry()
281 return rq->online && rq->rt.highest_prio.curr > prev->prio; in need_pull_rt_task()
543 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr; in sched_rt_rq_enqueue() local
557 if (rt_rq->highest_prio.curr < curr->prio) in sched_rt_rq_enqueue()
927 if (rt_rq->rt_nr_running && rq->curr == rq->idle) in do_sched_rt_period_timer()
958 return rt_rq->highest_prio.curr; in rt_se_prio()
1020 struct task_struct *curr = rq->curr; in update_curr_rt() local
1021 struct sched_rt_entity *rt_se = &curr->rt; in update_curr_rt()
1025 if (curr->sched_class != &rt_sched_class) in update_curr_rt()
[all …]
deadline.c 443 dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0; in init_dl_rq()
1116 if (dl_task(rq->curr)) in dl_task_timer()
1245 struct task_struct *curr = rq->curr; in update_curr_dl() local
1246 struct sched_dl_entity *dl_se = &curr->dl; in update_curr_dl()
1251 if (!dl_task(curr) || !on_dl_rq(dl_se)) in update_curr_dl()
1263 delta_exec = now - curr->se.exec_start; in update_curr_dl()
1270 schedstat_set(curr->se.statistics.exec_max, in update_curr_dl()
1271 max(curr->se.statistics.exec_max, delta_exec)); in update_curr_dl()
1273 curr->se.sum_exec_runtime += delta_exec; in update_curr_dl()
1274 account_group_exec_runtime(curr, delta_exec); in update_curr_dl()
[all …]
cputime.c 53 void irqtime_account_irq(struct task_struct *curr, unsigned int offset) in irqtime_account_irq() argument
78 } else if ((pc & SOFTIRQ_OFFSET) && curr != this_cpu_ksoftirqd()) { in irqtime_account_irq()
83 trace_android_rvh_account_irq(curr, cpu, delta); in irqtime_account_irq()
86 trace_android_rvh_account_irq_start(curr, cpu, delta); in irqtime_account_irq()
88 trace_android_rvh_account_irq_end(curr, cpu, delta); in irqtime_account_irq()
459 void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev, in cputime_adjust() argument
462 *ut = curr->utime; in cputime_adjust()
463 *st = curr->stime; in cputime_adjust()
563 void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev, in cputime_adjust() argument
571 rtime = curr->sum_exec_runtime; in cputime_adjust()
[all …]
fair.c 546 struct sched_entity *curr = cfs_rq->curr; in update_min_vruntime() local
551 if (curr) { in update_min_vruntime()
552 if (curr->on_rq) in update_min_vruntime()
553 vruntime = curr->vruntime; in update_min_vruntime()
555 curr = NULL; in update_min_vruntime()
561 if (!curr) in update_min_vruntime()
836 struct sched_entity *curr = cfs_rq->curr; in update_curr() local
840 if (unlikely(!curr)) in update_curr()
843 delta_exec = now - curr->exec_start; in update_curr()
847 curr->exec_start = now; in update_curr()
[all …]
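
update_min_vruntime() above takes the running entity's vruntime into account only while it is still queued, and never lets min_vruntime move backwards. A condensed sketch of that selection, omitting the leftmost-rbtree lookup the real function also does; struct entity and pick_min_vruntime are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct entity { uint64_t vruntime; bool on_rq; };

static uint64_t pick_min_vruntime(uint64_t min_vruntime,
                                  const struct entity *curr)
{
    uint64_t vruntime = min_vruntime;

    /* Only a queued curr contributes its vruntime; a dequeued one is
     * treated as absent, like the NULL-ing in update_min_vruntime(). */
    if (curr && curr->on_rq)
        vruntime = curr->vruntime;

    /* min_vruntime may only move forward. */
    return vruntime > min_vruntime ? vruntime : min_vruntime;
}

int main(void)
{
    struct entity curr = { .vruntime = 120, .on_rq = true };

    printf("%llu\n", (unsigned long long)pick_min_vruntime(100, &curr));
    return 0;
}
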
wait.c 86 wait_queue_entry_t *curr, *next; in __wake_up_common() local
92 curr = list_next_entry(bookmark, entry); in __wake_up_common()
97 curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry); in __wake_up_common()
99 if (&curr->entry == &wq_head->head) in __wake_up_common()
102 list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) { in __wake_up_common()
103 unsigned flags = curr->flags; in __wake_up_common()
109 ret = curr->func(curr, mode, wake_flags, key); in __wake_up_common()
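
__wake_up_common() above walks the wait queue and lets each entry's func callback decide how to wake its waiter. A sketch of that callback-per-entry shape; wq_entry, default_wake and wake_all are illustrative, and the bookmark and flag handling of the real loop is omitted:

#include <stdio.h>

struct wq_entry {
    int (*func)(struct wq_entry *entry, int mode); /* per-waiter callback */
    const char *name;
    struct wq_entry *next;
};

static int default_wake(struct wq_entry *entry, int mode)
{
    printf("wake %s (mode %d)\n", entry->name, mode);
    return 1;   /* nonzero: a waiter was actually woken */
}

/* Walk the list and let each entry decide how to wake its waiter. */
static int wake_all(struct wq_entry *head, int mode)
{
    int woken = 0;

    for (struct wq_entry *curr = head; curr; curr = curr->next)
        woken += curr->func(curr, mode);
    return woken;
}

int main(void)
{
    struct wq_entry b = { default_wake, "task-b", NULL };
    struct wq_entry a = { default_wake, "task-a", &b };

    printf("woke %d waiters\n", wake_all(&a, 0));
    return 0;
}
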
membarrier.c 289 p = rcu_dereference(cpu_rq(cpu)->curr); in membarrier_global_expedited()
364 p = rcu_dereference(cpu_rq(cpu_id)->curr); in membarrier_private_expedited()
377 p = rcu_dereference(cpu_rq(cpu)->curr); in membarrier_private_expedited()
477 p = rcu_dereference(rq->curr); in sync_runqueues_membarrier_state()
core.c 731 rq->curr->sched_class->task_tick(rq, rq->curr, 1); in hrtick()
987 struct task_struct *curr = rq->curr; in resched_curr() local
992 if (test_tsk_need_resched(curr)) in resched_curr()
998 set_tsk_need_resched(curr); in resched_curr()
1003 if (set_nr_and_not_polling(curr)) in resched_curr()
2141 if (p->sched_class == rq->curr->sched_class) in check_preempt_curr()
2142 rq->curr->sched_class->check_preempt_curr(rq, p, flags); in check_preempt_curr()
2143 else if (p->sched_class > rq->curr->sched_class) in check_preempt_curr()
2150 if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr)) in check_preempt_curr()
3810 if (!is_idle_task(rcu_dereference(rq->curr))) in wake_up_if_idle()
[all …]
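
resched_curr() above marks the running task for rescheduling only once and skips the cross-CPU kick when the target is already flagged or is polling for the flag. A single-threaded sketch of that test, set, notify ordering; my_task and send_resched_ipi are illustrative:

#include <stdbool.h>
#include <stdio.h>

struct my_task { bool need_resched; bool polling; };

static void send_resched_ipi(void) { puts("kick CPU"); }

static void resched_task(struct my_task *curr)
{
    if (curr->need_resched)   /* test_tsk_need_resched(): already pending */
        return;
    curr->need_resched = true;
    if (!curr->polling)       /* a polling idle loop notices on its own */
        send_resched_ipi();
}

int main(void)
{
    struct my_task t = { false, false };

    resched_task(&t);   /* sets the flag and kicks */
    resched_task(&t);   /* no-op: already pending */
    return 0;
}
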
pelt.c 314 cfs_rq->curr == se)) { in __update_load_avg_se()
330 cfs_rq->curr != NULL)) { in __update_load_avg_cfs_rq()
sched.h 562 struct sched_entity *curr; member
653 int curr; /* highest queued rt task prio */ member
702 u64 curr; member
994 struct task_struct __rcu *curr; member
1183 #define cpu_curr(cpu) (cpu_rq(cpu)->curr)
2074 return rq->curr == p; in task_current()
2235 WARN_ON_ONCE(rq->curr != prev); in put_prev_task()
2315 struct task_struct *p = rq->curr; in get_push_task()
pelt.h 84 if (unlikely(is_idle_task(rq->curr))) { in update_rq_clock_pelt()
cpupri.c 79 struct task_struct *task = READ_ONCE(cpu_rq(cpu)->curr); in drop_nopreempt_cpus()
/kernel/
scs.c 123 unsigned long *p, prev, curr = highest, used = 0; in scs_check_usage() local
134 while (used > curr) { in scs_check_usage()
135 prev = cmpxchg_relaxed(&highest, curr, used); in scs_check_usage()
137 if (prev == curr) { in scs_check_usage()
143 curr = prev; in scs_check_usage()
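
scs_check_usage() above raises a shared high-water mark with a cmpxchg retry loop. A C11-atomics sketch of that lock-free maximum update; update_highest is an illustrative name:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long highest;   /* shared high-water mark */

static void update_highest(unsigned long used)
{
    unsigned long curr = atomic_load_explicit(&highest, memory_order_relaxed);

    /* Retry until either we install "used" or someone else has already
     * published an equal or larger value. */
    while (used > curr) {
        if (atomic_compare_exchange_weak_explicit(&highest, &curr, used,
                                                  memory_order_relaxed,
                                                  memory_order_relaxed))
            break;
        /* On failure, curr has been reloaded with the current value,
         * just as "curr = prev" does in scs_check_usage(). */
    }
}

int main(void)
{
    update_highest(512);
    update_highest(256);   /* lower value: leaves the mark untouched */
    printf("highest = %lu\n", atomic_load(&highest));
    return 0;
}
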
/kernel/time/
timer_list.c 65 struct timerqueue_node *curr; in print_active_timers() local
75 curr = timerqueue_getnext(&base->active); in print_active_timers()
80 while (curr && i < next) { in print_active_timers()
81 curr = timerqueue_iterate_next(curr); in print_active_timers()
85 if (curr) { in print_active_timers()
87 timer = container_of(curr, struct hrtimer, node); in print_active_timers()
/kernel/gcov/
fs.c 677 char *curr; in add_node() local
687 for (curr = filename; (next = strchr(curr, '/')); curr = next + 1) { in add_node()
688 if (curr == next) in add_node()
691 if (strcmp(curr, ".") == 0) in add_node()
693 if (strcmp(curr, "..") == 0) { in add_node()
699 node = get_child_by_name(parent, curr); in add_node()
701 node = new_node(parent, NULL, curr); in add_node()
708 node = new_node(parent, info, curr); in add_node()
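
add_node() above walks a '/'-separated path one component at a time, skipping empty and "." parts and stepping up for "..". A standalone sketch of that tokenizing loop that only prints what it finds; walk_components is an illustrative name and the node lookup and creation are left out:

#include <stdio.h>
#include <string.h>

static void walk_components(const char *filename)
{
    const char *curr, *next;

    for (curr = filename; (next = strchr(curr, '/')); curr = next + 1) {
        size_t len = (size_t)(next - curr);

        if (len == 0)                       /* "//": empty component */
            continue;
        if (len == 1 && curr[0] == '.')     /* ".": stay in place */
            continue;
        if (len == 2 && curr[0] == '.' && curr[1] == '.') {
            printf("up one level\n");       /* add_node() moves to parent */
            continue;
        }
        printf("component: %.*s\n", (int)len, curr);
    }
    if (*curr)
        printf("leaf: %s\n", curr);         /* final component, the file */
}

int main(void)
{
    walk_components("kernel/./gcov//fs.gcda");
    return 0;
}
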
/kernel/power/
process.c 203 struct task_struct *curr = current; in thaw_processes() local
223 WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK)); in thaw_processes()
228 WARN_ON(!(curr->flags & PF_SUSPEND_TASK)); in thaw_processes()
229 curr->flags &= ~PF_SUSPEND_TASK; in thaw_processes()
/kernel/futex/
core.c 370 static void compat_exit_robust_list(struct task_struct *curr);
908 static void exit_pi_state_list(struct task_struct *curr) in exit_pi_state_list() argument
910 struct list_head *next, *head = &curr->pi_state_list; in exit_pi_state_list()
922 raw_spin_lock_irq(&curr->pi_lock); in exit_pi_state_list()
940 raw_spin_unlock_irq(&curr->pi_lock); in exit_pi_state_list()
942 raw_spin_lock_irq(&curr->pi_lock); in exit_pi_state_list()
945 raw_spin_unlock_irq(&curr->pi_lock); in exit_pi_state_list()
949 raw_spin_lock(&curr->pi_lock); in exit_pi_state_list()
962 WARN_ON(pi_state->owner != curr); in exit_pi_state_list()
967 raw_spin_unlock(&curr->pi_lock); in exit_pi_state_list()
[all …]
/kernel/trace/
trace_sched_wakeup.c 404 struct task_struct *curr, in tracing_sched_wakeup_trace() argument
417 entry->prev_pid = curr->pid; in tracing_sched_wakeup_trace()
418 entry->prev_prio = curr->prio; in tracing_sched_wakeup_trace()
419 entry->prev_state = task_state_index(curr); in tracing_sched_wakeup_trace()
trace_functions_graph.c 419 struct ftrace_graph_ent_entry *curr) in get_return_for_leaf() argument
431 curr = &data->ent; in get_return_for_leaf()
461 data->ent = *curr; in get_return_for_leaf()
477 if (curr->ent.pid != next->ent.pid || in get_return_for_leaf()
478 curr->graph_ent.func != next->ret.func) in get_return_for_leaf()
/kernel/events/
uprobes.c 969 struct map_info *curr = NULL; in build_map_info() local
1000 info->next = curr; in build_map_info()
1001 curr = info; in build_map_info()
1011 prev = curr; in build_map_info()
1012 while (curr) { in build_map_info()
1013 mmput(curr->mm); in build_map_info()
1014 curr = curr->next; in build_map_info()
1020 curr = ERR_PTR(-ENOMEM); in build_map_info()
1031 return curr; in build_map_info()
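
build_map_info() above prepends each new entry to a singly linked list and later walks the list to drop the references it took. A userspace sketch of that prepend-then-cleanup shape; map_info here carries only an id, and release_info stands in for mmput() plus freeing:

#include <stdio.h>
#include <stdlib.h>

struct map_info {
    int id;
    struct map_info *next;
};

static struct map_info *prepend(struct map_info *head, int id)
{
    struct map_info *info = malloc(sizeof(*info));

    if (!info)
        return head;          /* the kernel records -ENOMEM; sketch keeps list */
    info->id = id;
    info->next = head;        /* info->next = curr; curr = info; */
    return info;
}

static void release_info(struct map_info *curr)
{
    while (curr) {
        struct map_info *next = curr->next;

        printf("releasing %d\n", curr->id);   /* mmput()/kfree() stand-in */
        free(curr);
        curr = next;
    }
}

int main(void)
{
    struct map_info *curr = NULL;

    for (int i = 0; i < 3; i++)
        curr = prepend(curr, i);
    release_info(curr);       /* releases 2, 1, 0 */
    return 0;
}
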
/kernel/bpf/
core.c 342 s32 end_new, s32 curr, const bool probe_pass) in bpf_adj_delta_to_imm() argument
348 if (curr < pos && curr + imm + 1 >= end_old) in bpf_adj_delta_to_imm()
350 else if (curr >= end_new && curr + imm + 1 < end_new) in bpf_adj_delta_to_imm()
360 s32 end_new, s32 curr, const bool probe_pass) in bpf_adj_delta_to_off() argument
366 if (curr < pos && curr + off + 1 >= end_old) in bpf_adj_delta_to_off()
368 else if (curr >= end_new && curr + off + 1 < end_new) in bpf_adj_delta_to_off()
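
bpf_adj_delta_to_off() above decides whether a relative jump crosses the patched region and therefore needs its offset grown or shrunk by the number of inserted instructions. A sketch of just that decision; adjust_branch is an illustrative name and the range checking of the real function is omitted:

#include <stdint.h>
#include <stdio.h>

static int32_t adjust_branch(int32_t off, int32_t curr, int32_t pos,
                             int32_t end_old, int32_t end_new)
{
    int32_t delta = end_new - end_old;   /* net number of inserted insns */

    /* Branch sits before the patched area but targets at or past its old
     * end: the target has moved forward by delta. */
    if (curr < pos && curr + off + 1 >= end_old)
        off += delta;
    /* Branch sits after the new area but targets something before it:
     * the target is now delta instructions further behind. */
    else if (curr >= end_new && curr + off + 1 < end_new)
        off -= delta;
    return off;
}

int main(void)
{
    /* Two instructions inserted at position 5 (old end 6, new end 8);
     * a jump at 3 with off=+4 used to land on instruction 8, past the
     * patched area, so its offset must grow by 2. */
    printf("new off = %d\n", (int)adjust_branch(4, 3, 5, 6, 8));
    return 0;
}
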
