Home
last modified time | relevance | path

Searched refs: this_rq (Results 1 – 8 of 8) sorted by relevance

/kernel/sched/
Dloadavg.c80 long calc_load_fold_active(struct rq *this_rq, long adjust) in calc_load_fold_active() argument
84 nr_active = this_rq->nr_running - adjust; in calc_load_fold_active()
85 nr_active += (long)this_rq->nr_uninterruptible; in calc_load_fold_active()
87 if (nr_active != this_rq->calc_load_active) { in calc_load_fold_active()
88 delta = nr_active - this_rq->calc_load_active; in calc_load_fold_active()
89 this_rq->calc_load_active = nr_active; in calc_load_fold_active()
253 calc_load_nohz_fold(this_rq()); in calc_load_nohz_start()
267 struct rq *this_rq = this_rq(); in calc_load_nohz_stop() local
272 this_rq->calc_load_update = READ_ONCE(calc_load_update); in calc_load_nohz_stop()
273 if (time_before(jiffies, this_rq->calc_load_update)) in calc_load_nohz_stop()
[all …]
Dsched.h96 extern void calc_global_load_tick(struct rq *this_rq);
97 extern long calc_load_fold_active(struct rq *this_rq, long adjust);
1060 #define this_rq() this_cpu_ptr(&runqueues) macro
1272 rq = this_rq(); in this_rq_lock_irq()
1474 extern int newidle_balance(struct rq *this_rq, struct rq_flags *rf);
1480 static inline int newidle_balance(struct rq *this_rq, struct rq_flags *rf) { return 0; } in newidle_balance() argument
1757 void (*task_woken)(struct rq *this_rq, struct task_struct *task);
1775 void (*switched_from)(struct rq *this_rq, struct task_struct *task);
1776 void (*switched_to) (struct rq *this_rq, struct task_struct *task);
1777 void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
[all …]
Drt.c270 static void pull_rt_task(struct rq *this_rq);
432 static inline void pull_rt_task(struct rq *this_rq) in pull_rt_task() argument
591 return this_rq()->rd->span; in sched_rt_period_mask()
2120 rq = this_rq(); in rto_push_irq_work_func()
2149 static void pull_rt_task(struct rq *this_rq) in pull_rt_task() argument
2151 int this_cpu = this_rq->cpu, cpu; in pull_rt_task()
2155 int rt_overload_count = rt_overloaded(this_rq); in pull_rt_task()
2168 cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask)) in pull_rt_task()
2173 tell_cpu_to_push(this_rq); in pull_rt_task()
2178 for_each_cpu(cpu, this_rq->rd->rto_mask) { in pull_rt_task()
[all …]
Dfair.c6092 avg_idle = this_rq()->avg_idle / 512; in select_idle_cpu()
9138 static int load_balance(int this_cpu, struct rq *this_rq, in load_balance() argument
9152 .dst_rq = this_rq, in load_balance()
9844 SCHED_WARN_ON(rq != this_rq()); in nohz_balance_exit_idle()
9939 static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags, in _nohz_idle_balance() argument
9947 int this_cpu = this_rq->cpu; in _nohz_idle_balance()
10020 has_blocked_load |= this_rq->has_blocked_load; in _nohz_idle_balance()
10024 rebalance_domains(this_rq, CPU_IDLE); in _nohz_idle_balance()
10044 static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) in nohz_idle_balance() argument
10046 int this_cpu = this_rq->cpu; in nohz_idle_balance()
[all …]
Dcputime.c228 struct rq *rq = this_rq(); in account_idle_time()
248 steal -= this_rq()->prev_steal_time; in steal_account_process_time()
251 this_rq()->prev_steal_time += steal; in steal_account_process_time()
402 struct rq *rq = this_rq(); in irqtime_account_idle_ticks()
486 struct rq *rq = this_rq(); in account_process_tick()
Ddeadline.c2158 static void pull_dl_task(struct rq *this_rq) in pull_dl_task() argument
2160 int this_cpu = this_rq->cpu, cpu; in pull_dl_task()
2166 if (likely(!dl_overloaded(this_rq))) in pull_dl_task()
2175 for_each_cpu(cpu, this_rq->rd->dlo_mask) { in pull_dl_task()
2185 if (this_rq->dl.dl_nr_running && in pull_dl_task()
2186 dl_time_before(this_rq->dl.earliest_dl.curr, in pull_dl_task()
2191 double_lock_balance(this_rq, src_rq); in pull_dl_task()
2208 (!this_rq->dl.dl_nr_running || in pull_dl_task()
2210 this_rq->dl.earliest_dl.curr))) { in pull_dl_task()
2226 activate_task(this_rq, p, 0); in pull_dl_task()
[all …]
Didle.c22 idle_set_state(this_rq(), idle_state); in sched_idle_set_state()
Dcore.c303 if (rq == this_rq()) { in hrtick_start()
1657 struct rq *rq = this_rq(); in migration_cpu_stop()
2298 rq = this_rq(); in ttwu_stat()
2411 struct rq *rq = this_rq(); in sched_ttwu_pending()
2437 if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick()) in scheduler_ipi()
2460 this_rq()->idle_balance = 1; in scheduler_ipi()
3309 struct rq *rq = this_rq(); in finish_task_switch()
4025 schedstat_inc(this_rq()->sched_count); in schedule_debug()
5817 rq = this_rq(); in yield_to()
6299 BUG_ON(current != this_rq()->idle); in idle_task_exit()