/kernel/sched/ |
D | sched.h |
      84  struct rq;
      96  extern void calc_global_load_tick(struct rq *this_rq);
      97  extern long calc_load_fold_active(struct rq *this_rq, long adjust);
     556  struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */    member
     625  struct rq *rq;    member
     801  extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
     861  struct rq {    struct
    1021  static inline struct rq *rq_of(struct cfs_rq *cfs_rq)    in rq_of() argument
    1023  return cfs_rq->rq;    in rq_of()
    1028  static inline struct rq *rq_of(struct cfs_rq *cfs_rq)    in rq_of()
    [all …]
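The two rq_of() hits above (sched.h:1021 and sched.h:1028) reflect the two ways a cfs_rq is tied back to its CPU runqueue: through a stored back-pointer when group scheduling lets a cfs_rq live outside the rq, or by recovering the enclosing rq from the embedded member. A minimal userspace sketch of both accessor styles follows; the struct layouts and names are simplified stand-ins, not the kernel's actual definitions or config guards.

/* Minimal userspace sketch (not kernel code): a runqueue that embeds a
 * cfs_rq, plus the two accessor styles hinted at by the rq_of() hits. */
#include <stddef.h>

struct rq;

struct cfs_rq {
    struct rq *rq;          /* back-pointer: CPU runqueue this cfs_rq is attached to */
    unsigned int nr_running;
};

struct rq {
    struct cfs_rq cfs;      /* embedded root cfs_rq */
    int cpu;
};

/* Variant 1: follow the stored back-pointer (group-scheduling style). */
static inline struct rq *rq_of_backptr(struct cfs_rq *cfs_rq)
{
    return cfs_rq->rq;
}

/* Variant 2: recover the enclosing rq from the embedded member. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

static inline struct rq *rq_of_container(struct cfs_rq *cfs_rq)
{
    return container_of(cfs_rq, struct rq, cfs);
}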
|
D | deadline.c |
     28  static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)    in rq_of_dl_rq()
     30  return container_of(dl_rq, struct rq, dl);    in rq_of_dl_rq()
     36  struct rq *rq = task_rq(p);    in dl_rq_of_se() local
     38  return &rq->dl;    in dl_rq_of_se()
    158  struct rq *rq;    in dl_change_utilization() local
    165  rq = task_rq(p);    in dl_change_utilization()
    167  sub_running_bw(&p->dl, &rq->dl);    in dl_change_utilization()
    179  __sub_rq_bw(p->dl.dl_bw, &rq->dl);    in dl_change_utilization()
    180  __add_rq_bw(new_bw, &rq->dl);    in dl_change_utilization()
    242  struct rq *rq = rq_of_dl_rq(dl_rq);    in task_non_contending() local
    [all …]
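The dl_change_utilization() hits show the bookkeeping pattern used when a deadline task's reserved bandwidth changes: the old contribution is subtracted from the runqueue-wide sum and the new one added. Below is a rough standalone sketch of that pattern; the field and helper names are illustrative, not the kernel's exact API.

/* Rough userspace sketch of the bandwidth bookkeeping visible in
 * dl_change_utilization() above.  Not kernel code. */
#include <assert.h>
#include <stdint.h>

struct dl_rq_sketch {
    uint64_t this_bw;   /* sum of reserved bandwidth of tasks on this rq */
};

static void sub_rq_bw_sketch(struct dl_rq_sketch *dl_rq, uint64_t bw)
{
    assert(dl_rq->this_bw >= bw);   /* the kernel warns on underflow instead */
    dl_rq->this_bw -= bw;
}

static void add_rq_bw_sketch(struct dl_rq_sketch *dl_rq, uint64_t bw)
{
    dl_rq->this_bw += bw;
}

static void change_utilization_sketch(struct dl_rq_sketch *dl_rq,
                                      uint64_t old_bw, uint64_t new_bw)
{
    sub_rq_bw_sketch(dl_rq, old_bw);    /* drop the task's old reservation */
    add_rq_bw_sketch(dl_rq, new_bw);    /* account the new one */
}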
|
D | stats.h |
      9  rq_sched_info_arrive(struct rq *rq, unsigned long long delta)    in rq_sched_info_arrive() argument
     11  if (rq) {    in rq_sched_info_arrive()
     12  rq->rq_sched_info.run_delay += delta;    in rq_sched_info_arrive()
     13  rq->rq_sched_info.pcount++;    in rq_sched_info_arrive()
     21  rq_sched_info_depart(struct rq *rq, unsigned long long delta)    in rq_sched_info_depart() argument
     23  if (rq)    in rq_sched_info_depart()
     24  rq->rq_cpu_time += delta;    in rq_sched_info_depart()
     28  rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)    in rq_sched_info_dequeued() argument
     30  if (rq)    in rq_sched_info_dequeued()
     31  rq->rq_sched_info.run_delay += delta;    in rq_sched_info_dequeued()
    [all …]
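These stats.h helpers accumulate per-runqueue schedstat counters: time spent waiting on the runqueue goes into run_delay (on arrival and on dequeue), and CPU time consumed is added to rq_cpu_time on departure. A compact userspace sketch of the same accounting, with a simplified stats struct invented for illustration:

/* Simplified sketch of the schedstat accounting above (not kernel code). */
#include <stdio.h>

struct sched_info_sketch {
    unsigned long long run_delay;   /* time tasks spent waiting to run */
    unsigned long pcount;           /* number of times a task started running */
};

struct rq_sketch {
    struct sched_info_sketch rq_sched_info;
    unsigned long long rq_cpu_time; /* time tasks spent actually running */
};

static void sched_info_arrive_sketch(struct rq_sketch *rq, unsigned long long delta)
{
    rq->rq_sched_info.run_delay += delta;   /* waited 'delta' ns before running */
    rq->rq_sched_info.pcount++;
}

static void sched_info_depart_sketch(struct rq_sketch *rq, unsigned long long delta)
{
    rq->rq_cpu_time += delta;               /* ran for 'delta' ns */
}

int main(void)
{
    struct rq_sketch rq = { { 0, 0 }, 0 };

    sched_info_arrive_sketch(&rq, 1500);
    sched_info_depart_sketch(&rq, 300000);
    printf("run_delay=%llu pcount=%lu cpu_time=%llu\n",
           rq.rq_sched_info.run_delay, rq.rq_sched_info.pcount, rq.rq_cpu_time);
    return 0;
}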
|
D | pelt.h |
      7  int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
      8  int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
     11  int update_irq_load_avg(struct rq *rq, u64 running);
     14  update_irq_load_avg(struct rq *rq, u64 running)    in update_irq_load_avg() argument
     58  static inline void update_rq_clock_pelt(struct rq *rq, s64 delta)    in update_rq_clock_pelt() argument
     60  if (unlikely(is_idle_task(rq->curr))) {    in update_rq_clock_pelt()
     62  rq->clock_pelt = rq_clock_task(rq);    in update_rq_clock_pelt()
     82  delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));    in update_rq_clock_pelt()
     83  delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));    in update_rq_clock_pelt()
     85  rq->clock_pelt += delta;    in update_rq_clock_pelt()
    [all …]
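update_rq_clock_pelt() above scales the elapsed task-clock delta twice, once by CPU capacity and once by the current frequency capacity, so that PELT time advances more slowly on smaller or slower CPUs. In the kernel, cap_scale() is a fixed-point multiply on the 0..1024 capacity scale (SCHED_CAPACITY_SHIFT = 10). A hedged standalone sketch of that scaling, with invented capacity values:

/* Standalone sketch of the PELT clock scaling shown above.  cap_scale()
 * mirrors the kernel's fixed-point scaling (1024 == full capacity); the
 * capacity numbers below are made up for illustration. */
#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT    10
#define SCHED_CAPACITY_SCALE    (1UL << SCHED_CAPACITY_SHIFT)

static inline int64_t cap_scale(int64_t v, uint64_t cap)
{
    return (v * (int64_t)cap) >> SCHED_CAPACITY_SHIFT;
}

int main(void)
{
    int64_t delta = 4000;       /* ns of task clock that just elapsed */
    uint64_t cpu_cap = 446;     /* e.g. a LITTLE core (1024 = biggest core) */
    uint64_t freq_cap = 512;    /* currently running at half the max frequency */

    /* Same double scaling as update_rq_clock_pelt(): */
    delta = cap_scale(delta, cpu_cap);
    delta = cap_scale(delta, freq_cap);

    printf("clock_pelt advances by %lld ns instead of 4000\n", (long long)delta);
    return 0;
}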
|
D | rt.c |
    130  static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)    in rq_of_rt_rq()
    132  return rt_rq->rq;    in rq_of_rt_rq()
    140  static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)    in rq_of_rt_se()
    144  return rt_rq->rq;    in rq_of_rt_se()
    169  struct rq *rq = cpu_rq(cpu);    in init_tg_rt_entry() local
    173  rt_rq->rq = rq;    in init_tg_rt_entry()
    183  rt_se->rt_rq = &rq->rt;    in init_tg_rt_entry()
    241  static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)    in rq_of_rt_rq()
    243  return container_of(rt_rq, struct rq, rt);    in rq_of_rt_rq()
    246  static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)    in rq_of_rt_se()
    [all …]
|
D | core.c |
     44  DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
     86  struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)    in __task_rq_lock()
     87  __acquires(rq->lock)    in __task_rq_lock()
     89  struct rq *rq;    in __task_rq_lock() local
     94  rq = task_rq(p);    in __task_rq_lock()
     95  raw_spin_lock(&rq->lock);    in __task_rq_lock()
     96  if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {    in __task_rq_lock()
     97  rq_pin_lock(rq, rf);    in __task_rq_lock()
     98  return rq;    in __task_rq_lock()
    100  raw_spin_unlock(&rq->lock);    in __task_rq_lock()
    [all …]
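__task_rq_lock() is a classic lock-then-revalidate loop: read the task's current runqueue, take its lock, and keep the lock only if the task is still on that runqueue and not mid-migration; otherwise drop it and retry. A simplified pthread sketch of the same pattern, where the types and the migration flag are stand-ins rather than the kernel's:

/* Userspace sketch of the lock-and-revalidate loop in __task_rq_lock(). */
#include <pthread.h>
#include <stdatomic.h>

struct rq_sketch {
    pthread_mutex_t lock;
};

struct task_sketch {
    _Atomic(struct rq_sketch *) rq;  /* runqueue the task currently belongs to */
    atomic_bool migrating;           /* set while the task is being moved */
};

static struct rq_sketch *task_rq_lock_sketch(struct task_sketch *p)
{
    for (;;) {
        struct rq_sketch *rq = atomic_load(&p->rq);

        pthread_mutex_lock(&rq->lock);
        /* Re-check under the lock: the task may have migrated between the
         * load above and acquiring the lock. */
        if (rq == atomic_load(&p->rq) && !atomic_load(&p->migrating))
            return rq;               /* caller now holds the right rq lock */
        pthread_mutex_unlock(&rq->lock);
        /* Lost the race: retry against the task's new runqueue. */
    }
}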
|
D | stop_task.c |
     20  balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)    in balance_stop() argument
     22  return sched_stop_runnable(rq);    in balance_stop()
     27  check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)    in check_preempt_curr_stop() argument
     32  static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool first)    in set_next_task_stop() argument
     34  stop->se.exec_start = rq_clock_task(rq);    in set_next_task_stop()
     38  pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)    in pick_next_task_stop() argument
     42  if (!sched_stop_runnable(rq))    in pick_next_task_stop()
     45  set_next_task_stop(rq, rq->stop, true);    in pick_next_task_stop()
     46  return rq->stop;    in pick_next_task_stop()
     50  enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)    in enqueue_task_stop() argument
    [all …]
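stop_task.c (like idle.c below) implements the hooks of one scheduling class, here the highest-priority "stop" class: balance, check_preempt_curr, set_next_task, pick_next_task, enqueue and friends, all taking the rq. The overall shape is a per-policy table of function pointers. A toy sketch of that dispatch structure follows; the names are invented and the real struct sched_class has many more hooks.

/* Toy sketch of the per-class hook table behind files like stop_task.c
 * and idle.c.  Not the kernel's struct sched_class. */
#include <stddef.h>

struct rq_sketch;
struct task_sketch;

struct sched_class_sketch {
    struct task_sketch *(*pick_next_task)(struct rq_sketch *rq);
    void (*set_next_task)(struct rq_sketch *rq, struct task_sketch *p);
    void (*enqueue_task)(struct rq_sketch *rq, struct task_sketch *p);
    void (*dequeue_task)(struct rq_sketch *rq, struct task_sketch *p);
};

struct rq_sketch {
    struct task_sketch *stop;   /* the per-CPU stopper task */
    int stop_runnable;
};

/* "stop" class: if the stopper task is runnable it always wins. */
static void set_next_task_stop_sketch(struct rq_sketch *rq, struct task_sketch *p)
{
    (void)rq; (void)p;          /* kernel version stamps p->se.exec_start here */
}

static struct task_sketch *pick_next_task_stop_sketch(struct rq_sketch *rq)
{
    if (!rq->stop_runnable)
        return NULL;            /* let a lower class pick instead */
    set_next_task_stop_sketch(rq, rq->stop);
    return rq->stop;
}

static const struct sched_class_sketch stop_sched_class_sketch = {
    .pick_next_task = pick_next_task_stop_sketch,
    .set_next_task  = set_next_task_stop_sketch,
    .enqueue_task   = NULL,
    .dequeue_task   = NULL,
};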
|
D | idle.c |
    371  balance_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)    in balance_idle() argument
    380  static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)    in check_preempt_curr_idle() argument
    382  resched_curr(rq);    in check_preempt_curr_idle()
    385  static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)    in put_prev_task_idle() argument
    389  static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)    in set_next_task_idle() argument
    391  update_idle_core(rq);    in set_next_task_idle()
    392  schedstat_inc(rq->sched_goidle);    in set_next_task_idle()
    396  pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)    in pick_next_task_idle() argument
    398  struct task_struct *next = rq->idle;    in pick_next_task_idle()
    401  put_prev_task(rq, prev);    in pick_next_task_idle()
    [all …]
|
D | fair.c |
    294  struct rq *rq = rq_of(cfs_rq);    in list_add_leaf_cfs_rq() local
    295  int cpu = cpu_of(rq);    in list_add_leaf_cfs_rq()
    298  return rq->tmp_alone_branch == &rq->leaf_cfs_rq_list;    in list_add_leaf_cfs_rq()
    326  rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;    in list_add_leaf_cfs_rq()
    336  &rq->leaf_cfs_rq_list);    in list_add_leaf_cfs_rq()
    341  rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;    in list_add_leaf_cfs_rq()
    351  list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch);    in list_add_leaf_cfs_rq()
    356  rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;    in list_add_leaf_cfs_rq()
    363  struct rq *rq = rq_of(cfs_rq);    in list_del_leaf_cfs_rq() local
    372  if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list)    in list_del_leaf_cfs_rq()
    [all …]
|
D | pelt.c |
    317  int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)    in update_rt_rq_load_avg() argument
    319  if (___update_load_sum(now, &rq->avg_rt,    in update_rt_rq_load_avg()
    324  ___update_load_avg(&rq->avg_rt, 1, 1);    in update_rt_rq_load_avg()
    325  trace_pelt_rt_tp(rq);    in update_rt_rq_load_avg()
    341  int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)    in update_dl_rq_load_avg() argument
    343  if (___update_load_sum(now, &rq->avg_dl,    in update_dl_rq_load_avg()
    348  ___update_load_avg(&rq->avg_dl, 1, 1);    in update_dl_rq_load_avg()
    349  trace_pelt_dl_tp(rq);    in update_dl_rq_load_avg()
    366  int update_irq_load_avg(struct rq *rq, u64 running)    in update_irq_load_avg() argument
    375  running = cap_scale(running, arch_scale_freq_capacity(cpu_of(rq)));    in update_irq_load_avg()
    [all …]
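pelt.c maintains the per-runqueue load averages referenced above (rq->avg_rt, rq->avg_dl, the IRQ average). The core idea is a geometrically decayed sum: activity is accumulated in roughly 1 ms periods, and every older period is decayed by a constant y chosen so that a contribution halves after 32 periods. A very rough floating-point sketch of that decay; the kernel's ___update_load_sum()/___update_load_avg() use 32-bit fixed point and lookup tables, not floats.

/* Rough sketch of a PELT-style geometric average (not kernel code). */
#include <math.h>
#include <stdio.h>

#define PELT_PERIOD_US  1024        /* ~1 ms accounting period */

int main(void)
{
    /* y chosen so a contribution halves after 32 periods (~32 ms). */
    const double y = pow(0.5, 1.0 / 32.0);
    double sum = 0.0;
    int period;

    /* A task that runs flat out: each period contributes the full period
     * length, and all older periods decay by y. */
    for (period = 0; period < 345; period++)
        sum = sum * y + PELT_PERIOD_US;

    /* The running sum converges towards 1/(1-y) periods' worth of load;
     * normalizing by that maximum gives a utilization in [0, 1]. */
    printf("utilization ~= %.3f\n", sum * (1.0 - y) / PELT_PERIOD_US);
    return 0;
}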
|
D | stats.c |
     23  struct rq *rq;    in show_schedstat() local
     29  rq = cpu_rq(cpu);    in show_schedstat()
     34  cpu, rq->yld_count,    in show_schedstat()
     35  rq->sched_count, rq->sched_goidle,    in show_schedstat()
     36  rq->ttwu_count, rq->ttwu_local,    in show_schedstat()
     37  rq->rq_cpu_time,    in show_schedstat()
     38  rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);    in show_schedstat()
|
D | loadavg.c |
     80  long calc_load_fold_active(struct rq *this_rq, long adjust)    in calc_load_fold_active()
    235  static void calc_load_nohz_fold(struct rq *rq)    in calc_load_nohz_fold() argument
    239  delta = calc_load_fold_active(rq, 0);    in calc_load_nohz_fold()
    260  void calc_load_nohz_remote(struct rq *rq)    in calc_load_nohz_remote() argument
    262  calc_load_nohz_fold(rq);    in calc_load_nohz_remote()
    267  struct rq *this_rq = this_rq();    in calc_load_nohz_stop()
    387  void calc_global_load_tick(struct rq *this_rq)    in calc_global_load_tick()
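loadavg.c folds per-runqueue activity (calc_load_fold_active()) into a global count and, roughly every five seconds, updates the 1/5/15-minute averages with a fixed-point exponential decay. A standalone sketch of that update using the traditional 11-bit fixed-point constants (treat the exact values as an assumption to double-check against the tree you are reading):

/* Sketch of the fixed-point load-average update driven by
 * calc_global_load_tick()/calc_load_fold_active() above. */
#include <stdio.h>

#define FSHIFT  11                      /* bits of fractional precision */
#define FIXED_1 (1UL << FSHIFT)         /* 1.0 in fixed point */
#define EXP_1   1884                    /* 1/exp(5s/1min) in fixed point */
#define EXP_5   2014                    /* 1/exp(5s/5min) */
#define EXP_15  2037                    /* 1/exp(5s/15min) */

/* new_load = old_load * e + active * (1 - e), all in fixed point. */
static unsigned long calc_load(unsigned long load, unsigned long exp,
                               unsigned long active)
{
    unsigned long newload = load * exp + active * (FIXED_1 - exp);

    if (active >= load)
        newload += FIXED_1 - 1;         /* round up while load is rising */
    return newload / FIXED_1;
}

int main(void)
{
    unsigned long avenrun[3] = { 0, 0, 0 };
    unsigned long active = 3 * FIXED_1; /* 3 runnable tasks at each sample */
    int i;

    for (i = 0; i < 12; i++) {          /* one minute of 5-second samples */
        avenrun[0] = calc_load(avenrun[0], EXP_1, active);
        avenrun[1] = calc_load(avenrun[1], EXP_5, active);
        avenrun[2] = calc_load(avenrun[2], EXP_15, active);
    }
    printf("load: %lu.%02lu %lu.%02lu %lu.%02lu\n",
           avenrun[0] >> FSHIFT, ((avenrun[0] & (FIXED_1 - 1)) * 100) >> FSHIFT,
           avenrun[1] >> FSHIFT, ((avenrun[1] & (FIXED_1 - 1)) * 100) >> FSHIFT,
           avenrun[2] >> FSHIFT, ((avenrun[2] & (FIXED_1 - 1)) * 100) >> FSHIFT);
    return 0;
}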
|
D | cputime.c |
    228  struct rq *rq = this_rq();    in account_idle_time() local
    230  if (atomic_read(&rq->nr_iowait) > 0)    in account_idle_time()
    286  struct rq *rq;    in read_sum_exec_runtime() local
    288  rq = task_rq_lock(t, &rf);    in read_sum_exec_runtime()
    290  task_rq_unlock(rq, t, &rf);    in read_sum_exec_runtime()
    365  struct rq *rq, int ticks)    in irqtime_account_process_tick() argument
    391  } else if (p == rq->idle) {    in irqtime_account_process_tick()
    402  struct rq *rq = this_rq();    in irqtime_account_idle_ticks() local
    404  irqtime_account_process_tick(current, 0, rq, ticks);    in irqtime_account_idle_ticks()
    409  struct rq *rq, int nr_ticks) { }    in irqtime_account_process_tick() argument
    [all …]
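account_idle_time() splits an idle tick between "iowait" and "idle" depending on whether any task from this runqueue is currently blocked on I/O (rq->nr_iowait). A minimal sketch of that classification, with invented stand-in types:

/* Minimal sketch of the idle/iowait split in account_idle_time(). */
#include <stdatomic.h>
#include <stdint.h>

struct cpu_times_sketch {
    uint64_t idle;          /* idle with nothing waiting on I/O */
    uint64_t iowait;        /* idle, but some task is blocked on I/O */
};

struct rq_sketch {
    atomic_int nr_iowait;   /* tasks from this rq sleeping in io_schedule() */
};

static void account_idle_time_sketch(struct rq_sketch *rq,
                                     struct cpu_times_sketch *t,
                                     uint64_t delta)
{
    if (atomic_load(&rq->nr_iowait) > 0)
        t->iowait += delta;
    else
        t->idle += delta;
}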
|
D | debug.c |
    453  print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)    in print_task() argument
    455  if (rq->curr == p)    in print_task()
    481  static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)    in print_rq() argument
    497  print_task(m, rq, p);    in print_rq()
    506  struct rq *rq = cpu_rq(cpu);    in print_cfs_rq() local
    520  raw_spin_lock_irqsave(&rq->lock, flags);    in print_cfs_rq()
    528  raw_spin_unlock_irqrestore(&rq->lock, flags);    in print_cfs_rq()
    635  struct rq *rq = cpu_rq(cpu);    in print_cpu() local
    650  if (sizeof(rq->x) == 4) \    in print_cpu()
    651  SEQ_printf(m, " .%-30s: %ld\n", #x, (long)(rq->x)); \    in print_cpu()
    [all …]
|
D | cpufreq_schedutil.c |
    217  struct rq *rq = cpu_rq(cpu);    in schedutil_cpu_util() local
    220  type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) {    in schedutil_cpu_util()
    229  irq = cpu_util_irq(rq);    in schedutil_cpu_util()
    245  util = util_cfs + cpu_util_rt(rq);    in schedutil_cpu_util()
    247  util = uclamp_rq_util_with(rq, util, p);    in schedutil_cpu_util()
    249  dl_util = cpu_util_dl(rq);    in schedutil_cpu_util()
    293  util += cpu_bw_dl(rq);    in schedutil_cpu_util()
    300  struct rq *rq = cpu_rq(sg_cpu->cpu);    in sugov_get_util() local
    301  unsigned long util = cpu_util_cfs(rq);    in sugov_get_util()
    305  sg_cpu->bw_dl = cpu_bw_dl(rq);    in sugov_get_util()
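schedutil_cpu_util() combines the per-class signals kept on the runqueue: CFS and RT utilization are summed, that sum is scaled by the fraction of time not consumed by IRQs, the IRQ contribution is added back, and, for frequency selection, the DL bandwidth is added on top. A rough arithmetic sketch of that aggregation with invented inputs; details such as uclamp and the runnable-RT shortcut to max are left out, so treat it as an approximation rather than a verbatim copy of the kernel's formula.

/* Rough sketch of the utilization aggregation in schedutil_cpu_util().
 * All values are on the usual 0..1024 capacity scale. */
#include <stdio.h>

#define SCHED_CAPACITY_SCALE    1024UL

static unsigned long min_ul(unsigned long a, unsigned long b)
{
    return a < b ? a : b;
}

/* Frequency-selection flavour of the aggregation. */
static unsigned long cpu_util_freq_sketch(unsigned long util_cfs,
                                          unsigned long util_rt,
                                          unsigned long util_irq,
                                          unsigned long bw_dl,
                                          unsigned long max)
{
    unsigned long util;

    if (util_irq >= max)
        return max;                 /* CPU is effectively saturated by IRQs */

    /* Time not stolen by IRQs is shared by CFS + RT ... */
    util = util_cfs + util_rt;
    /* ... so scale it by the fraction of time left over after IRQs. */
    util = util * (max - util_irq) / max;
    util += util_irq;

    /* For frequency selection, reserve the DL bandwidth on top. */
    util += bw_dl;

    return min_ul(util, max);
}

int main(void)
{
    printf("target util = %lu / 1024\n",
           cpu_util_freq_sketch(300, 100, 64, 128, SCHED_CAPACITY_SCALE));
    return 0;
}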
|
D | psi.c |
    816  struct rq *rq;    in psi_memstall_enter() local
    829  rq = this_rq_lock_irq(&rf);    in psi_memstall_enter()
    834  rq_unlock_irq(rq, &rf);    in psi_memstall_enter()
    846  struct rq *rq;    in psi_memstall_leave() local
    858  rq = this_rq_lock_irq(&rf);    in psi_memstall_leave()
    863  rq_unlock_irq(rq, &rf);    in psi_memstall_leave()
    906  struct rq *rq;    in cgroup_move_task() local
    917  rq = task_rq_lock(task, &rf);    in cgroup_move_task()
    936  task_rq_unlock(rq, task, &rf);    in cgroup_move_task()
|
D | topology.c |
    440  void rq_attach_root(struct rq *rq, struct root_domain *rd)    in rq_attach_root() argument
    445  raw_spin_lock_irqsave(&rq->lock, flags);    in rq_attach_root()
    447  if (rq->rd) {    in rq_attach_root()
    448  old_rd = rq->rd;    in rq_attach_root()
    450  if (cpumask_test_cpu(rq->cpu, old_rd->online))    in rq_attach_root()
    451  set_rq_offline(rq);    in rq_attach_root()
    453  cpumask_clear_cpu(rq->cpu, old_rd->span);    in rq_attach_root()
    465  rq->rd = rd;    in rq_attach_root()
    467  cpumask_set_cpu(rq->cpu, rd->span);    in rq_attach_root()
    468  if (cpumask_test_cpu(rq->cpu, cpu_active_mask))    in rq_attach_root()
    [all …]
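rq_attach_root() moves a runqueue from its old root domain to a new one under rq->lock: take the CPU offline in the old domain, drop it from the old span, switch the rd pointer, add it to the new span, and bring it online again if the CPU is active. A compact sketch of that detach/attach sequence, with a plain bitmask standing in for a cpumask and the locking and class callbacks elided:

/* Sketch of the detach/attach sequence in rq_attach_root() (not kernel code). */
#include <stdint.h>
#include <stdbool.h>

struct root_domain_sketch {
    uint64_t span;          /* CPUs covered by this root domain */
    uint64_t online;        /* CPUs currently online in it */
};

struct rq_sketch {
    int cpu;
    struct root_domain_sketch *rd;
};

static bool cpu_in(uint64_t mask, int cpu)   { return mask & (1ULL << cpu); }
static void cpu_set(uint64_t *mask, int cpu) { *mask |=  (1ULL << cpu); }
static void cpu_clr(uint64_t *mask, int cpu) { *mask &= ~(1ULL << cpu); }

static void rq_attach_root_sketch(struct rq_sketch *rq,
                                  struct root_domain_sketch *rd,
                                  uint64_t cpu_active_mask)
{
    struct root_domain_sketch *old_rd = rq->rd;

    if (old_rd) {
        /* Take the CPU offline in the old domain, drop it from the old span. */
        if (cpu_in(old_rd->online, rq->cpu))
            cpu_clr(&old_rd->online, rq->cpu);  /* ~ set_rq_offline() */
        cpu_clr(&old_rd->span, rq->cpu);
    }

    rq->rd = rd;
    cpu_set(&rd->span, rq->cpu);
    if (cpu_in(cpu_active_mask, rq->cpu))
        cpu_set(&rd->online, rq->cpu);          /* ~ set_rq_online() */
}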
|
D | membarrier.c |
    267  struct rq *rq = cpu_rq(cpu);    in sync_runqueues_membarrier_state() local
    270  p = rcu_dereference(rq->curr);    in sync_runqueues_membarrier_state()
|
/kernel/trace/ |
D | blktrace.c |
    811  blk_trace_request_get_cgid(struct request_queue *q, struct request *rq)    in blk_trace_request_get_cgid() argument
    813  if (!rq->bio)    in blk_trace_request_get_cgid()
    816  return blk_trace_bio_get_cgid(q, rq->bio);    in blk_trace_request_get_cgid()
    835  static void blk_add_trace_rq(struct request *rq, int error,    in blk_add_trace_rq() argument
    842  bt = rcu_dereference(rq->q->blk_trace);    in blk_add_trace_rq()
    848  if (blk_rq_is_passthrough(rq))    in blk_add_trace_rq()
    853  __blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, req_op(rq),    in blk_add_trace_rq()
    854  rq->cmd_flags, what, error, 0, NULL, cgid);    in blk_add_trace_rq()
    859  struct request_queue *q, struct request *rq)    in blk_add_trace_rq_insert() argument
    861  blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT,    in blk_add_trace_rq_insert()
    [all …]
|
/kernel/livepatch/ |
D | transition.c |
    284  struct rq *rq;    in klp_try_switch_task() local
    307  rq = task_rq_lock(task, &flags);    in klp_try_switch_task()
    309  if (task_running(rq, task) && task != current) {    in klp_try_switch_task()
    326  task_rq_unlock(rq, task, &flags);    in klp_try_switch_task()
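klp_try_switch_task() pins the task to its runqueue with task_rq_lock() and backs off if the task is running on some CPU and is not the caller itself; otherwise it can be switched to the new patch state, and busy tasks are retried later. A small sketch of that check-under-lock pattern with pthread primitives and invented fields:

/* Sketch of the "only switch a task that isn't running" check in
 * klp_try_switch_task(), with invented userspace stand-ins. */
#include <pthread.h>
#include <stdbool.h>

struct task_sketch {
    pthread_mutex_t rq_lock;    /* stands in for task_rq_lock() */
    bool on_cpu;                /* is the task executing right now? */
    int patched;                /* which "patch generation" it runs */
};

static bool try_switch_task_sketch(struct task_sketch *task,
                                   struct task_sketch *current_task,
                                   int target_generation)
{
    bool success = false;

    pthread_mutex_lock(&task->rq_lock);

    /* A task that is on a CPU (and isn't us) may be mid-way through an
     * old function; leave it for a later retry. */
    if (task->on_cpu && task != current_task)
        goto out;

    /* The kernel also walks the task's stack for to-be-patched functions
     * here; that part is omitted in this sketch. */
    task->patched = target_generation;
    success = true;
out:
    pthread_mutex_unlock(&task->rq_lock);
    return success;
}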
|