Searched refs:rq (Results 1 – 20 of 20) sorted by relevance

/kernel/sched/
pelt.h
7 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
8 int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
11 int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);
13 static inline u64 thermal_load_avg(struct rq *rq) in thermal_load_avg() argument
15 return READ_ONCE(rq->avg_thermal.load_avg); in thermal_load_avg()
19 update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity) in update_thermal_load_avg() argument
24 static inline u64 thermal_load_avg(struct rq *rq) in thermal_load_avg() argument
31 int update_irq_load_avg(struct rq *rq, u64 running);
34 update_irq_load_avg(struct rq *rq, u64 running) in update_irq_load_avg() argument
64 static inline u64 rq_clock_task_mult(struct rq *rq) in rq_clock_task_mult() argument
[all …]
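
The pelt.h hits show the kernel's usual config-gating idiom: a real prototype when the relevant feature is built in, and a static inline no-op stub otherwise, so call sites compile unconditionally without #ifdefs of their own. A minimal standalone sketch of that idiom, with simplified types and a stand-in config macro rather than the kernel's header:

#include <stdio.h>

struct rq { unsigned long long avg_thermal; };

#ifdef CONFIG_SCHED_THERMAL_PRESSURE
/* Real implementation, compiled only when the feature is configured in
 * (sketch only: no definition is provided here). */
int update_thermal_load_avg(unsigned long long now, struct rq *rq,
                            unsigned long long capacity);
#else
/* No-op stub: callers build and run unchanged when the feature is out. */
static inline int update_thermal_load_avg(unsigned long long now,
                                          struct rq *rq,
                                          unsigned long long capacity)
{
        return 0;
}
#endif

int main(void)
{
        struct rq rq = { 0 };

        printf("%d\n", update_thermal_load_avg(0, &rq, 1024));
        return 0;
}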

sched.h
103 struct rq;
117 extern void calc_global_load_tick(struct rq *this_rq);
118 extern long calc_load_fold_active(struct rq *this_rq, long adjust);
120 extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
628 struct rq *rq; /* CPU runqueue to which this cfs_rq is attached */ member
704 struct rq *rq; member
906 extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
913 extern struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu);
960 struct rq;
963 void (*func)(struct rq *rq);
[all …]

deadline.c
62 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq) in rq_of_dl_rq()
64 return container_of(dl_rq, struct rq, dl); in rq_of_dl_rq()
70 struct rq *rq = task_rq(p); in dl_rq_of_se() local
72 return &rq->dl; in dl_rq_of_se()
177 struct rq *rq = cpu_rq(i); in __dl_update() local
179 rq->dl.extra_bw += bw; in __dl_update()
313 struct rq *rq; in dl_change_utilization() local
320 rq = task_rq(p); in dl_change_utilization()
322 sub_running_bw(&p->dl, &rq->dl); in dl_change_utilization()
334 __sub_rq_bw(p->dl.dl_bw, &rq->dl); in dl_change_utilization()
[all …]
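
rq_of_dl_rq() works because every struct rq embeds its deadline runqueue as the member dl, so container_of() can recover the enclosing runqueue from the member's offset. A self-contained sketch of the same arithmetic, using toy struct layouts rather than the scheduler's:

#include <stddef.h>
#include <stdio.h>

/* Same shape as the kernel macro: back up by the member's offset. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct dl_rq { int nr_queued; };
struct rq   { int cpu; struct dl_rq dl; };

static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
        return container_of(dl_rq, struct rq, dl);
}

int main(void)
{
        struct rq rq = { .cpu = 3 };

        /* Round-trip from the embedded member back to its container. */
        printf("cpu=%d\n", rq_of_dl_rq(&rq.dl)->cpu);   /* prints cpu=3 */
        return 0;
}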

rt.c
181 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq()
183 return rt_rq->rq; in rq_of_rt_rq()
191 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se) in rq_of_rt_se()
195 return rt_rq->rq; in rq_of_rt_se()
224 struct rq *rq = cpu_rq(cpu); in init_tg_rt_entry() local
228 rt_rq->rq = rq; in init_tg_rt_entry()
238 rt_se->rt_rq = &rq->rt; in init_tg_rt_entry()
296 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq()
298 return container_of(rt_rq, struct rq, rt); in rq_of_rt_rq()
301 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se) in rq_of_rt_se()
[all …]
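
rt.c keeps both variants of the same accessor: in the group-scheduled configuration an rt_rq can belong to a task group instead of being embedded in a struct rq, so it carries an explicit back-pointer (rt_rq->rq, wired up in init_tg_rt_entry()), while the variant at line 296 uses container_of() exactly as deadline.c does. A sketch of the back-pointer variant, again with toy types:

#include <stdio.h>

struct rq { int cpu; };
struct rt_rq { struct rq *rq; };        /* explicit back-pointer */

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
        /* A group-scheduled rt_rq is not embedded in struct rq, so
         * container_of() cannot work; follow the stored pointer. */
        return rt_rq->rq;
}

int main(void)
{
        struct rq rq = { .cpu = 1 };
        struct rt_rq rt = { .rq = &rq };        /* as init_tg_rt_entry() does */

        printf("cpu=%d\n", rq_of_rt_rq(&rt)->cpu);
        return 0;
}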

stats.h
13 rq_sched_info_arrive(struct rq *rq, unsigned long long delta) in rq_sched_info_arrive() argument
15 if (rq) { in rq_sched_info_arrive()
16 rq->rq_sched_info.run_delay += delta; in rq_sched_info_arrive()
17 rq->rq_sched_info.pcount++; in rq_sched_info_arrive()
25 rq_sched_info_depart(struct rq *rq, unsigned long long delta) in rq_sched_info_depart() argument
27 if (rq) in rq_sched_info_depart()
28 rq->rq_cpu_time += delta; in rq_sched_info_depart()
32 rq_sched_info_dequeue(struct rq *rq, unsigned long long delta) in rq_sched_info_dequeue() argument
34 if (rq) in rq_sched_info_dequeue()
35 rq->rq_sched_info.run_delay += delta; in rq_sched_info_dequeue()
[all …]
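
These stats.h helpers only accumulate: a task arriving on the runqueue adds its queue-wait delta to rq_sched_info.run_delay and bumps pcount, so run_delay / pcount yields the average wait. A toy model of that bookkeeping, assuming a hypothetical struct layout and nanosecond deltas as produced by rq_clock():

#include <stdio.h>

struct sched_info { unsigned long long run_delay, pcount; };
struct rq { struct sched_info rq_sched_info; };

static void rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
        if (rq) {                       /* stats may be compiled out */
                rq->rq_sched_info.run_delay += delta;   /* total wait, ns */
                rq->rq_sched_info.pcount++;             /* tasks that waited */
        }
}

int main(void)
{
        struct rq rq = { { 0, 0 } };

        rq_sched_info_arrive(&rq, 1500);
        rq_sched_info_arrive(&rq, 500);
        printf("avg wait: %llu ns\n",
               rq.rq_sched_info.run_delay / rq.rq_sched_info.pcount);
        return 0;
}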

core.c
127 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
248 void sched_core_enqueue(struct rq *rq, struct task_struct *p) in sched_core_enqueue() argument
250 rq->core->core_task_seq++; in sched_core_enqueue()
255 rb_add(&p->core_node, &rq->core_tree, rb_sched_core_less); in sched_core_enqueue()
258 void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) in sched_core_dequeue() argument
260 rq->core->core_task_seq++; in sched_core_dequeue()
263 rb_erase(&p->core_node, &rq->core_tree); in sched_core_dequeue()
272 if (!(flags & DEQUEUE_SAVE) && rq->nr_running == 1 && in sched_core_dequeue()
273 rq->core->core_forceidle_count && rq->curr == rq->idle) in sched_core_dequeue()
274 resched_curr(rq); in sched_core_dequeue()
[all …]
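
core.c line 127 is where the runqueues actually live: one cache-aligned struct rq per CPU, which cpu_rq(cpu) and this_rq() then index. A userspace approximation using a flat array; the kernel's per-CPU sections and accessors are considerably more involved:

#include <stdio.h>

#define NR_CPUS 4

/* Pad each runqueue to a cache line so CPUs do not false-share. */
struct rq { int cpu; unsigned int nr_running; } __attribute__((aligned(64)));

/* Stand-in for DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues). */
static struct rq runqueues[NR_CPUS];

#define cpu_rq(cpu)     (&runqueues[(cpu)])

int main(void)
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                cpu_rq(cpu)->cpu = cpu;

        printf("rq of CPU 2 belongs to CPU %d\n", cpu_rq(2)->cpu);
        return 0;
}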

stop_task.c
19 balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in balance_stop() argument
21 return sched_stop_runnable(rq); in balance_stop()
26 check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags) in check_preempt_curr_stop() argument
31 static void set_next_task_stop(struct rq *rq, struct task_struct *stop, bool first) in set_next_task_stop() argument
33 stop->se.exec_start = rq_clock_task(rq); in set_next_task_stop()
36 static struct task_struct *pick_task_stop(struct rq *rq) in pick_task_stop() argument
38 if (!sched_stop_runnable(rq)) in pick_task_stop()
41 return rq->stop; in pick_task_stop()
44 static struct task_struct *pick_next_task_stop(struct rq *rq) in pick_next_task_stop() argument
46 struct task_struct *p = pick_task_stop(rq); in pick_next_task_stop()
[all …]
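
stop_task.c implements the highest-priority scheduling class as a set of callbacks; the core picks the next task by walking the classes in priority order and taking the first non-NULL pick, which is why pick_task_stop() may return NULL while pick_task_idle() never does. A compressed sketch of that dispatch, with a hypothetical two-entry table nowhere near the full struct sched_class:

#include <stdio.h>
#include <stddef.h>

struct task { const char *comm; };
struct rq { struct task *stop, *idle; };

struct sched_class {
        struct task *(*pick_task)(struct rq *rq);
};

static struct task *pick_task_stop(struct rq *rq)
{
        return rq->stop;        /* NULL unless a stopper task is queued */
}

static struct task *pick_task_idle(struct rq *rq)
{
        return rq->idle;        /* the idle task is always available */
}

/* Highest-priority class first; the first successful pick wins. */
static const struct sched_class sched_classes[] = {
        { .pick_task = pick_task_stop },
        { .pick_task = pick_task_idle },
};

int main(void)
{
        struct task idle = { "swapper/0" };
        struct rq rq = { .stop = NULL, .idle = &idle };

        for (size_t i = 0; i < 2; i++) {
                struct task *p = sched_classes[i].pick_task(&rq);
                if (p) {
                        printf("picked %s\n", p->comm);
                        break;
                }
        }
        return 0;
}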

core_sched.c
58 struct rq *rq; in sched_core_update_cookie() local
60 rq = task_rq_lock(p, &rf); in sched_core_update_cookie()
68 SCHED_WARN_ON((p->core_cookie || cookie) && !sched_core_enabled(rq)); in sched_core_update_cookie()
71 sched_core_dequeue(rq, p, DEQUEUE_SAVE); in sched_core_update_cookie()
80 sched_core_enqueue(rq, p); in sched_core_update_cookie()
91 if (task_on_cpu(rq, p)) in sched_core_update_cookie()
92 resched_curr(rq); in sched_core_update_cookie()
94 task_rq_unlock(rq, p, &rf); in sched_core_update_cookie()
240 void __sched_core_account_forceidle(struct rq *rq) in __sched_core_account_forceidle() argument
242 const struct cpumask *smt_mask = cpu_smt_mask(cpu_of(rq)); in __sched_core_account_forceidle()
[all …]
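
sched_core_update_cookie() follows the canonical sequence for changing a scheduling attribute: pin the task to its runqueue with task_rq_lock(), dequeue it, modify the field, re-enqueue it so the core tree is re-sorted under the new cookie, and reschedule if the task is currently running. A pthread-based shape of that sequence; the queue helpers are hypothetical stand-ins, and the real lock also serializes against task migration:

#include <pthread.h>

struct task { unsigned long cookie; int queued, running; };
struct rq { pthread_mutex_t lock; };

/* Hypothetical stand-ins for the kernel's queue operations. */
static void dequeue_task(struct rq *rq, struct task *p) { (void)rq; p->queued = 0; }
static void enqueue_task(struct rq *rq, struct task *p) { (void)rq; p->queued = 1; }
static void resched_curr(struct rq *rq) { (void)rq; /* request a re-pick */ }

static void update_cookie(struct rq *rq, struct task *p, unsigned long cookie)
{
        pthread_mutex_lock(&rq->lock);          /* ~ task_rq_lock(p, &rf) */

        int queued = p->queued;
        if (queued)
                dequeue_task(rq, p);            /* take p out of the tree */

        p->cookie = cookie;                     /* safe while dequeued */

        if (queued)
                enqueue_task(rq, p);            /* reinsert under new key */
        if (p->running)
                resched_curr(rq);               /* force re-selection */

        pthread_mutex_unlock(&rq->lock);        /* ~ task_rq_unlock() */
}

int main(void)
{
        struct rq rq = { PTHREAD_MUTEX_INITIALIZER };
        struct task p = { .queued = 1, .running = 1 };

        update_cookie(&rq, &p, 42);
        return 0;
}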

idle.c
416 balance_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) in balance_idle() argument
425 static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags) in check_preempt_curr_idle() argument
427 resched_curr(rq); in check_preempt_curr_idle()
430 static void put_prev_task_idle(struct rq *rq, struct task_struct *prev) in put_prev_task_idle() argument
434 static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first) in set_next_task_idle() argument
436 update_idle_core(rq); in set_next_task_idle()
437 schedstat_inc(rq->sched_goidle); in set_next_task_idle()
441 static struct task_struct *pick_task_idle(struct rq *rq) in pick_task_idle() argument
443 return rq->idle; in pick_task_idle()
447 struct task_struct *pick_next_task_idle(struct rq *rq) in pick_next_task_idle() argument
[all …]

fair.c
357 struct rq *rq = rq_of(cfs_rq); in list_add_leaf_cfs_rq() local
358 int cpu = cpu_of(rq); in list_add_leaf_cfs_rq()
361 return rq->tmp_alone_branch == &rq->leaf_cfs_rq_list; in list_add_leaf_cfs_rq()
389 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; in list_add_leaf_cfs_rq()
399 &rq->leaf_cfs_rq_list); in list_add_leaf_cfs_rq()
404 rq->tmp_alone_branch = &rq->leaf_cfs_rq_list; in list_add_leaf_cfs_rq()
414 list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch); in list_add_leaf_cfs_rq()
419 rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list; in list_add_leaf_cfs_rq()
426 struct rq *rq = rq_of(cfs_rq); in list_del_leaf_cfs_rq() local
435 if (rq->tmp_alone_branch == &cfs_rq->leaf_cfs_rq_list) in list_del_leaf_cfs_rq()
[all …]

pelt.c
353 int update_rt_rq_load_avg(u64 now, struct rq *rq, int running) in update_rt_rq_load_avg() argument
355 if (___update_load_sum(now, &rq->avg_rt, in update_rt_rq_load_avg()
360 ___update_load_avg(&rq->avg_rt, 1); in update_rt_rq_load_avg()
361 trace_pelt_rt_tp(rq); in update_rt_rq_load_avg()
379 int update_dl_rq_load_avg(u64 now, struct rq *rq, int running) in update_dl_rq_load_avg() argument
381 if (___update_load_sum(now, &rq->avg_dl, in update_dl_rq_load_avg()
386 ___update_load_avg(&rq->avg_dl, 1); in update_dl_rq_load_avg()
387 trace_pelt_dl_tp(rq); in update_dl_rq_load_avg()
410 int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity) in update_thermal_load_avg() argument
412 if (___update_load_sum(now, &rq->avg_thermal, in update_thermal_load_avg()
[all …]
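
All of the pelt.c updaters funnel through the same two steps: ___update_load_sum() folds the elapsed time into a geometrically decaying sum, where each ~1 ms period multiplies past contribution by y with y^32 = 1/2, and ___update_load_avg() then normalizes the sum into an average. A floating-point toy of that decay; the kernel uses fixed-point arithmetic and precomputed decay tables, not libm:

#include <stdio.h>

/* PELT decay factor: contributions halve every 32 periods (y^32 = 1/2),
 * so y = 2^(-1/32). */
static const double y = 0.97857206;

int main(void)
{
        double sum = 0.0;

        /* 100 periods at 100% running: the sum approaches the
         * geometric-series limit 1 / (1 - y), about 46.7 periods. */
        for (int period = 0; period < 100; period++)
                sum = sum * y + 1.0;

        printf("sum=%.2f limit=%.2f\n", sum, 1.0 / (1.0 - y));
        return 0;
}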

stats.c
6 void __update_stats_wait_start(struct rq *rq, struct task_struct *p, in __update_stats_wait_start() argument
11 wait_start = rq_clock(rq); in __update_stats_wait_start()
20 void __update_stats_wait_end(struct rq *rq, struct task_struct *p, in __update_stats_wait_end() argument
23 u64 delta = rq_clock(rq) - schedstat_val(stats->wait_start); in __update_stats_wait_end()
47 void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p, in __update_stats_enqueue_sleeper() argument
56 u64 delta = rq_clock(rq) - sleep_start; in __update_stats_enqueue_sleeper()
74 u64 delta = rq_clock(rq) - block_start; in __update_stats_enqueue_sleeper()
126 struct rq *rq; in show_schedstat() local
132 rq = cpu_rq(cpu); in show_schedstat()
137 cpu, rq->yld_count, in show_schedstat()
[all …]

loadavg.c
78 long calc_load_fold_active(struct rq *this_rq, long adjust) in calc_load_fold_active()
233 static void calc_load_nohz_fold(struct rq *rq) in calc_load_nohz_fold() argument
237 delta = calc_load_fold_active(rq, 0); in calc_load_nohz_fold()
258 void calc_load_nohz_remote(struct rq *rq) in calc_load_nohz_remote() argument
260 calc_load_nohz_fold(rq); in calc_load_nohz_remote()
265 struct rq *this_rq = this_rq(); in calc_load_nohz_stop()
385 void calc_global_load_tick(struct rq *this_rq) in calc_global_load_tick()
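
loadavg.c maintains the classic 1/5/15-minute load averages: the helpers above fold each CPU's change in active-task count into a global delta (including CPUs that went tickless), and a periodic update then applies exponential smoothing toward the instantaneous count. A floating-point toy of the smoothing step, assuming the kernel's 5-second sampling interval; the kernel itself uses 11-bit fixed-point constants such as EXP_1 rather than exp():

#include <math.h>
#include <stdio.h>

int main(void)
{
        /* One sample every 5 s; 1-minute horizon => decay exp(-5/60). */
        const double exp_1 = exp(-5.0 / 60.0);
        double load = 0.0;

        /* Two minutes with 3 runnable tasks: load climbs toward 3. */
        for (int i = 0; i < 24; i++)
                load = load * exp_1 + 3.0 * (1.0 - exp_1);

        printf("1-min loadavg after 2 min at 3 runnable: %.2f\n", load);
        return 0;
}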

debug.c
533 print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) in print_task() argument
535 if (task_current(rq, p)) in print_task()
562 static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu) in print_rq() argument
578 print_task(m, rq, p); in print_rq()
587 struct rq *rq = cpu_rq(cpu); in print_cfs_rq() local
601 raw_spin_rq_lock_irqsave(rq, flags); in print_cfs_rq()
609 raw_spin_rq_unlock_irqrestore(rq, flags); in print_cfs_rq()
720 struct rq *rq = cpu_rq(cpu); in print_cpu() local
735 if (sizeof(rq->x) == 4) \ in print_cpu()
736 SEQ_printf(m, " .%-30s: %ld\n", #x, (long)(rq->x)); \ in print_cpu()
[all …]
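
The print_cpu() excerpt leans on debug.c's P()-style macros: #x stringifies the member name while sizeof(rq->x) selects a 32- or 64-bit format, so each field needs listing only once. A minimal version of that trick, with a hypothetical struct and macro name:

#include <stdio.h>

struct rq { unsigned int nr_running; unsigned long long clock; };

/* Stringify the member with #x; pick the printf format by its size. */
#define P(rq, x)                                                        \
        do {                                                            \
                if (sizeof((rq)->x) == 4)                               \
                        printf("  .%-30s: %u\n", #x,                    \
                               (unsigned int)(rq)->x);                  \
                else                                                    \
                        printf("  .%-30s: %llu\n", #x,                  \
                               (unsigned long long)(rq)->x);            \
        } while (0)

int main(void)
{
        struct rq rq = { .nr_running = 2, .clock = 123456789ULL };

        P(&rq, nr_running);
        P(&rq, clock);
        return 0;
}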

membarrier.c
237 struct rq *rq = this_rq(); in membarrier_update_current_mm() local
242 if (READ_ONCE(rq->membarrier_state) == membarrier_state) in membarrier_update_current_mm()
244 WRITE_ONCE(rq->membarrier_state, membarrier_state); in membarrier_update_current_mm()
471 struct rq *rq = cpu_rq(cpu); in sync_runqueues_membarrier_state() local
474 p = rcu_dereference(rq->curr); in sync_runqueues_membarrier_state()
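
membarrier_update_current_mm() goes through READ_ONCE()/WRITE_ONCE() because other CPUs may inspect rq->membarrier_state without holding the runqueue lock; the macros force a single, untorn access that the compiler cannot cache, split, or replay. A simplified rendering of the idea; the kernel's real macros also cope with non-scalar sizes:

#include <stdio.h>

#define READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

static int membarrier_state;

int main(void)
{
        WRITE_ONCE(membarrier_state, 0x2);      /* one untorn store */
        if (READ_ONCE(membarrier_state) == 0x2) /* one untorn load */
                printf("state synced\n");
        return 0;
}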

cputime.c
236 struct rq *rq = this_rq(); in account_idle_time() local
238 if (atomic_read(&rq->nr_iowait) > 0) in account_idle_time()
309 struct rq *rq; in read_sum_exec_runtime() local
311 rq = task_rq_lock(t, &rf); in read_sum_exec_runtime()
313 task_rq_unlock(rq, t, &rf); in read_sum_exec_runtime()
1006 struct rq *rq; in kcpustat_field() local
1012 rq = cpu_rq(cpu); in kcpustat_field()
1018 curr = rcu_dereference(rq->curr); in kcpustat_field()
1091 struct rq *rq; in kcpustat_cpu_fetch() local
1099 rq = cpu_rq(cpu); in kcpustat_cpu_fetch()
[all …]

topology.c
478 void rq_attach_root(struct rq *rq, struct root_domain *rd) in rq_attach_root() argument
483 raw_spin_rq_lock_irqsave(rq, flags); in rq_attach_root()
485 if (rq->rd) { in rq_attach_root()
486 old_rd = rq->rd; in rq_attach_root()
488 if (cpumask_test_cpu(rq->cpu, old_rd->online)) in rq_attach_root()
489 set_rq_offline(rq); in rq_attach_root()
491 cpumask_clear_cpu(rq->cpu, old_rd->span); in rq_attach_root()
503 rq->rd = rd; in rq_attach_root()
505 cpumask_set_cpu(rq->cpu, rd->span); in rq_attach_root()
506 if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) in rq_attach_root()
[all …]

psi.c
1000 struct rq *rq; in psi_memstall_enter() local
1013 rq = this_rq_lock_irq(&rf); in psi_memstall_enter()
1018 rq_unlock_irq(rq, &rf); in psi_memstall_enter()
1031 struct rq *rq; in psi_memstall_leave() local
1043 rq = this_rq_lock_irq(&rf); in psi_memstall_leave()
1048 rq_unlock_irq(rq, &rf); in psi_memstall_leave()
1100 struct rq *rq; in cgroup_move_task() local
1111 rq = task_rq_lock(task, &rf); in cgroup_move_task()
1148 task_rq_unlock(rq, task, &rf); in cgroup_move_task()
1174 struct rq *rq = cpu_rq(cpu); in psi_cgroup_restart() local
[all …]

cpufreq_schedutil.c
167 struct rq *rq = cpu_rq(sg_cpu->cpu); in sugov_get_util() local
170 sg_cpu->bw_dl = cpu_bw_dl(rq); in sugov_get_util()

/kernel/trace/
blktrace.c
807 blk_trace_request_get_cgid(struct request *rq) in blk_trace_request_get_cgid() argument
809 if (!rq->bio) in blk_trace_request_get_cgid()
812 return blk_trace_bio_get_cgid(rq->q, rq->bio); in blk_trace_request_get_cgid()
831 static void blk_add_trace_rq(struct request *rq, blk_status_t error, in blk_add_trace_rq() argument
837 bt = rcu_dereference(rq->q->blk_trace); in blk_add_trace_rq()
843 if (blk_rq_is_passthrough(rq)) in blk_add_trace_rq()
848 __blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, rq->cmd_flags, in blk_add_trace_rq()
853 static void blk_add_trace_rq_insert(void *ignore, struct request *rq) in blk_add_trace_rq_insert() argument
855 blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT, in blk_add_trace_rq_insert()
856 blk_trace_request_get_cgid(rq)); in blk_add_trace_rq_insert()
[all …]
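
The blktrace hooks guard two holes visible in the excerpt: a request may carry no bio (so there is no cgroup id to report), and q->blk_trace can be torn down while requests are still in flight, so the pointer is sampled once with rcu_dereference() and NULL-checked before use. A shape-only sketch of that guard, with userspace stand-ins for the RCU primitives, which in the kernel also defer freeing until all readers are done:

#include <stdio.h>

/* Userspace stand-ins; real RCU keeps the object alive for readers. */
#define rcu_read_lock()         do { } while (0)
#define rcu_read_unlock()       do { } while (0)
#define rcu_dereference(p)      (p)

struct blk_trace { int enabled; };
struct request_queue { struct blk_trace *blk_trace; };

static void blk_add_trace_rq(struct request_queue *q)
{
        struct blk_trace *bt;

        rcu_read_lock();
        bt = rcu_dereference(q->blk_trace);     /* sample the pointer once */
        if (!bt) {                              /* tracing is off: bail */
                rcu_read_unlock();
                return;
        }
        printf("emit trace record\n");
        rcu_read_unlock();
}

int main(void)
{
        struct request_queue q = { .blk_trace = NULL };

        blk_add_trace_rq(&q);                   /* silently skipped */
        return 0;
}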