Lines matching refs: rq — each entry shows the source line number, the matching line, and the enclosing function; a trailing "argument" or "local" notes how rq is bound at that line.

8 static inline int rt_overloaded(struct rq *rq)  in rt_overloaded()  argument
10 return atomic_read(&rq->rd->rto_count); in rt_overloaded()
13 static inline void rt_set_overload(struct rq *rq) in rt_set_overload() argument
15 if (!rq->online) in rt_set_overload()
18 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask); in rt_set_overload()
27 atomic_inc(&rq->rd->rto_count); in rt_set_overload()
30 static inline void rt_clear_overload(struct rq *rq) in rt_clear_overload() argument
32 if (!rq->online) in rt_clear_overload()
36 atomic_dec(&rq->rd->rto_count); in rt_clear_overload()
37 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask); in rt_clear_overload()
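Taken together, the lines above outline how a runqueue advertises RT overload through its root domain: a bit in rd->rto_mask plus an atomic rd->rto_count that other CPUs test cheaply before scanning the mask. A plausible reconstruction of the three helpers, assuming a ~2.6.29-era kernel/sched_rt.c (the non-matching lines are filled in from memory and should be checked against the real source):

static inline int rt_overloaded(struct rq *rq)
{
        return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
        /*
         * Make sure the mask is visible before we bump the count;
         * rto_count is what other CPUs check to decide whether
         * scanning rto_mask is worthwhile at all.
         */
        wmb();
        atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        /* the order here really doesn't matter */
        atomic_dec(&rq->rd->rto_count);
        cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}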
40 static void update_rt_migration(struct rq *rq) in update_rt_migration() argument
42 if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) { in update_rt_migration()
43 if (!rq->rt.overloaded) { in update_rt_migration()
44 rt_set_overload(rq); in update_rt_migration()
45 rq->rt.overloaded = 1; in update_rt_migration()
47 } else if (rq->rt.overloaded) { in update_rt_migration()
48 rt_clear_overload(rq); in update_rt_migration()
49 rq->rt.overloaded = 0; in update_rt_migration()
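Lines 42-49 already give the whole decision: a runqueue is marked overloaded only while it holds more than one runnable RT task and at least one of them is migratable. A sketch of the function as those lines suggest it reads:

static void update_rt_migration(struct rq *rq)
{
        if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
                if (!rq->rt.overloaded) {
                        rt_set_overload(rq);
                        rq->rt.overloaded = 1;
                }
        } else if (rq->rt.overloaded) {
                rt_clear_overload(rq);
                rq->rt.overloaded = 0;
        }
}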
79 #define for_each_leaf_rt_rq(rt_rq, rq) \ argument
80 list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
82 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq()
84 return rt_rq->rq; in rq_of_rt_rq()
176 #define for_each_leaf_rt_rq(rt_rq, rq) \ argument
177 for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
179 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq()
181 return container_of(rt_rq, struct rq, rt); in rq_of_rt_rq()
187 struct rq *rq = task_rq(p); in rt_rq_of_se() local
189 return &rq->rt; in rt_rq_of_se()
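The two definitions of for_each_leaf_rt_rq() and rq_of_rt_rq() at lines 79-84 and 176-189 are the group-scheduling and flat variants of the same accessors; which pair is compiled depends on CONFIG_RT_GROUP_SCHED. A hedged reconstruction of both branches, with rt_rq_of_se() from the flat case:

#ifdef CONFIG_RT_GROUP_SCHED
#define for_each_leaf_rt_rq(rt_rq, rq) \
        list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
        return rt_rq->rq;       /* group rt_rqs carry an explicit back-pointer */
}
#else /* !CONFIG_RT_GROUP_SCHED */
#define for_each_leaf_rt_rq(rt_rq, rq) \
        for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)      /* exactly one iteration */

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
        return container_of(rt_rq, struct rq, rt);      /* rt_rq is embedded in the rq */
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
        struct task_struct *p = rt_task_of(rt_se);
        struct rq *rq = task_rq(p);

        return &rq->rt;
}
#endif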
292 static void __disable_runtime(struct rq *rq) in __disable_runtime() argument
294 struct root_domain *rd = rq->rd; in __disable_runtime()
300 for_each_leaf_rt_rq(rt_rq, rq) { in __disable_runtime()
369 static void disable_runtime(struct rq *rq) in disable_runtime() argument
373 spin_lock_irqsave(&rq->lock, flags); in disable_runtime()
374 __disable_runtime(rq); in disable_runtime()
375 spin_unlock_irqrestore(&rq->lock, flags); in disable_runtime()
378 static void __enable_runtime(struct rq *rq) in __enable_runtime() argument
388 for_each_leaf_rt_rq(rt_rq, rq) { in __enable_runtime()
401 static void enable_runtime(struct rq *rq) in enable_runtime() argument
405 spin_lock_irqsave(&rq->lock, flags); in enable_runtime()
406 __enable_runtime(rq); in enable_runtime()
407 spin_unlock_irqrestore(&rq->lock, flags); in enable_runtime()
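Lines 369-407 show that disable_runtime() and enable_runtime() are thin irq-safe wrappers taking rq->lock around the __disable_runtime()/__enable_runtime() workers, which in turn walk every leaf rt_rq of the runqueue. A sketch of the two wrappers, assumed to match the listing:

static void disable_runtime(struct rq *rq)
{
        unsigned long flags;

        spin_lock_irqsave(&rq->lock, flags);
        __disable_runtime(rq);
        spin_unlock_irqrestore(&rq->lock, flags);
}

static void enable_runtime(struct rq *rq)
{
        unsigned long flags;

        spin_lock_irqsave(&rq->lock, flags);
        __enable_runtime(rq);
        spin_unlock_irqrestore(&rq->lock, flags);
}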
441 struct rq *rq = rq_of_rt_rq(rt_rq); in do_sched_rt_period_timer() local
443 spin_lock(&rq->lock); in do_sched_rt_period_timer()
464 spin_unlock(&rq->lock); in do_sched_rt_period_timer()
512 static void update_curr_rt(struct rq *rq) in update_curr_rt() argument
514 struct task_struct *curr = rq->curr; in update_curr_rt()
522 delta_exec = rq->clock - curr->se.exec_start; in update_curr_rt()
531 curr->se.exec_start = rq->clock; in update_curr_rt()
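Lines 512-531 cover the rq-related part of update_curr_rt(): the delta between rq->clock and the task's exec_start is charged to the current RT task and the window is restarted. A simplified sketch of that part; the per-rt_rq runtime/throttling accounting of the real function is omitted, and helpers such as task_has_rt_policy() and cpuacct_charge() are assumed from the same era:

static void update_curr_rt(struct rq *rq)
{
        struct task_struct *curr = rq->curr;
        u64 delta_exec;

        if (!task_has_rt_policy(curr))
                return;

        delta_exec = rq->clock - curr->se.exec_start;
        if (unlikely((s64)delta_exec < 0))
                delta_exec = 0;

        curr->se.sum_exec_runtime += delta_exec;
        curr->se.exec_start = rq->clock;
        cpuacct_charge(curr, delta_exec);

        /* ... per-rt_rq runtime accounting and throttling follows ... */
}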
558 struct rq *rq = rq_of_rt_rq(rt_rq); in inc_rt_tasks() local
563 if (rq->online) in inc_rt_tasks()
564 cpupri_set(&rq->rd->cpupri, rq->cpu, in inc_rt_tasks()
571 struct rq *rq = rq_of_rt_rq(rt_rq); in inc_rt_tasks() local
573 rq->rt.rt_nr_migratory++; in inc_rt_tasks()
615 struct rq *rq = rq_of_rt_rq(rt_rq); in dec_rt_tasks() local
616 rq->rt.rt_nr_migratory--; in dec_rt_tasks()
620 struct rq *rq = rq_of_rt_rq(rt_rq); in dec_rt_tasks() local
622 if (rq->online) in dec_rt_tasks()
623 cpupri_set(&rq->rd->cpupri, rq->cpu, in dec_rt_tasks()
712 static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup) in enqueue_task_rt() argument
721 inc_cpu_load(rq, p->se.load.weight); in enqueue_task_rt()
724 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep) in dequeue_task_rt() argument
728 update_curr_rt(rq); in dequeue_task_rt()
731 dec_cpu_load(rq, p->se.load.weight); in dequeue_task_rt()
752 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head) in requeue_task_rt() argument
763 static void yield_task_rt(struct rq *rq) in yield_task_rt() argument
765 requeue_task_rt(rq, rq->curr, 0); in yield_task_rt()
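Lines 712-765 show the enqueue/dequeue class hooks adjusting the CPU load alongside the RT-specific queueing, and yield implemented as a requeue of the current task behind its priority peers. A reconstruction under the same assumptions:

static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
{
        struct sched_rt_entity *rt_se = &p->rt;

        if (wakeup)
                rt_se->timeout = 0;

        enqueue_rt_entity(rt_se);

        inc_cpu_load(rq, p->se.load.weight);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
        struct sched_rt_entity *rt_se = &p->rt;

        update_curr_rt(rq);
        dequeue_rt_entity(rt_se);

        dec_cpu_load(rq, p->se.load.weight);
}

static void yield_task_rt(struct rq *rq)
{
        /* head == 0: requeue current at the tail of its priority list */
        requeue_task_rt(rq, rq->curr, 0);
}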
773 struct rq *rq = task_rq(p); in select_task_rq_rt() local
792 if (unlikely(rt_task(rq->curr)) && in select_task_rq_rt()
806 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) in check_preempt_equal_prio() argument
810 if (rq->curr->rt.nr_cpus_allowed == 1) in check_preempt_equal_prio()
817 && cpupri_find(&rq->rd->cpupri, p, mask)) in check_preempt_equal_prio()
820 if (!cpupri_find(&rq->rd->cpupri, rq->curr, mask)) in check_preempt_equal_prio()
828 requeue_task_rt(rq, p, 1); in check_preempt_equal_prio()
829 resched_task(rq->curr); in check_preempt_equal_prio()
839 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int sync) in check_preempt_curr_rt() argument
841 if (p->prio < rq->curr->prio) { in check_preempt_curr_rt()
842 resched_task(rq->curr); in check_preempt_curr_rt()
859 if (p->prio == rq->curr->prio && !need_resched()) in check_preempt_curr_rt()
860 check_preempt_equal_prio(rq, p); in check_preempt_curr_rt()
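Lines 839-860 give check_preempt_curr_rt() almost verbatim: a strictly higher-priority waker preempts immediately, while an equal-priority waker only triggers check_preempt_equal_prio(), which (lines 806-829) uses cpupri to see whether a pinned task can push the migratable one aside. A reconstruction of the outer check:

static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int sync)
{
        if (p->prio < rq->curr->prio) {
                resched_task(rq->curr);
                return;
        }

#ifdef CONFIG_SMP
        /*
         * Equal priority: if the woken task cannot migrate but current
         * can, reschedule so the push logic gets a chance to move
         * current elsewhere and make room.
         */
        if (p->prio == rq->curr->prio && !need_resched())
                check_preempt_equal_prio(rq, p);
#endif
}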
864 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq, in pick_next_rt_entity() argument
881 static struct task_struct *pick_next_task_rt(struct rq *rq) in pick_next_task_rt() argument
887 rt_rq = &rq->rt; in pick_next_task_rt()
896 rt_se = pick_next_rt_entity(rq, rt_rq); in pick_next_task_rt()
902 p->se.exec_start = rq->clock; in pick_next_task_rt()
906 static void put_prev_task_rt(struct rq *rq, struct task_struct *p) in put_prev_task_rt() argument
908 update_curr_rt(rq); in put_prev_task_rt()
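Lines 864-908 show pick_next_task_rt() starting from rq->rt, descending through any group rt_rqs via pick_next_rt_entity(), and stamping exec_start from rq->clock, with put_prev_task_rt() folding the outgoing task's runtime back in. A reconstruction:

static struct task_struct *pick_next_task_rt(struct rq *rq)
{
        struct sched_rt_entity *rt_se;
        struct task_struct *p;
        struct rt_rq *rt_rq;

        rt_rq = &rq->rt;

        if (unlikely(!rt_rq->rt_nr_running))
                return NULL;

        if (rt_rq_throttled(rt_rq))
                return NULL;

        do {
                rt_se = pick_next_rt_entity(rq, rt_rq);
                BUG_ON(!rt_se);
                rt_rq = group_rt_rq(rt_se);
        } while (rt_rq);

        p = rt_task_of(rt_se);
        p->se.exec_start = rq->clock;

        return p;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
        update_curr_rt(rq);
        p->se.exec_start = 0;
}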
917 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
919 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) in pick_rt_task() argument
921 if (!task_running(rq, p) && in pick_rt_task()
929 static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu) in pick_next_highest_task_rt() argument
937 for_each_leaf_rt_rq(rt_rq, rq) { in pick_next_highest_task_rt()
947 if (pick_rt_task(rq, p, cpu)) { in pick_next_highest_task_rt()
1040 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) in find_lock_lowest_rq() argument
1042 struct rq *lowest_rq = NULL; in find_lock_lowest_rq()
1049 if ((cpu == -1) || (cpu == rq->cpu)) in find_lock_lowest_rq()
1055 if (double_lock_balance(rq, lowest_rq)) { in find_lock_lowest_rq()
1062 if (unlikely(task_rq(task) != rq || in find_lock_lowest_rq()
1065 task_running(rq, task) || in find_lock_lowest_rq()
1079 double_unlock_balance(rq, lowest_rq); in find_lock_lowest_rq()
1091 static int push_rt_task(struct rq *rq) in push_rt_task() argument
1094 struct rq *lowest_rq; in push_rt_task()
1098 if (!rq->rt.overloaded) in push_rt_task()
1101 next_task = pick_next_highest_task_rt(rq, -1); in push_rt_task()
1106 if (unlikely(next_task == rq->curr)) { in push_rt_task()
1116 if (unlikely(next_task->prio < rq->curr->prio)) { in push_rt_task()
1117 resched_task(rq->curr); in push_rt_task()
1125 lowest_rq = find_lock_lowest_rq(next_task, rq); in push_rt_task()
1133 task = pick_next_highest_task_rt(rq, -1); in push_rt_task()
1142 deactivate_task(rq, next_task, 0); in push_rt_task()
1148 double_unlock_balance(rq, lowest_rq); in push_rt_task()
1167 static void push_rt_tasks(struct rq *rq) in push_rt_tasks() argument
1170 while (push_rt_task(rq)) in push_rt_tasks()
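Lines 1091-1170 sketch the push path: push_rt_task() bails out unless the runqueue is overloaded, picks the next-highest pushable task, finds and double-locks a lower-priority target runqueue via find_lock_lowest_rq(), and moves the task; push_rt_tasks() repeats that until nothing moves. A condensed reconstruction of the overall shape (the get/put_task_struct reference handling and the retry path after a failed lock are trimmed):

static int push_rt_task(struct rq *rq)
{
        struct task_struct *next_task;
        struct rq *lowest_rq;

        if (!rq->rt.overloaded)
                return 0;

        next_task = pick_next_highest_task_rt(rq, -1);
        if (!next_task)
                return 0;

        /*
         * If the pushable task actually outranks current, it should
         * preempt here rather than be pushed away.
         */
        if (unlikely(next_task->prio < rq->curr->prio)) {
                resched_task(rq->curr);
                return 0;
        }

        /* May drop rq->lock while double-locking the target runqueue. */
        lowest_rq = find_lock_lowest_rq(next_task, rq);
        if (!lowest_rq)
                return 0;

        deactivate_task(rq, next_task, 0);
        set_task_cpu(next_task, lowest_rq->cpu);
        activate_task(lowest_rq, next_task, 0);
        resched_task(lowest_rq->curr);

        double_unlock_balance(rq, lowest_rq);
        return 1;
}

static void push_rt_tasks(struct rq *rq)
{
        /* push_rt_task() returns non-zero as long as tasks keep moving */
        while (push_rt_task(rq))
                ;
}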
1174 static int pull_rt_task(struct rq *this_rq) in pull_rt_task()
1178 struct rq *src_rq; in pull_rt_task()
1260 static void pre_schedule_rt(struct rq *rq, struct task_struct *prev) in pre_schedule_rt() argument
1263 if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio) in pre_schedule_rt()
1264 pull_rt_task(rq); in pre_schedule_rt()
1267 static void post_schedule_rt(struct rq *rq) in post_schedule_rt() argument
1276 if (unlikely(rq->rt.overloaded)) { in post_schedule_rt()
1277 spin_lock_irq(&rq->lock); in post_schedule_rt()
1278 push_rt_tasks(rq); in post_schedule_rt()
1279 spin_unlock_irq(&rq->lock); in post_schedule_rt()
1287 static void task_wake_up_rt(struct rq *rq, struct task_struct *p) in task_wake_up_rt() argument
1289 if (!task_running(rq, p) && in task_wake_up_rt()
1290 !test_tsk_need_resched(rq->curr) && in task_wake_up_rt()
1291 rq->rt.overloaded) in task_wake_up_rt()
1292 push_rt_tasks(rq); in task_wake_up_rt()
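Lines 1267-1292 show both the post-context-switch and wakeup hooks funnelling into push_rt_tasks() when the runqueue is overloaded; post_schedule_rt() has to retake rq->lock because it runs after the lock was released in the switch path. A reconstruction:

static void post_schedule_rt(struct rq *rq)
{
        /*
         * If more than one RT task is queued, try to push the extras
         * to other CPUs. The lock was released by the switch path, so
         * it must be reacquired here.
         */
        if (unlikely(rq->rt.overloaded)) {
                spin_lock_irq(&rq->lock);
                push_rt_tasks(rq);
                spin_unlock_irq(&rq->lock);
        }
}

/*
 * If the woken task is not running here and no reschedule is pending,
 * try to push tasks away now.
 */
static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
{
        if (!task_running(rq, p) &&
            !test_tsk_need_resched(rq->curr) &&
            rq->rt.overloaded)
                push_rt_tasks(rq);
}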
1296 load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest, in load_balance_rt()
1306 move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest, in move_one_task_rt()
1325 struct rq *rq = task_rq(p); in set_cpus_allowed_rt() local
1328 rq->rt.rt_nr_migratory++; in set_cpus_allowed_rt()
1330 BUG_ON(!rq->rt.rt_nr_migratory); in set_cpus_allowed_rt()
1331 rq->rt.rt_nr_migratory--; in set_cpus_allowed_rt()
1334 update_rt_migration(rq); in set_cpus_allowed_rt()
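Lines 1325-1334 are the migratory bookkeeping inside set_cpus_allowed_rt(): when a queued RT task's affinity weight crosses the one-CPU boundary, rt_nr_migratory is adjusted and update_rt_migration() re-evaluates the overload state. A reconstruction, with the trailing cpumask copy assumed from the same era:

static void set_cpus_allowed_rt(struct task_struct *p,
                                const struct cpumask *new_mask)
{
        int weight = cpumask_weight(new_mask);

        BUG_ON(!rt_task(p));

        /*
         * Only queued tasks whose affinity weight actually changes
         * affect the runqueue's migratory count.
         */
        if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
                struct rq *rq = task_rq(p);

                if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
                        rq->rt.rt_nr_migratory++;
                } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
                        BUG_ON(!rq->rt.rt_nr_migratory);
                        rq->rt.rt_nr_migratory--;
                }

                update_rt_migration(rq);
        }

        cpumask_copy(&p->cpus_allowed, new_mask);
        p->rt.nr_cpus_allowed = weight;
}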
1342 static void rq_online_rt(struct rq *rq) in rq_online_rt() argument
1344 if (rq->rt.overloaded) in rq_online_rt()
1345 rt_set_overload(rq); in rq_online_rt()
1347 __enable_runtime(rq); in rq_online_rt()
1349 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio); in rq_online_rt()
1353 static void rq_offline_rt(struct rq *rq) in rq_offline_rt() argument
1355 if (rq->rt.overloaded) in rq_offline_rt()
1356 rt_clear_overload(rq); in rq_offline_rt()
1358 __disable_runtime(rq); in rq_offline_rt()
1360 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID); in rq_offline_rt()
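Lines 1342-1360 are nearly the entire bodies of the online/offline callbacks: they mirror each other, advertising or withdrawing the CPU's overload state, RT runtime, and cpupri entry. A reconstruction:

/* Assumes rq->lock is held */
static void rq_online_rt(struct rq *rq)
{
        if (rq->rt.overloaded)
                rt_set_overload(rq);

        __enable_runtime(rq);

        cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio);
}

/* Assumes rq->lock is held */
static void rq_offline_rt(struct rq *rq)
{
        if (rq->rt.overloaded)
                rt_clear_overload(rq);

        __disable_runtime(rq);

        cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
}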
1367 static void switched_from_rt(struct rq *rq, struct task_struct *p, in switched_from_rt() argument
1377 if (!rq->rt.rt_nr_running) in switched_from_rt()
1378 pull_rt_task(rq); in switched_from_rt()
1396 static void switched_to_rt(struct rq *rq, struct task_struct *p, in switched_to_rt() argument
1410 if (rq->rt.overloaded && push_rt_task(rq) && in switched_to_rt()
1412 rq != task_rq(p)) in switched_to_rt()
1415 if (check_resched && p->prio < rq->curr->prio) in switched_to_rt()
1416 resched_task(rq->curr); in switched_to_rt()
1424 static void prio_changed_rt(struct rq *rq, struct task_struct *p, in prio_changed_rt() argument
1434 pull_rt_task(rq); in prio_changed_rt()
1441 if (p->prio > rq->rt.highest_prio && rq->curr == p) in prio_changed_rt()
1454 if (p->prio < rq->curr->prio) in prio_changed_rt()
1455 resched_task(rq->curr); in prio_changed_rt()
1459 static void watchdog(struct rq *rq, struct task_struct *p) in watchdog() argument
1479 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued) in task_tick_rt() argument
1481 update_curr_rt(rq); in task_tick_rt()
1483 watchdog(rq, p); in task_tick_rt()
1502 requeue_task_rt(rq, p, 0); in task_tick_rt()
1507 static void set_curr_task_rt(struct rq *rq) in set_curr_task_rt() argument
1509 struct task_struct *p = rq->curr; in set_curr_task_rt()
1511 p->se.exec_start = rq->clock; in set_curr_task_rt()
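Lines 1479-1511 close the class: task_tick_rt() keeps runtime accounting and the RT watchdog current and, for round-robin tasks whose timeslice has expired, requeues them behind their priority peers; set_curr_task_rt() just restarts the exec_start window. A reconstruction of both, assuming the DEF_TIMESLICE constant of that era:

static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
{
        update_curr_rt(rq);

        watchdog(rq, p);

        /*
         * RR tasks need timeslice management; FIFO tasks have no
         * timeslices.
         */
        if (p->policy != SCHED_RR)
                return;

        if (--p->rt.time_slice)
                return;

        p->rt.time_slice = DEF_TIMESLICE;

        /*
         * Requeue to the end of the queue if we are not the only
         * element on the queue:
         */
        if (p->rt.run_list.prev != p->rt.run_list.next) {
                requeue_task_rt(rq, p, 0);
                set_tsk_need_resched(p);
        }
}

static void set_curr_task_rt(struct rq *rq)
{
        struct task_struct *p = rq->curr;

        p->se.exec_start = rq->clock;
}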