1 #include "sched.h"
2 #include "walt.h"
3
4 /*
5 * stop-task scheduling class.
6 *
7 * The stop task is the highest priority task in the system, it preempts
8 * everything and will be preempted by nothing.
9 *
10 * See kernel/stop_machine.c
11 */
12
#ifdef CONFIG_SMP
static int select_task_rq_stop(struct task_struct *p, int cpu, int sd_flag,
			       int flags)
{
	/* Stop tasks are per-CPU and never migrate: stay where we are. */
	return task_cpu(p);
}
#endif /* CONFIG_SMP */
20
static void check_preempt_curr_stop(struct rq *rq, struct task_struct *p,
				    int flags)
{
	/* The stop task is the highest-priority task; nothing preempts it. */
}
26
27 static struct task_struct *
pick_next_task_stop(struct rq * rq,struct task_struct * prev)28 pick_next_task_stop(struct rq *rq, struct task_struct *prev)
29 {
30 struct task_struct *stop = rq->stop;
31
32 if (!stop || !task_on_rq_queued(stop))
33 return NULL;
34
35 put_prev_task(rq, prev);
36
37 stop->se.exec_start = rq_clock_task(rq);
38
39 return stop;
40 }
41
static void enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
	/*
	 * No per-class runqueue to maintain: just account the task in
	 * nr_running and in the WALT cumulative runnable average.
	 */
	add_nr_running(rq, 1);
	walt_inc_cumulative_runnable_avg(rq, p);
}
48
static void dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
{
	/* Mirror of enqueue_task_stop(): undo the accounting. */
	sub_nr_running(rq, 1);
	walt_dec_cumulative_runnable_avg(rq, p);
}
55
static void yield_task_stop(struct rq *rq)
{
	BUG(); /* the stop task should never yield, it's pointless */
}
60
put_prev_task_stop(struct rq * rq,struct task_struct * prev)61 static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
62 {
63 struct task_struct *curr = rq->curr;
64 u64 delta_exec;
65
66 delta_exec = rq_clock_task(rq) - curr->se.exec_start;
67 if (unlikely((s64)delta_exec < 0))
68 delta_exec = 0;
69
70 schedstat_set(curr->se.statistics.exec_max,
71 max(curr->se.statistics.exec_max, delta_exec));
72
73 curr->se.sum_exec_runtime += delta_exec;
74 account_group_exec_runtime(curr, delta_exec);
75
76 curr->se.exec_start = rq_clock_task(rq);
77 cpuacct_charge(curr, delta_exec);
78 }
79
static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
{
	/* Stop tasks have no timeslice to account on the periodic tick. */
}
83
set_curr_task_stop(struct rq * rq)84 static void set_curr_task_stop(struct rq *rq)
85 {
86 struct task_struct *stop = rq->stop;
87
88 stop->se.exec_start = rq_clock_task(rq);
89 }
90
static void switched_to_stop(struct rq *rq, struct task_struct *p)
{
	BUG(); /* it is impossible to change to this class */
}
95
static void prio_changed_stop(struct rq *rq, struct task_struct *p,
			      int oldprio)
{
	BUG(); /* how!?, what priority? */
}
101
static unsigned int get_rr_interval_stop(struct rq *rq,
					 struct task_struct *task)
{
	/* The stop class has no round-robin timeslice. */
	return 0;
}
107
static void update_curr_stop(struct rq *rq)
{
	/* Runtime is accounted in put_prev_task_stop(); nothing to do here. */
}
111
112 /*
113 * Simple, special scheduling class for the per-CPU stop tasks:
114 */
115 const struct sched_class stop_sched_class = {
116 .next = &dl_sched_class,
117
118 .enqueue_task = enqueue_task_stop,
119 .dequeue_task = dequeue_task_stop,
120 .yield_task = yield_task_stop,
121
122 .check_preempt_curr = check_preempt_curr_stop,
123
124 .pick_next_task = pick_next_task_stop,
125 .put_prev_task = put_prev_task_stop,
126
127 #ifdef CONFIG_SMP
128 .select_task_rq = select_task_rq_stop,
129 #endif
130
131 .set_curr_task = set_curr_task_stop,
132 .task_tick = task_tick_stop,
133
134 .get_rr_interval = get_rr_interval_stop,
135
136 .prio_changed = prio_changed_stop,
137 .switched_to = switched_to_stop,
138 .update_curr = update_curr_stop,
139 };
140