1 /* SPDX-License-Identifier: GPL-2.0 */
2 
3 /*
4  * Only give sleepers 50% of their service deficit. This allows
5  * them to run sooner, but does not allow tons of sleepers to
6  * rip the spread apart.
7  */
8 SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)
9 
10 /*
11  * Using the avg_vruntime, do the right thing and preserve lag across
12  * sleep+wake cycles. EEVDF placement strategy #1, #2 if disabled.
13  */
14 SCHED_FEAT(PLACE_LAG, true)
15 SCHED_FEAT(PLACE_DEADLINE_INITIAL, true)
16 SCHED_FEAT(RUN_TO_PARITY, true)
17 
18 /*
19  * Prefer to schedule the task we woke last (assuming it failed
20  * wakeup-preemption), since its likely going to consume data we
21  * touched, increases cache locality.
22  */
23 SCHED_FEAT(NEXT_BUDDY, false)
24 
25 /*
26  * Consider buddies to be cache hot, decreases the likeliness of a
27  * cache buddy being migrated away, increases cache locality.
28  */
29 SCHED_FEAT(CACHE_HOT_BUDDY, true)
30 
31 /*
32  * Allow wakeup-time preemption of the current task:
33  */
34 SCHED_FEAT(WAKEUP_PREEMPTION, true)
35 
36 SCHED_FEAT(HRTICK, false)
37 SCHED_FEAT(HRTICK_DL, false)
38 SCHED_FEAT(DOUBLE_TICK, false)
39 
40 /*
41  * Decrement CPU capacity based on time not spent running tasks
42  */
43 SCHED_FEAT(NONTASK_CAPACITY, true)
44 
45 #ifdef CONFIG_PREEMPT_RT
46 SCHED_FEAT(TTWU_QUEUE, false)
47 #else
48 
49 /*
50  * Queue remote wakeups on the target CPU and process them
51  * using the scheduler IPI. Reduces rq->lock contention/bounces.
52  */
53 SCHED_FEAT(TTWU_QUEUE, true)
54 #endif
55 
56 /*
57  * When doing wakeups, attempt to limit superfluous scans of the LLC domain.
58  */
59 SCHED_FEAT(SIS_PROP, false)
60 SCHED_FEAT(SIS_UTIL, true)
61 
62 /*
63  * Issue a WARN when we do multiple update_rq_clock() calls
64  * in a single rq->lock section. Default disabled because the
65  * annotations are not complete.
66  */
67 SCHED_FEAT(WARN_DOUBLE_CLOCK, false)
68 
69 #ifdef HAVE_RT_PUSH_IPI
70 /*
71  * In order to avoid a thundering herd attack of CPUs that are
72  * lowering their priorities at the same time, and there being
73  * a single CPU that has an RT task that can migrate and is waiting
74  * to run, where the other CPUs will try to take that CPUs
75  * rq lock and possibly create a large contention, sending an
76  * IPI to that CPU and let that CPU push the RT task to where
77  * it should go may be a better scenario.
78  */
79 SCHED_FEAT(RT_PUSH_IPI, true)
80 #endif
81 
82 SCHED_FEAT(RT_RUNTIME_SHARE, false)
83 SCHED_FEAT(LB_MIN, false)
84 SCHED_FEAT(ATTACH_AGE_LOAD, true)
85 
86 SCHED_FEAT(WA_IDLE, true)
87 SCHED_FEAT(WA_WEIGHT, true)
88 SCHED_FEAT(WA_BIAS, true)
89 
90 /*
91  * UtilEstimation. Use estimated CPU utilization.
92  */
93 SCHED_FEAT(UTIL_EST, true)
94 SCHED_FEAT(UTIL_EST_FASTUP, true)
95 
96 SCHED_FEAT(LATENCY_WARN, false)
97 
98 SCHED_FEAT(HZ_BW, true)
99