/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 * Author:
 *        Arjan van de Ven <arjan@linux.intel.com>
 *
 * This code is licensed under the GPL version 2 as described
 * in the COPYING file that accompanies the Linux Kernel.
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/stat.h>
#include <linux/math64.h>
#include <linux/cpu.h>

/*
 * Please note when changing the tuning values:
 * If (MAX_INTERESTING-1) * RESOLUTION > UINT_MAX, the result of
 * a scaling operation multiplication may overflow on 32 bit platforms.
 * In that case, #define RESOLUTION as ULL to get 64 bit result:
 * #define RESOLUTION 1024ULL
 *
 * The default values do not overflow.
 */
#define BUCKETS 12
#define INTERVAL_SHIFT 3
#define INTERVALS (1UL << INTERVAL_SHIFT)
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING 50000
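
/*
 * A quick check of the defaults against the overflow note above:
 * (MAX_INTERESTING - 1) * RESOLUTION = 49999 * 1024 = 51,198,976,
 * comfortably below UINT_MAX (4,294,967,295), so the plain 32-bit
 * RESOLUTION is safe here.
 */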
/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are 3 decision factors for picking a C
 * state:
 * 1) Energy break even point
 * 2) Performance impact
 * 3) Latency tolerance (from pmqos infrastructure)
 * These three factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time in
 * the C state is required to actually break even on this cost. CPUIDLE
 * provides us this duration in the "target_residency" field. So all that we
 * need is a good prediction of how long we'll be idle. Like the traditional
 * menu governor, we start with the actual known "next timer event" time.
 *
 * Since there are other sources of wakeups (interrupts for example) than
 * the next timer event, this estimation is rather optimistic. To get a
 * more realistic estimate, a correction factor is applied to the estimate,
 * based on historic behavior. For example, if in the past the actual
 * duration always was 50% of the next timer tick, the correction factor will
 * be 0.5.
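 *
 * As a worked example (the values are hypothetical, the scaling comes
 * from menu_select() below): correction factors are stored scaled by
 * RESOLUTION * DECAY = 8192, so 8192 represents a factor of 1.0. A
 * stored value of 4096 (a factor of 0.5) turns a 1000 us "next timer
 * event" estimate into a predicted idle duration of
 * 1000 * 4096 / 8192 = 500 us.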
 *
 * menu uses a running average for this correction factor, but it uses a
 * set of factors, not just a single factor. This stems from the realization
 * that the ratio depends on the order of magnitude of the expected
 * duration; if we expect 500 milliseconds of idle time, the likelihood of
 * getting an interrupt very early is much higher than if we expect 50
 * microseconds of idle time. A second independent input with a big impact
 * on the correction factor is whether (disk) IO is outstanding or not.
 * (As a special twist, we consider every sleep longer than 50 milliseconds
 * as perfect; there are no power gains for sleeping longer than this.)
 *
 * For these two reasons we keep an array of 12 independent factors, which
 * gets indexed based on the magnitude of the expected duration as well as
 * the "is IO outstanding" property.
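 *
 * For example (following which_bucket() below): with IO outstanding, the
 * index starts at BUCKETS/2 = 6, and a hypothetical expected duration of
 * 250 us adds another 2, landing in bucket 8.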
 *
 * Repeatable-interval-detector
 * ----------------------------
 * There are some cases where "next timer" is a completely unusable predictor:
 * Those cases where the interval is fixed, for example due to hardware
 * interrupt mitigation, but also due to fixed transfer rate devices such as
 * mice.
 * For this, we use a different predictor: We track the duration of the last 8
 * intervals and if the standard deviation of these 8 intervals is below a
 * threshold value, we use the average of these intervals as prediction.
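 *
 * For example, if the last 8 measured intervals all sit within a few
 * microseconds of 100 us, their standard deviation is well below the
 * thresholds applied by get_typical_interval() (stddev <= 20 us, or
 * stddev below one sixth of the average), so roughly 100 us becomes the
 * prediction regardless of the next timer event.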
 *
 * Limiting Performance Impact
 * ---------------------------
 * C states, especially those with large exit latencies, can have a
 * noticeable impact on workloads, which is not acceptable for most
 * sysadmins, and in addition, less performance has a power price of its own.
 *
 * As a general rule of thumb, menu assumes that the following heuristic
 * holds:
 *     The busier the system, the less impact of C states is acceptable
 *
 * This rule-of-thumb is implemented using a performance-multiplier:
 * If the exit latency times the performance multiplier is longer than
 * the predicted duration, the C state is not considered a candidate
 * for selection because its performance impact would be too high. So the
 * higher this multiplier is, the longer we need to be idle to pick a
 * deep C state, and thus the less likely a busy CPU will hit such a
 * deep C state.
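 *
 * As a worked example (hypothetical numbers, the logic comes from
 * performance_multiplier() and menu_select() below): with one task
 * waiting for IO, the multiplier is 1 + 10 * 1 = 11, so a predicted
 * idle duration of 500 us caps the acceptable exit latency at
 * 500 / 11 = 45 us; deeper states with larger exit latencies are
 * skipped.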
 *
 * Two factors are used in determining this multiplier:
 * a value of 10 is added for each point of "per cpu load average" we have
 * (as noted in performance_multiplier() below, this term is currently
 * disabled because it did not behave as intended);
 * a value of 10 points is added for each process that is waiting for
 * IO on this CPU.
 * (these values are experimentally determined)
 *
 * The load average factor gives a longer term (few seconds) input to the
 * decision, while the iowait value gives a cpu local instantaneous input.
 * The iowait factor may look low, but realize that this is also already
 * represented in the system load average.
 *
 */

struct menu_device {
	int		last_state_idx;		/* most recently selected state */
	int		needs_update;		/* stats need refresh in menu_select() */
	int		tick_wakeup;		/* last wakeup came from the tick */

	unsigned int	next_timer_us;		/* time until the next timer event */
	unsigned int	predicted_us;		/* predicted idle duration */
	unsigned int	bucket;			/* correction factor bucket in use */
	unsigned int	correction_factor[BUCKETS];	/* scaled by RESOLUTION * DECAY */
	unsigned int	intervals[INTERVALS];	/* recent measured idle intervals */
	int		interval_ptr;		/* ring buffer index into intervals[] */
};

static inline int get_loadavg(unsigned long load)
{
	return LOAD_INT(load) * 10 + LOAD_FRAC(load) / 10;
}

static inline int which_bucket(unsigned int duration, unsigned long nr_iowaiters)
{
	int bucket = 0;

	/*
	 * We keep two groups of stats; one with IO pending,
	 * one without.
	 * This allows us to calculate
	 * E(duration)|iowait
	 */
	if (nr_iowaiters)
		bucket = BUCKETS/2;

	if (duration < 10)
		return bucket;
	if (duration < 100)
		return bucket + 1;
	if (duration < 1000)
		return bucket + 2;
	if (duration < 10000)
		return bucket + 3;
	if (duration < 100000)
		return bucket + 4;
	return bucket + 5;
}

/*
 * Return a multiplier for the exit latency that is intended
 * to take performance requirements into account.
 * The more performance critical we estimate the system
 * to be, the higher this multiplier, and thus the higher
 * the barrier to go to an expensive C state.
 */
static inline int performance_multiplier(unsigned long nr_iowaiters, unsigned long load)
{
	int mult = 1;

	/* for higher loadavg, we are more reluctant */

	/*
	 * this doesn't work as intended - it is almost always 0, but can
	 * sometimes, depending on workload, spike very high into the hundreds
	 * even when the average cpu load is under 10%.
	 */
	/* mult += 2 * get_loadavg(); */

	/* for IO wait tasks (per cpu!) we add 10x each */
	mult += 10 * nr_iowaiters;

	return mult;
}

static DEFINE_PER_CPU(struct menu_device, menu_devices);

static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);

/*
 * Try detecting repeating patterns by keeping track of the last 8
 * intervals, and checking if the standard deviation of that set
 * of points is below a threshold. If it is... then use the
 * average of these 8 points as the estimated value.
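 *
 * For example, given the (hypothetical) samples
 * {100, 101, 99, 100, 102, 98, 100, 50000} us: the first pass fails the
 * variance test, so the 50000 us outlier is excluded via the threshold
 * and the second pass returns the 100 us average. If even the bottom
 * 3/4 of the samples show no stable average, UINT_MAX is returned to
 * signal that no typical interval was found.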
 */
static unsigned int get_typical_interval(struct menu_device *data)
{
	int i, divisor;
	unsigned int max, thresh, avg;
	uint64_t sum, variance;

	thresh = UINT_MAX; /* Discard outliers above this value */

again:

	/* First calculate the average of past intervals */
	max = 0;
	sum = 0;
	divisor = 0;
	for (i = 0; i < INTERVALS; i++) {
		unsigned int value = data->intervals[i];
		if (value <= thresh) {
			sum += value;
			divisor++;
			if (value > max)
				max = value;
		}
	}
	if (divisor == INTERVALS)
		avg = sum >> INTERVAL_SHIFT;
	else
		avg = div_u64(sum, divisor);

	/* Then try to determine variance */
	variance = 0;
	for (i = 0; i < INTERVALS; i++) {
		unsigned int value = data->intervals[i];
		if (value <= thresh) {
			int64_t diff = (int64_t)value - avg;
			variance += diff * diff;
		}
	}
	if (divisor == INTERVALS)
		variance >>= INTERVAL_SHIFT;
	else
		do_div(variance, divisor);

	/*
	 * The typical interval is obtained when standard deviation is
	 * small (stddev <= 20 us, variance <= 400 us^2) or standard
	 * deviation is small compared to the average interval (avg >
	 * 6*stddev, avg^2 > 36*variance). The average is smaller than
	 * UINT_MAX aka U32_MAX, so computing its square does not
	 * overflow a u64. We simply reject this candidate average if
	 * the standard deviation is greater than 715 s (which is
	 * rather unlikely).
	 *
	 * Use this result only if there is no timer to wake us up sooner.
	 */
	if (likely(variance <= U64_MAX/36)) {
		if ((((u64)avg*avg > variance*36) && (divisor * 4 >= INTERVALS * 3))
							|| variance <= 400) {
			return avg;
		}
	}

	/*
	 * If we have outliers to the upside in our distribution, discard
	 * those by setting the threshold to exclude these outliers, then
	 * calculate the average and standard deviation again. Once we get
	 * down to the bottom 3/4 of our samples, stop excluding samples.
	 *
	 * This can deal with workloads that have long pauses interspersed
	 * with sporadic activity with a bunch of short pauses.
	 */
	if ((divisor * 4) <= INTERVALS * 3)
		return UINT_MAX;

	thresh = max - 1;
	goto again;
}

/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 * @stop_tick: indication on whether or not to stop the tick
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		       bool *stop_tick)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	struct device *device = get_cpu_device(dev->cpu);
	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	int i;
	int first_idx;
	int idx;
	unsigned int interactivity_req;
	unsigned int expected_interval;
	unsigned long nr_iowaiters, cpu_load;
	int resume_latency = dev_pm_qos_raw_read_value(device);
	ktime_t delta_next;

	if (data->needs_update) {
		menu_update(drv, dev);
		data->needs_update = 0;
	}

	/* a resume_latency of 0 means no restriction */
	if (resume_latency && resume_latency < latency_req)
		latency_req = resume_latency;

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0)) {
		*stop_tick = false;
		return 0;
	}

	/* determine the expected residency time, round up */
	data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length(&delta_next));

	get_iowait_load(&nr_iowaiters, &cpu_load);
	data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);

	/*
	 * Force the result of multiplication to be 64 bits even if both
	 * operands are 32 bits.
	 * Make sure to round up for half microseconds.
	 */
	data->predicted_us = DIV_ROUND_CLOSEST_ULL((uint64_t)data->next_timer_us *
					data->correction_factor[data->bucket],
					RESOLUTION * DECAY);

	expected_interval = get_typical_interval(data);
	expected_interval = min(expected_interval, data->next_timer_us);

	first_idx = 0;
	if (drv->states[0].flags & CPUIDLE_FLAG_POLLING) {
		struct cpuidle_state *s = &drv->states[1];
		unsigned int polling_threshold;

		/*
		 * We want to default to C1 (hlt), not to busy polling
		 * unless the timer is happening really really soon, or
		 * C1's exit latency exceeds the user configured limit.
		 */
		polling_threshold = max_t(unsigned int, 20, s->target_residency);
		if (data->next_timer_us > polling_threshold &&
		    latency_req > s->exit_latency && !s->disabled &&
		    !dev->states_usage[1].disable)
			first_idx = 1;
	}

	/*
	 * Use the lowest expected idle interval to pick the idle state.
	 */
	data->predicted_us = min(data->predicted_us, expected_interval);

	if (tick_nohz_tick_stopped()) {
		/*
		 * If the tick is already stopped, the cost of possible short
		 * idle duration misprediction is much higher, because the CPU
		 * may be stuck in a shallow idle state for a long time as a
		 * result of it. In that case say we might mispredict and try
		 * to force the CPU into a state for which we would have stopped
		 * the tick, unless a timer is going to expire really soon
		 * anyway.
		 */
		if (data->predicted_us < TICK_USEC)
			data->predicted_us = min_t(unsigned int, TICK_USEC,
						   ktime_to_us(delta_next));
	} else {
		/*
		 * Use the performance multiplier and the user-configurable
		 * latency_req to determine the maximum exit latency.
		 */
		interactivity_req = data->predicted_us / performance_multiplier(nr_iowaiters, cpu_load);
		if (latency_req > interactivity_req)
			latency_req = interactivity_req;
	}

	expected_interval = data->predicted_us;
	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */
	idx = -1;
	for (i = first_idx; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (s->disabled || su->disable)
			continue;
		if (idx == -1)
			idx = i; /* first enabled state */
		if (s->target_residency > data->predicted_us)
			break;
		if (s->exit_latency > latency_req) {
			/*
			 * If we break out of the loop for latency reasons, use
			 * the target residency of the selected state as the
			 * expected idle duration so that the tick is retained
			 * as long as that target residency is low enough.
			 */
			expected_interval = drv->states[idx].target_residency;
			break;
		}
		idx = i;
	}

	if (idx == -1)
		idx = 0; /* No states enabled. Must use 0. */

	/*
	 * Don't stop the tick if the selected state is a polling one or if the
	 * expected idle duration is shorter than the tick period length.
	 */
	if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
	    expected_interval < TICK_USEC) {
		unsigned int delta_next_us = ktime_to_us(delta_next);

		*stop_tick = false;

		if (!tick_nohz_tick_stopped() && idx > 0 &&
		    drv->states[idx].target_residency > delta_next_us) {
			/*
			 * The tick is not going to be stopped and the target
			 * residency of the state to be returned is not within
			 * the time until the next timer event including the
			 * tick, so try to correct that.
			 */
			for (i = idx - 1; i >= 0; i--) {
				if (drv->states[i].disabled ||
				    dev->states_usage[i].disable)
					continue;

				idx = i;
				if (drv->states[i].target_residency <= delta_next_us)
					break;
			}
		}
	}

	data->last_state_idx = idx;

	return data->last_state_idx;
}

/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 * @index: the index of actual entered state
 *
 * NOTE: it's important to be fast here because this operation will add to
 * the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev, int index)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);

	data->last_state_idx = index;
	data->needs_update = 1;
	data->tick_wakeup = tick_nohz_idle_got_tick();
}

/**
 * menu_update - attempts to guess what happened after entry
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = this_cpu_ptr(&menu_devices);
	int last_idx = data->last_state_idx;
	struct cpuidle_state *target = &drv->states[last_idx];
	unsigned int measured_us;
	unsigned int new_factor;

	/*
	 * Try to figure out how much time passed between entry to low
	 * power state and occurrence of the wakeup event.
	 *
	 * If the entered idle state didn't support residency measurements,
	 * we use them anyway if they are short, and if long,
	 * truncate them to the whole expected time.
	 *
	 * Any measured amount of time will include the exit latency.
	 * Since we are interested in when the wakeup began, not when it
	 * was completed, we must subtract the exit latency. However, if
	 * the measured amount of time is less than the exit latency,
	 * assume the state was never reached and the exit latency is 0.
	 */

	if (data->tick_wakeup && data->next_timer_us > TICK_USEC) {
		/*
		 * The nohz code said that there wouldn't be any events within
		 * the tick boundary (if the tick was stopped), but the idle
		 * duration predictor had a differing opinion. Since the CPU
		 * was woken up by a tick (that wasn't stopped after all), the
		 * predictor was not quite right, so assume that the CPU could
		 * have been idle long (but not forever) to help the idle
		 * duration predictor do a better job next time.
		 */
		measured_us = 9 * MAX_INTERESTING / 10;
	} else {
		/* measured value */
		measured_us = cpuidle_get_last_residency(dev);

		/* Deduct exit latency */
		if (measured_us > 2 * target->exit_latency)
			measured_us -= target->exit_latency;
		else
			measured_us /= 2;
	}
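
	/*
	 * For instance (hypothetical numbers): with an exit latency of
	 * 30 us, a measured 100 us wakeup is credited as 100 - 30 = 70 us
	 * of idle time, while a measured 40 us wakeup (not clearly above
	 * twice the exit latency) is halved to 20 us instead.
	 */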

	/* Make sure our coefficients do not exceed unity */
	if (measured_us > data->next_timer_us)
		measured_us = data->next_timer_us;

	/* Update our correction ratio */
	new_factor = data->correction_factor[data->bucket];
	new_factor -= new_factor / DECAY;

	if (data->next_timer_us > 0 && measured_us < MAX_INTERESTING)
		new_factor += RESOLUTION * measured_us / data->next_timer_us;
	else
		/*
		 * we were idle so long that we count it as a perfect
		 * prediction
		 */
		new_factor += RESOLUTION;
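
	/*
	 * Illustration of this decaying average with the default tuning:
	 * at unity (8192), waking exactly at the predicted time gives
	 * 8192 - 8192/8 + 1024 = 8192, so the factor is stable, while
	 * consistently waking at half the predicted time converges on the
	 * fixed point 4096 (a factor of 0.5).
	 */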

	/*
	 * We don't want 0 as factor; we always want at least
	 * a tiny bit of estimated time. Fortunately, due to rounding,
	 * new_factor will stay nonzero regardless of measured_us values
	 * and the compiler can eliminate this test as long as DECAY > 1.
	 */
	if (DECAY == 1 && unlikely(new_factor == 0))
		new_factor = 1;

	data->correction_factor[data->bucket] = new_factor;

	/* update the repeating-pattern data */
	data->intervals[data->interval_ptr++] = measured_us;
	if (data->interval_ptr >= INTERVALS)
		data->interval_ptr = 0;
}

/**
 * menu_enable_device - scans a CPU's states and does setup
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_driver *drv,
				struct cpuidle_device *dev)
{
	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
	int i;

	memset(data, 0, sizeof(struct menu_device));

	/*
	 * if the correction factor is 0 (e.g. first time init or cpu
	 * hotplug etc), we actually want to start out with a unity factor.
	 */
	for (i = 0; i < BUCKETS; i++)
		data->correction_factor[i] = RESOLUTION * DECAY;

	return 0;
}

static struct cpuidle_governor menu_governor = {
	.name =		"menu",
	.rating =	20,
	.enable =	menu_enable_device,
	.select =	menu_select,
	.reflect =	menu_reflect,
};

/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
	return cpuidle_register_governor(&menu_governor);
}

postcore_initcall(init_menu);