/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 * Author:
 *        Arjan van de Ven <arjan@linux.intel.com>
 *
 * This code is licensed under the GPL version 2 as described
 * in the COPYING file that accompanies the Linux Kernel.
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/module.h>

#define BUCKETS 12
#define INTERVALS 8
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING 50000
#define STDDEV_THRESH 400

/* 60 * 60 > STDDEV_THRESH * INTERVALS = 400 * 8 */
#define MAX_DEVIATION 60

static DEFINE_PER_CPU(struct hrtimer, menu_hrtimer);
static DEFINE_PER_CPU(int, hrtimer_status);
/* menu hrtimer mode */
enum {MENU_HRTIMER_STOP, MENU_HRTIMER_REPEAT, MENU_HRTIMER_GENERAL};

/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are 3 decision factors for picking a C
 * state:
 * 1) Energy break even point
 * 2) Performance impact
 * 3) Latency tolerance (from pmqos infrastructure)
 * These three factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time in
 * the C state is required to actually break even on this cost. CPUIDLE
 * provides us this duration in the "target_residency" field. So all that we
 * need is a good prediction of how long we'll be idle. Like the traditional
 * menu governor, we start with the actual known "next timer event" time.
 *
 * Since there are other sources of wakeups (interrupts for example) than
 * the next timer event, this estimation is rather optimistic. To get a
 * more realistic estimate, a correction factor is applied to the estimate,
 * that is based on historic behavior. For example, if in the past the actual
 * duration always was 50% of the next timer tick, the correction factor will
 * be 0.5.
 *
 * menu uses a running average for this correction factor, however it uses a
 * set of factors, not just a single factor. This stems from the realization
 * that the ratio is dependent on the order of magnitude of the expected
 * duration; if we expect 500 milliseconds of idle time the likelihood of
 * getting an interrupt very early is much higher than if we expect 50
 * microseconds of idle time. A second independent factor that has a big
 * impact on the actual factor is whether there is (disk) IO outstanding
 * or not.
 * (as a special twist, we consider every sleep longer than 50 milliseconds
 * as perfect; there are no power gains for sleeping longer than this)
 *
 * For these two reasons we keep an array of 12 independent factors, that gets
 * indexed based on the magnitude of the expected duration as well as the
 * "is IO outstanding" property.
 *
 * Repeatable-interval-detector
 * ----------------------------
 * There are some cases where "next timer" is a completely unusable predictor:
 * Those cases where the interval is fixed, for example due to hardware
 * interrupt mitigation, but also due to fixed transfer rate devices such as
 * mice.
 * For this, we use a different predictor: We track the duration of the last 8
 * intervals and if the standard deviation of these 8 intervals is below a
 * threshold value, we use the average of these intervals as prediction.
 *
 * Limiting Performance Impact
 * ---------------------------
 * C states, especially those with large exit latencies, can have a really
 * noticeable impact on workloads, which is not acceptable for most sysadmins,
 * and in addition, less performance has a power price of its own.
 *
 * As a general rule of thumb, menu assumes that the following heuristic
 * holds:
 *     The busier the system, the less impact of C states is acceptable
 *
 * This rule-of-thumb is implemented using a performance-multiplier:
 * If the exit latency times the performance multiplier is longer than
 * the predicted duration, the C state is not considered a candidate
 * for selection due to a too high performance impact. So the higher
 * this multiplier is, the longer we need to be idle to pick a deep C
 * state, and thus the less likely a busy CPU will hit such a deep
 * C state.
 *
 * Two factors are used in determining this multiplier:
 * a value of 10 is added for each point of "per cpu load average" we have
 * (note: this term is currently disabled in performance_multiplier() below);
 * a value of 10 is added for each process that is waiting for
 * IO on this CPU.
 * (these values are experimentally determined)
 *
 * The load average factor gives a longer term (few seconds) input to the
 * decision, while the iowait value gives a cpu-local, instantaneous input.
 * The iowait factor may look low, but realize that this is also already
 * represented in the system load average.
 *
 */

/*
 * The C-state residency is so long that it is worthwhile to exit
 * from the shallow C-state and re-enter into a deeper C-state.
 */
static unsigned int perfect_cstate_ms __read_mostly = 30;
module_param(perfect_cstate_ms, uint, 0000);
struct menu_device {
	int		last_state_idx;
	int		needs_update;

	unsigned int	expected_us;
	u64		predicted_us;
	unsigned int	exit_us;
	unsigned int	bucket;
	u64		correction_factor[BUCKETS];
	u32		intervals[INTERVALS];
	int		interval_ptr;
};

#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)

static int get_loadavg(void)
{
	unsigned long this = this_cpu_load();

	return LOAD_INT(this) * 10 + LOAD_FRAC(this) / 10;
}
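
/*
 * Worked example, assuming FSHIFT == 11 (so FIXED_1 == 2048): for
 * this == 4608, LOAD_INT gives 4608 >> 11 == 2 and LOAD_FRAC gives
 * ((4608 & 2047) * 100) >> 11 == 25, i.e. a load average of 2.25;
 * get_loadavg() then returns 2 * 10 + 25 / 10 == 22.
 */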

static inline int which_bucket(unsigned int duration)
{
	int bucket = 0;

	/*
	 * We keep two groups of stats; one with IO pending,
	 * one without.
	 * This allows us to calculate
	 * E(duration)|iowait
	 */
	if (nr_iowait_cpu(smp_processor_id()))
		bucket = BUCKETS/2;

	if (duration < 10)
		return bucket;
	if (duration < 100)
		return bucket + 1;
	if (duration < 1000)
		return bucket + 2;
	if (duration < 10000)
		return bucket + 3;
	if (duration < 100000)
		return bucket + 4;
	return bucket + 5;
}
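
/*
 * Example: with 650 usec of expected idle time and IO outstanding on
 * this CPU, we start in the upper half (bucket = BUCKETS/2 = 6) and,
 * since 100 <= 650 < 1000, end up in bucket 6 + 2 = 8.
 */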

/*
 * Return a multiplier for the exit latency that is intended
 * to take performance requirements into account.
 * The more performance critical we estimate the system
 * to be, the higher this multiplier, and thus the higher
 * the barrier to go to an expensive C state.
 */
static inline int performance_multiplier(void)
{
	int mult = 1;

	/* for higher loadavg, we are more reluctant */

	/*
	 * this doesn't work as intended - it is almost always 0, but can
	 * sometimes, depending on workload, spike very high into the hundreds
	 * even when the average cpu load is under 10%.
	 */
	/* mult += 2 * get_loadavg(); */

	/* for IO wait tasks (per cpu!) we add 10x each */
	mult += 10 * nr_iowait_cpu(smp_processor_id());

	return mult;
}
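
/*
 * Example: with two tasks in iowait on this CPU the multiplier is
 * 1 + 10 * 2 == 21, so menu_select() below will skip any state whose
 * exit latency exceeds predicted_us / 21; a state with a 100 usec
 * exit latency then needs more than 2100 usec of predicted idle time.
 */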

static DEFINE_PER_CPU(struct menu_device, menu_devices);

static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);

/* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */
static u64 div_round64(u64 dividend, u32 divisor)
{
	return div_u64(dividend + (divisor / 2), divisor);
}
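
/*
 * Example: div_round64(5, 2) evaluates to (5 + 1) / 2 == 3, i.e. the
 * quotient rounded to the nearest integer rather than truncated.
 */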

/* Cancel the hrtimer if it has not triggered yet */
void menu_hrtimer_cancel(void)
{
	int cpu = smp_processor_id();
	struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu);

	/* The timer has not expired yet */
	if (per_cpu(hrtimer_status, cpu)) {
		hrtimer_cancel(hrtmr);
		per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP;
	}
}
EXPORT_SYMBOL_GPL(menu_hrtimer_cancel);

/* Callback for when the hrtimer is triggered */
static enum hrtimer_restart menu_hrtimer_notify(struct hrtimer *hrtimer)
{
	int cpu = smp_processor_id();
	struct menu_device *data = &per_cpu(menu_devices, cpu);

	/*
	 * In the general case, the expected residency is much larger than
	 * the deepest C-state's target residency, but the prediction logic
	 * still predicted a small residency; the prediction history is
	 * therefore unreliable if this timer fires, so reset the
	 * correction factor.
	 */
	if (per_cpu(hrtimer_status, cpu) == MENU_HRTIMER_GENERAL)
		data->correction_factor[data->bucket] = RESOLUTION * DECAY;

	per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP;

	return HRTIMER_NORESTART;
}

/*
 * Try detecting repeating patterns by keeping track of the last 8
 * intervals, and checking if the standard deviation of that set
 * of points is below a threshold. If it is, then use the
 * average of these 8 points as the estimated value.
 */
static u32 get_typical_interval(struct menu_device *data)
{
	int i = 0, divisor = 0;
	uint64_t max = 0, avg = 0, stddev = 0;
	int64_t thresh = LLONG_MAX; /* Discard outliers above this value. */
	unsigned int ret = 0;

again:

	/* first calculate the average and standard deviation of the past */
	max = avg = divisor = stddev = 0;
	for (i = 0; i < INTERVALS; i++) {
		int64_t value = data->intervals[i];
		if (value <= thresh) {
			avg += value;
			divisor++;
			if (value > max)
				max = value;
		}
	}
	do_div(avg, divisor);

	for (i = 0; i < INTERVALS; i++) {
		int64_t value = data->intervals[i];
		if (value <= thresh) {
			int64_t diff = value - avg;
			stddev += diff * diff;
		}
	}
	do_div(stddev, divisor);
	stddev = int_sqrt(stddev);
	/*
	 * If we have outliers to the upside in our distribution, discard
	 * those by setting the threshold to exclude these outliers, then
	 * calculate the average and standard deviation again. Once we get
	 * down to the bottom 3/4 of our samples, stop excluding samples.
	 *
	 * This can deal with workloads that have long pauses interspersed
	 * with sporadic activity with a bunch of short pauses.
	 *
	 * The typical interval is accepted when the standard deviation is
	 * small in absolute terms (<= 20 us) or small compared to the
	 * average interval.
	 */
	if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3))
							|| stddev <= 20) {
		data->predicted_us = avg;
		ret = 1;
		return ret;

	} else if ((divisor * 4) > INTERVALS * 3) {
		/* Exclude the max interval */
		thresh = max - 1;
		goto again;
	}

	return ret;
}
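
/*
 * Worked example: with intervals {498, 502, 500, 499, 501, 500, 5000, 500}
 * the first pass sees avg == 1062 and a large standard deviation, so the
 * 5000 us outlier is excluded (thresh = 4999) and we try again. The second
 * pass uses 7 samples: avg == 500, stddev == 1, and since avg > 6 * stddev
 * and 7 * 4 >= 8 * 3, the repeating interval of 500 us becomes the
 * prediction.
 */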

/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	int i;
	int multiplier;
	struct timespec t;
	int repeat = 0, low_predicted = 0;
	int cpu = smp_processor_id();
	struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu);

	if (data->needs_update) {
		menu_update(drv, dev);
		data->needs_update = 0;
	}

	data->last_state_idx = 0;
	data->exit_us = 0;

	/* Special case when user has set very strict latency requirement */
	if (unlikely(latency_req == 0))
		return 0;

	/* determine the expected residency time, round up */
	t = ktime_to_timespec(tick_nohz_get_sleep_length());
	data->expected_us =
		t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;

	data->bucket = which_bucket(data->expected_us);

	multiplier = performance_multiplier();

	/*
	 * if the correction factor is 0 (eg first time init or cpu hotplug
	 * etc), we actually want to start out with a unity factor.
	 */
	if (data->correction_factor[data->bucket] == 0)
		data->correction_factor[data->bucket] = RESOLUTION * DECAY;

	/* Make sure to round up for half microseconds */
	data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
					 RESOLUTION * DECAY);
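
	/*
	 * Example: a correction factor of 4096 encodes a ratio of
	 * 4096 / (RESOLUTION * DECAY) == 0.5, so an expected_us of 1000
	 * yields a predicted_us of div_round64(1000 * 4096, 8192) == 500.
	 */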

	repeat = get_typical_interval(data);

	/*
	 * We want to default to C1 (hlt), not to busy polling
	 * unless the timer is happening really really soon.
	 */
	if (data->expected_us > 5 &&
	    !drv->states[CPUIDLE_DRIVER_STATE_START].disabled &&
	    dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0)
		data->last_state_idx = CPUIDLE_DRIVER_STATE_START;

	/*
	 * Find the idle state with the lowest power while satisfying
	 * our constraints.
	 */
	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (s->disabled || su->disable)
			continue;
		if (s->target_residency > data->predicted_us) {
			low_predicted = 1;
			continue;
		}
		if (s->exit_latency > latency_req)
			continue;
		if (s->exit_latency * multiplier > data->predicted_us)
			continue;

		data->last_state_idx = i;
		data->exit_us = s->exit_latency;
	}

	/* not deepest C-state chosen for low predicted residency */
	if (low_predicted) {
		unsigned int timer_us = 0;
		unsigned int perfect_us = 0;

		/*
		 * Set a timer to detect whether this sleep is much
		 * longer than repeat mode predicted. If the timer
		 * triggers, the code will evaluate whether to put
		 * the CPU into a deeper C-state.
		 * The timer is cancelled on CPU wakeup.
		 */
		timer_us = 2 * (data->predicted_us + MAX_DEVIATION);

		perfect_us = perfect_cstate_ms * 1000;
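
		/*
		 * Example: with predicted_us == 100 and MAX_DEVIATION == 60,
		 * timer_us == 320; the repeat-mode timer below is armed only
		 * if expected_us exceeds 4 * 320 == 1280 usec.
		 */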

		if (repeat && (4 * timer_us < data->expected_us)) {
			RCU_NONIDLE(hrtimer_start(hrtmr,
				ns_to_ktime(1000 * timer_us),
				HRTIMER_MODE_REL_PINNED));
			/* In the repeat case, the menu hrtimer is started */
			per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_REPEAT;
		} else if (perfect_us < data->expected_us) {
			/*
			 * The next timer is long. This could be because
			 * we did not make a useful prediction.
			 * In that case, it makes sense to re-enter
			 * into a deeper C-state after some time.
			 */
			RCU_NONIDLE(hrtimer_start(hrtmr,
				ns_to_ktime(1000 * timer_us),
				HRTIMER_MODE_REL_PINNED));
			/* In the general case, the menu hrtimer is started */
			per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_GENERAL;
		}
	}

	return data->last_state_idx;
}

/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 * @index: the index of the actually entered state
 *
 * NOTE: it's important to be fast here because this operation will add to
 * the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev, int index)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	data->last_state_idx = index;
	if (index >= 0)
		data->needs_update = 1;
}

/**
 * menu_update - attempts to guess what happened after entry
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	struct menu_device *data = &__get_cpu_var(menu_devices);
	int last_idx = data->last_state_idx;
	unsigned int last_idle_us = cpuidle_get_last_residency(dev);
	struct cpuidle_state *target = &drv->states[last_idx];
	unsigned int measured_us;
	u64 new_factor;

	/*
	 * Ugh, this idle state doesn't support residency measurements, so we
	 * are basically lost in the dark. As a compromise, assume we slept
	 * for the whole expected time.
	 */
	if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID)))
		last_idle_us = data->expected_us;

	measured_us = last_idle_us;

	/*
	 * We correct for the exit latency; we are assuming here that the
	 * exit latency happens after the event that we're interested in.
	 */
	if (measured_us > data->exit_us)
		measured_us -= data->exit_us;

	/* update our correction ratio */
	new_factor = data->correction_factor[data->bucket]
			* (DECAY - 1) / DECAY;

	if (data->expected_us > 0 && measured_us < MAX_INTERESTING)
		new_factor += RESOLUTION * measured_us / data->expected_us;
	else
		/*
		 * we were idle so long that we count it as a perfect
		 * prediction
		 */
		new_factor += RESOLUTION;
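
	/*
	 * Worked example: with a unity factor of 8192 (RESOLUTION * DECAY),
	 * measured_us == 500 and expected_us == 1000, the new factor is
	 * 8192 * 7 / 8 + 1024 * 500 / 1000 == 7168 + 512 == 7680, i.e. the
	 * running average decays 1/8 of the way toward the observed ratio.
	 */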

	/*
	 * We don't want 0 as factor; we always want at least
	 * a tiny bit of estimated time.
	 */
	if (new_factor == 0)
		new_factor = 1;

	data->correction_factor[data->bucket] = new_factor;

	/* update the repeating-pattern data */
	data->intervals[data->interval_ptr++] = last_idle_us;
	if (data->interval_ptr >= INTERVALS)
		data->interval_ptr = 0;
}

/**
 * menu_enable_device - scans a CPU's states and does setup
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_driver *drv,
				struct cpuidle_device *dev)
{
	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
	struct hrtimer *t = &per_cpu(menu_hrtimer, dev->cpu);
	hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	t->function = menu_hrtimer_notify;

	memset(data, 0, sizeof(struct menu_device));

	return 0;
}

static struct cpuidle_governor menu_governor = {
	.name =		"menu",
	.rating =	20,
	.enable =	menu_enable_device,
	.select =	menu_select,
	.reflect =	menu_reflect,
	.owner =	THIS_MODULE,
};

/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
	return cpuidle_register_governor(&menu_governor);
}

/**
 * exit_menu - exits the governor
 */
static void __exit exit_menu(void)
{
	cpuidle_unregister_governor(&menu_governor);
}

MODULE_LICENSE("GPL");
module_init(init_menu);
module_exit(exit_menu);