/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licensed under the GPL.
 */

#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <trace/events/power.h>

#include "cpuidle.h"

DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DEFINE_PER_CPU(struct cpuidle_device, cpuidle_dev);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);

static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;

int cpuidle_disabled(void)
{
	return off;
}
void disable_cpuidle(void)
{
	off = 1;
}

bool cpuidle_not_available(struct cpuidle_driver *drv,
			   struct cpuidle_device *dev)
{
	return off || !initialized || !drv || !dev || !dev->enabled;
}

/**
 * cpuidle_play_dead - cpu off-lining
 *
 * Returns only in case of an error or if no cpuidle driver is present.
 */
int cpuidle_play_dead(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int i;

	if (!drv)
		return -ENODEV;

	/* Find the lowest-power state that supports long-term idle. */
	for (i = drv->state_count - 1; i >= 0; i--)
		if (drv->states[i].enter_dead)
			return drv->states[i].enter_dead(dev, i);

	return -ENODEV;
}

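/**
 * find_deepest_state - Find the deepest idle state matching the constraints.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
 * @max_latency: states with a higher exit latency are skipped.
 * @forbidden_flags: states with any of these flags set are skipped.
 * @s2idle: if true, only states with an ->enter_s2idle callback qualify.
 *
 * Returns the index of the deepest matching state, or 0 if none matches.
 */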
static int find_deepest_state(struct cpuidle_driver *drv,
			      struct cpuidle_device *dev,
			      unsigned int max_latency,
			      unsigned int forbidden_flags,
			      bool s2idle)
{
	unsigned int latency_req = 0;
	int i, ret = 0;

	for (i = 1; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];
		struct cpuidle_state_usage *su = &dev->states_usage[i];

		if (s->disabled || su->disable || s->exit_latency <= latency_req
		    || s->exit_latency > max_latency
		    || (s->flags & forbidden_flags)
		    || (s2idle && !s->enter_s2idle))
			continue;

		latency_req = s->exit_latency;
		ret = i;
	}
	return ret;
}

/**
 * cpuidle_use_deepest_state - Set/clear governor override flag.
 * @enable: New value of the flag.
 *
 * If @enable is set, make the current CPU use the deepest available idle
 * state from now on, overriding the governors; if it is clear, hand the
 * decision back to the governors.
 */
void cpuidle_use_deepest_state(bool enable)
{
	struct cpuidle_device *dev;

	preempt_disable();
	dev = cpuidle_get_device();
	if (dev)
		dev->use_deepest_state = enable;
	preempt_enable();
}
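
/*
 * Sketch of the intended call pattern (illustrative; in the kernel the
 * play_idle() path in the scheduler uses this flag): a caller that wants
 * the CPU to stay in the deepest state while it forces the CPU through
 * the idle loop brackets the idle period with:
 *
 *	cpuidle_use_deepest_state(true);
 *	... go through the idle loop ...
 *	cpuidle_use_deepest_state(false);
 */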

/**
 * cpuidle_find_deepest_state - Find the deepest available idle state.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
 */
int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
			       struct cpuidle_device *dev)
{
	return find_deepest_state(drv, dev, UINT_MAX, 0, false);
}

#ifdef CONFIG_SUSPEND
static void enter_s2idle_proper(struct cpuidle_driver *drv,
				struct cpuidle_device *dev, int index)
{
	ktime_t time_start, time_end;

	time_start = ns_to_ktime(local_clock());

	/*
	 * trace_suspend_resume() called by tick_freeze() for the last CPU
	 * executing it contains RCU usage regarded as invalid in the idle
	 * context, so tell RCU about that.
	 */
	RCU_NONIDLE(tick_freeze());
	/*
	 * The state used here cannot be a "coupled" one, because the "coupled"
	 * cpuidle mechanism enables interrupts and doing that with timekeeping
	 * suspended is generally unsafe.
	 */
	stop_critical_timings();
	drv->states[index].enter_s2idle(dev, drv, index);
	WARN_ON(!irqs_disabled());
	/*
	 * timekeeping_resume() that will be called by tick_unfreeze() for the
	 * first CPU executing it calls functions containing RCU read-side
	 * critical sections, so tell RCU about that.
	 */
	RCU_NONIDLE(tick_unfreeze());
	start_critical_timings();

	time_end = ns_to_ktime(local_clock());

	dev->states_usage[index].s2idle_time += ktime_us_delta(time_end, time_start);
	dev->states_usage[index].s2idle_usage++;
}

/**
 * cpuidle_enter_s2idle - Enter an idle state suitable for suspend-to-idle.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
 *
 * If there are states with the ->enter_s2idle callback, find the deepest of
 * them and enter it with the tick frozen.
 */
int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	int index;

	/*
	 * Find the deepest state with ->enter_s2idle present, which guarantees
	 * that interrupts won't be enabled when it exits and allows the tick to
	 * be frozen safely.
	 */
	index = find_deepest_state(drv, dev, UINT_MAX, 0, true);
	if (index > 0)
		enter_s2idle_proper(drv, dev, index);

	return index;
}
#endif /* CONFIG_SUSPEND */

/**
 * cpuidle_enter_state - enter the state and update stats
 * @dev: cpuidle device for this cpu
 * @drv: cpuidle driver for this cpu
 * @index: index into the states table in @drv of the state to enter
 */
int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
			int index)
{
	int entered_state;

	struct cpuidle_state *target_state = &drv->states[index];
	bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
	ktime_t time_start, time_end;

	/*
	 * Tell the time framework to switch to a broadcast timer because our
	 * local timer will be shut down. If a local timer is used from another
	 * CPU as a broadcast timer, this call may fail if it is not available.
	 */
	if (broadcast && tick_broadcast_enter()) {
		index = find_deepest_state(drv, dev, target_state->exit_latency,
					   CPUIDLE_FLAG_TIMER_STOP, false);
		if (index < 0) {
			default_idle_call();
			return -EBUSY;
		}
		target_state = &drv->states[index];
		broadcast = false;
	}

	/* Take note of the planned idle state. */
	sched_idle_set_state(target_state);

	trace_cpu_idle_rcuidle(index, dev->cpu);
	time_start = ns_to_ktime(local_clock());

	stop_critical_timings();
	entered_state = target_state->enter(dev, drv, index);
	start_critical_timings();

	sched_clock_idle_wakeup_event();
	time_end = ns_to_ktime(local_clock());
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);

	/* The cpu is no longer idle or about to enter idle. */
	sched_idle_set_state(NULL);

	if (broadcast) {
		if (WARN_ON_ONCE(!irqs_disabled()))
			local_irq_disable();

		tick_broadcast_exit();
	}

	if (!cpuidle_state_is_coupled(drv, index))
		local_irq_enable();

	if (entered_state >= 0) {
		s64 diff, delay = drv->states[entered_state].exit_latency;
		int i;

		/*
		 * Update the cpuidle counters. This could be done in each
		 * driver's enter routine instead, but that would duplicate
		 * the same code across drivers.
		 */
		diff = ktime_us_delta(time_end, time_start);
		if (diff > INT_MAX)
			diff = INT_MAX;

		dev->last_residency = (int)diff;
		dev->states_usage[entered_state].time += dev->last_residency;
		dev->states_usage[entered_state].usage++;

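		/*
		 * Track mispredictions. For example (illustrative numbers):
		 * if the CPU was idle for only 50 us in a state with a
		 * 200 us target residency while a shallower state was
		 * enabled, the chosen state was too deep and its "above"
		 * counter is incremented; conversely, if the idle time
		 * exceeded the exit latency by at least a deeper enabled
		 * state's target residency, that deeper state would have
		 * been a better fit and "below" is incremented.
		 */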
		if (diff < drv->states[entered_state].target_residency) {
			for (i = entered_state - 1; i >= 0; i--) {
				if (drv->states[i].disabled ||
				    dev->states_usage[i].disable)
					continue;

				/* Shallower states are enabled, so update. */
				dev->states_usage[entered_state].above++;
				break;
			}
		} else if (diff > delay) {
			for (i = entered_state + 1; i < drv->state_count; i++) {
				if (drv->states[i].disabled ||
				    dev->states_usage[i].disable)
					continue;

				/*
				 * Update if a deeper state would have been a
				 * better match for the observed idle duration.
				 */
				if (diff - delay >= drv->states[i].target_residency)
					dev->states_usage[entered_state].below++;

				break;
			}
		}
	} else {
		dev->last_residency = 0;
	}

	return entered_state;
}

/**
 * cpuidle_select - ask the cpuidle framework to choose an idle state
 *
 * @drv: the cpuidle driver
 * @dev: the cpuidle device
 * @stop_tick: indication of whether or not to stop the tick
 *
 * Returns the index of the idle state. The return value must not be negative.
 *
 * The 'false' boolean value is expected to be written to the memory location
 * pointed to by @stop_tick if the scheduler tick should not be stopped before
 * entering the returned state.
 */
int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		   bool *stop_tick)
{
	return cpuidle_curr_governor->select(drv, dev, stop_tick);
}

/**
 * cpuidle_enter - enter into the specified idle state
 *
 * @drv:   the cpuidle driver tied with the cpu
 * @dev:   the cpuidle device
 * @index: the index in the idle state table
 *
 * Returns the index in the idle state, < 0 in case of error.
 * The error code depends on the backend driver
 */
int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		  int index)
{
	int ret = 0;

	/*
	 * Store the next hrtimer, which becomes either the next tick or the
	 * next timer event, whichever expires first. Additionally, to make
	 * this data useful for consumers outside cpuidle, we rely on the
	 * governor's ->select() callback having already decided whether or
	 * not to stop the tick.
	 */
	WRITE_ONCE(dev->next_hrtimer, tick_nohz_get_next_hrtimer());

	if (cpuidle_state_is_coupled(drv, index))
		ret = cpuidle_enter_state_coupled(dev, drv, index);
	else
		ret = cpuidle_enter_state(dev, drv, index);

	WRITE_ONCE(dev->next_hrtimer, 0);
	return ret;
}

/**
 * cpuidle_reflect - tell the underlying governor what state we were in
 *
 * @dev  : the cpuidle device
 * @index: the index in the idle state table
 *
 */
void cpuidle_reflect(struct cpuidle_device *dev, int index)
{
	if (cpuidle_curr_governor->reflect && index >= 0)
		cpuidle_curr_governor->reflect(dev, index);
}

/**
 * cpuidle_poll_time - return the amount of time to poll for;
 *                     governors can override dev->poll_limit_ns if necessary
 *
 * @drv: the cpuidle driver tied with the cpu
 * @dev: the cpuidle device
 *
 */
u64 cpuidle_poll_time(struct cpuidle_driver *drv,
		      struct cpuidle_device *dev)
{
	int i;
	u64 limit_ns;

	if (dev->poll_limit_ns)
		return dev->poll_limit_ns;

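	/*
	 * Use the target residency of the shallowest enabled non-polling
	 * state as the limit, falling back to one tick period if all of
	 * them are disabled. For example (illustrative numbers): if state 1
	 * is enabled with a 20 us target residency, polling is cut off
	 * after 20000 ns.
	 */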
	limit_ns = TICK_NSEC;
	for (i = 1; i < drv->state_count; i++) {
		if (drv->states[i].disabled || dev->states_usage[i].disable)
			continue;

		limit_ns = (u64)drv->states[i].target_residency * NSEC_PER_USEC;
		break;
	}

	dev->poll_limit_ns = limit_ns;

	return dev->poll_limit_ns;
}

/**
 * cpuidle_install_idle_handler - installs the cpuidle idle loop handler
 */
void cpuidle_install_idle_handler(void)
{
	if (enabled_devices) {
		/* Make sure all changes are finished before we switch to the new idle handler. */
		smp_wmb();
		initialized = 1;
	}
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
 */
void cpuidle_uninstall_idle_handler(void)
{
	if (enabled_devices) {
		initialized = 0;
		wake_up_all_idle_cpus();
	}

	/*
	 * Make sure external observers (such as the scheduler)
	 * are done looking at the idle states pointed to.
	 */
	synchronize_rcu();
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
}

EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);

/* Currently used in suspend/resume path to suspend cpuidle */
void cpuidle_pause(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

/* Currently used in suspend/resume path to resume cpuidle */
void cpuidle_resume(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
	int ret;
	struct cpuidle_driver *drv;

	if (!dev)
		return -EINVAL;

	if (dev->enabled)
		return 0;

	if (!cpuidle_curr_governor)
		return -EIO;

	drv = cpuidle_get_cpu_driver(dev);

	if (!drv)
		return -EIO;

	if (!dev->registered)
		return -EINVAL;

	ret = cpuidle_add_device_sysfs(dev);
	if (ret)
		return ret;

	if (cpuidle_curr_governor->enable) {
		ret = cpuidle_curr_governor->enable(drv, dev);
		if (ret)
			goto fail_sysfs;
	}

	smp_wmb();

	dev->enabled = 1;

	enabled_devices++;
	return 0;

fail_sysfs:
	cpuidle_remove_device_sysfs(dev);

	return ret;
}

EXPORT_SYMBOL_GPL(cpuidle_enable_device);
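
/*
 * Sketch of the external usage pattern required by the locking rule above
 * (illustrative; e.g. the ACPI processor driver re-evaluates C-states in
 * this shape):
 *
 *	cpuidle_pause_and_lock();
 *	cpuidle_disable_device(dev);
 *	... reconfigure the device's idle states ...
 *	cpuidle_enable_device(dev);
 *	cpuidle_resume_and_unlock();
 */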

/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	if (!dev || !dev->enabled)
		return;

	if (!drv || !cpuidle_curr_governor)
		return;

	dev->enabled = 0;

	if (cpuidle_curr_governor->disable)
		cpuidle_curr_governor->disable(drv, dev);

	cpuidle_remove_device_sysfs(dev);
	enabled_devices--;
}

EXPORT_SYMBOL_GPL(cpuidle_disable_device);
543
__cpuidle_unregister_device(struct cpuidle_device * dev)544 static void __cpuidle_unregister_device(struct cpuidle_device *dev)
545 {
546 struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
547
548 list_del(&dev->device_list);
549 per_cpu(cpuidle_devices, dev->cpu) = NULL;
550 module_put(drv->owner);
551
552 dev->registered = 0;
553 }
554
__cpuidle_device_init(struct cpuidle_device * dev)555 static void __cpuidle_device_init(struct cpuidle_device *dev)
556 {
557 memset(dev->states_usage, 0, sizeof(dev->states_usage));
558 dev->last_residency = 0;
559 dev->next_hrtimer = 0;
560 }

/**
 * __cpuidle_register_device - internal register function called before register
 * and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret;
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	if (!try_module_get(drv->owner))
		return -EINVAL;

	per_cpu(cpuidle_devices, dev->cpu) = dev;
	list_add(&dev->device_list, &cpuidle_detected_devices);

	ret = cpuidle_coupled_register_device(dev);
	if (ret)
		__cpuidle_unregister_device(dev);
	else
		dev->registered = 1;

	return ret;
}

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret = -EBUSY;

	if (!dev)
		return -EINVAL;

	mutex_lock(&cpuidle_lock);

	if (dev->registered)
		goto out_unlock;

	__cpuidle_device_init(dev);

	ret = __cpuidle_register_device(dev);
	if (ret)
		goto out_unlock;

	ret = cpuidle_add_sysfs(dev);
	if (ret)
		goto out_unregister;

	ret = cpuidle_enable_device(dev);
	if (ret)
		goto out_sysfs;

	cpuidle_install_idle_handler();

out_unlock:
	mutex_unlock(&cpuidle_lock);

	return ret;

out_sysfs:
	cpuidle_remove_sysfs(dev);
out_unregister:
	__cpuidle_unregister_device(dev);
	goto out_unlock;
}

EXPORT_SYMBOL_GPL(cpuidle_register_device);

/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
	if (!dev || dev->registered == 0)
		return;

	cpuidle_pause_and_lock();

	cpuidle_disable_device(dev);

	cpuidle_remove_sysfs(dev);

	__cpuidle_unregister_device(dev);

	cpuidle_coupled_unregister_device(dev);

	cpuidle_resume_and_unlock();
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_device);

/**
 * cpuidle_unregister: unregister a driver and the devices. This function
 * can be used only if the driver has been previously registered through
 * the cpuidle_register function.
 *
 * @drv: a valid pointer to a struct cpuidle_driver
 */
void cpuidle_unregister(struct cpuidle_driver *drv)
{
	int cpu;
	struct cpuidle_device *device;

	for_each_cpu(cpu, drv->cpumask) {
		device = &per_cpu(cpuidle_dev, cpu);
		cpuidle_unregister_device(device);
	}

	cpuidle_unregister_driver(drv);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister);

/**
 * cpuidle_register: registers the driver and the cpu devices with the
 * coupled_cpus passed as parameter. This function covers the common
 * initialization pattern shared by the arch-specific drivers. The
 * devices are globally defined in this file.
 *
 * @drv         : a valid pointer to a struct cpuidle_driver
 * @coupled_cpus: a cpumask for the coupled states
 *
 * Returns 0 on success, < 0 otherwise
 */
int cpuidle_register(struct cpuidle_driver *drv,
		     const struct cpumask *const coupled_cpus)
{
	int ret, cpu;
	struct cpuidle_device *device;

	ret = cpuidle_register_driver(drv);
	if (ret) {
		pr_err("failed to register cpuidle driver\n");
		return ret;
	}

	for_each_cpu(cpu, drv->cpumask) {
		device = &per_cpu(cpuidle_dev, cpu);
		device->cpu = cpu;

#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
		/*
		 * On ARM multiplatform kernels, the coupled idle states may be
		 * enabled in the kernel even if the cpuidle driver does not
		 * use them. Note that coupled_cpus is copied by value.
		 */
		if (coupled_cpus)
			device->coupled_cpus = *coupled_cpus;
#endif
		ret = cpuidle_register_device(device);
		if (!ret)
			continue;

		pr_err("Failed to register cpuidle device for cpu%d\n", cpu);

		cpuidle_unregister(drv);
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(cpuidle_register);
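
/*
 * Minimal registration sketch (illustrative only: the names, latencies and
 * the my_enter_wfi() callback below are made up, not taken from a real
 * driver):
 *
 *	static struct cpuidle_driver my_idle_driver = {
 *		.name = "my_idle",
 *		.owner = THIS_MODULE,
 *		.states[0] = {
 *			.name = "WFI",
 *			.desc = "wait for interrupt",
 *			.exit_latency = 1,
 *			.target_residency = 1,
 *			.enter = my_enter_wfi,
 *		},
 *		.state_count = 1,
 *	};
 *
 *	ret = cpuidle_register(&my_idle_driver, NULL);
 */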

#ifdef CONFIG_SMP

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement. This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int cpuidle_latency_notify(struct notifier_block *b,
				  unsigned long l, void *v)
{
	wake_up_all_idle_cpus();
	return NOTIFY_OK;
}

static struct notifier_block cpuidle_latency_notifier = {
	.notifier_call = cpuidle_latency_notify,
};

static inline void latency_notifier_init(struct notifier_block *n)
{
	pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
}

#else /* CONFIG_SMP */

#define latency_notifier_init(x) do { } while (0)

#endif /* CONFIG_SMP */

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
	int ret;

	if (cpuidle_disabled())
		return -ENODEV;

	ret = cpuidle_add_interface(cpu_subsys.dev_root);
	if (ret)
		return ret;

	latency_notifier_init(&cpuidle_latency_notifier);

	return 0;
}

module_param(off, int, 0444);
module_param_string(governor, param_governor, CPUIDLE_NAME_LEN, 0444);
core_initcall(cpuidle_init);