/*
 * linux/drivers/cpufreq/cpufreq.c
 *
 * Copyright (C) 2001 Russell King
 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *
 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/syscore_ops.h>

#include <trace/events/power.h>

/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
#ifdef CONFIG_HOTPLUG_CPU
/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
#endif
static DEFINE_RWLOCK(cpufreq_driver_lock);

/*
 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
 * all cpufreq/hotplug/workqueue/etc related lock issues.
 *
 * The rules for this semaphore:
 * - Any routine that wants to read from the policy structure will
 *   do a down_read on this semaphore.
 * - Any routine that will write to the policy structure and/or may take away
 *   the policy altogether (eg. CPU hotplug), will hold this lock in write
 *   mode before doing so.
 *
 * Additional rules:
 * - Governor routines that can be called in the cpufreq hotplug path must
 *   not take this semaphore, because the top-level hotplug notifier handler
 *   already holds it.
 * - Lock should not be held across
 *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
 */
static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);

#define lock_policy_rwsem(mode, cpu)					\
static int lock_policy_rwsem_##mode(int cpu)				\
{									\
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
	BUG_ON(policy_cpu == -1);					\
	down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
									\
	return 0;							\
}

lock_policy_rwsem(read, cpu);
lock_policy_rwsem(write, cpu);

#define unlock_policy_rwsem(mode, cpu)					\
static void unlock_policy_rwsem_##mode(int cpu)				\
{									\
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
	BUG_ON(policy_cpu == -1);					\
	up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
}

unlock_policy_rwsem(read, cpu);
unlock_policy_rwsem(write, cpu);
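/*
 * The macros above expand to lock_policy_rwsem_read()/_write() and their
 * unlock counterparts.  Callers pair them around policy accesses; a sketch
 * of the read-side pattern used by show() and cpufreq_get() below:
 *
 *	if (lock_policy_rwsem_read(cpu) < 0)
 *		return -EINVAL;
 *	... read policy fields ...
 *	unlock_policy_rwsem_read(cpu);
 */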

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
	return cpufreq_driver->have_governor_per_policy;
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
	if (have_governor_per_policy())
		return &policy->kobj;
	else
		return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
{
	struct cpufreq_policy *data;
	unsigned long flags;

	if (cpu >= nr_cpu_ids)
		goto err_out;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (!cpufreq_driver)
		goto err_out_unlock;

	if (!try_module_get(cpufreq_driver->owner))
		goto err_out_unlock;


	/* get the CPU */
	data = per_cpu(cpufreq_cpu_data, cpu);

	if (!data)
		goto err_out_put_module;

	if (!sysfs && !kobject_get(&data->kobj))
		goto err_out_put_module;

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return data;

err_out_put_module:
	module_put(cpufreq_driver->owner);
err_out_unlock:
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out:
	return NULL;
}

struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	if (cpufreq_disabled())
		return NULL;

	return __cpufreq_cpu_get(cpu, false);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
{
	return __cpufreq_cpu_get(cpu, true);
}

static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
{
	if (!sysfs)
		kobject_put(&data->kobj);
	module_put(cpufreq_driver->owner);
}

void cpufreq_cpu_put(struct cpufreq_policy *data)
{
	if (cpufreq_disabled())
		return;

	__cpufreq_cpu_put(data, false);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);

static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
{
	__cpufreq_cpu_put(data, true);
}
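
/*
 * Reference pairing, as a sketch: every successful cpufreq_cpu_get() takes
 * both a kobject reference on the policy and a module reference on the
 * driver, so it must be balanced by exactly one cpufreq_cpu_put():
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *	if (!policy)
 *		return -EINVAL;
 *	... use policy ...
 *	cpufreq_cpu_put(policy);
 */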

/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; "
			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
	}
	if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu "
			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
	}
}
#else
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	return;
}
#endif
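
/*
 * Worked example for the scaling above (numbers purely illustrative): with
 * l_p_j_ref = 2000000 saved at l_p_j_ref_freq = 1000000 kHz, a transition
 * to 500000 kHz gives
 *
 *	loops_per_jiffy = cpufreq_scale(2000000, 1000000, 500000)
 *			= 2000000 * 500000 / 1000000 = 1000000
 *
 * i.e. loops_per_jiffy scales linearly with the new frequency.
 */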


void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		state, freqs->new);

	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is"
					" %u, cpufreq assumed %u kHz.\n",
					freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
			(unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
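
/*
 * A transition notifier receives the PRECHANGE/POSTCHANGE events sent
 * above.  Minimal sketch of a callback (all "foo" names are hypothetical),
 * registered via cpufreq_register_notifier(&nb, CPUFREQ_TRANSITION_NOTIFIER):
 *
 *	static int foo_cpufreq_notify(struct notifier_block *nb,
 *				      unsigned long state, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (state == CPUFREQ_POSTCHANGE)
 *			foo_rescale(freqs->cpu, freqs->new);
 *		return NOTIFY_OK;
 *	}
 */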



/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/

static struct cpufreq_governor *__find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	list_for_each_entry(t, &cpufreq_governor_list, governor_list)
		if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strnicmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else if (cpufreq_driver->target) {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = __find_governor(str_governor);

		if (t == NULL) {
			int ret;

			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = __find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}


/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
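
/*
 * For instance, show_one(scaling_min_freq, min) above expands to:
 *
 *	static ssize_t show_scaling_min_freq
 *	(struct cpufreq_policy *policy, char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->min);
 *	}
 */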

static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)					\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	unsigned int ret;						\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = __cpufreq_set_policy(policy, &new_policy);		\
	policy->user_policy.object = policy->object;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
					char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy->cpu);
	if (!cur_freq)
		return sprintf(buf, "<unknown>");
	return sprintf(buf, "%u\n", cur_freq);
}


/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				policy->governor->name);
	return -EINVAL;
}


/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int ret;
	char str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	/* Do not use cpufreq_set_policy here or the user_policy.max
	   will be wrongly overridden */
	ret = __cpufreq_set_policy(policy, &new_policy);

	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}

/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!cpufreq_driver->target) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}

static ssize_t show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}

/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}

/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;
	if (cpufreq_driver->bios_limit) {
		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
		if (!ret)
			return sprintf(buf, "%u\n", limit);
	}
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get_sysfs(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_read(policy->cpu) < 0)
		goto fail;

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	unlock_policy_rwsem_read(policy->cpu);
fail:
	cpufreq_cpu_put_sysfs(policy);
no_policy:
	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get_sysfs(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_write(policy->cpu) < 0)
		goto fail;

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	unlock_policy_rwsem_write(policy->cpu);
fail:
	cpufreq_cpu_put_sysfs(policy);
no_policy:
	return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};

/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(unsigned int cpu,
				   struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpufreq_policy *managed_policy;
		struct device *cpu_dev;

		if (j == cpu)
			continue;

		pr_debug("CPU %u already managed, adding link\n", j);
		managed_policy = cpufreq_cpu_get(cpu);
		cpu_dev = get_cpu_device(j);
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret) {
			cpufreq_cpu_put(managed_policy);
			return ret;
		}
	}
	return ret;
}

static int cpufreq_add_dev_interface(unsigned int cpu,
				     struct cpufreq_policy *policy,
				     struct device *dev)
{
	struct cpufreq_policy new_policy;
	struct freq_attr **drv_attr;
	unsigned long flags;
	int ret = 0;
	unsigned int j;

	/* prepare interface data */
	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   &dev->kobj, "cpufreq");
	if (ret)
		return ret;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			goto err_out_kobj_put;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->target) {
		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
		if (ret)
			goto err_out_kobj_put;
	}
	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			goto err_out_kobj_put;
	}

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus) {
		per_cpu(cpufreq_cpu_data, j) = policy;
		per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
	}
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_add_dev_symlink(cpu, policy);
	if (ret)
		goto err_out_kobj_put;

	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
	/* assure that the starting sequence is run in __cpufreq_set_policy */
	policy->governor = NULL;

	/* set default policy */
	ret = __cpufreq_set_policy(policy, &new_policy);
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret) {
		pr_debug("setting policy failed\n");
		if (cpufreq_driver->exit)
			cpufreq_driver->exit(policy);
	}
	return ret;

err_out_kobj_put:
	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);
	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
				  struct device *dev)
{
	struct cpufreq_policy *policy;
	int ret = 0, has_target = !!cpufreq_driver->target;
	unsigned long flags;

	policy = cpufreq_cpu_get(sibling);
	WARN_ON(!policy);

	if (has_target)
		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);

	lock_policy_rwsem_write(sibling);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	unlock_policy_rwsem_write(sibling);

	if (has_target) {
		__cpufreq_governor(policy, CPUFREQ_GOV_START);
		__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
	}

	ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
	if (ret) {
		cpufreq_cpu_put(policy);
		return ret;
	}

	return 0;
}
#endif

/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration
 * concurrently with cpu hotplugging and all hell will break loose. Tried
 * to clean this mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int j, cpu = dev->id;
	int ret = -ENOMEM;
	struct cpufreq_policy *policy;
	unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
	struct cpufreq_governor *gov;
	int sibling;
#endif

	if (cpu_is_offline(cpu))
		return 0;

	pr_debug("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get(cpu);
	if (unlikely(policy)) {
		cpufreq_cpu_put(policy);
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/* Check if this cpu was hot-unplugged earlier and has siblings */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_online_cpu(sibling) {
		struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
		if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
			read_unlock_irqrestore(&cpufreq_driver_lock, flags);
			return cpufreq_add_policy_cpu(cpu, sibling, dev);
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif
#endif

	if (!try_module_get(cpufreq_driver->owner)) {
		ret = -EINVAL;
		goto module_out;
	}

	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
	if (!policy)
		goto nomem_out;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	policy->cpu = cpu;
	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
	cpumask_copy(policy->cpus, cpumask_of(cpu));

	/* Initially set CPU itself as the policy_cpu */
	per_cpu(cpufreq_policy_cpu, cpu) = cpu;

	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
		goto err_set_policy_cpu;
	}

	/* related cpus should at least have policy->cpus */
	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

	/*
	 * affected cpus must always be the ones that are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	policy->user_policy.min = policy->min;
	policy->user_policy.max = policy->max;

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

#ifdef CONFIG_HOTPLUG_CPU
	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
	if (gov) {
		policy->governor = gov;
		pr_debug("Restoring governor %s for cpu %d\n",
		       policy->governor->name, cpu);
	}
#endif

	ret = cpufreq_add_dev_interface(cpu, policy, dev);
	if (ret)
		goto err_out_unregister;

	kobject_uevent(&policy->kobj, KOBJ_ADD);
	module_put(cpufreq_driver->owner);
	pr_debug("initialization complete\n");

	return 0;

err_out_unregister:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);

err_set_policy_cpu:
	per_cpu(cpufreq_policy_cpu, cpu) = -1;
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);
nomem_out:
	module_put(cpufreq_driver->owner);
module_out:
	return ret;
}

static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	int j;

	policy->last_cpu = policy->cpu;
	policy->cpu = cpu;

	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_policy_cpu, j) = cpu;

#ifdef CONFIG_CPU_FREQ_TABLE
	cpufreq_frequency_table_update_policy_cpu(policy);
#endif
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_UPDATE_POLICY_CPU, policy);
}

/**
 * __cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 * Caller should already have policy_rwsem in write mode for this CPU.
 * This routine frees the rwsem before returning.
 */
static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id, ret, cpus;
	unsigned long flags;
	struct cpufreq_policy *data;
	struct kobject *kobj;
	struct completion *cmp;
	struct device *cpu_dev;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	data = per_cpu(cpufreq_cpu_data, cpu);
	per_cpu(cpufreq_cpu_data, cpu) = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!data) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	if (cpufreq_driver->target)
		__cpufreq_governor(data, CPUFREQ_GOV_STOP);

#ifdef CONFIG_HOTPLUG_CPU
	if (!cpufreq_driver->setpolicy)
		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
			data->governor->name, CPUFREQ_NAME_LEN);
#endif

	WARN_ON(lock_policy_rwsem_write(cpu));
	cpus = cpumask_weight(data->cpus);

	if (cpus > 1)
		cpumask_clear_cpu(cpu, data->cpus);
	unlock_policy_rwsem_write(cpu);

	if (cpu != data->cpu) {
		sysfs_remove_link(&dev->kobj, "cpufreq");
	} else if (cpus > 1) {
		/* first sibling now owns the new sysfs dir */
		cpu_dev = get_cpu_device(cpumask_first(data->cpus));
		sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
		ret = kobject_move(&data->kobj, &cpu_dev->kobj);
		if (ret) {
			pr_err("%s: Failed to move kobj: %d", __func__, ret);

			WARN_ON(lock_policy_rwsem_write(cpu));
			cpumask_set_cpu(cpu, data->cpus);

			write_lock_irqsave(&cpufreq_driver_lock, flags);
			per_cpu(cpufreq_cpu_data, cpu) = data;
			write_unlock_irqrestore(&cpufreq_driver_lock, flags);

			unlock_policy_rwsem_write(cpu);

			ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
					"cpufreq");
			return -EINVAL;
		}

		WARN_ON(lock_policy_rwsem_write(cpu));
		update_policy_cpu(data, cpu_dev->id);
		unlock_policy_rwsem_write(cpu);
		pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
				__func__, cpu_dev->id, cpu);
	}

	if ((cpus == 1) && (cpufreq_driver->target))
		__cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);

	pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
	cpufreq_cpu_put(data);

	/* If cpu is last user of policy, free policy */
	if (cpus == 1) {
		lock_policy_rwsem_read(cpu);
		kobj = &data->kobj;
		cmp = &data->kobj_unregister;
		unlock_policy_rwsem_read(cpu);
		kobject_put(kobj);

		/* we need to make sure that the underlying kobj is actually
		 * not referenced anymore by anybody before we proceed with
		 * unloading.
		 */
		pr_debug("waiting for dropping of refcount\n");
		wait_for_completion(cmp);
		pr_debug("wait complete\n");

		if (cpufreq_driver->exit)
			cpufreq_driver->exit(data);

		free_cpumask_var(data->related_cpus);
		free_cpumask_var(data->cpus);
		kfree(data);
	} else if (cpufreq_driver->target) {
		__cpufreq_governor(data, CPUFREQ_GOV_START);
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

	per_cpu(cpufreq_policy_cpu, cpu) = -1;
	return 0;
}


static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	int retval;

	if (cpu_is_offline(cpu))
		return 0;

	retval = __cpufreq_remove_dev(dev, sif);
	return retval;
}


static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);
	unsigned int cpu = policy->cpu;
	pr_debug("handle_update for cpu %u called\n", cpu);
	cpufreq_update_policy(cpu);
}

/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differ, we're in deep trouble.
 * @cpu: cpu number
 * @old_freq: CPU frequency the kernel thinks the CPU runs at
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to current frequency first, and need to clean up later.
 * So either call cpufreq_update_policy() or schedule handle_update().
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
				unsigned int new_freq)
{
	struct cpufreq_policy *policy;
	struct cpufreq_freqs freqs;
	unsigned long flags;


	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
	       "core thinks of %u, is %u kHz.\n", old_freq, new_freq);

	freqs.old = old_freq;
	freqs.new = new_freq;

	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
}


/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned int ret_freq = 0;

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
		return cpufreq_driver->get(cpu);

	policy = cpufreq_cpu_get(cpu);
	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);

/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->max;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);


static unsigned int __cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return ret_freq;

	ret_freq = cpufreq_driver->get(cpu);

	if (ret_freq && policy->cur &&
		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and
					saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}

/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the current CPU frequency as reported by the driver.
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	unsigned int ret_freq = 0;
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		goto out;

	if (unlikely(lock_policy_rwsem_read(cpu)))
		goto out_policy;

	ret_freq = __cpufreq_get(cpu);

	unlock_policy_rwsem_read(cpu);

out_policy:
	cpufreq_cpu_put(policy);
out:
	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);

static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};


/**
 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
 *
 * This function is only executed for the boot processor.  The other CPUs
 * have been put offline by means of CPU hotplug.
 */
static int cpufreq_bp_suspend(void)
{
	int ret = 0;

	int cpu = smp_processor_id();
	struct cpufreq_policy *cpu_policy;

	pr_debug("suspending cpu %u\n", cpu);

	/* If there's no policy for the boot CPU, we have nothing to do. */
	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return 0;

	if (cpufreq_driver->suspend) {
		ret = cpufreq_driver->suspend(cpu_policy);
		if (ret)
			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
					"step on CPU %u\n", cpu_policy->cpu);
	}

	cpufreq_cpu_put(cpu_policy);
	return ret;
}

/**
 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
 *
 *	1.) resume CPUfreq hardware support (cpufreq_driver->resume())
 *	2.) schedule call cpufreq_update_policy() ASAP as interrupts are
 *	    restored. It will verify that the current freq is in sync with
 *	    what we believe it to be. This is a bit later than when it
 *	    should be, but nonetheless it's better than calling
 *	    cpufreq_driver->get() here which might re-enable interrupts...
 *
 * This function is only executed for the boot CPU.  The other CPUs have not
 * been turned on yet.
 */
static void cpufreq_bp_resume(void)
{
	int ret = 0;

	int cpu = smp_processor_id();
	struct cpufreq_policy *cpu_policy;

	pr_debug("resuming cpu %u\n", cpu);

	/* If there's no policy for the boot CPU, we have nothing to do. */
	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return;

	if (cpufreq_driver->resume) {
		ret = cpufreq_driver->resume(cpu_policy);
		if (ret) {
			printk(KERN_ERR "cpufreq: resume failed in ->resume "
					"step on CPU %u\n", cpu_policy->cpu);
			goto fail;
		}
	}

	schedule_work(&cpu_policy->update);

fail:
	cpufreq_cpu_put(cpu_policy);
}

static struct syscore_ops cpufreq_syscore_ops = {
	.suspend	= cpufreq_bp_suspend,
	.resume		= cpufreq_bp_resume,
};

/**
 * cpufreq_get_current_driver - return current driver's name
 *
 * Return the name string of the currently loaded cpufreq driver
 * or NULL, if none.
 */
const char *cpufreq_get_current_driver(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->name;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);

/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	WARN_ON(!init_cpufreq_transition_notifier_list_called);

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);


/**
 * cpufreq_unregister_notifier - unregister a driver with cpufreq
 * @nb: notifier block to be unregistered
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Remove a driver from the CPU frequency notifier list.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);


/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/


int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	int retval = -EINVAL;
	unsigned int old_target_freq = target_freq;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	if (target_freq > policy->max)
		target_freq = policy->max;
	if (target_freq < policy->min)
		target_freq = policy->min;

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
			policy->cpu, target_freq, relation, old_target_freq);

	if (target_freq == policy->cur)
		return 0;

	if (cpufreq_driver->target)
		retval = cpufreq_driver->target(policy, target_freq, relation);

	return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);

int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret = -EINVAL;

	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		goto no_policy;

	if (unlikely(lock_policy_rwsem_write(policy->cpu)))
		goto fail;

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	unlock_policy_rwsem_write(policy->cpu);

fail:
	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
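
/*
 * Governors request frequency changes through the wrapper above, e.g.
 * (sketch) cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H)
 * to pick the highest supported frequency at or below policy->max.
 */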

int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;

	if (cpufreq_disabled())
		return ret;

	if (!cpufreq_driver->getavg)
		return 0;

	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		return -EINVAL;

	ret = cpufreq_driver->getavg(policy, cpu);

	cpufreq_cpu_put(policy);
	return ret;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);

/*
 * when "event" is CPUFREQ_GOV_LIMITS
 */

static int __cpufreq_governor(struct cpufreq_policy *policy,
					unsigned int event)
{
	int ret;

	/* The fallback governor below must only be defined when the default
	 * governor is known to have latency restrictions, e.g. conservative
	 * or ondemand; Kconfig already ensures that this is the case.
	 */
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			printk(KERN_WARNING "%s governor failed, too long"
			       " transition latency of HW, fallback"
			       " to %s governor\n",
			       policy->governor->name,
			       gov->name);
			policy->governor = gov;
		}
	}

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
						policy->cpu, event);
	ret = policy->governor->governor(policy, event);

	if (!ret) {
		if (event == CPUFREQ_GOV_POLICY_INIT)
			policy->governor->initialized++;
		else if (event == CPUFREQ_GOV_POLICY_EXIT)
			policy->governor->initialized--;
	}

	/* we keep one module reference alive for
			each CPU governed by this CPU */
	if ((event != CPUFREQ_GOV_START) || ret)
		module_put(policy->governor->owner);
	if ((event == CPUFREQ_GOV_STOP) && !ret)
		module_put(policy->governor->owner);

	return ret;
}


int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	mutex_lock(&cpufreq_governor_mutex);

	governor->initialized = 0;
	err = -EBUSY;
	if (__find_governor(governor->name) == NULL) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
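
/*
 * A governor module typically registers itself like this (sketch only,
 * the "foo" names are hypothetical):
 *
 *	static struct cpufreq_governor cpufreq_gov_foo = {
 *		.name		= "foo",
 *		.governor	= cpufreq_governor_foo,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init cpufreq_gov_foo_init(void)
 *	{
 *		return cpufreq_register_governor(&cpufreq_gov_foo);
 *	}
 */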


void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
#ifdef CONFIG_HOTPLUG_CPU
	int cpu;
#endif

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

#ifdef CONFIG_HOTPLUG_CPU
	for_each_present_cpu(cpu) {
		if (cpu_online(cpu))
			continue;
		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
	}
#endif

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
	return;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);



/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;
	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));

	cpufreq_cpu_put(cpu_policy);
	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);


/*
 * data   : current policy.
 * policy : policy to be set.
 */
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy)
{
	int ret = 0, failed = 1;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
		policy->min, policy->max);

	memcpy(&policy->cpuinfo, &data->cpuinfo,
				sizeof(struct cpufreq_cpuinfo));

	if (policy->min > data->max || policy->max < data->min) {
		ret = -EINVAL;
		goto error_out;
	}

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, policy);

	/* adjust if necessary - hardware incompatibility*/
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, policy);

	/* verify the cpu speed can be set within this limit,
	   which might be different to the first one */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, policy);

	data->min = policy->min;
	data->max = policy->max;
	trace_cpu_frequency_limits(policy->max, policy->min, policy->cpu);

	pr_debug("new min and max freqs are %u - %u kHz\n",
					data->min, data->max);

	if (cpufreq_driver->setpolicy) {
		data->policy = policy->policy;
		pr_debug("setting range\n");
		ret = cpufreq_driver->setpolicy(policy);
	} else {
		if (policy->governor != data->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = data->governor;

			pr_debug("governor switch\n");

			/* end old governor */
			if (data->governor) {
				__cpufreq_governor(data, CPUFREQ_GOV_STOP);
				unlock_policy_rwsem_write(policy->cpu);
				__cpufreq_governor(data,
						CPUFREQ_GOV_POLICY_EXIT);
				lock_policy_rwsem_write(policy->cpu);
			}

			/* start new governor */
			data->governor = policy->governor;
			if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
				if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) {
					failed = 0;
				} else {
					unlock_policy_rwsem_write(policy->cpu);
					__cpufreq_governor(data,
							CPUFREQ_GOV_POLICY_EXIT);
					lock_policy_rwsem_write(policy->cpu);
				}
			}

			if (failed) {
				/* new governor failed, so re-start old one */
				pr_debug("starting governor %s failed\n",
							data->governor->name);
				if (old_gov) {
					data->governor = old_gov;
					__cpufreq_governor(data,
							CPUFREQ_GOV_POLICY_INIT);
					__cpufreq_governor(data,
							CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		pr_debug("governor: change or update limits\n");
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

error_out:
	return ret;
}

/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different necessities
 * at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
	struct cpufreq_policy policy;
	int ret;

	if (!data) {
		ret = -ENODEV;
		goto no_policy;
	}

	if (unlikely(lock_policy_rwsem_write(cpu))) {
		ret = -EINVAL;
		goto fail;
	}

	pr_debug("updating policy for CPU %u\n", cpu);
	memcpy(&policy, data, sizeof(struct cpufreq_policy));
	policy.min = data->user_policy.min;
	policy.max = data->user_policy.max;
	policy.policy = data->user_policy.policy;
	policy.governor = data->user_policy.governor;

	/* BIOS might change freq behind our back
	  -> ask driver for current freq and notify governors about a change */
	if (cpufreq_driver->get) {
		policy.cur = cpufreq_driver->get(cpu);
		if (!data->cur) {
			pr_debug("Driver did not initialize current freq");
			data->cur = policy.cur;
		} else {
			if (data->cur != policy.cur && cpufreq_driver->target)
				cpufreq_out_of_sync(cpu, data->cur,
								policy.cur);
		}
	}

	ret = __cpufreq_set_policy(data, &policy);

	unlock_policy_rwsem_write(cpu);

fail:
	cpufreq_cpu_put(data);
no_policy:
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);

static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *dev;

	dev = get_cpu_device(cpu);
	if (dev) {
		switch (action) {
		case CPU_ONLINE:
			cpufreq_add_dev(dev, NULL);
			break;
		case CPU_DOWN_PREPARE:
		case CPU_UP_CANCELED_FROZEN:
			__cpufreq_remove_dev(dev, NULL);
			break;
		case CPU_DOWN_FAILED:
			cpufreq_add_dev(dev, NULL);
			break;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};

/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    ((!driver_data->setpolicy) && (!driver_data->target)))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EBUSY;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_null_driver;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			pr_debug("no CPU initialized for driver %s\n",
							driver_data->name);
			goto err_if_unreg;
		}
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
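
/*
 * A minimal registration from a driver's init path looks like this (sketch,
 * the "foo" names are hypothetical; as checked above, a driver must provide
 * ->verify, ->init and one of ->setpolicy/->target):
 *
 *	static struct cpufreq_driver foo_cpufreq_driver = {
 *		.name	= "foo",
 *		.owner	= THIS_MODULE,
 *		.init	= foo_cpu_init,
 *		.verify	= foo_verify,
 *		.target	= foo_target,
 *	};
 *
 *	ret = cpufreq_register_driver(&foo_cpufreq_driver);
 */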


/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	subsys_interface_unregister(&cpufreq_interface);
	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);

static int __init cpufreq_core_init(void)
{
	int cpu;

	if (cpufreq_disabled())
		return -ENODEV;

	for_each_possible_cpu(cpu) {
		per_cpu(cpufreq_policy_cpu, cpu) = -1;
		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
	}

	cpufreq_global_kobject = kobject_create_and_add("cpufreq",
						&cpu_subsys.dev_root->kobj);
	BUG_ON(!cpufreq_global_kobject);
	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);