/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/completion.h>
#include <linux/mutex.h>

#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \
						"cpufreq-core", msg)

/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
#ifdef CONFIG_HOTPLUG_CPU
/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(struct cpufreq_governor *, cpufreq_cpu_governor);
#endif
static DEFINE_SPINLOCK(cpufreq_driver_lock);

/*
 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
 * all cpufreq/hotplug/workqueue/etc related lock issues.
 *
 * The rules for this semaphore:
 * - Any routine that wants to read from the policy structure will
 *   do a down_read on this semaphore.
 * - Any routine that will write to the policy structure and/or may take away
 *   the policy altogether (eg. CPU hotplug), will hold this lock in write
 *   mode before doing so.
 *
 * Additional rules:
 * - All holders of the lock should check to make sure that the CPU they
 *   are concerned with is still online after they get the lock.
 * - Governor routines that can be called in the cpufreq hotplug path must
 *   not take this semaphore, as the top-level hotplug notifier handler
 *   already takes it.
 */
static DEFINE_PER_CPU(int, policy_cpu);
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);

#define lock_policy_rwsem(mode, cpu)					\
int lock_policy_rwsem_##mode						\
(int cpu)								\
{									\
	int policy_cpu = per_cpu(policy_cpu, cpu);			\
	BUG_ON(policy_cpu == -1);					\
	down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
	if (unlikely(!cpu_online(cpu))) {				\
		up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));	\
		return -1;						\
	}								\
									\
	return 0;							\
}

lock_policy_rwsem(read, cpu);
EXPORT_SYMBOL_GPL(lock_policy_rwsem_read);

lock_policy_rwsem(write, cpu);
EXPORT_SYMBOL_GPL(lock_policy_rwsem_write);

void unlock_policy_rwsem_read(int cpu)
{
	int policy_cpu = per_cpu(policy_cpu, cpu);
	BUG_ON(policy_cpu == -1);
	up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
}
EXPORT_SYMBOL_GPL(unlock_policy_rwsem_read);

void unlock_policy_rwsem_write(int cpu)
{
	int policy_cpu = per_cpu(policy_cpu, cpu);
	BUG_ON(policy_cpu == -1);
	up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
}
EXPORT_SYMBOL_GPL(unlock_policy_rwsem_write);
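
/*
 * Example (a sketch, not part of the original file; do_something() is a
 * stand-in): a reader that wants a consistent view of a policy brackets
 * the access like this, bailing out if the CPU went offline meanwhile:
 *
 *	if (lock_policy_rwsem_read(cpu) < 0)
 *		return;
 *	policy = per_cpu(cpufreq_cpu_data, cpu);
 *	if (policy)
 *		do_something(policy->cur);
 *	unlock_policy_rwsem_read(cpu);
 */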


/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list is for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);

static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);

struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *data;
	unsigned long flags;

	if (cpu >= nr_cpu_ids)
		goto err_out;

	/* get the cpufreq driver */
	spin_lock_irqsave(&cpufreq_driver_lock, flags);

	if (!cpufreq_driver)
		goto err_out_unlock;

	if (!try_module_get(cpufreq_driver->owner))
		goto err_out_unlock;


	/* get the CPU */
	data = per_cpu(cpufreq_cpu_data, cpu);

	if (!data)
		goto err_out_put_module;

	if (!kobject_get(&data->kobj))
		goto err_out_put_module;

	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return data;

err_out_put_module:
	module_put(cpufreq_driver->owner);
err_out_unlock:
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out:
	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);


void cpufreq_cpu_put(struct cpufreq_policy *data)
{
	kobject_put(&data->kobj);
	module_put(cpufreq_driver->owner);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
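
/*
 * Example (illustrative sketch): every successful cpufreq_cpu_get() must
 * be balanced by exactly one cpufreq_cpu_put(), or the policy kobject and
 * the driver module reference are never released:
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *	if (!policy)
 *		return -ENODEV;
 *	pr_info("cpu%u last set to %u kHz\n", cpu, policy->cur);
 *	cpufreq_cpu_put(policy);
 */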


/*********************************************************************
 *                     UNIFIED DEBUG HELPERS                         *
 *********************************************************************/
#ifdef CONFIG_CPU_FREQ_DEBUG

/* what part(s) of the CPUfreq subsystem are debugged? */
static unsigned int debug;

/* is the debug output ratelimit'ed using printk_ratelimit? User can
 * set or modify this value.
 */
static unsigned int debug_ratelimit = 1;

/* is the printk_ratelimit'ing enabled? It's enabled after a successful
 * loading of a cpufreq driver, temporarily disabled when a new policy
 * is set, and disabled upon cpufreq driver removal
 */
static unsigned int disable_ratelimit = 1;
static DEFINE_SPINLOCK(disable_ratelimit_lock);

static void cpufreq_debug_enable_ratelimit(void)
{
	unsigned long flags;

	spin_lock_irqsave(&disable_ratelimit_lock, flags);
	if (disable_ratelimit)
		disable_ratelimit--;
	spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
}

static void cpufreq_debug_disable_ratelimit(void)
{
	unsigned long flags;

	spin_lock_irqsave(&disable_ratelimit_lock, flags);
	disable_ratelimit++;
	spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
}

void cpufreq_debug_printk(unsigned int type, const char *prefix,
			const char *fmt, ...)
{
	char s[256];
	va_list args;
	unsigned int len;
	unsigned long flags;

	WARN_ON(!prefix);
	if (type & debug) {
		spin_lock_irqsave(&disable_ratelimit_lock, flags);
		if (!disable_ratelimit && debug_ratelimit
					&& !printk_ratelimit()) {
			spin_unlock_irqrestore(&disable_ratelimit_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&disable_ratelimit_lock, flags);

		len = snprintf(s, 256, KERN_DEBUG "%s: ", prefix);

		va_start(args, fmt);
		len += vsnprintf(&s[len], (256 - len), fmt, args);
		va_end(args);

		printk(s);

		WARN_ON(len < 5);
	}
}
EXPORT_SYMBOL(cpufreq_debug_printk);


module_param(debug, uint, 0644);
MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core,"
			" 2 to debug drivers, and 4 to debug governors.");

module_param(debug_ratelimit, uint, 0644);
MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging:"
					" set to 0 to disable ratelimiting.");

#else /* !CONFIG_CPU_FREQ_DEBUG */

static inline void cpufreq_debug_enable_ratelimit(void) { return; }
static inline void cpufreq_debug_disable_ratelimit(void) { return; }

#endif /* CONFIG_CPU_FREQ_DEBUG */


/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;
static unsigned int  l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		dprintk("saving %lu as reference value for loops_per_jiffy; "
			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
	}
	if ((val == CPUFREQ_PRECHANGE  && ci->old < ci->new) ||
	    (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		dprintk("scaling loops_per_jiffy to %lu "
			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
	}
}
#else
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	return;
}
#endif
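
/*
 * Worked example (illustrative, assuming cpufreq_scale() computes a plain
 * ratio): with l_p_j_ref = 4997120 saved at l_p_j_ref_freq = 1000000 kHz,
 * a transition to ci->new = 500000 kHz yields
 *	loops_per_jiffy = 4997120 * 500000 / 1000000 = 2498560,
 * i.e. the delay-loop calibration scales linearly with the clock so that
 * udelay() keeps roughly the same wall-clock duration.
 */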


/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
{
	struct cpufreq_policy *policy;

	BUG_ON(irqs_disabled());

	freqs->flags = cpufreq_driver->flags;
	dprintk("notification %u of frequency transition to %u kHz\n",
		state, freqs->new);

	policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				dprintk("Warning: CPU frequency is"
					" %u, cpufreq assumed %u kHz.\n",
					freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
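
/*
 * Example driver-side usage (a sketch; write_hw_frequency() is a
 * hypothetical hardware poke): a scaling driver wraps the actual switch
 * in a PRECHANGE/POSTCHANGE pair so that notifiers and loops_per_jiffy
 * stay consistent:
 *
 *	struct cpufreq_freqs freqs = {
 *		.cpu = policy->cpu,
 *		.old = policy->cur,
 *		.new = target,
 *	};
 *	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 *	write_hw_frequency(target);
 *	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 */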



/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/

static struct cpufreq_governor *__find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	list_for_each_entry(t, &cpufreq_governor_list, governor_list)
		if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (!cpufreq_driver)
		goto out;

	if (cpufreq_driver->setpolicy) {
		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strnicmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else if (cpufreq_driver->target) {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = __find_governor(str_governor);

		if (t == NULL) {
			char *name = kasprintf(GFP_KERNEL, "cpufreq_%s",
								str_governor);

			if (name) {
				int ret;

				mutex_unlock(&cpufreq_governor_mutex);
				ret = request_module("%s", name);
				mutex_lock(&cpufreq_governor_mutex);

				if (ret == 0)
					t = __find_governor(str_governor);
			}

			kfree(name);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}


/* drivers/base/cpu.c */
extern struct sysdev_class cpu_sysdev_class;


/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);

static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)			\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	unsigned int ret = -EINVAL;					\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = __cpufreq_set_policy(policy, &new_policy);		\
	policy->user_policy.object = policy->object;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

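/*
 * These attributes surface per CPU under sysfs, e.g.
 * /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq; a write such as
 *	echo 800000 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq
 * lands in store_scaling_min_freq() and from there in
 * __cpufreq_set_policy().
 */
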
/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
					char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy->cpu);
	if (!cur_freq)
		return sprintf(buf, "<unknown>");
	return sprintf(buf, "%u\n", cur_freq);
}


/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n",
				policy->governor->name);
	return -EINVAL;
}


/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int ret = -EINVAL;
	char	str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	/* Do not use cpufreq_set_policy here or the user_policy.max
	   will be wrongly overridden */
	ret = __cpufreq_set_policy(policy, &new_policy);

	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}

/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!cpufreq_driver->target) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
				- (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_LEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}

static ssize_t show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}

/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	if (cpumask_empty(policy->related_cpus))
		return show_cpus(policy->cpus, buf);
	return show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}

#define define_one_ro(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)

#define define_one_ro0400(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0400, show_##_name, NULL)

#define define_one_rw(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)

define_one_ro0400(cpuinfo_cur_freq);
define_one_ro(cpuinfo_min_freq);
define_one_ro(cpuinfo_max_freq);
define_one_ro(scaling_available_governors);
define_one_ro(scaling_driver);
define_one_ro(scaling_cur_freq);
define_one_ro(related_cpus);
define_one_ro(affected_cpus);
define_one_rw(scaling_min_freq);
define_one_rw(scaling_max_freq);
define_one_rw(scaling_governor);
define_one_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_read(policy->cpu) < 0)
		goto fail;

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	unlock_policy_rwsem_read(policy->cpu);
fail:
	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_write(policy->cpu) < 0)
		goto fail;

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	unlock_policy_rwsem_write(policy->cpu);
fail:
	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	dprintk("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};


/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 */
static int cpufreq_add_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	int ret = 0;
	struct cpufreq_policy new_policy;
	struct cpufreq_policy *policy;
	struct freq_attr **drv_attr;
	struct sys_device *cpu_sys_dev;
	unsigned long flags;
	unsigned int j;
#ifdef CONFIG_SMP
	struct cpufreq_policy *managed_policy;
#endif

	if (cpu_is_offline(cpu))
		return 0;

	cpufreq_debug_disable_ratelimit();
	dprintk("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get(cpu);
	if (unlikely(policy)) {
		cpufreq_cpu_put(policy);
		cpufreq_debug_enable_ratelimit();
		return 0;
	}
#endif

	if (!try_module_get(cpufreq_driver->owner)) {
		ret = -EINVAL;
		goto module_out;
	}

	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
	if (!policy) {
		ret = -ENOMEM;
		goto nomem_out;
	}
	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) {
		kfree(policy);
		ret = -ENOMEM;
		goto nomem_out;
	}
	if (!alloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
		free_cpumask_var(policy->cpus);
		kfree(policy);
		ret = -ENOMEM;
		goto nomem_out;
	}

	policy->cpu = cpu;
	cpumask_copy(policy->cpus, cpumask_of(cpu));

	/* Initially set CPU itself as the policy_cpu */
	per_cpu(policy_cpu, cpu) = cpu;
	lock_policy_rwsem_write(cpu);

	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	/* Set governor before ->init, so that driver could check it */
	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		dprintk("initialization failed\n");
		goto err_out;
	}
	policy->user_policy.min = policy->min;
	policy->user_policy.max = policy->max;

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

#ifdef CONFIG_SMP

#ifdef CONFIG_HOTPLUG_CPU
	if (per_cpu(cpufreq_cpu_governor, cpu)) {
		policy->governor = per_cpu(cpufreq_cpu_governor, cpu);
		dprintk("Restoring governor %s for cpu %d\n",
		       policy->governor->name, cpu);
	}
#endif

	for_each_cpu(j, policy->cpus) {
		if (cpu == j)
			continue;

		/* check for existing affected CPUs.  They may not be aware
		 * of it due to CPU Hotplug.
		 */
		managed_policy = cpufreq_cpu_get(j);		// FIXME: Where is this released?  What about error paths?
		if (unlikely(managed_policy)) {

			/* Set proper policy_cpu */
			unlock_policy_rwsem_write(cpu);
			per_cpu(policy_cpu, cpu) = managed_policy->cpu;

			if (lock_policy_rwsem_write(cpu) < 0)
				goto err_out_driver_exit;

			spin_lock_irqsave(&cpufreq_driver_lock, flags);
			cpumask_copy(managed_policy->cpus, policy->cpus);
			per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

			dprintk("CPU already managed, adding link\n");
			ret = sysfs_create_link(&sys_dev->kobj,
						&managed_policy->kobj,
						"cpufreq");
			if (ret)
				goto err_out_driver_exit;

			cpufreq_debug_enable_ratelimit();
			ret = 0;
			goto err_out_driver_exit; /* call driver->exit() */
		}
	}
#endif
	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));

	/* prepare interface data */
	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   &sys_dev->kobj, "cpufreq");
	if (ret)
		goto err_out_driver_exit;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			goto err_out_driver_exit;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			goto err_out_driver_exit;
	}
	if (cpufreq_driver->target) {
		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
		if (ret)
			goto err_out_driver_exit;
	}

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus) {
		per_cpu(cpufreq_cpu_data, j) = policy;
		per_cpu(policy_cpu, j) = policy->cpu;
	}
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/* symlink affected CPUs */
	for_each_cpu(j, policy->cpus) {
		if (j == cpu)
			continue;
		if (!cpu_online(j))
			continue;

		dprintk("CPU %u already managed, adding link\n", j);
		cpufreq_cpu_get(cpu);
		cpu_sys_dev = get_cpu_sysdev(j);
		ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret)
			goto err_out_unregister;
	}

	policy->governor = NULL; /* to assure that the starting sequence is
				  * run in cpufreq_set_policy */

	/* set default policy */
	ret = __cpufreq_set_policy(policy, &new_policy);
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret) {
		dprintk("setting policy failed\n");
		goto err_out_unregister;
	}

	unlock_policy_rwsem_write(cpu);

	kobject_uevent(&policy->kobj, KOBJ_ADD);
	module_put(cpufreq_driver->owner);
	dprintk("initialization complete\n");
	cpufreq_debug_enable_ratelimit();

	return 0;


err_out_unregister:
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);

err_out_driver_exit:
	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);

err_out:
	unlock_policy_rwsem_write(cpu);
	kfree(policy);

nomem_out:
	module_put(cpufreq_driver->owner);
module_out:
	cpufreq_debug_enable_ratelimit();
	return ret;
}


/**
 * __cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 * Caller should already have policy_rwsem in write mode for this CPU.
 * This routine frees the rwsem before returning.
 */
static int __cpufreq_remove_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long flags;
	struct cpufreq_policy *data;
#ifdef CONFIG_SMP
	struct sys_device *cpu_sys_dev;
	unsigned int j;
#endif

	cpufreq_debug_disable_ratelimit();
	dprintk("unregistering CPU %u\n", cpu);

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	data = per_cpu(cpufreq_cpu_data, cpu);

	if (!data) {
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		cpufreq_debug_enable_ratelimit();
		unlock_policy_rwsem_write(cpu);
		return -EINVAL;
	}
	per_cpu(cpufreq_cpu_data, cpu) = NULL;


#ifdef CONFIG_SMP
	/* if this isn't the CPU which is the parent of the kobj, we
	 * only need to unlink, put and exit
	 */
	if (unlikely(cpu != data->cpu)) {
		dprintk("removing link\n");
		cpumask_clear_cpu(cpu, data->cpus);
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		sysfs_remove_link(&sys_dev->kobj, "cpufreq");
		cpufreq_cpu_put(data);
		cpufreq_debug_enable_ratelimit();
		unlock_policy_rwsem_write(cpu);
		return 0;
	}
#endif

#ifdef CONFIG_SMP

#ifdef CONFIG_HOTPLUG_CPU
	per_cpu(cpufreq_cpu_governor, cpu) = data->governor;
#endif

	/* if we have other CPUs still registered, we need to unlink them,
	 * or else wait_for_completion below will lock up. Clean the
	 * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
	 * the sysfs links afterwards.
	 */
	if (unlikely(cpumask_weight(data->cpus) > 1)) {
		for_each_cpu(j, data->cpus) {
			if (j == cpu)
				continue;
			per_cpu(cpufreq_cpu_data, j) = NULL;
		}
	}

	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (unlikely(cpumask_weight(data->cpus) > 1)) {
		for_each_cpu(j, data->cpus) {
			if (j == cpu)
				continue;
			dprintk("removing link for cpu %u\n", j);
#ifdef CONFIG_HOTPLUG_CPU
			per_cpu(cpufreq_cpu_governor, j) = data->governor;
#endif
			cpu_sys_dev = get_cpu_sysdev(j);
			sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq");
			cpufreq_cpu_put(data);
		}
	}
#else
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif

	if (cpufreq_driver->target)
		__cpufreq_governor(data, CPUFREQ_GOV_STOP);

	unlock_policy_rwsem_write(cpu);

	kobject_put(&data->kobj);

	/* we need to make sure that the underlying kobj is actually
	 * not referenced anymore by anybody before we proceed with
	 * unloading.
	 */
	dprintk("waiting for dropping of refcount\n");
	wait_for_completion(&data->kobj_unregister);
	dprintk("wait complete\n");

	if (cpufreq_driver->exit)
		cpufreq_driver->exit(data);

	free_cpumask_var(data->related_cpus);
	free_cpumask_var(data->cpus);
	kfree(data);
	per_cpu(cpufreq_cpu_data, cpu) = NULL;

	cpufreq_debug_enable_ratelimit();
	return 0;
}


static int cpufreq_remove_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	int retval;

	if (cpu_is_offline(cpu))
		return 0;

	if (unlikely(lock_policy_rwsem_write(cpu)))
		BUG();

	retval = __cpufreq_remove_dev(sys_dev);
	return retval;
}


static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);
	unsigned int cpu = policy->cpu;
	dprintk("handle_update for cpu %u called\n", cpu);
	cpufreq_update_policy(cpu);
}

/**
 *	cpufreq_out_of_sync - If the actual and saved CPU frequency differ, we're in deep trouble.
 *	@cpu: cpu number
 *	@old_freq: CPU frequency the kernel thinks the CPU runs at
 *	@new_freq: CPU frequency the CPU actually runs at
 *
 *	We adjust to the current frequency first, and need to clean up later.
 *	So either call cpufreq_update_policy() or schedule handle_update().
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
				unsigned int new_freq)
{
	struct cpufreq_freqs freqs;

	dprintk("Warning: CPU frequency out of sync: cpufreq and timing "
	       "core thinks of %u, is %u kHz.\n", old_freq, new_freq);

	freqs.cpu = cpu;
	freqs.old = old_freq;
	freqs.new = new_freq;
	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}


/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);
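
/*
 * Example (sketch): because cpufreq_quick_get() only reads policy->cur and
 * never calls into the driver, it is cheap enough for status paths:
 *
 *	unsigned int khz = cpufreq_quick_get(0);
 *	if (khz)
 *		pr_debug("cpu0 last known frequency: %u kHz\n", khz);
 */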


static unsigned int __cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return ret_freq;

	ret_freq = cpufreq_driver->get(cpu);

	if (ret_freq && policy->cur &&
		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual
		 * and saved value exists
		 */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}

/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the current (static) CPU frequency.
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	unsigned int ret_freq = 0;
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		goto out;

	if (unlikely(lock_policy_rwsem_read(cpu)))
		goto out_policy;

	ret_freq = __cpufreq_get(cpu);

	unlock_policy_rwsem_read(cpu);

out_policy:
	cpufreq_cpu_put(policy);
out:
	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);


/**
 *	cpufreq_suspend - let the low level driver prepare for suspend
 */

static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
{
	int cpu = sysdev->id;
	int ret = 0;
	unsigned int cur_freq = 0;
	struct cpufreq_policy *cpu_policy;

	dprintk("suspending cpu %u\n", cpu);

	if (!cpu_online(cpu))
		return 0;

	/* we may be lax here as interrupts are off. Nonetheless
	 * we need to grab the correct cpu policy, so as to check
	 * whether we really run on this CPU.
	 */

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	/* only handle each CPU group once */
	if (unlikely(cpu_policy->cpu != cpu))
		goto out;

	if (cpufreq_driver->suspend) {
		ret = cpufreq_driver->suspend(cpu_policy, pmsg);
		if (ret) {
			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
					"step on CPU %u\n", cpu_policy->cpu);
			goto out;
		}
	}

	if (cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)
		goto out;

	if (cpufreq_driver->get)
		cur_freq = cpufreq_driver->get(cpu_policy->cpu);

	if (!cur_freq || !cpu_policy->cur) {
		printk(KERN_ERR "cpufreq: suspend failed to assert current "
		       "frequency is what timing core thinks it is.\n");
		goto out;
	}

	if (unlikely(cur_freq != cpu_policy->cur)) {
		struct cpufreq_freqs freqs;

		if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
			dprintk("Warning: CPU frequency is %u, "
			       "cpufreq assumed %u kHz.\n",
			       cur_freq, cpu_policy->cur);

		freqs.cpu = cpu;
		freqs.old = cpu_policy->cur;
		freqs.new = cur_freq;

		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				    CPUFREQ_SUSPENDCHANGE, &freqs);
		adjust_jiffies(CPUFREQ_SUSPENDCHANGE, &freqs);

		cpu_policy->cur = cur_freq;
	}

out:
	cpufreq_cpu_put(cpu_policy);
	return ret;
}
/**
 *	cpufreq_resume - restore proper CPU frequency handling after resume
 *
 *	1.) resume CPUfreq hardware support (cpufreq_driver->resume())
 *	2.) if ->target and !CPUFREQ_CONST_LOOPS: verify we're in sync
 *	3.) schedule a call to cpufreq_update_policy() ASAP as interrupts
 *	    are restored.
 */
static int cpufreq_resume(struct sys_device *sysdev)
{
	int cpu = sysdev->id;
	int ret = 0;
	struct cpufreq_policy *cpu_policy;

	dprintk("resuming cpu %u\n", cpu);

	if (!cpu_online(cpu))
		return 0;

	/* we may be lax here as interrupts are off. Nonetheless
	 * we need to grab the correct cpu policy, so as to check
	 * whether we really run on this CPU.
	 */

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	/* only handle each CPU group once */
	if (unlikely(cpu_policy->cpu != cpu))
		goto fail;

	if (cpufreq_driver->resume) {
		ret = cpufreq_driver->resume(cpu_policy);
		if (ret) {
			printk(KERN_ERR "cpufreq: resume failed in ->resume "
					"step on CPU %u\n", cpu_policy->cpu);
			goto fail;
		}
	}

	if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		unsigned int cur_freq = 0;

		if (cpufreq_driver->get)
			cur_freq = cpufreq_driver->get(cpu_policy->cpu);

		if (!cur_freq || !cpu_policy->cur) {
			printk(KERN_ERR "cpufreq: resume failed to assert "
					"current frequency is what timing core "
					"thinks it is.\n");
			goto out;
		}

		if (unlikely(cur_freq != cpu_policy->cur)) {
			struct cpufreq_freqs freqs;

			if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
				dprintk("Warning: CPU frequency "
				       "is %u, cpufreq assumed %u kHz.\n",
				       cur_freq, cpu_policy->cur);

			freqs.cpu = cpu;
			freqs.old = cpu_policy->cur;
			freqs.new = cur_freq;

			srcu_notifier_call_chain(
					&cpufreq_transition_notifier_list,
					CPUFREQ_RESUMECHANGE, &freqs);
			adjust_jiffies(CPUFREQ_RESUMECHANGE, &freqs);

			cpu_policy->cur = cur_freq;
		}
	}

out:
	schedule_work(&cpu_policy->update);
fail:
	cpufreq_cpu_put(cpu_policy);
	return ret;
}

static struct sysdev_driver cpufreq_sysdev_driver = {
	.add		= cpufreq_add_dev,
	.remove		= cpufreq_remove_dev,
	.suspend	= cpufreq_suspend,
	.resume		= cpufreq_resume,
};


/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 *	cpufreq_register_notifier - register a driver with cpufreq
 *	@nb: notifier function to register
 *	@list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 *	Add a driver to one of two lists: either a list of drivers that
 *	are notified about clock rate changes (once before and once after
 *	the transition), or a list of drivers that are notified about
 *	changes in cpufreq policy.
 *
 *	This function may sleep, and has the same return conditions as
 *	blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	WARN_ON(!init_cpufreq_transition_notifier_list_called);

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
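
/*
 * Example client (sketch; the mydrv_* names are hypothetical): a driver
 * that must retune itself on every frequency change registers a
 * transition notifier and reacts on POSTCHANGE:
 *
 *	static int mydrv_cpufreq_notify(struct notifier_block *nb,
 *					unsigned long state, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (state == CPUFREQ_POSTCHANGE)
 *			mydrv_retune(freqs->cpu, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block mydrv_nb = {
 *		.notifier_call = mydrv_cpufreq_notify,
 *	};
 *
 *	cpufreq_register_notifier(&mydrv_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */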


/**
 *	cpufreq_unregister_notifier - unregister a driver with cpufreq
 *	@nb: notifier block to be unregistered
 *	@list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 *	Remove a driver from the CPU frequency notifier list.
 *
 *	This function may sleep, and has the same return conditions as
 *	blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);


/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/


int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	int retval = -EINVAL;

	dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
		target_freq, relation);
	if (cpu_online(policy->cpu) && cpufreq_driver->target)
		retval = cpufreq_driver->target(policy, target_freq, relation);

	return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);

int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret = -EINVAL;

	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		goto no_policy;

	if (unlikely(lock_policy_rwsem_write(policy->cpu)))
		goto fail;

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	unlock_policy_rwsem_write(policy->cpu);

fail:
	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
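
/*
 * Example (sketch): the relation argument resolves the target against the
 * frequency table; CPUFREQ_RELATION_L picks the lowest frequency at or
 * above the target, CPUFREQ_RELATION_H the highest at or below it:
 *
 *	cpufreq_driver_target(policy, 800000, CPUFREQ_RELATION_L);
 */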

int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;

	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		return -EINVAL;

	if (cpu_online(cpu) && cpufreq_driver->getavg)
		ret = cpufreq_driver->getavg(policy, cpu);

	cpufreq_cpu_put(policy);
	return ret;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);

/*
 * when "event" is CPUFREQ_GOV_LIMITS
 */

static int __cpufreq_governor(struct cpufreq_policy *policy,
					unsigned int event)
{
	int ret;

	/* This must only be defined when the default governor is known to
	   have latency restrictions, e.g. conservative or ondemand.
	   That this is the case is already ensured in Kconfig
	*/
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			printk(KERN_WARNING "%s governor failed, too long"
			       " transition latency of HW, fallback"
			       " to %s governor\n",
			       policy->governor->name,
			       gov->name);
			policy->governor = gov;
		}
	}

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	dprintk("__cpufreq_governor for CPU %u, event %u\n",
						policy->cpu, event);
	ret = policy->governor->governor(policy, event);

	/* we keep one module reference alive for
			each CPU governed by this CPU */
	if ((event != CPUFREQ_GOV_START) || ret)
		module_put(policy->governor->owner);
	if ((event == CPUFREQ_GOV_STOP) && !ret)
		module_put(policy->governor->owner);

	return ret;
}


int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	mutex_lock(&cpufreq_governor_mutex);

	err = -EBUSY;
	if (__find_governor(governor->name) == NULL) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
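
/*
 * Example governor registration (sketch modelled on the performance
 * governor; the mygov_* names are hypothetical): a governor fills in a
 * struct cpufreq_governor and registers it, after which it can be
 * selected through scaling_governor:
 *
 *	static int mygov_governor(struct cpufreq_policy *policy,
 *				  unsigned int event)
 *	{
 *		switch (event) {
 *		case CPUFREQ_GOV_START:
 *		case CPUFREQ_GOV_LIMITS:
 *			__cpufreq_driver_target(policy, policy->max,
 *						CPUFREQ_RELATION_H);
 *			break;
 *		}
 *		return 0;
 *	}
 *
 *	static struct cpufreq_governor mygov = {
 *		.name		= "mygov",
 *		.governor	= mygov_governor,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	cpufreq_register_governor(&mygov);
 */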


void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
	if (!governor)
		return;

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
	return;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);



/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;
	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));

	cpufreq_cpu_put(cpu_policy);
	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);


/*
 * data   : current policy.
 * policy : policy to be set.
 */
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy)
{
	int ret = 0;

	cpufreq_debug_disable_ratelimit();
	dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
		policy->min, policy->max);

	memcpy(&policy->cpuinfo, &data->cpuinfo,
				sizeof(struct cpufreq_cpuinfo));

	if (policy->min > data->max || policy->max < data->min) {
		ret = -EINVAL;
		goto error_out;
	}

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, policy);

	/* adjust if necessary - hardware incompatibility */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_INCOMPATIBLE, policy);

	/* verify the cpu speed can be set within this limit,
	   which might be different from the first one */
	ret = cpufreq_driver->verify(policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, policy);

	data->min = policy->min;
	data->max = policy->max;

	dprintk("new min and max freqs are %u - %u kHz\n",
					data->min, data->max);

	if (cpufreq_driver->setpolicy) {
		data->policy = policy->policy;
		dprintk("setting range\n");
		ret = cpufreq_driver->setpolicy(policy);
	} else {
		if (policy->governor != data->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = data->governor;

			dprintk("governor switch\n");

			/* end old governor */
			if (data->governor)
				__cpufreq_governor(data, CPUFREQ_GOV_STOP);

			/* start new governor */
			data->governor = policy->governor;
			if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
				/* new governor failed, so re-start old one */
				dprintk("starting governor %s failed\n",
							data->governor->name);
				if (old_gov) {
					data->governor = old_gov;
					__cpufreq_governor(data,
							   CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		dprintk("governor: change or update limits\n");
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

error_out:
	cpufreq_debug_enable_ratelimit();
	return ret;
}

/**
 *	cpufreq_update_policy - re-evaluate an existing cpufreq policy
 *	@cpu: CPU which shall be re-evaluated
 *
 *	Useful for policy notifiers which have different necessities
 *	at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
	struct cpufreq_policy policy;
	int ret;

	if (!data) {
		ret = -ENODEV;
		goto no_policy;
	}

	if (unlikely(lock_policy_rwsem_write(cpu))) {
		ret = -EINVAL;
		goto fail;
	}

	dprintk("updating policy for CPU %u\n", cpu);
	memcpy(&policy, data, sizeof(struct cpufreq_policy));
	policy.min = data->user_policy.min;
	policy.max = data->user_policy.max;
	policy.policy = data->user_policy.policy;
	policy.governor = data->user_policy.governor;

	/* BIOS might change freq behind our back
	  -> ask driver for current freq and notify governors about a change */
	if (cpufreq_driver->get) {
		policy.cur = cpufreq_driver->get(cpu);
		if (!data->cur) {
			dprintk("Driver did not initialize current freq");
			data->cur = policy.cur;
		} else {
			if (data->cur != policy.cur)
				cpufreq_out_of_sync(cpu, data->cur,
								policy.cur);
		}
	}

	ret = __cpufreq_set_policy(data, &policy);

	unlock_policy_rwsem_write(cpu);

fail:
	cpufreq_cpu_put(data);
no_policy:
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);

static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	if (sys_dev) {
		switch (action) {
		case CPU_ONLINE:
		case CPU_ONLINE_FROZEN:
			cpufreq_add_dev(sys_dev);
			break;
		case CPU_DOWN_PREPARE:
		case CPU_DOWN_PREPARE_FROZEN:
			if (unlikely(lock_policy_rwsem_write(cpu)))
				BUG();

			__cpufreq_remove_dev(sys_dev);
			break;
		case CPU_DOWN_FAILED:
		case CPU_DOWN_FAILED_FROZEN:
			cpufreq_add_dev(sys_dev);
			break;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};

/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 *   Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    ((!driver_data->setpolicy) && (!driver_data->target)))
		return -EINVAL;

	dprintk("trying to register driver %s\n", driver_data->name);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EBUSY;
	}
	cpufreq_driver = driver_data;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = sysdev_driver_register(&cpu_sysdev_class,
					&cpufreq_sysdev_driver);

	if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			dprintk("no CPU initialized for driver %s\n",
							driver_data->name);
			sysdev_driver_unregister(&cpu_sysdev_class,
						&cpufreq_sysdev_driver);

			spin_lock_irqsave(&cpufreq_driver_lock, flags);
			cpufreq_driver = NULL;
			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
		}
	}

	if (!ret) {
		register_hotcpu_notifier(&cpufreq_cpu_notifier);
		dprintk("driver %s up and running\n", driver_data->name);
		cpufreq_debug_enable_ratelimit();
	}

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
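
/*
 * Example driver registration (sketch; the mydrv_* names are
 * hypothetical): a ->target style driver provides at least init(),
 * verify() and target(), then registers from its module init:
 *
 *	static struct cpufreq_driver mydrv_driver = {
 *		.name	= "mydrv",
 *		.owner	= THIS_MODULE,
 *		.init	= mydrv_cpu_init,
 *		.verify	= mydrv_verify,
 *		.target	= mydrv_target,
 *		.get	= mydrv_get,
 *	};
 *
 *	static int __init mydrv_init(void)
 *	{
 *		return cpufreq_register_driver(&mydrv_driver);
 *	}
 */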


/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 *    Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	cpufreq_debug_disable_ratelimit();

	if (!cpufreq_driver || (driver != cpufreq_driver)) {
		cpufreq_debug_enable_ratelimit();
		return -EINVAL;
	}

	dprintk("unregistering driver %s\n", driver->name);

	sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);

static int __init cpufreq_core_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(policy_cpu, cpu) = -1;
		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
	}
	return 0;
}

core_initcall(cpufreq_core_init);
1932