1 /*
2  *  linux/drivers/cpufreq/cpufreq.c
3  *
4  *  Copyright (C) 2001 Russell King
5  *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6  *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
7  *
8  *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
9  *	Added handling for CPU hotplug
10  *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11  *	Fix handling for CPU hotplug -- affected CPUs
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License version 2 as
15  * published by the Free Software Foundation.
16  */
17 
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19 
20 #include <linux/cpu.h>
21 #include <linux/cpufreq.h>
22 #include <linux/delay.h>
23 #include <linux/device.h>
24 #include <linux/init.h>
25 #include <linux/kernel_stat.h>
26 #include <linux/module.h>
27 #include <linux/mutex.h>
28 #include <linux/slab.h>
29 #include <linux/suspend.h>
30 #include <linux/tick.h>
31 #ifdef CONFIG_SMP
32 #include <linux/sched.h>
33 #endif
34 #include <trace/events/power.h>
35 
36 /**
37  * The "cpufreq driver" - the arch- or hardware-dependent low-level
38  * driver of CPUFreq support, and its rwlock. This lock also
39  * protects the cpufreq_cpu_data array.
40  */
41 static struct cpufreq_driver *cpufreq_driver;
42 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
43 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
44 static DEFINE_RWLOCK(cpufreq_driver_lock);
45 DEFINE_MUTEX(cpufreq_governor_lock);
46 static LIST_HEAD(cpufreq_policy_list);
47 
48 /* This one keeps track of the previously set governor of a removed CPU */
49 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
50 
51 /* Flag to suspend/resume CPUFreq governors */
52 static bool cpufreq_suspended;
53 
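/*
 * True when the registered driver can actually switch frequencies, i.e. it
 * implements either ->target_index() (table based) or the legacy ->target().
 */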
54 static inline bool has_target(void)
55 {
56 	return cpufreq_driver->target_index || cpufreq_driver->target;
57 }
58 
59 /*
60  * rwsem to guarantee that cpufreq driver module doesn't unload during critical
61  * sections
62  */
63 static DECLARE_RWSEM(cpufreq_rwsem);
64 
65 /* internal prototypes */
66 static int __cpufreq_governor(struct cpufreq_policy *policy,
67 		unsigned int event);
68 static unsigned int __cpufreq_get(unsigned int cpu);
69 static void handle_update(struct work_struct *work);
70 
71 /**
72  * Two notifier lists: the "policy" list is involved in the
73  * validation process for a new CPU frequency policy; the
74  * "transition" list for kernel code that needs to handle
75  * changes to devices when the CPU clock speed changes.
76  * The mutex locks both lists.
77  */
78 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
79 static struct srcu_notifier_head cpufreq_transition_notifier_list;
80 
81 static bool init_cpufreq_transition_notifier_list_called;
82 static int __init init_cpufreq_transition_notifier_list(void)
83 {
84 	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
85 	init_cpufreq_transition_notifier_list_called = true;
86 	return 0;
87 }
88 pure_initcall(init_cpufreq_transition_notifier_list);
89 
90 static int off __read_mostly;
91 static int cpufreq_disabled(void)
92 {
93 	return off;
94 }
95 void disable_cpufreq(void)
96 {
97 	off = 1;
98 }
99 static LIST_HEAD(cpufreq_governor_list);
100 static DEFINE_MUTEX(cpufreq_governor_mutex);
101 
102 bool have_governor_per_policy(void)
103 {
104 	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
105 }
106 EXPORT_SYMBOL_GPL(have_governor_per_policy);
107 
108 bool cpufreq_driver_is_slow(void)
109 {
110 	return !(cpufreq_driver->flags & CPUFREQ_DRIVER_FAST);
111 }
112 EXPORT_SYMBOL_GPL(cpufreq_driver_is_slow);
113 
114 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
115 {
116 	if (have_governor_per_policy())
117 		return &policy->kobj;
118 	else
119 		return cpufreq_global_kobject;
120 }
121 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
122 
123 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
124 {
125 	u64 idle_time;
126 	u64 cur_wall_time;
127 	u64 busy_time;
128 
129 	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
130 
131 	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
132 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
133 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
134 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
135 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
136 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
137 
138 	idle_time = cur_wall_time - busy_time;
139 	if (wall)
140 		*wall = cputime_to_usecs(cur_wall_time);
141 
142 	return cputime_to_usecs(idle_time);
143 }
144 
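/*
 * Return the idle time of @cpu in microseconds and, if @wall is non-NULL, the
 * wall time as well. Falls back to jiffy-based accounting when nohz idle time
 * is unavailable; iowait counts as idle time unless @io_busy is set.
 */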
145 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
146 {
147 	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
148 
149 	if (idle_time == -1ULL)
150 		return get_cpu_idle_time_jiffy(cpu, wall);
151 	else if (!io_busy)
152 		idle_time += get_cpu_iowait_time_us(cpu, wall);
153 
154 	return idle_time;
155 }
156 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
157 
158 /*
159  * This is a generic cpufreq init() routine which can be used by cpufreq
160  * drivers of SMP systems. It will do the following:
161  * - validate & show the freq table passed in
162  * - set the policy's transition latency
163  * - fill policy->cpus with all possible CPUs
164  */
165 int cpufreq_generic_init(struct cpufreq_policy *policy,
166 		struct cpufreq_frequency_table *table,
167 		unsigned int transition_latency)
168 {
169 	int ret;
170 
171 	ret = cpufreq_table_validate_and_show(policy, table);
172 	if (ret) {
173 		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
174 		return ret;
175 	}
176 
177 	policy->cpuinfo.transition_latency = transition_latency;
178 
179 	/*
180 	 * The driver only supports the SMP configuration where all processors
181 	 * share the clock and voltage.
182 	 */
183 	cpumask_setall(policy->cpus);
184 
185 	return 0;
186 }
187 EXPORT_SYMBOL_GPL(cpufreq_generic_init);
188 
189 unsigned int cpufreq_generic_get(unsigned int cpu)
190 {
191 	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
192 
193 	if (!policy || IS_ERR(policy->clk)) {
194 		pr_err("%s: No %s associated to cpu: %d\n",
195 		       __func__, policy ? "clk" : "policy", cpu);
196 		return 0;
197 	}
198 
199 	return clk_get_rate(policy->clk) / 1000;
200 }
201 EXPORT_SYMBOL_GPL(cpufreq_generic_get);
202 
203 /* Only for cpufreq core internal use */
204 struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
205 {
206 	return per_cpu(cpufreq_cpu_data, cpu);
207 }
208 
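/*
 * Look up the policy of @cpu and take a reference on its kobject (as well as
 * a read lock on cpufreq_rwsem). The caller must release both by calling
 * cpufreq_cpu_put().
 */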
209 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
210 {
211 	struct cpufreq_policy *policy = NULL;
212 	unsigned long flags;
213 
214 	if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
215 		return NULL;
216 
217 	if (!down_read_trylock(&cpufreq_rwsem))
218 		return NULL;
219 
220 	/* get the cpufreq driver */
221 	read_lock_irqsave(&cpufreq_driver_lock, flags);
222 
223 	if (cpufreq_driver) {
224 		/* get the CPU */
225 		policy = per_cpu(cpufreq_cpu_data, cpu);
226 		if (policy)
227 			kobject_get(&policy->kobj);
228 	}
229 
230 	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
231 
232 	if (!policy)
233 		up_read(&cpufreq_rwsem);
234 
235 	return policy;
236 }
237 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
238 
239 void cpufreq_cpu_put(struct cpufreq_policy *policy)
240 {
241 	if (cpufreq_disabled())
242 		return;
243 
244 	kobject_put(&policy->kobj);
245 	up_read(&cpufreq_rwsem);
246 }
247 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
248 
249 /*********************************************************************
250  *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
251  *********************************************************************/
252 
253 /**
254  * adjust_jiffies - adjust the system "loops_per_jiffy"
255  *
256  * This function alters the system "loops_per_jiffy" for the clock
257  * speed change. Note that loops_per_jiffy cannot be updated on SMP
258  * systems as each CPU might be scaled differently. So, use the arch
259  * per-CPU loops_per_jiffy value wherever possible.
260  */
261 #ifndef CONFIG_SMP
262 static unsigned long l_p_j_ref;
263 static unsigned int l_p_j_ref_freq;
264 
265 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
266 {
267 	if (ci->flags & CPUFREQ_CONST_LOOPS)
268 		return;
269 
270 	if (!l_p_j_ref_freq) {
271 		l_p_j_ref = loops_per_jiffy;
272 		l_p_j_ref_freq = ci->old;
273 		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
274 			 l_p_j_ref, l_p_j_ref_freq);
275 	}
276 	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
277 		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
278 								ci->new);
279 		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
280 			 loops_per_jiffy, ci->new);
281 	}
282 }
283 #else
284 static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
285 {
286 	return;
287 }
288 #endif
289 
290 /*********************************************************************
291  *               FREQUENCY INVARIANT CPU CAPACITY                    *
292  *********************************************************************/
293 
294 static DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
295 static DEFINE_PER_CPU(unsigned long, max_freq_scale) = SCHED_CAPACITY_SCALE;
296 
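/*
 * Update the per-cpu freq_scale (and, when called without a freqs argument,
 * max_freq_scale) values for policy->cpus, expressed in SCHED_CAPACITY_SCALE
 * units. They are read back via cpufreq_scale_freq_capacity() and
 * cpufreq_scale_max_freq_capacity() below.
 */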
297 static void
298 scale_freq_capacity(struct cpufreq_policy *policy, struct cpufreq_freqs *freqs)
299 {
300 	unsigned long cur = freqs ? freqs->new : policy->cur;
301 	unsigned long scale = (cur << SCHED_CAPACITY_SHIFT) / policy->max;
302 	struct cpufreq_cpuinfo *cpuinfo = &policy->cpuinfo;
303 	int cpu;
304 
305 	pr_debug("cpus %*pbl cur/cur max freq %lu/%u kHz freq scale %lu\n",
306 		 cpumask_pr_args(policy->cpus), cur, policy->max, scale);
307 
308 	for_each_cpu(cpu, policy->cpus)
309 		per_cpu(freq_scale, cpu) = scale;
310 
311 	if (freqs)
312 		return;
313 
314 	scale = (policy->max << SCHED_CAPACITY_SHIFT) / cpuinfo->max_freq;
315 
316 	pr_debug("cpus %*pbl cur max/max freq %u/%u kHz max freq scale %lu\n",
317 		 cpumask_pr_args(policy->cpus), policy->max, cpuinfo->max_freq,
318 		 scale);
319 
320 	for_each_cpu(cpu, policy->cpus)
321 		per_cpu(max_freq_scale, cpu) = scale;
322 }
323 
324 unsigned long cpufreq_scale_freq_capacity(struct sched_domain *sd, int cpu)
325 {
326 	return per_cpu(freq_scale, cpu);
327 }
328 
329 unsigned long cpufreq_scale_max_freq_capacity(int cpu)
330 {
331 	return per_cpu(max_freq_scale, cpu);
332 }
333 
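/*
 * Notify transition listeners for one CPU: on PRECHANGE, fix up freqs->old if
 * it disagrees with policy->cur and call the notifier chain; on POSTCHANGE,
 * adjust loops_per_jiffy, trace the change and record the new frequency in
 * policy->cur.
 */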
334 static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
335 		struct cpufreq_freqs *freqs, unsigned int state)
336 {
337 	BUG_ON(irqs_disabled());
338 
339 	if (cpufreq_disabled())
340 		return;
341 
342 	freqs->flags = cpufreq_driver->flags;
343 	pr_debug("notification %u of frequency transition to %u kHz\n",
344 		 state, freqs->new);
345 
346 	switch (state) {
347 
348 	case CPUFREQ_PRECHANGE:
349 		/* detect if the driver reported a value as "old frequency"
350 		 * which is not equal to what the cpufreq core thinks is
351 		 * "old frequency".
352 		 */
353 		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
354 			if ((policy) && (policy->cpu == freqs->cpu) &&
355 			    (policy->cur) && (policy->cur != freqs->old)) {
356 				pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
357 					 freqs->old, policy->cur);
358 				freqs->old = policy->cur;
359 			}
360 		}
361 		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
362 				CPUFREQ_PRECHANGE, freqs);
363 		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
364 		break;
365 
366 	case CPUFREQ_POSTCHANGE:
367 		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
368 		pr_debug("FREQ: %lu - CPU: %lu\n",
369 			 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
370 		trace_cpu_frequency(freqs->new, freqs->cpu);
371 		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
372 				CPUFREQ_POSTCHANGE, freqs);
373 		if (likely(policy) && likely(policy->cpu == freqs->cpu))
374 			policy->cur = freqs->new;
375 		break;
376 	}
377 }
378 
379 /**
380  * cpufreq_notify_transition - call notifier chain and adjust_jiffies
381  * on frequency transition.
382  *
383  * This function calls the transition notifiers and the "adjust_jiffies"
384  * function. It is called twice on all CPU frequency changes that have
385  * external effects.
386  */
387 static void cpufreq_notify_transition(struct cpufreq_policy *policy,
388 		struct cpufreq_freqs *freqs, unsigned int state)
389 {
390 	for_each_cpu(freqs->cpu, policy->cpus)
391 		__cpufreq_notify_transition(policy, freqs, state);
392 }
393 
394 /* Do post notifications when there is a chance that the transition has failed */
395 static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
396 		struct cpufreq_freqs *freqs, int transition_failed)
397 {
398 	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
399 	if (!transition_failed)
400 		return;
401 
402 	swap(freqs->old, freqs->new);
403 	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
404 	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
405 }
406 
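/*
 * Mark the start of a frequency transition for @policy, serialising against
 * any transition already in flight, then send the PRECHANGE notification.
 * Must be paired with cpufreq_freq_transition_end().
 */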
407 void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
408 		struct cpufreq_freqs *freqs)
409 {
410 #ifdef CONFIG_SMP
411 	int cpu;
412 #endif
413 
414 	/*
415 	 * Catch double invocations of _begin() which lead to self-deadlock.
416 	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
417 	 * doesn't invoke _begin() on their behalf, and hence the chances of
418 	 * double invocations are very low. Moreover, there are scenarios
419 	 * where these checks can emit false-positive warnings in these
420 	 * drivers; so we avoid that by skipping them altogether.
421 	 */
422 	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
423 				&& current == policy->transition_task);
424 
425 wait:
426 	wait_event(policy->transition_wait, !policy->transition_ongoing);
427 
428 	spin_lock(&policy->transition_lock);
429 
430 	if (unlikely(policy->transition_ongoing)) {
431 		spin_unlock(&policy->transition_lock);
432 		goto wait;
433 	}
434 
435 	policy->transition_ongoing = true;
436 	policy->transition_task = current;
437 
438 	spin_unlock(&policy->transition_lock);
439 
440 	scale_freq_capacity(policy, freqs);
441 #ifdef CONFIG_SMP
442 	for_each_cpu(cpu, policy->cpus)
443 		trace_cpu_capacity(capacity_curr_of(cpu), cpu);
444 #endif
445 
446 	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
447 }
448 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
449 
450 void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
451 		struct cpufreq_freqs *freqs, int transition_failed)
452 {
453 	if (unlikely(WARN_ON(!policy->transition_ongoing)))
454 		return;
455 
456 	cpufreq_notify_post_transition(policy, freqs, transition_failed);
457 
458 	policy->transition_ongoing = false;
459 	policy->transition_task = NULL;
460 
461 	wake_up(&policy->transition_wait);
462 }
463 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
464 
465 
466 /*********************************************************************
467  *                          SYSFS INTERFACE                          *
468  *********************************************************************/
469 static ssize_t show_boost(struct kobject *kobj,
470 				 struct attribute *attr, char *buf)
471 {
472 	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
473 }
474 
475 static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
476 				  const char *buf, size_t count)
477 {
478 	int ret, enable;
479 
480 	ret = sscanf(buf, "%d", &enable);
481 	if (ret != 1 || enable < 0 || enable > 1)
482 		return -EINVAL;
483 
484 	if (cpufreq_boost_trigger_state(enable)) {
485 		pr_err("%s: Cannot %s BOOST!\n",
486 		       __func__, enable ? "enable" : "disable");
487 		return -EINVAL;
488 	}
489 
490 	pr_debug("%s: cpufreq BOOST %s\n",
491 		 __func__, enable ? "enabled" : "disabled");
492 
493 	return count;
494 }
495 define_one_global_rw(boost);
496 
497 static struct cpufreq_governor *__find_governor(const char *str_governor)
498 {
499 	struct cpufreq_governor *t;
500 
501 	list_for_each_entry(t, &cpufreq_governor_list, governor_list)
502 		if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
503 			return t;
504 
505 	return NULL;
506 }
507 
508 /**
509  * cpufreq_parse_governor - parse a governor string
510  */
511 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
512 				struct cpufreq_governor **governor)
513 {
514 	int err = -EINVAL;
515 
516 	if (!cpufreq_driver)
517 		goto out;
518 
519 	if (cpufreq_driver->setpolicy) {
520 		if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
521 			*policy = CPUFREQ_POLICY_PERFORMANCE;
522 			err = 0;
523 		} else if (!strncasecmp(str_governor, "powersave",
524 						CPUFREQ_NAME_LEN)) {
525 			*policy = CPUFREQ_POLICY_POWERSAVE;
526 			err = 0;
527 		}
528 	} else if (has_target()) {
529 		struct cpufreq_governor *t;
530 
531 		mutex_lock(&cpufreq_governor_mutex);
532 
533 		t = __find_governor(str_governor);
534 
535 		if (t == NULL) {
536 			int ret;
537 
538 			mutex_unlock(&cpufreq_governor_mutex);
539 			ret = request_module("cpufreq_%s", str_governor);
540 			mutex_lock(&cpufreq_governor_mutex);
541 
542 			if (ret == 0)
543 				t = __find_governor(str_governor);
544 		}
545 
546 		if (t != NULL) {
547 			*governor = t;
548 			err = 0;
549 		}
550 
551 		mutex_unlock(&cpufreq_governor_mutex);
552 	}
553 out:
554 	return err;
555 }
556 
557 /**
558  * cpufreq_per_cpu_attr_read() / show_##file_name() -
559  * print out cpufreq information
560  *
561  * Write out information from cpufreq_driver->policy[cpu]; object must be
562  * "unsigned int".
563  */
564 
565 #define show_one(file_name, object)			\
566 static ssize_t show_##file_name				\
567 (struct cpufreq_policy *policy, char *buf)		\
568 {							\
569 	return sprintf(buf, "%u\n", policy->object);	\
570 }
571 
572 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
573 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
574 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
575 show_one(scaling_min_freq, min);
576 show_one(scaling_max_freq, max);
577 
578 static ssize_t show_scaling_cur_freq(
579 	struct cpufreq_policy *policy, char *buf)
580 {
581 	ssize_t ret;
582 
583 	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
584 		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
585 	else
586 		ret = sprintf(buf, "%u\n", policy->cur);
587 	return ret;
588 }
589 
590 static int cpufreq_set_policy(struct cpufreq_policy *policy,
591 				struct cpufreq_policy *new_policy);
592 
593 /**
594  * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
595  */
596 #define store_one(file_name, object)			\
597 static ssize_t store_##file_name					\
598 (struct cpufreq_policy *policy, const char *buf, size_t count)		\
599 {									\
600 	int ret;							\
601 	struct cpufreq_policy new_policy;				\
602 									\
603 	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
604 	if (ret)							\
605 		return -EINVAL;						\
606 									\
607 	ret = sscanf(buf, "%u", &new_policy.object);			\
608 	if (ret != 1)							\
609 		return -EINVAL;						\
610 									\
611 	ret = cpufreq_set_policy(policy, &new_policy);		\
612 	policy->user_policy.object = policy->object;			\
613 									\
614 	return ret ? ret : count;					\
615 }
616 
617 store_one(scaling_min_freq, min);
618 store_one(scaling_max_freq, max);
619 
620 /**
621  * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
622  */
623 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
624 					char *buf)
625 {
626 	unsigned int cur_freq = __cpufreq_get(policy->cpu);
627 	if (!cur_freq)
628 		return sprintf(buf, "<unknown>");
629 	return sprintf(buf, "%u\n", cur_freq);
630 }
631 
632 /**
633  * show_scaling_governor - show the current policy for the specified CPU
634  */
635 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
636 {
637 	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
638 		return sprintf(buf, "powersave\n");
639 	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
640 		return sprintf(buf, "performance\n");
641 	else if (policy->governor)
642 		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
643 				policy->governor->name);
644 	return -EINVAL;
645 }
646 
647 /**
648  * store_scaling_governor - store policy for the specified CPU
649  */
650 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
651 					const char *buf, size_t count)
652 {
653 	int ret;
654 	char	str_governor[16];
655 	struct cpufreq_policy new_policy;
656 
657 	ret = cpufreq_get_policy(&new_policy, policy->cpu);
658 	if (ret)
659 		return ret;
660 
661 	ret = sscanf(buf, "%15s", str_governor);
662 	if (ret != 1)
663 		return -EINVAL;
664 
665 	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
666 						&new_policy.governor))
667 		return -EINVAL;
668 
669 	ret = cpufreq_set_policy(policy, &new_policy);
670 
671 	policy->user_policy.policy = policy->policy;
672 	policy->user_policy.governor = policy->governor;
673 
674 	if (ret)
675 		return ret;
676 	else
677 		return count;
678 }
679 
680 /**
681  * show_scaling_driver - show the cpufreq driver currently loaded
682  */
683 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
684 {
685 	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
686 }
687 
688 /**
689  * show_scaling_available_governors - show the available CPUfreq governors
690  */
691 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
692 						char *buf)
693 {
694 	ssize_t i = 0;
695 	struct cpufreq_governor *t;
696 
697 	if (!has_target()) {
698 		i += sprintf(buf, "performance powersave");
699 		goto out;
700 	}
701 
702 	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
703 		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
704 		    - (CPUFREQ_NAME_LEN + 2)))
705 			goto out;
706 		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
707 	}
708 out:
709 	i += sprintf(&buf[i], "\n");
710 	return i;
711 }
712 
713 ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
714 {
715 	ssize_t i = 0;
716 	unsigned int cpu;
717 
718 	for_each_cpu(cpu, mask) {
719 		if (i)
720 			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
721 		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
722 		if (i >= (PAGE_SIZE - 5))
723 			break;
724 	}
725 	i += sprintf(&buf[i], "\n");
726 	return i;
727 }
728 EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
729 
730 /**
731  * show_related_cpus - show the CPUs affected by each transition even if
732  * hw coordination is in use
733  */
734 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
735 {
736 	return cpufreq_show_cpus(policy->related_cpus, buf);
737 }
738 
739 /**
740  * show_affected_cpus - show the CPUs affected by each transition
741  */
742 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
743 {
744 	return cpufreq_show_cpus(policy->cpus, buf);
745 }
746 
747 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
748 					const char *buf, size_t count)
749 {
750 	unsigned int freq = 0;
751 	unsigned int ret;
752 
753 	if (!policy->governor || !policy->governor->store_setspeed)
754 		return -EINVAL;
755 
756 	ret = sscanf(buf, "%u", &freq);
757 	if (ret != 1)
758 		return -EINVAL;
759 
760 	policy->governor->store_setspeed(policy, freq);
761 
762 	return count;
763 }
764 
765 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
766 {
767 	if (!policy->governor || !policy->governor->show_setspeed)
768 		return sprintf(buf, "<unsupported>\n");
769 
770 	return policy->governor->show_setspeed(policy, buf);
771 }
772 
773 /**
774  * show_bios_limit - show the current cpufreq HW/BIOS limitation
775  */
776 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
777 {
778 	unsigned int limit;
779 	int ret;
780 	if (cpufreq_driver->bios_limit) {
781 		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
782 		if (!ret)
783 			return sprintf(buf, "%u\n", limit);
784 	}
785 	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
786 }
787 
788 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
789 cpufreq_freq_attr_ro(cpuinfo_min_freq);
790 cpufreq_freq_attr_ro(cpuinfo_max_freq);
791 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
792 cpufreq_freq_attr_ro(scaling_available_governors);
793 cpufreq_freq_attr_ro(scaling_driver);
794 cpufreq_freq_attr_ro(scaling_cur_freq);
795 cpufreq_freq_attr_ro(bios_limit);
796 cpufreq_freq_attr_ro(related_cpus);
797 cpufreq_freq_attr_ro(affected_cpus);
798 cpufreq_freq_attr_rw(scaling_min_freq);
799 cpufreq_freq_attr_rw(scaling_max_freq);
800 cpufreq_freq_attr_rw(scaling_governor);
801 cpufreq_freq_attr_rw(scaling_setspeed);
802 
803 static struct attribute *default_attrs[] = {
804 	&cpuinfo_min_freq.attr,
805 	&cpuinfo_max_freq.attr,
806 	&cpuinfo_transition_latency.attr,
807 	&scaling_min_freq.attr,
808 	&scaling_max_freq.attr,
809 	&affected_cpus.attr,
810 	&related_cpus.attr,
811 	&scaling_governor.attr,
812 	&scaling_driver.attr,
813 	&scaling_available_governors.attr,
814 	&scaling_setspeed.attr,
815 	NULL
816 };
817 
818 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
819 #define to_attr(a) container_of(a, struct freq_attr, attr)
820 
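/*
 * Generic sysfs show/store handlers: resolve the attribute to its per-policy
 * callback and run it with cpufreq_rwsem held plus the policy rwsem taken for
 * reading (show) or writing (store).
 */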
821 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
822 {
823 	struct cpufreq_policy *policy = to_policy(kobj);
824 	struct freq_attr *fattr = to_attr(attr);
825 	ssize_t ret;
826 
827 	if (!down_read_trylock(&cpufreq_rwsem))
828 		return -EINVAL;
829 
830 	down_read(&policy->rwsem);
831 
832 	if (fattr->show)
833 		ret = fattr->show(policy, buf);
834 	else
835 		ret = -EIO;
836 
837 	up_read(&policy->rwsem);
838 	up_read(&cpufreq_rwsem);
839 
840 	return ret;
841 }
842 
843 static ssize_t store(struct kobject *kobj, struct attribute *attr,
844 		     const char *buf, size_t count)
845 {
846 	struct cpufreq_policy *policy = to_policy(kobj);
847 	struct freq_attr *fattr = to_attr(attr);
848 	ssize_t ret = -EINVAL;
849 
850 	get_online_cpus();
851 
852 	if (!cpu_online(policy->cpu))
853 		goto unlock;
854 
855 	if (!down_read_trylock(&cpufreq_rwsem))
856 		goto unlock;
857 
858 	down_write(&policy->rwsem);
859 
860 	if (fattr->store)
861 		ret = fattr->store(policy, buf, count);
862 	else
863 		ret = -EIO;
864 
865 	up_write(&policy->rwsem);
866 
867 	up_read(&cpufreq_rwsem);
868 unlock:
869 	put_online_cpus();
870 
871 	return ret;
872 }
873 
874 static void cpufreq_sysfs_release(struct kobject *kobj)
875 {
876 	struct cpufreq_policy *policy = to_policy(kobj);
877 	pr_debug("last reference is dropped\n");
878 	complete(&policy->kobj_unregister);
879 }
880 
881 static const struct sysfs_ops sysfs_ops = {
882 	.show	= show,
883 	.store	= store,
884 };
885 
886 static struct kobj_type ktype_cpufreq = {
887 	.sysfs_ops	= &sysfs_ops,
888 	.default_attrs	= default_attrs,
889 	.release	= cpufreq_sysfs_release,
890 };
891 
892 struct kobject *cpufreq_global_kobject;
893 EXPORT_SYMBOL(cpufreq_global_kobject);
894 
895 static int cpufreq_global_kobject_usage;
896 
897 int cpufreq_get_global_kobject(void)
898 {
899 	if (!cpufreq_global_kobject_usage++)
900 		return kobject_add(cpufreq_global_kobject,
901 				&cpu_subsys.dev_root->kobj, "%s", "cpufreq");
902 
903 	return 0;
904 }
905 EXPORT_SYMBOL(cpufreq_get_global_kobject);
906 
907 void cpufreq_put_global_kobject(void)
908 {
909 	if (!--cpufreq_global_kobject_usage)
910 		kobject_del(cpufreq_global_kobject);
911 }
912 EXPORT_SYMBOL(cpufreq_put_global_kobject);
913 
914 int cpufreq_sysfs_create_file(const struct attribute *attr)
915 {
916 	int ret = cpufreq_get_global_kobject();
917 
918 	if (!ret) {
919 		ret = sysfs_create_file(cpufreq_global_kobject, attr);
920 		if (ret)
921 			cpufreq_put_global_kobject();
922 	}
923 
924 	return ret;
925 }
926 EXPORT_SYMBOL(cpufreq_sysfs_create_file);
927 
928 void cpufreq_sysfs_remove_file(const struct attribute *attr)
929 {
930 	sysfs_remove_file(cpufreq_global_kobject, attr);
931 	cpufreq_put_global_kobject();
932 }
933 EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
934 
935 /* symlink affected CPUs */
936 static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
937 {
938 	unsigned int j;
939 	int ret = 0;
940 
941 	for_each_cpu(j, policy->cpus) {
942 		struct device *cpu_dev;
943 
944 		if (j == policy->cpu)
945 			continue;
946 
947 		pr_debug("Adding link for CPU: %u\n", j);
948 		cpu_dev = get_cpu_device(j);
949 		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
950 					"cpufreq");
951 		if (ret)
952 			break;
953 	}
954 	return ret;
955 }
956 
957 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
958 				     struct device *dev)
959 {
960 	struct freq_attr **drv_attr;
961 	int ret = 0;
962 
963 	/* prepare interface data */
964 	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
965 				   &dev->kobj, "cpufreq");
966 	if (ret)
967 		return ret;
968 
969 	/* set up files for this cpu device */
970 	drv_attr = cpufreq_driver->attr;
971 	while ((drv_attr) && (*drv_attr)) {
972 		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
973 		if (ret)
974 			goto err_out_kobj_put;
975 		drv_attr++;
976 	}
977 	if (cpufreq_driver->get) {
978 		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
979 		if (ret)
980 			goto err_out_kobj_put;
981 	}
982 
983 	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
984 	if (ret)
985 		goto err_out_kobj_put;
986 
987 	if (cpufreq_driver->bios_limit) {
988 		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
989 		if (ret)
990 			goto err_out_kobj_put;
991 	}
992 
993 	ret = cpufreq_add_dev_symlink(policy);
994 	if (ret)
995 		goto err_out_kobj_put;
996 
997 	return ret;
998 
999 err_out_kobj_put:
1000 	kobject_put(&policy->kobj);
1001 	wait_for_completion(&policy->kobj_unregister);
1002 	return ret;
1003 }
1004 
1005 static void cpufreq_init_policy(struct cpufreq_policy *policy)
1006 {
1007 	struct cpufreq_governor *gov = NULL;
1008 	struct cpufreq_policy new_policy;
1009 	int ret = 0;
1010 
1011 	memcpy(&new_policy, policy, sizeof(*policy));
1012 
1013 	/* Update governor of new_policy to the governor used before hotplug */
1014 	gov = __find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
1015 	if (gov)
1016 		pr_debug("Restoring governor %s for cpu %d\n",
1017 				gov->name, policy->cpu);
1018 	else
1019 		gov = CPUFREQ_DEFAULT_GOVERNOR;
1020 
1021 	new_policy.governor = gov;
1022 
1023 	/* Use the default policy if it's valid. */
1024 	if (cpufreq_driver->setpolicy)
1025 		cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
1026 
1027 	/* set default policy */
1028 	ret = cpufreq_set_policy(policy, &new_policy);
1029 	if (ret) {
1030 		pr_debug("setting policy failed\n");
1031 		if (cpufreq_driver->exit)
1032 			cpufreq_driver->exit(policy);
1033 	}
1034 }
1035 
1036 #ifdef CONFIG_HOTPLUG_CPU
1037 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
1038 				  unsigned int cpu, struct device *dev)
1039 {
1040 	int ret = 0;
1041 	unsigned long flags;
1042 
1043 	if (has_target()) {
1044 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1045 		if (ret) {
1046 			pr_err("%s: Failed to stop governor\n", __func__);
1047 			return ret;
1048 		}
1049 	}
1050 
1051 	down_write(&policy->rwsem);
1052 
1053 	write_lock_irqsave(&cpufreq_driver_lock, flags);
1054 
1055 	cpumask_set_cpu(cpu, policy->cpus);
1056 	per_cpu(cpufreq_cpu_data, cpu) = policy;
1057 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1058 
1059 	up_write(&policy->rwsem);
1060 
1061 	if (has_target()) {
1062 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1063 		if (!ret)
1064 			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1065 
1066 		if (ret) {
1067 			pr_err("%s: Failed to start governor\n", __func__);
1068 			return ret;
1069 		}
1070 	}
1071 
1072 	return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
1073 }
1074 #endif
1075 
1076 static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
1077 {
1078 	struct cpufreq_policy *policy;
1079 	unsigned long flags;
1080 
1081 	read_lock_irqsave(&cpufreq_driver_lock, flags);
1082 
1083 	policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
1084 
1085 	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1086 
1087 	if (policy)
1088 		policy->governor = NULL;
1089 
1090 	return policy;
1091 }
1092 
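/*
 * Allocate a zeroed policy together with its cpumasks and initialise the
 * embedded list head, rwsem, transition lock and wait queue.
 */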
1093 static struct cpufreq_policy *cpufreq_policy_alloc(void)
1094 {
1095 	struct cpufreq_policy *policy;
1096 
1097 	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1098 	if (!policy)
1099 		return NULL;
1100 
1101 	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1102 		goto err_free_policy;
1103 
1104 	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1105 		goto err_free_cpumask;
1106 
1107 	INIT_LIST_HEAD(&policy->policy_list);
1108 	init_rwsem(&policy->rwsem);
1109 	spin_lock_init(&policy->transition_lock);
1110 	init_waitqueue_head(&policy->transition_wait);
1111 
1112 	return policy;
1113 
1114 err_free_cpumask:
1115 	free_cpumask_var(policy->cpus);
1116 err_free_policy:
1117 	kfree(policy);
1118 
1119 	return NULL;
1120 }
1121 
1122 static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
1123 {
1124 	struct kobject *kobj;
1125 	struct completion *cmp;
1126 
1127 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1128 			CPUFREQ_REMOVE_POLICY, policy);
1129 
1130 	down_read(&policy->rwsem);
1131 	kobj = &policy->kobj;
1132 	cmp = &policy->kobj_unregister;
1133 	up_read(&policy->rwsem);
1134 	kobject_put(kobj);
1135 
1136 	/*
1137 	 * We need to make sure that the underlying kobj is
1138 	 * actually not referenced anymore by anybody before we
1139 	 * proceed with unloading.
1140 	 */
1141 	pr_debug("waiting for dropping of refcount\n");
1142 	wait_for_completion(cmp);
1143 	pr_debug("wait complete\n");
1144 }
1145 
1146 static void cpufreq_policy_free(struct cpufreq_policy *policy)
1147 {
1148 	free_cpumask_var(policy->related_cpus);
1149 	free_cpumask_var(policy->cpus);
1150 	kfree(policy);
1151 }
1152 
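/*
 * Make @cpu the policy owner: move the policy kobject under the new CPU's
 * device and notify CPUFREQ_UPDATE_POLICY_CPU listeners of the change.
 */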
1153 static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu,
1154 			     struct device *cpu_dev)
1155 {
1156 	int ret;
1157 
1158 	if (WARN_ON(cpu == policy->cpu))
1159 		return 0;
1160 
1161 	/* Move kobject to the new policy->cpu */
1162 	ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
1163 	if (ret) {
1164 		pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
1165 		return ret;
1166 	}
1167 
1168 	down_write(&policy->rwsem);
1169 
1170 	policy->last_cpu = policy->cpu;
1171 	policy->cpu = cpu;
1172 
1173 	up_write(&policy->rwsem);
1174 
1175 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1176 			CPUFREQ_UPDATE_POLICY_CPU, policy);
1177 
1178 	return 0;
1179 }
1180 
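/*
 * Bring a CPU under cpufreq management: attach it to an existing policy when
 * it shares a clock domain with an already-managed CPU, restore a saved
 * policy on resume, or otherwise allocate a fresh policy, let the driver
 * initialise it, create the sysfs interface and apply the initial governor.
 */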
1181 static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1182 {
1183 	unsigned int j, cpu = dev->id;
1184 	int ret = -ENOMEM;
1185 	struct cpufreq_policy *policy;
1186 	unsigned long flags;
1187 	bool recover_policy = cpufreq_suspended;
1188 #ifdef CONFIG_HOTPLUG_CPU
1189 	struct cpufreq_policy *tpolicy;
1190 #endif
1191 
1192 	if (cpu_is_offline(cpu))
1193 		return 0;
1194 
1195 	pr_debug("adding CPU %u\n", cpu);
1196 
1197 #ifdef CONFIG_SMP
1198 	/* check whether a different CPU already registered this
1199 	 * CPU because it is in the same boat. */
1200 	policy = cpufreq_cpu_get(cpu);
1201 	if (unlikely(policy)) {
1202 		cpufreq_cpu_put(policy);
1203 		return 0;
1204 	}
1205 #endif
1206 
1207 	if (!down_read_trylock(&cpufreq_rwsem))
1208 		return 0;
1209 
1210 #ifdef CONFIG_HOTPLUG_CPU
1211 	/* Check if this cpu was hot-unplugged earlier and has siblings */
1212 	read_lock_irqsave(&cpufreq_driver_lock, flags);
1213 	list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
1214 		if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
1215 			read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1216 			ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev);
1217 			up_read(&cpufreq_rwsem);
1218 			return ret;
1219 		}
1220 	}
1221 	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1222 #endif
1223 
1224 	/*
1225 	 * Restore the saved policy when doing light-weight init and fall back
1226 	 * to the full init if that fails.
1227 	 */
1228 	policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
1229 	if (!policy) {
1230 		recover_policy = false;
1231 		policy = cpufreq_policy_alloc();
1232 		if (!policy)
1233 			goto nomem_out;
1234 	}
1235 
1236 	/*
1237 	 * In the resume path, since we restore a saved policy, the assignment
1238 	 * to policy->cpu is like an update of the existing policy, rather than
1239 	 * the creation of a brand new one. So we need to perform this update
1240 	 * by invoking update_policy_cpu().
1241 	 */
1242 	if (recover_policy && cpu != policy->cpu)
1243 		WARN_ON(update_policy_cpu(policy, cpu, dev));
1244 	else
1245 		policy->cpu = cpu;
1246 
1247 	cpumask_copy(policy->cpus, cpumask_of(cpu));
1248 
1249 	init_completion(&policy->kobj_unregister);
1250 	INIT_WORK(&policy->update, handle_update);
1251 
1252 	/* call driver. From then on the cpufreq must be able
1253 	 * to accept all calls to ->verify and ->setpolicy for this CPU
1254 	 */
1255 	ret = cpufreq_driver->init(policy);
1256 	if (ret) {
1257 		pr_debug("initialization failed\n");
1258 		goto err_set_policy_cpu;
1259 	}
1260 
1261 	/* related cpus should at least have policy->cpus */
1262 	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1263 
1264 	/*
1265 	 * affected cpus must always be the ones that are online. We aren't
1266 	 * managing offline cpus here.
1267 	 */
1268 	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1269 
1270 	if (!recover_policy) {
1271 		policy->user_policy.min = policy->min;
1272 		policy->user_policy.max = policy->max;
1273 	}
1274 
1275 	down_write(&policy->rwsem);
1276 	write_lock_irqsave(&cpufreq_driver_lock, flags);
1277 	for_each_cpu(j, policy->cpus)
1278 		per_cpu(cpufreq_cpu_data, j) = policy;
1279 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1280 
1281 	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
1282 		policy->cur = cpufreq_driver->get(policy->cpu);
1283 		if (!policy->cur) {
1284 			pr_err("%s: ->get() failed\n", __func__);
1285 			goto err_get_freq;
1286 		}
1287 	}
1288 
1289 	/*
1290 	 * Sometimes boot loaders set CPU frequency to a value outside of
1291 	 * the frequency table present with the cpufreq core. In such cases the CPU
1292 	 * might be unstable if it has to run at that frequency for a long time,
1293 	 * so it's better to set it to a frequency which is specified in the
1294 	 * freq-table. This also makes cpufreq stats inconsistent as
1295 	 * cpufreq-stats would fail to register because current frequency of CPU
1296 	 * isn't found in freq-table.
1297 	 *
1298 	 * Because we don't want this change to affect the boot process badly, we go
1299 	 * for the next freq which is >= policy->cur ('cur' must be set by now,
1300 	 * otherwise we will end up setting freq to lowest of the table as 'cur'
1301 	 * is initialized to zero).
1302 	 *
1303 	 * We are passing target-freq as "policy->cur - 1" otherwise
1304 	 * __cpufreq_driver_target() would simply fail, as policy->cur will be
1305 	 * equal to target-freq.
1306 	 */
1307 	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1308 	    && has_target()) {
1309 		/* Are we running at unknown frequency ? */
1310 		ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1311 		if (ret == -EINVAL) {
1312 			/* Warn user and fix it */
1313 			pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
1314 				__func__, policy->cpu, policy->cur);
1315 			ret = __cpufreq_driver_target(policy, policy->cur - 1,
1316 				CPUFREQ_RELATION_L);
1317 
1318 			/*
1319 			 * Reaching here a few seconds after boot does not mean
1320 			 * that the system will remain stable at the "unknown"
1321 			 * frequency for a longer duration. Hence, a BUG_ON().
1322 			 */
1323 			BUG_ON(ret);
1324 			pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
1325 				__func__, policy->cpu, policy->cur);
1326 		}
1327 	}
1328 
1329 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1330 				     CPUFREQ_START, policy);
1331 
1332 	if (!recover_policy) {
1333 		ret = cpufreq_add_dev_interface(policy, dev);
1334 		if (ret)
1335 			goto err_out_unregister;
1336 		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1337 				CPUFREQ_CREATE_POLICY, policy);
1338 	}
1339 
1340 	write_lock_irqsave(&cpufreq_driver_lock, flags);
1341 	list_add(&policy->policy_list, &cpufreq_policy_list);
1342 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1343 
1344 	cpufreq_init_policy(policy);
1345 
1346 	if (!recover_policy) {
1347 		policy->user_policy.policy = policy->policy;
1348 		policy->user_policy.governor = policy->governor;
1349 	}
1350 	up_write(&policy->rwsem);
1351 
1352 	kobject_uevent(&policy->kobj, KOBJ_ADD);
1353 	up_read(&cpufreq_rwsem);
1354 
1355 	pr_debug("initialization complete\n");
1356 
1357 	return 0;
1358 
1359 err_out_unregister:
1360 err_get_freq:
1361 	write_lock_irqsave(&cpufreq_driver_lock, flags);
1362 	for_each_cpu(j, policy->cpus)
1363 		per_cpu(cpufreq_cpu_data, j) = NULL;
1364 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1365 
1366 	up_write(&policy->rwsem);
1367 
1368 	if (cpufreq_driver->exit)
1369 		cpufreq_driver->exit(policy);
1370 err_set_policy_cpu:
1371 	if (recover_policy) {
1372 		/* Do not leave stale fallback data behind. */
1373 		per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
1374 		cpufreq_policy_put_kobj(policy);
1375 	}
1376 	cpufreq_policy_free(policy);
1377 
1378 nomem_out:
1379 	up_read(&cpufreq_rwsem);
1380 
1381 	return ret;
1382 }
1383 
1384 /**
1385  * cpufreq_add_dev - add a CPU device
1386  *
1387  * Adds the cpufreq interface for a CPU device.
1388  *
1389  * The Oracle says: try running cpufreq registration/unregistration concurrently
1390  * with cpu hotplugging and all hell will break loose. Tried to clean this
1391  * mess up, but more thorough testing is needed. - Mathieu
1392  */
1393 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1394 {
1395 	return __cpufreq_add_dev(dev, sif);
1396 }
1397 
1398 static int __cpufreq_remove_dev_prepare(struct device *dev,
1399 					struct subsys_interface *sif)
1400 {
1401 	unsigned int cpu = dev->id, cpus;
1402 	int ret;
1403 	unsigned long flags;
1404 	struct cpufreq_policy *policy;
1405 
1406 	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1407 
1408 	write_lock_irqsave(&cpufreq_driver_lock, flags);
1409 
1410 	policy = per_cpu(cpufreq_cpu_data, cpu);
1411 
1412 	/* Save the policy somewhere when doing a light-weight tear-down */
1413 	if (cpufreq_suspended)
1414 		per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
1415 
1416 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1417 
1418 	if (!policy) {
1419 		pr_debug("%s: No cpu_data found\n", __func__);
1420 		return -EINVAL;
1421 	}
1422 
1423 	if (has_target()) {
1424 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1425 		if (ret) {
1426 			pr_err("%s: Failed to stop governor\n", __func__);
1427 			return ret;
1428 		}
1429 	}
1430 
1431 	if (!cpufreq_driver->setpolicy)
1432 		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
1433 			policy->governor->name, CPUFREQ_NAME_LEN);
1434 
1435 	down_read(&policy->rwsem);
1436 	cpus = cpumask_weight(policy->cpus);
1437 	up_read(&policy->rwsem);
1438 
1439 	if (cpu != policy->cpu) {
1440 		sysfs_remove_link(&dev->kobj, "cpufreq");
1441 	} else if (cpus > 1) {
1442 		/* Nominate new CPU */
1443 		int new_cpu = cpumask_any_but(policy->cpus, cpu);
1444 		struct device *cpu_dev = get_cpu_device(new_cpu);
1445 
1446 		sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1447 		ret = update_policy_cpu(policy, new_cpu, cpu_dev);
1448 		if (ret) {
1449 			if (sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
1450 					      "cpufreq"))
1451 				pr_err("%s: Failed to restore kobj link to cpu:%d\n",
1452 				       __func__, cpu_dev->id);
1453 			return ret;
1454 		}
1455 
1456 		if (!cpufreq_suspended)
1457 			pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1458 				 __func__, new_cpu, cpu);
1459 	} else if (cpufreq_driver->stop_cpu) {
1460 		cpufreq_driver->stop_cpu(policy);
1461 	}
1462 
1463 	return 0;
1464 }
1465 
1466 static int __cpufreq_remove_dev_finish(struct device *dev,
1467 				       struct subsys_interface *sif)
1468 {
1469 	unsigned int cpu = dev->id, cpus;
1470 	int ret;
1471 	unsigned long flags;
1472 	struct cpufreq_policy *policy;
1473 
1474 	write_lock_irqsave(&cpufreq_driver_lock, flags);
1475 	policy = per_cpu(cpufreq_cpu_data, cpu);
1476 	per_cpu(cpufreq_cpu_data, cpu) = NULL;
1477 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1478 
1479 	if (!policy) {
1480 		pr_debug("%s: No cpu_data found\n", __func__);
1481 		return -EINVAL;
1482 	}
1483 
1484 	down_write(&policy->rwsem);
1485 	cpus = cpumask_weight(policy->cpus);
1486 
1487 	if (cpus > 1)
1488 		cpumask_clear_cpu(cpu, policy->cpus);
1489 	up_write(&policy->rwsem);
1490 
1491 	/* If cpu is last user of policy, free policy */
1492 	if (cpus == 1) {
1493 		if (has_target()) {
1494 			ret = __cpufreq_governor(policy,
1495 					CPUFREQ_GOV_POLICY_EXIT);
1496 			if (ret) {
1497 				pr_err("%s: Failed to exit governor\n",
1498 				       __func__);
1499 				return ret;
1500 			}
1501 		}
1502 
1503 		if (!cpufreq_suspended)
1504 			cpufreq_policy_put_kobj(policy);
1505 
1506 		/*
1507 		 * Perform the ->exit() even during light-weight tear-down,
1508 		 * since this is a core component, and is essential for the
1509 		 * subsequent light-weight ->init() to succeed.
1510 		 */
1511 		if (cpufreq_driver->exit)
1512 			cpufreq_driver->exit(policy);
1513 
1514 		/* Remove policy from list of active policies */
1515 		write_lock_irqsave(&cpufreq_driver_lock, flags);
1516 		list_del(&policy->policy_list);
1517 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1518 
1519 		if (!cpufreq_suspended)
1520 			cpufreq_policy_free(policy);
1521 	} else if (has_target()) {
1522 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1523 		if (!ret)
1524 			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1525 
1526 		if (ret) {
1527 			pr_err("%s: Failed to start governor\n", __func__);
1528 			return ret;
1529 		}
1530 	}
1531 
1532 	return 0;
1533 }
1534 
1535 /**
1536  * cpufreq_remove_dev - remove a CPU device
1537  *
1538  * Removes the cpufreq interface for a CPU device.
1539  */
1540 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1541 {
1542 	unsigned int cpu = dev->id;
1543 	int ret;
1544 
1545 	if (cpu_is_offline(cpu))
1546 		return 0;
1547 
1548 	ret = __cpufreq_remove_dev_prepare(dev, sif);
1549 
1550 	if (!ret)
1551 		ret = __cpufreq_remove_dev_finish(dev, sif);
1552 
1553 	return ret;
1554 }
1555 
1556 static void handle_update(struct work_struct *work)
1557 {
1558 	struct cpufreq_policy *policy =
1559 		container_of(work, struct cpufreq_policy, update);
1560 	unsigned int cpu = policy->cpu;
1561 	pr_debug("handle_update for cpu %u called\n", cpu);
1562 	cpufreq_update_policy(cpu);
1563 }
1564 
1565 /**
1566  *	cpufreq_out_of_sync - If the actual and saved CPU frequencies differ,
1567  *	we're in deep trouble.
1568  *	@cpu: cpu number
1569  *	@old_freq: CPU frequency the kernel thinks the CPU runs at
1570  *	@new_freq: CPU frequency the CPU actually runs at
1571  *
1572  *	We adjust to the current frequency first, and need to clean up later.
1573  *	So either call cpufreq_update_policy() or schedule handle_update().
1574  */
1575 static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1576 				unsigned int new_freq)
1577 {
1578 	struct cpufreq_policy *policy;
1579 	struct cpufreq_freqs freqs;
1580 	unsigned long flags;
1581 
1582 	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1583 		 old_freq, new_freq);
1584 
1585 	freqs.old = old_freq;
1586 	freqs.new = new_freq;
1587 
1588 	read_lock_irqsave(&cpufreq_driver_lock, flags);
1589 	policy = per_cpu(cpufreq_cpu_data, cpu);
1590 	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1591 
1592 	cpufreq_freq_transition_begin(policy, &freqs);
1593 	cpufreq_freq_transition_end(policy, &freqs, 0);
1594 }
1595 
1596 /**
1597  * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1598  * @cpu: CPU number
1599  *
1600  * This is the last known freq, without actually getting it from the driver.
1601  * The return value will be the same as what is shown in scaling_cur_freq in sysfs.
1602  */
1603 unsigned int cpufreq_quick_get(unsigned int cpu)
1604 {
1605 	struct cpufreq_policy *policy;
1606 	unsigned int ret_freq = 0;
1607 
1608 	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1609 		return cpufreq_driver->get(cpu);
1610 
1611 	policy = cpufreq_cpu_get(cpu);
1612 	if (policy) {
1613 		ret_freq = policy->cur;
1614 		cpufreq_cpu_put(policy);
1615 	}
1616 
1617 	return ret_freq;
1618 }
1619 EXPORT_SYMBOL(cpufreq_quick_get);
1620 
1621 /**
1622  * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1623  * @cpu: CPU number
1624  *
1625  * Just return the max possible frequency for a given CPU.
1626  */
1627 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1628 {
1629 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1630 	unsigned int ret_freq = 0;
1631 
1632 	if (policy) {
1633 		ret_freq = policy->max;
1634 		cpufreq_cpu_put(policy);
1635 	}
1636 
1637 	return ret_freq;
1638 }
1639 EXPORT_SYMBOL(cpufreq_quick_get_max);
1640 
1641 static unsigned int __cpufreq_get(unsigned int cpu)
1642 {
1643 	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1644 	unsigned int ret_freq = 0;
1645 
1646 	if (!cpufreq_driver->get)
1647 		return ret_freq;
1648 
1649 	ret_freq = cpufreq_driver->get(cpu);
1650 
1651 	if (ret_freq && policy->cur &&
1652 		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1653 		/* verify no discrepancy between actual and
1654 		 * saved value exists */
1655 		if (unlikely(ret_freq != policy->cur)) {
1656 			cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1657 			schedule_work(&policy->update);
1658 		}
1659 	}
1660 
1661 	return ret_freq;
1662 }
1663 
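/*
 * Read the current frequency from the driver and, if it disagrees with
 * policy->cur, resynchronise via cpufreq_out_of_sync() and schedule a policy
 * update. Callers hold the policy rwsem.
 */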
1664 /**
1665  * cpufreq_get - get the current CPU frequency (in kHz)
1666  * @cpu: CPU number
1667  *
1668  * Get the current CPU frequency.
1669  */
1670 unsigned int cpufreq_get(unsigned int cpu)
1671 {
1672 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1673 	unsigned int ret_freq = 0;
1674 
1675 	if (policy) {
1676 		down_read(&policy->rwsem);
1677 		ret_freq = __cpufreq_get(cpu);
1678 		up_read(&policy->rwsem);
1679 
1680 		cpufreq_cpu_put(policy);
1681 	}
1682 
1683 	return ret_freq;
1684 }
1685 EXPORT_SYMBOL(cpufreq_get);
1686 
1687 static struct subsys_interface cpufreq_interface = {
1688 	.name		= "cpufreq",
1689 	.subsys		= &cpu_subsys,
1690 	.add_dev	= cpufreq_add_dev,
1691 	.remove_dev	= cpufreq_remove_dev,
1692 };
1693 
1694 /*
1695  * In case the platform wants a specific frequency to be configured
1696  * during suspend.
1697  */
1698 int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1699 {
1700 	int ret;
1701 
1702 	if (!policy->suspend_freq) {
1703 		pr_err("%s: suspend_freq can't be zero\n", __func__);
1704 		return -EINVAL;
1705 	}
1706 
1707 	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1708 			policy->suspend_freq);
1709 
1710 	ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1711 			CPUFREQ_RELATION_H);
1712 	if (ret)
1713 		pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1714 				__func__, policy->suspend_freq, ret);
1715 
1716 	return ret;
1717 }
1718 EXPORT_SYMBOL(cpufreq_generic_suspend);
1719 
1720 /**
1721  * cpufreq_suspend() - Suspend CPUFreq governors
1722  *
1723  * Called during system wide Suspend/Hibernate cycles for suspending governors
1724  * as some platforms can't change frequency after this point in the suspend
1725  * cycle, because the devices they use for changing frequency (e.g. i2c,
1726  * regulators) are suspended soon after this point.
1727  */
1728 void cpufreq_suspend(void)
1729 {
1730 	struct cpufreq_policy *policy;
1731 
1732 	if (!cpufreq_driver)
1733 		return;
1734 
1735 	if (!has_target())
1736 		goto suspend;
1737 
1738 	pr_debug("%s: Suspending Governors\n", __func__);
1739 
1740 	list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
1741 		if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
1742 			pr_err("%s: Failed to stop governor for policy: %p\n",
1743 				__func__, policy);
1744 		else if (cpufreq_driver->suspend
1745 		    && cpufreq_driver->suspend(policy))
1746 			pr_err("%s: Failed to suspend driver: %p\n", __func__,
1747 				policy);
1748 	}
1749 
1750 suspend:
1751 	cpufreq_suspended = true;
1752 }
1753 
1754 /**
1755  * cpufreq_resume() - Resume CPUFreq governors
1756  *
1757  * Called during system-wide suspend/hibernate cycles to resume governors
1758  * that were suspended by cpufreq_suspend().
1759  */
1760 void cpufreq_resume(void)
1761 {
1762 	struct cpufreq_policy *policy;
1763 
1764 	if (!cpufreq_driver)
1765 		return;
1766 
1767 	cpufreq_suspended = false;
1768 
1769 	if (!has_target())
1770 		return;
1771 
1772 	pr_debug("%s: Resuming Governors\n", __func__);
1773 
1774 	list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
1775 		if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
1776 			pr_err("%s: Failed to resume driver: %p\n", __func__,
1777 				policy);
1778 		else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
1779 		    || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
1780 			pr_err("%s: Failed to start governor for policy: %p\n",
1781 				__func__, policy);
1782 	}
1783 
1784 	/*
1785 	 * Schedule a call to cpufreq_update_policy() for the first online CPU,
1786 	 * as that one is not hotplugged out on suspend. It will verify that the
1787 	 * current freq is in sync with what we believe it to be.
1788 	 */
1789 	policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
1790 	if (WARN_ON(!policy))
1791 		return;
1792 
1793 	schedule_work(&policy->update);
1794 }
1795 
1796 /**
1797  *	cpufreq_get_current_driver - return current driver's name
1798  *
1799  *	Return the name string of the currently loaded cpufreq driver
1800  *	or NULL, if none.
1801  */
1802 const char *cpufreq_get_current_driver(void)
1803 {
1804 	if (cpufreq_driver)
1805 		return cpufreq_driver->name;
1806 
1807 	return NULL;
1808 }
1809 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1810 
1811 /**
1812  *	cpufreq_get_driver_data - return current driver data
1813  *
1814  *	Return the private data of the currently loaded cpufreq
1815  *	driver, or NULL if no cpufreq driver is loaded.
1816  */
1817 void *cpufreq_get_driver_data(void)
1818 {
1819 	if (cpufreq_driver)
1820 		return cpufreq_driver->driver_data;
1821 
1822 	return NULL;
1823 }
1824 EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
1825 
1826 /*********************************************************************
1827  *                     NOTIFIER LISTS INTERFACE                      *
1828  *********************************************************************/
1829 
1830 /**
1831  *	cpufreq_register_notifier - register a driver with cpufreq
1832  *	@nb: notifier function to register
1833  *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1834  *
1835  *	Add a driver to one of two lists: either a list of drivers that
1836  *      are notified about clock rate changes (once before and once after
1837  *      the transition), or a list of drivers that are notified about
1838  *      changes in cpufreq policy.
1839  *
1840  *	This function may sleep, and has the same return conditions as
1841  *	blocking_notifier_chain_register.
1842  */
1843 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1844 {
1845 	int ret;
1846 
1847 	if (cpufreq_disabled())
1848 		return -EINVAL;
1849 
1850 	WARN_ON(!init_cpufreq_transition_notifier_list_called);
1851 
1852 	switch (list) {
1853 	case CPUFREQ_TRANSITION_NOTIFIER:
1854 		ret = srcu_notifier_chain_register(
1855 				&cpufreq_transition_notifier_list, nb);
1856 		break;
1857 	case CPUFREQ_POLICY_NOTIFIER:
1858 		ret = blocking_notifier_chain_register(
1859 				&cpufreq_policy_notifier_list, nb);
1860 		break;
1861 	default:
1862 		ret = -EINVAL;
1863 	}
1864 
1865 	return ret;
1866 }
1867 EXPORT_SYMBOL(cpufreq_register_notifier);
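
/*
 * Illustrative sketch (not part of the original file): registering a
 * transition notifier. The callback runs once with CPUFREQ_PRECHANGE and once
 * with CPUFREQ_POSTCHANGE around every frequency change, receiving a
 * struct cpufreq_freqs pointer. The example_ names are hypothetical.
 */
#if 0	/* example only */
static int example_transition_cb(struct notifier_block *nb,
				 unsigned long val, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (val == CPUFREQ_POSTCHANGE)
		pr_debug("cpu%u: %u kHz -> %u kHz\n",
			 freqs->cpu, freqs->old, freqs->new);

	return NOTIFY_OK;
}

static struct notifier_block example_transition_nb = {
	.notifier_call = example_transition_cb,
};

/* e.g. from module init:
 *	cpufreq_register_notifier(&example_transition_nb,
 *				  CPUFREQ_TRANSITION_NOTIFIER);
 */
#endif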
1868 
1869 /**
1870  *	cpufreq_unregister_notifier - unregister a driver with cpufreq
1871  *	@nb: notifier block to be unregistered
1872  *	@list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1873  *
1874  *	Remove a driver from the CPU frequency notifier list.
1875  *
1876  *	This function may sleep, and has the same return conditions as
1877  *	blocking_notifier_chain_unregister.
1878  */
1879 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1880 {
1881 	int ret;
1882 
1883 	if (cpufreq_disabled())
1884 		return -EINVAL;
1885 
1886 	switch (list) {
1887 	case CPUFREQ_TRANSITION_NOTIFIER:
1888 		ret = srcu_notifier_chain_unregister(
1889 				&cpufreq_transition_notifier_list, nb);
1890 		break;
1891 	case CPUFREQ_POLICY_NOTIFIER:
1892 		ret = blocking_notifier_chain_unregister(
1893 				&cpufreq_policy_notifier_list, nb);
1894 		break;
1895 	default:
1896 		ret = -EINVAL;
1897 	}
1898 
1899 	return ret;
1900 }
1901 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1902 
1903 
1904 /*********************************************************************
1905  *                              GOVERNORS                            *
1906  *********************************************************************/
1907 
1908 /* Must set freqs->new to intermediate frequency */
1909 static int __target_intermediate(struct cpufreq_policy *policy,
1910 				 struct cpufreq_freqs *freqs, int index)
1911 {
1912 	int ret;
1913 
1914 	freqs->new = cpufreq_driver->get_intermediate(policy, index);
1915 
1916 	/* We don't need to switch to intermediate freq */
1917 	if (!freqs->new)
1918 		return 0;
1919 
1920 	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
1921 		 __func__, policy->cpu, freqs->old, freqs->new);
1922 
1923 	cpufreq_freq_transition_begin(policy, freqs);
1924 	ret = cpufreq_driver->target_intermediate(policy, index);
1925 	cpufreq_freq_transition_end(policy, freqs, ret);
1926 
1927 	if (ret)
1928 		pr_err("%s: Failed to change to intermediate frequency: %d\n",
1929 		       __func__, ret);
1930 
1931 	return ret;
1932 }
1933 
1934 static int __target_index(struct cpufreq_policy *policy,
1935 			  struct cpufreq_frequency_table *freq_table, int index)
1936 {
1937 	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
1938 	unsigned int intermediate_freq = 0;
1939 	int retval = -EINVAL;
1940 	bool notify;
1941 
1942 	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
1943 	if (notify) {
1944 		/* Handle switching to intermediate frequency */
1945 		if (cpufreq_driver->get_intermediate) {
1946 			retval = __target_intermediate(policy, &freqs, index);
1947 			if (retval)
1948 				return retval;
1949 
1950 			intermediate_freq = freqs.new;
1951 			/* Set old freq to intermediate */
1952 			if (intermediate_freq)
1953 				freqs.old = freqs.new;
1954 		}
1955 
1956 		freqs.new = freq_table[index].frequency;
1957 		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1958 			 __func__, policy->cpu, freqs.old, freqs.new);
1959 
1960 		cpufreq_freq_transition_begin(policy, &freqs);
1961 	}
1962 
1963 	retval = cpufreq_driver->target_index(policy, index);
1964 	if (retval)
1965 		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
1966 		       retval);
1967 
1968 	if (notify) {
1969 		cpufreq_freq_transition_end(policy, &freqs, retval);
1970 
1971 		/*
1972 		 * Failed after setting to intermediate freq? Driver should have
1973 		 * reverted back to initial frequency and so should we. Check
1974 		 * here for intermediate_freq instead of get_intermediate, in
1975 		 * case we haven't switched to intermediate freq at all.
1976 		 */
1977 		if (unlikely(retval && intermediate_freq)) {
1978 			freqs.old = intermediate_freq;
1979 			freqs.new = policy->restore_freq;
1980 			cpufreq_freq_transition_begin(policy, &freqs);
1981 			cpufreq_freq_transition_end(policy, &freqs, 0);
1982 		}
1983 	}
1984 
1985 	return retval;
1986 }
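
/*
 * Illustrative sketch (not part of the original file): the optional
 * ->get_intermediate()/->target_intermediate() pair that __target_index()
 * drives above. A driver that must park the CPU clock on a stable parent
 * while its PLL relocks returns that rate from ->get_intermediate() (or 0 to
 * skip the detour) and performs the switch in ->target_intermediate().
 * The example_ names, clock handles and the 500 MHz rate are hypothetical.
 */
#if 0	/* example only */
#define EXAMPLE_SAFE_PARENT_KHZ	500000

static unsigned int example_get_intermediate(struct cpufreq_policy *policy,
					     unsigned int index)
{
	/* no detour needed when the target already runs from the safe parent */
	if (example_freq_table[index].frequency == EXAMPLE_SAFE_PARENT_KHZ)
		return 0;

	return EXAMPLE_SAFE_PARENT_KHZ;
}

static int example_target_intermediate(struct cpufreq_policy *policy,
				       unsigned int index)
{
	/* reparent the CPU clock to the safe source while the PLL relocks */
	return clk_set_parent(example_cpu_clk, example_safe_parent_clk);
}
#endif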
1987 
1988 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1989 			    unsigned int target_freq,
1990 			    unsigned int relation)
1991 {
1992 	unsigned int old_target_freq = target_freq;
1993 	int retval = -EINVAL;
1994 
1995 	if (cpufreq_disabled())
1996 		return -ENODEV;
1997 
1998 	/* Make sure that target_freq is within supported range */
1999 	if (target_freq > policy->max)
2000 		target_freq = policy->max;
2001 	if (target_freq < policy->min)
2002 		target_freq = policy->min;
2003 
2004 	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
2005 		 policy->cpu, target_freq, relation, old_target_freq);
2006 
2007 	/*
2008 	 * This might look like a redundant call as we are checking it again
2009 	 * after finding index. But it is left intentionally for cases where
2010 	 * exactly the same frequency is requested again, so we can save a few
2011 	 * function calls.
2012 	 */
2013 	if (target_freq == policy->cur)
2014 		return 0;
2015 
2016 	/* Save last value to restore later on errors */
2017 	policy->restore_freq = policy->cur;
2018 
2019 	if (cpufreq_driver->target)
2020 		retval = cpufreq_driver->target(policy, target_freq, relation);
2021 	else if (cpufreq_driver->target_index) {
2022 		struct cpufreq_frequency_table *freq_table;
2023 		int index;
2024 
2025 		freq_table = cpufreq_frequency_get_table(policy->cpu);
2026 		if (unlikely(!freq_table)) {
2027 			pr_err("%s: Unable to find freq_table\n", __func__);
2028 			goto out;
2029 		}
2030 
2031 		retval = cpufreq_frequency_table_target(policy, freq_table,
2032 				target_freq, relation, &index);
2033 		if (unlikely(retval)) {
2034 			pr_err("%s: Unable to find matching freq\n", __func__);
2035 			goto out;
2036 		}
2037 
2038 		if (freq_table[index].frequency == policy->cur) {
2039 			retval = 0;
2040 			goto out;
2041 		}
2042 
2043 		retval = __target_index(policy, freq_table, index);
2044 	}
2045 
2046 out:
2047 	return retval;
2048 }
2049 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
2050 
2051 int cpufreq_driver_target(struct cpufreq_policy *policy,
2052 			  unsigned int target_freq,
2053 			  unsigned int relation)
2054 {
2055 	int ret = -EINVAL;
2056 
2057 	down_write(&policy->rwsem);
2058 
2059 	ret = __cpufreq_driver_target(policy, target_freq, relation);
2060 
2061 	up_write(&policy->rwsem);
2062 
2063 	return ret;
2064 }
2065 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
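
/*
 * Illustrative sketch (not part of the original file): a governor requesting
 * a frequency change. cpufreq_driver_target() takes the policy rwsem and then
 * clamps and forwards the request; CPUFREQ_RELATION_L selects the lowest
 * frequency at or above the target, CPUFREQ_RELATION_H the highest at or
 * below it. The example_ name is hypothetical.
 */
#if 0	/* example only */
static void example_raise_to_max(struct cpufreq_policy *policy)
{
	cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
}
#endif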
2066 
2067 /*
2068  * Forward a governor "event" (e.g. CPUFREQ_GOV_LIMITS) to the policy's governor.
2069  */
2070 
2071 static int __cpufreq_governor(struct cpufreq_policy *policy,
2072 					unsigned int event)
2073 {
2074 	int ret;
2075 
2076 	/* Only needs to be set when the default governor is known to have
2077 	   latency restrictions (e.g. conservative or ondemand); Kconfig
2078 	   already ensures a fallback governor exists in that case.
2079 	*/
2080 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
2081 	struct cpufreq_governor *gov = &cpufreq_gov_performance;
2082 #else
2083 	struct cpufreq_governor *gov = NULL;
2084 #endif
2085 
2086 	/* Don't start any governor operations if we are entering suspend */
2087 	if (cpufreq_suspended)
2088 		return 0;
2089 
2090 	if (policy->governor->max_transition_latency &&
2091 	    policy->cpuinfo.transition_latency >
2092 	    policy->governor->max_transition_latency) {
2093 		if (!gov)
2094 			return -EINVAL;
2095 		else {
2096 			pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
2097 				policy->governor->name, gov->name);
2098 			policy->governor = gov;
2099 		}
2100 	}
2101 
2102 	if (event == CPUFREQ_GOV_POLICY_INIT)
2103 		if (!try_module_get(policy->governor->owner))
2104 			return -EINVAL;
2105 
2106 	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
2107 		 policy->cpu, event);
2108 
2109 	mutex_lock(&cpufreq_governor_lock);
2110 	if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
2111 	    || (!policy->governor_enabled
2112 	    && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
2113 		mutex_unlock(&cpufreq_governor_lock);
2114 		return -EBUSY;
2115 	}
2116 
2117 	if (event == CPUFREQ_GOV_STOP)
2118 		policy->governor_enabled = false;
2119 	else if (event == CPUFREQ_GOV_START)
2120 		policy->governor_enabled = true;
2121 
2122 	mutex_unlock(&cpufreq_governor_lock);
2123 
2124 	ret = policy->governor->governor(policy, event);
2125 
2126 	if (!ret) {
2127 		if (event == CPUFREQ_GOV_POLICY_INIT)
2128 			policy->governor->initialized++;
2129 		else if (event == CPUFREQ_GOV_POLICY_EXIT)
2130 			policy->governor->initialized--;
2131 	} else {
2132 		/* Restore original values */
2133 		mutex_lock(&cpufreq_governor_lock);
2134 		if (event == CPUFREQ_GOV_STOP)
2135 			policy->governor_enabled = true;
2136 		else if (event == CPUFREQ_GOV_START)
2137 			policy->governor_enabled = false;
2138 		mutex_unlock(&cpufreq_governor_lock);
2139 	}
2140 
2141 	if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
2142 			((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
2143 		module_put(policy->governor->owner);
2144 
2145 	return ret;
2146 }
2147 
2148 int cpufreq_register_governor(struct cpufreq_governor *governor)
2149 {
2150 	int err;
2151 
2152 	if (!governor)
2153 		return -EINVAL;
2154 
2155 	if (cpufreq_disabled())
2156 		return -ENODEV;
2157 
2158 	mutex_lock(&cpufreq_governor_mutex);
2159 
2160 	governor->initialized = 0;
2161 	err = -EBUSY;
2162 	if (__find_governor(governor->name) == NULL) {
2163 		err = 0;
2164 		list_add(&governor->governor_list, &cpufreq_governor_list);
2165 	}
2166 
2167 	mutex_unlock(&cpufreq_governor_mutex);
2168 	return err;
2169 }
2170 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
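
/*
 * Illustrative sketch (not part of the original file): a minimal governor.
 * Its single ->governor() callback receives the lifecycle events dispatched
 * by __cpufreq_governor() above (POLICY_INIT/EXIT, START/STOP, LIMITS); this
 * one simply pins the policy to its maximum, performance-style. The example_
 * names are hypothetical.
 */
#if 0	/* example only */
static int example_gov_handler(struct cpufreq_policy *policy,
			       unsigned int event)
{
	switch (event) {
	case CPUFREQ_GOV_START:
	case CPUFREQ_GOV_LIMITS:
		__cpufreq_driver_target(policy, policy->max,
					CPUFREQ_RELATION_H);
		break;
	default:
		break;
	}
	return 0;
}

static struct cpufreq_governor example_governor = {
	.name		= "example",
	.governor	= example_gov_handler,
	.owner		= THIS_MODULE,
};

/* from module init: cpufreq_register_governor(&example_governor); */
#endif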
2171 
2172 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2173 {
2174 	int cpu;
2175 
2176 	if (!governor)
2177 		return;
2178 
2179 	if (cpufreq_disabled())
2180 		return;
2181 
2182 	for_each_present_cpu(cpu) {
2183 		if (cpu_online(cpu))
2184 			continue;
2185 		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
2186 			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
2187 	}
2188 
2189 	mutex_lock(&cpufreq_governor_mutex);
2190 	list_del(&governor->governor_list);
2191 	mutex_unlock(&cpufreq_governor_mutex);
2192 	return;
2193 }
2194 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2195 
2196 
2197 /*********************************************************************
2198  *                          POLICY INTERFACE                         *
2199  *********************************************************************/
2200 
2201 /**
2202  * cpufreq_get_policy - get the current cpufreq_policy
2203  * @policy: struct cpufreq_policy into which the current cpufreq_policy
2204  *	is written
2205  *
2206  * Reads the current cpufreq policy.
2207  */
2208 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2209 {
2210 	struct cpufreq_policy *cpu_policy;
2211 	if (!policy)
2212 		return -EINVAL;
2213 
2214 	cpu_policy = cpufreq_cpu_get(cpu);
2215 	if (!cpu_policy)
2216 		return -EINVAL;
2217 
2218 	memcpy(policy, cpu_policy, sizeof(*policy));
2219 
2220 	cpufreq_cpu_put(cpu_policy);
2221 	return 0;
2222 }
2223 EXPORT_SYMBOL(cpufreq_get_policy);
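
/*
 * Illustrative sketch (not part of the original file): reading a snapshot of
 * a CPU's policy. cpufreq_get_policy() copies the live policy into a
 * caller-provided structure, so no reference is held afterwards. The
 * example_ name is hypothetical.
 */
#if 0	/* example only */
static void example_dump_policy(unsigned int cpu)
{
	struct cpufreq_policy pol;

	if (cpufreq_get_policy(&pol, cpu))
		return;

	pr_info("cpu%u: %u - %u kHz, governor %s\n", cpu, pol.min, pol.max,
		pol.governor ? pol.governor->name : "none");
}
#endif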
2224 
2225 /*
2226  * policy : current policy.
2227  * new_policy: policy to be set.
2228  */
2229 static int cpufreq_set_policy(struct cpufreq_policy *policy,
2230 				struct cpufreq_policy *new_policy)
2231 {
2232 	struct cpufreq_governor *old_gov;
2233 	int ret;
2234 
2235 	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2236 		 new_policy->cpu, new_policy->min, new_policy->max);
2237 
2238 	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2239 
2240 	if (new_policy->min > policy->max || new_policy->max < policy->min)
2241 		return -EINVAL;
2242 
2243 	/* verify the cpu speed can be set within this limit */
2244 	ret = cpufreq_driver->verify(new_policy);
2245 	if (ret)
2246 		return ret;
2247 
2248 	/* adjust if necessary - all reasons */
2249 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2250 			CPUFREQ_ADJUST, new_policy);
2251 
2252 	/* adjust if necessary - hardware incompatibility*/
2253 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2254 			CPUFREQ_INCOMPATIBLE, new_policy);
2255 
2256 	/*
2257 	 * verify the cpu speed can be set within this limit, which might be
2258 	 * different from the first one
2259 	 */
2260 	ret = cpufreq_driver->verify(new_policy);
2261 	if (ret)
2262 		return ret;
2263 
2264 	/* notification of the new policy */
2265 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2266 			CPUFREQ_NOTIFY, new_policy);
2267 
2268 	scale_freq_capacity(new_policy, NULL);
2269 
2270 	policy->min = new_policy->min;
2271 	policy->max = new_policy->max;
2272 	trace_cpu_frequency_limits(policy->max, policy->min, policy->cpu);
2273 
2274 	pr_debug("new min and max freqs are %u - %u kHz\n",
2275 		 policy->min, policy->max);
2276 
2277 	if (cpufreq_driver->setpolicy) {
2278 		policy->policy = new_policy->policy;
2279 		pr_debug("setting range\n");
2280 		return cpufreq_driver->setpolicy(new_policy);
2281 	}
2282 
2283 	if (new_policy->governor == policy->governor)
2284 		goto out;
2285 
2286 	pr_debug("governor switch\n");
2287 
2288 	/* save old, working values */
2289 	old_gov = policy->governor;
2290 	/* end old governor */
2291 	if (old_gov) {
2292 		__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2293 		up_write(&policy->rwsem);
2294 		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2295 		down_write(&policy->rwsem);
2296 	}
2297 
2298 	/* start new governor */
2299 	policy->governor = new_policy->governor;
2300 	if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
2301 		if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
2302 			goto out;
2303 
2304 		up_write(&policy->rwsem);
2305 		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2306 		down_write(&policy->rwsem);
2307 	}
2308 
2309 	/* new governor failed, so re-start old one */
2310 	pr_debug("starting governor %s failed\n", policy->governor->name);
2311 	if (old_gov) {
2312 		policy->governor = old_gov;
2313 		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
2314 		__cpufreq_governor(policy, CPUFREQ_GOV_START);
2315 	}
2316 
2317 	return -EINVAL;
2318 
2319  out:
2320 	pr_debug("governor: change or update limits\n");
2321 	return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2322 }
2323 
2324 /**
2325  *	cpufreq_update_policy - re-evaluate an existing cpufreq policy
2326  *	@cpu: CPU which shall be re-evaluated
2327  *
2328  *	Useful for policy notifiers whose constraints may change
2329  *	over time.
2330  */
2331 int cpufreq_update_policy(unsigned int cpu)
2332 {
2333 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2334 	struct cpufreq_policy new_policy;
2335 	int ret;
2336 
2337 	if (!policy)
2338 		return -ENODEV;
2339 
2340 	down_write(&policy->rwsem);
2341 
2342 	pr_debug("updating policy for CPU %u\n", cpu);
2343 	memcpy(&new_policy, policy, sizeof(*policy));
2344 	new_policy.min = policy->user_policy.min;
2345 	new_policy.max = policy->user_policy.max;
2346 	new_policy.policy = policy->user_policy.policy;
2347 	new_policy.governor = policy->user_policy.governor;
2348 
2349 	/*
2350 	 * BIOS might change freq behind our back
2351 	 * -> ask driver for current freq and notify governors about a change
2352 	 */
2353 	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
2354 		new_policy.cur = cpufreq_driver->get(cpu);
2355 		if (WARN_ON(!new_policy.cur)) {
2356 			ret = -EIO;
2357 			goto unlock;
2358 		}
2359 
2360 		if (!policy->cur) {
2361 			pr_debug("Driver did not initialize current freq\n");
2362 			policy->cur = new_policy.cur;
2363 		} else {
2364 			if (policy->cur != new_policy.cur && has_target())
2365 				cpufreq_out_of_sync(cpu, policy->cur,
2366 								new_policy.cur);
2367 		}
2368 	}
2369 
2370 	ret = cpufreq_set_policy(policy, &new_policy);
2371 
2372 unlock:
2373 	up_write(&policy->rwsem);
2374 
2375 	cpufreq_cpu_put(policy);
2376 	return ret;
2377 }
2378 EXPORT_SYMBOL(cpufreq_update_policy);
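
/*
 * Illustrative sketch (not part of the original file): the usual pairing of
 * cpufreq_update_policy() with a policy notifier. A subsystem (thermal, for
 * instance) clamps the maximum from its CPUFREQ_ADJUST callback and calls
 * cpufreq_update_policy() whenever its cap changes, so cpufreq_set_policy()
 * re-runs with the new constraint. The example_ names and the 1 GHz cap are
 * hypothetical.
 */
#if 0	/* example only */
static unsigned int example_cap_khz = 1000000;	/* hypothetical 1 GHz cap */

static int example_policy_cb(struct notifier_block *nb,
			     unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;

	if (event == CPUFREQ_ADJUST)
		cpufreq_verify_within_limits(policy, 0, example_cap_khz);

	return NOTIFY_OK;
}

/* after changing example_cap_khz: cpufreq_update_policy(cpu); */
#endif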
2379 
2380 static int cpufreq_cpu_callback(struct notifier_block *nfb,
2381 					unsigned long action, void *hcpu)
2382 {
2383 	unsigned int cpu = (unsigned long)hcpu;
2384 	struct device *dev;
2385 
2386 	dev = get_cpu_device(cpu);
2387 	if (dev) {
2388 		switch (action & ~CPU_TASKS_FROZEN) {
2389 		case CPU_ONLINE:
2390 			__cpufreq_add_dev(dev, NULL);
2391 			break;
2392 
2393 		case CPU_DOWN_PREPARE:
2394 			__cpufreq_remove_dev_prepare(dev, NULL);
2395 			break;
2396 
2397 		case CPU_POST_DEAD:
2398 			__cpufreq_remove_dev_finish(dev, NULL);
2399 			break;
2400 
2401 		case CPU_DOWN_FAILED:
2402 			__cpufreq_add_dev(dev, NULL);
2403 			break;
2404 		}
2405 	}
2406 	return NOTIFY_OK;
2407 }
2408 
2409 static struct notifier_block __refdata cpufreq_cpu_notifier = {
2410 	.notifier_call = cpufreq_cpu_callback,
2411 };
2412 
2413 /*********************************************************************
2414  *               BOOST						     *
2415  *********************************************************************/
2416 static int cpufreq_boost_set_sw(int state)
2417 {
2418 	struct cpufreq_frequency_table *freq_table;
2419 	struct cpufreq_policy *policy;
2420 	int ret = -EINVAL;
2421 
2422 	list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
2423 		freq_table = cpufreq_frequency_get_table(policy->cpu);
2424 		if (freq_table) {
2425 			ret = cpufreq_frequency_table_cpuinfo(policy,
2426 							freq_table);
2427 			if (ret) {
2428 				pr_err("%s: Policy frequency update failed\n",
2429 				       __func__);
2430 				break;
2431 			}
2432 			policy->user_policy.max = policy->max;
2433 			__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2434 		}
2435 	}
2436 
2437 	return ret;
2438 }
2439 
2440 int cpufreq_boost_trigger_state(int state)
2441 {
2442 	unsigned long flags;
2443 	int ret = 0;
2444 
2445 	if (cpufreq_driver->boost_enabled == state)
2446 		return 0;
2447 
2448 	write_lock_irqsave(&cpufreq_driver_lock, flags);
2449 	cpufreq_driver->boost_enabled = state;
2450 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2451 
2452 	ret = cpufreq_driver->set_boost(state);
2453 	if (ret) {
2454 		write_lock_irqsave(&cpufreq_driver_lock, flags);
2455 		cpufreq_driver->boost_enabled = !state;
2456 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2457 
2458 		pr_err("%s: Cannot %s BOOST\n",
2459 		       __func__, state ? "enable" : "disable");
2460 	}
2461 
2462 	return ret;
2463 }
2464 
2465 int cpufreq_boost_supported(void)
2466 {
2467 	if (likely(cpufreq_driver))
2468 		return cpufreq_driver->boost_supported;
2469 
2470 	return 0;
2471 }
2472 EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
2473 
2474 int cpufreq_boost_enabled(void)
2475 {
2476 	return cpufreq_driver->boost_enabled;
2477 }
2478 EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2479 
2480 /*********************************************************************
2481  *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
2482  *********************************************************************/
2483 
2484 /**
2485  * cpufreq_register_driver - register a CPU Frequency driver
2486  * @driver_data: A struct cpufreq_driver containing the values
2487  * submitted by the CPU Frequency driver.
2488  *
2489  * Registers a CPU Frequency driver with this core code. Returns zero on
2490  * success, -EEXIST when another driver is already registered (and hasn't
2491  * been unregistered in the meantime), or -EINVAL on invalid driver data.
2492  *
2493  */
2494 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2495 {
2496 	unsigned long flags;
2497 	int ret;
2498 
2499 	if (cpufreq_disabled())
2500 		return -ENODEV;
2501 
2502 	if (!driver_data || !driver_data->verify || !driver_data->init ||
2503 	    !(driver_data->setpolicy || driver_data->target_index ||
2504 		    driver_data->target) ||
2505 	     (driver_data->setpolicy && (driver_data->target_index ||
2506 		    driver_data->target)) ||
2507 	     (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
2508 		return -EINVAL;
2509 
2510 	pr_debug("trying to register driver %s\n", driver_data->name);
2511 
2512 	if (driver_data->setpolicy)
2513 		driver_data->flags |= CPUFREQ_CONST_LOOPS;
2514 
2515 	write_lock_irqsave(&cpufreq_driver_lock, flags);
2516 	if (cpufreq_driver) {
2517 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2518 		return -EEXIST;
2519 	}
2520 	cpufreq_driver = driver_data;
2521 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2522 
2523 	if (cpufreq_boost_supported()) {
2524 		/*
2525 		 * Check if driver provides function to enable boost -
2526 		 * if not, use cpufreq_boost_set_sw as default
2527 		 */
2528 		if (!cpufreq_driver->set_boost)
2529 			cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2530 
2531 		ret = cpufreq_sysfs_create_file(&boost.attr);
2532 		if (ret) {
2533 			pr_err("%s: cannot register global BOOST sysfs file\n",
2534 			       __func__);
2535 			goto err_null_driver;
2536 		}
2537 	}
2538 
2539 	ret = subsys_interface_register(&cpufreq_interface);
2540 	if (ret)
2541 		goto err_boost_unreg;
2542 
2543 	if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
2544 		int i;
2545 		ret = -ENODEV;
2546 
2547 		/* check for at least one working CPU */
2548 		for (i = 0; i < nr_cpu_ids; i++)
2549 			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
2550 				ret = 0;
2551 				break;
2552 			}
2553 
2554 		/* if all ->init() calls failed, unregister */
2555 		if (ret) {
2556 			pr_debug("no CPU initialized for driver %s\n",
2557 				 driver_data->name);
2558 			goto err_if_unreg;
2559 		}
2560 	}
2561 
2562 	register_hotcpu_notifier(&cpufreq_cpu_notifier);
2563 	pr_debug("driver %s up and running\n", driver_data->name);
2564 
2565 	return 0;
2566 err_if_unreg:
2567 	subsys_interface_unregister(&cpufreq_interface);
2568 err_boost_unreg:
2569 	if (cpufreq_boost_supported())
2570 		cpufreq_sysfs_remove_file(&boost.attr);
2571 err_null_driver:
2572 	write_lock_irqsave(&cpufreq_driver_lock, flags);
2573 	cpufreq_driver = NULL;
2574 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2575 	return ret;
2576 }
2577 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
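
/*
 * Illustrative sketch (not part of the original file): the skeleton of a
 * frequency-table based driver, showing the callbacks that the validation at
 * the top of cpufreq_register_driver() requires for a ->target_index style
 * driver. The generic helpers from freq_table.c fill in ->verify; the
 * example_ names, table and latency value are hypothetical.
 */
#if 0	/* example only */
static struct cpufreq_frequency_table example_freq_table[] = {
	{ .frequency = 500000 },
	{ .frequency = 1000000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int example_init(struct cpufreq_policy *policy)
{
	/* validates the table and fills in policy/cpuinfo limits */
	return cpufreq_generic_init(policy, example_freq_table, 300000);
}

static int example_target_index(struct cpufreq_policy *policy,
				unsigned int index)
{
	/* program the PLL/divider for example_freq_table[index].frequency */
	return 0;
}

static struct cpufreq_driver example_driver = {
	.name		= "example",
	.init		= example_init,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= example_target_index,
	.attr		= cpufreq_generic_attr,
};

/* from module init: cpufreq_register_driver(&example_driver); */
#endif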
2578 
2579 /**
2580  * cpufreq_unregister_driver - unregister the current CPUFreq driver
2581  *
2582  * Unregister the current CPUFreq driver. Only call this if you have
2583  * the right to do so, i.e. if you have succeeded in initialising before!
2584  * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2585  * currently not initialised.
2586  */
2587 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2588 {
2589 	unsigned long flags;
2590 
2591 	if (!cpufreq_driver || (driver != cpufreq_driver))
2592 		return -EINVAL;
2593 
2594 	pr_debug("unregistering driver %s\n", driver->name);
2595 
2596 	subsys_interface_unregister(&cpufreq_interface);
2597 	if (cpufreq_boost_supported())
2598 		cpufreq_sysfs_remove_file(&boost.attr);
2599 
2600 	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2601 
2602 	down_write(&cpufreq_rwsem);
2603 	write_lock_irqsave(&cpufreq_driver_lock, flags);
2604 
2605 	cpufreq_driver = NULL;
2606 
2607 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2608 	up_write(&cpufreq_rwsem);
2609 
2610 	return 0;
2611 }
2612 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2613 
2614 static int __init cpufreq_core_init(void)
2615 {
2616 	if (cpufreq_disabled())
2617 		return -ENODEV;
2618 
2619 	cpufreq_global_kobject = kobject_create();
2620 	BUG_ON(!cpufreq_global_kobject);
2621 
2622 	return 0;
2623 }
2624 core_initcall(cpufreq_core_init);
2625