1 /*
2  *  linux/drivers/cpufreq/cpufreq.c
3  *
4  *  Copyright (C) 2001 Russell King
5  *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6  *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
7  *
8  *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
9  *	Added handling for CPU hotplug
10  *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11  *	Fix handling for CPU hotplug -- affected CPUs
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License version 2 as
15  * published by the Free Software Foundation.
16  */
17 
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19 
20 #include <linux/cpu.h>
21 #include <linux/cpufreq.h>
22 #include <linux/cpufreq_times.h>
23 #include <linux/delay.h>
24 #include <linux/device.h>
25 #include <linux/init.h>
26 #include <linux/kernel_stat.h>
27 #include <linux/module.h>
28 #include <linux/mutex.h>
29 #include <linux/slab.h>
30 #include <linux/suspend.h>
31 #include <linux/syscore_ops.h>
32 #include <linux/tick.h>
33 #ifdef CONFIG_SMP
34 #include <linux/sched.h>
35 #endif
36 #include <trace/events/power.h>
37 
38 static LIST_HEAD(cpufreq_policy_list);
39 
40 static inline bool policy_is_inactive(struct cpufreq_policy *policy)
41 {
42 	return cpumask_empty(policy->cpus);
43 }
44 
45 static bool suitable_policy(struct cpufreq_policy *policy, bool active)
46 {
47 	return active == !policy_is_inactive(policy);
48 }
49 
50 /* Finds next active/inactive policy */
51 static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
52 					  bool active)
53 {
54 	do {
55 		policy = list_next_entry(policy, policy_list);
56 
57 		/* No more policies in the list */
58 		if (&policy->policy_list == &cpufreq_policy_list)
59 			return NULL;
60 	} while (!suitable_policy(policy, active));
61 
62 	return policy;
63 }
64 
65 static struct cpufreq_policy *first_policy(bool active)
66 {
67 	struct cpufreq_policy *policy;
68 
69 	/* No policies in the list */
70 	if (list_empty(&cpufreq_policy_list))
71 		return NULL;
72 
73 	policy = list_first_entry(&cpufreq_policy_list, typeof(*policy),
74 				  policy_list);
75 
76 	if (!suitable_policy(policy, active))
77 		policy = next_policy(policy, active);
78 
79 	return policy;
80 }
81 
82 /* Macros to iterate over CPU policies */
83 #define for_each_suitable_policy(__policy, __active)	\
84 	for (__policy = first_policy(__active);		\
85 	     __policy;					\
86 	     __policy = next_policy(__policy, __active))
87 
88 #define for_each_active_policy(__policy)		\
89 	for_each_suitable_policy(__policy, true)
90 #define for_each_inactive_policy(__policy)		\
91 	for_each_suitable_policy(__policy, false)
92 
93 #define for_each_policy(__policy)			\
94 	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)
95 
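/*
 * Illustrative usage sketch (not part of the original file): iterating the
 * currently active policies with the macros above looks like
 *
 *	struct cpufreq_policy *policy;
 *
 *	for_each_active_policy(policy)
 *		pr_debug("active policy, owning CPU%u\n", policy->cpu);
 *
 * as done, for example, by cpufreq_suspend() and cpufreq_resume() below.
 */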
96 /* Iterate over governors */
97 static LIST_HEAD(cpufreq_governor_list);
98 #define for_each_governor(__governor)				\
99 	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
100 
101 /**
102  * The "cpufreq driver" - the arch- or hardware-dependent low
103  * level driver of CPUFreq support, and its spinlock. This lock
104  * also protects the cpufreq_cpu_data array.
105  */
106 static struct cpufreq_driver *cpufreq_driver;
107 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
108 static DEFINE_RWLOCK(cpufreq_driver_lock);
109 DEFINE_MUTEX(cpufreq_governor_lock);
110 
111 /* Flag to suspend/resume CPUFreq governors */
112 static bool cpufreq_suspended;
113 
114 static inline bool has_target(void)
115 {
116 	return cpufreq_driver->target_index || cpufreq_driver->target;
117 }
118 
119 /* internal prototypes */
120 static int __cpufreq_governor(struct cpufreq_policy *policy,
121 		unsigned int event);
122 static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
123 static void handle_update(struct work_struct *work);
124 
125 /**
126  * Two notifier lists: the "policy" list is involved in the
127  * validation process for a new CPU frequency policy; the
128  * "transition" list for kernel code that needs to handle
129  * changes to devices when the CPU clock speed changes.
130  * The mutex locks both lists.
131  */
132 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
133 static struct srcu_notifier_head cpufreq_transition_notifier_list;
134 
135 static bool init_cpufreq_transition_notifier_list_called;
136 static int __init init_cpufreq_transition_notifier_list(void)
137 {
138 	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
139 	init_cpufreq_transition_notifier_list_called = true;
140 	return 0;
141 }
142 pure_initcall(init_cpufreq_transition_notifier_list);
143 
144 static int off __read_mostly;
145 static int cpufreq_disabled(void)
146 {
147 	return off;
148 }
149 void disable_cpufreq(void)
150 {
151 	off = 1;
152 }
153 static DEFINE_MUTEX(cpufreq_governor_mutex);
154 
155 bool have_governor_per_policy(void)
156 {
157 	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
158 }
159 EXPORT_SYMBOL_GPL(have_governor_per_policy);
160 
161 bool cpufreq_driver_is_slow(void)
162 {
163 	return !(cpufreq_driver->flags & CPUFREQ_DRIVER_FAST);
164 }
165 EXPORT_SYMBOL_GPL(cpufreq_driver_is_slow);
166 
167 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
168 {
169 	if (have_governor_per_policy())
170 		return &policy->kobj;
171 	else
172 		return cpufreq_global_kobject;
173 }
174 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
175 
176 struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
177 {
178 	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
179 
180 	return policy && !policy_is_inactive(policy) ?
181 		policy->freq_table : NULL;
182 }
183 EXPORT_SYMBOL_GPL(cpufreq_frequency_get_table);
184 
185 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
186 {
187 	u64 idle_time;
188 	u64 cur_wall_time;
189 	u64 busy_time;
190 
191 	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
192 
193 	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
194 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
195 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
196 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
197 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
198 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
199 
200 	idle_time = cur_wall_time - busy_time;
201 	if (wall)
202 		*wall = cputime_to_usecs(cur_wall_time);
203 
204 	return cputime_to_usecs(idle_time);
205 }
206 
207 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
208 {
209 	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
210 
211 	if (idle_time == -1ULL)
212 		return get_cpu_idle_time_jiffy(cpu, wall);
213 	else if (!io_busy)
214 		idle_time += get_cpu_iowait_time_us(cpu, wall);
215 
216 	return idle_time;
217 }
218 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
219 
220 /*
221  * This is a generic cpufreq init() routine which can be used by cpufreq
222  * drivers of SMP systems. It will do the following:
223  * - validate & show the freq table passed in
224  * - set the policy's transition latency
225  * - fill policy->cpus with all possible CPUs
226  */
227 int cpufreq_generic_init(struct cpufreq_policy *policy,
228 		struct cpufreq_frequency_table *table,
229 		unsigned int transition_latency)
230 {
231 	int ret;
232 
233 	ret = cpufreq_table_validate_and_show(policy, table);
234 	if (ret) {
235 		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
236 		return ret;
237 	}
238 
239 	policy->cpuinfo.transition_latency = transition_latency;
240 
241 	/*
242 	 * The driver only supports the SMP configuration where all processors
243 	 * share the same clock and voltage.
244 	 */
245 	cpumask_setall(policy->cpus);
246 
247 	return 0;
248 }
249 EXPORT_SYMBOL_GPL(cpufreq_generic_init);
250 
251 struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
252 {
253 	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
254 
255 	return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
256 }
257 EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);
258 
259 unsigned int cpufreq_generic_get(unsigned int cpu)
260 {
261 	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
262 
263 	if (!policy || IS_ERR(policy->clk)) {
264 		pr_err("%s: No %s associated to cpu: %d\n",
265 		       __func__, policy ? "clk" : "policy", cpu);
266 		return 0;
267 	}
268 
269 	return clk_get_rate(policy->clk) / 1000;
270 }
271 EXPORT_SYMBOL_GPL(cpufreq_generic_get);
272 
273 /**
274  * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
275  *
276  * @cpu: cpu to find policy for.
277  *
278  * This returns the policy for 'cpu', or NULL if it doesn't exist.
279  * It also increments the kobject reference count to mark the policy busy, so a
280  * corresponding call to cpufreq_cpu_put() is required to decrement it again.
281  * If that call to cpufreq_cpu_put() isn't made, the policy won't be
282  * freed, as that depends on the kobj count.
283  *
284  * Return: A valid policy on success, otherwise NULL on failure.
285  */
286 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
287 {
288 	struct cpufreq_policy *policy = NULL;
289 	unsigned long flags;
290 
291 	if (WARN_ON(cpu >= nr_cpu_ids))
292 		return NULL;
293 
294 	/* get the cpufreq driver */
295 	read_lock_irqsave(&cpufreq_driver_lock, flags);
296 
297 	if (cpufreq_driver) {
298 		/* get the CPU */
299 		policy = cpufreq_cpu_get_raw(cpu);
300 		if (policy)
301 			kobject_get(&policy->kobj);
302 	}
303 
304 	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
305 
306 	return policy;
307 }
308 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
309 
310 /**
311  * cpufreq_cpu_put: Decrements the usage count of a policy
312  *
313  * @policy: policy earlier returned by cpufreq_cpu_get().
314  *
315  * This decrements the kobject reference count incremented earlier by calling
316  * cpufreq_cpu_get().
317  */
318 void cpufreq_cpu_put(struct cpufreq_policy *policy)
319 {
320 	kobject_put(&policy->kobj);
321 }
322 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
323 
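/*
 * Illustrative usage sketch (not part of the original file): callers of
 * cpufreq_cpu_get() must balance it with cpufreq_cpu_put() once they are
 * done with the policy, e.g.
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		pr_info("CPU%u last known freq: %u kHz\n", cpu, policy->cur);
 *		cpufreq_cpu_put(policy);
 *	}
 */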
324 /*********************************************************************
325  *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
326  *********************************************************************/
327 
328 /**
329  * adjust_jiffies - adjust the system "loops_per_jiffy"
330  *
331  * This function alters the system "loops_per_jiffy" for the clock
332  * speed change. Note that loops_per_jiffy cannot be updated on SMP
333  * systems as each CPU might be scaled differently. So, use the arch
334  * per-CPU loops_per_jiffy value wherever possible.
335  */
336 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
337 {
338 #ifndef CONFIG_SMP
339 	static unsigned long l_p_j_ref;
340 	static unsigned int l_p_j_ref_freq;
341 
342 	if (ci->flags & CPUFREQ_CONST_LOOPS)
343 		return;
344 
345 	if (!l_p_j_ref_freq) {
346 		l_p_j_ref = loops_per_jiffy;
347 		l_p_j_ref_freq = ci->old;
348 		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
349 			 l_p_j_ref, l_p_j_ref_freq);
350 	}
351 	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
352 		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
353 								ci->new);
354 		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
355 			 loops_per_jiffy, ci->new);
356 	}
357 #endif
358 }
359 
360 /*********************************************************************
361  *               FREQUENCY INVARIANT CPU CAPACITY                    *
362  *********************************************************************/
363 
364 static DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
365 static DEFINE_PER_CPU(unsigned long, max_freq_scale) = SCHED_CAPACITY_SCALE;
366 
367 static void
368 scale_freq_capacity(struct cpufreq_policy *policy, struct cpufreq_freqs *freqs)
369 {
370 	unsigned long cur = freqs ? freqs->new : policy->cur;
371 	unsigned long scale = (cur << SCHED_CAPACITY_SHIFT) / policy->max;
372 	struct cpufreq_cpuinfo *cpuinfo = &policy->cpuinfo;
373 	int cpu;
374 
375 	pr_debug("cpus %*pbl cur/cur max freq %lu/%u kHz freq scale %lu\n",
376 		 cpumask_pr_args(policy->cpus), cur, policy->max, scale);
377 
378 	for_each_cpu(cpu, policy->cpus)
379 		per_cpu(freq_scale, cpu) = scale;
380 
381 	if (freqs)
382 		return;
383 
384 	scale = (policy->max << SCHED_CAPACITY_SHIFT) / cpuinfo->max_freq;
385 
386 	pr_debug("cpus %*pbl cur max/max freq %u/%u kHz max freq scale %lu\n",
387 		 cpumask_pr_args(policy->cpus), policy->max, cpuinfo->max_freq,
388 		 scale);
389 
390 	for_each_cpu(cpu, policy->cpus)
391 		per_cpu(max_freq_scale, cpu) = scale;
392 }
393 
394 unsigned long cpufreq_scale_freq_capacity(struct sched_domain *sd, int cpu)
395 {
396 	return per_cpu(freq_scale, cpu);
397 }
398 
399 unsigned long cpufreq_scale_max_freq_capacity(int cpu)
400 {
401 	return per_cpu(max_freq_scale, cpu);
402 }
403 
404 static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
405 		struct cpufreq_freqs *freqs, unsigned int state)
406 {
407 	BUG_ON(irqs_disabled());
408 
409 	if (cpufreq_disabled())
410 		return;
411 
412 	freqs->flags = cpufreq_driver->flags;
413 	pr_debug("notification %u of frequency transition to %u kHz\n",
414 		 state, freqs->new);
415 
416 	switch (state) {
417 
418 	case CPUFREQ_PRECHANGE:
419 		/* detect if the driver reported a value as "old frequency"
420 		 * which is not equal to what the cpufreq core thinks is
421 		 * "old frequency".
422 		 */
423 		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
424 			if ((policy) && (policy->cpu == freqs->cpu) &&
425 			    (policy->cur) && (policy->cur != freqs->old)) {
426 				pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
427 					 freqs->old, policy->cur);
428 				freqs->old = policy->cur;
429 			}
430 		}
431 		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
432 				CPUFREQ_PRECHANGE, freqs);
433 		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
434 		break;
435 
436 	case CPUFREQ_POSTCHANGE:
437 		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
438 		pr_debug("FREQ: %lu - CPU: %lu\n",
439 			 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
440 		trace_cpu_frequency(freqs->new, freqs->cpu);
441 		cpufreq_times_record_transition(freqs);
442 		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
443 				CPUFREQ_POSTCHANGE, freqs);
444 		if (likely(policy) && likely(policy->cpu == freqs->cpu))
445 			policy->cur = freqs->new;
446 		break;
447 	}
448 }
449 
450 /**
451  * cpufreq_notify_transition - call notifier chain and adjust_jiffies
452  * on frequency transition.
453  *
454  * This function calls the transition notifiers and the "adjust_jiffies"
455  * function. It is called twice on all CPU frequency changes that have
456  * external effects.
457  */
458 static void cpufreq_notify_transition(struct cpufreq_policy *policy,
459 		struct cpufreq_freqs *freqs, unsigned int state)
460 {
461 	for_each_cpu(freqs->cpu, policy->cpus)
462 		__cpufreq_notify_transition(policy, freqs, state);
463 }
464 
465 /* Do post notifications when there is a chance that the transition has failed */
466 static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
467 		struct cpufreq_freqs *freqs, int transition_failed)
468 {
469 	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
470 	if (!transition_failed)
471 		return;
472 
473 	swap(freqs->old, freqs->new);
474 	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
475 	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
476 }
477 
478 void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
479 		struct cpufreq_freqs *freqs)
480 {
481 #ifdef CONFIG_SMP
482 	int cpu;
483 #endif
484 
485 	/*
486 	 * Catch double invocations of _begin() which lead to self-deadlock.
487 	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
488 	 * doesn't invoke _begin() on their behalf, and hence the chances of
489 	 * double invocations are very low. Moreover, there are scenarios
490 	 * where these checks can emit false-positive warnings in these
491 	 * drivers; so we avoid that by skipping them altogether.
492 	 */
493 	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
494 				&& current == policy->transition_task);
495 
496 wait:
497 	wait_event(policy->transition_wait, !policy->transition_ongoing);
498 
499 	spin_lock(&policy->transition_lock);
500 
501 	if (unlikely(policy->transition_ongoing)) {
502 		spin_unlock(&policy->transition_lock);
503 		goto wait;
504 	}
505 
506 	policy->transition_ongoing = true;
507 	policy->transition_task = current;
508 
509 	spin_unlock(&policy->transition_lock);
510 
511 	scale_freq_capacity(policy, freqs);
512 #ifdef CONFIG_SMP
513 	for_each_cpu(cpu, policy->cpus)
514 		trace_cpu_capacity(capacity_curr_of(cpu), cpu);
515 #endif
516 
517 	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
518 }
519 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
520 
521 void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
522 		struct cpufreq_freqs *freqs, int transition_failed)
523 {
524 	if (unlikely(WARN_ON(!policy->transition_ongoing)))
525 		return;
526 
527 	cpufreq_notify_post_transition(policy, freqs, transition_failed);
528 
529 	policy->transition_ongoing = false;
530 	policy->transition_task = NULL;
531 
532 	wake_up(&policy->transition_wait);
533 }
534 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
535 
536 /**
537  * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
538  * one.
539  * @target_freq: target frequency to resolve.
540  *
541  * The target to driver frequency mapping is cached in the policy.
542  *
543  * Return: Lowest driver-supported frequency greater than or equal to the
544  * given target_freq, subject to policy (min/max) and driver limitations.
545  */
546 unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
547 					 unsigned int target_freq)
548 {
549 	target_freq = clamp_val(target_freq, policy->min, policy->max);
550 	policy->cached_target_freq = target_freq;
551 
552 	if (cpufreq_driver->target_index) {
553 		int idx, rv;
554 
555 		rv = cpufreq_frequency_table_target(policy, policy->freq_table,
556 						    target_freq,
557 						    CPUFREQ_RELATION_L,
558 						    &idx);
559 		if (rv)
560 			return target_freq;
561 		policy->cached_resolved_idx = idx;
562 		return policy->freq_table[idx].frequency;
563 	}
564 
565 	return target_freq;
566 }
567 EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
568 
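/*
 * Illustrative usage sketch (not part of the original file): a governor that
 * has computed a raw target frequency would typically resolve it first, e.g.
 *
 *	unsigned int freq = cpufreq_driver_resolve_freq(policy, raw_target);
 *
 * so that the value it requests (and caches) corresponds to an actual entry
 * in the driver's frequency table, clamped to policy->min/policy->max.
 */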
569 /*********************************************************************
570  *                          SYSFS INTERFACE                          *
571  *********************************************************************/
572 static ssize_t show_boost(struct kobject *kobj,
573 			  struct kobj_attribute *attr, char *buf)
574 {
575 	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
576 }
577 
578 static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
579 			   const char *buf, size_t count)
580 {
581 	int ret, enable;
582 
583 	ret = sscanf(buf, "%d", &enable);
584 	if (ret != 1 || enable < 0 || enable > 1)
585 		return -EINVAL;
586 
587 	if (cpufreq_boost_trigger_state(enable)) {
588 		pr_err("%s: Cannot %s BOOST!\n",
589 		       __func__, enable ? "enable" : "disable");
590 		return -EINVAL;
591 	}
592 
593 	pr_debug("%s: cpufreq BOOST %s\n",
594 		 __func__, enable ? "enabled" : "disabled");
595 
596 	return count;
597 }
598 define_one_global_rw(boost);
599 
600 static struct cpufreq_governor *find_governor(const char *str_governor)
601 {
602 	struct cpufreq_governor *t;
603 
604 	for_each_governor(t)
605 		if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
606 			return t;
607 
608 	return NULL;
609 }
610 
611 /**
612  * cpufreq_parse_governor - parse a governor string
613  */
614 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
615 				struct cpufreq_governor **governor)
616 {
617 	int err = -EINVAL;
618 
619 	if (cpufreq_driver->setpolicy) {
620 		if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
621 			*policy = CPUFREQ_POLICY_PERFORMANCE;
622 			err = 0;
623 		} else if (!strncasecmp(str_governor, "powersave",
624 						CPUFREQ_NAME_LEN)) {
625 			*policy = CPUFREQ_POLICY_POWERSAVE;
626 			err = 0;
627 		}
628 	} else {
629 		struct cpufreq_governor *t;
630 
631 		mutex_lock(&cpufreq_governor_mutex);
632 
633 		t = find_governor(str_governor);
634 
635 		if (t == NULL) {
636 			int ret;
637 
638 			mutex_unlock(&cpufreq_governor_mutex);
639 			ret = request_module("cpufreq_%s", str_governor);
640 			mutex_lock(&cpufreq_governor_mutex);
641 
642 			if (ret == 0)
643 				t = find_governor(str_governor);
644 		}
645 
646 		if (t != NULL) {
647 			*governor = t;
648 			err = 0;
649 		}
650 
651 		mutex_unlock(&cpufreq_governor_mutex);
652 	}
653 	return err;
654 }
655 
656 /**
657  * cpufreq_per_cpu_attr_read() / show_##file_name() -
658  * print out cpufreq information
659  *
660  * Write out information from cpufreq_driver->policy[cpu]; object must be
661  * "unsigned int".
662  */
663 
664 #define show_one(file_name, object)			\
665 static ssize_t show_##file_name				\
666 (struct cpufreq_policy *policy, char *buf)		\
667 {							\
668 	return sprintf(buf, "%u\n", policy->object);	\
669 }
670 
671 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
672 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
673 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
674 show_one(scaling_min_freq, min);
675 show_one(scaling_max_freq, max);
676 
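/*
 * For reference (not part of the original file): show_one(scaling_min_freq, min)
 * above expands to roughly
 *
 *	static ssize_t show_scaling_min_freq(struct cpufreq_policy *policy, char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->min);
 *	}
 *
 * and analogously for the other show_one() users.
 */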
677 static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
678 {
679 	ssize_t ret;
680 
681 	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
682 		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
683 	else
684 		ret = sprintf(buf, "%u\n", policy->cur);
685 	return ret;
686 }
687 
688 static int cpufreq_set_policy(struct cpufreq_policy *policy,
689 				struct cpufreq_policy *new_policy);
690 
691 /**
692  * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
693  */
694 #define store_one(file_name, object)			\
695 static ssize_t store_##file_name					\
696 (struct cpufreq_policy *policy, const char *buf, size_t count)		\
697 {									\
698 	int ret, temp;							\
699 	struct cpufreq_policy new_policy;				\
700 									\
701 	memcpy(&new_policy, policy, sizeof(*policy));			\
702 	new_policy.min = policy->user_policy.min;			\
703 	new_policy.max = policy->user_policy.max;			\
704 									\
705 	ret = sscanf(buf, "%u", &new_policy.object);			\
706 	if (ret != 1)							\
707 		return -EINVAL;						\
708 									\
709 	temp = new_policy.object;					\
710 	ret = cpufreq_set_policy(policy, &new_policy);		\
711 	if (!ret)							\
712 		policy->user_policy.object = temp;			\
713 									\
714 	return ret ? ret : count;					\
715 }
716 
717 store_one(scaling_min_freq, min);
718 store_one(scaling_max_freq, max);
719 
720 /**
721  * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
722  */
723 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
724 					char *buf)
725 {
726 	unsigned int cur_freq = __cpufreq_get(policy);
727 
728 	if (cur_freq)
729 		return sprintf(buf, "%u\n", cur_freq);
730 
731 	return sprintf(buf, "<unknown>\n");
732 }
733 
734 /**
735  * show_scaling_governor - show the current policy for the specified CPU
736  */
737 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
738 {
739 	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
740 		return sprintf(buf, "powersave\n");
741 	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
742 		return sprintf(buf, "performance\n");
743 	else if (policy->governor)
744 		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
745 				policy->governor->name);
746 	return -EINVAL;
747 }
748 
749 /**
750  * store_scaling_governor - store policy for the specified CPU
751  */
752 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
753 					const char *buf, size_t count)
754 {
755 	int ret;
756 	char	str_governor[16];
757 	struct cpufreq_policy new_policy;
758 
759 	memcpy(&new_policy, policy, sizeof(*policy));
760 
761 	ret = sscanf(buf, "%15s", str_governor);
762 	if (ret != 1)
763 		return -EINVAL;
764 
765 	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
766 						&new_policy.governor))
767 		return -EINVAL;
768 
769 	ret = cpufreq_set_policy(policy, &new_policy);
770 	return ret ? ret : count;
771 }
772 
773 /**
774  * show_scaling_driver - show the cpufreq driver currently loaded
775  */
776 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
777 {
778 	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
779 }
780 
781 /**
782  * show_scaling_available_governors - show the available CPUfreq governors
783  */
784 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
785 						char *buf)
786 {
787 	ssize_t i = 0;
788 	struct cpufreq_governor *t;
789 
790 	if (!has_target()) {
791 		i += sprintf(buf, "performance powersave");
792 		goto out;
793 	}
794 
795 	for_each_governor(t) {
796 		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
797 		    - (CPUFREQ_NAME_LEN + 2)))
798 			goto out;
799 		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
800 	}
801 out:
802 	i += sprintf(&buf[i], "\n");
803 	return i;
804 }
805 
806 ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
807 {
808 	ssize_t i = 0;
809 	unsigned int cpu;
810 
811 	for_each_cpu(cpu, mask) {
812 		if (i)
813 			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
814 		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
815 		if (i >= (PAGE_SIZE - 5))
816 			break;
817 	}
818 	i += sprintf(&buf[i], "\n");
819 	return i;
820 }
821 EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
822 
823 /**
824  * show_related_cpus - show the CPUs affected by each transition even if
825  * hw coordination is in use
826  */
827 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
828 {
829 	return cpufreq_show_cpus(policy->related_cpus, buf);
830 }
831 
832 /**
833  * show_affected_cpus - show the CPUs affected by each transition
834  */
835 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
836 {
837 	return cpufreq_show_cpus(policy->cpus, buf);
838 }
839 
840 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
841 					const char *buf, size_t count)
842 {
843 	unsigned int freq = 0;
844 	unsigned int ret;
845 
846 	if (!policy->governor || !policy->governor->store_setspeed)
847 		return -EINVAL;
848 
849 	ret = sscanf(buf, "%u", &freq);
850 	if (ret != 1)
851 		return -EINVAL;
852 
853 	policy->governor->store_setspeed(policy, freq);
854 
855 	return count;
856 }
857 
858 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
859 {
860 	if (!policy->governor || !policy->governor->show_setspeed)
861 		return sprintf(buf, "<unsupported>\n");
862 
863 	return policy->governor->show_setspeed(policy, buf);
864 }
865 
866 /**
867  * show_bios_limit - show the current cpufreq HW/BIOS limitation
868  */
869 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
870 {
871 	unsigned int limit;
872 	int ret;
873 	if (cpufreq_driver->bios_limit) {
874 		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
875 		if (!ret)
876 			return sprintf(buf, "%u\n", limit);
877 	}
878 	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
879 }
880 
881 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
882 cpufreq_freq_attr_ro(cpuinfo_min_freq);
883 cpufreq_freq_attr_ro(cpuinfo_max_freq);
884 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
885 cpufreq_freq_attr_ro(scaling_available_governors);
886 cpufreq_freq_attr_ro(scaling_driver);
887 cpufreq_freq_attr_ro(scaling_cur_freq);
888 cpufreq_freq_attr_ro(bios_limit);
889 cpufreq_freq_attr_ro(related_cpus);
890 cpufreq_freq_attr_ro(affected_cpus);
891 cpufreq_freq_attr_rw(scaling_min_freq);
892 cpufreq_freq_attr_rw(scaling_max_freq);
893 cpufreq_freq_attr_rw(scaling_governor);
894 cpufreq_freq_attr_rw(scaling_setspeed);
895 
896 static struct attribute *default_attrs[] = {
897 	&cpuinfo_min_freq.attr,
898 	&cpuinfo_max_freq.attr,
899 	&cpuinfo_transition_latency.attr,
900 	&scaling_min_freq.attr,
901 	&scaling_max_freq.attr,
902 	&affected_cpus.attr,
903 	&related_cpus.attr,
904 	&scaling_governor.attr,
905 	&scaling_driver.attr,
906 	&scaling_available_governors.attr,
907 	&scaling_setspeed.attr,
908 	NULL
909 };
910 
911 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
912 #define to_attr(a) container_of(a, struct freq_attr, attr)
913 
914 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
915 {
916 	struct cpufreq_policy *policy = to_policy(kobj);
917 	struct freq_attr *fattr = to_attr(attr);
918 	ssize_t ret;
919 
920 	if (!fattr->show)
921 		return -EIO;
922 
923 	down_read(&policy->rwsem);
924 
925 	if (fattr->show)
926 		ret = fattr->show(policy, buf);
927 	else
928 		ret = -EIO;
929 
930 	up_read(&policy->rwsem);
931 
932 	return ret;
933 }
934 
935 static ssize_t store(struct kobject *kobj, struct attribute *attr,
936 		     const char *buf, size_t count)
937 {
938 	struct cpufreq_policy *policy = to_policy(kobj);
939 	struct freq_attr *fattr = to_attr(attr);
940 	ssize_t ret = -EINVAL;
941 
942 	if (!fattr->store)
943 		return -EIO;
944 
945 	get_online_cpus();
946 
947 	if (!cpu_online(policy->cpu))
948 		goto unlock;
949 
950 	down_write(&policy->rwsem);
951 
952 	if (fattr->store)
953 		ret = fattr->store(policy, buf, count);
954 	else
955 		ret = -EIO;
956 
957 	up_write(&policy->rwsem);
958 unlock:
959 	put_online_cpus();
960 
961 	return ret;
962 }
963 
964 static void cpufreq_sysfs_release(struct kobject *kobj)
965 {
966 	struct cpufreq_policy *policy = to_policy(kobj);
967 	pr_debug("last reference is dropped\n");
968 	complete(&policy->kobj_unregister);
969 }
970 
971 static const struct sysfs_ops sysfs_ops = {
972 	.show	= show,
973 	.store	= store,
974 };
975 
976 static struct kobj_type ktype_cpufreq = {
977 	.sysfs_ops	= &sysfs_ops,
978 	.default_attrs	= default_attrs,
979 	.release	= cpufreq_sysfs_release,
980 };
981 
982 static int add_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
983 {
984 	struct device *cpu_dev;
985 
986 	pr_debug("%s: Adding symlink for CPU: %u\n", __func__, cpu);
987 
988 	if (!policy)
989 		return 0;
990 
991 	cpu_dev = get_cpu_device(cpu);
992 	if (WARN_ON(!cpu_dev))
993 		return 0;
994 
995 	return sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq");
996 }
997 
998 static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu)
999 {
1000 	struct device *cpu_dev;
1001 
1002 	pr_debug("%s: Removing symlink for CPU: %u\n", __func__, cpu);
1003 
1004 	cpu_dev = get_cpu_device(cpu);
1005 	if (WARN_ON(!cpu_dev))
1006 		return;
1007 
1008 	sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1009 }
1010 
1011 /* Add/remove symlinks for all related CPUs */
1012 static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
1013 {
1014 	unsigned int j;
1015 	int ret = 0;
1016 
1017 	/* Some related CPUs might not be present (physically hotplugged) */
1018 	for_each_cpu(j, policy->real_cpus) {
1019 		ret = add_cpu_dev_symlink(policy, j);
1020 		if (ret)
1021 			break;
1022 	}
1023 
1024 	return ret;
1025 }
1026 
1027 static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
1028 {
1029 	unsigned int j;
1030 
1031 	/* Some related CPUs might not be present (physically hotplugged) */
1032 	for_each_cpu(j, policy->real_cpus)
1033 		remove_cpu_dev_symlink(policy, j);
1034 }
1035 
1036 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
1037 {
1038 	struct freq_attr **drv_attr;
1039 	int ret = 0;
1040 
1041 	/* set up files for this cpu device */
1042 	drv_attr = cpufreq_driver->attr;
1043 	while (drv_attr && *drv_attr) {
1044 		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
1045 		if (ret)
1046 			return ret;
1047 		drv_attr++;
1048 	}
1049 	if (cpufreq_driver->get) {
1050 		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
1051 		if (ret)
1052 			return ret;
1053 	}
1054 
1055 	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
1056 	if (ret)
1057 		return ret;
1058 
1059 	if (cpufreq_driver->bios_limit) {
1060 		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
1061 		if (ret)
1062 			return ret;
1063 	}
1064 
1065 	return cpufreq_add_dev_symlink(policy);
1066 }
1067 
1068 static int cpufreq_init_policy(struct cpufreq_policy *policy)
1069 {
1070 	struct cpufreq_governor *gov = NULL;
1071 	struct cpufreq_policy new_policy;
1072 
1073 	memcpy(&new_policy, policy, sizeof(*policy));
1074 
1075 	/* Update governor of new_policy to the governor used before hotplug */
1076 	gov = find_governor(policy->last_governor);
1077 	if (gov)
1078 		pr_debug("Restoring governor %s for cpu %d\n",
1079 				policy->governor->name, policy->cpu);
1080 	else
1081 		gov = CPUFREQ_DEFAULT_GOVERNOR;
1082 
1083 	new_policy.governor = gov;
1084 
1085 	/* Use the default policy if there is no last_policy. */
1086 	if (cpufreq_driver->setpolicy) {
1087 		if (policy->last_policy)
1088 			new_policy.policy = policy->last_policy;
1089 		else
1090 			cpufreq_parse_governor(gov->name, &new_policy.policy,
1091 					       NULL);
1092 	}
1093 	/* set default policy */
1094 	return cpufreq_set_policy(policy, &new_policy);
1095 }
1096 
1097 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1098 {
1099 	int ret = 0;
1100 
1101 	/* Has this CPU been taken care of already? */
1102 	if (cpumask_test_cpu(cpu, policy->cpus))
1103 		return 0;
1104 
1105 	if (has_target()) {
1106 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1107 		if (ret) {
1108 			pr_err("%s: Failed to stop governor\n", __func__);
1109 			return ret;
1110 		}
1111 	}
1112 
1113 	down_write(&policy->rwsem);
1114 	cpumask_set_cpu(cpu, policy->cpus);
1115 	up_write(&policy->rwsem);
1116 
1117 	if (has_target()) {
1118 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1119 		if (!ret)
1120 			ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1121 
1122 		if (ret) {
1123 			pr_err("%s: Failed to start governor\n", __func__);
1124 			return ret;
1125 		}
1126 	}
1127 
1128 	return 0;
1129 }
1130 
1131 static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
1132 {
1133 	struct device *dev = get_cpu_device(cpu);
1134 	struct cpufreq_policy *policy;
1135 
1136 	if (WARN_ON(!dev))
1137 		return NULL;
1138 
1139 	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1140 	if (!policy)
1141 		return NULL;
1142 
1143 	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1144 		goto err_free_policy;
1145 
1146 	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1147 		goto err_free_cpumask;
1148 
1149 	if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
1150 		goto err_free_rcpumask;
1151 
1152 	kobject_init(&policy->kobj, &ktype_cpufreq);
1153 	INIT_LIST_HEAD(&policy->policy_list);
1154 	init_rwsem(&policy->rwsem);
1155 	spin_lock_init(&policy->transition_lock);
1156 	init_waitqueue_head(&policy->transition_wait);
1157 	init_completion(&policy->kobj_unregister);
1158 	INIT_WORK(&policy->update, handle_update);
1159 
1160 	policy->cpu = cpu;
1161 	return policy;
1162 
1163 err_free_rcpumask:
1164 	free_cpumask_var(policy->related_cpus);
1165 err_free_cpumask:
1166 	free_cpumask_var(policy->cpus);
1167 err_free_policy:
1168 	kfree(policy);
1169 
1170 	return NULL;
1171 }
1172 
1173 static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
1174 {
1175 	struct kobject *kobj;
1176 	struct completion *cmp;
1177 
1178 	if (notify)
1179 		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1180 					     CPUFREQ_REMOVE_POLICY, policy);
1181 
1182 	down_write(&policy->rwsem);
1183 	cpufreq_remove_dev_symlink(policy);
1184 	kobj = &policy->kobj;
1185 	cmp = &policy->kobj_unregister;
1186 	up_write(&policy->rwsem);
1187 	kobject_put(kobj);
1188 
1189 	/*
1190 	 * We need to make sure that the underlying kobj is
1191 	 * actually not referenced anymore by anybody before we
1192 	 * proceed with unloading.
1193 	 */
1194 	pr_debug("waiting for dropping of refcount\n");
1195 	wait_for_completion(cmp);
1196 	pr_debug("wait complete\n");
1197 }
1198 
1199 static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
1200 {
1201 	unsigned long flags;
1202 	int cpu;
1203 
1204 	/* Remove policy from list */
1205 	write_lock_irqsave(&cpufreq_driver_lock, flags);
1206 	list_del(&policy->policy_list);
1207 
1208 	for_each_cpu(cpu, policy->related_cpus)
1209 		per_cpu(cpufreq_cpu_data, cpu) = NULL;
1210 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1211 
1212 	cpufreq_policy_put_kobj(policy, notify);
1213 	free_cpumask_var(policy->real_cpus);
1214 	free_cpumask_var(policy->related_cpus);
1215 	free_cpumask_var(policy->cpus);
1216 	kfree(policy);
1217 }
1218 
1219 static int cpufreq_online(unsigned int cpu)
1220 {
1221 	struct cpufreq_policy *policy;
1222 	bool new_policy;
1223 	unsigned long flags;
1224 	unsigned int j;
1225 	int ret;
1226 
1227 	pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
1228 
1229 	/* Check if this CPU already has a policy to manage it */
1230 	policy = per_cpu(cpufreq_cpu_data, cpu);
1231 	if (policy) {
1232 		WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
1233 		if (!policy_is_inactive(policy))
1234 			return cpufreq_add_policy_cpu(policy, cpu);
1235 
1236 		/* This is the only online CPU for the policy.  Start over. */
1237 		new_policy = false;
1238 		down_write(&policy->rwsem);
1239 		policy->cpu = cpu;
1240 		policy->governor = NULL;
1241 		up_write(&policy->rwsem);
1242 	} else {
1243 		new_policy = true;
1244 		policy = cpufreq_policy_alloc(cpu);
1245 		if (!policy)
1246 			return -ENOMEM;
1247 	}
1248 
1249 	cpumask_copy(policy->cpus, cpumask_of(cpu));
1250 
1251 	/* Call the driver. From then on the cpufreq driver must be able
1252 	 * to accept all calls to ->verify and ->setpolicy for this CPU.
1253 	 */
1254 	ret = cpufreq_driver->init(policy);
1255 	if (ret) {
1256 		pr_debug("initialization failed\n");
1257 		goto out_free_policy;
1258 	}
1259 
1260 	down_write(&policy->rwsem);
1261 
1262 	if (new_policy) {
1263 		/* related_cpus should at least include policy->cpus. */
1264 		cpumask_copy(policy->related_cpus, policy->cpus);
1265 		/* Remember CPUs present at the policy creation time. */
1266 		cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
1267 
1268 		/* Name and add the kobject */
1269 		ret = kobject_add(&policy->kobj, cpufreq_global_kobject,
1270 				  "policy%u",
1271 				  cpumask_first(policy->related_cpus));
1272 		if (ret) {
1273 			pr_err("%s: failed to add policy->kobj: %d\n", __func__,
1274 			       ret);
1275 			goto out_exit_policy;
1276 		}
1277 	}
1278 
1279 	/*
1280 	 * The affected CPUs must always be the ones that are online. We aren't
1281 	 * managing offline CPUs here.
1282 	 */
1283 	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1284 
1285 	if (new_policy) {
1286 		policy->user_policy.min = policy->min;
1287 		policy->user_policy.max = policy->max;
1288 
1289 		write_lock_irqsave(&cpufreq_driver_lock, flags);
1290 		for_each_cpu(j, policy->related_cpus)
1291 			per_cpu(cpufreq_cpu_data, j) = policy;
1292 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1293 	} else {
1294 		policy->min = policy->user_policy.min;
1295 		policy->max = policy->user_policy.max;
1296 	}
1297 
1298 	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
1299 		policy->cur = cpufreq_driver->get(policy->cpu);
1300 		if (!policy->cur) {
1301 			pr_err("%s: ->get() failed\n", __func__);
1302 			goto out_exit_policy;
1303 		}
1304 	}
1305 
1306 	/*
1307 	 * Sometimes boot loaders set the CPU frequency to a value outside of the
1308 	 * frequency table known to the cpufreq core. In such cases the CPU might
1309 	 * be unstable if it has to run at that frequency for a long duration,
1310 	 * so it is better to set it to a frequency which is specified in the
1311 	 * freq-table. Running at an unlisted frequency also makes cpufreq stats
1312 	 * inconsistent, as cpufreq-stats would fail to register because the
1313 	 * current frequency of the CPU isn't found in the freq-table.
1314 	 *
1315 	 * Because we don't want this change to affect the boot process badly, we
1316 	 * go for the next freq which is >= policy->cur ('cur' must be set by now,
1317 	 * otherwise we would end up setting the freq to the lowest entry of the
1318 	 * table, as 'cur' is initialized to zero).
1319 	 *
1320 	 * We pass the target freq as "policy->cur - 1"; otherwise
1321 	 * __cpufreq_driver_target() would simply fail, as policy->cur would be
1322 	 * equal to the target freq.
1323 	 */
1324 	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1325 	    && has_target()) {
1326 		/* Are we running at unknown frequency ? */
1327 		ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1328 		if (ret == -EINVAL) {
1329 			/* Warn user and fix it */
1330 			pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
1331 				__func__, policy->cpu, policy->cur);
1332 			ret = __cpufreq_driver_target(policy, policy->cur - 1,
1333 				CPUFREQ_RELATION_L);
1334 
1335 			/*
1336 			 * Reaching here a few seconds after boot does not
1337 			 * mean that the system will remain stable at the
1338 			 * "unknown" frequency for a longer duration. Hence, a BUG_ON().
1339 			 */
1340 			BUG_ON(ret);
1341 			pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
1342 				__func__, policy->cpu, policy->cur);
1343 		}
1344 	}
1345 
1346 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1347 				     CPUFREQ_START, policy);
1348 
1349 	if (new_policy) {
1350 		ret = cpufreq_add_dev_interface(policy);
1351 		if (ret)
1352 			goto out_exit_policy;
1353 		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1354 				CPUFREQ_CREATE_POLICY, policy);
1355 		cpufreq_times_create_policy(policy);
1356 
1357 		write_lock_irqsave(&cpufreq_driver_lock, flags);
1358 		list_add(&policy->policy_list, &cpufreq_policy_list);
1359 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1360 	}
1361 
1362 	ret = cpufreq_init_policy(policy);
1363 	if (ret) {
1364 		pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
1365 		       __func__, cpu, ret);
1366 		/* cpufreq_policy_free() will notify based on this */
1367 		new_policy = false;
1368 		goto out_exit_policy;
1369 	}
1370 
1371 	up_write(&policy->rwsem);
1372 
1373 	kobject_uevent(&policy->kobj, KOBJ_ADD);
1374 
1375 	/* Callback for handling stuff after policy is ready */
1376 	if (cpufreq_driver->ready)
1377 		cpufreq_driver->ready(policy);
1378 
1379 	pr_debug("initialization complete\n");
1380 
1381 	return 0;
1382 
1383 out_exit_policy:
1384 	up_write(&policy->rwsem);
1385 
1386 	if (cpufreq_driver->exit)
1387 		cpufreq_driver->exit(policy);
1388 out_free_policy:
1389 	cpufreq_policy_free(policy, !new_policy);
1390 	return ret;
1391 }
1392 
1393 /**
1394  * cpufreq_add_dev - the cpufreq interface for a CPU device.
1395  * @dev: CPU device.
1396  * @sif: Subsystem interface structure pointer (not used)
1397  */
1398 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1399 {
1400 	unsigned cpu = dev->id;
1401 	int ret;
1402 
1403 	dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
1404 
1405 	if (cpu_online(cpu)) {
1406 		ret = cpufreq_online(cpu);
1407 	} else {
1408 		/*
1409 		 * A hotplug notifier will follow and we will handle it as CPU
1410 		 * online then.  For now, just create the sysfs link, unless
1411 		 * there is no policy or the link is already present.
1412 		 */
1413 		struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1414 
1415 		ret = policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
1416 			? add_cpu_dev_symlink(policy, cpu) : 0;
1417 	}
1418 
1419 	return ret;
1420 }
1421 
1422 static void cpufreq_offline_prepare(unsigned int cpu)
1423 {
1424 	struct cpufreq_policy *policy;
1425 
1426 	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1427 
1428 	policy = cpufreq_cpu_get_raw(cpu);
1429 	if (!policy) {
1430 		pr_debug("%s: No cpu_data found\n", __func__);
1431 		return;
1432 	}
1433 
1434 	if (has_target()) {
1435 		int ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1436 		if (ret)
1437 			pr_err("%s: Failed to stop governor\n", __func__);
1438 	}
1439 
1440 	down_write(&policy->rwsem);
1441 	cpumask_clear_cpu(cpu, policy->cpus);
1442 
1443 	if (policy_is_inactive(policy)) {
1444 		if (has_target())
1445 			strncpy(policy->last_governor, policy->governor->name,
1446 				CPUFREQ_NAME_LEN);
1447 		else
1448 			policy->last_policy = policy->policy;
1449 	} else if (cpu == policy->cpu) {
1450 		/* Nominate new CPU */
1451 		policy->cpu = cpumask_any(policy->cpus);
1452 	}
1453 	up_write(&policy->rwsem);
1454 
1455 	/* Start governor again for active policy */
1456 	if (!policy_is_inactive(policy)) {
1457 		if (has_target()) {
1458 			int ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1459 			if (!ret)
1460 				ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1461 
1462 			if (ret)
1463 				pr_err("%s: Failed to start governor\n", __func__);
1464 		}
1465 	} else if (cpufreq_driver->stop_cpu) {
1466 		cpufreq_driver->stop_cpu(policy);
1467 	}
1468 }
1469 
1470 static void cpufreq_offline_finish(unsigned int cpu)
1471 {
1472 	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1473 
1474 	if (!policy) {
1475 		pr_debug("%s: No cpu_data found\n", __func__);
1476 		return;
1477 	}
1478 
1479 	/* Only proceed for inactive policies */
1480 	if (!policy_is_inactive(policy))
1481 		return;
1482 
1483 	/* If cpu is last user of policy, free policy */
1484 	if (has_target()) {
1485 		int ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
1486 		if (ret)
1487 			pr_err("%s: Failed to exit governor\n", __func__);
1488 	}
1489 
1490 	/*
1491 	 * Perform the ->exit() even during light-weight tear-down,
1492 	 * since this is a core component, and is essential for the
1493 	 * subsequent light-weight ->init() to succeed.
1494 	 */
1495 	if (cpufreq_driver->exit) {
1496 		cpufreq_driver->exit(policy);
1497 		policy->freq_table = NULL;
1498 	}
1499 }
1500 
1501 /**
1502  * cpufreq_remove_dev - remove a CPU device
1503  *
1504  * Removes the cpufreq interface for a CPU device.
1505  */
1506 static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1507 {
1508 	unsigned int cpu = dev->id;
1509 	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1510 
1511 	if (!policy)
1512 		return;
1513 
1514 	if (cpu_online(cpu)) {
1515 		cpufreq_offline_prepare(cpu);
1516 		cpufreq_offline_finish(cpu);
1517 	}
1518 
1519 	cpumask_clear_cpu(cpu, policy->real_cpus);
1520 	remove_cpu_dev_symlink(policy, cpu);
1521 
1522 	if (cpumask_empty(policy->real_cpus))
1523 		cpufreq_policy_free(policy, true);
1524 }
1525 
1526 static void handle_update(struct work_struct *work)
1527 {
1528 	struct cpufreq_policy *policy =
1529 		container_of(work, struct cpufreq_policy, update);
1530 	unsigned int cpu = policy->cpu;
1531 	pr_debug("handle_update for cpu %u called\n", cpu);
1532 	cpufreq_update_policy(cpu);
1533 }
1534 
1535 /**
1536  *	cpufreq_out_of_sync - If the actual and saved CPU frequencies differ, we're
1537  *	in deep trouble.
1538  *	@policy: policy managing CPUs
1539  *	@new_freq: CPU frequency the CPU actually runs at
1540  *
1541  *	We adjust to the current frequency first, and need to clean up later.
1542  *	So either call cpufreq_update_policy() or schedule handle_update().
1543  */
1544 static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
1545 				unsigned int new_freq)
1546 {
1547 	struct cpufreq_freqs freqs;
1548 
1549 	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1550 		 policy->cur, new_freq);
1551 
1552 	freqs.old = policy->cur;
1553 	freqs.new = new_freq;
1554 
1555 	cpufreq_freq_transition_begin(policy, &freqs);
1556 	cpufreq_freq_transition_end(policy, &freqs, 0);
1557 }
1558 
1559 /**
1560  * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1561  * @cpu: CPU number
1562  *
1563  * This is the last known freq, without actually getting it from the driver.
1564  * The return value will be the same as what is shown in scaling_cur_freq in sysfs.
1565  */
1566 unsigned int cpufreq_quick_get(unsigned int cpu)
1567 {
1568 	struct cpufreq_policy *policy;
1569 	unsigned int ret_freq = 0;
1570 
1571 	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1572 		return cpufreq_driver->get(cpu);
1573 
1574 	policy = cpufreq_cpu_get(cpu);
1575 	if (policy) {
1576 		ret_freq = policy->cur;
1577 		cpufreq_cpu_put(policy);
1578 	}
1579 
1580 	return ret_freq;
1581 }
1582 EXPORT_SYMBOL(cpufreq_quick_get);
1583 
1584 /**
1585  * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1586  * @cpu: CPU number
1587  *
1588  * Just return the max possible frequency for a given CPU.
1589  */
1590 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1591 {
1592 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1593 	unsigned int ret_freq = 0;
1594 
1595 	if (policy) {
1596 		ret_freq = policy->max;
1597 		cpufreq_cpu_put(policy);
1598 	}
1599 
1600 	return ret_freq;
1601 }
1602 EXPORT_SYMBOL(cpufreq_quick_get_max);
1603 
1604 static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1605 {
1606 	unsigned int ret_freq = 0;
1607 
1608 	if (!cpufreq_driver->get)
1609 		return ret_freq;
1610 
1611 	ret_freq = cpufreq_driver->get(policy->cpu);
1612 
1613 	/* Updating inactive policies is invalid, so avoid doing that. */
1614 	if (unlikely(policy_is_inactive(policy)))
1615 		return ret_freq;
1616 
1617 	if (ret_freq && policy->cur &&
1618 		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1619 		/* verify no discrepancy between actual and
1620 					saved value exists */
1621 		if (unlikely(ret_freq != policy->cur)) {
1622 			cpufreq_out_of_sync(policy, ret_freq);
1623 			schedule_work(&policy->update);
1624 		}
1625 	}
1626 
1627 	return ret_freq;
1628 }
1629 
1630 /**
1631  * cpufreq_get - get the current CPU frequency (in kHz)
1632  * @cpu: CPU number
1633  *
1634  * Get the current (static) CPU frequency.
1635  */
1636 unsigned int cpufreq_get(unsigned int cpu)
1637 {
1638 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1639 	unsigned int ret_freq = 0;
1640 
1641 	if (policy) {
1642 		down_read(&policy->rwsem);
1643 		ret_freq = __cpufreq_get(policy);
1644 		up_read(&policy->rwsem);
1645 
1646 		cpufreq_cpu_put(policy);
1647 	}
1648 
1649 	return ret_freq;
1650 }
1651 EXPORT_SYMBOL(cpufreq_get);
1652 
1653 static struct subsys_interface cpufreq_interface = {
1654 	.name		= "cpufreq",
1655 	.subsys		= &cpu_subsys,
1656 	.add_dev	= cpufreq_add_dev,
1657 	.remove_dev	= cpufreq_remove_dev,
1658 };
1659 
1660 /*
1661  * In case the platform wants some specific frequency to be configured
1662  * during suspend.
1663  */
1664 int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1665 {
1666 	int ret;
1667 
1668 	if (!policy->suspend_freq) {
1669 		pr_debug("%s: suspend_freq not defined\n", __func__);
1670 		return 0;
1671 	}
1672 
1673 	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1674 			policy->suspend_freq);
1675 
1676 	ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1677 			CPUFREQ_RELATION_H);
1678 	if (ret)
1679 		pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1680 				__func__, policy->suspend_freq, ret);
1681 
1682 	return ret;
1683 }
1684 EXPORT_SYMBOL(cpufreq_generic_suspend);
1685 
1686 /**
1687  * cpufreq_suspend() - Suspend CPUFreq governors
1688  *
1689  * Called during system wide Suspend/Hibernate cycles for suspending governors
1690  * as some platforms can't change frequency after this point in the suspend
1691  * cycle. This is because some of the devices (e.g. i2c, regulators) used for
1692  * changing the frequency are suspended quickly after this point.
1693  */
1694 void cpufreq_suspend(void)
1695 {
1696 	struct cpufreq_policy *policy;
1697 
1698 	if (!cpufreq_driver)
1699 		return;
1700 
1701 	if (!has_target())
1702 		goto suspend;
1703 
1704 	pr_debug("%s: Suspending Governors\n", __func__);
1705 
1706 	for_each_active_policy(policy) {
1707 		if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
1708 			pr_err("%s: Failed to stop governor for policy: %p\n",
1709 				__func__, policy);
1710 		else if (cpufreq_driver->suspend
1711 		    && cpufreq_driver->suspend(policy))
1712 			pr_err("%s: Failed to suspend driver: %p\n", __func__,
1713 				policy);
1714 	}
1715 
1716 suspend:
1717 	cpufreq_suspended = true;
1718 }
1719 
1720 /**
1721  * cpufreq_resume() - Resume CPUFreq governors
1722  *
1723  * Called during system-wide suspend/hibernate cycles to resume the governors
1724  * that were suspended with cpufreq_suspend().
1725  */
1726 void cpufreq_resume(void)
1727 {
1728 	struct cpufreq_policy *policy;
1729 
1730 	if (!cpufreq_driver)
1731 		return;
1732 
1733 	if (unlikely(!cpufreq_suspended))
1734 		return;
1735 
1736 	cpufreq_suspended = false;
1737 
1738 	if (!has_target())
1739 		return;
1740 
1741 	pr_debug("%s: Resuming Governors\n", __func__);
1742 
1743 	for_each_active_policy(policy) {
1744 		if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
1745 			pr_err("%s: Failed to resume driver: %p\n", __func__,
1746 				policy);
1747 		else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
1748 		    || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
1749 			pr_err("%s: Failed to start governor for policy: %p\n",
1750 				__func__, policy);
1751 	}
1752 
1753 	/*
1754 	 * Schedule a call to cpufreq_update_policy() for the first online CPU,
1755 	 * as that one won't be hotplugged out during suspend. It will verify
1756 	 * that the current freq is in sync with what we believe it to be.
1757 	 */
1758 	policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
1759 	if (WARN_ON(!policy))
1760 		return;
1761 
1762 	schedule_work(&policy->update);
1763 }
1764 
1765 /**
1766  *	cpufreq_get_current_driver - return current driver's name
1767  *
1768  *	Return the name string of the currently loaded cpufreq driver
1769  *	or NULL, if none.
1770  */
1771 const char *cpufreq_get_current_driver(void)
1772 {
1773 	if (cpufreq_driver)
1774 		return cpufreq_driver->name;
1775 
1776 	return NULL;
1777 }
1778 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1779 
1780 /**
1781  *	cpufreq_get_driver_data - return current driver data
1782  *
1783  *	Return the private data of the currently loaded cpufreq
1784  *	driver, or NULL if no cpufreq driver is loaded.
1785  */
1786 void *cpufreq_get_driver_data(void)
1787 {
1788 	if (cpufreq_driver)
1789 		return cpufreq_driver->driver_data;
1790 
1791 	return NULL;
1792 }
1793 EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
1794 
1795 /*********************************************************************
1796  *                     NOTIFIER LISTS INTERFACE                      *
1797  *********************************************************************/
1798 
1799 /**
1800  *	cpufreq_register_notifier - register a notifier with cpufreq
1801  *	@nb: notifier function to register
1802  *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1803  *
1804  *	Add a notifier to one of two lists: either a list of notifiers
1805  *      that are called on clock rate changes (once before and once after
1806  *      the transition), or a list of notifiers that are called on
1807  *      changes in cpufreq policy.
1808  *
1809  *	This function may sleep, and has the same return conditions as
1810  *	blocking_notifier_chain_register.
1811  */
1812 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1813 {
1814 	int ret;
1815 
1816 	if (cpufreq_disabled())
1817 		return -EINVAL;
1818 
1819 	WARN_ON(!init_cpufreq_transition_notifier_list_called);
1820 
1821 	switch (list) {
1822 	case CPUFREQ_TRANSITION_NOTIFIER:
1823 		ret = srcu_notifier_chain_register(
1824 				&cpufreq_transition_notifier_list, nb);
1825 		break;
1826 	case CPUFREQ_POLICY_NOTIFIER:
1827 		ret = blocking_notifier_chain_register(
1828 				&cpufreq_policy_notifier_list, nb);
1829 		break;
1830 	default:
1831 		ret = -EINVAL;
1832 	}
1833 
1834 	return ret;
1835 }
1836 EXPORT_SYMBOL(cpufreq_register_notifier);
1837 
1838 /**
1839  *	cpufreq_unregister_notifier - unregister a notifier from cpufreq
1840  *	@nb: notifier block to be unregistered
1841  *	@list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1842  *
1843  *	Remove a notifier from one of the cpufreq notifier lists.
1844  *
1845  *	This function may sleep, and has the same return conditions as
1846  *	blocking_notifier_chain_unregister.
1847  */
1848 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1849 {
1850 	int ret;
1851 
1852 	if (cpufreq_disabled())
1853 		return -EINVAL;
1854 
1855 	switch (list) {
1856 	case CPUFREQ_TRANSITION_NOTIFIER:
1857 		ret = srcu_notifier_chain_unregister(
1858 				&cpufreq_transition_notifier_list, nb);
1859 		break;
1860 	case CPUFREQ_POLICY_NOTIFIER:
1861 		ret = blocking_notifier_chain_unregister(
1862 				&cpufreq_policy_notifier_list, nb);
1863 		break;
1864 	default:
1865 		ret = -EINVAL;
1866 	}
1867 
1868 	return ret;
1869 }
1870 EXPORT_SYMBOL(cpufreq_unregister_notifier);
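/*
 * Illustrative sketch (hypothetical consumer code, not a definitive recipe):
 * registering a transition notifier. The callback is invoked once with
 * CPUFREQ_PRECHANGE and once with CPUFREQ_POSTCHANGE around every frequency
 * change, with data pointing to a struct cpufreq_freqs. The example_* names
 * are invented for illustration.
 */
#if 0
static int example_transition_cb(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (action == CPUFREQ_POSTCHANGE)
		pr_debug("cpu%u: %u kHz -> %u kHz\n",
			 freqs->cpu, freqs->old, freqs->new);

	return NOTIFY_OK;
}

static struct notifier_block example_transition_nb = {
	.notifier_call = example_transition_cb,
};

/* register:   cpufreq_register_notifier(&example_transition_nb,
 *					 CPUFREQ_TRANSITION_NOTIFIER);
 * unregister: cpufreq_unregister_notifier(&example_transition_nb,
 *					   CPUFREQ_TRANSITION_NOTIFIER);
 */
#endif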
1871 
1872 
1873 /*********************************************************************
1874  *                              GOVERNORS                            *
1875  *********************************************************************/
1876 
1877 /* Must set freqs->new to intermediate frequency */
1878 static int __target_intermediate(struct cpufreq_policy *policy,
1879 				 struct cpufreq_freqs *freqs, int index)
1880 {
1881 	int ret;
1882 
1883 	freqs->new = cpufreq_driver->get_intermediate(policy, index);
1884 
1885 	/* We don't need to switch to intermediate freq */
1886 	if (!freqs->new)
1887 		return 0;
1888 
1889 	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
1890 		 __func__, policy->cpu, freqs->old, freqs->new);
1891 
1892 	cpufreq_freq_transition_begin(policy, freqs);
1893 	ret = cpufreq_driver->target_intermediate(policy, index);
1894 	cpufreq_freq_transition_end(policy, freqs, ret);
1895 
1896 	if (ret)
1897 		pr_err("%s: Failed to change to intermediate frequency: %d\n",
1898 		       __func__, ret);
1899 
1900 	return ret;
1901 }
1902 
1903 static int __target_index(struct cpufreq_policy *policy,
1904 			  struct cpufreq_frequency_table *freq_table, int index)
1905 {
1906 	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
1907 	unsigned int intermediate_freq = 0;
1908 	int retval = -EINVAL;
1909 	bool notify;
1910 
1911 	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
1912 	if (notify) {
1913 		/* Handle switching to intermediate frequency */
1914 		if (cpufreq_driver->get_intermediate) {
1915 			retval = __target_intermediate(policy, &freqs, index);
1916 			if (retval)
1917 				return retval;
1918 
1919 			intermediate_freq = freqs.new;
1920 			/* Set old freq to intermediate */
1921 			if (intermediate_freq)
1922 				freqs.old = freqs.new;
1923 		}
1924 
1925 		freqs.new = freq_table[index].frequency;
1926 		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1927 			 __func__, policy->cpu, freqs.old, freqs.new);
1928 
1929 		cpufreq_freq_transition_begin(policy, &freqs);
1930 	}
1931 
1932 	retval = cpufreq_driver->target_index(policy, index);
1933 	if (retval)
1934 		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
1935 		       retval);
1936 
1937 	if (notify) {
1938 		cpufreq_freq_transition_end(policy, &freqs, retval);
1939 
1940 		/*
1941 		 * Failed after setting to intermediate freq? Driver should have
1942 		 * reverted back to initial frequency and so should we. Check
1943 		 * here for intermediate_freq instead of get_intermediate, in
1944 		 * case we haven't switched to intermediate freq at all.
1945 		 */
1946 		if (unlikely(retval && intermediate_freq)) {
1947 			freqs.old = intermediate_freq;
1948 			freqs.new = policy->restore_freq;
1949 			cpufreq_freq_transition_begin(policy, &freqs);
1950 			cpufreq_freq_transition_end(policy, &freqs, 0);
1951 		}
1952 	}
1953 
1954 	return retval;
1955 }
1956 
1957 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1958 			    unsigned int target_freq,
1959 			    unsigned int relation)
1960 {
1961 	unsigned int old_target_freq = target_freq;
1962 	int retval = -EINVAL;
1963 
1964 	if (cpufreq_disabled())
1965 		return -ENODEV;
1966 
1967 	/* Make sure that target_freq is within supported range */
1968 	if (target_freq > policy->max)
1969 		target_freq = policy->max;
1970 	if (target_freq < policy->min)
1971 		target_freq = policy->min;
1972 
1973 	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1974 		 policy->cpu, target_freq, relation, old_target_freq);
1975 
1976 	/*
1977 	 * This might look like a redundant call, as we check it again after
1978 	 * finding the index. But it is left in intentionally for the case
1979 	 * where exactly the same frequency is requested again, so that we can
1980 	 * save a few function calls.
1981 	 */
1982 	if (target_freq == policy->cur)
1983 		return 0;
1984 
1985 	/* Save last value to restore later on errors */
1986 	policy->restore_freq = policy->cur;
1987 
1988 	if (cpufreq_driver->target)
1989 		retval = cpufreq_driver->target(policy, target_freq, relation);
1990 	else if (cpufreq_driver->target_index) {
1991 		struct cpufreq_frequency_table *freq_table;
1992 		int index;
1993 
1994 		freq_table = cpufreq_frequency_get_table(policy->cpu);
1995 		if (unlikely(!freq_table)) {
1996 			pr_err("%s: Unable to find freq_table\n", __func__);
1997 			goto out;
1998 		}
1999 
2000 		retval = cpufreq_frequency_table_target(policy, freq_table,
2001 				target_freq, relation, &index);
2002 		if (unlikely(retval)) {
2003 			pr_err("%s: Unable to find matching freq\n", __func__);
2004 			goto out;
2005 		}
2006 
2007 		if (freq_table[index].frequency == policy->cur) {
2008 			retval = 0;
2009 			goto out;
2010 		}
2011 
2012 		retval = __target_index(policy, freq_table, index);
2013 	}
2014 
2015 out:
2016 	return retval;
2017 }
2018 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
2019 
2020 int cpufreq_driver_target(struct cpufreq_policy *policy,
2021 			  unsigned int target_freq,
2022 			  unsigned int relation)
2023 {
2024 	int ret = -EINVAL;
2025 
2026 	down_write(&policy->rwsem);
2027 
2028 	ret = __cpufreq_driver_target(policy, target_freq, relation);
2029 
2030 	up_write(&policy->rwsem);
2031 
2032 	return ret;
2033 }
2034 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
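/*
 * Illustrative sketch (hypothetical caller, not a definitive recipe):
 * requesting a frequency change. cpufreq_driver_target() takes
 * policy->rwsem for writing, so it must be called without that lock held;
 * callers that already hold it (e.g. governor callbacks run from
 * cpufreq_set_policy()) use __cpufreq_driver_target() instead. The
 * example_* name is invented for illustration.
 */
#if 0
static int example_go_to_max(struct cpufreq_policy *policy)
{
	/* pick the highest table frequency at or below policy->max */
	return cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
}
#endif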
2035 
2036 static int __cpufreq_governor(struct cpufreq_policy *policy,
2037 					unsigned int event)
2038 {
2039 	int ret;
2040 
2041 	/* This only needs to be defined when the default governor is known to
2042 	 * have latency restrictions, e.g. conservative or ondemand.
2043 	 * That this is the case is already ensured in Kconfig.
2044 	 */
2045 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
2046 	struct cpufreq_governor *gov = &cpufreq_gov_performance;
2047 #else
2048 	struct cpufreq_governor *gov = NULL;
2049 #endif
2050 
2051 	/* Don't start any governor operations if we are entering suspend */
2052 	if (cpufreq_suspended)
2053 		return 0;
2054 	/*
2055 	 * The governor might not be initialized here if an ACPI _PPC change
2056 	 * notification happened, so check for it.
2057 	 */
2058 	if (!policy->governor)
2059 		return -EINVAL;
2060 
2061 	if (policy->governor->max_transition_latency &&
2062 	    policy->cpuinfo.transition_latency >
2063 	    policy->governor->max_transition_latency) {
2064 		if (!gov)
2065 			return -EINVAL;
2066 		else {
2067 			pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
2068 				policy->governor->name, gov->name);
2069 			policy->governor = gov;
2070 		}
2071 	}
2072 
2073 	if (event == CPUFREQ_GOV_POLICY_INIT)
2074 		if (!try_module_get(policy->governor->owner))
2075 			return -EINVAL;
2076 
2077 	pr_debug("%s: for CPU %u, event %u\n", __func__, policy->cpu, event);
2078 
2079 	mutex_lock(&cpufreq_governor_lock);
2080 	if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
2081 	    || (!policy->governor_enabled
2082 	    && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
2083 		mutex_unlock(&cpufreq_governor_lock);
2084 		return -EBUSY;
2085 	}
2086 
2087 	if (event == CPUFREQ_GOV_STOP)
2088 		policy->governor_enabled = false;
2089 	else if (event == CPUFREQ_GOV_START)
2090 		policy->governor_enabled = true;
2091 
2092 	mutex_unlock(&cpufreq_governor_lock);
2093 
2094 	ret = policy->governor->governor(policy, event);
2095 
2096 	if (!ret) {
2097 		if (event == CPUFREQ_GOV_POLICY_INIT)
2098 			policy->governor->initialized++;
2099 		else if (event == CPUFREQ_GOV_POLICY_EXIT)
2100 			policy->governor->initialized--;
2101 	} else {
2102 		/* Restore original values */
2103 		mutex_lock(&cpufreq_governor_lock);
2104 		if (event == CPUFREQ_GOV_STOP)
2105 			policy->governor_enabled = true;
2106 		else if (event == CPUFREQ_GOV_START)
2107 			policy->governor_enabled = false;
2108 		mutex_unlock(&cpufreq_governor_lock);
2109 	}
2110 
2111 	if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
2112 			((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
2113 		module_put(policy->governor->owner);
2114 
2115 	return ret;
2116 }
2117 
2118 int cpufreq_register_governor(struct cpufreq_governor *governor)
2119 {
2120 	int err;
2121 
2122 	if (!governor)
2123 		return -EINVAL;
2124 
2125 	if (cpufreq_disabled())
2126 		return -ENODEV;
2127 
2128 	mutex_lock(&cpufreq_governor_mutex);
2129 
2130 	governor->initialized = 0;
2131 	err = -EBUSY;
2132 	if (!find_governor(governor->name)) {
2133 		err = 0;
2134 		list_add(&governor->governor_list, &cpufreq_governor_list);
2135 	}
2136 
2137 	mutex_unlock(&cpufreq_governor_mutex);
2138 	return err;
2139 }
2140 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
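/*
 * Illustrative sketch (hypothetical governor, not a definitive recipe): the
 * shape of a minimal governor for this kernel, where the core drives it
 * through a single ->governor() callback with POLICY_INIT/EXIT, START/STOP
 * and LIMITS events. On START/LIMITS it simply pins the policy maximum,
 * much like the built-in performance governor. The example_* names are
 * invented for illustration.
 */
#if 0
static int example_governor_fn(struct cpufreq_policy *policy,
			       unsigned int event)
{
	switch (event) {
	case CPUFREQ_GOV_START:
	case CPUFREQ_GOV_LIMITS:
		return __cpufreq_driver_target(policy, policy->max,
					       CPUFREQ_RELATION_H);
	default:
		return 0;
	}
}

static struct cpufreq_governor example_governor = {
	.name		= "example",
	.governor	= example_governor_fn,
	.owner		= THIS_MODULE,
};

/* module init: cpufreq_register_governor(&example_governor);   */
/* module exit: cpufreq_unregister_governor(&example_governor); */
#endif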
2141 
2142 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2143 {
2144 	struct cpufreq_policy *policy;
2145 	unsigned long flags;
2146 
2147 	if (!governor)
2148 		return;
2149 
2150 	if (cpufreq_disabled())
2151 		return;
2152 
2153 	/* clear last_governor for all inactive policies */
2154 	read_lock_irqsave(&cpufreq_driver_lock, flags);
2155 	for_each_inactive_policy(policy) {
2156 		if (!strcmp(policy->last_governor, governor->name)) {
2157 			policy->governor = NULL;
2158 			strcpy(policy->last_governor, "\0");
2159 		}
2160 	}
2161 	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
2162 
2163 	mutex_lock(&cpufreq_governor_mutex);
2164 	list_del(&governor->governor_list);
2165 	mutex_unlock(&cpufreq_governor_mutex);
2166 	return;
2167 }
2168 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2169 
2170 
2171 /*********************************************************************
2172  *                          POLICY INTERFACE                         *
2173  *********************************************************************/
2174 
2175 /**
2176  * cpufreq_get_policy - get the current cpufreq_policy
2177  * @policy: struct cpufreq_policy into which the current cpufreq_policy
2178  *	is written
2179  *
2180  * Reads the current cpufreq policy.
2181  */
2182 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2183 {
2184 	struct cpufreq_policy *cpu_policy;
2185 	if (!policy)
2186 		return -EINVAL;
2187 
2188 	cpu_policy = cpufreq_cpu_get(cpu);
2189 	if (!cpu_policy)
2190 		return -EINVAL;
2191 
2192 	memcpy(policy, cpu_policy, sizeof(*policy));
2193 
2194 	cpufreq_cpu_put(cpu_policy);
2195 	return 0;
2196 }
2197 EXPORT_SYMBOL(cpufreq_get_policy);
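/*
 * Illustrative sketch (hypothetical consumer code, not a definitive recipe):
 * taking a point-in-time snapshot of a CPU's policy. cpufreq_get_policy()
 * copies the live policy into a caller-provided structure, so the values
 * may be stale by the time they are used. The example_* name is invented
 * for illustration.
 */
#if 0
static void example_show_limits(unsigned int cpu)
{
	struct cpufreq_policy policy;

	if (cpufreq_get_policy(&policy, cpu))
		return;

	pr_info("cpu%u: %u..%u kHz, cur %u kHz\n",
		cpu, policy.min, policy.max, policy.cur);
}
#endif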
2198 
2199 /*
2200  * policy : current policy.
2201  * new_policy: policy to be set.
2202  */
2203 static int cpufreq_set_policy(struct cpufreq_policy *policy,
2204 				struct cpufreq_policy *new_policy)
2205 {
2206 	struct cpufreq_governor *old_gov;
2207 	int ret;
2208 
2209 	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2210 		 new_policy->cpu, new_policy->min, new_policy->max);
2211 
2212 	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2213 
2214 	/*
2215 	 * This check works well when we store new min/max freq attributes,
2216 	 * because new_policy is a copy of policy with one field updated.
2217 	 */
2218 	if (new_policy->min > new_policy->max)
2219 		return -EINVAL;
2220 
2221 	/* verify the cpu speed can be set within this limit */
2222 	ret = cpufreq_driver->verify(new_policy);
2223 	if (ret)
2224 		return ret;
2225 
2226 	/* adjust if necessary - all reasons */
2227 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2228 			CPUFREQ_ADJUST, new_policy);
2229 
2230 	/*
2231 	 * verify that the cpu speed can be set within this limit, which might
2232 	 * be different from the first one
2233 	 */
2234 	ret = cpufreq_driver->verify(new_policy);
2235 	if (ret)
2236 		return ret;
2237 
2238 	/* notification of the new policy */
2239 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2240 			CPUFREQ_NOTIFY, new_policy);
2241 
2242 	scale_freq_capacity(new_policy, NULL);
2243 
2244 	policy->min = new_policy->min;
2245 	policy->max = new_policy->max;
2246 	trace_cpu_frequency_limits(policy->max, policy->min, policy->cpu);
2247 
2248 	pr_debug("new min and max freqs are %u - %u kHz\n",
2249 		 policy->min, policy->max);
2250 
2251 	if (cpufreq_driver->setpolicy) {
2252 		policy->policy = new_policy->policy;
2253 		pr_debug("setting range\n");
2254 		return cpufreq_driver->setpolicy(new_policy);
2255 	}
2256 
2257 	if (new_policy->governor == policy->governor)
2258 		goto out;
2259 
2260 	pr_debug("governor switch\n");
2261 
2262 	/* save old, working values */
2263 	old_gov = policy->governor;
2264 	/* end old governor */
2265 	if (old_gov) {
2266 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2267 		if (ret) {
2268 			/* This can happen due to a race with other operations */
2269 			pr_debug("%s: Failed to Stop Governor: %s (%d)\n",
2270 				 __func__, old_gov->name, ret);
2271 			return ret;
2272 		}
2273 
2274 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2275 		if (ret) {
2276 			pr_err("%s: Failed to Exit Governor: %s (%d)\n",
2277 			       __func__, old_gov->name, ret);
2278 			return ret;
2279 		}
2280 	}
2281 
2282 	/* start new governor */
2283 	policy->governor = new_policy->governor;
2284 	ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
2285 	if (!ret) {
2286 		ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
2287 		if (!ret)
2288 			goto out;
2289 
2290 		__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2291 	}
2292 
2293 	/* new governor failed, so re-start old one */
2294 	pr_debug("starting governor %s failed\n", policy->governor->name);
2295 	if (old_gov) {
2296 		policy->governor = old_gov;
2297 		if (__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT))
2298 			policy->governor = NULL;
2299 		else
2300 			__cpufreq_governor(policy, CPUFREQ_GOV_START);
2301 	}
2302 
2303 	return ret;
2304 
2305  out:
2306 	pr_debug("governor: change or update limits\n");
2307 	return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2308 }
2309 
2310 /**
2311  *	cpufreq_update_policy - re-evaluate an existing cpufreq policy
2312  *	@cpu: CPU which shall be re-evaluated
2313  *
2314  *	Useful for policy notifiers which have different requirements
2315  *	at different times.
2316  */
2317 int cpufreq_update_policy(unsigned int cpu)
2318 {
2319 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2320 	struct cpufreq_policy new_policy;
2321 	int ret;
2322 
2323 	if (!policy)
2324 		return -ENODEV;
2325 
2326 	down_write(&policy->rwsem);
2327 
2328 	pr_debug("updating policy for CPU %u\n", cpu);
2329 	memcpy(&new_policy, policy, sizeof(*policy));
2330 	new_policy.min = policy->user_policy.min;
2331 	new_policy.max = policy->user_policy.max;
2332 
2333 	/*
2334 	 * BIOS might change freq behind our back
2335 	 * -> ask driver for current freq and notify governors about a change
2336 	 */
2337 	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
2338 		new_policy.cur = cpufreq_driver->get(cpu);
2339 		if (WARN_ON(!new_policy.cur)) {
2340 			ret = -EIO;
2341 			goto unlock;
2342 		}
2343 
2344 		if (!policy->cur) {
2345 			pr_debug("Driver did not initialize current freq\n");
2346 			policy->cur = new_policy.cur;
2347 		} else {
2348 			if (policy->cur != new_policy.cur && has_target())
2349 				cpufreq_out_of_sync(policy, new_policy.cur);
2350 		}
2351 	}
2352 
2353 	ret = cpufreq_set_policy(policy, &new_policy);
2354 
2355 unlock:
2356 	up_write(&policy->rwsem);
2357 
2358 	cpufreq_cpu_put(policy);
2359 	return ret;
2360 }
2361 EXPORT_SYMBOL(cpufreq_update_policy);
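/*
 * Illustrative sketch (hypothetical platform code, not a definitive recipe):
 * code that learns of new frequency constraints out of band (for instance
 * from a firmware notification) updates whatever its policy notifier or
 * ->verify() callback reports and then calls cpufreq_update_policy() so the
 * core re-evaluates the policy for that CPU. The example_* name is invented
 * for illustration.
 */
#if 0
static void example_limits_changed(unsigned int cpu)
{
	/* new limits are applied via CPUFREQ_ADJUST / ->verify() */
	cpufreq_update_policy(cpu);
}
#endif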
2362 
2363 static int cpufreq_cpu_callback(struct notifier_block *nfb,
2364 					unsigned long action, void *hcpu)
2365 {
2366 	unsigned int cpu = (unsigned long)hcpu;
2367 
2368 	switch (action & ~CPU_TASKS_FROZEN) {
2369 	case CPU_ONLINE:
2370 		cpufreq_online(cpu);
2371 		break;
2372 
2373 	case CPU_DOWN_PREPARE:
2374 		cpufreq_offline_prepare(cpu);
2375 		break;
2376 
2377 	case CPU_POST_DEAD:
2378 		cpufreq_offline_finish(cpu);
2379 		break;
2380 
2381 	case CPU_DOWN_FAILED:
2382 		cpufreq_online(cpu);
2383 		break;
2384 	}
2385 	return NOTIFY_OK;
2386 }
2387 
2388 static struct notifier_block __refdata cpufreq_cpu_notifier = {
2389 	.notifier_call = cpufreq_cpu_callback,
2390 };
2391 
2392 /*********************************************************************
2393  *               BOOST						     *
2394  *********************************************************************/
2395 static int cpufreq_boost_set_sw(int state)
2396 {
2397 	struct cpufreq_frequency_table *freq_table;
2398 	struct cpufreq_policy *policy;
2399 	int ret = -EINVAL;
2400 
2401 	for_each_active_policy(policy) {
2402 		freq_table = cpufreq_frequency_get_table(policy->cpu);
2403 		if (freq_table) {
2404 			ret = cpufreq_frequency_table_cpuinfo(policy,
2405 							freq_table);
2406 			if (ret) {
2407 				pr_err("%s: Policy frequency update failed\n",
2408 				       __func__);
2409 				break;
2410 			}
2411 			policy->user_policy.max = policy->max;
2412 			__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2413 		}
2414 	}
2415 
2416 	return ret;
2417 }
2418 
2419 int cpufreq_boost_trigger_state(int state)
2420 {
2421 	unsigned long flags;
2422 	int ret = 0;
2423 
2424 	if (cpufreq_driver->boost_enabled == state)
2425 		return 0;
2426 
2427 	write_lock_irqsave(&cpufreq_driver_lock, flags);
2428 	cpufreq_driver->boost_enabled = state;
2429 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2430 
2431 	ret = cpufreq_driver->set_boost(state);
2432 	if (ret) {
2433 		write_lock_irqsave(&cpufreq_driver_lock, flags);
2434 		cpufreq_driver->boost_enabled = !state;
2435 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2436 
2437 		pr_err("%s: Cannot %s BOOST\n",
2438 		       __func__, state ? "enable" : "disable");
2439 	}
2440 
2441 	return ret;
2442 }
2443 
2444 int cpufreq_boost_supported(void)
2445 {
2446 	if (likely(cpufreq_driver))
2447 		return cpufreq_driver->boost_supported;
2448 
2449 	return 0;
2450 }
2451 EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
2452 
2453 static int create_boost_sysfs_file(void)
2454 {
2455 	int ret;
2456 
2457 	if (!cpufreq_boost_supported())
2458 		return 0;
2459 
2460 	/*
2461 	 * Check if the driver provides a function to enable boost -
2462 	 * if not, use cpufreq_boost_set_sw as the default
2463 	 */
2464 	if (!cpufreq_driver->set_boost)
2465 		cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2466 
2467 	ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
2468 	if (ret)
2469 		pr_err("%s: cannot register global BOOST sysfs file\n",
2470 		       __func__);
2471 
2472 	return ret;
2473 }
2474 
2475 static void remove_boost_sysfs_file(void)
2476 {
2477 	if (cpufreq_boost_supported())
2478 		sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
2479 }
2480 
2481 int cpufreq_enable_boost_support(void)
2482 {
2483 	if (!cpufreq_driver)
2484 		return -EINVAL;
2485 
2486 	if (cpufreq_boost_supported())
2487 		return 0;
2488 
2489 	cpufreq_driver->boost_supported = true;
2490 
2491 	/* This will get removed on driver unregister */
2492 	return create_boost_sysfs_file();
2493 }
2494 EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
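/*
 * Illustrative sketch (hypothetical driver code, not a definitive recipe):
 * a driver that discovers boost (turbo) frequencies at probe time can opt
 * into the common boost handling from its ->init() path, after which the
 * global "boost" sysfs file is created on its behalf. The example_* names
 * are invented for illustration.
 */
#if 0
static int example_boost_init(struct cpufreq_policy *policy)
{
	/* ... frequency table setup, including boost entries ... */

	if (example_has_boost_freqs())		/* hypothetical helper */
		return cpufreq_enable_boost_support();

	return 0;
}
#endif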
2495 
2496 int cpufreq_boost_enabled(void)
2497 {
2498 	return cpufreq_driver->boost_enabled;
2499 }
2500 EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2501 
2502 /*********************************************************************
2503  *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
2504  *********************************************************************/
2505 
2506 /**
2507  * cpufreq_register_driver - register a CPU Frequency driver
2508  * @driver_data: A struct cpufreq_driver containing the values
2509  * submitted by the CPU Frequency driver.
2510  *
2511  * Registers a CPU Frequency driver to this core code. This code
2512  * returns zero on success, -EBUSY when another driver got here first
2513  * (and isn't unregistered in the meantime).
2514  *
2515  */
2516 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2517 {
2518 	unsigned long flags;
2519 	int ret;
2520 
2521 	if (cpufreq_disabled())
2522 		return -ENODEV;
2523 
2524 	/*
2525 	 * The cpufreq core depends heavily on the availability of device
2526 	 * structures, so make sure they are available before proceeding further.
2527 	 */
2528 	if (!get_cpu_device(0))
2529 		return -EPROBE_DEFER;
2530 
2531 	if (!driver_data || !driver_data->verify || !driver_data->init ||
2532 	    !(driver_data->setpolicy || driver_data->target_index ||
2533 		    driver_data->target) ||
2534 	     (driver_data->setpolicy && (driver_data->target_index ||
2535 		    driver_data->target)) ||
2536 	     (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
2537 		return -EINVAL;
2538 
2539 	pr_debug("trying to register driver %s\n", driver_data->name);
2540 
2541 	/* Protect against concurrent CPU online/offline. */
2542 	get_online_cpus();
2543 
2544 	write_lock_irqsave(&cpufreq_driver_lock, flags);
2545 	if (cpufreq_driver) {
2546 		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2547 		ret = -EEXIST;
2548 		goto out;
2549 	}
2550 	cpufreq_driver = driver_data;
2551 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2552 
2553 	if (driver_data->setpolicy)
2554 		driver_data->flags |= CPUFREQ_CONST_LOOPS;
2555 
2556 	ret = create_boost_sysfs_file();
2557 	if (ret)
2558 		goto err_null_driver;
2559 
2560 	ret = subsys_interface_register(&cpufreq_interface);
2561 	if (ret)
2562 		goto err_boost_unreg;
2563 
2564 	if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
2565 	    list_empty(&cpufreq_policy_list)) {
2566 		/* if all ->init() calls failed, unregister */
2567 		ret = -ENODEV;
2568 		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2569 			 driver_data->name);
2570 		goto err_if_unreg;
2571 	}
2572 
2573 	register_hotcpu_notifier(&cpufreq_cpu_notifier);
2574 	pr_debug("driver %s up and running\n", driver_data->name);
2575 
2576 out:
2577 	put_online_cpus();
2578 	return ret;
2579 
2580 err_if_unreg:
2581 	subsys_interface_unregister(&cpufreq_interface);
2582 err_boost_unreg:
2583 	remove_boost_sysfs_file();
2584 err_null_driver:
2585 	write_lock_irqsave(&cpufreq_driver_lock, flags);
2586 	cpufreq_driver = NULL;
2587 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2588 	goto out;
2589 }
2590 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2591 
2592 /**
2593  * cpufreq_unregister_driver - unregister the current CPUFreq driver
2594  *
2595  * Unregister the current CPUFreq driver. Only call this if you have
2596  * the right to do so, i.e. if you have succeeded in initialising before!
2597  * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2598  * currently not initialised.
2599  */
2600 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2601 {
2602 	unsigned long flags;
2603 
2604 	if (!cpufreq_driver || (driver != cpufreq_driver))
2605 		return -EINVAL;
2606 
2607 	pr_debug("unregistering driver %s\n", driver->name);
2608 
2609 	/* Protect against concurrent cpu hotplug */
2610 	get_online_cpus();
2611 	subsys_interface_unregister(&cpufreq_interface);
2612 	remove_boost_sysfs_file();
2613 	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2614 
2615 	write_lock_irqsave(&cpufreq_driver_lock, flags);
2616 
2617 	cpufreq_driver = NULL;
2618 
2619 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2620 	put_online_cpus();
2621 
2622 	return 0;
2623 }
2624 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
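/*
 * Illustrative sketch (hypothetical driver, not a definitive recipe): the
 * overall shape of a platform driver built on the generic helpers. ->verify
 * and the sysfs attributes come from the frequency-table code, ->init hands
 * the table to cpufreq_generic_init(), and registration/unregistration
 * bracket the module's lifetime. All example_* names, the table contents and
 * the 300 us transition latency are invented for illustration.
 */
#if 0
static struct cpufreq_frequency_table example_freq_table[] = {
	{ .frequency = 400000 },
	{ .frequency = 800000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int example_target_index(struct cpufreq_policy *policy,
				unsigned int index)
{
	/* program the clock for example_freq_table[index].frequency here */
	return 0;
}

static int example_init(struct cpufreq_policy *policy)
{
	/* transition_latency is given in nanoseconds */
	return cpufreq_generic_init(policy, example_freq_table, 300 * 1000);
}

static struct cpufreq_driver example_cpufreq_driver = {
	.name		= "example",
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= example_target_index,
	.init		= example_init,
	.attr		= cpufreq_generic_attr,
};

static int __init example_cpufreq_register(void)
{
	return cpufreq_register_driver(&example_cpufreq_driver);
}
module_init(example_cpufreq_register);

static void __exit example_cpufreq_unregister(void)
{
	cpufreq_unregister_driver(&example_cpufreq_driver);
}
module_exit(example_cpufreq_unregister);
#endif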
2625 
2626 struct kobject *cpufreq_global_kobject;
2627 EXPORT_SYMBOL(cpufreq_global_kobject);
2628 
2629 static int __init cpufreq_core_init(void)
2630 {
2631 	if (cpufreq_disabled())
2632 		return -ENODEV;
2633 
2634 	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
2635 	BUG_ON(!cpufreq_global_kobject);
2636 
2637 	return 0;
2638 }
2639 core_initcall(cpufreq_core_init);
2640