/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	struct timer_list cpu_slack_timer;
	spinlock_t load_lock; /* protects the next 4 fields */
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	spinlock_t target_freq_lock; /* protects target_freq */
	unsigned int target_freq;
	unsigned int floor_freq;
	u64 pol_floor_val_time; /* policy floor_validate_time */
	u64 loc_floor_val_time; /* per-cpu floor_validate_time */
	u64 pol_hispeed_val_time; /* policy hispeed_validate_time */
	u64 loc_hispeed_val_time; /* per-cpu hispeed_validate_time */
	struct rw_semaphore enable_sem;
	int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};

#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned int default_above_hispeed_delay[] = {
	DEFAULT_ABOVE_HISPEED_DELAY };

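/*
 * Note: both default_target_loads and default_above_hispeed_delay use
 * the flattened "value [freq:value] ..." encoding of the tunables they
 * back: odd entries are values, even entries are the frequencies (in
 * kHz) at which the next value takes effect.  For example, a
 * target_loads string of "85 1000000:90" parses to {85, 1000000, 90},
 * i.e. target 85% load below 1 GHz and 90% at or above it.
 */
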
struct cpufreq_interactive_tunables {
	int usage_count;
	/* Hi speed to bump to from lo speed when load burst (default max) */
	unsigned int hispeed_freq;
	/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
	unsigned long go_hispeed_load;
	/* Target load. Lower values result in higher CPU speeds. */
	spinlock_t target_loads_lock;
	unsigned int *target_loads;
	int ntarget_loads;
	/*
	 * The minimum amount of time to spend at a frequency before we can ramp
	 * down.
	 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
	unsigned long min_sample_time;
	/*
	 * The sample rate of the timer used to increase frequency
	 */
	unsigned long timer_rate;
	/*
	 * Wait this long before raising speed above hispeed, by default a
	 * single timer interval.
	 */
	spinlock_t above_hispeed_delay_lock;
	unsigned int *above_hispeed_delay;
	int nabove_hispeed_delay;
	/* Non-zero means indefinite speed boost active */
	int boost_val;
	/* Duration of a boost pulse in usecs */
	int boostpulse_duration_val;
	/* End time of boost pulse in ktime converted to usecs */
	u64 boostpulse_endtime;
	bool boosted;
	/*
	 * Max additional time to wait in idle, beyond timer_rate, at speeds
	 * above minimum before wakeup to reduce speed, or -1 if unnecessary.
	 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
	int timer_slack_val;
	bool io_is_busy;
};

/* For cases where we have a single governor instance for the system */
static struct cpufreq_interactive_tunables *common_tunables;

static struct attribute_group *get_sysfs_attr(void);

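/*
 * Re-arm the per-CPU sampling timer (and the optional slack timer) for
 * one timer_rate interval from now, resetting the idle-time baseline
 * used by update_load().
 */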
static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	unsigned long expires;
	unsigned long flags;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(smp_processor_id(),
				  &pcpu->time_in_idle_timestamp,
				  tunables->io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
	mod_timer_pinned(&pcpu->cpu_timer, expires);

	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
	}

	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

/*
 * The caller shall take enable_sem write semaphore to avoid any timer race.
 * The cpu_timer and cpu_slack_timer must be deactivated when calling this
 * function.
 */
static void cpufreq_interactive_timer_start(
	struct cpufreq_interactive_tunables *tunables, int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	unsigned long expires = jiffies +
		usecs_to_jiffies(tunables->timer_rate);
	unsigned long flags;

	pcpu->cpu_timer.expires = expires;
	add_timer_on(&pcpu->cpu_timer, cpu);
	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		pcpu->cpu_slack_timer.expires = expires;
		add_timer_on(&pcpu->cpu_slack_timer, cpu);
	}

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp,
				  tunables->io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

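/*
 * Table lookups for the two pair-encoded tunables: entries alternate
 * value and frequency, so the walks below step by two and return the
 * value whose frequency band contains @freq.
 */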
static unsigned int freq_to_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
			freq >= tunables->above_hispeed_delay[i+1]; i += 2)
		;

	ret = tunables->above_hispeed_delay[i];
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return ret;
}

static unsigned int freq_to_targetload(
	struct cpufreq_interactive_tunables *tunables, unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads - 1 &&
		    freq >= tunables->target_loads[i+1]; i += 2)
		;

	ret = tunables->target_loads[i];
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
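/*
 * For illustration, one pass through the loop below: at cur = 1000 MHz
 * with a measured load of 45%, loadadjfreq = 45 * 1000.  With a target
 * load of 90 the first candidate is 45 * 1000 / 90 = 500 MHz; the loop
 * then re-reads the target load at 500 MHz and iterates, using
 * freqmin/freqmax to bracket the answer, until the choice is stable.
 */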
static unsigned int choose_freq(struct cpufreq_interactive_cpuinfo *pcpu,
		unsigned int loadadjfreq)
{
	unsigned int freq = pcpu->policy->cur;
	unsigned int prevfreq, freqmin, freqmax;
	unsigned int tl;
	int index;

	freqmin = 0;
	freqmax = UINT_MAX;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(pcpu->policy->governor_data, freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */

		if (cpufreq_frequency_table_target(
			    pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
			    CPUFREQ_RELATION_L, &index))
			break;
		freq = pcpu->freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low. */
			freqmin = prevfreq;

			if (freq >= freqmax) {
				/*
				 * Find the highest frequency that is less
				 * than freqmax.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmax - 1, CPUFREQ_RELATION_H,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				if (freq == freqmin) {
					/*
					 * The first frequency below freqmax
					 * has already been found to be too
					 * low.  freqmax is the lowest speed
					 * we found that is fast enough.
					 */
					freq = freqmax;
					break;
				}
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq <= freqmin) {
				/*
				 * Find the lowest frequency that is higher
				 * than freqmin.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmin + 1, CPUFREQ_RELATION_L,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				/*
				 * If freqmax is the first frequency above
				 * freqmin then we have already found that
				 * this speed is fast enough.
				 */
				if (freq == freqmax)
					break;
			}
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}

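/*
 * Sample the idle counters for @cpu and accumulate active_time times
 * the current frequency into cputime_speedadj; dividing that sum by
 * the elapsed time later yields a frequency-weighted average load.
 * Called with load_lock held.  Returns the timestamp of the sample.
 */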
static u64 update_load(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	u64 now;
	u64 now_idle;
	u64 delta_idle;
	u64 delta_time;
	u64 active_time;

	now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
	delta_idle = (now_idle - pcpu->time_in_idle);
	delta_time = (now - pcpu->time_in_idle_timestamp);

	if (delta_time <= delta_idle)
		active_time = 0;
	else
		active_time = delta_time - delta_idle;

	pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

	pcpu->time_in_idle = now_idle;
	pcpu->time_in_idle_timestamp = now;
	return now;
}

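/*
 * Per-CPU sampling timer: compute the load since the last sample, pick
 * a new target frequency (bumping to hispeed_freq on load bursts or
 * boosts, honoring above_hispeed_delay and the floor_freq /
 * min_sample_time hold-down), then flag the CPU for the speedchange
 * task and re-arm.
 */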
static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;
	u64 max_fvtime;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled)
		goto exit;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	now = update_load(data);
	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
	cputime_speedadj = pcpu->cputime_speedadj;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);

	if (WARN_ON_ONCE(!delta_time))
		goto rearm;

	spin_lock_irqsave(&pcpu->target_freq_lock, flags);
	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / pcpu->policy->cur;
	tunables->boosted = tunables->boost_val ||
		now < tunables->boostpulse_endtime;

	if (cpu_load >= tunables->go_hispeed_load || tunables->boosted) {
		if (pcpu->policy->cur < tunables->hispeed_freq) {
			new_freq = tunables->hispeed_freq;
		} else {
			new_freq = choose_freq(pcpu, loadadjfreq);

			if (new_freq < tunables->hispeed_freq)
				new_freq = tunables->hispeed_freq;
		}
	} else {
		new_freq = choose_freq(pcpu, loadadjfreq);
		if (new_freq > tunables->hispeed_freq &&
				pcpu->policy->cur < tunables->hispeed_freq)
			new_freq = tunables->hispeed_freq;
	}

	if (pcpu->policy->cur >= tunables->hispeed_freq &&
	    new_freq > pcpu->policy->cur &&
	    now - pcpu->pol_hispeed_val_time <
	    freq_to_above_hispeed_delay(tunables, pcpu->policy->cur)) {
		trace_cpufreq_interactive_notyet(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	pcpu->loc_hispeed_val_time = now;

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_L,
					   &index)) {
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	max_fvtime = max(pcpu->pol_floor_val_time, pcpu->loc_floor_val_time);
	if (new_freq < pcpu->floor_freq &&
	    pcpu->target_freq >= pcpu->policy->cur) {
		if (now - max_fvtime < tunables->min_sample_time) {
			trace_cpufreq_interactive_notyet(
				data, cpu_load, pcpu->target_freq,
				pcpu->policy->cur, new_freq);
			spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
			goto rearm;
		}
	}

	/*
	 * Update the timestamp for checking whether speed has been held at
	 * or above the selected frequency for a minimum of min_sample_time,
	 * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
	 * allow the speed to drop as soon as the boostpulse duration expires
	 * (or the indefinite boost is turned off).
	 */

	if (!tunables->boosted || new_freq > tunables->hispeed_freq) {
		pcpu->floor_freq = new_freq;
		if (pcpu->target_freq >= pcpu->policy->cur ||
		    new_freq >= pcpu->policy->cur)
			pcpu->loc_floor_val_time = now;
	}

	if (pcpu->target_freq == new_freq &&
			pcpu->target_freq <= pcpu->policy->cur) {
		trace_cpufreq_interactive_already(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 pcpu->policy->cur, new_freq);

	pcpu->target_freq = new_freq;
	spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm:
	if (!timer_pending(&pcpu->cpu_timer))
		cpufreq_interactive_timer_resched(pcpu);

exit:
	up_read(&pcpu->enable_sem);
	return;
}

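/*
 * On exit from idle, re-arm the sampling timer if it is not pending,
 * or run it immediately if it already expired while the CPU slept.
 */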
static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	/* Arm the timer for 1-2 ticks later if not already. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		cpufreq_interactive_timer_resched(pcpu);
	} else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
		del_timer(&pcpu->cpu_timer);
		del_timer(&pcpu->cpu_slack_timer);
		cpufreq_interactive_timer(smp_processor_id());
	}

	up_read(&pcpu->enable_sem);
}

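/*
 * Scan a policy's CPUs for the highest per-CPU target frequency and
 * the matching hispeed/floor validation timestamps, so that the
 * policy-wide values can be propagated by
 * cpufreq_interactive_adjust_cpu().
 */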
static void cpufreq_interactive_get_policy_info(struct cpufreq_policy *policy,
						unsigned int *pmax_freq,
						u64 *phvt, u64 *pfvt)
{
	struct cpufreq_interactive_cpuinfo *pcpu;
	unsigned int max_freq = 0;
	u64 hvt = ~0ULL, fvt = 0;
	unsigned int i;

	for_each_cpu(i, policy->cpus) {
		pcpu = &per_cpu(cpuinfo, i);

		fvt = max(fvt, pcpu->loc_floor_val_time);
		if (pcpu->target_freq > max_freq) {
			max_freq = pcpu->target_freq;
			hvt = pcpu->loc_hispeed_val_time;
		} else if (pcpu->target_freq == max_freq) {
			hvt = min(hvt, pcpu->loc_hispeed_val_time);
		}
	}

	*pmax_freq = max_freq;
	*phvt = hvt;
	*pfvt = fvt;
}

static void cpufreq_interactive_adjust_cpu(unsigned int cpu,
					   struct cpufreq_policy *policy)
{
	struct cpufreq_interactive_cpuinfo *pcpu;
	u64 hvt, fvt;
	unsigned int max_freq;
	int i;

	cpufreq_interactive_get_policy_info(policy, &max_freq, &hvt, &fvt);

	for_each_cpu(i, policy->cpus) {
		pcpu = &per_cpu(cpuinfo, i);
		pcpu->pol_floor_val_time = fvt;
	}

	if (max_freq != policy->cur) {
		__cpufreq_driver_target(policy, max_freq, CPUFREQ_RELATION_H);
		for_each_cpu(i, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, i);
			pcpu->pol_hispeed_val_time = hvt;
		}
	}

	trace_cpufreq_interactive_setspeed(cpu, max_freq, policy->cur);
}

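/*
 * SCHED_FIFO kthread: sleeps until a CPU is flagged in
 * speedchange_cpumask, then applies the highest target frequency in
 * each flagged CPU's policy via __cpufreq_driver_target().
 */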
static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			pcpu = &per_cpu(cpuinfo, cpu);

			down_write(&pcpu->policy->rwsem);

			if (likely(down_read_trylock(&pcpu->enable_sem))) {
				if (likely(pcpu->governor_enabled))
					cpufreq_interactive_adjust_cpu(cpu,
							pcpu->policy);
				up_read(&pcpu->enable_sem);
			}

			up_write(&pcpu->policy->rwsem);
		}
	}

	return 0;
}

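/*
 * Immediately bump every online CPU governed by @tunables to at least
 * hispeed_freq, waking the speedchange task if any CPU actually needs
 * a raise.
 */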
static void cpufreq_interactive_boost(struct cpufreq_interactive_tunables *tunables)
{
	int i;
	int anyboost = 0;
	unsigned long flags[2];
	struct cpufreq_interactive_cpuinfo *pcpu;

	tunables->boosted = true;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);

		if (!down_read_trylock(&pcpu->enable_sem))
			continue;

		if (!pcpu->governor_enabled) {
			up_read(&pcpu->enable_sem);
			continue;
		}

		if (tunables != pcpu->policy->governor_data) {
			up_read(&pcpu->enable_sem);
			continue;
		}

		spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
		if (pcpu->target_freq < tunables->hispeed_freq) {
			pcpu->target_freq = tunables->hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->pol_hispeed_val_time =
				ktime_to_us(ktime_get());
			anyboost = 1;
		}
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);

		up_read(&pcpu->enable_sem);
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);

	if (anyboost)
		wake_up_process(speedchange_task);
}

static int cpufreq_interactive_notifier(
	struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_interactive_cpuinfo *pcpu;
	int cpu;
	unsigned long flags;

	if (val == CPUFREQ_POSTCHANGE) {
		pcpu = &per_cpu(cpuinfo, freq->cpu);
		if (!down_read_trylock(&pcpu->enable_sem))
			return 0;
		if (!pcpu->governor_enabled) {
			up_read(&pcpu->enable_sem);
			return 0;
		}

		for_each_cpu(cpu, pcpu->policy->cpus) {
			struct cpufreq_interactive_cpuinfo *pjcpu =
				&per_cpu(cpuinfo, cpu);
			if (cpu != freq->cpu) {
				if (!down_read_trylock(&pjcpu->enable_sem))
					continue;
				if (!pjcpu->governor_enabled) {
					up_read(&pjcpu->enable_sem);
					continue;
				}
			}
			spin_lock_irqsave(&pjcpu->load_lock, flags);
			update_load(cpu);
			spin_unlock_irqrestore(&pjcpu->load_lock, flags);
			if (cpu != freq->cpu)
				up_read(&pjcpu->enable_sem);
		}

		up_read(&pcpu->enable_sem);
	}
	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};

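/*
 * Parse a "value [freq:value] ..." tunable string (as accepted by
 * target_loads and above_hispeed_delay) into a newly allocated array.
 * An even token count is rejected since valid input always pairs each
 * extra frequency with a value, e.g. "85 1000000:90" -> {85, 1000000,
 * 90}.
 */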
static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
{
	const char *cp;
	int i;
	int ntokens = 1;
	unsigned int *tokenized_data;
	int err = -EINVAL;

	cp = buf;
	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err;

	tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
	if (!tokenized_data) {
		err = -ENOMEM;
		goto err;
	}

	cp = buf;
	i = 0;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
			goto err_kfree;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_kfree;

	*num_tokens = ntokens;
	return tokenized_data;

err_kfree:
	kfree(tokenized_data);
err:
	return ERR_PTR(err);
}

static ssize_t show_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}

static ssize_t store_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_target_loads = NULL;
	unsigned long flags;

	new_target_loads = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_target_loads))
		return PTR_RET(new_target_loads);

	spin_lock_irqsave(&tunables->target_loads_lock, flags);
	if (tunables->target_loads != default_target_loads)
		kfree(tunables->target_loads);
	tunables->target_loads = new_target_loads;
	tunables->ntarget_loads = ntokens;
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return count;
}

static ssize_t show_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables, char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay; i++)
		ret += sprintf(buf + ret, "%u%s",
			       tunables->above_hispeed_delay[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return ret;
}

static ssize_t store_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_above_hispeed_delay = NULL;
	unsigned long flags;

	new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_above_hispeed_delay))
		return PTR_RET(new_above_hispeed_delay);

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
	if (tunables->above_hispeed_delay != default_above_hispeed_delay)
		kfree(tunables->above_hispeed_delay);
	tunables->above_hispeed_delay = new_above_hispeed_delay;
	tunables->nabove_hispeed_delay = ntokens;
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return count;
}

static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%u\n", tunables->hispeed_freq);
}

static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->hispeed_freq = val;
	return count;
}

static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->go_hispeed_load = val;
	return count;
}

static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%lu\n", tunables->min_sample_time);
}

static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->min_sample_time = val;
	return count;
}

static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%lu\n", tunables->timer_rate);
}

static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val, val_round;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	val_round = jiffies_to_usecs(usecs_to_jiffies(val));
	if (val != val_round)
		pr_warn("timer_rate not aligned to jiffy. Rounded up to %lu\n",
			val_round);

	tunables->timer_rate = val_round;
	return count;
}

static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%d\n", tunables->timer_slack_val);
}

static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	long val;

	ret = kstrtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	tunables->timer_slack_val = val;
	return count;
}

static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
			  char *buf)
{
	return sprintf(buf, "%d\n", tunables->boost_val);
}

static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
			   const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boost_val = val;

	if (tunables->boost_val) {
		trace_cpufreq_interactive_boost("on");
		if (!tunables->boosted)
			cpufreq_interactive_boost(tunables);
	} else {
		tunables->boostpulse_endtime = ktime_to_us(ktime_get());
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
				const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
		tunables->boostpulse_duration_val;
	trace_cpufreq_interactive_boost("pulse");
	if (!tunables->boosted)
		cpufreq_interactive_boost(tunables);
	return count;
}

static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_duration_val = val;
	return count;
}

static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%u\n", tunables->io_is_busy);
}

static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->io_is_busy = val;
	return count;
}

/*
 * Create show/store routines
 * - sys: One governor instance for complete SYSTEM
 * - pol: One governor instance per struct cpufreq_policy
 */
#define show_gov_pol_sys(file_name)					\
static ssize_t show_##file_name##_gov_sys				\
(struct kobject *kobj, struct attribute *attr, char *buf)		\
{									\
	return show_##file_name(common_tunables, buf);			\
}									\
									\
static ssize_t show_##file_name##_gov_pol				\
(struct cpufreq_policy *policy, char *buf)				\
{									\
	return show_##file_name(policy->governor_data, buf);		\
}

#define store_gov_pol_sys(file_name)					\
static ssize_t store_##file_name##_gov_sys				\
(struct kobject *kobj, struct attribute *attr, const char *buf,		\
	size_t count)							\
{									\
	return store_##file_name(common_tunables, buf, count);		\
}									\
									\
static ssize_t store_##file_name##_gov_pol				\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	return store_##file_name(policy->governor_data, buf, count);	\
}

#define show_store_gov_pol_sys(file_name)				\
show_gov_pol_sys(file_name);						\
store_gov_pol_sys(file_name)

show_store_gov_pol_sys(target_loads);
show_store_gov_pol_sys(above_hispeed_delay);
show_store_gov_pol_sys(hispeed_freq);
show_store_gov_pol_sys(go_hispeed_load);
show_store_gov_pol_sys(min_sample_time);
show_store_gov_pol_sys(timer_rate);
show_store_gov_pol_sys(timer_slack);
show_store_gov_pol_sys(boost);
store_gov_pol_sys(boostpulse);
show_store_gov_pol_sys(boostpulse_duration);
show_store_gov_pol_sys(io_is_busy);

#define gov_sys_attr_rw(_name)						\
static struct kobj_attribute _name##_gov_sys =				\
__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)

#define gov_pol_attr_rw(_name)						\
static struct freq_attr _name##_gov_pol =				\
__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)

#define gov_sys_pol_attr_rw(_name)					\
	gov_sys_attr_rw(_name);						\
	gov_pol_attr_rw(_name)

gov_sys_pol_attr_rw(target_loads);
gov_sys_pol_attr_rw(above_hispeed_delay);
gov_sys_pol_attr_rw(hispeed_freq);
gov_sys_pol_attr_rw(go_hispeed_load);
gov_sys_pol_attr_rw(min_sample_time);
gov_sys_pol_attr_rw(timer_rate);
gov_sys_pol_attr_rw(timer_slack);
gov_sys_pol_attr_rw(boost);
gov_sys_pol_attr_rw(boostpulse_duration);
gov_sys_pol_attr_rw(io_is_busy);

static struct kobj_attribute boostpulse_gov_sys =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);

static struct freq_attr boostpulse_gov_pol =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);

/* One Governor instance for entire system */
static struct attribute *interactive_attributes_gov_sys[] = {
	&target_loads_gov_sys.attr,
	&above_hispeed_delay_gov_sys.attr,
	&hispeed_freq_gov_sys.attr,
	&go_hispeed_load_gov_sys.attr,
	&min_sample_time_gov_sys.attr,
	&timer_rate_gov_sys.attr,
	&timer_slack_gov_sys.attr,
	&boost_gov_sys.attr,
	&boostpulse_gov_sys.attr,
	&boostpulse_duration_gov_sys.attr,
	&io_is_busy_gov_sys.attr,
	NULL,
};

static struct attribute_group interactive_attr_group_gov_sys = {
	.attrs = interactive_attributes_gov_sys,
	.name = "interactive",
};

/* Per policy governor instance */
static struct attribute *interactive_attributes_gov_pol[] = {
	&target_loads_gov_pol.attr,
	&above_hispeed_delay_gov_pol.attr,
	&hispeed_freq_gov_pol.attr,
	&go_hispeed_load_gov_pol.attr,
	&min_sample_time_gov_pol.attr,
	&timer_rate_gov_pol.attr,
	&timer_slack_gov_pol.attr,
	&boost_gov_pol.attr,
	&boostpulse_gov_pol.attr,
	&boostpulse_duration_gov_pol.attr,
	&io_is_busy_gov_pol.attr,
	NULL,
};

static struct attribute_group interactive_attr_group_gov_pol = {
	.attrs = interactive_attributes_gov_pol,
	.name = "interactive",
};

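/*
 * Example of tuning from userspace.  With a system-wide governor
 * instance the group above is created under
 * /sys/devices/system/cpu/cpufreq/interactive/ (with per-policy
 * governors it appears under each policy's cpufreq directory instead);
 * the frequencies shown are examples only:
 *
 *   echo 1500000 > /sys/devices/system/cpu/cpufreq/interactive/hispeed_freq
 *   echo "85 1500000:90" > /sys/devices/system/cpu/cpufreq/interactive/target_loads
 *   echo 1 > /sys/devices/system/cpu/cpufreq/interactive/boostpulse
 */
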
static struct attribute_group *get_sysfs_attr(void)
{
	if (have_governor_per_policy())
		return &interactive_attr_group_gov_pol;
	else
		return &interactive_attr_group_gov_sys;
}

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	if (val == IDLE_END)
		cpufreq_interactive_idle_end();

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};

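/*
 * Governor entry point.  POLICY_INIT allocates (or refcounts) the
 * tunables and creates the sysfs group; POLICY_EXIT tears them down;
 * START/STOP arm and disarm the per-CPU timers under enable_sem; and
 * LIMITS clamps current and target frequencies to the new policy
 * bounds.
 */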
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_interactive_tunables *tunables;
	unsigned long flags;

	if (have_governor_per_policy())
		tunables = policy->governor_data;
	else
		tunables = common_tunables;

	if (WARN_ON(!tunables && (event != CPUFREQ_GOV_POLICY_INIT)))
		return -EINVAL;

	switch (event) {
	case CPUFREQ_GOV_POLICY_INIT:
		if (have_governor_per_policy()) {
			WARN_ON(tunables);
		} else if (tunables) {
			tunables->usage_count++;
			policy->governor_data = tunables;
			return 0;
		}

		tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
		if (!tunables) {
			pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
			return -ENOMEM;
		}

		tunables->usage_count = 1;
		tunables->above_hispeed_delay = default_above_hispeed_delay;
		tunables->nabove_hispeed_delay =
			ARRAY_SIZE(default_above_hispeed_delay);
		tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
		tunables->target_loads = default_target_loads;
		tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
		tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
		tunables->timer_rate = DEFAULT_TIMER_RATE;
		tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
		tunables->timer_slack_val = DEFAULT_TIMER_SLACK;

		spin_lock_init(&tunables->target_loads_lock);
		spin_lock_init(&tunables->above_hispeed_delay_lock);

		policy->governor_data = tunables;
		if (!have_governor_per_policy())
			common_tunables = tunables;

		rc = sysfs_create_group(get_governor_parent_kobj(policy),
				get_sysfs_attr());
		if (rc) {
			kfree(tunables);
			policy->governor_data = NULL;
			if (!have_governor_per_policy())
				common_tunables = NULL;
			return rc;
		}

		if (!policy->governor->initialized) {
			idle_notifier_register(&cpufreq_interactive_idle_nb);
			cpufreq_register_notifier(&cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
		}

		break;

	case CPUFREQ_GOV_POLICY_EXIT:
		if (!--tunables->usage_count) {
			if (policy->governor->initialized == 1) {
				cpufreq_unregister_notifier(&cpufreq_notifier_block,
						CPUFREQ_TRANSITION_NOTIFIER);
				idle_notifier_unregister(&cpufreq_interactive_idle_nb);
			}

			sysfs_remove_group(get_governor_parent_kobj(policy),
					get_sysfs_attr());

			kfree(tunables);
			common_tunables = NULL;
		}

		policy->governor_data = NULL;
		break;

	case CPUFREQ_GOV_START:
		mutex_lock(&gov_lock);

		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (!tunables->hispeed_freq)
			tunables->hispeed_freq = policy->max;

		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->pol_floor_val_time =
				ktime_to_us(ktime_get());
			pcpu->loc_floor_val_time = pcpu->pol_floor_val_time;
			pcpu->pol_hispeed_val_time = pcpu->pol_floor_val_time;
			pcpu->loc_hispeed_val_time = pcpu->pol_floor_val_time;
			down_write(&pcpu->enable_sem);
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			cpufreq_interactive_timer_start(tunables, j);
			pcpu->governor_enabled = 1;
			up_write(&pcpu->enable_sem);
		}

		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_STOP:
		mutex_lock(&gov_lock);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			down_write(&pcpu->enable_sem);
			pcpu->governor_enabled = 0;
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			up_write(&pcpu->enable_sem);
		}

		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);

			down_read(&pcpu->enable_sem);
			if (pcpu->governor_enabled == 0) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			spin_lock_irqsave(&pcpu->target_freq_lock, flags);
			if (policy->max < pcpu->target_freq)
				pcpu->target_freq = policy->max;
			else if (policy->min > pcpu->target_freq)
				pcpu->target_freq = policy->min;

			spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
			up_read(&pcpu->enable_sem);
		}
		break;
	}
	return 0;
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};

static void cpufreq_interactive_nop_timer(unsigned long data)
{
}

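/*
 * Module init: set up the deferrable per-CPU sampling timers and their
 * ordinary (non-deferrable) slack partners, then spawn the SCHED_FIFO
 * speedchange thread before registering the governor.
 */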
static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
	int ret = 0;

	/* Initialize per-cpu timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		init_timer_deferrable(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
		init_timer(&pcpu->cpu_slack_timer);
		pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
		spin_lock_init(&pcpu->load_lock);
		spin_lock_init(&pcpu->target_freq_lock);
		init_rwsem(&pcpu->enable_sem);
	}

	spin_lock_init(&speedchange_cpumask_lock);
	mutex_init(&gov_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	ret = cpufreq_register_governor(&cpufreq_gov_interactive);
	if (ret) {
		kthread_stop(speedchange_task);
		put_task_struct(speedchange_task);
	}
	return ret;
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency sensitive workloads");
MODULE_LICENSE("GPL");