1 /*
2  * drivers/cpufreq/cpufreq_interactive.c
3  *
4  * Copyright (C) 2010 Google, Inc.
5  *
6  * This software is licensed under the terms of the GNU General Public
7  * License version 2, as published by the Free Software Foundation, and
8  * may be copied, distributed, and modified under those terms.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * Author: Mike Chan (mike@android.com)
16  *
17  */
18 
19 #include <linux/cpu.h>
20 #include <linux/cpumask.h>
21 #include <linux/cpufreq.h>
22 #include <linux/module.h>
23 #include <linux/moduleparam.h>
24 #include <linux/rwsem.h>
25 #include <linux/sched.h>
26 #include <linux/tick.h>
27 #include <linux/time.h>
28 #include <linux/timer.h>
29 #include <linux/workqueue.h>
30 #include <linux/kthread.h>
31 #include <linux/slab.h>
32 #include <linux/kernel_stat.h>
33 #include <asm/cputime.h>
34 
35 #define CREATE_TRACE_POINTS
36 #include <trace/events/cpufreq_interactive.h>
37 
38 static int active_count;
39 
40 struct cpufreq_interactive_cpuinfo {
41 	struct timer_list cpu_timer;
42 	struct timer_list cpu_slack_timer;
43 	spinlock_t load_lock; /* protects the next 4 fields */
44 	u64 time_in_idle;
45 	u64 time_in_idle_timestamp;
46 	u64 cputime_speedadj;
47 	u64 cputime_speedadj_timestamp;
48 	struct cpufreq_policy *policy;
49 	struct cpufreq_frequency_table *freq_table;
50 	spinlock_t target_freq_lock; /* protects target freq */
51 	unsigned int target_freq;
52 	unsigned int floor_freq;
53 	unsigned int max_freq;
54 	u64 floor_validate_time;
55 	u64 hispeed_validate_time;
56 	struct rw_semaphore enable_sem;
57 	int governor_enabled;
58 };
59 
60 static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);
61 
62 /* realtime thread handles frequency scaling */
63 static struct task_struct *speedchange_task;
64 static cpumask_t speedchange_cpumask;
65 static spinlock_t speedchange_cpumask_lock;
66 static struct mutex gov_lock;
67 
68 /* Hi speed to bump to from lo speed when a load burst occurs (default max) */
69 static unsigned int hispeed_freq;
70 
71 /* Go to hi speed when CPU load at or above this value. */
72 #define DEFAULT_GO_HISPEED_LOAD 99
73 static unsigned long go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
74 
75 /* Target load.  Lower values result in higher CPU speeds. */
76 #define DEFAULT_TARGET_LOAD 90
77 static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
78 static spinlock_t target_loads_lock;
79 static unsigned int *target_loads = default_target_loads;
80 static int ntarget_loads = ARRAY_SIZE(default_target_loads);
81 
82 /*
83  * The minimum amount of time to spend at a frequency before we can ramp down.
84  */
85 #define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
86 static unsigned long min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
87 
88 /*
89  * The sample rate of the timer used to increase frequency
90  */
91 #define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
92 static unsigned long timer_rate = DEFAULT_TIMER_RATE;
93 
94 /*
95  * Wait this long before raising speed above hispeed, by default a single
96  * timer interval.
97  */
98 #define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
99 static unsigned int default_above_hispeed_delay[] = {
100 	DEFAULT_ABOVE_HISPEED_DELAY };
101 static spinlock_t above_hispeed_delay_lock;
102 static unsigned int *above_hispeed_delay = default_above_hispeed_delay;
103 static int nabove_hispeed_delay = ARRAY_SIZE(default_above_hispeed_delay);
104 
105 /* Non-zero means indefinite speed boost active */
106 static int boost_val;
107 /* Duration of a boost pulse in usecs */
108 static int boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
109 /* End time of boost pulse in ktime converted to usecs */
110 static u64 boostpulse_endtime;
111 
112 /*
113  * Max additional time to wait in idle, beyond timer_rate, at speeds above
114  * minimum before wakeup to reduce speed, or -1 if unnecessary.
115  */
116 #define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
117 static int timer_slack_val = DEFAULT_TIMER_SLACK;
118 
119 static bool io_is_busy;
120 
121 static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
122 		unsigned int event);
123 
124 #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
125 static
126 #endif
127 struct cpufreq_governor cpufreq_gov_interactive = {
128 	.name = "interactive",
129 	.governor = cpufreq_governor_interactive,
130 	.max_transition_latency = 10000000,
131 	.owner = THIS_MODULE,
132 };
133 
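/*
 * Fallback idle-time accounting: derive idle time (in usecs) from the
 * per-CPU kcpustat busy counters when nohz idle time is unavailable;
 * optionally returns the current wall time through *wall.
 */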
134 static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
135 						  cputime64_t *wall)
136 {
137 	u64 idle_time;
138 	u64 cur_wall_time;
139 	u64 busy_time;
140 
141 	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
142 
143 	busy_time  = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
144 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
145 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
146 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
147 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
148 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
149 
150 	idle_time = cur_wall_time - busy_time;
151 	if (wall)
152 		*wall = jiffies_to_usecs(cur_wall_time);
153 
154 	return jiffies_to_usecs(idle_time);
155 }
156 
157 static inline cputime64_t get_cpu_idle_time(unsigned int cpu,
158 					    cputime64_t *wall)
159 {
160 	u64 idle_time = get_cpu_idle_time_us(cpu, wall);
161 
162 	if (idle_time == -1ULL)
163 		idle_time = get_cpu_idle_time_jiffy(cpu, wall);
164 	else if (!io_is_busy)
165 		idle_time += get_cpu_iowait_time_us(cpu, wall);
166 
167 	return idle_time;
168 }
169 
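/*
 * Reset this CPU's load-tracking snapshot and rearm the pinned sampling
 * timer (plus the slack timer when running above policy->min).
 */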
170 static void cpufreq_interactive_timer_resched(
171 	struct cpufreq_interactive_cpuinfo *pcpu)
172 {
173 	unsigned long expires;
174 	unsigned long flags;
175 
176 	spin_lock_irqsave(&pcpu->load_lock, flags);
177 	pcpu->time_in_idle =
178 		get_cpu_idle_time(smp_processor_id(),
179 				     &pcpu->time_in_idle_timestamp);
180 	pcpu->cputime_speedadj = 0;
181 	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
182 	expires = jiffies + usecs_to_jiffies(timer_rate);
183 	mod_timer_pinned(&pcpu->cpu_timer, expires);
184 
185 	if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
186 		expires += usecs_to_jiffies(timer_slack_val);
187 		mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
188 	}
189 
190 	spin_unlock_irqrestore(&pcpu->load_lock, flags);
191 }
192 
193 /* The caller shall take enable_sem write semaphore to avoid any timer race.
194  * The cpu_timer and cpu_slack_timer must be deactivated when calling this
195  * function.
196  */
197 static void cpufreq_interactive_timer_start(int cpu)
198 {
199 	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
200 	unsigned long expires = jiffies + usecs_to_jiffies(timer_rate);
201 	unsigned long flags;
202 
203 	pcpu->cpu_timer.expires = expires;
204 	add_timer_on(&pcpu->cpu_timer, cpu);
205 	if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
206 		expires += usecs_to_jiffies(timer_slack_val);
207 		pcpu->cpu_slack_timer.expires = expires;
208 		add_timer_on(&pcpu->cpu_slack_timer, cpu);
209 	}
210 
211 	spin_lock_irqsave(&pcpu->load_lock, flags);
212 	pcpu->time_in_idle =
213 		get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp);
214 	pcpu->cputime_speedadj = 0;
215 	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
216 	spin_unlock_irqrestore(&pcpu->load_lock, flags);
217 }
218 
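/*
 * Pick the above-hispeed delay that applies at this frequency from the
 * delay/frequency-threshold pairs in above_hispeed_delay[].
 */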
219 static unsigned int freq_to_above_hispeed_delay(unsigned int freq)
220 {
221 	int i;
222 	unsigned int ret;
223 	unsigned long flags;
224 
225 	spin_lock_irqsave(&above_hispeed_delay_lock, flags);
226 
227 	for (i = 0; i < nabove_hispeed_delay - 1 &&
228 			freq >= above_hispeed_delay[i+1]; i += 2)
229 		;
230 
231 	ret = above_hispeed_delay[i];
232 	spin_unlock_irqrestore(&above_hispeed_delay_lock, flags);
233 	return ret;
234 }
235 
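/*
 * Pick the target load that applies at this frequency from the
 * load/frequency-threshold pairs in target_loads[].
 */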
236 static unsigned int freq_to_targetload(unsigned int freq)
237 {
238 	int i;
239 	unsigned int ret;
240 	unsigned long flags;
241 
242 	spin_lock_irqsave(&target_loads_lock, flags);
243 
244 	for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2)
245 		;
246 
247 	ret = target_loads[i];
248 	spin_unlock_irqrestore(&target_loads_lock, flags);
249 	return ret;
250 }
251 
252 /*
253  * If increasing frequencies never map to a lower target load then
254  * choose_freq() will find the minimum frequency that does not exceed its
255  * target load given the current load.
256  */
257 
258 static unsigned int choose_freq(
259 	struct cpufreq_interactive_cpuinfo *pcpu, unsigned int loadadjfreq)
260 {
261 	unsigned int freq = pcpu->policy->cur;
262 	unsigned int prevfreq, freqmin, freqmax;
263 	unsigned int tl;
264 	int index;
265 
266 	freqmin = 0;
267 	freqmax = UINT_MAX;
268 
269 	do {
270 		prevfreq = freq;
271 		tl = freq_to_targetload(freq);
272 
273 		/*
274 		 * Find the lowest frequency where the computed load is less
275 		 * than or equal to the target load.
276 		 */
277 
278 		if (cpufreq_frequency_table_target(
279 			    pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
280 			    CPUFREQ_RELATION_L, &index))
281 			break;
282 		freq = pcpu->freq_table[index].frequency;
283 
284 		if (freq > prevfreq) {
285 			/* The previous frequency is too low. */
286 			freqmin = prevfreq;
287 
288 			if (freq >= freqmax) {
289 				/*
290 				 * Find the highest frequency that is less
291 				 * than freqmax.
292 				 */
293 				if (cpufreq_frequency_table_target(
294 					    pcpu->policy, pcpu->freq_table,
295 					    freqmax - 1, CPUFREQ_RELATION_H,
296 					    &index))
297 					break;
298 				freq = pcpu->freq_table[index].frequency;
299 
300 				if (freq == freqmin) {
301 					/*
302 					 * The first frequency below freqmax
303 					 * has already been found to be too
304 					 * low.  freqmax is the lowest speed
305 					 * we found that is fast enough.
306 					 */
307 					freq = freqmax;
308 					break;
309 				}
310 			}
311 		} else if (freq < prevfreq) {
312 			/* The previous frequency is high enough. */
313 			freqmax = prevfreq;
314 
315 			if (freq <= freqmin) {
316 				/*
317 				 * Find the lowest frequency that is higher
318 				 * than freqmin.
319 				 */
320 				if (cpufreq_frequency_table_target(
321 					    pcpu->policy, pcpu->freq_table,
322 					    freqmin + 1, CPUFREQ_RELATION_L,
323 					    &index))
324 					break;
325 				freq = pcpu->freq_table[index].frequency;
326 
327 				/*
328 				 * If freqmax is the first frequency above
329 				 * freqmin then we have already found that
330 				 * this speed is fast enough.
331 				 */
332 				if (freq == freqmax)
333 					break;
334 			}
335 		}
336 
337 		/* If same frequency chosen as previous then done. */
338 	} while (freq != prevfreq);
339 
340 	return freq;
341 }
342 
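/*
 * Accumulate (active time * current frequency) into cputime_speedadj and
 * refresh the idle-time snapshot; returns the current timestamp.
 */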
343 static u64 update_load(int cpu)
344 {
345 	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
346 	u64 now;
347 	u64 now_idle;
348 	unsigned int delta_idle;
349 	unsigned int delta_time;
350 	u64 active_time;
351 
352 	now_idle = get_cpu_idle_time(cpu, &now);
353 	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
354 	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
355 
356 	if (delta_time <= delta_idle)
357 		active_time = 0;
358 	else
359 		active_time = delta_time - delta_idle;
360 
361 	pcpu->cputime_speedadj += active_time * pcpu->policy->cur;
362 
363 	pcpu->time_in_idle = now_idle;
364 	pcpu->time_in_idle_timestamp = now;
365 	return now;
366 }
367 
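/*
 * Per-CPU sampling timer: compute the load since the last sample, choose a
 * new target frequency (honouring hispeed/boost and floor constraints) and
 * hand the CPU off to the speedchange task if the target changed.
 */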
368 static void cpufreq_interactive_timer(unsigned long data)
369 {
370 	u64 now;
371 	unsigned int delta_time;
372 	u64 cputime_speedadj;
373 	int cpu_load;
374 	struct cpufreq_interactive_cpuinfo *pcpu =
375 		&per_cpu(cpuinfo, data);
376 	unsigned int new_freq;
377 	unsigned int loadadjfreq;
378 	unsigned int index;
379 	unsigned long flags;
380 	bool boosted;
381 
382 	if (!down_read_trylock(&pcpu->enable_sem))
383 		return;
384 	if (!pcpu->governor_enabled)
385 		goto exit;
386 
387 	spin_lock_irqsave(&pcpu->load_lock, flags);
388 	now = update_load(data);
389 	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
390 	cputime_speedadj = pcpu->cputime_speedadj;
391 	spin_unlock_irqrestore(&pcpu->load_lock, flags);
392 
393 	if (WARN_ON_ONCE(!delta_time))
394 		goto rearm;
395 
396 	spin_lock_irqsave(&pcpu->target_freq_lock, flags);
397 	do_div(cputime_speedadj, delta_time);
398 	loadadjfreq = (unsigned int)cputime_speedadj * 100;
399 	cpu_load = loadadjfreq / pcpu->policy->cur;
400 	boosted = boost_val || now < boostpulse_endtime;
401 
402 	if (cpu_load >= go_hispeed_load || boosted) {
403 		if (pcpu->target_freq < hispeed_freq) {
404 			new_freq = hispeed_freq;
405 		} else {
406 			new_freq = choose_freq(pcpu, loadadjfreq);
407 
408 			if (new_freq < hispeed_freq)
409 				new_freq = hispeed_freq;
410 		}
411 	} else {
412 		new_freq = choose_freq(pcpu, loadadjfreq);
413 	}
414 
415 	if (pcpu->target_freq >= hispeed_freq &&
416 	    new_freq > pcpu->target_freq &&
417 	    now - pcpu->hispeed_validate_time <
418 	    freq_to_above_hispeed_delay(pcpu->target_freq)) {
419 		trace_cpufreq_interactive_notyet(
420 			data, cpu_load, pcpu->target_freq,
421 			pcpu->policy->cur, new_freq);
422 		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
423 		goto rearm;
424 	}
425 
426 	pcpu->hispeed_validate_time = now;
427 
428 	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
429 					   new_freq, CPUFREQ_RELATION_L,
430 					   &index)) {
431 		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
432 		goto rearm;
433 	}
434 
435 	new_freq = pcpu->freq_table[index].frequency;
436 
437 	/*
438 	 * Do not scale below floor_freq unless we have been at or above the
439 	 * floor frequency for the minimum sample time since last validated.
440 	 */
441 	if (new_freq < pcpu->floor_freq) {
442 		if (now - pcpu->floor_validate_time < min_sample_time) {
443 			trace_cpufreq_interactive_notyet(
444 				data, cpu_load, pcpu->target_freq,
445 				pcpu->policy->cur, new_freq);
446 			spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
447 			goto rearm;
448 		}
449 	}
450 
451 	/*
452 	 * Update the timestamp for checking whether speed has been held at
453 	 * or above the selected frequency for a minimum of min_sample_time,
454 	 * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
455 	 * allow the speed to drop as soon as the boostpulse duration expires
456 	 * (or the indefinite boost is turned off).
457 	 */
458 
459 	if (!boosted || new_freq > hispeed_freq) {
460 		pcpu->floor_freq = new_freq;
461 		pcpu->floor_validate_time = now;
462 	}
463 
464 	if (pcpu->target_freq == new_freq) {
465 		trace_cpufreq_interactive_already(
466 			data, cpu_load, pcpu->target_freq,
467 			pcpu->policy->cur, new_freq);
468 		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
469 		goto rearm_if_notmax;
470 	}
471 
472 	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
473 					 pcpu->policy->cur, new_freq);
474 
475 	pcpu->target_freq = new_freq;
476 	spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
477 	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
478 	cpumask_set_cpu(data, &speedchange_cpumask);
479 	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
480 	wake_up_process(speedchange_task);
481 
482 rearm_if_notmax:
483 	/*
484 	 * Already set max speed and don't see a need to change that,
485 	 * wait until next idle to re-evaluate, don't need timer.
486 	 */
487 	if (pcpu->target_freq == pcpu->policy->max)
488 		goto exit;
489 
490 rearm:
491 	if (!timer_pending(&pcpu->cpu_timer))
492 		cpufreq_interactive_timer_resched(pcpu);
493 
494 exit:
495 	up_read(&pcpu->enable_sem);
496 	return;
497 }
498 
499 static void cpufreq_interactive_idle_start(void)
500 {
501 	struct cpufreq_interactive_cpuinfo *pcpu =
502 		&per_cpu(cpuinfo, smp_processor_id());
503 	int pending;
504 
505 	if (!down_read_trylock(&pcpu->enable_sem))
506 		return;
507 	if (!pcpu->governor_enabled) {
508 		up_read(&pcpu->enable_sem);
509 		return;
510 	}
511 
512 	pending = timer_pending(&pcpu->cpu_timer);
513 
514 	if (pcpu->target_freq != pcpu->policy->min) {
515 		/*
516 		 * Entering idle while not at lowest speed.  On some
517 		 * platforms this can hold the other CPU(s) at that speed
518 		 * even though the CPU is idle. Set a timer to re-evaluate
519 		 * speed so this idle CPU doesn't hold the other CPUs above
520 		 * min indefinitely.  This should probably be a quirk of
521 		 * the CPUFreq driver.
522 		 */
523 		if (!pending)
524 			cpufreq_interactive_timer_resched(pcpu);
525 	}
526 
527 	up_read(&pcpu->enable_sem);
528 }
529 
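/*
 * Idle exit: rearm the sampling timer if it is not pending, or run the
 * timer handler immediately if it has already expired.
 */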
530 static void cpufreq_interactive_idle_end(void)
531 {
532 	struct cpufreq_interactive_cpuinfo *pcpu =
533 		&per_cpu(cpuinfo, smp_processor_id());
534 
535 	if (!down_read_trylock(&pcpu->enable_sem))
536 		return;
537 	if (!pcpu->governor_enabled) {
538 		up_read(&pcpu->enable_sem);
539 		return;
540 	}
541 
542 	/* Arm the timer for 1-2 ticks later if not already. */
543 	if (!timer_pending(&pcpu->cpu_timer)) {
544 		cpufreq_interactive_timer_resched(pcpu);
545 	} else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
546 		del_timer(&pcpu->cpu_timer);
547 		del_timer(&pcpu->cpu_slack_timer);
548 		cpufreq_interactive_timer(smp_processor_id());
549 	}
550 
551 	up_read(&pcpu->enable_sem);
552 }
553 
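/*
 * Realtime kthread: for each CPU flagged in speedchange_cpumask, apply the
 * highest target_freq among the CPUs sharing its policy via
 * __cpufreq_driver_target().
 */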
554 static int cpufreq_interactive_speedchange_task(void *data)
555 {
556 	unsigned int cpu;
557 	cpumask_t tmp_mask;
558 	unsigned long flags;
559 	struct cpufreq_interactive_cpuinfo *pcpu;
560 
561 	while (1) {
562 		set_current_state(TASK_INTERRUPTIBLE);
563 		spin_lock_irqsave(&speedchange_cpumask_lock, flags);
564 
565 		if (cpumask_empty(&speedchange_cpumask)) {
566 			spin_unlock_irqrestore(&speedchange_cpumask_lock,
567 					       flags);
568 			schedule();
569 
570 			if (kthread_should_stop())
571 				break;
572 
573 			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
574 		}
575 
576 		set_current_state(TASK_RUNNING);
577 		tmp_mask = speedchange_cpumask;
578 		cpumask_clear(&speedchange_cpumask);
579 		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
580 
581 		for_each_cpu(cpu, &tmp_mask) {
582 			unsigned int j;
583 			unsigned int max_freq = 0;
584 
585 			pcpu = &per_cpu(cpuinfo, cpu);
586 			if (!down_read_trylock(&pcpu->enable_sem))
587 				continue;
588 			if (!pcpu->governor_enabled) {
589 				up_read(&pcpu->enable_sem);
590 				continue;
591 			}
592 
593 			for_each_cpu(j, pcpu->policy->cpus) {
594 				struct cpufreq_interactive_cpuinfo *pjcpu =
595 					&per_cpu(cpuinfo, j);
596 
597 				if (pjcpu->target_freq > max_freq)
598 					max_freq = pjcpu->target_freq;
599 			}
600 
601 			if (max_freq != pcpu->policy->cur)
602 				__cpufreq_driver_target(pcpu->policy,
603 							max_freq,
604 							CPUFREQ_RELATION_H);
605 			trace_cpufreq_interactive_setspeed(cpu,
606 						     pcpu->target_freq,
607 						     pcpu->policy->cur);
608 
609 			up_read(&pcpu->enable_sem);
610 		}
611 	}
612 
613 	return 0;
614 }
615 
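/*
 * Bump every online CPU whose target is below hispeed_freq up to
 * hispeed_freq, set the floor accordingly, and wake the speedchange task
 * if anything changed.
 */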
616 static void cpufreq_interactive_boost(void)
617 {
618 	int i;
619 	int anyboost = 0;
620 	unsigned long flags[2];
621 	struct cpufreq_interactive_cpuinfo *pcpu;
622 
623 	spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);
624 
625 	for_each_online_cpu(i) {
626 		pcpu = &per_cpu(cpuinfo, i);
627 		spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
628 		if (pcpu->target_freq < hispeed_freq) {
629 			pcpu->target_freq = hispeed_freq;
630 			cpumask_set_cpu(i, &speedchange_cpumask);
631 			pcpu->hispeed_validate_time =
632 				ktime_to_us(ktime_get());
633 			anyboost = 1;
634 		}
635 
636 		/*
637 		 * Set floor freq and (re)start timer for when last
638 		 * validated.
639 		 */
640 
641 		pcpu->floor_freq = hispeed_freq;
642 		pcpu->floor_validate_time = ktime_to_us(ktime_get());
643 		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);
644 	}
645 
646 	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);
647 
648 	if (anyboost)
649 		wake_up_process(speedchange_task);
650 }
651 
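/*
 * Frequency-transition notifier: on POSTCHANGE, update the load accounting
 * for every CPU sharing the policy.
 */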
652 static int cpufreq_interactive_notifier(
653 	struct notifier_block *nb, unsigned long val, void *data)
654 {
655 	struct cpufreq_freqs *freq = data;
656 	struct cpufreq_interactive_cpuinfo *pcpu;
657 	int cpu;
658 	unsigned long flags;
659 
660 	if (val == CPUFREQ_POSTCHANGE) {
661 		pcpu = &per_cpu(cpuinfo, freq->cpu);
662 		if (!down_read_trylock(&pcpu->enable_sem))
663 			return 0;
664 		if (!pcpu->governor_enabled) {
665 			up_read(&pcpu->enable_sem);
666 			return 0;
667 		}
668 
669 		for_each_cpu(cpu, pcpu->policy->cpus) {
670 			struct cpufreq_interactive_cpuinfo *pjcpu =
671 				&per_cpu(cpuinfo, cpu);
672 			if (cpu != freq->cpu) {
673 				if (!down_read_trylock(&pjcpu->enable_sem))
674 					continue;
675 				if (!pjcpu->governor_enabled) {
676 					up_read(&pjcpu->enable_sem);
677 					continue;
678 				}
679 			}
680 			spin_lock_irqsave(&pjcpu->load_lock, flags);
681 			update_load(cpu);
682 			spin_unlock_irqrestore(&pjcpu->load_lock, flags);
683 			if (cpu != freq->cpu)
684 				up_read(&pjcpu->enable_sem);
685 		}
686 
687 		up_read(&pcpu->enable_sem);
688 	}
689 	return 0;
690 }
691 
692 static struct notifier_block cpufreq_notifier_block = {
693 	.notifier_call = cpufreq_interactive_notifier,
694 };
695 
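/*
 * Parse a space- or colon-separated list of unsigned ints from buf into a
 * kmalloc'd array; the token count must be odd. Returns an ERR_PTR on
 * failure.
 */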
696 static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
697 {
698 	const char *cp;
699 	int i;
700 	int ntokens = 1;
701 	unsigned int *tokenized_data;
702 	int err = -EINVAL;
703 
704 	cp = buf;
705 	while ((cp = strpbrk(cp + 1, " :")))
706 		ntokens++;
707 
708 	if (!(ntokens & 0x1))
709 		goto err;
710 
711 	tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
712 	if (!tokenized_data) {
713 		err = -ENOMEM;
714 		goto err;
715 	}
716 
717 	cp = buf;
718 	i = 0;
719 	while (i < ntokens) {
720 		if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
721 			goto err_kfree;
722 
723 		cp = strpbrk(cp, " :");
724 		if (!cp)
725 			break;
726 		cp++;
727 	}
728 
729 	if (i != ntokens)
730 		goto err_kfree;
731 
732 	*num_tokens = ntokens;
733 	return tokenized_data;
734 
735 err_kfree:
736 	kfree(tokenized_data);
737 err:
738 	return ERR_PTR(err);
739 }
740 
741 static ssize_t show_target_loads(
742 	struct kobject *kobj, struct attribute *attr, char *buf)
743 {
744 	int i;
745 	ssize_t ret = 0;
746 	unsigned long flags;
747 
748 	spin_lock_irqsave(&target_loads_lock, flags);
749 
750 	for (i = 0; i < ntarget_loads; i++)
751 		ret += sprintf(buf + ret, "%u%s", target_loads[i],
752 			       i & 0x1 ? ":" : " ");
753 
754 	sprintf(buf + ret - 1, "\n");
755 	spin_unlock_irqrestore(&target_loads_lock, flags);
756 	return ret;
757 }
758 
759 static ssize_t store_target_loads(
760 	struct kobject *kobj, struct attribute *attr, const char *buf,
761 	size_t count)
762 {
763 	int ntokens;
764 	unsigned int *new_target_loads = NULL;
765 	unsigned long flags;
766 
767 	new_target_loads = get_tokenized_data(buf, &ntokens);
768 	if (IS_ERR(new_target_loads))
769 		return PTR_RET(new_target_loads);
770 
771 	spin_lock_irqsave(&target_loads_lock, flags);
772 	if (target_loads != default_target_loads)
773 		kfree(target_loads);
774 	target_loads = new_target_loads;
775 	ntarget_loads = ntokens;
776 	spin_unlock_irqrestore(&target_loads_lock, flags);
777 	return count;
778 }
779 
780 static struct global_attr target_loads_attr =
781 	__ATTR(target_loads, S_IRUGO | S_IWUSR,
782 		show_target_loads, store_target_loads);
783 
784 static ssize_t show_above_hispeed_delay(
785 	struct kobject *kobj, struct attribute *attr, char *buf)
786 {
787 	int i;
788 	ssize_t ret = 0;
789 	unsigned long flags;
790 
791 	spin_lock_irqsave(&above_hispeed_delay_lock, flags);
792 
793 	for (i = 0; i < nabove_hispeed_delay; i++)
794 		ret += sprintf(buf + ret, "%u%s", above_hispeed_delay[i],
795 			       i & 0x1 ? ":" : " ");
796 
797 	sprintf(buf + ret - 1, "\n");
798 	spin_unlock_irqrestore(&above_hispeed_delay_lock, flags);
799 	return ret;
800 }
801 
802 static ssize_t store_above_hispeed_delay(
803 	struct kobject *kobj, struct attribute *attr, const char *buf,
804 	size_t count)
805 {
806 	int ntokens;
807 	unsigned int *new_above_hispeed_delay = NULL;
808 	unsigned long flags;
809 
810 	new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
811 	if (IS_ERR(new_above_hispeed_delay))
812 		return PTR_RET(new_above_hispeed_delay);
813 
814 	spin_lock_irqsave(&above_hispeed_delay_lock, flags);
815 	if (above_hispeed_delay != default_above_hispeed_delay)
816 		kfree(above_hispeed_delay);
817 	above_hispeed_delay = new_above_hispeed_delay;
818 	nabove_hispeed_delay = ntokens;
819 	spin_unlock_irqrestore(&above_hispeed_delay_lock, flags);
820 	return count;
821 
822 }
823 
824 static struct global_attr above_hispeed_delay_attr =
825 	__ATTR(above_hispeed_delay, S_IRUGO | S_IWUSR,
826 		show_above_hispeed_delay, store_above_hispeed_delay);
827 
828 static ssize_t show_hispeed_freq(struct kobject *kobj,
829 				 struct attribute *attr, char *buf)
830 {
831 	return sprintf(buf, "%u\n", hispeed_freq);
832 }
833 
834 static ssize_t store_hispeed_freq(struct kobject *kobj,
835 				  struct attribute *attr, const char *buf,
836 				  size_t count)
837 {
838 	int ret;
839 	unsigned long val;
840 
841 	ret = strict_strtoul(buf, 0, &val);
842 	if (ret < 0)
843 		return ret;
844 	hispeed_freq = val;
845 	return count;
846 }
847 
848 static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
849 		show_hispeed_freq, store_hispeed_freq);
850 
851 
852 static ssize_t show_go_hispeed_load(struct kobject *kobj,
853 				     struct attribute *attr, char *buf)
854 {
855 	return sprintf(buf, "%lu\n", go_hispeed_load);
856 }
857 
858 static ssize_t store_go_hispeed_load(struct kobject *kobj,
859 			struct attribute *attr, const char *buf, size_t count)
860 {
861 	int ret;
862 	unsigned long val;
863 
864 	ret = strict_strtoul(buf, 0, &val);
865 	if (ret < 0)
866 		return ret;
867 	go_hispeed_load = val;
868 	return count;
869 }
870 
871 static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
872 		show_go_hispeed_load, store_go_hispeed_load);
873 
874 static ssize_t show_min_sample_time(struct kobject *kobj,
875 				struct attribute *attr, char *buf)
876 {
877 	return sprintf(buf, "%lu\n", min_sample_time);
878 }
879 
880 static ssize_t store_min_sample_time(struct kobject *kobj,
881 			struct attribute *attr, const char *buf, size_t count)
882 {
883 	int ret;
884 	unsigned long val;
885 
886 	ret = strict_strtoul(buf, 0, &val);
887 	if (ret < 0)
888 		return ret;
889 	min_sample_time = val;
890 	return count;
891 }
892 
893 static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
894 		show_min_sample_time, store_min_sample_time);
895 
896 static ssize_t show_timer_rate(struct kobject *kobj,
897 			struct attribute *attr, char *buf)
898 {
899 	return sprintf(buf, "%lu\n", timer_rate);
900 }
901 
902 static ssize_t store_timer_rate(struct kobject *kobj,
903 			struct attribute *attr, const char *buf, size_t count)
904 {
905 	int ret;
906 	unsigned long val;
907 
908 	ret = strict_strtoul(buf, 0, &val);
909 	if (ret < 0)
910 		return ret;
911 	timer_rate = val;
912 	return count;
913 }
914 
915 static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
916 		show_timer_rate, store_timer_rate);
917 
918 static ssize_t show_timer_slack(
919 	struct kobject *kobj, struct attribute *attr, char *buf)
920 {
921 	return sprintf(buf, "%d\n", timer_slack_val);
922 }
923 
924 static ssize_t store_timer_slack(
925 	struct kobject *kobj, struct attribute *attr, const char *buf,
926 	size_t count)
927 {
928 	int ret;
929 	long val;
930 
931 	ret = kstrtol(buf, 10, &val);
932 	if (ret < 0)
933 		return ret;
934 
935 	timer_slack_val = val;
936 	return count;
937 }
938 
939 define_one_global_rw(timer_slack);
940 
941 static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
942 			  char *buf)
943 {
944 	return sprintf(buf, "%d\n", boost_val);
945 }
946 
947 static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
948 			   const char *buf, size_t count)
949 {
950 	int ret;
951 	unsigned long val;
952 
953 	ret = kstrtoul(buf, 0, &val);
954 	if (ret < 0)
955 		return ret;
956 
957 	boost_val = val;
958 
959 	if (boost_val) {
960 		trace_cpufreq_interactive_boost("on");
961 		cpufreq_interactive_boost();
962 	} else {
963 		boostpulse_endtime = ktime_to_us(ktime_get());
964 		trace_cpufreq_interactive_unboost("off");
965 	}
966 
967 	return count;
968 }
969 
970 define_one_global_rw(boost);
971 
972 static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
973 				const char *buf, size_t count)
974 {
975 	int ret;
976 	unsigned long val;
977 
978 	ret = kstrtoul(buf, 0, &val);
979 	if (ret < 0)
980 		return ret;
981 
982 	boostpulse_endtime = ktime_to_us(ktime_get()) + boostpulse_duration_val;
983 	trace_cpufreq_interactive_boost("pulse");
984 	cpufreq_interactive_boost();
985 	return count;
986 }
987 
988 static struct global_attr boostpulse =
989 	__ATTR(boostpulse, 0200, NULL, store_boostpulse);
990 
991 static ssize_t show_boostpulse_duration(
992 	struct kobject *kobj, struct attribute *attr, char *buf)
993 {
994 	return sprintf(buf, "%d\n", boostpulse_duration_val);
995 }
996 
997 static ssize_t store_boostpulse_duration(
998 	struct kobject *kobj, struct attribute *attr, const char *buf,
999 	size_t count)
1000 {
1001 	int ret;
1002 	unsigned long val;
1003 
1004 	ret = kstrtoul(buf, 0, &val);
1005 	if (ret < 0)
1006 		return ret;
1007 
1008 	boostpulse_duration_val = val;
1009 	return count;
1010 }
1011 
1012 define_one_global_rw(boostpulse_duration);
1013 
1014 static ssize_t show_io_is_busy(struct kobject *kobj,
1015 			struct attribute *attr, char *buf)
1016 {
1017 	return sprintf(buf, "%u\n", io_is_busy);
1018 }
1019 
1020 static ssize_t store_io_is_busy(struct kobject *kobj,
1021 			struct attribute *attr, const char *buf, size_t count)
1022 {
1023 	int ret;
1024 	unsigned long val;
1025 
1026 	ret = kstrtoul(buf, 0, &val);
1027 	if (ret < 0)
1028 		return ret;
1029 	io_is_busy = val;
1030 	return count;
1031 }
1032 
1033 static struct global_attr io_is_busy_attr = __ATTR(io_is_busy, 0644,
1034 		show_io_is_busy, store_io_is_busy);
1035 
1036 static struct attribute *interactive_attributes[] = {
1037 	&target_loads_attr.attr,
1038 	&above_hispeed_delay_attr.attr,
1039 	&hispeed_freq_attr.attr,
1040 	&go_hispeed_load_attr.attr,
1041 	&min_sample_time_attr.attr,
1042 	&timer_rate_attr.attr,
1043 	&timer_slack.attr,
1044 	&boost.attr,
1045 	&boostpulse.attr,
1046 	&boostpulse_duration.attr,
1047 	&io_is_busy_attr.attr,
1048 	NULL,
1049 };
1050 
1051 static struct attribute_group interactive_attr_group = {
1052 	.attrs = interactive_attributes,
1053 	.name = "interactive",
1054 };
1055 
1056 static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
1057 					     unsigned long val,
1058 					     void *data)
1059 {
1060 	switch (val) {
1061 	case IDLE_START:
1062 		cpufreq_interactive_idle_start();
1063 		break;
1064 	case IDLE_END:
1065 		cpufreq_interactive_idle_end();
1066 		break;
1067 	}
1068 
1069 	return 0;
1070 }
1071 
1072 static struct notifier_block cpufreq_interactive_idle_nb = {
1073 	.notifier_call = cpufreq_interactive_idle_notifier,
1074 };
1075 
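/*
 * Governor entry point: handles GOV_START/GOV_STOP/GOV_LIMITS for a policy,
 * covering per-CPU timer setup and teardown, sysfs group and notifier
 * registration, and clamping target_freq to the new limits.
 */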
1076 static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
1077 		unsigned int event)
1078 {
1079 	int rc;
1080 	unsigned int j;
1081 	struct cpufreq_interactive_cpuinfo *pcpu;
1082 	struct cpufreq_frequency_table *freq_table;
1083 	unsigned long flags;
1084 
1085 	switch (event) {
1086 	case CPUFREQ_GOV_START:
1087 		if (!cpu_online(policy->cpu))
1088 			return -EINVAL;
1089 
1090 		mutex_lock(&gov_lock);
1091 
1092 		freq_table =
1093 			cpufreq_frequency_get_table(policy->cpu);
1094 		if (!hispeed_freq)
1095 			hispeed_freq = policy->max;
1096 
1097 		for_each_cpu(j, policy->cpus) {
1098 			pcpu = &per_cpu(cpuinfo, j);
1099 			pcpu->policy = policy;
1100 			pcpu->target_freq = policy->cur;
1101 			pcpu->freq_table = freq_table;
1102 			pcpu->floor_freq = pcpu->target_freq;
1103 			pcpu->floor_validate_time =
1104 				ktime_to_us(ktime_get());
1105 			pcpu->hispeed_validate_time =
1106 				pcpu->floor_validate_time;
1107 			pcpu->max_freq = policy->max;
1108 			down_write(&pcpu->enable_sem);
1109 			cpufreq_interactive_timer_start(j);
1110 			pcpu->governor_enabled = 1;
1111 			up_write(&pcpu->enable_sem);
1112 		}
1113 
1114 		/*
1115 		 * Do not register the idle hook and create sysfs
1116 		 * entries if we have already done so.
1117 		 */
1118 		if (++active_count > 1) {
1119 			mutex_unlock(&gov_lock);
1120 			return 0;
1121 		}
1122 
1123 		rc = sysfs_create_group(cpufreq_global_kobject,
1124 				&interactive_attr_group);
1125 		if (rc) {
1126 			mutex_unlock(&gov_lock);
1127 			return rc;
1128 		}
1129 
1130 		idle_notifier_register(&cpufreq_interactive_idle_nb);
1131 		cpufreq_register_notifier(
1132 			&cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
1133 		mutex_unlock(&gov_lock);
1134 		break;
1135 
1136 	case CPUFREQ_GOV_STOP:
1137 		mutex_lock(&gov_lock);
1138 		for_each_cpu(j, policy->cpus) {
1139 			pcpu = &per_cpu(cpuinfo, j);
1140 			down_write(&pcpu->enable_sem);
1141 			pcpu->governor_enabled = 0;
1142 			del_timer_sync(&pcpu->cpu_timer);
1143 			del_timer_sync(&pcpu->cpu_slack_timer);
1144 			up_write(&pcpu->enable_sem);
1145 		}
1146 
1147 		if (--active_count > 0) {
1148 			mutex_unlock(&gov_lock);
1149 			return 0;
1150 		}
1151 
1152 		cpufreq_unregister_notifier(
1153 			&cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
1154 		idle_notifier_unregister(&cpufreq_interactive_idle_nb);
1155 		sysfs_remove_group(cpufreq_global_kobject,
1156 				&interactive_attr_group);
1157 		mutex_unlock(&gov_lock);
1158 
1159 		break;
1160 
1161 	case CPUFREQ_GOV_LIMITS:
1162 		if (policy->max < policy->cur)
1163 			__cpufreq_driver_target(policy,
1164 					policy->max, CPUFREQ_RELATION_H);
1165 		else if (policy->min > policy->cur)
1166 			__cpufreq_driver_target(policy,
1167 					policy->min, CPUFREQ_RELATION_L);
1168 		for_each_cpu(j, policy->cpus) {
1169 			pcpu = &per_cpu(cpuinfo, j);
1170 
1171 			down_read(&pcpu->enable_sem);
1172 			if (pcpu->governor_enabled == 0) {
1173 				up_read(&pcpu->enable_sem);
1174 				continue;
1175 			}
1176 
1177 			spin_lock_irqsave(&pcpu->target_freq_lock, flags);
1178 			if (policy->max < pcpu->target_freq)
1179 				pcpu->target_freq = policy->max;
1180 			else if (policy->min > pcpu->target_freq)
1181 				pcpu->target_freq = policy->min;
1182 
1183 			spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
1184 			up_read(&pcpu->enable_sem);
1185 
1186 			/* Reschedule the timer only if policy->max is raised.
1187 			 * Delete the timers first, otherwise the timer callback
1188 			 * may return without re-arming the timer when it fails
1189 			 * to acquire the semaphore. This race could leave the
1190 			 * timer stopped unexpectedly.
1191 			 */
1192 
1193 			if (policy->max > pcpu->max_freq) {
1194 				down_write(&pcpu->enable_sem);
1195 				del_timer_sync(&pcpu->cpu_timer);
1196 				del_timer_sync(&pcpu->cpu_slack_timer);
1197 				cpufreq_interactive_timer_start(j);
1198 				up_write(&pcpu->enable_sem);
1199 			}
1200 
1201 			pcpu->max_freq = policy->max;
1202 		}
1203 		break;
1204 	}
1205 	return 0;
1206 }
1207 
1208 static void cpufreq_interactive_nop_timer(unsigned long data)
1209 {
1210 }
1211 
1212 static int __init cpufreq_interactive_init(void)
1213 {
1214 	unsigned int i;
1215 	struct cpufreq_interactive_cpuinfo *pcpu;
1216 	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
1217 
1218 	/* Initialize per-cpu timers */
1219 	for_each_possible_cpu(i) {
1220 		pcpu = &per_cpu(cpuinfo, i);
1221 		init_timer_deferrable(&pcpu->cpu_timer);
1222 		pcpu->cpu_timer.function = cpufreq_interactive_timer;
1223 		pcpu->cpu_timer.data = i;
1224 		init_timer(&pcpu->cpu_slack_timer);
1225 		pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
1226 		spin_lock_init(&pcpu->load_lock);
1227 		spin_lock_init(&pcpu->target_freq_lock);
1228 		init_rwsem(&pcpu->enable_sem);
1229 	}
1230 
1231 	spin_lock_init(&target_loads_lock);
1232 	spin_lock_init(&speedchange_cpumask_lock);
1233 	spin_lock_init(&above_hispeed_delay_lock);
1234 	mutex_init(&gov_lock);
1235 	speedchange_task =
1236 		kthread_create(cpufreq_interactive_speedchange_task, NULL,
1237 			       "cfinteractive");
1238 	if (IS_ERR(speedchange_task))
1239 		return PTR_ERR(speedchange_task);
1240 
1241 	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
1242 	get_task_struct(speedchange_task);
1243 
1244 	/* NB: wake up so the thread does not look hung to the freezer */
1245 	wake_up_process(speedchange_task);
1246 
1247 	return cpufreq_register_governor(&cpufreq_gov_interactive);
1248 }
1249 
1250 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
1251 fs_initcall(cpufreq_interactive_init);
1252 #else
1253 module_init(cpufreq_interactive_init);
1254 #endif
1255 
1256 static void __exit cpufreq_interactive_exit(void)
1257 {
1258 	cpufreq_unregister_governor(&cpufreq_gov_interactive);
1259 	kthread_stop(speedchange_task);
1260 	put_task_struct(speedchange_task);
1261 }
1262 
1263 module_exit(cpufreq_interactive_exit);
1264 
1265 MODULE_AUTHOR("Mike Chan <mike@android.com>");
1266 MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
1267 	"latency-sensitive workloads");
1268 MODULE_LICENSE("GPL");
1269