/*
 * drivers/cpufreq/cpufreq_stats.c
 *
 * Copyright (C) 2003-2004 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 * (C) 2004 Zou Nan hai <nanhai.zou@intel.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/sysfs.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/kobject.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <asm/cputime.h>

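/*
 * Serializes updates to the per-cpu stats tables below; taken from the
 * sysfs show paths, the frequency-transition notifier and the
 * table-creation paths.
 */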
static spinlock_t cpufreq_stats_lock;

struct cpufreq_stats {
	unsigned int cpu;
	unsigned int total_trans;
	unsigned long long last_time;
	unsigned int max_state;
	unsigned int state_num;
	unsigned int last_index;
	u64 *time_in_state;
	unsigned int *freq_table;
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	unsigned int *trans_table;
#endif
};

struct all_cpufreq_stats {
	unsigned int state_num;
	cputime64_t *time_in_state;
	unsigned int *freq_table;
};

struct cpufreq_power_stats {
	unsigned int state_num;
	unsigned int *curr;
	unsigned int *freq_table;
};

struct all_freq_table {
	unsigned int *freq_table;
	unsigned int table_size;
};

static struct all_freq_table *all_freq_table;

static DEFINE_PER_CPU(struct all_cpufreq_stats *, all_cpufreq_stats);
static DEFINE_PER_CPU(struct cpufreq_stats *, cpufreq_stats_table);
static DEFINE_PER_CPU(struct cpufreq_power_stats *, cpufreq_power_stats);

struct cpufreq_stats_attribute {
	struct attribute attr;
	ssize_t (*show)(struct cpufreq_stats *, char *);
};

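/*
 * Credit the jiffies elapsed since the last update to the bucket for the
 * frequency the CPU has been running at (stat->last_index), in both the
 * per-policy table and, if present, the hotplug-persistent table.
 */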
static int cpufreq_stats_update(unsigned int cpu)
{
	struct cpufreq_stats *stat;
	struct all_cpufreq_stats *all_stat;
	unsigned long long cur_time;

	cur_time = get_jiffies_64();
	spin_lock(&cpufreq_stats_lock);
	stat = per_cpu(cpufreq_stats_table, cpu);
	all_stat = per_cpu(all_cpufreq_stats, cpu);
	if (!stat) {
		spin_unlock(&cpufreq_stats_lock);
		return 0;
	}
	if (stat->time_in_state) {
		stat->time_in_state[stat->last_index] +=
			cur_time - stat->last_time;
		if (all_stat)
			all_stat->time_in_state[stat->last_index] +=
				cur_time - stat->last_time;
	}
	stat->last_time = cur_time;
	spin_unlock(&cpufreq_stats_lock);
	return 0;
}

static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
{
	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);

	if (!stat)
		return 0;
	return sprintf(buf, "%u\n", stat->total_trans);
}

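/*
 * One "<frequency in kHz> <time in USER_HZ ticks, typically 10 ms each>"
 * pair per line, e.g. (illustrative values):
 *
 *	$ cat /sys/devices/system/cpu/cpu0/cpufreq/stats/time_in_state
 *	1600000 4260
 *	800000 118385
 */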
static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
{
	ssize_t len = 0;
	int i;
	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);

	if (!stat)
		return 0;
	cpufreq_stats_update(stat->cpu);
	for (i = 0; i < stat->state_num; i++) {
		len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i],
			(unsigned long long)
			cputime64_to_clock_t(stat->time_in_state[i]));
	}
	return len;
}

static int get_index_all_cpufreq_stat(struct all_cpufreq_stats *all_stat,
		unsigned int freq)
{
	int i;

	if (!all_stat)
		return -1;
	for (i = 0; i < all_stat->state_num; i++) {
		if (all_stat->freq_table[i] == freq)
			return i;
	}
	return -1;
}

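/*
 * Charge the task's cpu_power counter: the per-state current value
 * (powerstats->curr) for the frequency the CPU last ran at, multiplied
 * by the cputime consumed, converted to microseconds.
 */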
void acct_update_power(struct task_struct *task, cputime_t cputime)
{
	struct cpufreq_power_stats *powerstats;
	struct cpufreq_stats *stats;
	unsigned int cpu_num, curr;

	if (!task)
		return;
	cpu_num = task_cpu(task);
	powerstats = per_cpu(cpufreq_power_stats, cpu_num);
	stats = per_cpu(cpufreq_stats_table, cpu_num);
	if (!powerstats || !stats)
		return;

	curr = powerstats->curr[stats->last_index];
	if (task->cpu_power != ULLONG_MAX)
		task->cpu_power += curr * cputime_to_usecs(cputime);
}
EXPORT_SYMBOL_GPL(acct_update_power);

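/*
 * One line per CPU listing "<frequency>=<current>" pairs, published on
 * cpufreq_global_kobject (normally /sys/devices/system/cpu/cpufreq).
 * Example (illustrative values):
 *
 *	$ cat /sys/devices/system/cpu/cpufreq/current_in_state
 *	CPU0:300000=120 600000=210 1200000=500
 */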
static ssize_t show_current_in_state(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	ssize_t len = 0;
	unsigned int i, cpu;
	struct cpufreq_power_stats *powerstats;

	spin_lock(&cpufreq_stats_lock);
	for_each_possible_cpu(cpu) {
		powerstats = per_cpu(cpufreq_power_stats, cpu);
		if (!powerstats)
			continue;
		len += scnprintf(buf + len, PAGE_SIZE - len, "CPU%d:", cpu);
		for (i = 0; i < powerstats->state_num; i++)
			len += scnprintf(buf + len, PAGE_SIZE - len,
					"%d=%d ", powerstats->freq_table[i],
					powerstats->curr[i]);
		len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
	}
	spin_unlock(&cpufreq_stats_lock);
	return len;
}

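/*
 * Matrix of time-in-state values: one row per frequency in the combined
 * table, one column per possible CPU, with "N/A" where a CPU does not
 * support that frequency. Example (illustrative values):
 *
 *	freq		cpu0		cpu1
 *	300000		1034		N/A
 *	600000		28917		31104
 */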
static ssize_t show_all_time_in_state(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	ssize_t len = 0;
	unsigned int i, cpu, freq, index;
	struct all_cpufreq_stats *all_stat;
	struct cpufreq_policy *policy;

	len += scnprintf(buf + len, PAGE_SIZE - len, "freq\t\t");
	for_each_possible_cpu(cpu) {
		len += scnprintf(buf + len, PAGE_SIZE - len, "cpu%d\t\t", cpu);
		if (cpu_online(cpu))
			cpufreq_stats_update(cpu);
	}

	if (!all_freq_table)
		goto out;
	for (i = 0; i < all_freq_table->table_size; i++) {
		freq = all_freq_table->freq_table[i];
		len += scnprintf(buf + len, PAGE_SIZE - len, "\n%u\t\t", freq);
		for_each_possible_cpu(cpu) {
			policy = cpufreq_cpu_get(cpu);
			if (policy == NULL)
				continue;
			all_stat = per_cpu(all_cpufreq_stats, policy->cpu);
			index = get_index_all_cpufreq_stat(all_stat, freq);
			if (index != -1) {
				len += scnprintf(buf + len, PAGE_SIZE - len,
					"%llu\t\t", (unsigned long long)
					cputime64_to_clock_t(all_stat->time_in_state[index]));
			} else {
				len += scnprintf(buf + len, PAGE_SIZE - len,
						"N/A\t\t");
			}
			cpufreq_cpu_put(policy);
		}
	}

out:
	len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
	return len;
}

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
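/*
 * Square matrix counting transitions between frequency states: row =
 * the frequency switched from, column = the frequency switched to.
 * Example (illustrative values):
 *
 *	   From  :    To
 *	         :    300000    600000
 *	   300000:         0        12
 *	   600000:        11         0
 */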
static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
{
	ssize_t len = 0;
	int i, j;

	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);

	if (!stat)
		return 0;
	cpufreq_stats_update(stat->cpu);
	len += snprintf(buf + len, PAGE_SIZE - len, "   From  :    To\n");
	len += snprintf(buf + len, PAGE_SIZE - len, "         : ");
	for (i = 0; i < stat->state_num; i++) {
		if (len >= PAGE_SIZE)
			break;
		len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
				stat->freq_table[i]);
	}
	if (len >= PAGE_SIZE)
		return PAGE_SIZE;

	len += snprintf(buf + len, PAGE_SIZE - len, "\n");

	for (i = 0; i < stat->state_num; i++) {
		if (len >= PAGE_SIZE)
			break;

		len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ",
				stat->freq_table[i]);

		for (j = 0; j < stat->state_num; j++) {
			if (len >= PAGE_SIZE)
				break;
			len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
					stat->trans_table[i*stat->max_state+j]);
		}
		if (len >= PAGE_SIZE)
			break;
		len += snprintf(buf + len, PAGE_SIZE - len, "\n");
	}
	if (len >= PAGE_SIZE)
		return PAGE_SIZE;
	return len;
}
cpufreq_freq_attr_ro(trans_table);
#endif

cpufreq_freq_attr_ro(total_trans);
cpufreq_freq_attr_ro(time_in_state);

static struct attribute *default_attrs[] = {
	&total_trans.attr,
	&time_in_state.attr,
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	&trans_table.attr,
#endif
	NULL
};
static struct attribute_group stats_attr_group = {
	.attrs = default_attrs,
	.name = "stats"
};

static struct kobj_attribute _attr_all_time_in_state = __ATTR(all_time_in_state,
		0444, show_all_time_in_state, NULL);

static struct kobj_attribute _attr_current_in_state = __ATTR(current_in_state,
		0444, show_current_in_state, NULL);

static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
{
	int index;

	for (index = 0; index < stat->max_state; index++)
		if (stat->freq_table[index] == freq)
			return index;
	return -1;
}

/* should be called late in the CPU removal sequence so that the stats
 * memory is still available in case someone tries to use it.
 */
static void cpufreq_stats_free_table(unsigned int cpu)
{
	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);

	if (stat) {
		pr_debug("%s: Free stat table\n", __func__);
		kfree(stat->time_in_state);
		kfree(stat);
		per_cpu(cpufreq_stats_table, cpu) = NULL;
	}
}

/* must be called early in the CPU removal sequence (before
 * cpufreq_remove_dev) so that policy is still valid.
 */
static void cpufreq_stats_free_sysfs(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		return;

	if (!cpufreq_frequency_get_table(cpu))
		goto put_ref;

	if (!policy_is_shared(policy)) {
		pr_debug("%s: Free sysfs stat\n", __func__);
		sysfs_remove_group(&policy->kobj, &stats_attr_group);
	}

put_ref:
	cpufreq_cpu_put(policy);
}

static void cpufreq_allstats_free(void)
{
	int cpu;
	struct all_cpufreq_stats *all_stat;

	sysfs_remove_file(cpufreq_global_kobject,
			&_attr_all_time_in_state.attr);

	for_each_possible_cpu(cpu) {
		all_stat = per_cpu(all_cpufreq_stats, cpu);
		if (!all_stat)
			continue;
		kfree(all_stat->time_in_state);
		kfree(all_stat);
		per_cpu(all_cpufreq_stats, cpu) = NULL;
	}
	if (all_freq_table) {
		kfree(all_freq_table->freq_table);
		kfree(all_freq_table);
		all_freq_table = NULL;
	}
}

static void cpufreq_powerstats_free(void)
{
	int cpu;
	struct cpufreq_power_stats *powerstats;

	sysfs_remove_file(cpufreq_global_kobject, &_attr_current_in_state.attr);

	for_each_possible_cpu(cpu) {
		powerstats = per_cpu(cpufreq_power_stats, cpu);
		if (!powerstats)
			continue;
		kfree(powerstats->curr);
		kfree(powerstats);
		per_cpu(cpufreq_power_stats, cpu) = NULL;
	}
}

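/*
 * All per-policy arrays (time_in_state, freq_table and, when
 * CONFIG_CPU_FREQ_STAT_DETAILS is set, trans_table) live in a single
 * allocation hung off stat->time_in_state, which is why only that
 * pointer is kfree()d in cpufreq_stats_free_table().
 */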
static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table, int count)
{
	unsigned int i, j, ret = 0;
	struct cpufreq_stats *stat;
	struct cpufreq_policy *data;
	unsigned int alloc_size;
	unsigned int cpu = policy->cpu;

	if (per_cpu(cpufreq_stats_table, cpu))
		return -EBUSY;
	stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL);
	if (!stat)
		return -ENOMEM;

	data = cpufreq_cpu_get(cpu);
	if (!data) {
		ret = -EINVAL;
		goto error_get_fail;
	}

	ret = sysfs_create_group(&data->kobj, &stats_attr_group);
	if (ret)
		goto error_out;

	stat->cpu = cpu;
	per_cpu(cpufreq_stats_table, cpu) = stat;

	alloc_size = count * sizeof(int) + count * sizeof(u64);

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	alloc_size += count * count * sizeof(int);
#endif
	stat->max_state = count;
	stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
	if (!stat->time_in_state) {
		ret = -ENOMEM;
		goto error_out;
	}
	stat->freq_table = (unsigned int *)(stat->time_in_state + count);

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	stat->trans_table = stat->freq_table + count;
#endif
	j = 0;
	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned int freq = table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		if (freq_table_get_index(stat, freq) == -1)
			stat->freq_table[j++] = freq;
	}
	stat->state_num = j;
	spin_lock(&cpufreq_stats_lock);
	stat->last_time = get_jiffies_64();
	stat->last_index = freq_table_get_index(stat, policy->cur);
	spin_unlock(&cpufreq_stats_lock);
	cpufreq_cpu_put(data);
	return 0;
error_out:
	cpufreq_cpu_put(data);
error_get_fail:
	kfree(stat);
	per_cpu(cpufreq_stats_table, cpu) = NULL;
	return ret;
}

static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
{
	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table,
			policy->last_cpu);

	pr_debug("Updating stats_table for new_cpu %u from last_cpu %u\n",
			policy->cpu, policy->last_cpu);
	per_cpu(cpufreq_stats_table, policy->cpu) = per_cpu(cpufreq_stats_table,
			policy->last_cpu);
	per_cpu(cpufreq_stats_table, policy->last_cpu) = NULL;
	stat->cpu = policy->cpu;
}

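/*
 * Build the per-cpu power table. The per-state current values are read
 * from the "current" property of the CPU's device-tree node; a node
 * shaped roughly like this is assumed (illustrative values, one cell
 * per valid frequency):
 *
 *	cpu@0 {
 *		...
 *		current = <120 210 500>;
 *	};
 */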
static void cpufreq_powerstats_create(unsigned int cpu,
		struct cpufreq_frequency_table *table, int count)
{
	unsigned int alloc_size, i = 0, j = 0, ret = 0;
	struct cpufreq_power_stats *powerstats;
	struct device_node *cpu_node;
	char device_path[16];

	powerstats = kzalloc(sizeof(struct cpufreq_power_stats),
			GFP_KERNEL);
	if (!powerstats)
		return;

	/* Allocate one buffer holding both the per-cpu freq table and the
	 * per-freq current values. */
	alloc_size = count * sizeof(unsigned int) +
		count * sizeof(unsigned int);
	powerstats->curr = kzalloc(alloc_size, GFP_KERNEL);
	if (!powerstats->curr) {
		kfree(powerstats);
		return;
	}
	powerstats->freq_table = powerstats->curr + count;

	spin_lock(&cpufreq_stats_lock);
	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END && j < count; i++) {
		unsigned int freq = table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		powerstats->freq_table[j++] = freq;
	}
	powerstats->state_num = j;

	snprintf(device_path, sizeof(device_path), "/cpus/cpu@%u", cpu);
	cpu_node = of_find_node_by_path(device_path);
	if (cpu_node) {
		ret = of_property_read_u32_array(cpu_node, "current",
				powerstats->curr, count);
		if (ret) {
			kfree(powerstats->curr);
			kfree(powerstats);
			powerstats = NULL;
		}
	}
	per_cpu(cpufreq_power_stats, cpu) = powerstats;
	spin_unlock(&cpufreq_stats_lock);
}

static int compare_for_sort(const void *lhs_ptr, const void *rhs_ptr)
{
	unsigned int lhs = *(const unsigned int *)(lhs_ptr);
	unsigned int rhs = *(const unsigned int *)(rhs_ptr);

	if (lhs < rhs)
		return -1;
	if (lhs > rhs)
		return 1;
	return 0;
}

static bool check_all_freq_table(unsigned int freq)
{
	int i;

	for (i = 0; i < all_freq_table->table_size; i++) {
		if (freq == all_freq_table->freq_table[i])
			return true;
	}
	return false;
}

static void create_all_freq_table(void)
{
	all_freq_table = kzalloc(sizeof(struct all_freq_table),
			GFP_KERNEL);
	if (!all_freq_table)
		pr_warn("could not allocate memory for all_freq_table\n");
}

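/*
 * Append a frequency to the combined table, growing it by one slot.
 * Called under cpufreq_stats_lock, hence GFP_ATOMIC. Note that
 * krealloc() returns NULL on failure (not an ERR_PTR), so a temporary
 * is used to avoid leaking the old table.
 */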
static void add_all_freq_table(unsigned int freq)
{
	unsigned int *table;
	unsigned int size;

	size = sizeof(unsigned int) * (all_freq_table->table_size + 1);
	table = krealloc(all_freq_table->freq_table, size, GFP_ATOMIC);
	if (!table) {
		pr_warn("Could not reallocate memory for freq_table\n");
		return;
	}
	all_freq_table->freq_table = table;
	all_freq_table->freq_table[all_freq_table->table_size++] = freq;
}

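/*
 * Allocate the hotplug-persistent time-in-state table for one CPU and
 * merge its frequencies into the combined all_freq_table, keeping that
 * table sorted in ascending order.
 */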
static void cpufreq_allstats_create(unsigned int cpu,
		struct cpufreq_frequency_table *table, int count)
{
	int i, j = 0;
	unsigned int alloc_size;
	struct all_cpufreq_stats *all_stat;
	bool sort_needed = false;

	all_stat = kzalloc(sizeof(struct all_cpufreq_stats),
			GFP_KERNEL);
	if (!all_stat) {
		pr_warn("Cannot allocate memory for cpufreq stats\n");
		return;
	}

	/* Allocate memory for the freq table per cpu as well as clockticks
	 * per freq. */
	alloc_size = count * sizeof(int) + count * sizeof(cputime64_t);
	all_stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
	if (!all_stat->time_in_state) {
		pr_warn("Cannot allocate memory for cpufreq time_in_state\n");
		kfree(all_stat);
		return;
	}
	all_stat->freq_table = (unsigned int *)
		(all_stat->time_in_state + count);

	spin_lock(&cpufreq_stats_lock);
	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned int freq = table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		all_stat->freq_table[j++] = freq;
		if (all_freq_table && !check_all_freq_table(freq)) {
			add_all_freq_table(freq);
			sort_needed = true;
		}
	}
	if (sort_needed)
		sort(all_freq_table->freq_table, all_freq_table->table_size,
				sizeof(unsigned int), &compare_for_sort, NULL);
	all_stat->state_num = j;
	per_cpu(all_cpufreq_stats, cpu) = all_stat;
	spin_unlock(&cpufreq_stats_lock);
}

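/*
 * Policy notifier: on CPUFREQ_NOTIFY, count the valid entries in the
 * frequency table and create the per-policy, aggregate and power tables
 * where missing; on CPUFREQ_UPDATE_POLICY_CPU, migrate the stats table
 * to the policy's new owning CPU.
 */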
static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
		unsigned long val, void *data)
{
	int count = 0, i;
	struct cpufreq_policy *policy = data;
	struct cpufreq_frequency_table *table;
	unsigned int cpu_num, cpu = policy->cpu;

	if (val == CPUFREQ_UPDATE_POLICY_CPU) {
		cpufreq_stats_update_policy_cpu(policy);
		return 0;
	}

	if (val != CPUFREQ_NOTIFY)
		return 0;
	table = cpufreq_frequency_get_table(cpu);
	if (!table)
		return 0;

	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned int freq = table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		count++;
	}

	if (!per_cpu(all_cpufreq_stats, cpu))
		cpufreq_allstats_create(cpu, table, count);

	for_each_possible_cpu(cpu_num) {
		if (!per_cpu(cpufreq_power_stats, cpu_num))
			cpufreq_powerstats_create(cpu_num, table, count);
	}

	return cpufreq_stats_create_table(policy, table, count);
}

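/*
 * Transition notifier: on CPUFREQ_POSTCHANGE, close out the time spent
 * at the old frequency, record the new state index, and bump the
 * transition counters.
 */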
static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
		unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_stats *stat;
	int old_index, new_index;

	if (val != CPUFREQ_POSTCHANGE)
		return 0;

	stat = per_cpu(cpufreq_stats_table, freq->cpu);
	if (!stat)
		return 0;

	old_index = stat->last_index;
	new_index = freq_table_get_index(stat, freq->new);

	/* We can't do stat->time_in_state[-1] = .. */
	if (old_index == -1 || new_index == -1)
		return 0;

	cpufreq_stats_update(freq->cpu);

	if (old_index == new_index)
		return 0;

	spin_lock(&cpufreq_stats_lock);
	stat->last_index = new_index;
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	stat->trans_table[old_index * stat->max_state + new_index]++;
#endif
	stat->total_trans++;
	spin_unlock(&cpufreq_stats_lock);
	return 0;
}

static int cpufreq_stats_create_table_cpu(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *table;
	int i, count, cpu_num, ret = -ENODEV;

	policy = cpufreq_cpu_get(cpu);
	if (!policy)
		return -ENODEV;

	table = cpufreq_frequency_get_table(cpu);
	if (!table)
		goto out;

	count = 0;
	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned int freq = table[i].frequency;

		if (freq != CPUFREQ_ENTRY_INVALID)
			count++;
	}

	if (!per_cpu(all_cpufreq_stats, cpu))
		cpufreq_allstats_create(cpu, table, count);

	for_each_possible_cpu(cpu_num) {
		if (!per_cpu(cpufreq_power_stats, cpu_num))
			cpufreq_powerstats_create(cpu_num, table, count);
	}

	ret = cpufreq_stats_create_table(policy, table, count);

out:
	cpufreq_cpu_put(policy);
	return ret;
}

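/*
 * Hotplug callback: the sysfs group must go away while the policy is
 * still valid (CPU_DOWN_PREPARE), whereas the stats memory is kept
 * until CPU_DEAD in case a late reader still uses it.
 */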
static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
		unsigned long action,
		void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		cpufreq_update_policy(cpu);
		break;
	case CPU_DOWN_PREPARE:
		cpufreq_stats_free_sysfs(cpu);
		break;
	case CPU_DEAD:
		cpufreq_stats_free_table(cpu);
		break;
	case CPU_UP_CANCELED_FROZEN:
		cpufreq_stats_free_sysfs(cpu);
		cpufreq_stats_free_table(cpu);
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		cpufreq_stats_create_table_cpu(cpu);
		break;
	}
	return NOTIFY_OK;
}

/* priority=1 so this will get called before cpufreq_remove_dev */
static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
	.notifier_call = cpufreq_stat_cpu_callback,
	.priority = 1,
};

static struct notifier_block notifier_policy_block = {
	.notifier_call = cpufreq_stat_notifier_policy
};

static struct notifier_block notifier_trans_block = {
	.notifier_call = cpufreq_stat_notifier_trans
};

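/*
 * Module init: register the policy and transition notifiers plus the
 * hotplug callback, seed tables for CPUs that are already online, and
 * publish the two global attributes (all_time_in_state and
 * current_in_state) on cpufreq_global_kobject, which normally appears
 * as /sys/devices/system/cpu/cpufreq.
 */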
static int __init cpufreq_stats_init(void)
{
	int ret;
	unsigned int cpu;

	spin_lock_init(&cpufreq_stats_lock);
	ret = cpufreq_register_notifier(&notifier_policy_block,
			CPUFREQ_POLICY_NOTIFIER);
	if (ret)
		return ret;

	register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
	for_each_online_cpu(cpu)
		cpufreq_update_policy(cpu);

	ret = cpufreq_register_notifier(&notifier_trans_block,
			CPUFREQ_TRANSITION_NOTIFIER);
	if (ret) {
		cpufreq_unregister_notifier(&notifier_policy_block,
				CPUFREQ_POLICY_NOTIFIER);
		unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
		for_each_online_cpu(cpu)
			cpufreq_stats_free_table(cpu);
		return ret;
	}

	create_all_freq_table();
	ret = sysfs_create_file(cpufreq_global_kobject,
			&_attr_all_time_in_state.attr);
	if (ret)
		pr_warn("Cannot create sysfs file for cpufreq stats\n");

	ret = sysfs_create_file(cpufreq_global_kobject,
			&_attr_current_in_state.attr);
	if (ret)
		pr_warn("Cannot create sysfs file for cpufreq current stats\n");

	return 0;
}

static void __exit cpufreq_stats_exit(void)
{
	unsigned int cpu;

	cpufreq_unregister_notifier(&notifier_policy_block,
			CPUFREQ_POLICY_NOTIFIER);
	cpufreq_unregister_notifier(&notifier_trans_block,
			CPUFREQ_TRANSITION_NOTIFIER);
	unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
	for_each_online_cpu(cpu) {
		cpufreq_stats_free_table(cpu);
		cpufreq_stats_free_sysfs(cpu);
	}
	cpufreq_allstats_free();
	cpufreq_powerstats_free();
}

MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>");
MODULE_DESCRIPTION("'cpufreq_stats' - A driver to export cpufreq stats "
		"through sysfs filesystem");
MODULE_LICENSE("GPL");

module_init(cpufreq_stats_init);
module_exit(cpufreq_stats_exit);