1 /*
2 * drivers/cpufreq/cpufreq_stats.c
3 *
4 * Copyright (C) 2003-2004 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
5 * (C) 2004 Zou Nan hai <nanhai.zou@intel.com>.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12 #include <linux/cpu.h>
13 #include <linux/cpufreq.h>
14 #include <linux/module.h>
15 #include <linux/slab.h>
16 #include <linux/sort.h>
17 #include <linux/of.h>
18 #include <linux/sched.h>
19 #include <linux/cputime.h>
20
/* Protects every per-cpu stats table and the shared all_freq_table. */
static spinlock_t cpufreq_stats_lock;
22
/* Per-policy frequency residency and transition statistics. */
struct cpufreq_stats {
	unsigned int cpu;		/* CPU that owns this table */
	unsigned int total_trans;	/* number of frequency transitions */
	unsigned long long last_time;	/* jiffies64 stamp of last update */
	unsigned int max_state;		/* allocated slots (raw table size) */
	unsigned int state_num;		/* populated slots after dedup */
	unsigned int last_index;	/* freq_table index of current freq */
	u64 *time_in_state;		/* jiffies spent at each frequency */
	unsigned int *freq_table;	/* frequencies, parallel array */
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	unsigned int *trans_table;	/* max_state x max_state counters */
#endif
};
36
/*
 * Cumulative per-cpu residency that survives policy teardown; backs the
 * global "all_time_in_state" sysfs file.
 */
struct all_cpufreq_stats {
	unsigned int state_num;		/* entries in the arrays below */
	cputime64_t *time_in_state;	/* cumulative time per frequency */
	unsigned int *freq_table;	/* frequencies, parallel array */
};
42
/*
 * Per-cpu current-draw table, populated from the device tree; backs the
 * "current_in_state" sysfs file and acct_update_power() accounting.
 */
struct cpufreq_power_stats {
	unsigned int state_num;		/* entries in the arrays below */
	unsigned int *curr;		/* current draw per frequency */
	unsigned int *freq_table;	/* frequencies, parallel array */
};
48
/* Sorted union of every frequency seen on any CPU. */
struct all_freq_table {
	unsigned int *freq_table;	/* grown with krealloc, kept sorted */
	unsigned int table_size;	/* number of valid entries */
};
53
/* NULL until create_all_freq_table() succeeds at module init. */
static struct all_freq_table *all_freq_table;

static DEFINE_PER_CPU(struct all_cpufreq_stats *, all_cpufreq_stats);
static DEFINE_PER_CPU(struct cpufreq_stats *, cpufreq_stats_table);
static DEFINE_PER_CPU(struct cpufreq_power_stats *, cpufreq_power_stats);
59
/* Sysfs attribute wrapper for the per-policy stats files. */
struct cpufreq_stats_attribute {
	struct attribute attr;
	ssize_t(*show) (struct cpufreq_stats *, char *);
};
64
cpufreq_stats_update(unsigned int cpu)65 static int cpufreq_stats_update(unsigned int cpu)
66 {
67 struct cpufreq_stats *stat;
68 struct all_cpufreq_stats *all_stat;
69 unsigned long long cur_time;
70
71 cur_time = get_jiffies_64();
72 spin_lock(&cpufreq_stats_lock);
73 stat = per_cpu(cpufreq_stats_table, cpu);
74 all_stat = per_cpu(all_cpufreq_stats, cpu);
75 if (!stat) {
76 spin_unlock(&cpufreq_stats_lock);
77 return 0;
78 }
79 if (stat->time_in_state) {
80 stat->time_in_state[stat->last_index] +=
81 cur_time - stat->last_time;
82 if (all_stat)
83 all_stat->time_in_state[stat->last_index] +=
84 cur_time - stat->last_time;
85 }
86 stat->last_time = cur_time;
87 spin_unlock(&cpufreq_stats_lock);
88 return 0;
89 }
90
show_total_trans(struct cpufreq_policy * policy,char * buf)91 static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
92 {
93 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
94 if (!stat)
95 return 0;
96 return sprintf(buf, "%d\n",
97 per_cpu(cpufreq_stats_table, stat->cpu)->total_trans);
98 }
99
show_time_in_state(struct cpufreq_policy * policy,char * buf)100 static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
101 {
102 ssize_t len = 0;
103 int i;
104 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
105 if (!stat)
106 return 0;
107 cpufreq_stats_update(stat->cpu);
108 for (i = 0; i < stat->state_num; i++) {
109 len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i],
110 (unsigned long long)
111 jiffies_64_to_clock_t(stat->time_in_state[i]));
112 }
113 return len;
114 }
115
get_index_all_cpufreq_stat(struct all_cpufreq_stats * all_stat,unsigned int freq)116 static int get_index_all_cpufreq_stat(struct all_cpufreq_stats *all_stat,
117 unsigned int freq)
118 {
119 int i;
120 if (!all_stat)
121 return -1;
122 for (i = 0; i < all_stat->state_num; i++) {
123 if (all_stat->freq_table[i] == freq)
124 return i;
125 }
126 return -1;
127 }
128
/*
 * acct_update_power - charge estimated power use to @task
 * @task:    task being charged (may be NULL; then nothing happens)
 * @cputime: CPU time consumed since the last accounting point
 *
 * Looks up the current draw for the frequency @task's CPU last ran at
 * and accumulates draw * elapsed-usecs into task->cpu_power.  A value
 * of ULLONG_MAX marks a task exempt from accounting.
 */
void acct_update_power(struct task_struct *task, cputime_t cputime) {
	struct cpufreq_power_stats *powerstats;
	struct cpufreq_stats *stats;
	unsigned int cpu, draw;

	if (!task)
		return;

	cpu = task_cpu(task);
	powerstats = per_cpu(cpufreq_power_stats, cpu);
	stats = per_cpu(cpufreq_stats_table, cpu);
	if (!powerstats || !stats)
		return;

	draw = powerstats->curr[stats->last_index];
	if (task->cpu_power != ULLONG_MAX)
		task->cpu_power += draw * cputime_to_usecs(cputime);
}
EXPORT_SYMBOL_GPL(acct_update_power);
147
/*
 * show_current_in_state - global sysfs read for "current_in_state"
 *
 * One line per CPU that has a power table: "CPU<n>:" followed by
 * "<freq>=<current> " pairs.  Holds cpufreq_stats_lock so the tables
 * cannot be freed while being printed.
 */
static ssize_t show_current_in_state(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	ssize_t len = 0;
	unsigned int i, cpu;
	struct cpufreq_power_stats *powerstats;

	spin_lock(&cpufreq_stats_lock);
	for_each_possible_cpu(cpu) {
		powerstats = per_cpu(cpufreq_power_stats, cpu);
		if (!powerstats)
			continue;
		len += scnprintf(buf + len, PAGE_SIZE - len, "CPU%d:", cpu);
		for (i = 0; i < powerstats->state_num; i++)
			len += scnprintf(buf + len, PAGE_SIZE - len,
					"%d=%d ", powerstats->freq_table[i],
					powerstats->curr[i]);
		len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
	}
	spin_unlock(&cpufreq_stats_lock);
	return len;
}
170
/*
 * show_all_time_in_state - global sysfs read for "all_time_in_state"
 *
 * Emits a matrix: header row of CPU columns, then one row per known
 * frequency with each CPU's cumulative residency (or "N/A" when that
 * CPU's table lacks the frequency).  Online CPUs are brought up to
 * date first so the numbers include time since the last transition.
 */
static ssize_t show_all_time_in_state(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	ssize_t len = 0;
	unsigned int i, cpu, freq, index;
	struct all_cpufreq_stats *all_stat;
	struct cpufreq_policy *policy;

	len += scnprintf(buf + len, PAGE_SIZE - len, "freq\t\t");
	for_each_possible_cpu(cpu) {
		len += scnprintf(buf + len, PAGE_SIZE - len, "cpu%d\t\t", cpu);
		if (cpu_online(cpu))
			cpufreq_stats_update(cpu);
	}

	if (!all_freq_table)
		goto out;
	for (i = 0; i < all_freq_table->table_size; i++) {
		freq = all_freq_table->freq_table[i];
		len += scnprintf(buf + len, PAGE_SIZE - len, "\n%u\t\t", freq);
		for_each_possible_cpu(cpu) {
			policy = cpufreq_cpu_get(cpu);
			if (policy == NULL)
				continue;
			all_stat = per_cpu(all_cpufreq_stats, policy->cpu);
			/*
			 * index is unsigned: a -1 return wraps to UINT_MAX
			 * but the comparison below still matches it.
			 */
			index = get_index_all_cpufreq_stat(all_stat, freq);
			if (index != -1) {
				len += scnprintf(buf + len, PAGE_SIZE - len,
					"%llu\t\t", (unsigned long long)
					cputime64_to_clock_t(all_stat->time_in_state[index]));
			} else {
				len += scnprintf(buf + len, PAGE_SIZE - len,
					"N/A\t\t");
			}
			cpufreq_cpu_put(policy);
		}
	}

out:
	len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
	return len;
}
213
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
/*
 * show_trans_table - sysfs read handler for stats/trans_table
 *
 * Prints the state_num x state_num transition-count matrix with a
 * header row/column of frequencies.  Every write is bounds-checked so
 * the output never exceeds one page; on overflow exactly PAGE_SIZE is
 * returned (truncated output).
 */
static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
{
	ssize_t len = 0;
	int i, j;

	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
	if (!stat)
		return 0;
	cpufreq_stats_update(stat->cpu);
	len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n");
	len += snprintf(buf + len, PAGE_SIZE - len, " : ");
	for (i = 0; i < stat->state_num; i++) {
		if (len >= PAGE_SIZE)
			break;
		len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
				stat->freq_table[i]);
	}
	if (len >= PAGE_SIZE)
		return PAGE_SIZE;

	len += snprintf(buf + len, PAGE_SIZE - len, "\n");

	for (i = 0; i < stat->state_num; i++) {
		if (len >= PAGE_SIZE)
			break;

		len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ",
				stat->freq_table[i]);

		for (j = 0; j < stat->state_num; j++) {
			if (len >= PAGE_SIZE)
				break;
			/* row-major: matrix is max_state wide, not state_num */
			len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
					stat->trans_table[i*stat->max_state+j]);
		}
		if (len >= PAGE_SIZE)
			break;
		len += snprintf(buf + len, PAGE_SIZE - len, "\n");
	}
	if (len >= PAGE_SIZE)
		return PAGE_SIZE;
	return len;
}
cpufreq_freq_attr_ro(trans_table);
#endif
260
/* Per-policy read-only attributes, grouped under <policy>/stats/. */
cpufreq_freq_attr_ro(total_trans);
cpufreq_freq_attr_ro(time_in_state);

static struct attribute *default_attrs[] = {
	&total_trans.attr,
	&time_in_state.attr,
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	&trans_table.attr,
#endif
	NULL
};
static struct attribute_group stats_attr_group = {
	.attrs = default_attrs,
	.name = "stats"
};

/* Global read-only files created under the cpufreq kobject at init. */
static struct kobj_attribute _attr_all_time_in_state = __ATTR(all_time_in_state,
		0444, show_all_time_in_state, NULL);

static struct kobj_attribute _attr_current_in_state = __ATTR(current_in_state,
		0444, show_current_in_state, NULL);
282
freq_table_get_index(struct cpufreq_stats * stat,unsigned int freq)283 static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
284 {
285 int index;
286 for (index = 0; index < stat->max_state; index++)
287 if (stat->freq_table[index] == freq)
288 return index;
289 return -1;
290 }
291
__cpufreq_stats_free_table(struct cpufreq_policy * policy)292 static void __cpufreq_stats_free_table(struct cpufreq_policy *policy)
293 {
294 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
295
296 if (!stat)
297 return;
298
299 pr_debug("%s: Free stat table\n", __func__);
300
301 sysfs_remove_group(&policy->kobj, &stats_attr_group);
302 kfree(stat->time_in_state);
303 kfree(stat);
304 per_cpu(cpufreq_stats_table, policy->cpu) = NULL;
305 }
306
cpufreq_stats_free_table(unsigned int cpu)307 static void cpufreq_stats_free_table(unsigned int cpu)
308 {
309 struct cpufreq_policy *policy;
310
311 policy = cpufreq_cpu_get(cpu);
312 if (!policy)
313 return;
314
315 if (cpufreq_frequency_get_table(policy->cpu))
316 __cpufreq_stats_free_table(policy);
317
318 cpufreq_cpu_put(policy);
319 }
320
cpufreq_allstats_free(void)321 static void cpufreq_allstats_free(void)
322 {
323 int cpu;
324 struct all_cpufreq_stats *all_stat;
325
326 sysfs_remove_file(cpufreq_global_kobject,
327 &_attr_all_time_in_state.attr);
328
329 for_each_possible_cpu(cpu) {
330 all_stat = per_cpu(all_cpufreq_stats, cpu);
331 if (!all_stat)
332 continue;
333 kfree(all_stat->time_in_state);
334 kfree(all_stat);
335 per_cpu(all_cpufreq_stats, cpu) = NULL;
336 }
337 if (all_freq_table) {
338 kfree(all_freq_table->freq_table);
339 kfree(all_freq_table);
340 all_freq_table = NULL;
341 }
342 }
343
cpufreq_powerstats_free(void)344 static void cpufreq_powerstats_free(void)
345 {
346 int cpu;
347 struct cpufreq_power_stats *powerstats;
348
349 sysfs_remove_file(cpufreq_global_kobject, &_attr_current_in_state.attr);
350
351 for_each_possible_cpu(cpu) {
352 powerstats = per_cpu(cpufreq_power_stats, cpu);
353 if (!powerstats)
354 continue;
355 kfree(powerstats->curr);
356 kfree(powerstats);
357 per_cpu(cpufreq_power_stats, cpu) = NULL;
358 }
359 }
360
__cpufreq_stats_create_table(struct cpufreq_policy * policy,struct cpufreq_frequency_table * table,int count)361 static int __cpufreq_stats_create_table(struct cpufreq_policy *policy,
362 struct cpufreq_frequency_table *table, int count)
363 {
364 unsigned int i, ret = 0;
365 struct cpufreq_stats *stat;
366 unsigned int alloc_size;
367 unsigned int cpu = policy->cpu;
368 struct cpufreq_frequency_table *pos;
369
370 if (per_cpu(cpufreq_stats_table, cpu))
371 return -EBUSY;
372 stat = kzalloc(sizeof(*stat), GFP_KERNEL);
373 if ((stat) == NULL)
374 return -ENOMEM;
375
376 ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
377 if (ret)
378 goto error_out;
379
380 stat->cpu = cpu;
381 per_cpu(cpufreq_stats_table, cpu) = stat;
382
383 alloc_size = count * sizeof(int) + count * sizeof(u64);
384
385 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
386 alloc_size += count * count * sizeof(int);
387 #endif
388 stat->max_state = count;
389 stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
390 if (!stat->time_in_state) {
391 ret = -ENOMEM;
392 goto error_alloc;
393 }
394 stat->freq_table = (unsigned int *)(stat->time_in_state + count);
395
396 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
397 stat->trans_table = stat->freq_table + count;
398 #endif
399 i = 0;
400 cpufreq_for_each_valid_entry(pos, table)
401 if (freq_table_get_index(stat, pos->frequency) == -1)
402 stat->freq_table[i++] = pos->frequency;
403 stat->state_num = i;
404 spin_lock(&cpufreq_stats_lock);
405 stat->last_time = get_jiffies_64();
406 stat->last_index = freq_table_get_index(stat, policy->cur);
407 spin_unlock(&cpufreq_stats_lock);
408 return 0;
409 error_alloc:
410 sysfs_remove_group(&policy->kobj, &stats_attr_group);
411 error_out:
412 kfree(stat);
413 per_cpu(cpufreq_stats_table, cpu) = NULL;
414 return ret;
415 }
416
cpufreq_stats_update_policy_cpu(struct cpufreq_policy * policy)417 static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
418 {
419 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table,
420 policy->last_cpu);
421
422 pr_debug("Updating stats_table for new_cpu %u from last_cpu %u\n",
423 policy->cpu, policy->last_cpu);
424 per_cpu(cpufreq_stats_table, policy->cpu) = per_cpu(cpufreq_stats_table,
425 policy->last_cpu);
426 per_cpu(cpufreq_stats_table, policy->last_cpu) = NULL;
427 stat->cpu = policy->cpu;
428 }
429
/*
 * cpufreq_powerstats_create - build the per-cpu current-draw table
 * @cpu:   CPU to build the table for
 * @table: driver frequency table
 * @count: number of valid entries in @table
 *
 * Reads the "current" u32 array from the CPU's device-tree node.  On
 * read failure the table is dropped and the per-cpu pointer is set to
 * NULL; a missing DT node leaves a table of zero currents installed.
 */
static void cpufreq_powerstats_create(unsigned int cpu,
		struct cpufreq_frequency_table *table, int count) {
	unsigned int alloc_size, i = 0, ret = 0;
	struct cpufreq_power_stats *powerstats;
	struct cpufreq_frequency_table *pos;
	struct device_node *cpu_node;
	char device_path[16];

	powerstats = kzalloc(sizeof(struct cpufreq_power_stats),
			GFP_KERNEL);
	if (!powerstats)
		return;

	/* One buffer: current-draw values followed by the freq table. */
	alloc_size = count * sizeof(unsigned int) +
		count * sizeof(unsigned int);
	powerstats->curr = kzalloc(alloc_size, GFP_KERNEL);
	if (!powerstats->curr) {
		kfree(powerstats);
		return;
	}
	powerstats->freq_table = powerstats->curr + count;

	spin_lock(&cpufreq_stats_lock);
	i = 0;
	cpufreq_for_each_valid_entry(pos, table)
		powerstats->freq_table[i++] = pos->frequency;
	powerstats->state_num = i;

	/* NOTE(review): assumes DT nodes are named /cpus/cpu@<n> — confirm */
	snprintf(device_path, sizeof(device_path), "/cpus/cpu@%d", cpu);
	cpu_node = of_find_node_by_path(device_path);
	if (cpu_node) {
		ret = of_property_read_u32_array(cpu_node, "current",
				powerstats->curr, count);
		if (ret) {
			/* kfree is safe under a spinlock */
			kfree(powerstats->curr);
			kfree(powerstats);
			powerstats = NULL;
		}
	}
	per_cpu(cpufreq_power_stats, cpu) = powerstats;
	spin_unlock(&cpufreq_stats_lock);
}
474
/*
 * compare_for_sort - ascending comparator for sort() over unsigned ints
 *
 * Returns -1, 0 or 1 as *lhs_ptr is less than, equal to, or greater
 * than *rhs_ptr.
 */
static int compare_for_sort(const void *lhs_ptr, const void *rhs_ptr)
{
	unsigned int a = *(const unsigned int *)lhs_ptr;
	unsigned int b = *(const unsigned int *)rhs_ptr;

	return (a > b) - (a < b);
}
485
check_all_freq_table(unsigned int freq)486 static bool check_all_freq_table(unsigned int freq)
487 {
488 int i;
489 for (i = 0; i < all_freq_table->table_size; i++) {
490 if (freq == all_freq_table->freq_table[i])
491 return true;
492 }
493 return false;
494 }
495
create_all_freq_table(void)496 static void create_all_freq_table(void)
497 {
498 all_freq_table = kzalloc(sizeof(struct all_freq_table),
499 GFP_KERNEL);
500 if (!all_freq_table)
501 pr_warn("could not allocate memory for all_freq_table\n");
502 return;
503 }
504
add_all_freq_table(unsigned int freq)505 static void add_all_freq_table(unsigned int freq)
506 {
507 unsigned int size;
508 size = sizeof(unsigned int) * (all_freq_table->table_size + 1);
509 all_freq_table->freq_table = krealloc(all_freq_table->freq_table,
510 size, GFP_ATOMIC);
511 if (IS_ERR(all_freq_table->freq_table)) {
512 pr_warn("Could not reallocate memory for freq_table\n");
513 all_freq_table->freq_table = NULL;
514 return;
515 }
516 all_freq_table->freq_table[all_freq_table->table_size++] = freq;
517 }
518
cpufreq_allstats_create(unsigned int cpu,struct cpufreq_frequency_table * table,int count)519 static void cpufreq_allstats_create(unsigned int cpu,
520 struct cpufreq_frequency_table *table, int count)
521 {
522 int i , j = 0;
523 unsigned int alloc_size;
524 struct all_cpufreq_stats *all_stat;
525 bool sort_needed = false;
526
527 all_stat = kzalloc(sizeof(struct all_cpufreq_stats),
528 GFP_KERNEL);
529 if (!all_stat) {
530 pr_warn("Cannot allocate memory for cpufreq stats\n");
531 return;
532 }
533
534 /*Allocate memory for freq table per cpu as well as clockticks per freq*/
535 alloc_size = count * sizeof(int) + count * sizeof(cputime64_t);
536 all_stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
537 if (!all_stat->time_in_state) {
538 pr_warn("Cannot allocate memory for cpufreq time_in_state\n");
539 kfree(all_stat);
540 all_stat = NULL;
541 return;
542 }
543 all_stat->freq_table = (unsigned int *)
544 (all_stat->time_in_state + count);
545
546 spin_lock(&cpufreq_stats_lock);
547 for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
548 unsigned int freq = table[i].frequency;
549 if (freq == CPUFREQ_ENTRY_INVALID)
550 continue;
551 all_stat->freq_table[j++] = freq;
552 if (all_freq_table && !check_all_freq_table(freq)) {
553 add_all_freq_table(freq);
554 sort_needed = true;
555 }
556 }
557 if (sort_needed)
558 sort(all_freq_table->freq_table, all_freq_table->table_size,
559 sizeof(unsigned int), &compare_for_sort, NULL);
560 all_stat->state_num = j;
561 per_cpu(all_cpufreq_stats, cpu) = all_stat;
562 spin_unlock(&cpufreq_stats_lock);
563 }
564
cpufreq_stats_create_table(unsigned int cpu)565 static void cpufreq_stats_create_table(unsigned int cpu)
566 {
567 struct cpufreq_policy *policy;
568 struct cpufreq_frequency_table *table, *pos;
569 int count = 0;
570 /*
571 * "likely(!policy)" because normally cpufreq_stats will be registered
572 * before cpufreq driver
573 */
574 policy = cpufreq_cpu_get(cpu);
575 if (likely(!policy))
576 return;
577
578 table = cpufreq_frequency_get_table(policy->cpu);
579 if (likely(table)) {
580 cpufreq_for_each_valid_entry(pos, table)
581 count++;
582
583 if (!per_cpu(all_cpufreq_stats, cpu))
584 cpufreq_allstats_create(cpu, table, count);
585
586 if (!per_cpu(cpufreq_power_stats, cpu))
587 cpufreq_powerstats_create(cpu, table, count);
588
589 __cpufreq_stats_create_table(policy, table, count);
590 }
591 cpufreq_cpu_put(policy);
592 }
593
/*
 * cpufreq_stat_notifier_policy - cpufreq policy notifier callback
 *
 * Follows policy-owner changes, lazily builds the cumulative and power
 * tables the first time a CPU's frequency table becomes visible, and
 * creates/frees the per-policy stats table as policies come and go.
 */
static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
		unsigned long val, void *data)
{
	int ret = 0, count = 0;
	struct cpufreq_policy *policy = data;
	struct cpufreq_frequency_table *table, *pos;
	unsigned int cpu_num, cpu = policy->cpu;

	if (val == CPUFREQ_UPDATE_POLICY_CPU) {
		cpufreq_stats_update_policy_cpu(policy);
		return 0;
	}

	table = cpufreq_frequency_get_table(cpu);
	if (!table)
		return 0;

	/* count valid (non-INVALID) entries in the driver table */
	cpufreq_for_each_valid_entry(pos, table)
		count++;

	if (!per_cpu(all_cpufreq_stats, cpu))
		cpufreq_allstats_create(cpu, table, count);

	/* power tables are per-cpu even when CPUs share one policy */
	for_each_possible_cpu(cpu_num) {
		if (!per_cpu(cpufreq_power_stats, cpu_num))
			cpufreq_powerstats_create(cpu_num, table, count);
	}

	if (val == CPUFREQ_CREATE_POLICY)
		ret = __cpufreq_stats_create_table(policy, table, count);
	else if (val == CPUFREQ_REMOVE_POLICY)
		__cpufreq_stats_free_table(policy);

	return ret;
}
629
/*
 * cpufreq_stat_notifier_trans - cpufreq transition notifier callback
 *
 * On POSTCHANGE, charges the elapsed time to the outgoing frequency
 * via cpufreq_stats_update(), then records the new frequency index and
 * bumps the transition counters.
 */
static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
		unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_stats *stat;
	int old_index, new_index;

	if (val != CPUFREQ_POSTCHANGE)
		return 0;

	stat = per_cpu(cpufreq_stats_table, freq->cpu);
	if (!stat)
		return 0;

	/* last_index is unsigned; a stored -1 wraps but compares below */
	old_index = stat->last_index;
	new_index = freq_table_get_index(stat, freq->new);

	/* We can't do stat->time_in_state[-1]= .. */
	if (old_index == -1 || new_index == -1)
		return 0;

	cpufreq_stats_update(freq->cpu);

	if (old_index == new_index)
		return 0;

	spin_lock(&cpufreq_stats_lock);
	stat->last_index = new_index;
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	stat->trans_table[old_index * stat->max_state + new_index]++;
#endif
	stat->total_trans++;
	spin_unlock(&cpufreq_stats_lock);
	return 0;
}
665
/* Registered with the cpufreq core at module init. */
static struct notifier_block notifier_policy_block = {
	.notifier_call = cpufreq_stat_notifier_policy
};

static struct notifier_block notifier_trans_block = {
	.notifier_call = cpufreq_stat_notifier_trans
};
673
/*
 * cpufreq_stats_init - register notifiers and create sysfs files
 *
 * Registers the policy notifier, builds tables for CPUs that already
 * have policies, then registers the transition notifier (unwinding on
 * failure).  Global sysfs file creation failures are only warned
 * about; the module still loads.
 */
static int __init cpufreq_stats_init(void)
{
	int ret;
	unsigned int cpu;

	spin_lock_init(&cpufreq_stats_lock);
	ret = cpufreq_register_notifier(&notifier_policy_block,
			CPUFREQ_POLICY_NOTIFIER);
	if (ret)
		return ret;

	for_each_online_cpu(cpu)
		cpufreq_stats_create_table(cpu);

	ret = cpufreq_register_notifier(&notifier_trans_block,
			CPUFREQ_TRANSITION_NOTIFIER);
	if (ret) {
		cpufreq_unregister_notifier(&notifier_policy_block,
				CPUFREQ_POLICY_NOTIFIER);
		for_each_online_cpu(cpu)
			cpufreq_stats_free_table(cpu);
		return ret;
	}

	create_all_freq_table();
	/* take a reference on the global cpufreq kobject for our files */
	WARN_ON(cpufreq_get_global_kobject());
	ret = sysfs_create_file(cpufreq_global_kobject,
			&_attr_all_time_in_state.attr);
	if (ret)
		pr_warn("Cannot create sysfs file for cpufreq stats\n");

	ret = sysfs_create_file(cpufreq_global_kobject,
			&_attr_current_in_state.attr);
	if (ret)
		pr_warn("Cannot create sysfs file for cpufreq current stats\n");

	return 0;
}
/*
 * cpufreq_stats_exit - module teardown, reverse order of init
 *
 * Unregisters both notifiers before freeing any table so no callback
 * can race with the frees, then drops the global kobject reference.
 */
static void __exit cpufreq_stats_exit(void)
{
	unsigned int cpu;

	cpufreq_unregister_notifier(&notifier_policy_block,
			CPUFREQ_POLICY_NOTIFIER);
	cpufreq_unregister_notifier(&notifier_trans_block,
			CPUFREQ_TRANSITION_NOTIFIER);
	for_each_online_cpu(cpu)
		cpufreq_stats_free_table(cpu);
	cpufreq_allstats_free();
	cpufreq_powerstats_free();
	/* drop the reference taken via cpufreq_get_global_kobject() */
	cpufreq_put_global_kobject();
}
726
/* Module metadata and entry points. */
MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>");
MODULE_DESCRIPTION("'cpufreq_stats' - A driver to export cpufreq stats "
		"through sysfs filesystem");
MODULE_LICENSE("GPL");

module_init(cpufreq_stats_init);
module_exit(cpufreq_stats_exit);
734