1 /* drivers/cpufreq/cpufreq_times.c
2 *
3 * Copyright (C) 2018 Google, Inc.
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 */
15
16 #include <linux/cpufreq.h>
17 #include <linux/cpufreq_times.h>
18 #include <linux/jiffies.h>
19 #include <linux/sched.h>
20 #include <linux/seq_file.h>
21 #include <linux/slab.h>
22 #include <linux/spinlock.h>
23 #include <linux/threads.h>
24
25 static DEFINE_SPINLOCK(task_time_in_state_lock); /* task->time_in_state */
26
27 /**
28 * struct cpu_freqs - per-cpu frequency information
29 * @offset: start of these freqs' stats in task time_in_state array
30 * @max_state: number of entries in freq_table
31 * @last_index: index in freq_table of last frequency switched to
32 * @freq_table: list of available frequencies
33 */
34 struct cpu_freqs {
35 unsigned int offset;
36 unsigned int max_state;
37 unsigned int last_index;
38 unsigned int freq_table[0];
39 };
40
/* Per-CPU pointer to the owning policy's cpu_freqs record; all CPUs in
 * one policy alias the same record. NULL until the policy is registered
 * by cpufreq_times_create_policy(). */
static struct cpu_freqs *all_freqs[NR_CPUS];

/* Total number of frequency states registered across all policies so
 * far; the next policy's slice of each task's time_in_state array
 * starts at this offset. */
static unsigned int next_offset;
44
/**
 * cpufreq_task_times_init - reset a task's time_in_state accounting
 * @p: task being initialized
 *
 * Clears the per-task stats pointer under task_time_in_state_lock so
 * concurrent readers never observe a stale pointer. max_state is
 * cleared outside the lock; readers in this file only trust max_state
 * after also checking time_in_state != NULL.
 */
void cpufreq_task_times_init(struct task_struct *p)
{
	unsigned long flags;

	spin_lock_irqsave(&task_time_in_state_lock, flags);
	p->time_in_state = NULL;
	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
	p->max_state = 0;
}
54
/**
 * cpufreq_task_times_alloc - allocate a task's time_in_state array
 * @p: task to allocate stats storage for
 *
 * Sizes the array from next_offset, i.e. the total number of frequency
 * states registered so far across all policies. On allocation failure
 * the task is simply left without stats (time_in_state stays NULL),
 * which every reader in this file tolerates.
 * NOTE(review): GFP_ATOMIC suggests this runs in atomic context
 * (presumably the fork path) — confirm against the caller.
 */
void cpufreq_task_times_alloc(struct task_struct *p)
{
	void *temp;
	unsigned long flags;
	unsigned int max_state = READ_ONCE(next_offset);

	/* We use one array to avoid multiple allocs per task */
	temp = kcalloc(max_state, sizeof(p->time_in_state[0]), GFP_ATOMIC);
	if (!temp)
		return;

	/* Publish the pointer under the lock; max_state is set after,
	 * matching readers that check time_in_state before max_state. */
	spin_lock_irqsave(&task_time_in_state_lock, flags);
	p->time_in_state = temp;
	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
	p->max_state = max_state;
}
71
/*
 * Grow p->time_in_state to cover all frequency states registered so
 * far (next_offset). Caller must hold task_time_in_state_lock.
 *
 * Returns 0 on success, -ENOMEM on allocation failure; on failure the
 * existing array is left untouched and still valid (krealloc does not
 * free the original on failure).
 */
static int cpufreq_task_times_realloc_locked(struct task_struct *p)
{
	void *temp;
	unsigned int max_state = READ_ONCE(next_offset);

	temp = krealloc(p->time_in_state, max_state * sizeof(u64), GFP_ATOMIC);
	if (!temp)
		return -ENOMEM;
	p->time_in_state = temp;
	/* Zero only the newly appended tail; existing counts are kept. */
	memset(p->time_in_state + p->max_state, 0,
		(max_state - p->max_state) * sizeof(u64));
	p->max_state = max_state;
	return 0;
}
87
/**
 * cpufreq_task_times_exit - release a task's time_in_state array
 * @p: the exiting task
 *
 * Detaches the array under the lock so concurrent readers cannot
 * dereference freed memory, then frees it outside the lock to keep the
 * spinlock hold time short.
 */
void cpufreq_task_times_exit(struct task_struct *p)
{
	unsigned long flags;
	void *temp;

	/* Unlocked fast path: nothing was ever allocated for this task. */
	if (!p->time_in_state)
		return;

	spin_lock_irqsave(&task_time_in_state_lock, flags);
	temp = p->time_in_state;
	p->time_in_state = NULL;
	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
	kfree(temp);
}
102
/**
 * proc_time_in_state_show - seq_file handler for per-task freq stats
 * @m: destination seq_file
 * @ns: pid namespace of the reader (unused here)
 * @pid: pid of @p (unused here)
 * @p: task whose per-frequency residency is printed
 *
 * For each distinct policy, prints a "cpu<N>" header followed by one
 * "<freq> <time>" line per frequency, time converted from nanoseconds
 * to clock_t units.
 *
 * NOTE(review): the last_freqs comparison only suppresses a duplicate
 * when a policy's CPUs are numbered consecutively; interleaved
 * policies would be printed more than once — confirm that cannot
 * happen on supported topologies.
 */
int proc_time_in_state_show(struct seq_file *m, struct pid_namespace *ns,
	struct pid *pid, struct task_struct *p)
{
	unsigned int cpu, i;
	u64 cputime;
	unsigned long flags;
	struct cpu_freqs *freqs;
	struct cpu_freqs *last_freqs = NULL;

	spin_lock_irqsave(&task_time_in_state_lock, flags);
	for_each_possible_cpu(cpu) {
		freqs = all_freqs[cpu];
		if (!freqs || freqs == last_freqs)
			continue;
		last_freqs = freqs;

		seq_printf(m, "cpu%u\n", cpu);
		for (i = 0; i < freqs->max_state; i++) {
			/* A state beyond p->max_state just hasn't been
			 * accounted for this task yet; report zero. */
			cputime = 0;
			if (freqs->offset + i < p->max_state &&
				p->time_in_state)
				cputime = p->time_in_state[freqs->offset + i];
			seq_printf(m, "%u %lu\n", freqs->freq_table[i],
				(unsigned long)nsec_to_clock_t(cputime));
		}
	}
	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
	return 0;
}
132
/**
 * cpufreq_acct_update_power - charge @cputime to @p's current freq state
 * @p: task being charged
 * @cputime: time delta to add, in nanoseconds
 *
 * Skips idle and exiting tasks and CPUs whose policy has not been
 * registered yet. If the task's array predates newly registered
 * policies, it is grown under the lock (best effort: the sample is
 * silently dropped on -ENOMEM).
 */
void cpufreq_acct_update_power(struct task_struct *p, u64 cputime)
{
	unsigned long flags;
	unsigned int state;
	struct cpu_freqs *freqs = all_freqs[task_cpu(p)];

	if (!freqs || is_idle_task(p) || p->flags & PF_EXITING)
		return;

	/* Global index of the CPU's current frequency state: this
	 * policy's slice offset plus the last recorded table index. */
	state = freqs->offset + READ_ONCE(freqs->last_index);

	/* Account only if the state fits the array, growing it first if
	 * needed; the realloc helper requires this lock held. */
	spin_lock_irqsave(&task_time_in_state_lock, flags);
	if ((state < p->max_state || !cpufreq_task_times_realloc_locked(p)) &&
		p->time_in_state)
		p->time_in_state[state] += cputime;
	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
}
150
cpufreq_times_get_index(struct cpu_freqs * freqs,unsigned int freq)151 static int cpufreq_times_get_index(struct cpu_freqs *freqs, unsigned int freq)
152 {
153 int index;
154 for (index = 0; index < freqs->max_state; ++index) {
155 if (freqs->freq_table[index] == freq)
156 return index;
157 }
158 return -1;
159 }
160
/**
 * cpufreq_times_create_policy - register a policy's freq table for stats
 * @policy: newly created cpufreq policy
 *
 * Builds a cpu_freqs record holding the policy's valid frequencies,
 * assigns it the next free slice [offset, offset + count) of every
 * task's time_in_state array, and publishes it for all related CPUs.
 * A policy whose leader CPU already has a record is ignored, so the
 * table is built at most once per policy.
 *
 * NOTE(review): next_offset and all_freqs[] are written without a
 * lock; this presumably relies on policy creation being serialized by
 * the cpufreq core — confirm before calling from any other context.
 */
void cpufreq_times_create_policy(struct cpufreq_policy *policy)
{
	int cpu, index = 0;
	unsigned int count = 0;
	struct cpufreq_frequency_table *pos, *table;
	struct cpu_freqs *freqs;
	void *tmp;

	if (all_freqs[policy->cpu])
		return;

	table = policy->freq_table;
	if (!table)
		return;

	/* First pass: count valid entries to size a single allocation. */
	cpufreq_for_each_valid_entry(pos, table)
		count++;

	tmp = kzalloc(sizeof(*freqs) + sizeof(freqs->freq_table[0]) * count,
		GFP_KERNEL);
	if (!tmp)
		return;

	freqs = tmp;
	freqs->max_state = count;

	/* Second pass: record the frequencies themselves. */
	cpufreq_for_each_valid_entry(pos, table)
		freqs->freq_table[index++] = pos->frequency;

	/* Seed last_index from the current frequency, if known. */
	index = cpufreq_times_get_index(freqs, policy->cur);
	if (index >= 0)
		WRITE_ONCE(freqs->last_index, index);

	/* Claim this policy's slice of every task's stats array, then
	 * publish the record for each CPU sharing this policy. */
	freqs->offset = next_offset;
	WRITE_ONCE(next_offset, freqs->offset + count);
	for_each_cpu(cpu, policy->related_cpus)
		all_freqs[cpu] = freqs;
}
199
cpufreq_times_record_transition(struct cpufreq_policy * policy,unsigned int new_freq)200 void cpufreq_times_record_transition(struct cpufreq_policy *policy,
201 unsigned int new_freq)
202 {
203 int index;
204 struct cpu_freqs *freqs = all_freqs[policy->cpu];
205 if (!freqs)
206 return;
207
208 index = cpufreq_times_get_index(freqs, new_freq);
209 if (index >= 0)
210 WRITE_ONCE(freqs->last_index, index);
211 }
212