/* drivers/cpufreq/cpufreq_times.c
 *
 * Copyright (C) 2018 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/cpufreq.h>
#include <linux/cpufreq_times.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <trace/hooks/cpufreq.h>

static DEFINE_SPINLOCK(task_time_in_state_lock); /* task->time_in_state */

/**
 * struct cpu_freqs - per-cpu frequency information
 * @offset: start of these freqs' stats in task time_in_state array
 * @max_state: number of entries in freq_table
 * @last_index: index in freq_table of last frequency switched to
 * @freq_table: list of available frequencies
 */
struct cpu_freqs {
	unsigned int offset;
	unsigned int max_state;
	unsigned int last_index;
	unsigned int freq_table[];
};
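
/*
 * Per-policy frequency stats, shared by every CPU in a policy; an entry
 * stays NULL until cpufreq_times_create_policy() runs for that CPU.
 */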
static struct cpu_freqs *all_freqs[NR_CPUS];
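
/* First unused slot in each task's time_in_state array */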
static unsigned int next_offset;
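
/**
 * cpufreq_task_times_init - clear time_in_state for a new task
 * @p: the task to initialize
 *
 * Resets @p's time_in_state pointer under the lock and zeroes max_state,
 * so the task starts with no per-frequency accounting.
 */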
void cpufreq_task_times_init(struct task_struct *p)
{
	unsigned long flags;

	spin_lock_irqsave(&task_time_in_state_lock, flags);
	p->time_in_state = NULL;
	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
	p->max_state = 0;
}
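
/**
 * cpufreq_task_times_alloc - allocate a task's time_in_state array
 * @p: the task to allocate for
 *
 * One array covers all CPUs' frequencies, sized by the slots handed out
 * so far (next_offset). If the allocation fails, the task simply records
 * no time until cpufreq_acct_update_power() regrows the array on demand.
 */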
void cpufreq_task_times_alloc(struct task_struct *p)
{
	void *temp;
	unsigned long flags;
	unsigned int max_state = READ_ONCE(next_offset);

	/* We use one array to avoid multiple allocs per task */
	temp = kcalloc(max_state, sizeof(p->time_in_state[0]), GFP_ATOMIC);
	if (!temp)
		return;

	spin_lock_irqsave(&task_time_in_state_lock, flags);
	p->time_in_state = temp;
	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
	p->max_state = max_state;
}

/*
 * Grow p->time_in_state to cover frequency slots handed out after the
 * array was last sized; the added tail starts zeroed.
 * Caller must hold task_time_in_state_lock.
 */
static int cpufreq_task_times_realloc_locked(struct task_struct *p)
{
	void *temp;
	unsigned int max_state = READ_ONCE(next_offset);

	temp = krealloc(p->time_in_state, max_state * sizeof(u64), GFP_ATOMIC);
	if (!temp)
		return -ENOMEM;
	p->time_in_state = temp;
	memset(p->time_in_state + p->max_state, 0,
	       (max_state - p->max_state) * sizeof(u64));
	p->max_state = max_state;
	return 0;
}
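
/**
 * cpufreq_task_times_exit - free a task's time_in_state array
 * @p: the exiting task
 *
 * Clears the pointer under the lock before freeing, so a concurrent
 * cpufreq_acct_update_power() never touches freed memory.
 */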
void cpufreq_task_times_exit(struct task_struct *p)
{
	unsigned long flags;
	void *temp;

	if (!p->time_in_state)
		return;

	spin_lock_irqsave(&task_time_in_state_lock, flags);
	temp = p->time_in_state;
	p->time_in_state = NULL;
	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
	kfree(temp);
}
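
/*
 * Show handler for the per-task time_in_state procfs file. For each
 * policy it prints a "cpu<N>" header followed by one
 * "<frequency in kHz> <time in USER_HZ clock ticks>" line per state.
 * Illustrative output (values are made up):
 *
 *	cpu0
 *	300000 421
 *	403200 75
 *	cpu4
 *	825600 12
 */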
int proc_time_in_state_show(struct seq_file *m, struct pid_namespace *ns,
	struct pid *pid, struct task_struct *p)
{
	unsigned int cpu, i;
	u64 cputime;
	unsigned long flags;
	struct cpu_freqs *freqs;
	struct cpu_freqs *last_freqs = NULL;

	spin_lock_irqsave(&task_time_in_state_lock, flags);
	for_each_possible_cpu(cpu) {
		freqs = all_freqs[cpu];
		if (!freqs || freqs == last_freqs)
			continue;
		last_freqs = freqs;

		seq_printf(m, "cpu%u\n", cpu);
		for (i = 0; i < freqs->max_state; i++) {
			cputime = 0;
			if (freqs->offset + i < p->max_state &&
			    p->time_in_state)
				cputime = p->time_in_state[freqs->offset + i];
			seq_printf(m, "%u %lu\n", freqs->freq_table[i],
				   (unsigned long)nsec_to_clock_t(cputime));
		}
	}
	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
	return 0;
}
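
/**
 * cpufreq_acct_update_power - account time at the current frequency
 * @p: the task being charged
 * @cputime: runtime to add, in nanoseconds
 *
 * Skips idle and exiting tasks; grows @p's time_in_state on demand when
 * this CPU's slots lie beyond p->max_state.
 */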
void cpufreq_acct_update_power(struct task_struct *p, u64 cputime)
{
	unsigned long flags;
	unsigned int state;
	struct cpu_freqs *freqs = all_freqs[task_cpu(p)];

	if (!freqs || is_idle_task(p) || p->flags & PF_EXITING)
		return;

	state = freqs->offset + READ_ONCE(freqs->last_index);

	spin_lock_irqsave(&task_time_in_state_lock, flags);
	if ((state < p->max_state || !cpufreq_task_times_realloc_locked(p)) &&
	    p->time_in_state)
		p->time_in_state[state] += cputime;
	spin_unlock_irqrestore(&task_time_in_state_lock, flags);

	trace_android_vh_cpufreq_acct_update_power(cputime, p, state);
}
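
/* Return the index of @freq in @freqs->freq_table, or -1 if not found */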
static int cpufreq_times_get_index(struct cpu_freqs *freqs, unsigned int freq)
{
	int index;

	for (index = 0; index < freqs->max_state; ++index) {
		if (freqs->freq_table[index] == freq)
			return index;
	}
	return -1;
}
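
/**
 * cpufreq_times_create_policy - set up stats for a new cpufreq policy
 * @policy: the policy being initialized
 *
 * Counts the valid entries in the policy's frequency table, snapshots
 * them into a cpu_freqs, reserves that many slots starting at
 * next_offset, and publishes the result for every related CPU.
 */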
void cpufreq_times_create_policy(struct cpufreq_policy *policy)
{
	int cpu, index = 0;
	unsigned int count = 0;
	struct cpufreq_frequency_table *pos, *table;
	struct cpu_freqs *freqs;
	void *tmp;

	if (all_freqs[policy->cpu])
		return;

	table = policy->freq_table;
	if (!table)
		return;

	cpufreq_for_each_valid_entry(pos, table)
		count++;

	tmp = kzalloc(sizeof(*freqs) + sizeof(freqs->freq_table[0]) * count,
		      GFP_KERNEL);
	if (!tmp)
		return;

	freqs = tmp;
	freqs->max_state = count;

	cpufreq_for_each_valid_entry(pos, table)
		freqs->freq_table[index++] = pos->frequency;

	index = cpufreq_times_get_index(freqs, policy->cur);
	if (index >= 0)
		WRITE_ONCE(freqs->last_index, index);

	freqs->offset = next_offset;
	WRITE_ONCE(next_offset, freqs->offset + count);
	for_each_cpu(cpu, policy->related_cpus)
		all_freqs[cpu] = freqs;
}
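
/**
 * cpufreq_times_record_transition - note a frequency change
 * @policy: the policy that switched frequency
 * @new_freq: the frequency switched to, in kHz
 *
 * Updates last_index so later cpufreq_acct_update_power() calls charge
 * time to the new frequency's slot.
 */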
void cpufreq_times_record_transition(struct cpufreq_policy *policy,
	unsigned int new_freq)
{
	int index;
	struct cpu_freqs *freqs = all_freqs[policy->cpu];

	if (!freqs)
		return;

	index = cpufreq_times_get_index(freqs, new_freq);
	if (index >= 0)
		WRITE_ONCE(freqs->last_index, index);
}