/*
 * This file provides ACPI-based P-state support.  The module works with
 * the generic cpufreq infrastructure.  Most of the code is based on the
 * i386 version (arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c).
 *
 * Copyright (C) 2005 Intel Corp
 *      Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pal.h>

#include <linux/acpi.h>
#include <acpi/processor.h>

MODULE_AUTHOR("Venkatesh Pallipadi");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
MODULE_LICENSE("GPL");


struct cpufreq_acpi_io {
        struct acpi_processor_performance       acpi_data;
        unsigned int                            resume;
};

static struct cpufreq_acpi_io   *acpi_io_data[NR_CPUS];

static struct cpufreq_driver acpi_cpufreq_driver;

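/*
 * Ask PAL to switch the current CPU to the P-state encoded by 'value'
 * (the 'control' value from the ACPI _PSS table).
 */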
static int
processor_set_pstate (
        u32     value)
{
        s64 retval;

        pr_debug("processor_set_pstate\n");

        retval = ia64_pal_set_pstate((u64)value);

        if (retval) {
                pr_debug("Failed to set freq to 0x%x, with error 0x%lx\n",
                        value, retval);
                return -ENODEV;
        }
        return (int)retval;
}

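/*
 * Query PAL for the instantaneous P-state index of the current CPU and
 * return it through 'value'.
 */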
static int
processor_get_pstate (
        u32     *value)
{
        u64     pstate_index = 0;
        s64     retval;

        pr_debug("processor_get_pstate\n");

        retval = ia64_pal_get_pstate(&pstate_index,
                                     PAL_GET_PSTATE_TYPE_INSTANT);
        *value = (u32) pstate_index;

        if (retval)
                pr_debug("Failed to get current freq with "
                        "error 0x%lx, idx 0x%x\n", retval, *value);

        return (int)retval;
}

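/*
 * Map a P-state status value returned by PAL back to the corresponding
 * core frequency (in MHz) from the ACPI _PSS table.  Values that match
 * no known state fall back to the last (lowest-frequency) entry.
 */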
/* To be used only after data->acpi_data is initialized */
static unsigned
extract_clock (
        struct cpufreq_acpi_io *data,
        unsigned value,
        unsigned int cpu)
{
        unsigned long i;

        pr_debug("extract_clock\n");

        for (i = 0; i < data->acpi_data.state_count; i++) {
                if (value == data->acpi_data.states[i].status)
                        return data->acpi_data.states[i].core_frequency;
        }
        return data->acpi_data.states[i-1].core_frequency;
}

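/*
 * Return the current frequency of 'cpu' in kHz.  The calling task is
 * temporarily migrated to that CPU so the PAL query runs locally;
 * 0 is returned if the query fails.
 */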
static unsigned int
processor_get_freq (
        struct cpufreq_acpi_io  *data,
        unsigned int            cpu)
{
        int                     ret = 0;
        u32                     value = 0;
        cpumask_t               saved_mask;
        unsigned long           clock_freq;

        pr_debug("processor_get_freq\n");

        saved_mask = current->cpus_allowed;
        set_cpus_allowed_ptr(current, cpumask_of(cpu));
        if (smp_processor_id() != cpu)
                goto migrate_end;

        /* processor_get_pstate gets the instantaneous frequency */
        ret = processor_get_pstate(&value);

        if (ret) {
                set_cpus_allowed_ptr(current, &saved_mask);
                printk(KERN_WARNING "get performance failed with error %d\n",
                       ret);
                ret = 0;
                goto migrate_end;
        }
        clock_freq = extract_clock(data, value, cpu);
        ret = (clock_freq*1000);

migrate_end:
        set_cpus_allowed_ptr(current, &saved_mask);
        return ret;
}

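/*
 * Transition 'policy->cpu' to the P-state with index 'state'.  The
 * calling task is migrated to that CPU, the state's ACPI control value
 * is written via PAL, and the cached current state is updated on
 * success.
 */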
static int
processor_set_freq (
        struct cpufreq_acpi_io  *data,
        struct cpufreq_policy   *policy,
        int                     state)
{
        int                     ret = 0;
        u32                     value = 0;
        cpumask_t               saved_mask;
        int                     retval;

        pr_debug("processor_set_freq\n");

        saved_mask = current->cpus_allowed;
        set_cpus_allowed_ptr(current, cpumask_of(policy->cpu));
        if (smp_processor_id() != policy->cpu) {
                retval = -EAGAIN;
                goto migrate_end;
        }

        if (state == data->acpi_data.state) {
                if (unlikely(data->resume)) {
                        pr_debug("Called after resume, resetting to P%d\n", state);
                        data->resume = 0;
                } else {
                        pr_debug("Already at target state (P%d)\n", state);
                        retval = 0;
                        goto migrate_end;
                }
        }

        pr_debug("Transitioning from P%d to P%d\n",
                data->acpi_data.state, state);

        /*
         * First we write the target state's 'control' value to the
         * control_register.
         */

        value = (u32) data->acpi_data.states[state].control;

        pr_debug("Transitioning to state: 0x%08x\n", value);

        ret = processor_set_pstate(value);
        if (ret) {
                printk(KERN_WARNING "Transition failed with error %d\n", ret);
                retval = -ENODEV;
                goto migrate_end;
        }

        data->acpi_data.state = state;

        retval = 0;

migrate_end:
        set_cpus_allowed_ptr(current, &saved_mask);
        return (retval);
}

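/* cpufreq ->get() callback: report the current frequency of 'cpu' in kHz. */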
static unsigned int
acpi_cpufreq_get (
        unsigned int            cpu)
{
        struct cpufreq_acpi_io *data = acpi_io_data[cpu];

        pr_debug("acpi_cpufreq_get\n");

        return processor_get_freq(data, cpu);
}

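/*
 * cpufreq ->target_index() callback: switch policy->cpu to the
 * frequency-table entry selected by 'index'.
 */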
static int
acpi_cpufreq_target (
        struct cpufreq_policy   *policy,
        unsigned int            index)
{
        return processor_set_freq(acpi_io_data[policy->cpu], policy, index);
}

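/*
 * cpufreq ->init() callback: fetch the per-CPU ACPI performance data,
 * check that usable P-states exist, build the frequency table and
 * register it with the cpufreq core.
 */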
static int
acpi_cpufreq_cpu_init (
        struct cpufreq_policy   *policy)
{
        unsigned int            i;
        unsigned int            cpu = policy->cpu;
        struct cpufreq_acpi_io  *data;
        unsigned int            result = 0;
        struct cpufreq_frequency_table *freq_table;

        pr_debug("acpi_cpufreq_cpu_init\n");

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return (-ENOMEM);

        acpi_io_data[cpu] = data;

        result = acpi_processor_register_performance(&data->acpi_data, cpu);

        if (result)
                goto err_free;

        /* capability check */
        if (data->acpi_data.state_count <= 1) {
                pr_debug("No P-States\n");
                result = -ENODEV;
                goto err_unreg;
        }

        if ((data->acpi_data.control_register.space_id !=
                                        ACPI_ADR_SPACE_FIXED_HARDWARE) ||
            (data->acpi_data.status_register.space_id !=
                                        ACPI_ADR_SPACE_FIXED_HARDWARE)) {
                pr_debug("Unsupported address space [%d, %d]\n",
                        (u32) (data->acpi_data.control_register.space_id),
                        (u32) (data->acpi_data.status_register.space_id));
                result = -ENODEV;
                goto err_unreg;
        }

        /* alloc freq_table */
        freq_table = kzalloc(sizeof(*freq_table) *
                             (data->acpi_data.state_count + 1),
                             GFP_KERNEL);
        if (!freq_table) {
                result = -ENOMEM;
                goto err_unreg;
        }

        /* detect transition latency */
        policy->cpuinfo.transition_latency = 0;
        for (i = 0; i < data->acpi_data.state_count; i++) {
                if ((data->acpi_data.states[i].transition_latency * 1000) >
                                policy->cpuinfo.transition_latency) {
                        policy->cpuinfo.transition_latency =
                                data->acpi_data.states[i].transition_latency * 1000;
                }
        }

        /* table init */
        for (i = 0; i <= data->acpi_data.state_count; i++) {
                if (i < data->acpi_data.state_count) {
                        freq_table[i].frequency =
                                data->acpi_data.states[i].core_frequency * 1000;
                } else {
                        freq_table[i].frequency = CPUFREQ_TABLE_END;
                }
        }

        result = cpufreq_table_validate_and_show(policy, freq_table);
        if (result)
                goto err_freqfree;

        /* notify BIOS that we exist */
        acpi_processor_notify_smm(THIS_MODULE);

        printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management "
               "activated.\n", cpu);

        for (i = 0; i < data->acpi_data.state_count; i++)
                pr_debug("     %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n",
                        (i == data->acpi_data.state ? '*' : ' '), i,
                        (u32) data->acpi_data.states[i].core_frequency,
                        (u32) data->acpi_data.states[i].power,
                        (u32) data->acpi_data.states[i].transition_latency,
                        (u32) data->acpi_data.states[i].bus_master_latency,
                        (u32) data->acpi_data.states[i].status,
                        (u32) data->acpi_data.states[i].control);

        /* the first call to ->target() should result in us actually
         * writing something to the appropriate registers. */
        data->resume = 1;

        return (result);

err_freqfree:
        kfree(freq_table);
err_unreg:
        acpi_processor_unregister_performance(cpu);
err_free:
        kfree(data);
        acpi_io_data[cpu] = NULL;

        return (result);
}

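/*
 * cpufreq ->exit() callback: undo acpi_cpufreq_cpu_init() -- unregister
 * the ACPI performance data and free the frequency table and the
 * per-CPU bookkeeping structure.
 */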
static int
acpi_cpufreq_cpu_exit (
        struct cpufreq_policy   *policy)
{
        struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];

        pr_debug("acpi_cpufreq_cpu_exit\n");

        if (data) {
                acpi_io_data[policy->cpu] = NULL;
                acpi_processor_unregister_performance(policy->cpu);
                kfree(policy->freq_table);
                kfree(data);
        }

        return (0);
}

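/*
 * Driver description.  Frequency verification and the sysfs attributes
 * come from the generic frequency-table helpers.
 */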
static struct cpufreq_driver acpi_cpufreq_driver = {
        .verify         = cpufreq_generic_frequency_table_verify,
        .target_index   = acpi_cpufreq_target,
        .get            = acpi_cpufreq_get,
        .init           = acpi_cpufreq_cpu_init,
        .exit           = acpi_cpufreq_cpu_exit,
        .name           = "acpi-cpufreq",
        .attr           = cpufreq_generic_attr,
};

static int __init
acpi_cpufreq_init (void)
{
        pr_debug("acpi_cpufreq_init\n");

        return cpufreq_register_driver(&acpi_cpufreq_driver);
}


static void __exit
acpi_cpufreq_exit (void)
{
        pr_debug("acpi_cpufreq_exit\n");

        cpufreq_unregister_driver(&acpi_cpufreq_driver);
}

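/*
 * Register late so that the ACPI processor driver, which supplies the
 * performance data consumed in acpi_cpufreq_cpu_init(), has already
 * been initialized.
 */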
late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);