// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * processor_perflib.c - ACPI Processor P-States Library ($Revision: 71 $)
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 */

#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#ifdef CONFIG_X86
#include <asm/cpufeature.h>
#endif

#define ACPI_PROCESSOR_FILE_PERFORMANCE	"performance"

static DEFINE_MUTEX(performance_mutex);

/*
 * _PPC support is implemented as a CPUfreq policy notifier:
 * every time a CPUfreq driver that is also registered with the ACPI
 * core is asked to change the speed policy, the maximum value is
 * adjusted so that it stays within the platform limit.
 *
 * Also, when a new platform limit value is detected, the CPUfreq
 * policy is adjusted accordingly.
 */

/* ignore_ppc:
 * -1 -> cpufreq low level drivers not initialized -> _PSS, etc. not called yet
 *       ignore _PPC
 *  0 -> cpufreq low level drivers initialized -> consider _PPC values
 *  1 -> ignore _PPC totally -> forced by user through boot param
 */
static int ignore_ppc = -1;
module_param(ignore_ppc, int, 0644);
MODULE_PARM_DESC(ignore_ppc, "If the frequency of your machine gets wrongly"
			     " limited by BIOS, this should help");

static bool acpi_processor_ppc_in_use;

static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
	acpi_status status = 0;
	unsigned long long ppc = 0;
	s32 qos_value;
	int index;
	int ret;

	if (!pr)
		return -EINVAL;

	/*
	 * _PPC indicates the maximum state currently supported by the platform
	 * (e.g. 0 = states 0..n; 1 = states 1..n; etc.).
	 */
	status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc);
	if (status != AE_NOT_FOUND) {
		acpi_processor_ppc_in_use = true;

		if (ACPI_FAILURE(status)) {
			acpi_evaluation_failure_warn(pr->handle, "_PPC", status);
			return -ENODEV;
		}
	}

	index = ppc;

	if (pr->performance_platform_limit == index ||
	    ppc >= pr->performance->state_count)
		return 0;

	pr_debug("CPU %d: _PPC is %d - frequency %s limited\n", pr->id,
		 index, index ? "is" : "is not");

	pr->performance_platform_limit = index;

	if (unlikely(!freq_qos_request_active(&pr->perflib_req)))
		return 0;

	/*
	 * If _PPC returns 0, it means that all of the available states can be
	 * used ("no limit").
	 */
	if (index == 0)
		qos_value = FREQ_QOS_MAX_DEFAULT_VALUE;
	else
		qos_value = pr->performance->states[index].core_frequency * 1000;

	ret = freq_qos_update_request(&pr->perflib_req, qos_value);
	if (ret < 0) {
		pr_warn("Failed to update perflib freq constraint: CPU%d (%d)\n",
			pr->id, ret);
	}

	return 0;
}
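
/*
 * Worked example with hypothetical values: if the _PSS table's state 2 runs
 * at 1600 MHz, a _PPC return value of 2 caps the frequency QoS request at
 * 1600 * 1000 = 1600000 kHz, whereas a _PPC of 0 restores
 * FREQ_QOS_MAX_DEFAULT_VALUE ("no limit").
 */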

#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE	0x80
/*
 * acpi_processor_ppc_ost: Notify firmware of the _PPC evaluation status
 * @handle: ACPI processor handle
 * @status: the status code of _PPC evaluation
 *	0: success. OSPM is now using the performance state specified.
 *	1: failure. OSPM has not changed the number of P-states in use
 */
static void acpi_processor_ppc_ost(acpi_handle handle, int status)
{
	if (acpi_has_method(handle, "_OST"))
		acpi_evaluate_ost(handle, ACPI_PROCESSOR_NOTIFY_PERFORMANCE,
				  status, NULL);
}

void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
{
	int ret;

	if (ignore_ppc || !pr->performance) {
		/*
		 * The _OST object is evaluated only for notification
		 * events; otherwise it is skipped.
		 */
		if (event_flag)
			acpi_processor_ppc_ost(pr->handle, 1);
		return;
	}

	ret = acpi_processor_get_platform_limit(pr);
	/*
	 * The _OST object is evaluated only for notification events;
	 * otherwise it is skipped.
	 */
	if (event_flag) {
		if (ret < 0)
			acpi_processor_ppc_ost(pr->handle, 1);
		else
			acpi_processor_ppc_ost(pr->handle, 0);
	}
	if (ret >= 0)
		cpufreq_update_limits(pr->id);
}
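
/*
 * Sketch of the intended call flow (the caller lives elsewhere, e.g. in the
 * ACPI processor driver's notify handler; shown here only for illustration):
 *
 *	// firmware: Notify (\_SB.CPU0, 0x80) - performance change
 *	acpi_processor_ppc_has_changed(pr, 1);	// event_flag != 0
 *	// _PPC is re-evaluated, the result is reported back via _OST
 *	// (0 = success, 1 = failure), and on success the cpufreq limits
 *	// for pr->id are refreshed.
 */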

int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
{
	struct acpi_processor *pr;

	pr = per_cpu(processors, cpu);
	if (!pr || !pr->performance || !pr->performance->state_count)
		return -ENODEV;

	*limit = pr->performance->states[pr->performance_platform_limit].
		core_frequency * 1000;

	return 0;
}
EXPORT_SYMBOL(acpi_processor_get_bios_limit);

void acpi_processor_ignore_ppc_init(void)
{
	if (ignore_ppc < 0)
		ignore_ppc = 0;
}

void acpi_processor_ppc_init(struct cpufreq_policy *policy)
{
	unsigned int cpu;

	for_each_cpu(cpu, policy->related_cpus) {
		struct acpi_processor *pr = per_cpu(processors, cpu);
		int ret;

		if (!pr)
			continue;

		/*
		 * Reset performance_platform_limit in case there is a stale
		 * value in it, so as to make it match the "no limit" QoS value
		 * below.
		 */
		pr->performance_platform_limit = 0;

		ret = freq_qos_add_request(&policy->constraints,
					   &pr->perflib_req, FREQ_QOS_MAX,
					   FREQ_QOS_MAX_DEFAULT_VALUE);
		if (ret < 0)
			pr_err("Failed to add freq constraint for CPU%d (%d)\n",
			       cpu, ret);
	}
}

void acpi_processor_ppc_exit(struct cpufreq_policy *policy)
{
	unsigned int cpu;

	for_each_cpu(cpu, policy->related_cpus) {
		struct acpi_processor *pr = per_cpu(processors, cpu);

		if (pr)
			freq_qos_remove_request(&pr->perflib_req);
	}
}

static int acpi_processor_get_performance_control(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = 0;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *pct = NULL;
	union acpi_object obj = { 0 };

	status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		acpi_evaluation_failure_warn(pr->handle, "_PCT", status);
		return -ENODEV;
	}

	pct = (union acpi_object *)buffer.pointer;
	if (!pct || pct->type != ACPI_TYPE_PACKAGE ||
	    pct->package.count != 2) {
		pr_err("Invalid _PCT data\n");
		result = -EFAULT;
		goto end;
	}

	/*
	 * control_register
	 */

	obj = pct->package.elements[0];

	if (obj.type != ACPI_TYPE_BUFFER ||
	    obj.buffer.length < sizeof(struct acpi_pct_register) ||
	    obj.buffer.pointer == NULL) {
		pr_err("Invalid _PCT data (control_register)\n");
		result = -EFAULT;
		goto end;
	}
	memcpy(&pr->performance->control_register, obj.buffer.pointer,
	       sizeof(struct acpi_pct_register));

	/*
	 * status_register
	 */

	obj = pct->package.elements[1];

	if (obj.type != ACPI_TYPE_BUFFER ||
	    obj.buffer.length < sizeof(struct acpi_pct_register) ||
	    obj.buffer.pointer == NULL) {
		pr_err("Invalid _PCT data (status_register)\n");
		result = -EFAULT;
		goto end;
	}

	memcpy(&pr->performance->status_register, obj.buffer.pointer,
	       sizeof(struct acpi_pct_register));

end:
	kfree(buffer.pointer);

	return result;
}
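
/*
 * Example of consuming the parsed result (hypothetical caller; the field
 * names below are those of struct acpi_pct_register):
 *
 *	struct acpi_processor_performance *perf = pr->performance;
 *
 *	pr_debug("control: space_id %u, addr 0x%llx\n",
 *		 perf->control_register.space_id,
 *		 perf->control_register.address);
 */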

#ifdef CONFIG_X86
/*
 * Some AMDs have 50MHz frequency multiples, but only provide 100MHz rounding
 * in their ACPI data. Calculate the real values and fix up the _PSS data.
 */
static void amd_fixup_frequency(struct acpi_processor_px *px, int i)
{
	u32 hi, lo, fid, did;
	int index = px->control & 0x00000007;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return;

	if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
	    || boot_cpu_data.x86 == 0x11) {
		rdmsr(MSR_AMD_PSTATE_DEF_BASE + index, lo, hi);
		/*
		 * MSR C001_0064+:
		 * Bit 63: PstateEn. Read-write. If set, the P-state is valid.
		 * (Bit 63 of the MSR is bit 31 of the high dword read above.)
		 */
		if (!(hi & BIT(31)))
			return;

		fid = lo & 0x3f;
		did = (lo >> 6) & 7;
		if (boot_cpu_data.x86 == 0x10)
			px->core_frequency = (100 * (fid + 0x10)) >> did;
		else
			px->core_frequency = (100 * (fid + 8)) >> did;
	}
}
#else
static void amd_fixup_frequency(struct acpi_processor_px *px, int i) {}
#endif
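
/*
 * Worked example for the fixup above, with hypothetical MSR contents: on a
 * family 0x10 part with fid = 0xa and did = 1, the real frequency is
 * (100 * (0xa + 0x10)) >> 1 = 2600 / 2 = 1300 MHz, even though the _PSS
 * entry may only advertise a 100 MHz-rounded value.
 */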
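
/*
 * Each _PSS entry is a package of six integers, matching the "NNNNNN" format
 * used below and struct acpi_processor_px: core frequency (MHz), power (mW),
 * transition latency (us), bus master latency (us), and the control/status
 * values used to program and verify a P-state.
 */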
static int acpi_processor_get_performance_states(struct acpi_processor *pr)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_buffer format = { sizeof("NNNNNN"), "NNNNNN" };
	struct acpi_buffer state = { 0, NULL };
	union acpi_object *pss = NULL;
	int i;
	int last_invalid = -1;

	status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		acpi_evaluation_failure_warn(pr->handle, "_PSS", status);
		return -ENODEV;
	}

	pss = buffer.pointer;
	if (!pss || pss->type != ACPI_TYPE_PACKAGE) {
		pr_err("Invalid _PSS data\n");
		result = -EFAULT;
		goto end;
	}

	acpi_handle_debug(pr->handle, "Found %d performance states\n",
			  pss->package.count);

	pr->performance->state_count = pss->package.count;
	pr->performance->states =
	    kmalloc_array(pss->package.count,
			  sizeof(struct acpi_processor_px),
			  GFP_KERNEL);
	if (!pr->performance->states) {
		result = -ENOMEM;
		goto end;
	}

	for (i = 0; i < pr->performance->state_count; i++) {

		struct acpi_processor_px *px = &(pr->performance->states[i]);

		state.length = sizeof(struct acpi_processor_px);
		state.pointer = px;

		acpi_handle_debug(pr->handle, "Extracting state %d\n", i);

		status = acpi_extract_package(&(pss->package.elements[i]),
					      &format, &state);
		if (ACPI_FAILURE(status)) {
			acpi_handle_warn(pr->handle, "Invalid _PSS data: %s\n",
					 acpi_format_exception(status));
			result = -EFAULT;
			kfree(pr->performance->states);
			goto end;
		}

		amd_fixup_frequency(px, i);

		acpi_handle_debug(pr->handle,
				  "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n",
				  i,
				  (u32) px->core_frequency,
				  (u32) px->power,
				  (u32) px->transition_latency,
				  (u32) px->bus_master_latency,
				  (u32) px->control, (u32) px->status);

		/*
		 * Check that ACPI's u64 MHz will be valid as u32 kHz in cpufreq
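		 * (e.g. a hypothetical bogus entry of 5,000,000 MHz would
		 * wrap when converted to kHz, so it must be rejected)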
		 */
		if (!px->core_frequency ||
		    ((u32)(px->core_frequency * 1000) !=
		     (px->core_frequency * 1000))) {
			pr_err(FW_BUG
			       "Invalid BIOS _PSS frequency found for processor %d: 0x%llx MHz\n",
			       pr->id, px->core_frequency);
			if (last_invalid == -1)
				last_invalid = i;
		} else {
			if (last_invalid != -1) {
				/*
				 * Copy this valid entry over last_invalid entry
				 */
				memcpy(&(pr->performance->states[last_invalid]),
				       px, sizeof(struct acpi_processor_px));
				++last_invalid;
			}
		}
	}

	if (last_invalid == 0) {
		pr_err(FW_BUG
		       "No valid BIOS _PSS frequency found for processor %d\n", pr->id);
		result = -EFAULT;
		kfree(pr->performance->states);
		pr->performance->states = NULL;
	}

	if (last_invalid > 0)
		pr->performance->state_count = last_invalid;

end:
	kfree(buffer.pointer);

	return result;
}

int acpi_processor_get_performance_info(struct acpi_processor *pr)
{
	int result = 0;

	if (!pr || !pr->performance || !pr->handle)
		return -EINVAL;

	if (!acpi_has_method(pr->handle, "_PCT")) {
		acpi_handle_debug(pr->handle,
				  "ACPI-based processor performance control unavailable\n");
		return -ENODEV;
	}

	result = acpi_processor_get_performance_control(pr);
	if (result)
		goto update_bios;

	result = acpi_processor_get_performance_states(pr);
	if (result)
		goto update_bios;

	/* We need to call _PPC once when cpufreq starts */
	if (ignore_ppc != 1)
		result = acpi_processor_get_platform_limit(pr);

	return result;

	/*
	 * Having _PPC but missing frequencies (_PSS, _PCT) is a very good hint
	 * that the BIOS is older than the CPU and does not know its
	 * frequencies.
	 */
update_bios:
#ifdef CONFIG_X86
	if (acpi_has_method(pr->handle, "_PPC")) {
		if (boot_cpu_has(X86_FEATURE_EST))
			pr_warn(FW_BUG "BIOS needs update for CPU frequency support\n");
	}
#endif
	return result;
}
EXPORT_SYMBOL_GPL(acpi_processor_get_performance_info);

int acpi_processor_pstate_control(void)
{
	acpi_status status;

	if (!acpi_gbl_FADT.smi_command || !acpi_gbl_FADT.pstate_control)
		return 0;

	pr_debug("Writing pstate_control [0x%x] to smi_command [0x%x]\n",
		 acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command);

	status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
				    (u32)acpi_gbl_FADT.pstate_control, 8);
	if (ACPI_SUCCESS(status))
		return 1;

	pr_warn("Failed to write pstate_control [0x%x] to smi_command [0x%x]: %s\n",
		acpi_gbl_FADT.pstate_control, acpi_gbl_FADT.smi_command,
		acpi_format_exception(status));
	return -EIO;
}

int acpi_processor_notify_smm(struct module *calling_module)
{
	static int is_done;
	int result;

	if (!acpi_processor_cpufreq_init)
		return -EBUSY;

	if (!try_module_get(calling_module))
		return -EINVAL;

	/*
	 * is_done is set to negative if an error occurred, and to positive
	 * if _no_ error occurred, but SMM was already notified. This avoids
	 * double notification, which might lead to unexpected results.
	 */
	if (is_done > 0) {
		module_put(calling_module);
		return 0;
	} else if (is_done < 0) {
		module_put(calling_module);
		return is_done;
	}

	is_done = -EIO;

	result = acpi_processor_pstate_control();
	if (!result) {
		pr_debug("No SMI port or pstate_control\n");
		module_put(calling_module);
		return 0;
	}
	if (result < 0) {
		module_put(calling_module);
		return result;
	}

	/*
	 * Success. If there is no _PPC, we have nothing to fear, so we can
	 * allow the cpufreq driver to be rmmod'ed.
	 */
	is_done = 1;

	if (!acpi_processor_ppc_in_use)
		module_put(calling_module);

	return 0;
}

EXPORT_SYMBOL(acpi_processor_notify_smm);
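
/*
 * Usage sketch for a cpufreq driver (illustrative; acpi-cpufreq makes a
 * call along these lines after it has registered itself):
 *
 *	acpi_processor_notify_smm(THIS_MODULE);
 */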
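
/*
 * _PSD is a package holding a single five-integer sub-package: num_entries
 * (5), revision (0), domain, coord_type and num_processors, which is what
 * the "NNNNN" format below extracts into struct acpi_psd_package.
 */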
int acpi_processor_get_psd(acpi_handle handle, struct acpi_psd_package *pdomain)
{
	int result = 0;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;

	status = acpi_evaluate_object(handle, "_PSD", NULL, &buffer);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	psd = buffer.pointer;
	if (!psd || psd->type != ACPI_TYPE_PACKAGE) {
		pr_err("Invalid _PSD data\n");
		result = -EFAULT;
		goto end;
	}

	if (psd->package.count != 1) {
		pr_err("Invalid _PSD data\n");
		result = -EFAULT;
		goto end;
	}

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]),
				      &format, &state);
	if (ACPI_FAILURE(status)) {
		pr_err("Invalid _PSD data\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		pr_err("Unknown _PSD:num_entries\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		pr_err("Unknown _PSD:revision\n");
		result = -EFAULT;
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pr_err("Invalid _PSD:coord_type\n");
		result = -EFAULT;
		goto end;
	}
end:
	kfree(buffer.pointer);
	return result;
}
EXPORT_SYMBOL(acpi_processor_get_psd);

int acpi_processor_preregister_performance(
		struct acpi_processor_performance __percpu *performance)
{
	int count_target;
	int retval = 0;
	unsigned int i, j;
	cpumask_var_t covered_cpus;
	struct acpi_processor *pr;
	struct acpi_psd_package *pdomain;
	struct acpi_processor *match_pr;
	struct acpi_psd_package *match_pdomain;

	if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
		return -ENOMEM;

	mutex_lock(&performance_mutex);

	/*
	 * Check if another driver has already registered, and abort before
	 * changing pr->performance if it has. Check input data as well.
	 */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr) {
			/* Look only at processors in ACPI namespace */
			continue;
		}

		if (pr->performance) {
			retval = -EBUSY;
			goto err_out;
		}

		if (!performance || !per_cpu_ptr(performance, i)) {
			retval = -EINVAL;
			goto err_out;
		}
	}

	/* Call _PSD for all CPUs */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		pr->performance = per_cpu_ptr(performance, i);
		pdomain = &(pr->performance->domain_info);
		if (acpi_processor_get_psd(pr->handle, pdomain)) {
			retval = -EINVAL;
			continue;
		}
	}
	if (retval)
		goto err_ret;

	/*
	 * Now that we have _PSD data from all CPUs, let's set up the
	 * P-state domain info.
	 */
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr)
			continue;

		if (cpumask_test_cpu(i, covered_cpus))
			continue;

		pdomain = &(pr->performance->domain_info);
		cpumask_set_cpu(i, pr->performance->shared_cpu_map);
		cpumask_set_cpu(i, covered_cpus);
		if (pdomain->num_processors <= 1)
			continue;

		/* Validate the Domain info */
		count_target = pdomain->num_processors;
		if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_HW;
		else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ANY;

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pdomain = &(match_pr->performance->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			/* Here i and j are in the same domain */

			if (match_pdomain->num_processors != count_target) {
				retval = -EINVAL;
				goto err_ret;
			}

			if (pdomain->coord_type != match_pdomain->coord_type) {
				retval = -EINVAL;
				goto err_ret;
			}

			cpumask_set_cpu(j, covered_cpus);
			cpumask_set_cpu(j, pr->performance->shared_cpu_map);
		}

		for_each_possible_cpu(j) {
			if (i == j)
				continue;

			match_pr = per_cpu(processors, j);
			if (!match_pr)
				continue;

			match_pdomain = &(match_pr->performance->domain_info);
			if (match_pdomain->domain != pdomain->domain)
				continue;

			match_pr->performance->shared_type =
					pr->performance->shared_type;
			cpumask_copy(match_pr->performance->shared_cpu_map,
				     pr->performance->shared_cpu_map);
		}
	}

err_ret:
	for_each_possible_cpu(i) {
		pr = per_cpu(processors, i);
		if (!pr || !pr->performance)
			continue;

		/* Assume no coordination on any error parsing domain info */
		if (retval) {
			cpumask_clear(pr->performance->shared_cpu_map);
			cpumask_set_cpu(i, pr->performance->shared_cpu_map);
			pr->performance->shared_type = CPUFREQ_SHARED_TYPE_NONE;
		}
		pr->performance = NULL; /* Will be set for real in register */
	}

err_out:
	mutex_unlock(&performance_mutex);
	free_cpumask_var(covered_cpus);
	return retval;
}
EXPORT_SYMBOL(acpi_processor_preregister_performance);
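
/*
 * Illustrative caller sketch (hypothetical error handling; this mirrors how
 * a cpufreq driver is expected to use the pre-registration step):
 *
 *	struct acpi_processor_performance __percpu *perf;
 *
 *	perf = alloc_percpu(struct acpi_processor_performance);
 *	if (!perf)
 *		return -ENOMEM;
 *	acpi_processor_preregister_performance(perf);
 *	// shared_cpu_map/shared_type are now valid for building policies
 */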

int acpi_processor_register_performance(struct acpi_processor_performance
					*performance, unsigned int cpu)
{
	struct acpi_processor *pr;

	if (!acpi_processor_cpufreq_init)
		return -EINVAL;

	mutex_lock(&performance_mutex);

	pr = per_cpu(processors, cpu);
	if (!pr) {
		mutex_unlock(&performance_mutex);
		return -ENODEV;
	}

	if (pr->performance) {
		mutex_unlock(&performance_mutex);
		return -EBUSY;
	}

	WARN_ON(!performance);

	pr->performance = performance;

	if (acpi_processor_get_performance_info(pr)) {
		pr->performance = NULL;
		mutex_unlock(&performance_mutex);
		return -EIO;
	}

	mutex_unlock(&performance_mutex);
	return 0;
}

EXPORT_SYMBOL(acpi_processor_register_performance);
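
/*
 * Usage sketch (hypothetical per-CPU init path of a cpufreq driver; "perf"
 * is the per-CPU allocation from the pre-registration example above):
 *
 *	ret = acpi_processor_register_performance(per_cpu_ptr(perf, cpu), cpu);
 *	if (ret)
 *		return ret;
 *	// ... use pr->performance->states ...
 *	acpi_processor_unregister_performance(cpu);	// on teardown or error
 */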

void acpi_processor_unregister_performance(unsigned int cpu)
{
	struct acpi_processor *pr;

	mutex_lock(&performance_mutex);

	pr = per_cpu(processors, cpu);
	if (!pr) {
		mutex_unlock(&performance_mutex);
		return;
	}

	if (pr->performance)
		kfree(pr->performance->states);
	pr->performance = NULL;

	mutex_unlock(&performance_mutex);
}

EXPORT_SYMBOL(acpi_processor_unregister_performance);