// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *                    - Added processor hotplug support
 * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *                    - Added support for C3 on SMP
 */
#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/sched.h>        /* need_resched() */
#include <linux/sort.h>
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
#include <acpi/processor.h>

/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#include <asm/cpu.h>
#endif

#define ACPI_PROCESSOR_CLASS "processor"
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");

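/*
 * When the architecture provides cpu_relax() based polling
 * (CONFIG_ARCH_HAS_CPU_RELAX), cpuidle state 0 is the generic poll state
 * installed by cpuidle_poll_state_init(), so the ACPI C-states start at
 * index 1; otherwise they start at index 0.  See
 * acpi_processor_setup_cstates() below.
 */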
#define ACPI_IDLE_STATE_START   (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX) ? 1 : 0)

static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0000);
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);
static int bm_check_disable __read_mostly;
module_param(bm_check_disable, uint, 0000);

static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);
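/*
 * These parameters live under the "processor" module prefix, e.g. on the
 * kernel command line (values here are illustrative only):
 *
 *   processor.max_cstate=1       limit C-states to C1
 *   processor.nocst=1            ignore _CST, use FADT C-state data
 *   processor.latency_factor=4   scale a state's target residency
 *
 * latency_factor is consumed in acpi_processor_setup_cstates(), which
 * derives target_residency as exit latency times this factor.
 */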

static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);

struct cpuidle_driver acpi_idle_driver = {
        .name =         "acpi_idle",
        .owner =        THIS_MODULE,
};

#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
static
DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate);

static int disabled_by_idle_boot_param(void)
{
        return boot_option_idle_override == IDLE_POLL ||
               boot_option_idle_override == IDLE_HALT;
}

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
        if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
                return 0;

        pr_notice("%s detected - limiting to C%ld max_cstate."
                  " Override with \"processor.max_cstate=%d\"\n", id->ident,
                  (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

        max_cstate = (long)id->driver_data;

        return 0;
}

static const struct dmi_system_id processor_power_dmi_table[] = {
        { set_max_cstate, "Clevo 5600D", {
          DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
          DMI_MATCH(DMI_BIOS_VERSION, "SHE845M0.86C.0013.D.0302131307")},
         (void *)2},
        { set_max_cstate, "Pavilion zv5000", {
          DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
          DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion zv5000 (DS502A#ABA)")},
         (void *)1},
        { set_max_cstate, "Asus L8400B", {
          DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
          DMI_MATCH(DMI_PRODUCT_NAME, "L8400B series Notebook PC")},
         (void *)1},
        {},
};

/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
static void __cpuidle acpi_safe_halt(void)
{
        if (!tif_need_resched()) {
                safe_halt();
                local_irq_disable();
        }
}

#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void lapic_timer_check_state(int state, struct acpi_processor *pr,
                                    struct acpi_processor_cx *cx)
{
        struct acpi_processor_power *pwr = &pr->power;
        u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

        if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
                return;

        if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E))
                type = ACPI_STATE_C1;

        /*
         * Check if one of the previous states already marked the
         * LAPIC unstable.
         */
        if (pwr->timer_broadcast_on_state < state)
                return;

        if (cx->type >= type)
                pr->power.timer_broadcast_on_state = state;
}

static void __lapic_timer_propagate_broadcast(void *arg)
{
        struct acpi_processor *pr = (struct acpi_processor *) arg;

        if (pr->power.timer_broadcast_on_state < INT_MAX)
                tick_broadcast_enable();
        else
                tick_broadcast_disable();
}

static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
{
        smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast,
                                 (void *)pr, 1);
}

/* Power(C) State timer broadcast control */
static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
                                        struct acpi_processor_cx *cx)
{
        return cx - pr->power.states >= pr->power.timer_broadcast_on_state;
}

#else

static void lapic_timer_check_state(int state, struct acpi_processor *pr,
                                    struct acpi_processor_cx *cstate) { }
static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }

static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
                                        struct acpi_processor_cx *cx)
{
        return false;
}

#endif

#if defined(CONFIG_X86)
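/*
 * The TSC may stop in C-states deeper than C1 on parts without a
 * non-stop (invariant) TSC; tsc_check_state() flags it as unstable so
 * the timekeeping code stops relying on it as a clocksource.
 */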
static void tsc_check_state(int state)
{
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_HYGON:
        case X86_VENDOR_AMD:
        case X86_VENDOR_INTEL:
        case X86_VENDOR_CENTAUR:
        case X86_VENDOR_ZHAOXIN:
                /*
                 * AMD Fam10h TSC will tick in all
                 * C/P/S0/S1 states when this bit is set.
                 */
                if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
                        return;
                fallthrough;
        default:
                /* TSC could halt in idle, so notify users */
                if (state > ACPI_STATE_C1)
                        mark_tsc_unstable("TSC halts in idle");
        }
}
#else
static void tsc_check_state(int state) { return; }
#endif

static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{
        if (!pr->pblk)
                return -ENODEV;

        /* if info is obtained from pblk/fadt, type equals state */
        pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
        pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
        /*
         * Check for P_LVL2_UP flag before entering C2 and above on
         * an SMP system.
         */
        if ((num_online_cpus() > 1) &&
            !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
                return -ENODEV;
#endif

        /* determine C2 and C3 address from pblk */
        pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
        pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

        /* determine latencies from FADT */
        pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency;
        pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency;

        /*
         * FADT specified C2 latency must be less than or equal to
         * 100 microseconds.
         */
        if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                        "C2 latency too large [%d]\n", acpi_gbl_FADT.c2_latency));
                /* invalidate C2 */
                pr->power.states[ACPI_STATE_C2].address = 0;
        }

        /*
         * FADT supplied C3 latency must be less than or equal to
         * 1000 microseconds.
         */
        if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                        "C3 latency too large [%d]\n", acpi_gbl_FADT.c3_latency));
                /* invalidate C3 */
                pr->power.states[ACPI_STATE_C3].address = 0;
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "lvl2[0x%08x] lvl3[0x%08x]\n",
                          pr->power.states[ACPI_STATE_C2].address,
                          pr->power.states[ACPI_STATE_C3].address));

        snprintf(pr->power.states[ACPI_STATE_C2].desc,
                 ACPI_CX_DESC_LEN, "ACPI P_LVL2 IOPORT 0x%x",
                 pr->power.states[ACPI_STATE_C2].address);
        snprintf(pr->power.states[ACPI_STATE_C3].desc,
                 ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x",
                 pr->power.states[ACPI_STATE_C3].address);

        return 0;
}

static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
        if (!pr->power.states[ACPI_STATE_C1].valid) {
                /* set the first C-State to C1 */
                /* all processors need to support C1 */
                pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
                pr->power.states[ACPI_STATE_C1].valid = 1;
                pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;

                snprintf(pr->power.states[ACPI_STATE_C1].desc,
                         ACPI_CX_DESC_LEN, "ACPI HLT");
        }
        /* the C0 state only exists as a filler in our array */
        pr->power.states[ACPI_STATE_C0].valid = 1;
        return 0;
}

static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
        int ret;

        if (nocst)
                return -ENODEV;

        ret = acpi_processor_evaluate_cst(pr->handle, pr->id, &pr->power);
        if (ret)
                return ret;

        if (!pr->power.count)
                return -EFAULT;

        pr->flags.has_cst = 1;
        return 0;
}

static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
                                           struct acpi_processor_cx *cx)
{
        static int bm_check_flag = -1;
        static int bm_control_flag = -1;

        if (!cx->address)
                return;

        /*
         * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
         * DMA transfers are used by any ISA device to avoid livelock.
         * Note that we could disable Type-F DMA (as recommended by
         * the erratum), but this is known to disrupt certain ISA
         * devices thus we take the conservative approach.
         */
        if (errata.piix4.fdma) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                        "C3 not supported on PIIX4 with Type-F DMA\n"));
                return;
        }

        /* All the logic here assumes flags.bm_check is same across all CPUs */
        if (bm_check_flag == -1) {
                /* Determine whether bm_check is needed based on CPU */
                acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
                bm_check_flag = pr->flags.bm_check;
                bm_control_flag = pr->flags.bm_control;
        } else {
                pr->flags.bm_check = bm_check_flag;
                pr->flags.bm_control = bm_control_flag;
        }

        if (pr->flags.bm_check) {
                if (!pr->flags.bm_control) {
                        if (pr->flags.has_cst != 1) {
                                /* bus mastering control is necessary */
                                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                        "C3 support requires BM control\n"));
                                return;
                        } else {
                                /* Here we enter C3 without bus mastering */
                                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                        "C3 support without BM control\n"));
                        }
                }
        } else {
                /*
                 * WBINVD should be set in the FADT for C3 to be
                 * supported when bm_check is not required.
                 */
                if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
                        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                "Cache invalidation should work properly"
                                " for C3 to be enabled on SMP systems\n"));
                        return;
                }
        }
        /*
         * Otherwise we've met all of our C3 requirements.
         * Normalize the C3 latency to expedite policy. Enable
         * checking of bus mastering status (bm_check) so we can
         * use this in our C3 policy.
         */
        cx->valid = 1;

        /*
         * On older chipsets, BM_RLD needs to be set
         * in order for Bus Master activity to wake the
         * system from C3. Newer chipsets handle DMA
         * during C3 automatically and BM_RLD is a NOP.
         * In either case, the proper way to
         * handle BM_RLD is to set it and leave it set.
         */
        acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);

        return;
}

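/*
 * Comparator and swap callbacks for sort(): order the C-states by
 * ascending latency.  Pairs where either state is invalid compare equal
 * and are never swapped, and only the latency values themselves are
 * exchanged, so out-of-order firmware-reported latencies are fixed up
 * without reordering the states.
 */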
static int acpi_cst_latency_cmp(const void *a, const void *b)
{
        const struct acpi_processor_cx *x = a, *y = b;

        if (!(x->valid && y->valid))
                return 0;
        if (x->latency > y->latency)
                return 1;
        if (x->latency < y->latency)
                return -1;
        return 0;
}

static void acpi_cst_latency_swap(void *a, void *b, int n)
{
        struct acpi_processor_cx *x = a, *y = b;
        u32 tmp;

        if (!(x->valid && y->valid))
                return;
        tmp = x->latency;
        x->latency = y->latency;
        y->latency = tmp;
}

static int acpi_processor_power_verify(struct acpi_processor *pr)
{
        unsigned int i;
        unsigned int working = 0;
        unsigned int last_latency = 0;
        unsigned int last_type = 0;
        bool buggy_latency = false;

        pr->power.timer_broadcast_on_state = INT_MAX;

        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
                struct acpi_processor_cx *cx = &pr->power.states[i];

                switch (cx->type) {
                case ACPI_STATE_C1:
                        cx->valid = 1;
                        break;

                case ACPI_STATE_C2:
                        if (!cx->address)
                                break;
                        cx->valid = 1;
                        break;

                case ACPI_STATE_C3:
                        acpi_processor_power_verify_c3(pr, cx);
                        break;
                }
                if (!cx->valid)
                        continue;
                if (cx->type >= last_type && cx->latency < last_latency)
                        buggy_latency = true;
                last_latency = cx->latency;
                last_type = cx->type;

                lapic_timer_check_state(i, pr, cx);
                tsc_check_state(cx->type);
                working++;
        }

        if (buggy_latency) {
                pr_notice("FW issue: working around C-state latencies out of order\n");
                sort(&pr->power.states[1], max_cstate,
                     sizeof(struct acpi_processor_cx),
                     acpi_cst_latency_cmp,
                     acpi_cst_latency_swap);
        }

        lapic_timer_propagate_broadcast(pr);

        return (working);
}

static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
        unsigned int i;
        int result;

        /* NOTE: the idle thread may not be running while calling
         * this function */

        /* Zero initialize all the C-states info. */
        memset(pr->power.states, 0, sizeof(pr->power.states));

        result = acpi_processor_get_power_info_cst(pr);
        if (result == -ENODEV)
                result = acpi_processor_get_power_info_fadt(pr);

        if (result)
                return result;

        acpi_processor_get_power_info_default(pr);

        pr->power.count = acpi_processor_power_verify(pr);

        /*
         * if one state of type C2 or C3 is available, mark this
         * CPU as being "idle manageable"
         */
        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
                if (pr->power.states[i].valid) {
                        pr->power.count = i;
                        pr->flags.power = 1;
                }
        }

        return 0;
}

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
        u32 bm_status = 0;

        if (bm_check_disable)
                return 0;

        acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
        if (bm_status)
                acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
        /*
         * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
         * the true state of bus mastering activity; forcing us to
         * manually check the BMIDEA bit of each IDE channel.
         */
        else if (errata.piix4.bmisx) {
                if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
                    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
                        bm_status = 1;
        }
        return bm_status;
}

static void wait_for_freeze(void)
{
#ifdef CONFIG_X86
        /* No delay is needed if we are in guest */
        if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
                return;
        /*
         * Modern (>=Nehalem) Intel systems use ACPI via intel_idle,
         * not this code.  Assume that any Intel systems using this
         * are ancient and may need the dummy wait.  This also assumes
         * that the motivating chipset issue was Intel-only.
         */
        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
                return;
#endif
        /*
         * Dummy wait op - must do something useless after P_LVL2 read
         * because chipsets cannot guarantee that STPCLK# signal gets
         * asserted in time to freeze execution properly
         *
         * This workaround has been in place since the original ACPI
         * implementation was merged, circa 2002.
         *
         * If a profile is pointing to this instruction, please first
         * consider moving your system to a more modern idle
         * mechanism.
         */
        inl(acpi_gbl_FADT.xpm_timer_block.address);
}

/**
 * acpi_idle_do_entry - enter idle state using the appropriate method
 * @cx: cstate data
 *
 * Caller disables interrupt before call and enables interrupt after return.
 */
static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
        if (cx->entry_method == ACPI_CSTATE_FFH) {
                /* Call into architectural FFH based C-state */
                acpi_processor_ffh_cstate_enter(cx);
        } else if (cx->entry_method == ACPI_CSTATE_HALT) {
                acpi_safe_halt();
        } else {
                /* IO port based C-state */
                inb(cx->address);
                wait_for_freeze();
        }
}

/**
 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
 * @dev: the target CPU
 * @index: the index of suggested state
 */
static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
        struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

        ACPI_FLUSH_CPU_CACHE();

        while (1) {

                if (cx->entry_method == ACPI_CSTATE_HALT)
                        safe_halt();
                else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
                        inb(cx->address);
                        wait_for_freeze();
                } else
                        return -ENODEV;

#if defined(CONFIG_X86) && defined(CONFIG_HOTPLUG_CPU)
                cond_wakeup_cpu0();
#endif
        }

        /* Never reached */
        return 0;
}

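/*
 * On SMP systems that neither supply _CST nor set the P_LVL2_UP
 * (ACPI_FADT_C2_MP_SUPPORTED) FADT flag, C2 and deeper are only known
 * to be safe on a single CPU, so acpi_idle_enter() demotes to C1 when
 * more than one CPU is online.
 */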
static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
{
        return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst &&
                !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED);
}

static int c3_cpu_count;
static DEFINE_RAW_SPINLOCK(c3_lock);

/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @drv: cpuidle driver
 * @pr: Target processor
 * @cx: Target state context
 * @index: index of target state
 */
static int __cpuidle acpi_idle_enter_bm(struct cpuidle_driver *drv,
                                        struct acpi_processor *pr,
                                        struct acpi_processor_cx *cx,
                                        int index)
{
        static struct acpi_processor_cx safe_cx = {
                .entry_method = ACPI_CSTATE_HALT,
        };

        /*
         * disable bus master
         * bm_check implies we need ARB_DIS
         * bm_control implies whether we can do ARB_DIS
         *
         * That leaves a case where bm_check is set and bm_control is not set.
         * In that case we cannot do much, we enter C3 without doing anything.
         */
        bool dis_bm = pr->flags.bm_control;

        /* If we can skip BM, demote to a safe state. */
        if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
                dis_bm = false;
                index = drv->safe_state_index;
                if (index >= 0) {
                        cx = this_cpu_read(acpi_cstate[index]);
                } else {
                        cx = &safe_cx;
                        index = -EBUSY;
                }
        }

        if (dis_bm) {
                raw_spin_lock(&c3_lock);
                c3_cpu_count++;
                /* Disable bus master arbitration when all CPUs are in C3 */
                if (c3_cpu_count == num_online_cpus())
                        acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
                raw_spin_unlock(&c3_lock);
        }

        rcu_idle_enter();

        acpi_idle_do_entry(cx);

        rcu_idle_exit();

        /* Re-enable bus master arbitration */
        if (dis_bm) {
                raw_spin_lock(&c3_lock);
                acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
                c3_cpu_count--;
                raw_spin_unlock(&c3_lock);
        }

        return index;
}

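/**
 * acpi_idle_enter - enter the C-state selected by the cpuidle governor
 * @dev: the target CPU
 * @drv: cpuidle driver with cpuidle state information
 * @index: index of the selected state
 *
 * C3 with bus-master checking is handed off to acpi_idle_enter_bm();
 * C2 and C3 are demoted to C1 on SMP when acpi_idle_fallback_to_c1()
 * says deeper states are not safe there.
 */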
static int __cpuidle acpi_idle_enter(struct cpuidle_device *dev,
                                     struct cpuidle_driver *drv, int index)
{
        struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
        struct acpi_processor *pr;

        pr = __this_cpu_read(processors);
        if (unlikely(!pr))
                return -EINVAL;

        if (cx->type != ACPI_STATE_C1) {
                if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check)
                        return acpi_idle_enter_bm(drv, pr, cx, index);

                /* C2 to C1 demotion. */
                if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
                        index = ACPI_IDLE_STATE_START;
                        cx = per_cpu(acpi_cstate[index], dev->cpu);
                }
        }

        if (cx->type == ACPI_STATE_C3)
                ACPI_FLUSH_CPU_CACHE();

        acpi_idle_do_entry(cx);

        return index;
}

static int __cpuidle acpi_idle_enter_s2idle(struct cpuidle_device *dev,
                                            struct cpuidle_driver *drv, int index)
{
        struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

        if (cx->type == ACPI_STATE_C3) {
                struct acpi_processor *pr = __this_cpu_read(processors);

                if (unlikely(!pr))
                        return 0;

                if (pr->flags.bm_check) {
                        u8 bm_sts_skip = cx->bm_sts_skip;

                        /* Don't check BM_STS, do an unconditional ARB_DIS for S2IDLE */
                        cx->bm_sts_skip = 1;
                        acpi_idle_enter_bm(drv, pr, cx, index);
                        cx->bm_sts_skip = bm_sts_skip;

                        return 0;
                } else {
                        ACPI_FLUSH_CPU_CACHE();
                }
        }
        acpi_idle_do_entry(cx);

        return 0;
}

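/*
 * Fill the per-CPU acpi_cstate[] table that maps a cpuidle state index
 * back to this CPU's acpi_processor_cx data, and set the per-state
 * flags (timer-stop, TLB-flushed, RCU-idle) that depend on this CPU.
 */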
static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
                                           struct cpuidle_device *dev)
{
        int i, count = ACPI_IDLE_STATE_START;
        struct acpi_processor_cx *cx;
        struct cpuidle_state *state;

        if (max_cstate == 0)
                max_cstate = 1;

        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
                state = &acpi_idle_driver.states[count];
                cx = &pr->power.states[i];

                if (!cx->valid)
                        continue;

                per_cpu(acpi_cstate[count], dev->cpu) = cx;

                if (lapic_timer_needs_broadcast(pr, cx))
                        state->flags |= CPUIDLE_FLAG_TIMER_STOP;

                if (cx->type == ACPI_STATE_C3) {
                        state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;
                        if (pr->flags.bm_check)
                                state->flags |= CPUIDLE_FLAG_RCU_IDLE;
                }

                count++;
                if (count == CPUIDLE_STATE_MAX)
                        break;
        }

        if (!count)
                return -EINVAL;

        return 0;
}

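/*
 * Fill the driver-global state table: an optional poll state at index 0,
 * then one cpuidle state per valid C-state.  target_residency is derived
 * heuristically as the exit latency times the latency_factor module
 * parameter.
 */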
static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
        int i, count;
        struct acpi_processor_cx *cx;
        struct cpuidle_state *state;
        struct cpuidle_driver *drv = &acpi_idle_driver;

        if (max_cstate == 0)
                max_cstate = 1;

        if (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX)) {
                cpuidle_poll_state_init(drv);
                count = 1;
        } else {
                count = 0;
        }

        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
                cx = &pr->power.states[i];

                if (!cx->valid)
                        continue;

                state = &drv->states[count];
                snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
                strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
                state->exit_latency = cx->latency;
                state->target_residency = cx->latency * latency_factor;
                state->enter = acpi_idle_enter;

                state->flags = 0;
                if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2) {
                        state->enter_dead = acpi_idle_play_dead;
                        drv->safe_state_index = count;
                }
                /*
                 * Halt-induced C1 is not good for ->enter_s2idle, because it
                 * re-enables interrupts on exit. Moreover, C1 is generally not
                 * particularly interesting from the suspend-to-idle angle, so
                 * avoid C1 and the situations in which we may need to fall back
                 * to it altogether.
                 */
                if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
                        state->enter_s2idle = acpi_idle_enter_s2idle;

                count++;
                if (count == CPUIDLE_STATE_MAX)
                        break;
        }

        drv->state_count = count;

        if (!count)
                return -EINVAL;

        return 0;
}

static inline void acpi_processor_cstate_first_run_checks(void)
{
        static int first_run;

        if (first_run)
                return;
        dmi_check_system(processor_power_dmi_table);
        max_cstate = acpi_processor_cstate_check(max_cstate);
        if (max_cstate < ACPI_C_STATES_MAX)
                pr_notice("processor limited to max C-state %d\n", max_cstate);

        first_run++;

        if (nocst)
                return;

        acpi_processor_claim_cst_control();
}
#else

static inline int disabled_by_idle_boot_param(void) { return 0; }
static inline void acpi_processor_cstate_first_run_checks(void) { }
static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
        return -ENODEV;
}

static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
                                           struct cpuidle_device *dev)
{
        return -EINVAL;
}

static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
        return -EINVAL;
}

#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */

struct acpi_lpi_states_array {
        unsigned int size;
        unsigned int composite_states_size;
        struct acpi_lpi_state *entries;
        struct acpi_lpi_state *composite_states[ACPI_PROCESSOR_MAX_POWER];
};

static int obj_get_integer(union acpi_object *obj, u32 *value)
{
        if (obj->type != ACPI_TYPE_INTEGER)
                return -EINVAL;

        *value = obj->integer.value;
        return 0;
}

static int acpi_processor_evaluate_lpi(acpi_handle handle,
                                       struct acpi_lpi_states_array *info)
{
        acpi_status status;
        int ret = 0;
        int pkg_count, state_idx = 1, loop;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *lpi_data;
        struct acpi_lpi_state *lpi_state;

        status = acpi_evaluate_object(handle, "_LPI", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _LPI, giving up\n"));
                return -ENODEV;
        }

        lpi_data = buffer.pointer;

        /* There must be at least 4 elements = 3 elements + 1 package */
        if (!lpi_data || lpi_data->type != ACPI_TYPE_PACKAGE ||
            lpi_data->package.count < 4) {
                pr_debug("not enough elements in _LPI\n");
                ret = -ENODATA;
                goto end;
        }

        pkg_count = lpi_data->package.elements[2].integer.value;

        /* Validate number of power states. */
        if (pkg_count < 1 || pkg_count != lpi_data->package.count - 3) {
                pr_debug("count given by _LPI is not valid\n");
                ret = -ENODATA;
                goto end;
        }

        lpi_state = kcalloc(pkg_count, sizeof(*lpi_state), GFP_KERNEL);
        if (!lpi_state) {
                ret = -ENOMEM;
                goto end;
        }

        info->size = pkg_count;
        info->entries = lpi_state;

        /* LPI States start at index 3 */
        for (loop = 3; state_idx <= pkg_count; loop++, state_idx++, lpi_state++) {
                union acpi_object *element, *pkg_elem, *obj;

                element = &lpi_data->package.elements[loop];
                if (element->type != ACPI_TYPE_PACKAGE || element->package.count < 7)
                        continue;

                pkg_elem = element->package.elements;

                obj = pkg_elem + 6;
                if (obj->type == ACPI_TYPE_BUFFER) {
                        struct acpi_power_register *reg;

                        reg = (struct acpi_power_register *)obj->buffer.pointer;
                        if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
                            reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)
                                continue;

                        lpi_state->address = reg->address;
                        lpi_state->entry_method =
                                reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE ?
                                ACPI_CSTATE_FFH : ACPI_CSTATE_SYSTEMIO;
                } else if (obj->type == ACPI_TYPE_INTEGER) {
                        lpi_state->entry_method = ACPI_CSTATE_INTEGER;
                        lpi_state->address = obj->integer.value;
                } else {
                        continue;
                }

                /* elements[7,8] skipped for now i.e. Residency/Usage counter */

                obj = pkg_elem + 9;
                if (obj->type == ACPI_TYPE_STRING)
                        strlcpy(lpi_state->desc, obj->string.pointer,
                                ACPI_CX_DESC_LEN);

                lpi_state->index = state_idx;
                if (obj_get_integer(pkg_elem + 0, &lpi_state->min_residency)) {
                        pr_debug("No min. residency found, assuming 10 us\n");
                        lpi_state->min_residency = 10;
                }

                if (obj_get_integer(pkg_elem + 1, &lpi_state->wake_latency)) {
                        pr_debug("No wake latency found, assuming 10 us\n");
                        lpi_state->wake_latency = 10;
                }

                if (obj_get_integer(pkg_elem + 2, &lpi_state->flags))
                        lpi_state->flags = 0;

                if (obj_get_integer(pkg_elem + 3, &lpi_state->arch_flags))
                        lpi_state->arch_flags = 0;

                if (obj_get_integer(pkg_elem + 4, &lpi_state->res_cnt_freq))
                        lpi_state->res_cnt_freq = 1;

                if (obj_get_integer(pkg_elem + 5, &lpi_state->enable_parent_state))
                        lpi_state->enable_parent_state = 0;
        }

        acpi_handle_debug(handle, "Found %d power states\n", state_idx);
end:
        kfree(buffer.pointer);
        return ret;
}

/*
 * flat_state_cnt - the number of composite LPI states after the process of flattening
 */
static int flat_state_cnt;

/**
 * combine_lpi_states - combine local and parent LPI states to form a composite LPI state
 *
 * @local: local LPI state
 * @parent: parent LPI state
 * @result: composite LPI state
 */
static bool combine_lpi_states(struct acpi_lpi_state *local,
                               struct acpi_lpi_state *parent,
                               struct acpi_lpi_state *result)
{
        if (parent->entry_method == ACPI_CSTATE_INTEGER) {
                if (!parent->address) /* 0 means autopromotable */
                        return false;
                result->address = local->address + parent->address;
        } else {
                result->address = parent->address;
        }

        result->min_residency = max(local->min_residency, parent->min_residency);
        result->wake_latency = local->wake_latency + parent->wake_latency;
        result->enable_parent_state = parent->enable_parent_state;
        result->entry_method = local->entry_method;

        result->flags = parent->flags;
        result->arch_flags = parent->arch_flags;
        result->index = parent->index;

        strlcpy(result->desc, local->desc, ACPI_CX_DESC_LEN);
        strlcat(result->desc, "+", ACPI_CX_DESC_LEN);
        strlcat(result->desc, parent->desc, ACPI_CX_DESC_LEN);
        return true;
}
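/*
 * For example (made-up numbers): combining a local state with
 * min_residency = 100 us and wake_latency = 50 us with a parent state
 * having min_residency = 500 us and wake_latency = 100 us yields a
 * composite with min_residency = 500 us (the max) and wake_latency =
 * 150 us (the sum), described as "<local>+<parent>".
 */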

#define ACPI_LPI_STATE_FLAGS_ENABLED                    BIT(0)

static void stash_composite_state(struct acpi_lpi_states_array *curr_level,
                                  struct acpi_lpi_state *t)
{
        curr_level->composite_states[curr_level->composite_states_size++] = t;
}

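/*
 * Flatten one level of the LPI hierarchy into pr->power.lpi_states[]:
 * at the leaf (processor) level the states are copied as-is; at parent
 * levels each enabled state is combined with every composite state of
 * the previous level that allows promotion into it, i.e. whose
 * enable_parent_state covers this state's index.
 */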
static int flatten_lpi_states(struct acpi_processor *pr,
                              struct acpi_lpi_states_array *curr_level,
                              struct acpi_lpi_states_array *prev_level)
{
        int i, j, state_count = curr_level->size;
        struct acpi_lpi_state *p, *t = curr_level->entries;

        curr_level->composite_states_size = 0;
        for (j = 0; j < state_count; j++, t++) {
                struct acpi_lpi_state *flpi;

                if (!(t->flags & ACPI_LPI_STATE_FLAGS_ENABLED))
                        continue;

                if (flat_state_cnt >= ACPI_PROCESSOR_MAX_POWER) {
                        pr_warn("Limiting number of LPI states to max (%d)\n",
                                ACPI_PROCESSOR_MAX_POWER);
                        pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
                        break;
                }

                flpi = &pr->power.lpi_states[flat_state_cnt];

                if (!prev_level) { /* leaf/processor node */
                        memcpy(flpi, t, sizeof(*t));
                        stash_composite_state(curr_level, flpi);
                        flat_state_cnt++;
                        continue;
                }

                for (i = 0; i < prev_level->composite_states_size; i++) {
                        p = prev_level->composite_states[i];
                        if (t->index <= p->enable_parent_state &&
                            combine_lpi_states(p, t, flpi)) {
                                stash_composite_state(curr_level, flpi);
                                flat_state_cnt++;
                                flpi++;
                        }
                }
        }

        kfree(curr_level->entries);
        return 0;
}

int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
{
        return -EOPNOTSUPP;
}

static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
{
        int ret, i;
        acpi_status status;
        acpi_handle handle = pr->handle, pr_ahandle;
        struct acpi_device *d = NULL;
        struct acpi_lpi_states_array info[2], *tmp, *prev, *curr;

        /* make sure our architecture has support */
        ret = acpi_processor_ffh_lpi_probe(pr->id);
        if (ret == -EOPNOTSUPP)
                return ret;

        if (!osc_pc_lpi_support_confirmed)
                return -EOPNOTSUPP;

        if (!acpi_has_method(handle, "_LPI"))
                return -EINVAL;

        flat_state_cnt = 0;
        prev = &info[0];
        curr = &info[1];
        handle = pr->handle;
        ret = acpi_processor_evaluate_lpi(handle, prev);
        if (ret)
                return ret;
        flatten_lpi_states(pr, prev, NULL);

        status = acpi_get_parent(handle, &pr_ahandle);
        while (ACPI_SUCCESS(status)) {
                acpi_bus_get_device(pr_ahandle, &d);
                handle = pr_ahandle;

                if (strcmp(acpi_device_hid(d), ACPI_PROCESSOR_CONTAINER_HID))
                        break;

                /* _LPI may be absent at this level; stop walking up if so */
                if (!acpi_has_method(handle, "_LPI"))
                        break;

                ret = acpi_processor_evaluate_lpi(handle, curr);
                if (ret)
                        break;

                /* flatten all the LPI states in this level of hierarchy */
                flatten_lpi_states(pr, curr, prev);

                tmp = prev, prev = curr, curr = tmp;

                status = acpi_get_parent(handle, &pr_ahandle);
        }

        pr->power.count = flat_state_cnt;
        /* reset the index after flattening */
        for (i = 0; i < pr->power.count; i++)
                pr->power.lpi_states[i].index = i;

        /* Tell driver that _LPI is supported. */
        pr->flags.has_lpi = 1;
        pr->flags.power = 1;

        return 0;
}

int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
{
        return -ENODEV;
}

/**
 * acpi_idle_lpi_enter - enters an ACPI LPI state
 * @dev: the target CPU
 * @drv: cpuidle driver containing cpuidle state info
 * @index: index of target state
 *
 * Return: 0 for success or negative value for error
 */
static int acpi_idle_lpi_enter(struct cpuidle_device *dev,
                               struct cpuidle_driver *drv, int index)
{
        struct acpi_processor *pr;
        struct acpi_lpi_state *lpi;

        pr = __this_cpu_read(processors);

        if (unlikely(!pr))
                return -EINVAL;

        lpi = &pr->power.lpi_states[index];
        if (lpi->entry_method == ACPI_CSTATE_FFH)
                return acpi_processor_ffh_lpi_enter(lpi);

        return -EINVAL;
}

static int acpi_processor_setup_lpi_states(struct acpi_processor *pr)
{
        int i;
        struct acpi_lpi_state *lpi;
        struct cpuidle_state *state;
        struct cpuidle_driver *drv = &acpi_idle_driver;

        if (!pr->flags.has_lpi)
                return -EOPNOTSUPP;

        for (i = 0; i < pr->power.count && i < CPUIDLE_STATE_MAX; i++) {
                lpi = &pr->power.lpi_states[i];

                state = &drv->states[i];
                snprintf(state->name, CPUIDLE_NAME_LEN, "LPI-%d", i);
                strlcpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN);
                state->exit_latency = lpi->wake_latency;
                state->target_residency = lpi->min_residency;
                if (lpi->arch_flags)
                        state->flags |= CPUIDLE_FLAG_TIMER_STOP;
                state->enter = acpi_idle_lpi_enter;
                drv->safe_state_index = i;
        }

        drv->state_count = i;

        return 0;
}

/**
 * acpi_processor_setup_cpuidle_states - prepares and configures cpuidle
 * global state data i.e. idle routines
 *
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
{
        int i;
        struct cpuidle_driver *drv = &acpi_idle_driver;

        if (!pr->flags.power_setup_done || !pr->flags.power)
                return -EINVAL;

        drv->safe_state_index = -1;
        for (i = ACPI_IDLE_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
                drv->states[i].name[0] = '\0';
                drv->states[i].desc[0] = '\0';
        }

        if (pr->flags.has_lpi)
                return acpi_processor_setup_lpi_states(pr);

        return acpi_processor_setup_cstates(pr);
}

/**
 * acpi_processor_setup_cpuidle_dev - prepares and configures CPUIDLE
 * device i.e. per-cpu data
 *
 * @pr: the ACPI processor
 * @dev : the cpuidle device
 */
static int acpi_processor_setup_cpuidle_dev(struct acpi_processor *pr,
                                            struct cpuidle_device *dev)
{
        if (!pr->flags.power_setup_done || !pr->flags.power || !dev)
                return -EINVAL;

        dev->cpu = pr->id;
        if (pr->flags.has_lpi)
                return acpi_processor_ffh_lpi_probe(pr->id);

        return acpi_processor_setup_cpuidle_cx(pr, dev);
}

static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
        int ret;

        ret = acpi_processor_get_lpi_info(pr);
        if (ret)
                ret = acpi_processor_get_cstate_info(pr);

        return ret;
}

int acpi_processor_hotplug(struct acpi_processor *pr)
{
        int ret = 0;
        struct cpuidle_device *dev;

        if (disabled_by_idle_boot_param())
                return 0;

        if (!pr->flags.power_setup_done)
                return -ENODEV;

        dev = per_cpu(acpi_cpuidle_device, pr->id);
        cpuidle_pause_and_lock();
        cpuidle_disable_device(dev);
        ret = acpi_processor_get_power_info(pr);
        if (!ret && pr->flags.power) {
                acpi_processor_setup_cpuidle_dev(pr, dev);
                ret = cpuidle_enable_device(dev);
        }
        cpuidle_resume_and_unlock();

        return ret;
}

int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
{
        int cpu;
        struct acpi_processor *_pr;
        struct cpuidle_device *dev;

        if (disabled_by_idle_boot_param())
                return 0;

        if (!pr->flags.power_setup_done)
                return -ENODEV;

        /*
         * FIXME: Design the ACPI notification to make it once per
         * system instead of once per-cpu.  This condition is a hack
         * to make the code that updates C-States be called once.
         */

        if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {

                /* Protect against cpu-hotplug */
                get_online_cpus();
                cpuidle_pause_and_lock();

                /* Disable all cpuidle devices */
                for_each_online_cpu(cpu) {
                        _pr = per_cpu(processors, cpu);
                        if (!_pr || !_pr->flags.power_setup_done)
                                continue;
                        dev = per_cpu(acpi_cpuidle_device, cpu);
                        cpuidle_disable_device(dev);
                }

                /* Populate Updated C-state information */
                acpi_processor_get_power_info(pr);
                acpi_processor_setup_cpuidle_states(pr);

                /* Enable all cpuidle devices */
                for_each_online_cpu(cpu) {
                        _pr = per_cpu(processors, cpu);
                        if (!_pr || !_pr->flags.power_setup_done)
                                continue;
                        acpi_processor_get_power_info(_pr);
                        if (_pr->flags.power) {
                                dev = per_cpu(acpi_cpuidle_device, cpu);
                                acpi_processor_setup_cpuidle_dev(_pr, dev);
                                cpuidle_enable_device(dev);
                        }
                }
                cpuidle_resume_and_unlock();
                put_online_cpus();
        }

        return 0;
}

static int acpi_processor_registered;

int acpi_processor_power_init(struct acpi_processor *pr)
{
        int retval;
        struct cpuidle_device *dev;

        if (disabled_by_idle_boot_param())
                return 0;

        acpi_processor_cstate_first_run_checks();

        if (!acpi_processor_get_power_info(pr))
                pr->flags.power_setup_done = 1;

        /*
         * Install the idle handler if processor power management is
         * supported.  Note that the previously set idle handler will
         * be used on platforms that only support C1.
         */
        if (pr->flags.power) {
                /* Register acpi_idle_driver if not already registered */
                if (!acpi_processor_registered) {
                        acpi_processor_setup_cpuidle_states(pr);
                        retval = cpuidle_register_driver(&acpi_idle_driver);
                        if (retval)
                                return retval;
                        pr_debug("%s registered with cpuidle\n",
                                 acpi_idle_driver.name);
                }

                dev = kzalloc(sizeof(*dev), GFP_KERNEL);
                if (!dev)
                        return -ENOMEM;
                per_cpu(acpi_cpuidle_device, pr->id) = dev;

                acpi_processor_setup_cpuidle_dev(pr, dev);

                /* Register per-cpu cpuidle_device.  Cpuidle driver
                 * must already be registered before registering device
                 */
                retval = cpuidle_register_device(dev);
                if (retval) {
                        if (acpi_processor_registered == 0)
                                cpuidle_unregister_driver(&acpi_idle_driver);
                        return retval;
                }
                acpi_processor_registered++;
        }
        return 0;
}

int acpi_processor_power_exit(struct acpi_processor *pr)
{
        struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);

        if (disabled_by_idle_boot_param())
                return 0;

        if (pr->flags.power) {
                cpuidle_unregister_device(dev);
                acpi_processor_registered--;
                if (acpi_processor_registered == 0)
                        cpuidle_unregister_driver(&acpi_idle_driver);
        }

        pr->flags.power_setup_done = 0;
        return 0;
}