// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *  			- Added processor hotplug support
 *  Copyright (C) 2005  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *  			- Added support for C3 on SMP
 */
#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/sched.h>       /* need_resched() */
#include <linux/sort.h>
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
#include <acpi/processor.h>

/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#include <asm/cpu.h>
#endif

#define ACPI_IDLE_STATE_START	(IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX) ? 1 : 0)

static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0000);
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);
static int bm_check_disable __read_mostly;
module_param(bm_check_disable, uint, 0000);

static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);

static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);

struct cpuidle_driver acpi_idle_driver = {
	.name =		"acpi_idle",
	.owner =	THIS_MODULE,
};

#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
static
DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate);

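/*
 * When "idle=poll" or "idle=halt" is passed on the kernel command line,
 * the native idle handling takes precedence and ACPI C-state support is
 * disabled entirely.
 */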
static int disabled_by_idle_boot_param(void)
{
	return boot_option_idle_override == IDLE_POLL ||
		boot_option_idle_override == IDLE_HALT;
}

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	pr_notice("%s detected - limiting to C%ld max_cstate."
		  " Override with \"processor.max_cstate=%d\"\n", id->ident,
		  (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}

static const struct dmi_system_id processor_power_dmi_table[] = {
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{ set_max_cstate, "Pavilion zv5000", {
	  DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
	  DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")},
	 (void *)1},
	{ set_max_cstate, "Asus L8400B", {
	  DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
	  DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
	 (void *)1},
	{},
};


/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
static void __cpuidle acpi_safe_halt(void)
{
	if (!tif_need_resched()) {
		safe_halt();
		local_irq_disable();
	}
}

#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
		return;

	if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E))
		type = ACPI_STATE_C1;

	/*
	 * Check if one of the previous states already marked the lapic
	 * unstable
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}

static void __lapic_timer_propagate_broadcast(void *arg)
{
	struct acpi_processor *pr = (struct acpi_processor *) arg;

	if (pr->power.timer_broadcast_on_state < INT_MAX)
		tick_broadcast_enable();
	else
		tick_broadcast_disable();
}

static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
{
	smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast,
				 (void *)pr, 1);
}

/* Power(C) State timer broadcast control */
static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx)
{
	return cx - pr->power.states >= pr->power.timer_broadcast_on_state;
}

#else

static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cstate) { }
static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }

static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx)
{
	return false;
}

#endif

#if defined(CONFIG_X86)
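/*
 * Vendors listed below provide a non-stop TSC when the NONSTOP_TSC
 * feature bit is set; deep C-states do not affect it there.  For
 * everything else, assume the TSC may halt in states deeper than C1
 * and mark it unstable so timekeeping can switch clocksources.
 */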
static void tsc_check_state(int state)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
	case X86_VENDOR_INTEL:
	case X86_VENDOR_CENTAUR:
	case X86_VENDOR_ZHAOXIN:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			return;
		fallthrough;
	default:
		/* TSC could halt in idle, so notify users */
		if (state > ACPI_STATE_C1)
			mark_tsc_unstable("TSC halts in idle");
	}
}
#else
static void tsc_check_state(int state) { return; }
#endif

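/*
 * Extract C2 and C3 data from the FADT and the processor's P_BLK when
 * the firmware does not provide a usable _CST object.  C1 is handled
 * later by acpi_processor_get_power_info_default().
 */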
static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{
	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency;

	/*
	 * FADT specified C2 latency must be less than or equal to
	 * 100 microseconds.
	 */
	if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		acpi_handle_debug(pr->handle, "C2 latency too large [%d]\n",
				  acpi_gbl_FADT.c2_latency);
		/* invalidate C2 */
		pr->power.states[ACPI_STATE_C2].address = 0;
	}

	/*
	 * FADT supplied C3 latency must be less than or equal to
	 * 1000 microseconds.
	 */
	if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		acpi_handle_debug(pr->handle, "C3 latency too large [%d]\n",
				  acpi_gbl_FADT.c3_latency);
		/* invalidate C3 */
		pr->power.states[ACPI_STATE_C3].address = 0;
	}

	acpi_handle_debug(pr->handle, "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address);

	snprintf(pr->power.states[ACPI_STATE_C2].desc,
			 ACPI_CX_DESC_LEN, "ACPI P_LVL2 IOPORT 0x%x",
			 pr->power.states[ACPI_STATE_C2].address);
	snprintf(pr->power.states[ACPI_STATE_C3].desc,
			 ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x",
			 pr->power.states[ACPI_STATE_C3].address);

	return 0;
}

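/*
 * Fall back to a minimal C1 state using the HLT instruction.  Every
 * processor must support C1, so this guarantees at least one usable
 * state even when neither _CST nor the FADT yielded anything.
 */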
static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
		pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;

		snprintf(pr->power.states[ACPI_STATE_C1].desc,
			 ACPI_CX_DESC_LEN, "ACPI HLT");
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	return 0;
}

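/*
 * Obtain the C-state data from the _CST object, unless the user passed
 * processor.nocst on the command line.
 */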
static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	int ret;

	if (nocst)
		return -ENODEV;

	ret = acpi_processor_evaluate_cst(pr->handle, pr->id, &pr->power);
	if (ret)
		return ret;

	if (!pr->power.count)
		return -EFAULT;

	pr->flags.has_cst = 1;
	return 0;
}

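/*
 * Validate a C3 state: check chipset errata and bus master (BM)
 * handling requirements before marking the state usable.
 */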
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag = -1;
	static int bm_control_flag = -1;

	if (!cx->address)
		return;

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		acpi_handle_debug(pr->handle,
				  "C3 not supported on PIIX4 with Type-F DMA\n");
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (bm_check_flag == -1) {
		/* Determine whether bm_check is needed based on CPU */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
		bm_control_flag = pr->flags.bm_control;
	} else {
		pr->flags.bm_check = bm_check_flag;
		pr->flags.bm_control = bm_control_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				acpi_handle_debug(pr->handle,
						  "C3 support requires BM control\n");
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				acpi_handle_debug(pr->handle,
						  "C3 support without BM control\n");
			}
		}
	} else {
		/*
		 * WBINVD should be set in the FADT for the C3 state to be
		 * supported when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			acpi_handle_debug(pr->handle,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n");
			return;
		}
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy.  Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy
	 */
	cx->valid = 1;

	/*
	 * On older chipsets, BM_RLD needs to be set
	 * in order for Bus Master activity to wake the
	 * system from C3.  Newer chipsets handle DMA
	 * during C3 automatically and BM_RLD is a NOP.
	 * In either case, the proper way to
	 * handle BM_RLD is to set it and leave it set.
	 */
	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
}

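/*
 * sort() helpers used to work around firmware that reports C-state
 * latencies out of order: compare and swap only pairs of entries that
 * are both valid, leaving the rest of the array untouched.
 */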
static int acpi_cst_latency_cmp(const void *a, const void *b)
{
	const struct acpi_processor_cx *x = a, *y = b;

	if (!(x->valid && y->valid))
		return 0;
	if (x->latency > y->latency)
		return 1;
	if (x->latency < y->latency)
		return -1;
	return 0;
}

static void acpi_cst_latency_swap(void *a, void *b, int n)
{
	struct acpi_processor_cx *x = a, *y = b;
	u32 tmp;

	if (!(x->valid && y->valid))
		return;
	tmp = x->latency;
	x->latency = y->latency;
	y->latency = tmp;
}

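/*
 * Walk all C-states reported for this processor, mark the usable ones
 * valid, and record the timer and TSC implications of each.  Returns
 * the number of working states.
 */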
static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;
	unsigned int last_latency = 0;
	unsigned int last_type = 0;
	bool buggy_latency = false;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			if (!cx->address)
				break;
			cx->valid = 1;
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			break;
		}
		if (!cx->valid)
			continue;
		if (cx->type >= last_type && cx->latency < last_latency)
			buggy_latency = true;
		last_latency = cx->latency;
		last_type = cx->type;

		lapic_timer_check_state(i, pr, cx);
		tsc_check_state(cx->type);
		working++;
	}

	if (buggy_latency) {
		pr_notice("FW issue: working around C-state latencies out of order\n");
		sort(&pr->power.states[1], max_cstate,
		     sizeof(struct acpi_processor_cx),
		     acpi_cst_latency_cmp,
		     acpi_cst_latency_swap);
	}

	lapic_timer_propagate_broadcast(pr);

	return working;
}

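/*
 * Top-level C-state discovery: prefer _CST, fall back to the FADT,
 * always ensure a default C1 state, then verify what was found.
 */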
static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;

	/*
	 * NOTE: the idle thread may not be running while calling
	 * this function.
	 */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			pr->flags.power = 1;
		}
	}

	return 0;
}

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	if (bm_check_disable)
		return 0;

	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}

static void wait_for_freeze(void)
{
#ifdef	CONFIG_X86
	/* No delay is needed if we are in guest */
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return;
	/*
	 * Modern (>=Nehalem) Intel systems use ACPI via intel_idle,
	 * not this code.  Assume that any Intel systems using this
	 * are ancient and may need the dummy wait.  This also assumes
	 * that the motivating chipset issue was Intel-only.
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return;
#endif
	/*
	 * Dummy wait op - must do something useless after P_LVL2 read
	 * because chipsets cannot guarantee that STPCLK# signal gets
	 * asserted in time to freeze execution properly
	 *
	 * This workaround has been in place since the original ACPI
	 * implementation was merged, circa 2002.
	 *
	 * If a profile is pointing to this instruction, please first
	 * consider moving your system to a more modern idle
	 * mechanism.
	 */
	inl(acpi_gbl_FADT.xpm_timer_block.address);
}

/**
 * acpi_idle_do_entry - enter idle state using the appropriate method
 * @cx: cstate data
 *
 * The caller disables interrupts before the call and enables them after return.
 */
static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		/* IO port based C-state */
		inb(cx->address);
		wait_for_freeze();
	}
}

/**
 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
 * @dev: the target CPU
 * @index: the index of suggested state
 */
static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	ACPI_FLUSH_CPU_CACHE();

	while (1) {
		if (cx->entry_method == ACPI_CSTATE_HALT)
			safe_halt();
		else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
			inb(cx->address);
			wait_for_freeze();
		} else
			return -ENODEV;

#if defined(CONFIG_X86) && defined(CONFIG_HOTPLUG_CPU)
		cond_wakeup_cpu0();
#endif
	}

	/* Never reached */
	return 0;
}

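/*
 * Without _CST and without the ACPI_FADT_C2_MP_SUPPORTED flag, states
 * deeper than C1 are not safe once more than one CPU is online.  Used
 * by acpi_idle_enter() to decide whether to demote to C1.
 */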
static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
{
	return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst &&
		!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED);
}

static int c3_cpu_count;
static DEFINE_RAW_SPINLOCK(c3_lock);

/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @drv: cpuidle driver
 * @pr: Target processor
 * @cx: Target state context
 * @index: index of target state
 */
static int __cpuidle acpi_idle_enter_bm(struct cpuidle_driver *drv,
			       struct acpi_processor *pr,
			       struct acpi_processor_cx *cx,
			       int index)
{
	static struct acpi_processor_cx safe_cx = {
		.entry_method = ACPI_CSTATE_HALT,
	};

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is not set.
	 * In that case we cannot do much, we enter C3 without doing anything.
	 */
	bool dis_bm = pr->flags.bm_control;

	/* If we can skip BM, demote to a safe state. */
	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
		dis_bm = false;
		index = drv->safe_state_index;
		if (index >= 0) {
			cx = this_cpu_read(acpi_cstate[index]);
		} else {
			cx = &safe_cx;
			index = -EBUSY;
		}
	}

	if (dis_bm) {
		raw_spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		raw_spin_unlock(&c3_lock);
	}

	rcu_idle_enter();

	acpi_idle_do_entry(cx);

	rcu_idle_exit();

	/* Re-enable bus master arbitration */
	if (dis_bm) {
		raw_spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		raw_spin_unlock(&c3_lock);
	}

	return index;
}

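/*
 * Main cpuidle ->enter() callback: resolve the per-CPU C-state for
 * @index, route C3 with BM checking through acpi_idle_enter_bm(), and
 * demote to C1 where deeper states are unsafe (see
 * acpi_idle_fallback_to_c1()).
 */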
static int __cpuidle acpi_idle_enter(struct cpuidle_device *dev,
			   struct cpuidle_driver *drv, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
	struct acpi_processor *pr;

	pr = __this_cpu_read(processors);
	if (unlikely(!pr))
		return -EINVAL;

	if (cx->type != ACPI_STATE_C1) {
		if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check)
			return acpi_idle_enter_bm(drv, pr, cx, index);

		/* C2 to C1 demotion. */
		if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
			index = ACPI_IDLE_STATE_START;
			cx = per_cpu(acpi_cstate[index], dev->cpu);
		}
	}

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	acpi_idle_do_entry(cx);

	return index;
}

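/*
 * Suspend-to-idle variant of the idle entry path.  For C3 with BM
 * checking enabled, BM_STS is ignored and ARB_DIS is applied
 * unconditionally.
 */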
static int __cpuidle acpi_idle_enter_s2idle(struct cpuidle_device *dev,
				  struct cpuidle_driver *drv, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	if (cx->type == ACPI_STATE_C3) {
		struct acpi_processor *pr = __this_cpu_read(processors);

		if (unlikely(!pr))
			return 0;

		if (pr->flags.bm_check) {
			u8 bm_sts_skip = cx->bm_sts_skip;

			/* Don't check BM_STS, do an unconditional ARB_DIS for S2IDLE */
			cx->bm_sts_skip = 1;
			acpi_idle_enter_bm(drv, pr, cx, index);
			cx->bm_sts_skip = bm_sts_skip;

			return 0;
		} else {
			ACPI_FLUSH_CPU_CACHE();
		}
	}
	acpi_idle_do_entry(cx);

	return 0;
}

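/*
 * Populate the per-CPU acpi_cstate[] pointers and per-state cpuidle
 * flags (timer-stop, TLB-flushed, RCU-idle) for this CPU's valid
 * C-states.
 */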
static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
					   struct cpuidle_device *dev)
{
	int i, count = ACPI_IDLE_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;

	if (max_cstate == 0)
		max_cstate = 1;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		state = &acpi_idle_driver.states[count];
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

		per_cpu(acpi_cstate[count], dev->cpu) = cx;

		if (lapic_timer_needs_broadcast(pr, cx))
			state->flags |= CPUIDLE_FLAG_TIMER_STOP;

		if (cx->type == ACPI_STATE_C3) {
			state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;
			if (pr->flags.bm_check)
				state->flags |= CPUIDLE_FLAG_RCU_IDLE;
		}

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	if (!count)
		return -EINVAL;

	return 0;
}

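/*
 * Fill in the global acpi_idle_driver state table from this processor's
 * C-state data: names, latencies, target residencies and entry methods.
 */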
static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
	int i, count;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (max_cstate == 0)
		max_cstate = 1;

	if (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX)) {
		cpuidle_poll_state_init(drv);
		count = 1;
	} else {
		count = 0;
	}

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

		state = &drv->states[count];
		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = cx->latency;
		state->target_residency = cx->latency * latency_factor;
		state->enter = acpi_idle_enter;

		state->flags = 0;
		if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2 ||
		    cx->type == ACPI_STATE_C3) {
			state->enter_dead = acpi_idle_play_dead;
			if (cx->type != ACPI_STATE_C3)
				drv->safe_state_index = count;
		}
		/*
		 * Halt-induced C1 is not good for ->enter_s2idle, because it
		 * re-enables interrupts on exit.  Moreover, C1 is generally not
		 * particularly interesting from the suspend-to-idle angle, so
		 * avoid C1 and the situations in which we may need to fall back
		 * to it altogether.
		 */
		if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
			state->enter_s2idle = acpi_idle_enter_s2idle;

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	drv->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}

static inline void acpi_processor_cstate_first_run_checks(void)
{
	static int first_run;

	if (first_run)
		return;
	dmi_check_system(processor_power_dmi_table);
	max_cstate = acpi_processor_cstate_check(max_cstate);
	if (max_cstate < ACPI_C_STATES_MAX)
		pr_notice("processor limited to max C-state %d\n", max_cstate);

	first_run++;

	if (nocst)
		return;

	acpi_processor_claim_cst_control();
}
#else

static inline int disabled_by_idle_boot_param(void) { return 0; }
static inline void acpi_processor_cstate_first_run_checks(void) { }
static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
	return -ENODEV;
}

static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
					   struct cpuidle_device *dev)
{
	return -EINVAL;
}

static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
	return -EINVAL;
}

#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */

struct acpi_lpi_states_array {
	unsigned int size;
	unsigned int composite_states_size;
	struct acpi_lpi_state *entries;
	struct acpi_lpi_state *composite_states[ACPI_PROCESSOR_MAX_POWER];
};

static int obj_get_integer(union acpi_object *obj, u32 *value)
{
	if (obj->type != ACPI_TYPE_INTEGER)
		return -EINVAL;

	*value = obj->integer.value;
	return 0;
}

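/*
 * Evaluate the _LPI object for @handle and fill @info with the raw LPI
 * states it describes.  The entries still need to be flattened against
 * the parent hierarchy levels before they can be used.
 */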
static int acpi_processor_evaluate_lpi(acpi_handle handle,
				       struct acpi_lpi_states_array *info)
{
	acpi_status status;
	int ret = 0;
	int pkg_count, state_idx = 1, loop;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *lpi_data;
	struct acpi_lpi_state *lpi_state;

	status = acpi_evaluate_object(handle, "_LPI", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		acpi_handle_debug(handle, "No _LPI, giving up\n");
		return -ENODEV;
	}

	lpi_data = buffer.pointer;

	/* There must be at least 4 elements = 3 elements + 1 package */
	if (!lpi_data || lpi_data->type != ACPI_TYPE_PACKAGE ||
	    lpi_data->package.count < 4) {
		pr_debug("not enough elements in _LPI\n");
		ret = -ENODATA;
		goto end;
	}

	pkg_count = lpi_data->package.elements[2].integer.value;

	/* Validate number of power states. */
	if (pkg_count < 1 || pkg_count != lpi_data->package.count - 3) {
		pr_debug("count given by _LPI is not valid\n");
		ret = -ENODATA;
		goto end;
	}

	lpi_state = kcalloc(pkg_count, sizeof(*lpi_state), GFP_KERNEL);
	if (!lpi_state) {
		ret = -ENOMEM;
		goto end;
	}

	info->size = pkg_count;
	info->entries = lpi_state;

	/* LPI States start at index 3 */
	for (loop = 3; state_idx <= pkg_count; loop++, state_idx++, lpi_state++) {
		union acpi_object *element, *pkg_elem, *obj;

		element = &lpi_data->package.elements[loop];
		if (element->type != ACPI_TYPE_PACKAGE || element->package.count < 7)
			continue;

		pkg_elem = element->package.elements;

		obj = pkg_elem + 6;
		if (obj->type == ACPI_TYPE_BUFFER) {
			struct acpi_power_register *reg;

			reg = (struct acpi_power_register *)obj->buffer.pointer;
			if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
			    reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)
				continue;

			lpi_state->address = reg->address;
			lpi_state->entry_method =
				reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE ?
				ACPI_CSTATE_FFH : ACPI_CSTATE_SYSTEMIO;
		} else if (obj->type == ACPI_TYPE_INTEGER) {
			lpi_state->entry_method = ACPI_CSTATE_INTEGER;
			lpi_state->address = obj->integer.value;
		} else {
			continue;
		}

		/* elements[7,8] skipped for now i.e. Residency/Usage counter */

		obj = pkg_elem + 9;
		if (obj->type == ACPI_TYPE_STRING)
			strlcpy(lpi_state->desc, obj->string.pointer,
				ACPI_CX_DESC_LEN);

		lpi_state->index = state_idx;
		if (obj_get_integer(pkg_elem + 0, &lpi_state->min_residency)) {
			pr_debug("No min. residency found, assuming 10 us\n");
			lpi_state->min_residency = 10;
		}

		if (obj_get_integer(pkg_elem + 1, &lpi_state->wake_latency)) {
			pr_debug("No wake latency found, assuming 10 us\n");
			lpi_state->wake_latency = 10;
		}

		if (obj_get_integer(pkg_elem + 2, &lpi_state->flags))
			lpi_state->flags = 0;

		if (obj_get_integer(pkg_elem + 3, &lpi_state->arch_flags))
			lpi_state->arch_flags = 0;

		if (obj_get_integer(pkg_elem + 4, &lpi_state->res_cnt_freq))
			lpi_state->res_cnt_freq = 1;

		if (obj_get_integer(pkg_elem + 5, &lpi_state->enable_parent_state))
			lpi_state->enable_parent_state = 0;
	}

	acpi_handle_debug(handle, "Found %d power states\n", state_idx);
end:
	kfree(buffer.pointer);
	return ret;
}

/*
 * flat_state_cnt - the number of composite LPI states after flattening
 */
static int flat_state_cnt;

/**
 * combine_lpi_states - combine local and parent LPI states to form a composite LPI state
 *
 * @local: local LPI state
 * @parent: parent LPI state
 * @result: composite LPI state
 */
static bool combine_lpi_states(struct acpi_lpi_state *local,
			       struct acpi_lpi_state *parent,
			       struct acpi_lpi_state *result)
{
	if (parent->entry_method == ACPI_CSTATE_INTEGER) {
		if (!parent->address) /* 0 means autopromotable */
			return false;
		result->address = local->address + parent->address;
	} else {
		result->address = parent->address;
	}

	result->min_residency = max(local->min_residency, parent->min_residency);
	result->wake_latency = local->wake_latency + parent->wake_latency;
	result->enable_parent_state = parent->enable_parent_state;
	result->entry_method = local->entry_method;

	result->flags = parent->flags;
	result->arch_flags = parent->arch_flags;
	result->index = parent->index;

	strlcpy(result->desc, local->desc, ACPI_CX_DESC_LEN);
	strlcat(result->desc, "+", ACPI_CX_DESC_LEN);
	strlcat(result->desc, parent->desc, ACPI_CX_DESC_LEN);
	return true;
}

#define ACPI_LPI_STATE_FLAGS_ENABLED			BIT(0)

static void stash_composite_state(struct acpi_lpi_states_array *curr_level,
				  struct acpi_lpi_state *t)
{
	curr_level->composite_states[curr_level->composite_states_size++] = t;
}

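/*
 * Flatten one level of the LPI hierarchy into pr->power.lpi_states[].
 * Leaf (processor) states are copied as-is; states at container levels
 * are combined with every compatible composite state from the previous
 * level.
 */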
static int flatten_lpi_states(struct acpi_processor *pr,
			      struct acpi_lpi_states_array *curr_level,
			      struct acpi_lpi_states_array *prev_level)
{
	int i, j, state_count = curr_level->size;
	struct acpi_lpi_state *p, *t = curr_level->entries;

	curr_level->composite_states_size = 0;
	for (j = 0; j < state_count; j++, t++) {
		struct acpi_lpi_state *flpi;

		if (!(t->flags & ACPI_LPI_STATE_FLAGS_ENABLED))
			continue;

		if (flat_state_cnt >= ACPI_PROCESSOR_MAX_POWER) {
			pr_warn("Limiting number of LPI states to max (%d)\n",
				ACPI_PROCESSOR_MAX_POWER);
			pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}

		flpi = &pr->power.lpi_states[flat_state_cnt];

		if (!prev_level) { /* leaf/processor node */
			memcpy(flpi, t, sizeof(*t));
			stash_composite_state(curr_level, flpi);
			flat_state_cnt++;
			continue;
		}

		for (i = 0; i < prev_level->composite_states_size; i++) {
			p = prev_level->composite_states[i];
			if (t->index <= p->enable_parent_state &&
			    combine_lpi_states(p, t, flpi)) {
				stash_composite_state(curr_level, flpi);
				flat_state_cnt++;
				flpi++;
			}
		}
	}

	kfree(curr_level->entries);
	return 0;
}

int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
{
	return -EOPNOTSUPP;
}

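/*
 * Build the flattened LPI state list for @pr by walking up the ACPI
 * processor-container hierarchy, evaluating _LPI at each level and
 * flattening it against the level below.
 */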
static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
{
	int ret, i;
	acpi_status status;
	acpi_handle handle = pr->handle, pr_ahandle;
	struct acpi_device *d = NULL;
	struct acpi_lpi_states_array info[2], *tmp, *prev, *curr;

	/* make sure our architecture has support */
	ret = acpi_processor_ffh_lpi_probe(pr->id);
	if (ret == -EOPNOTSUPP)
		return ret;

	if (!osc_pc_lpi_support_confirmed)
		return -EOPNOTSUPP;

	if (!acpi_has_method(handle, "_LPI"))
		return -EINVAL;

	flat_state_cnt = 0;
	prev = &info[0];
	curr = &info[1];
	handle = pr->handle;
	ret = acpi_processor_evaluate_lpi(handle, prev);
	if (ret)
		return ret;
	flatten_lpi_states(pr, prev, NULL);

	status = acpi_get_parent(handle, &pr_ahandle);
	while (ACPI_SUCCESS(status)) {
		acpi_bus_get_device(pr_ahandle, &d);
		handle = pr_ahandle;

		if (strcmp(acpi_device_hid(d), ACPI_PROCESSOR_CONTAINER_HID))
			break;

		/* can be optional? */
		if (!acpi_has_method(handle, "_LPI"))
			break;

		ret = acpi_processor_evaluate_lpi(handle, curr);
		if (ret)
			break;

		/* flatten all the LPI states in this level of hierarchy */
		flatten_lpi_states(pr, curr, prev);

		tmp = prev, prev = curr, curr = tmp;

		status = acpi_get_parent(handle, &pr_ahandle);
	}

	pr->power.count = flat_state_cnt;
	/* reset the index after flattening */
	for (i = 0; i < pr->power.count; i++)
		pr->power.lpi_states[i].index = i;

	/* Tell driver that _LPI is supported. */
	pr->flags.has_lpi = 1;
	pr->flags.power = 1;

	return 0;
}

int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
{
	return -ENODEV;
}

/**
 * acpi_idle_lpi_enter - enters an ACPI LPI state
 * @dev: the target CPU
 * @drv: cpuidle driver containing cpuidle state info
 * @index: index of target state
 *
 * Return: 0 for success or negative value for error
 */
static int acpi_idle_lpi_enter(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct acpi_lpi_state *lpi;

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	lpi = &pr->power.lpi_states[index];
	if (lpi->entry_method == ACPI_CSTATE_FFH)
		return acpi_processor_ffh_lpi_enter(lpi);

	return -EINVAL;
}

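/*
 * Register the flattened LPI states with the cpuidle driver, mapping
 * wake latency and minimum residency onto the generic cpuidle fields.
 */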
static int acpi_processor_setup_lpi_states(struct acpi_processor *pr)
{
	int i;
	struct acpi_lpi_state *lpi;
	struct cpuidle_state *state;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (!pr->flags.has_lpi)
		return -EOPNOTSUPP;

	for (i = 0; i < pr->power.count && i < CPUIDLE_STATE_MAX; i++) {
		lpi = &pr->power.lpi_states[i];

		state = &drv->states[i];
		snprintf(state->name, CPUIDLE_NAME_LEN, "LPI-%d", i);
		strlcpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = lpi->wake_latency;
		state->target_residency = lpi->min_residency;
		if (lpi->arch_flags)
			state->flags |= CPUIDLE_FLAG_TIMER_STOP;
		state->enter = acpi_idle_lpi_enter;
		drv->safe_state_index = i;
	}

	drv->state_count = i;

	return 0;
}

/**
 * acpi_processor_setup_cpuidle_states - prepares and configures cpuidle
 * global state data i.e. idle routines
 *
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
{
	int i;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (!pr->flags.power_setup_done || !pr->flags.power)
		return -EINVAL;

	drv->safe_state_index = -1;
	for (i = ACPI_IDLE_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
		drv->states[i].name[0] = '\0';
		drv->states[i].desc[0] = '\0';
	}

	if (pr->flags.has_lpi)
		return acpi_processor_setup_lpi_states(pr);

	return acpi_processor_setup_cstates(pr);
}

/**
 * acpi_processor_setup_cpuidle_dev - prepares and configures CPUIDLE
 * device i.e. per-cpu data
 *
 * @pr: the ACPI processor
 * @dev: the cpuidle device
 */
static int acpi_processor_setup_cpuidle_dev(struct acpi_processor *pr,
					    struct cpuidle_device *dev)
{
	if (!pr->flags.power_setup_done || !pr->flags.power || !dev)
		return -EINVAL;

	dev->cpu = pr->id;
	if (pr->flags.has_lpi)
		return acpi_processor_ffh_lpi_probe(pr->id);

	return acpi_processor_setup_cpuidle_cx(pr, dev);
}

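/*
 * Prefer LPI data when available; otherwise fall back to traditional
 * C-state discovery (_CST/FADT).
 */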
static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	int ret;

	ret = acpi_processor_get_lpi_info(pr);
	if (ret)
		ret = acpi_processor_get_cstate_info(pr);

	return ret;
}

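/*
 * Re-evaluate the power states of a hotplugged processor and re-enable
 * its cpuidle device with the refreshed data.
 */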
int acpi_processor_hotplug(struct acpi_processor *pr)
{
	int ret = 0;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	dev = per_cpu(acpi_cpuidle_device, pr->id);
	cpuidle_pause_and_lock();
	cpuidle_disable_device(dev);
	ret = acpi_processor_get_power_info(pr);
	if (!ret && pr->flags.power) {
		acpi_processor_setup_cpuidle_dev(pr, dev);
		ret = cpuidle_enable_device(dev);
	}
	cpuidle_resume_and_unlock();

	return ret;
}

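/*
 * Called on an ACPI notification that the available power states may
 * have changed: disable all cpuidle devices, refresh the C-state data
 * and re-enable the devices.
 */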
int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
{
	int cpu;
	struct acpi_processor *_pr;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/*
	 * FIXME:  Design the ACPI notification to make it once per
	 * system instead of once per-cpu.  This condition is a hack
	 * to make the code that updates C-States be called once.
	 */

	if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {

		/* Protect against cpu-hotplug */
		cpus_read_lock();
		cpuidle_pause_and_lock();

		/* Disable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			dev = per_cpu(acpi_cpuidle_device, cpu);
			cpuidle_disable_device(dev);
		}

		/* Populate Updated C-state information */
		acpi_processor_get_power_info(pr);
		acpi_processor_setup_cpuidle_states(pr);

		/* Enable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			acpi_processor_get_power_info(_pr);
			if (_pr->flags.power) {
				dev = per_cpu(acpi_cpuidle_device, cpu);
				acpi_processor_setup_cpuidle_dev(_pr, dev);
				cpuidle_enable_device(dev);
			}
		}
		cpuidle_resume_and_unlock();
		cpus_read_unlock();
	}

	return 0;
}

static int acpi_processor_registered;

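/*
 * Discover the processor's power states and, if any usable state was
 * found, register the acpi_idle driver (once) and this CPU's cpuidle
 * device.
 */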
int acpi_processor_power_init(struct acpi_processor *pr)
{
	int retval;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	acpi_processor_cstate_first_run_checks();

	if (!acpi_processor_get_power_info(pr))
		pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on
	 * platforms that only support C1.
	 */
	if (pr->flags.power) {
		/* Register acpi_idle_driver if not already registered */
		if (!acpi_processor_registered) {
			acpi_processor_setup_cpuidle_states(pr);
			retval = cpuidle_register_driver(&acpi_idle_driver);
			if (retval)
				return retval;
			pr_debug("%s registered with cpuidle\n",
				 acpi_idle_driver.name);
		}

		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		if (!dev)
			return -ENOMEM;
		per_cpu(acpi_cpuidle_device, pr->id) = dev;

		acpi_processor_setup_cpuidle_dev(pr, dev);

		/*
		 * Register the per-cpu cpuidle_device.  The cpuidle driver
		 * must already be registered before registering the device.
		 */
		retval = cpuidle_register_device(dev);
		if (retval) {
			if (acpi_processor_registered == 0)
				cpuidle_unregister_driver(&acpi_idle_driver);
			return retval;
		}
		acpi_processor_registered++;
	}
	return 0;
}

int acpi_processor_power_exit(struct acpi_processor *pr)
{
	struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);

	if (disabled_by_idle_boot_param())
		return 0;

	if (pr->flags.power) {
		cpuidle_unregister_device(dev);
		acpi_processor_registered--;
		if (acpi_processor_registered == 0)
			cpuidle_unregister_driver(&acpi_idle_driver);
	}

	pr->flags.power_setup_done = 0;
	return 0;
}
1432