// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *  			- Added processor hotplug support
 *  Copyright (C) 2005  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *  			- Added support for C3 on SMP
 */
#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/sched.h>       /* need_resched() */
#include <linux/sort.h>
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
#include <acpi/processor.h>

/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#include <asm/cpu.h>
#endif

#define ACPI_PROCESSOR_CLASS            "processor"
#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");

#define ACPI_IDLE_STATE_START	(IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX) ? 1 : 0)

static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0000);
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);
static int bm_check_disable __read_mostly;
module_param(bm_check_disable, uint, 0000);

static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);

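/*
 * Note: when built into the "processor" driver, the module parameters above
 * can be set on the kernel command line with the "processor." prefix, e.g.
 * (illustrative values):
 *   processor.max_cstate=1 processor.nocst=1 processor.latency_factor=4
 */
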
static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);

struct cpuidle_driver acpi_idle_driver = {
	.name =		"acpi_idle",
	.owner =	THIS_MODULE,
};

#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
static
DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate);

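/*
 * Booting with "idle=poll" or "idle=halt" sets boot_option_idle_override;
 * in either case the ACPI idle driver must stay out of the idle loop.
 */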
static int disabled_by_idle_boot_param(void)
{
	return boot_option_idle_override == IDLE_POLL ||
		boot_option_idle_override == IDLE_HALT;
}

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	pr_notice("%s detected - limiting to C%ld max_cstate."
		  " Override with \"processor.max_cstate=%d\"\n", id->ident,
		  (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}

static const struct dmi_system_id processor_power_dmi_table[] = {
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{ set_max_cstate, "Pavilion zv5000", {
	  DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
	  DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")},
	 (void *)1},
	{ set_max_cstate, "Asus L8400B", {
	  DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
	  DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
	 (void *)1},
	{},
};
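
/*
 * Each entry above routes to set_max_cstate() with the C-state cap in
 * ->driver_data: the Clevo 5600D is limited to C2, the HP and Asus models
 * to C1.
 */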

/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return. Note that safe_halt() re-enables interrupts
 * when it halts, so they must be disabled again on the way out.
 */
static void __cpuidle acpi_safe_halt(void)
{
	if (!tif_need_resched()) {
		safe_halt();
		local_irq_disable();
	}
}

#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
		return;

	if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E))
		type = ACPI_STATE_C1;

	/*
	 * Check if one of the previous states already marked the lapic
	 * unstable
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}

static void __lapic_timer_propagate_broadcast(void *arg)
{
	struct acpi_processor *pr = (struct acpi_processor *) arg;

	if (pr->power.timer_broadcast_on_state < INT_MAX)
		tick_broadcast_enable();
	else
		tick_broadcast_disable();
}

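/*
 * tick_broadcast_enable()/tick_broadcast_disable() act on the calling CPU,
 * so run the decision on the target CPU itself via a synchronous cross-call.
 */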
static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
{
	smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast,
				 (void *)pr, 1);
}

/* Power(C) State timer broadcast control */
static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx)
{
	return cx - pr->power.states >= pr->power.timer_broadcast_on_state;
}

#else

static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cstate) { }
static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }

static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx)
{
	return false;
}

#endif

#if defined(CONFIG_X86)
static void tsc_check_state(int state)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
	case X86_VENDOR_INTEL:
	case X86_VENDOR_CENTAUR:
	case X86_VENDOR_ZHAOXIN:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			return;
		fallthrough;
	default:
		/* TSC could halt in idle, so notify users */
		if (state > ACPI_STATE_C1)
			mark_tsc_unstable("TSC halts in idle");
	}
}
#else
static void tsc_check_state(int state) { return; }
#endif

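/*
 * The FADT/P_BLK path predates _CST: the processor's P_BLK I/O block encodes
 * the C2 entry register at offset 4 (P_LVL2) and the C3 entry register at
 * offset 5 (P_LVL3), while the worst-case latencies come from the FADT
 * itself.
 */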
static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{

	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency;

	/*
	 * FADT specified C2 latency must be less than or equal to
	 * 100 microseconds.
	 */
	if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			"C2 latency too large [%d]\n", acpi_gbl_FADT.c2_latency));
		/* invalidate C2 */
		pr->power.states[ACPI_STATE_C2].address = 0;
	}

	/*
	 * FADT supplied C3 latency must be less than or equal to
	 * 1000 microseconds.
	 */
	if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			"C3 latency too large [%d]\n", acpi_gbl_FADT.c3_latency));
		/* invalidate C3 */
		pr->power.states[ACPI_STATE_C3].address = 0;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	snprintf(pr->power.states[ACPI_STATE_C2].desc,
			 ACPI_CX_DESC_LEN, "ACPI P_LVL2 IOPORT 0x%x",
			 pr->power.states[ACPI_STATE_C2].address);
	snprintf(pr->power.states[ACPI_STATE_C3].desc,
			 ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x",
			 pr->power.states[ACPI_STATE_C3].address);

	return 0;
}

static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
		pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;

		snprintf(pr->power.states[ACPI_STATE_C1].desc,
			 ACPI_CX_DESC_LEN, "ACPI HLT");
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	return 0;
}

static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	int ret;

	if (nocst)
		return -ENODEV;

	ret = acpi_processor_evaluate_cst(pr->handle, pr->id, &pr->power);
	if (ret)
		return ret;

	if (!pr->power.count)
		return -EFAULT;

	pr->flags.has_cst = 1;
	return 0;
}

static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag = -1;
	static int bm_control_flag = -1;


	if (!cx->address)
		return;

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (bm_check_flag == -1) {
		/* Determine whether bm_check is needed based on CPU  */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
		bm_control_flag = pr->flags.bm_control;
	} else {
		pr->flags.bm_check = bm_check_flag;
		pr->flags.bm_control = bm_control_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support requires BM control\n"));
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support without BM control\n"));
			}
		}
	} else {
		/*
		 * WBINVD should be set in the FADT for the C3 state to be
		 * supported when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n"));
			return;
		}
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy.  Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy
	 */
	cx->valid = 1;

	/*
	 * On older chipsets, BM_RLD needs to be set
	 * in order for Bus Master activity to wake the
	 * system from C3.  Newer chipsets handle DMA
	 * during C3 automatically and BM_RLD is a NOP.
	 * In either case, the proper way to
	 * handle BM_RLD is to set it and leave it set.
	 */
	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);

	return;
}

static int acpi_cst_latency_cmp(const void *a, const void *b)
{
	const struct acpi_processor_cx *x = a, *y = b;

	if (!(x->valid && y->valid))
		return 0;
	if (x->latency > y->latency)
		return 1;
	if (x->latency < y->latency)
		return -1;
	return 0;
}

static void acpi_cst_latency_swap(void *a, void *b, int n)
{
	struct acpi_processor_cx *x = a, *y = b;
	u32 tmp;

	if (!(x->valid && y->valid))
		return;
	tmp = x->latency;
	x->latency = y->latency;
	y->latency = tmp;
}

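/*
 * The cmp/swap pair above compares and exchanges only the latency field and
 * treats any pair involving an invalid state as equal, so sort() reorders
 * the latencies of the valid entries while leaving the rest of each state's
 * data in place.
 */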
static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;
	unsigned int last_latency = 0;
	unsigned int last_type = 0;
	bool buggy_latency = false;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			if (!cx->address)
				break;
			cx->valid = 1;
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			break;
		}
		if (!cx->valid)
			continue;
		if (cx->type >= last_type && cx->latency < last_latency)
			buggy_latency = true;
		last_latency = cx->latency;
		last_type = cx->type;

		lapic_timer_check_state(i, pr, cx);
		tsc_check_state(cx->type);
		working++;
	}

	if (buggy_latency) {
		pr_notice("FW issue: working around C-state latencies out of order\n");
		sort(&pr->power.states[1], max_cstate,
		     sizeof(struct acpi_processor_cx),
		     acpi_cst_latency_cmp,
		     acpi_cst_latency_swap);
	}

	lapic_timer_propagate_broadcast(pr);

	return (working);
}

static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;


	/* NOTE: the idle thread may not be running while calling
	 * this function */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			pr->flags.power = 1;
		}
	}

	return 0;
}

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	if (bm_check_disable)
		return 0;

	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}

static void wait_for_freeze(void)
{
#ifdef	CONFIG_X86
	/* No delay is needed if we are in guest */
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return;
#endif
	/*
	 * Dummy wait op - must do something useless after P_LVL2 read
	 * because chipsets cannot guarantee that STPCLK# signal
	 * gets asserted in time to freeze execution properly.
	 */
	inl(acpi_gbl_FADT.xpm_timer_block.address);
}

/**
 * acpi_idle_do_entry - enter idle state using the appropriate method
 * @cx: cstate data
 *
 * Caller disables interrupt before call and enables interrupt after return.
 */
static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		/* IO port based C-state */
		inb(cx->address);
		wait_for_freeze();
	}
}

/**
 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
 * @dev: the target CPU
 * @index: the index of suggested state
 */
static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	ACPI_FLUSH_CPU_CACHE();

	while (1) {

		if (cx->entry_method == ACPI_CSTATE_HALT)
			safe_halt();
		else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
			inb(cx->address);
			wait_for_freeze();
		} else
			return -ENODEV;

#if defined(CONFIG_X86) && defined(CONFIG_HOTPLUG_CPU)
		cond_wakeup_cpu0();
#endif
	}

	/* Never reached */
	return 0;
}

static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
{
	return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst &&
		!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED);
}

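/*
 * c3_cpu_count tracks how many CPUs are currently in C3, so that bus master
 * arbitration is disabled (ARB_DIS) only once every online CPU has entered
 * C3 and is re-enabled as soon as any of them leaves it.
 */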
static int c3_cpu_count;
static DEFINE_RAW_SPINLOCK(c3_lock);

/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @drv: cpuidle driver
 * @pr: Target processor
 * @cx: Target state context
 * @index: index of target state
 */
static int __cpuidle acpi_idle_enter_bm(struct cpuidle_driver *drv,
			       struct acpi_processor *pr,
			       struct acpi_processor_cx *cx,
			       int index)
{
	static struct acpi_processor_cx safe_cx = {
		.entry_method = ACPI_CSTATE_HALT,
	};

	/*
	 * Disable bus master:
	 * bm_check implies we need ARB_DIS;
	 * bm_control indicates whether we can do ARB_DIS.
	 *
	 * That leaves a case where bm_check is set and bm_control is not set.
	 * In that case we cannot do much, so we enter C3 without doing anything.
	 */
	bool dis_bm = pr->flags.bm_control;

	/* If we can skip BM, demote to a safe state. */
	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
		dis_bm = false;
		index = drv->safe_state_index;
		if (index >= 0) {
			cx = this_cpu_read(acpi_cstate[index]);
		} else {
			cx = &safe_cx;
			index = -EBUSY;
		}
	}

	if (dis_bm) {
		raw_spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		raw_spin_unlock(&c3_lock);
	}

	rcu_idle_enter();

	acpi_idle_do_entry(cx);

	rcu_idle_exit();

	/* Re-enable bus master arbitration */
	if (dis_bm) {
		raw_spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		raw_spin_unlock(&c3_lock);
	}

	return index;
}

static int __cpuidle acpi_idle_enter(struct cpuidle_device *dev,
			   struct cpuidle_driver *drv, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
	struct acpi_processor *pr;

	pr = __this_cpu_read(processors);
	if (unlikely(!pr))
		return -EINVAL;

	if (cx->type != ACPI_STATE_C1) {
		if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check)
			return acpi_idle_enter_bm(drv, pr, cx, index);

		/* C2 to C1 demotion. */
		if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
			index = ACPI_IDLE_STATE_START;
			cx = per_cpu(acpi_cstate[index], dev->cpu);
		}
	}

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	acpi_idle_do_entry(cx);

	return index;
}

static int __cpuidle acpi_idle_enter_s2idle(struct cpuidle_device *dev,
				  struct cpuidle_driver *drv, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	if (cx->type == ACPI_STATE_C3) {
		struct acpi_processor *pr = __this_cpu_read(processors);

		if (unlikely(!pr))
			return 0;

		if (pr->flags.bm_check) {
			u8 bm_sts_skip = cx->bm_sts_skip;

			/* Don't check BM_STS, do an unconditional ARB_DIS for S2IDLE */
			cx->bm_sts_skip = 1;
			acpi_idle_enter_bm(drv, pr, cx, index);
			cx->bm_sts_skip = bm_sts_skip;

			return 0;
		} else {
			ACPI_FLUSH_CPU_CACHE();
		}
	}
	acpi_idle_do_entry(cx);

	return 0;
}

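/*
 * C-state setup is split in two: acpi_processor_setup_cpuidle_cx() below
 * fills in the per-CPU acpi_cstate[] pointers and per-state flags, while
 * acpi_processor_setup_cstates() fills in the driver-wide cpuidle_state
 * table (names, latencies, enter callbacks).
 */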
static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
					   struct cpuidle_device *dev)
{
	int i, count = ACPI_IDLE_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;

	if (max_cstate == 0)
		max_cstate = 1;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		state = &acpi_idle_driver.states[count];
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

		per_cpu(acpi_cstate[count], dev->cpu) = cx;

		if (lapic_timer_needs_broadcast(pr, cx))
			state->flags |= CPUIDLE_FLAG_TIMER_STOP;

		if (cx->type == ACPI_STATE_C3) {
			state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;
			if (pr->flags.bm_check)
				state->flags |= CPUIDLE_FLAG_RCU_IDLE;
		}

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	if (!count)
		return -EINVAL;

	return 0;
}

static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
	int i, count;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (max_cstate == 0)
		max_cstate = 1;

	if (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX)) {
		cpuidle_poll_state_init(drv);
		count = 1;
	} else {
		count = 0;
	}

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

		state = &drv->states[count];
		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = cx->latency;
		state->target_residency = cx->latency * latency_factor;
		state->enter = acpi_idle_enter;

		state->flags = 0;
		if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2) {
			state->enter_dead = acpi_idle_play_dead;
			drv->safe_state_index = count;
		}
		/*
		 * Halt-induced C1 is not good for ->enter_s2idle, because it
		 * re-enables interrupts on exit.  Moreover, C1 is generally not
		 * particularly interesting from the suspend-to-idle angle, so
		 * avoid C1 and the situations in which we may need to fall back
		 * to it altogether.
		 */
		if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
			state->enter_s2idle = acpi_idle_enter_s2idle;

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	drv->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}
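
/*
 * Example: with the default latency_factor of 2, a C-state whose exit
 * latency is 100 us is given a target residency of 200 us above, so the
 * cpuidle governor will only pick it when the expected idle period is at
 * least that long.
 */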

static inline void acpi_processor_cstate_first_run_checks(void)
{
	static int first_run;

	if (first_run)
		return;
	dmi_check_system(processor_power_dmi_table);
	max_cstate = acpi_processor_cstate_check(max_cstate);
	if (max_cstate < ACPI_C_STATES_MAX)
		pr_notice("processor limited to max C-state %d\n", max_cstate);
	first_run++;

	if (nocst)
		return;

	acpi_processor_claim_cst_control();
}
#else

static inline int disabled_by_idle_boot_param(void) { return 0; }
static inline void acpi_processor_cstate_first_run_checks(void) { }
static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
	return -ENODEV;
}

static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
					   struct cpuidle_device *dev)
{
	return -EINVAL;
}

static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
	return -EINVAL;
}

#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */

struct acpi_lpi_states_array {
	unsigned int size;
	unsigned int composite_states_size;
	struct acpi_lpi_state *entries;
	struct acpi_lpi_state *composite_states[ACPI_PROCESSOR_MAX_POWER];
};
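
/*
 * One acpi_lpi_states_array holds the _LPI data of a single level in the
 * processor hierarchy: @entries/@size are the raw states parsed from that
 * level's _LPI package, while @composite_states/@composite_states_size point
 * at the flattened composite states produced for that level.
 */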

static int obj_get_integer(union acpi_object *obj, u32 *value)
{
	if (obj->type != ACPI_TYPE_INTEGER)
		return -EINVAL;

	*value = obj->integer.value;
	return 0;
}

static int acpi_processor_evaluate_lpi(acpi_handle handle,
				       struct acpi_lpi_states_array *info)
{
	acpi_status status;
	int ret = 0;
	int pkg_count, state_idx = 1, loop;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *lpi_data;
	struct acpi_lpi_state *lpi_state;

	status = acpi_evaluate_object(handle, "_LPI", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _LPI, giving up\n"));
		return -ENODEV;
	}

	lpi_data = buffer.pointer;

	/* There must be at least 4 elements = 3 elements + 1 package */
	if (!lpi_data || lpi_data->type != ACPI_TYPE_PACKAGE ||
	    lpi_data->package.count < 4) {
		pr_debug("not enough elements in _LPI\n");
		ret = -ENODATA;
		goto end;
	}

	pkg_count = lpi_data->package.elements[2].integer.value;

	/* Validate number of power states. */
	if (pkg_count < 1 || pkg_count != lpi_data->package.count - 3) {
		pr_debug("count given by _LPI is not valid\n");
		ret = -ENODATA;
		goto end;
	}

	lpi_state = kcalloc(pkg_count, sizeof(*lpi_state), GFP_KERNEL);
	if (!lpi_state) {
		ret = -ENOMEM;
		goto end;
	}

	info->size = pkg_count;
	info->entries = lpi_state;

	/* LPI States start at index 3 */
	for (loop = 3; state_idx <= pkg_count; loop++, state_idx++, lpi_state++) {
		union acpi_object *element, *pkg_elem, *obj;

		element = &lpi_data->package.elements[loop];
		if (element->type != ACPI_TYPE_PACKAGE || element->package.count < 7)
			continue;

		pkg_elem = element->package.elements;

		obj = pkg_elem + 6;
		if (obj->type == ACPI_TYPE_BUFFER) {
			struct acpi_power_register *reg;

			reg = (struct acpi_power_register *)obj->buffer.pointer;
			if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
			    reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)
				continue;

			lpi_state->address = reg->address;
			lpi_state->entry_method =
				reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE ?
				ACPI_CSTATE_FFH : ACPI_CSTATE_SYSTEMIO;
		} else if (obj->type == ACPI_TYPE_INTEGER) {
			lpi_state->entry_method = ACPI_CSTATE_INTEGER;
			lpi_state->address = obj->integer.value;
		} else {
			continue;
		}

		/* elements[7,8] skipped for now i.e. Residency/Usage counter */

		obj = pkg_elem + 9;
		if (obj->type == ACPI_TYPE_STRING)
			strlcpy(lpi_state->desc, obj->string.pointer,
				ACPI_CX_DESC_LEN);

		lpi_state->index = state_idx;
		if (obj_get_integer(pkg_elem + 0, &lpi_state->min_residency)) {
			pr_debug("No min. residency found, assuming 10 us\n");
			lpi_state->min_residency = 10;
		}

		if (obj_get_integer(pkg_elem + 1, &lpi_state->wake_latency)) {
			pr_debug("No wakeup latency found, assuming 10 us\n");
			lpi_state->wake_latency = 10;
		}

		if (obj_get_integer(pkg_elem + 2, &lpi_state->flags))
			lpi_state->flags = 0;

		if (obj_get_integer(pkg_elem + 3, &lpi_state->arch_flags))
			lpi_state->arch_flags = 0;

		if (obj_get_integer(pkg_elem + 4, &lpi_state->res_cnt_freq))
			lpi_state->res_cnt_freq = 1;

		if (obj_get_integer(pkg_elem + 5, &lpi_state->enable_parent_state))
			lpi_state->enable_parent_state = 0;
	}

	acpi_handle_debug(handle, "Found %d power states\n", state_idx);
end:
	kfree(buffer.pointer);
	return ret;
}

/*
 * flat_state_cnt - the number of composite LPI states after the process of flattening
 */
static int flat_state_cnt;

/**
 * combine_lpi_states - combine local and parent LPI states to form a composite LPI state
 *
 * @local: local LPI state
 * @parent: parent LPI state
 * @result: composite LPI state
 */
static bool combine_lpi_states(struct acpi_lpi_state *local,
			       struct acpi_lpi_state *parent,
			       struct acpi_lpi_state *result)
{
	if (parent->entry_method == ACPI_CSTATE_INTEGER) {
		if (!parent->address) /* 0 means autopromotable */
			return false;
		result->address = local->address + parent->address;
	} else {
		result->address = parent->address;
	}

	result->min_residency = max(local->min_residency, parent->min_residency);
	result->wake_latency = local->wake_latency + parent->wake_latency;
	result->enable_parent_state = parent->enable_parent_state;
	result->entry_method = local->entry_method;

	result->flags = parent->flags;
	result->arch_flags = parent->arch_flags;
	result->index = parent->index;

	strlcpy(result->desc, local->desc, ACPI_CX_DESC_LEN);
	strlcat(result->desc, "+", ACPI_CX_DESC_LEN);
	strlcat(result->desc, parent->desc, ACPI_CX_DESC_LEN);
	return true;
}

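/*
 * Worked example (hypothetical values): combining a local state
 * {min_residency = 100, wake_latency = 5} with a parent state
 * {min_residency = 400, wake_latency = 20} yields a composite state with
 * min_residency = max(100, 400) = 400 and wake_latency = 5 + 20 = 25,
 * i.e. the deepest residency requirement and the summed wakeup cost.
 */
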
#define ACPI_LPI_STATE_FLAGS_ENABLED			BIT(0)

static void stash_composite_state(struct acpi_lpi_states_array *curr_level,
				  struct acpi_lpi_state *t)
{
	curr_level->composite_states[curr_level->composite_states_size++] = t;
}

static int flatten_lpi_states(struct acpi_processor *pr,
			      struct acpi_lpi_states_array *curr_level,
			      struct acpi_lpi_states_array *prev_level)
{
	int i, j, state_count = curr_level->size;
	struct acpi_lpi_state *p, *t = curr_level->entries;

	curr_level->composite_states_size = 0;
	for (j = 0; j < state_count; j++, t++) {
		struct acpi_lpi_state *flpi;

		if (!(t->flags & ACPI_LPI_STATE_FLAGS_ENABLED))
			continue;

		if (flat_state_cnt >= ACPI_PROCESSOR_MAX_POWER) {
			pr_warn("Limiting number of LPI states to max (%d)\n",
				ACPI_PROCESSOR_MAX_POWER);
			pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}

		flpi = &pr->power.lpi_states[flat_state_cnt];

		if (!prev_level) { /* leaf/processor node */
			memcpy(flpi, t, sizeof(*t));
			stash_composite_state(curr_level, flpi);
			flat_state_cnt++;
			continue;
		}

		for (i = 0; i < prev_level->composite_states_size; i++) {
			p = prev_level->composite_states[i];
			if (t->index <= p->enable_parent_state &&
			    combine_lpi_states(p, t, flpi)) {
				stash_composite_state(curr_level, flpi);
				flat_state_cnt++;
				flpi++;
			}
		}
	}

	kfree(curr_level->entries);
	return 0;
}

int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
{
	return -EOPNOTSUPP;
}

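/*
 * Evaluate _LPI at the processor itself first, then walk up the hierarchy
 * of processor containers, flattening each level's states against the
 * composite states built from the levels below it.
 */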
static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
{
	int ret, i;
	acpi_status status;
	acpi_handle handle = pr->handle, pr_ahandle;
	struct acpi_device *d = NULL;
	struct acpi_lpi_states_array info[2], *tmp, *prev, *curr;

	/* make sure our architecture has support */
	ret = acpi_processor_ffh_lpi_probe(pr->id);
	if (ret == -EOPNOTSUPP)
		return ret;

	if (!osc_pc_lpi_support_confirmed)
		return -EOPNOTSUPP;

	if (!acpi_has_method(handle, "_LPI"))
		return -EINVAL;

	flat_state_cnt = 0;
	prev = &info[0];
	curr = &info[1];
	handle = pr->handle;
	ret = acpi_processor_evaluate_lpi(handle, prev);
	if (ret)
		return ret;
	flatten_lpi_states(pr, prev, NULL);

	status = acpi_get_parent(handle, &pr_ahandle);
	while (ACPI_SUCCESS(status)) {
		acpi_bus_get_device(pr_ahandle, &d);
		handle = pr_ahandle;

		if (strcmp(acpi_device_hid(d), ACPI_PROCESSOR_CONTAINER_HID))
			break;

		/* can be optional ? */
		if (!acpi_has_method(handle, "_LPI"))
			break;

		ret = acpi_processor_evaluate_lpi(handle, curr);
		if (ret)
			break;

		/* flatten all the LPI states in this level of hierarchy */
		flatten_lpi_states(pr, curr, prev);

		tmp = prev, prev = curr, curr = tmp;

		status = acpi_get_parent(handle, &pr_ahandle);
	}

	pr->power.count = flat_state_cnt;
	/* reset the index after flattening */
	for (i = 0; i < pr->power.count; i++)
		pr->power.lpi_states[i].index = i;

	/* Tell driver that _LPI is supported. */
	pr->flags.has_lpi = 1;
	pr->flags.power = 1;

	return 0;
}

int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
{
	return -ENODEV;
}

/**
 * acpi_idle_lpi_enter - enters an ACPI LPI state
 * @dev: the target CPU
 * @drv: cpuidle driver containing cpuidle state info
 * @index: index of target state
 *
 * Return: 0 for success or negative value for error
 */
static int acpi_idle_lpi_enter(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct acpi_lpi_state *lpi;

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	lpi = &pr->power.lpi_states[index];
	if (lpi->entry_method == ACPI_CSTATE_FFH)
		return acpi_processor_ffh_lpi_enter(lpi);

	return -EINVAL;
}

static int acpi_processor_setup_lpi_states(struct acpi_processor *pr)
{
	int i;
	struct acpi_lpi_state *lpi;
	struct cpuidle_state *state;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (!pr->flags.has_lpi)
		return -EOPNOTSUPP;

	for (i = 0; i < pr->power.count && i < CPUIDLE_STATE_MAX; i++) {
		lpi = &pr->power.lpi_states[i];

		state = &drv->states[i];
		snprintf(state->name, CPUIDLE_NAME_LEN, "LPI-%d", i);
		strlcpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = lpi->wake_latency;
		state->target_residency = lpi->min_residency;
		if (lpi->arch_flags)
			state->flags |= CPUIDLE_FLAG_TIMER_STOP;
		state->enter = acpi_idle_lpi_enter;
		drv->safe_state_index = i;
	}

	drv->state_count = i;

	return 0;
}

/**
 * acpi_processor_setup_cpuidle_states - prepares and configures cpuidle
 * global state data i.e. idle routines
 *
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
{
	int i;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (!pr->flags.power_setup_done || !pr->flags.power)
		return -EINVAL;

	drv->safe_state_index = -1;
	for (i = ACPI_IDLE_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
		drv->states[i].name[0] = '\0';
		drv->states[i].desc[0] = '\0';
	}

	if (pr->flags.has_lpi)
		return acpi_processor_setup_lpi_states(pr);

	return acpi_processor_setup_cstates(pr);
}

/**
 * acpi_processor_setup_cpuidle_dev - prepares and configures the CPUIDLE
 * device i.e. per-cpu data
 *
 * @pr: the ACPI processor
 * @dev: the cpuidle device
 */
static int acpi_processor_setup_cpuidle_dev(struct acpi_processor *pr,
					    struct cpuidle_device *dev)
{
	if (!pr->flags.power_setup_done || !pr->flags.power || !dev)
		return -EINVAL;

	dev->cpu = pr->id;
	if (pr->flags.has_lpi)
		return acpi_processor_ffh_lpi_probe(pr->id);

	return acpi_processor_setup_cpuidle_cx(pr, dev);
}

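/*
 * Power info discovery: prefer _LPI and fall back to the legacy C-state
 * path (_CST, then FADT/P_BLK) when LPI is unsupported or fails to parse.
 */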
static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	int ret;

	ret = acpi_processor_get_lpi_info(pr);
	if (ret)
		ret = acpi_processor_get_cstate_info(pr);

	return ret;
}

int acpi_processor_hotplug(struct acpi_processor *pr)
{
	int ret = 0;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	dev = per_cpu(acpi_cpuidle_device, pr->id);
	cpuidle_pause_and_lock();
	cpuidle_disable_device(dev);
	ret = acpi_processor_get_power_info(pr);
	if (!ret && pr->flags.power) {
		acpi_processor_setup_cpuidle_dev(pr, dev);
		ret = cpuidle_enable_device(dev);
	}
	cpuidle_resume_and_unlock();

	return ret;
}

int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
{
	int cpu;
	struct acpi_processor *_pr;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/*
	 * FIXME:  Design the ACPI notification to make it once per
	 * system instead of once per-cpu.  This condition is a hack
	 * to make the code that updates C-States be called once.
	 */

	if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {

		/* Protect against cpu-hotplug */
		get_online_cpus();
		cpuidle_pause_and_lock();

		/* Disable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			dev = per_cpu(acpi_cpuidle_device, cpu);
			cpuidle_disable_device(dev);
		}

		/* Populate updated C-state information */
		acpi_processor_get_power_info(pr);
		acpi_processor_setup_cpuidle_states(pr);

		/* Enable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			acpi_processor_get_power_info(_pr);
			if (_pr->flags.power) {
				dev = per_cpu(acpi_cpuidle_device, cpu);
				acpi_processor_setup_cpuidle_dev(_pr, dev);
				cpuidle_enable_device(dev);
			}
		}
		cpuidle_resume_and_unlock();
		put_online_cpus();
	}

	return 0;
}

static int acpi_processor_registered;

int acpi_processor_power_init(struct acpi_processor *pr)
{
	int retval;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	acpi_processor_cstate_first_run_checks();

	if (!acpi_processor_get_power_info(pr))
		pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on
	 * platforms that only support C1.
	 */
	if (pr->flags.power) {
		/* Register acpi_idle_driver if not already registered */
		if (!acpi_processor_registered) {
			acpi_processor_setup_cpuidle_states(pr);
			retval = cpuidle_register_driver(&acpi_idle_driver);
			if (retval)
				return retval;
			pr_debug("%s registered with cpuidle\n",
				 acpi_idle_driver.name);
		}

		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		if (!dev)
			return -ENOMEM;
		per_cpu(acpi_cpuidle_device, pr->id) = dev;

		acpi_processor_setup_cpuidle_dev(pr, dev);

		/*
		 * Register the per-CPU cpuidle_device. The cpuidle driver
		 * must already be registered before registering the device.
		 */
		retval = cpuidle_register_device(dev);
		if (retval) {
			if (acpi_processor_registered == 0)
				cpuidle_unregister_driver(&acpi_idle_driver);
			return retval;
		}
		acpi_processor_registered++;
	}
	return 0;
}

int acpi_processor_power_exit(struct acpi_processor *pr)
{
	struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);

	if (disabled_by_idle_boot_param())
		return 0;

	if (pr->flags.power) {
		cpuidle_unregister_device(dev);
		acpi_processor_registered--;
		if (acpi_processor_registered == 0)
			cpuidle_unregister_driver(&acpi_idle_driver);
	}

	pr->flags.power_setup_done = 0;
	return 0;
}
1418