Lines Matching +full:idle +full:- +full:state +full:- +full:name
1 // SPDX-License-Identifier: GPL-2.0-only
3 * intel_idle.c - native hardware idle loop for modern Intel processors
5 * Copyright (c) 2013 - 2020, Intel Corporation.
20 * All CPUs have the same idle states as the boot CPU
23 * for preventing entry into deep C-states
25 * CPU will flush caches as needed when entering a C-state via MWAIT
33 * ACPI has a .suspend hack to turn off deep C-states during suspend
39 /* un-comment DEBUG to enable pr_debug() statements */
55 #include <asm/intel-family.h>
56 #include <asm/nospec-branch.h>
65 .name = "intel_idle",
69 static int max_cstate = CPUIDLE_STATE_MAX - 1;
88 * Hardware C-state auto-demotion may not always be optimal.
103 * Enable interrupts before entering the C-state. On some platforms and for
104 * some C-states, this may measurably decrease interrupt latency.
109 * Enable this state by default even if the ACPI _CST does not list it.
114 * Disable IBRS across idle (when KERNEL_IBRS); exclusive with IRQ_ENABLE
120 * Initialize the large xstate for entering the C6 state.
125 * MWAIT takes an 8-bit "hint" in EAX "suggesting"
126 * the C-state (top nibble) and sub-state (bottom nibble)
129 * We store the hint at the top of our "flags" for each state.
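The hint layout above is plain bit arithmetic. A minimal userspace sketch, assuming the MWAIT2flg()/flg2MWAIT() helpers keep the 8-bit hint in bits 31:24 of the flags word (the macro definitions here are reproduced from memory, not quoted from the file):

#include <stdio.h>

/* Assumed to match the driver: keep the 8-bit MWAIT hint in flags bits 31:24. */
#define MWAIT2flg(eax)   (((eax) & 0xFF) << 24)
#define flg2MWAIT(flags) (((flags) >> 24) & 0xFF)

int main(void)
{
        unsigned long flags = MWAIT2flg(0x20);  /* hint 0x20: C-state 2, sub-state 0 */
        unsigned long eax = flg2MWAIT(flags);

        printf("hint=0x%02lx cstate=%lu substate=%lu\n",
               eax, (eax >> 4) & 0xF, eax & 0xF);       /* top / bottom nibble */
        return 0;
}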
138 struct cpuidle_state *state = &drv->states[index]; in __intel_idle() local
139 unsigned long eax = flg2MWAIT(state->flags); in __intel_idle()
148 * intel_idle - Ask the processor to enter the given idle state.
151 * @index: Target idle state index.
154 * @dev is idle and it can try to enter the idle state corresponding to @index.
156 * If the local APIC timer is not known to be reliable in the target idle state,
157 * enable one-shot tick broadcasting for the target CPU before executing MWAIT.
199 * intel_idle_s2idle - Ask the processor to enter the given idle state.
202 * @index: Target idle state index.
205 * @dev is idle and it can try to enter the idle state corresponding to @index.
207 * Invoked as a suspend-to-idle callback routine with frozen user space, frozen
214 struct cpuidle_state *state = &drv->states[index]; in intel_idle_s2idle() local
215 unsigned long eax = flg2MWAIT(state->flags); in intel_idle_s2idle()
217 if (state->flags & CPUIDLE_FLAG_INIT_XSTATE) in intel_idle_s2idle()
232 .name = "C1",
240 .name = "C1E",
248 .name = "C3",
256 .name = "C6",
269 .name = "C1",
277 .name = "C1E",
285 .name = "C3",
293 .name = "C6",
301 .name = "C7",
314 .name = "C1",
322 .name = "C6N",
330 .name = "C6S",
338 .name = "C7",
346 .name = "C7S",
359 .name = "C1",
367 .name = "C6N",
375 .name = "C6S",
383 .name = "C7",
391 .name = "C7S",
404 .name = "C1",
412 .name = "C1E",
420 .name = "C3",
428 .name = "C6",
436 .name = "C7",
449 .name = "C1",
457 .name = "C1E",
465 .name = "C3",
473 .name = "C6",
486 .name = "C1",
494 .name = "C1E",
502 .name = "C3",
510 .name = "C6",
523 .name = "C1",
531 .name = "C1E",
539 .name = "C3",
547 .name = "C6",
560 .name = "C1",
568 .name = "C1E",
576 .name = "C3",
584 .name = "C6",
592 .name = "C7s",
600 .name = "C8",
608 .name = "C9",
616 .name = "C10",
628 .name = "C1",
636 .name = "C1E",
644 .name = "C3",
652 .name = "C6",
660 .name = "C7s",
668 .name = "C8",
676 .name = "C9",
684 .name = "C10",
697 .name = "C1",
705 .name = "C1E",
713 .name = "C3",
721 .name = "C6",
729 .name = "C7s",
737 .name = "C8",
745 .name = "C9",
753 .name = "C10",
766 .name = "C1",
774 .name = "C1E",
782 .name = "C6",
795 .name = "C1",
803 .name = "C1E",
811 .name = "C6",
834 .name = "C1",
842 .name = "C1E",
850 .name = "C6",
858 .name = "C8",
866 .name = "C10",
879 .name = "C1",
887 .name = "C1E",
895 .name = "C6",
903 .name = "C8",
911 .name = "C10",
924 .name = "C1",
932 .name = "C1E",
940 .name = "C6",
948 .name = "C8",
956 .name = "C10",
969 .name = "C1",
977 .name = "C1E",
985 .name = "C6",
999 .name = "C1E",
1007 .name = "C2",
1015 .name = "C4",
1023 .name = "C6",
1035 .name = "C1",
1043 .name = "C4",
1051 .name = "C6",
1059 .name = "C7",
1067 .name = "C9",
1079 .name = "C1",
1087 .name = "C6",
1099 .name = "C1",
1107 .name = "C6",
1120 .name = "C1",
1128 .name = "C1E",
1136 .name = "C6",
1144 .name = "C7s",
1152 .name = "C8",
1160 .name = "C9",
1168 .name = "C10",
1181 .name = "C1",
1189 .name = "C1E",
1197 .name = "C6",
1214 .name = "C1",
1222 .name = "C1E",
1230 .name = "C6",
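Each ".name" line above comes from a static table of cpuidle_state entries. As a hedged sketch of the shape of one such entry, using a stand-in struct so it compiles on its own (field names mirror the kernel; the latency and residency numbers are purely illustrative):

#include <stdio.h>

/* Minimal stand-in for the kernel's struct cpuidle_state, just to show the shape
 * of one table entry; field names mirror the kernel, values are illustrative. */
struct cpuidle_state_sketch {
        const char *name;
        const char *desc;
        unsigned int flags;             /* MWAIT hint in bits 31:24 plus feature flags */
        unsigned int exit_latency;      /* microseconds */
        unsigned int target_residency;  /* microseconds */
};

static const struct cpuidle_state_sketch example_c6 = {
        .name = "C6",
        .desc = "MWAIT 0x20",
        .flags = 0x20U << 24,           /* what MWAIT2flg(0x20) would produce */
        .exit_latency = 85,             /* illustrative numbers only */
        .target_residency = 200,
};

int main(void)
{
        printf("%s: %s, exit %u us, residency %u us\n", example_c6.name,
               example_c6.desc, example_c6.exit_latency, example_c6.target_residency);
        return 0;
}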
1448 static bool __init intel_idle_state_needs_timer_stop(struct cpuidle_state *state) in intel_idle_state_needs_timer_stop() argument
1450 unsigned long eax = flg2MWAIT(state->flags); in intel_idle_state_needs_timer_stop()
1456 * Switch over to one-shot tick broadcast if the target C-state in intel_idle_state_needs_timer_stop()
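In other words, the timer-stop decision boils down to whether the hint's C-state nibble is nonzero, i.e. the target is deeper than C1. A hedged userspace sketch of just that check (the driver additionally consults X86_FEATURE_ARAT, which is omitted here):

#include <stdbool.h>
#include <stdio.h>

/* Assumption: a nonzero C-state nibble in the MWAIT hint means "deeper than C1",
 * so the local APIC timer may stop and one-shot broadcast is needed without ARAT. */
static bool hint_deeper_than_c1(unsigned int eax_hint)
{
        return ((eax_hint >> 4) & 0xF) != 0;
}

int main(void)
{
        printf("C1 (hint 0x00): %d, C6 (hint 0x20): %d\n",
               hint_deeper_than_c1(0x00), hint_deeper_than_c1(0x20));
        return 0;
}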
1467 MODULE_PARM_DESC(no_acpi, "Do not use ACPI _CST for building the idle states list");
1471 MODULE_PARM_DESC(use_acpi, "Use ACPI _CST for building the idle states list");
1476 * intel_idle_cst_usable - Check if the _CST information can be used.
1478 * Check if all of the C-states listed by _CST in the max_cstate range are
1491 if (cx->entry_method != ACPI_CSTATE_FFH) in intel_idle_cst_usable()
1513 if (acpi_processor_evaluate_cst(pr->handle, cpu, &acpi_state_table)) in intel_idle_acpi_cst_extract()
1542 struct cpuidle_state *state; in intel_idle_init_cstates_acpi() local
1544 if (intel_idle_max_cstate_reached(cstate - 1)) in intel_idle_init_cstates_acpi()
1549 state = &drv->states[drv->state_count++]; in intel_idle_init_cstates_acpi()
1551 snprintf(state->name, CPUIDLE_NAME_LEN, "C%d_ACPI", cstate); in intel_idle_init_cstates_acpi()
1552 strscpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); in intel_idle_init_cstates_acpi()
1553 state->exit_latency = cx->latency; in intel_idle_init_cstates_acpi()
1555 * For C1-type C-states use the same number for both the exit in intel_idle_init_cstates_acpi()
1557 * C1 in the majority of the static C-states tables above. in intel_idle_init_cstates_acpi()
1558 * For the other types of C-states, however, set the target in intel_idle_init_cstates_acpi()
1560 * a reasonable balance between energy-efficiency and in intel_idle_init_cstates_acpi()
1563 state->target_residency = cx->latency; in intel_idle_init_cstates_acpi()
1564 if (cx->type > ACPI_STATE_C1) in intel_idle_init_cstates_acpi()
1565 state->target_residency *= 3; in intel_idle_init_cstates_acpi()
1567 state->flags = MWAIT2flg(cx->address); in intel_idle_init_cstates_acpi()
1568 if (cx->type > ACPI_STATE_C2) in intel_idle_init_cstates_acpi()
1569 state->flags |= CPUIDLE_FLAG_TLB_FLUSHED; in intel_idle_init_cstates_acpi()
1572 state->flags |= CPUIDLE_FLAG_OFF; in intel_idle_init_cstates_acpi()
1574 if (intel_idle_state_needs_timer_stop(state)) in intel_idle_init_cstates_acpi()
1575 state->flags |= CPUIDLE_FLAG_TIMER_STOP; in intel_idle_init_cstates_acpi()
1577 if (cx->type > ACPI_STATE_C1 && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) in intel_idle_init_cstates_acpi()
1578 mark_tsc_unstable("TSC halts in idle"); in intel_idle_init_cstates_acpi()
1580 state->enter = intel_idle; in intel_idle_init_cstates_acpi()
1581 state->enter_s2idle = intel_idle_s2idle; in intel_idle_init_cstates_acpi()
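The residency heuristic described in the comment fragments above is simple arithmetic: C1-type states reuse the _CST latency, deeper states get three times the exit latency. A small illustrative sketch (the function and parameter names are mine, not the driver's):

#include <stdio.h>

/* Illustrative mirror of the heuristic: C1-type states reuse the latency as the
 * target residency, deeper types use three times the exit latency. */
static unsigned int acpi_target_residency(int is_c1_type, unsigned int latency_us)
{
        unsigned int residency = latency_us;

        if (!is_c1_type)
                residency *= 3;
        return residency;
}

int main(void)
{
        /* Hypothetical _CST latencies: 1 us for a C1-type state, 50 us for a deeper one. */
        printf("C1-type: %u us, deeper: %u us\n",
               acpi_target_residency(1, 1), acpi_target_residency(0, 50));
        return 0;
}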
1590 * If there are no _CST C-states, do not disable any C-states by in intel_idle_off_by_default()
1616 * ivt_idle_state_table_update - Tune the idle states table for Ivy Town.
1618 * Tune IVT multi-socket targets.
1623 /* IVT uses a different table for 1-2, 3-4, and > 4 sockets */ in ivt_idle_state_table_update()
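A hedged sketch of the selection idea only: pick one of three table variants from the socket count (the enum names and the way the socket count is obtained are placeholders, not the driver's code):

#include <stdio.h>

enum ivt_table_variant { IVT_TABLE_2S, IVT_TABLE_4S, IVT_TABLE_8S };

/* Pick an idle-state table variant from the number of populated sockets,
 * following the 1-2 / 3-4 / more-than-4 split in the comment above. */
static enum ivt_table_variant ivt_pick_table(int sockets)
{
        if (sockets > 4)
                return IVT_TABLE_8S;
        if (sockets > 2)
                return IVT_TABLE_4S;
        return IVT_TABLE_2S;
}

int main(void)
{
        printf("2 sockets -> %d, 4 sockets -> %d, 8 sockets -> %d\n",
               ivt_pick_table(2), ivt_pick_table(4), ivt_pick_table(8));
        return 0;
}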
1645 * irtl_2_usec - IRTL to microseconds conversion.
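The interrupt response time limit (IRTL) registers are commonly described as a 10-bit time value plus a 3-bit unit selector; a hedged userspace sketch of such a conversion, with the bit layout and unit table treated as assumptions rather than quotes from the driver:

#include <stdint.h>
#include <stdio.h>

/* Assumed IRTL layout: bits 9:0 hold a time value, bits 12:10 select a unit in ns. */
static const uint64_t irtl_ns_units[] = { 1, 32, 1024, 32768, 1048576, 33554432, 0, 0 };

static uint64_t irtl_to_usec_sketch(uint64_t irtl)
{
        uint64_t ns = irtl_ns_units[(irtl >> 10) & 0x7];

        return (irtl & 0x3FF) * ns / 1000;      /* nanoseconds to microseconds */
}

int main(void)
{
        /* Hypothetical register value: unit index 2 (1024 ns), time value 100. */
        uint64_t irtl = (2ULL << 10) | 100;

        printf("%llu us\n", (unsigned long long)irtl_to_usec_sketch(irtl));
        return 0;
}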
1666 * bxt_idle_state_table_update - Fix up the Broxton idle states table.
1714 * sklh_idle_state_table_update - Fix up the Sky Lake idle states table.
1716 * On SKL-H (model 0x5e) skip C8 and C9 if C10 is enabled and SGX disabled.
1734 /* PC10 is not enabled in PKG C-state limit */ in sklh_idle_state_table_update()
1751 skl_cstates[5].flags |= CPUIDLE_FLAG_UNUSABLE; /* C8-SKL */ in sklh_idle_state_table_update()
1752 skl_cstates[6].flags |= CPUIDLE_FLAG_UNUSABLE; /* C9-SKL */ in sklh_idle_state_table_update()
1756 * skx_idle_state_table_update - Adjust the Sky Lake/Cascade Lake
1757 * idle states table.
1766 * 000b: C0/C1 (no package C-state support) in skx_idle_state_table_update()
1768 * 010b: C6 (non-retention) in skx_idle_state_table_update()
1770 * 111b: No Package C state limits. in skx_idle_state_table_update()
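Decoding the 3-bit package C-state limit field quoted above is a simple lookup; a hedged sketch that maps only the encodings shown in the comment (the MSR read itself and the remaining encodings are left out):

#include <stdio.h>

/* Map the low 3 bits of the package C-state limit field to a label; only the
 * encodings quoted in the comment above are handled, everything else is "other". */
static const char *pkg_cstate_limit_name(unsigned int limit)
{
        switch (limit & 0x7) {
        case 0x0: return "C0/C1 (no package C-state support)";
        case 0x2: return "C6 (non-retention)";
        case 0x7: return "no package C-state limit";
        default:  return "other";
        }
}

int main(void)
{
        printf("0b010 -> %s\n", pkg_cstate_limit_name(0x2));
        return 0;
}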
1786 * adl_idle_state_table_update - Adjust AlderLake idle states table.
1805 * spr_idle_state_table_update - Adjust Sapphire Rapids idle states table.
1812 * By default, the C6 state assumes the worst-case scenario of package in spr_idle_state_table_update()
1831 /* Ignore the C-state if there are NO sub-states in CPUID for it. */ in intel_idle_verify_cstate()
1836 mark_tsc_unstable("TSC halts in idle states deeper than C2"); in intel_idle_verify_cstate()
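The sub-state check reads the per-C-state nibbles that CPUID leaf 5 reports in EDX. A hedged sketch of that lookup; treating field 0 as C0, so a hint's C-state nibble N maps to field N + 1, is my reading of the convention and should be taken as an assumption:

#include <stdio.h>

/* CPUID.05H:EDX packs, for each MWAIT C-state, a 4-bit count of supported
 * sub-states.  Field 0 describes C0, so a hint's C-state nibble N is assumed
 * to map to field N + 1. */
static unsigned int mwait_substates_for_hint(unsigned int cpuid5_edx, unsigned int hint)
{
        unsigned int cstate_field = ((hint >> 4) & 0xF) + 1;

        return (cpuid5_edx >> (cstate_field * 4)) & 0xF;
}

int main(void)
{
        unsigned int edx = 0x11142120;  /* hypothetical CPUID.05H EDX value */

        /* Hint 0x20 (C-state nibble 2) -> field 3 -> 2 sub-states here. */
        printf("sub-states for hint 0x20: %u\n", mwait_substates_for_hint(edx, 0x20));
        return 0;
}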
1841 static void state_update_enter_method(struct cpuidle_state *state, int cstate) in state_update_enter_method() argument
1843 if (state->flags & CPUIDLE_FLAG_INIT_XSTATE) { in state_update_enter_method()
1848 WARN_ON_ONCE(state->flags & CPUIDLE_FLAG_IBRS); in state_update_enter_method()
1849 WARN_ON_ONCE(state->flags & CPUIDLE_FLAG_IRQ_ENABLE); in state_update_enter_method()
1850 state->enter = intel_idle_xstate; in state_update_enter_method()
1855 state->flags & CPUIDLE_FLAG_IBRS) { in state_update_enter_method()
1857 * IBRS mitigation requires that C-states are entered in state_update_enter_method()
1860 WARN_ON_ONCE(state->flags & CPUIDLE_FLAG_IRQ_ENABLE); in state_update_enter_method()
1861 state->enter = intel_idle_ibrs; in state_update_enter_method()
1865 if (state->flags & CPUIDLE_FLAG_IRQ_ENABLE) { in state_update_enter_method()
1866 state->enter = intel_idle_irq; in state_update_enter_method()
1871 pr_info("forced intel_idle_irq for state %d\n", cstate); in state_update_enter_method()
1872 state->enter = intel_idle_irq; in state_update_enter_method()
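Read together, the fragments above choose the enter callback in a fixed priority order. A compact sketch of that ordering as I read it from the fragments (flag and callback names are reused purely for readability; the real driver also checks CPU features such as KERNEL_IBRS):

#include <stdio.h>

#define FLAG_INIT_XSTATE   0x1
#define FLAG_IBRS          0x2
#define FLAG_IRQ_ENABLE    0x4

/* Priority order as read from the fragments above: xstate setup first, then the
 * IBRS variant, then IRQ-enabled entry (or the force_irq_on override), else the
 * plain intel_idle entry point. */
static const char *pick_enter_method(unsigned int flags, int force_irq_on)
{
        if (flags & FLAG_INIT_XSTATE)
                return "intel_idle_xstate";
        if (flags & FLAG_IBRS)
                return "intel_idle_ibrs";
        if ((flags & FLAG_IRQ_ENABLE) || force_irq_on)
                return "intel_idle_irq";
        return "intel_idle";
}

int main(void)
{
        printf("%s\n", pick_enter_method(FLAG_IBRS, 0));
        return 0;
}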
1906 struct cpuidle_state *state; in intel_idle_init_cstates_icpu() local
1916 /* If marked as unusable, skip this state. */ in intel_idle_init_cstates_icpu()
1918 pr_debug("state %s is disabled\n", in intel_idle_init_cstates_icpu()
1919 cpuidle_state_table[cstate].name); in intel_idle_init_cstates_icpu()
1928 drv->states[drv->state_count] = cpuidle_state_table[cstate]; in intel_idle_init_cstates_icpu()
1929 state = &drv->states[drv->state_count]; in intel_idle_init_cstates_icpu()
1931 state_update_enter_method(state, cstate); in intel_idle_init_cstates_icpu()
1934 if ((disabled_states_mask & BIT(drv->state_count)) || in intel_idle_init_cstates_icpu()
1935 ((icpu->use_acpi || force_use_acpi) && in intel_idle_init_cstates_icpu()
1937 !(state->flags & CPUIDLE_FLAG_ALWAYS_ENABLE))) in intel_idle_init_cstates_icpu()
1938 state->flags |= CPUIDLE_FLAG_OFF; in intel_idle_init_cstates_icpu()
1940 if (intel_idle_state_needs_timer_stop(state)) in intel_idle_init_cstates_icpu()
1941 state->flags |= CPUIDLE_FLAG_TIMER_STOP; in intel_idle_init_cstates_icpu()
1943 drv->state_count++; in intel_idle_init_cstates_icpu()
1946 if (icpu->byt_auto_demotion_disable_flag) { in intel_idle_init_cstates_icpu()
1953 * intel_idle_cpuidle_driver_init - Create the list of available idle states.
1961 drv->states[0].flags |= CPUIDLE_FLAG_OFF; in intel_idle_cpuidle_driver_init()
1963 drv->state_count = 1; in intel_idle_cpuidle_driver_init()
1999 * intel_idle_cpu_init - Register the target CPU with the cpuidle core.
2010 dev->cpu = cpu; in intel_idle_cpu_init()
2014 return -EIO; in intel_idle_cpu_init()
2041 if (!dev->registered) in intel_idle_cpu_online()
2048 * intel_idle_cpuidle_devices_uninit - Unregister all cpuidle devices.
2064 /* Do not load intel_idle at all for now if idle= is passed */ in intel_idle_init()
2066 return -ENODEV; in intel_idle_init()
2070 return -EPERM; in intel_idle_init()
2077 return -ENODEV; in intel_idle_init()
2082 return -ENODEV; in intel_idle_init()
2086 return -ENODEV; in intel_idle_init()
2093 return -ENODEV; in intel_idle_init()
2097 icpu = (const struct idle_cpu *)id->driver_data; in intel_idle_init()
2099 cpuidle_state_table = icpu->state_table; in intel_idle_init()
2100 auto_demotion_disable_flags = icpu->auto_demotion_disable_flags; in intel_idle_init()
2101 if (icpu->disable_promotion_to_c1e) in intel_idle_init()
2103 if (icpu->use_acpi || force_use_acpi) in intel_idle_init()
2106 return -ENODEV; in intel_idle_init()
2114 return -ENOMEM; in intel_idle_init()
2122 drv ? drv->name : "none"); in intel_idle_init()
2126 retval = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "idle/intel:online", in intel_idle_init()
2132 boot_cpu_has(X86_FEATURE_ARAT) ? "all C-states" : "C1"); in intel_idle_init()
2148 * support "intel_idle.max_cstate=..." at boot and also a read-only export of
2149 * it at /sys/module/intel_idle/parameters/max_cstate -- so using module_param
2155 * idle states to be disabled by default (as reflected by the names of the
2156 * corresponding idle state directories in sysfs, "state0", "state1" ...
2157 * "state<i>" ..., where <i> is the index of the given state).
2160 MODULE_PARM_DESC(states_off, "Mask of disabled idle states");
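The mask is interpreted bit-per-state in registration order, matching the "state<i>" sysfs directories described above. A small illustrative example with a hypothetical mask value:

#include <stdio.h>

int main(void)
{
        /* Hypothetical intel_idle.states_off=0x14: bits 2 and 4 set. */
        unsigned int states_off = 0x14;

        for (int i = 0; i < 8; i++)
                if (states_off & (1u << i))
                        printf("state%d would start disabled\n", i);
        return 0;
}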
2162 * Some platforms come with mutually exclusive C-states, so that if one is
2163 * enabled, the other C-states must not be used. Example: C1 and C1E on
2165 * preferred C-states among the groups of mutually exclusive C-states - the
2166 * selected C-states will be registered, the other C-states from the mutually
2168 * exclusive C-states, this parameter has no effect.
2171 MODULE_PARM_DESC(preferred_cstates, "Mask of preferred idle states");
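A generic sketch of the mutual-exclusion idea only; which bit selects which C-state is a platform-specific detail not shown in this excerpt, so the mapping below is purely illustrative:

#include <stdio.h>

/* Illustrative only: assume bit 0 prefers "C1" and bit 1 prefers "C1E". */
static const char *pick_preferred(unsigned int preferred_cstates)
{
        if (preferred_cstates & 0x1)
                return "register C1, leave C1E out of the mutually exclusive pair";
        if (preferred_cstates & 0x2)
                return "register C1E, leave C1 out of the mutually exclusive pair";
        return "use the platform default";
}

int main(void)
{
        printf("%s\n", pick_preferred(0x2));
        return 0;
}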
2173 * Debugging option that forces the driver to enter all C-states with
2174 * interrupts enabled. Does not apply to C-states with