/* SPDX-License-Identifier: GPL-2.0-only */

#include <assert.h>
#include <console/console.h>
#include <cpu/cpu.h>
#include <cpu/intel/common/common.h>
#include <cpu/intel/microcode.h>
#include <cpu/intel/smm_reloc.h>
#include <cpu/intel/turbo.h>
#include <cpu/x86/lapic.h>
#include <cpu/x86/mp.h>
#include <cpu/x86/msr.h>
#include <device/pci.h>
#include <fsp/api.h>
#include <intelblocks/acpi.h>
#include <intelblocks/cpulib.h>
#include <intelblocks/mp_init.h>
#include <intelblocks/msr.h>
#include <soc/cpu.h>
#include <soc/msr.h>
#include <soc/pci_devs.h>
#include <soc/soc_chip.h>
#include <soc/soc_info.h>

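/* Report whether the CPU is already in untrusted mode, i.e. whether the
   ENABLE_IA_UNTRUSTED bit is set in MSR_BIOS_DONE. */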
bool cpu_soc_is_in_untrusted_mode(void)
{
	msr_t msr;

	msr = rdmsr(MSR_BIOS_DONE);
	return !!(msr.lo & ENABLE_IA_UNTRUSTED);
}

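/* Signal BIOS Done to the CPU by setting the ENABLE_IA_UNTRUSTED bit in MSR_BIOS_DONE. */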
void cpu_soc_bios_done(void)
{
	msr_t msr;

	msr = rdmsr(MSR_BIOS_DONE);
	msr.lo |= ENABLE_IA_UNTRUSTED;
	wrmsr(MSR_BIOS_DONE, msr);
}

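/* Report the S0ix substates supported by this SoC: S0i2.0, S0i2.1 and S0i2.2. */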
uint8_t get_supported_lpm_mask(void)
{
	return LPM_S0i2_0 | LPM_S0i2_1 | LPM_S0i2_2;
}

static void soc_fsp_load(void)
{
	fsps_load();
}

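/*
 * Program per-core miscellaneous MSRs: fast strings and thermal monitor in
 * IA32_MISC_ENABLE, the EIST state, thermal interrupt settings and MSR_POWER_CTL.
 */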
static void configure_misc(void)
{
	msr_t msr;

	config_t *conf = (config_t *)config_of_soc();

	msr = rdmsr(IA32_MISC_ENABLE);
	msr.lo |= (1 << 0); /* Fast String enable */
	msr.lo |= (1 << 3); /* TM1/TM2/EMTTM enable */
	wrmsr(IA32_MISC_ENABLE, msr);

	/* Set EIST status */
	cpu_set_eist(conf->eist_enable);

	/* Disable Thermal interrupts */
	msr.lo = 0;
	msr.hi = 0;
	wrmsr(IA32_THERM_INTERRUPT, msr);

	/* Enable package critical interrupt only */
	msr.lo = 1 << 4;
	msr.hi = 0;
	wrmsr(IA32_PACKAGE_THERM_INTERRUPT, msr);

	/* Enable PROCHOT and Power Performance Platform Override */
	msr = rdmsr(MSR_POWER_CTL);
	msr.lo |= (1 << 0);  /* Enable Bi-directional PROCHOT as an input */
	msr.lo |= (1 << 23); /* Lock it */
	msr.lo |= (1 << 18); /* Power Performance Platform Override */
	wrmsr(MSR_POWER_CTL, msr);
}

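/*
 * Return the type of the core executing this code. On parts without hybrid
 * (big/small core) support, every core is reported as an Intel Core.
 */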
enum core_type get_soc_cpu_type(void)
{
	if (cpu_is_hybrid_supported())
		return cpu_get_cpu_type();
	else
		return CPUID_CORE_TYPE_INTEL_CORE;
}

bool soc_is_nominal_freq_supported(void)
{
	return true;
}

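/*
 * Switch the local APIC into X2APIC mode, but only when the late X2APIC
 * workaround is enabled in Kconfig.
 */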
static void enable_x2apic(void)
{
	if (!CONFIG(X2APIC_LATE_WORKAROUND))
		return;

	enable_lapic_mode(true);
}

/* All CPUs including BSP will run the following function. */
void soc_core_init(struct device *cpu)
{
	/* Clear out pending MCEs */
	/* TODO(adurbin): This should only be done on a cold boot. Also, some
	 * of these banks are core vs package scope. For now every CPU clears
	 * every bank. */
	mca_configure();

	enable_x2apic();

	enable_lapic_tpr();

	/* Configure Enhanced SpeedStep and Thermal Sensors */
	configure_misc();

	enable_pm_timer_emulation();

	/* Enable Direct Cache Access */
	configure_dca_cap();

	/* Set energy policy */
	set_energy_perf_bias(ENERGY_POLICY_NORMAL);

	const config_t *conf = config_of_soc();
	/* Set energy-performance preference */
	if (conf->enable_energy_perf_pref)
		if (check_energy_perf_cap())
			set_energy_perf_pref(conf->energy_perf_pref_value);

	/* Enable Turbo */
	enable_turbo();

	/* Set core type in struct cpu_info */
	set_dev_core_type();

	if (CONFIG(INTEL_TME) && is_tme_supported())
		set_tme_core_activate();

	if (CONFIG(DROP_CPU_FEATURE_PROGRAM_IN_FSP)) {
		/* Disable 3-strike error */
		if (CONFIG(SOC_INTEL_METEORLAKE_PRE_PRODUCTION_SILICON))
			disable_three_strike_error();
		else
			disable_signaling_three_strike_event();

		set_aesni_lock();

		/* Enable VMX */
		set_feature_ctrl_vmx_arg(CONFIG(ENABLE_VMX) && !conf->disable_vmx);

		/* Configure the feature control lock */
		set_feature_ctrl_lock();
	}
}

static void per_cpu_smm_trigger(void)
{
	/* Relocate the SMM handler. */
	smm_relocate();
}

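/*
 * Runs on the BSP before the APs are brought up: load FSP-S and, when requested
 * and supported, enable the energy-performance preference.
 */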
static void pre_mp_init(void)
{
	soc_fsp_load();

	const config_t *conf = config_of_soc();
	if (conf->enable_energy_perf_pref) {
		if (check_energy_perf_cap())
			enable_energy_perf_pref();
		else
			printk(BIOS_WARNING, "Energy Performance Preference not supported!\n");
	}
}

static void post_mp_init(void)
{
	/* Set Max Ratio */
	cpu_set_max_ratio();

	/*
	 * 1. Now that all APs, as well as the BSP, have been relocated, let
	 *    SMIs start flowing.
	 * 2. Skip enabling the power button SMI here and enable it after
	 *    BS_CHIPS_INIT, to avoid a shutdown hang caused by IPs that FSP-S
	 *    has not yet initialized.
	 */
	global_smi_enable_no_pwrbtn();
}

static const struct mp_ops mp_ops = {
	/*
	 * Skip pre-MP-init MTRR programming; the MTRRs are mirrored from the
	 * BSP, which set them prior to ramstage. The real MTRR programming is
	 * done after resource allocation.
	 */
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_cpu_count,
	.get_smm_info = smm_info,
	.get_microcode_info = get_microcode_info,
	.pre_mp_smm_init = smm_initialize,
	.per_cpu_smm_trigger = per_cpu_smm_trigger,
	.relocation_handler = smm_relocation_handler,
	.post_mp_init = post_mp_init,
};

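/*
 * Bring up all APs (including SMM relocation), then program the thermal
 * throttle (TCC) activation offset.
 */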
void mp_init_cpus(struct bus *cpu_bus)
{
	if (mp_init_with_smm(cpu_bus, &mp_ops))
		printk(BIOS_ERR, "MP initialization failure.\n");

	/* Thermal throttle activation offset */
	configure_tcc_thermal_target();
}

int soc_skip_ucode_update(u32 current_patch_id, u32 new_patch_id)
{
	if (!CONFIG(CHROMEOS))
		return 0;
	/*
	 * Locked RO Descriptor Implications:
	 *
	 * - A locked descriptor signals the RO binary is fixed; the FIT will load the
	 *   RO's microcode during system reset.
	 * - Attempts to load newer microcode from the RW CBFS will cause a boot-time
	 *   delay (~60ms, core-dependent), as the microcode must be reloaded on BSP+APs.
	 * - The kernel can load microcode updates without impacting AP FW boot time.
	 * - Skipping RW CBFS microcode loading is low-risk when the RO is locked,
	 *   prioritizing fast boot times.
	 */
	if (CONFIG(LOCK_MANAGEMENT_ENGINE) && current_patch_id)
		return 1;

	return 0;
}