/* SPDX-License-Identifier: GPL-2.0-only */

#include <console/console.h>
#include <cpu/cpu.h>
#include <cpu/intel/common/common.h>
#include <cpu/intel/em64t100_save_state.h>
#include <cpu/intel/microcode.h>
#include <cpu/intel/smm_reloc.h>
#include <cpu/intel/turbo.h>
#include <cpu/x86/lapic.h>
#include <cpu/x86/mp.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/smm.h>
#include <device/device.h>
#include <reg_script.h>
#include <soc/iosf.h>
#include <soc/msr.h>
#include <soc/pattrs.h>
#include <soc/ramstage.h>
#include <types.h>

/* Core level MSRs */
static const struct reg_script core_msr_script[] = {
	/* Dynamic L2 shrink enable and threshold, clear SINGLE_PCTL bit 11 */
	REG_MSR_RMW(MSR_PKG_CST_CONFIG_CONTROL, ~0x3f080f, 0xe0008),
	REG_MSR_RMW(MSR_POWER_MISC, ~(ENABLE_ULFM_AUTOCM_MASK | ENABLE_INDP_AUTOCM_MASK), 0),

	/* Disable C1E */
	REG_MSR_RMW(MSR_POWER_CTL, ~0x2, 0),
	REG_MSR_OR(MSR_POWER_MISC, 0x44),
	REG_SCRIPT_END
};

static void soc_core_init(struct device *cpu)
{
	printk(BIOS_DEBUG, "Init Braswell core.\n");

	/*
	 * The turbo disable bit is actually scoped at building block level -- not package.
	 * For non-BSP cores that are within a building block, enable turbo. The cores within
	 * the BSP's building block will just see it already enabled and move on.
	 */
	if (lapicid())
		enable_turbo();

	/* Set virtualization based on Kconfig option */
	set_vmx_and_lock();

	/* Set core MSRs */
	reg_script_run(core_msr_script);

	/* Set this core to max frequency ratio */
	set_max_freq();
}

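/* Device operations for the CPU driver; .init is run on each core during MP init. */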
static struct device_operations cpu_dev_ops = {
	.init = soc_core_init,
};

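/* CPUID values (family 6, model 0x4C, steppings 2-4) of the supported Braswell cores. */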
static const struct cpu_device_id cpu_table[] = {
	{ X86_VENDOR_INTEL, 0x406c4, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, 0x406c3, CPUID_EXACT_MATCH_MASK },
	{ X86_VENDOR_INTEL, 0x406c2, CPUID_EXACT_MATCH_MASK },
	CPU_TABLE_END
};

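/* Register the driver so it is matched against the CPUIDs above. */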
static const struct cpu_driver driver __cpu_driver = {
	.ops      = &cpu_dev_ops,
	.id_table = cpu_table,
};

/*
 * MP and SMM loading initialization.
 */

/* Package level MSRs */
static const struct reg_script package_msr_script[] = {
	/* Set Package TDP to ~7W */
	REG_MSR_WRITE(MSR_PKG_POWER_LIMIT, 0x3880fa),
	REG_MSR_RMW(MSR_PP1_POWER_LIMIT, ~(0x7f << 17), 0),
	REG_MSR_WRITE(MSR_PKG_TURBO_CFG1, 0x702),
	REG_MSR_WRITE(MSR_CPU_TURBO_WKLD_CFG1, 0x200b),
	REG_MSR_WRITE(MSR_CPU_TURBO_WKLD_CFG2, 0),
	REG_MSR_WRITE(MSR_CPU_THERM_CFG1, 0x00000305),
	REG_MSR_WRITE(MSR_CPU_THERM_CFG2, 0x0405500d),
	REG_MSR_WRITE(MSR_CPU_THERM_SENS_CFG, 0x27),
	REG_SCRIPT_END
};

static void pre_mp_init(void)
{
	uint32_t bsmrwac;

	/* Set up MTRRs based on physical address size. */
	x86_setup_mtrrs_with_detect();
	x86_mtrr_check();

	/*
	 * Configure the BUNIT to allow dirty cache line evictions in non-SMM mode for lines
	 * that were dirtied while in SMM mode. Otherwise the writes would be silently dropped.
	 */
	bsmrwac = iosf_bunit_read(BUNIT_SMRWAC) | SAI_IA_UNTRUSTED;
	iosf_bunit_write(BUNIT_SMRWAC, bsmrwac);

	/* Set package MSRs */
	reg_script_run(package_msr_script);

	/* Enable Turbo Mode on BSP and siblings of the BSP's building block. */
	enable_turbo();
}

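/* Report the core count from the cached platform attributes. */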
static int get_cpu_count(void)
{
	const struct pattrs *pattrs = pattrs_get();

	return pattrs->num_cpus;
}

static void fill_in_relocation_params(struct smm_relocation_params *params)
{
	uintptr_t tseg_base;
	size_t tseg_size;

	/* All range registers are aligned to 4KiB */
	const u32 rmask = ~((1 << 12) - 1);

	smm_region(&tseg_base, &tseg_size);

	/* SMRR has 32-bits of valid address aligned to 4KiB. */
	params->smrr_base.lo = (tseg_base & rmask) | MTRR_TYPE_WRBACK;
	params->smrr_base.hi = 0;
	params->smrr_mask.lo = (~(tseg_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
	params->smrr_mask.hi = 0;
}

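/* Describe the permanent SMM handler region and the per-CPU save state size. */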
static void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
				size_t *smm_save_state_size)
{
	printk(BIOS_DEBUG, "Setting up SMI for CPU\n");

	fill_in_relocation_params(&smm_reloc_params);

	smm_subregion(SMM_SUBREGION_HANDLER, perm_smbase, perm_smsize);

	*smm_save_state_size = sizeof(em64t100_smm_state_save_area_t);
}

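/* Provide the microcode patch; loading may run in parallel when hyper-threading is not supported. */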
static void get_microcode_info(const void **microcode, int *parallel)
{
	const struct pattrs *pattrs = pattrs_get();

	*microcode = pattrs->microcode_patch;
	*parallel = !intel_ht_supported();
}

static void per_cpu_smm_trigger(void)
{
	const struct pattrs *pattrs = pattrs_get();
	msr_t msr_value;

	/* Need to make sure that all cores have microcode loaded. */
	msr_value = rdmsr(IA32_BIOS_SIGN_ID);
	if (msr_value.hi == 0)
		intel_microcode_load_unlocked(pattrs->microcode_patch);

	/* Relocate SMM space. */
	smm_initiate_relocation();

	/* Load microcode after SMM relocation. */
	intel_microcode_load_unlocked(pattrs->microcode_patch);
}

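/* Per-CPU SMM relocation: program the SMRR and point this core's SMBASE at its staggered location. */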
static void relocation_handler(int cpu, uintptr_t curr_smbase, uintptr_t staggered_smbase)
{
	struct smm_relocation_params *relo_params = &smm_reloc_params;
	em64t100_smm_state_save_area_t *smm_state;

	/* Set up SMRR. */
	wrmsr(IA32_SMRR_PHYS_BASE, relo_params->smrr_base);
	wrmsr(IA32_SMRR_PHYS_MASK, relo_params->smrr_mask);

	smm_state = (void *)(SMM_EM64T100_SAVE_STATE_OFFSET + curr_smbase);
	smm_state->smbase = staggered_smbase;
}

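/* All cores have relocated; enable SMIs at the chipset level. */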
static void post_mp_init(void)
{
	global_smi_enable();
}

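/* Callbacks used by mp_init_with_smm() below to bring up the APs and relocate SMM. */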
static const struct mp_ops mp_ops = {
	.pre_mp_init         = pre_mp_init,
	.get_cpu_count       = get_cpu_count,
	.get_smm_info        = get_smm_info,
	.get_microcode_info  = get_microcode_info,
	.pre_mp_smm_init     = smm_southbridge_clear_state,
	.per_cpu_smm_trigger = per_cpu_smm_trigger,
	.relocation_handler  = relocation_handler,
	.post_mp_init        = post_mp_init,
};

void mp_init_cpus(struct bus *cpu_bus)
{
	/* TODO: Handle mp_init_with_smm failure? */
	mp_init_with_smm(cpu_bus, &mp_ops);
}
208