• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 
3 #include <console/console.h>
4 #include <intelblocks/meminit.h>
5 #include <commonlib/region.h>
6 #include <spd_bin.h>
7 #include <spd_cache.h>
8 #include <string.h>
9 #include <types.h>
10 
11 _Static_assert(CONFIG_MRC_CHANNEL_WIDTH > 0, "MRC channel width must be >0!");
12 _Static_assert(CONFIG_DATA_BUS_WIDTH > 0, "Data bus width must be >0!");
13 _Static_assert(CONFIG_DIMMS_PER_CHANNEL > 0, "DIMMS per channel must be >0!");
14 
15 /*
16  * Given mask of channels that are populated, this function returns the flags
17  * indicating which half of the channels are populated.
18  */
/*
 * Translate a bitmask of populated physical channels into the corresponding
 * channel population flag. Supported layouts are: all channels, the lower
 * half, the upper half, or none. Any other mask is a configuration error
 * and ends the boot with die().
 */
static enum channel_population populated_mask_to_flag(uint32_t pop_mask, size_t max_channels)
{
	const uint32_t all_ch_mask = BIT(max_channels) - 1;
	const uint32_t lower_half_mask = BIT(max_channels / 2) - 1;
	const uint32_t upper_half_mask = all_ch_mask & ~lower_half_mask;

	/* Comparison order is significant for degenerate channel counts. */
	if (pop_mask == all_ch_mask)
		return FULLY_POPULATED;
	if (pop_mask == lower_half_mask)
		return BOTTOM_HALF_POPULATED;
	if (pop_mask == upper_half_mask)
		return TOP_HALF_POPULATED;
	if (pop_mask == 0)
		return NO_CHANNEL_POPULATED;

	die("Unsupported channel population mask(0x%x)\n", pop_mask);
}
36 
/*
 * Fill in SPD data for memory-down topologies.
 *
 * Maps the single SPD blob from CBFS (index info->cbfs_index) and assigns it
 * to DIMM slot 0 of every populated MRC channel, then records which half of
 * the channels is populated in channel_data->ch_population_flags.
 *
 * @soc_mem_cfg:  SoC description (channel count, phys->MRC map, MD masks)
 * @info:         Mainboard memory info; must have MEM_TOPO_MEMORY_DOWN set,
 *                otherwise this function is a no-op.
 * @half_populated: Mainboard requests only half the channels be used.
 * @channel_data: Output; spd[][] pointers and population flags are updated.
 * @spd_len:      Output; set to CONFIG_DIMM_SPD_SIZE on success.
 */
static void read_spd_md(const struct soc_mem_cfg *soc_mem_cfg, const struct mem_spd *info,
			bool half_populated, struct mem_channel_data *channel_data,
			size_t *spd_len)
{
	size_t ch;
	size_t num_phys_ch = soc_mem_cfg->num_phys_channels;
	uintptr_t spd_data;

	/*
	 * For memory down topologies, start with full mask as per the number
	 * of physical channels and mask out any channels based on mixed
	 * topology or half populated flag as set by the mainboard.
	 */
	uint32_t pop_mask = BIT(num_phys_ch) - 1;

	if (!(info->topo & MEM_TOPO_MEMORY_DOWN))
		return;

	if (info->topo == MEM_TOPO_MIXED)
		pop_mask &= soc_mem_cfg->md_phy_masks.mixed_topo;

	if (half_populated)
		pop_mask &= soc_mem_cfg->md_phy_masks.half_channel;

	if (pop_mask == 0)
		die("Memory technology does not support the selected configuration!\n");

	printk(BIOS_DEBUG, "SPD index = %zu\n", info->cbfs_index);

	/* Memory leak is ok as long as we have memory mapped boot media */
	_Static_assert(CONFIG(BOOT_DEVICE_MEMORY_MAPPED),
		"Function assumes memory-mapped boot media");

	*spd_len = CONFIG_DIMM_SPD_SIZE;
	spd_data = spd_cbfs_map(info->cbfs_index);
	if (!spd_data)
		die("SPD not found in CBFS or incorrect index!\n");

	print_spd_info((uint8_t *)spd_data);

	for (ch = 0; ch < num_phys_ch; ch++) {
		if (!(pop_mask & BIT(ch)))
			continue;

		/* size_t to match read_spd_dimm() and the map's index type */
		size_t mrc_ch = soc_mem_cfg->phys_to_mrc_map[ch];

		/*
		 * Memory down topology simulates a DIMM. So, the assumption is
		 * that there is a single DIMM per channel when using memory
		 * down topology. As SPD describes a DIMM, only DIMM0 for each
		 * physical channel is filled here.
		 */
		channel_data->spd[mrc_ch][0] = spd_data;
	}

	channel_data->ch_population_flags |= populated_mask_to_flag(pop_mask, num_phys_ch);
}
94 
/* Flat index of (channel, DIMM) within per-DIMM arrays such as spd_block fields */
#define CH_DIMM_OFFSET(ch, dimm)	((dimm) + (ch) * CONFIG_DIMMS_PER_CHANNEL)
96 
/*
 * Acquire raw SPD data for the socketed DIMMs described by blk->addr_map.
 *
 * With SPD_CACHE_ENABLE, try the RW_SPD_CACHE region first; a valid cache
 * with unchanged DIMMs is used directly, otherwise the DIMMs are read over
 * SMBus (invalidating the FSP-M NVS/MRC cache if the DIMMs changed) and the
 * SPD cache is refreshed. Without SPD_CACHE_ENABLE, always read over SMBus.
 */
static void get_spd_data_for_dimms(FSPM_UPD *memupd, struct spd_block *blk)
{
	uint8_t *spd_cache;
	size_t spd_cache_sz;
	bool update_cache = false;
	bool dimms_changed = true;

	if (!CONFIG(SPD_CACHE_ENABLE)) {
		get_spd_smbus(blk);
		return;
	}

	/* load spd cache from RW_SPD_CACHE */
	if (load_spd_cache(&spd_cache, &spd_cache_sz) == CB_SUCCESS) {
		if (!spd_cache_is_valid(spd_cache, spd_cache_sz)) {
			printk(BIOS_WARNING, "Invalid SPD cache\n");
		} else {
			dimms_changed = check_if_dimm_changed(spd_cache, blk);
			if (dimms_changed && memupd->FspmArchUpd.NvsBufferPtr != 0) {
				/*
				 * Set FSP-M Arch UPD to indicate that the
				 * mrc_cache need to be invalidated
				 */
				printk(BIOS_INFO, "DIMM change, invalidate cache.\n");
				memupd->FspmArchUpd.NvsBufferPtr = 0;
				memupd->FspmArchUpd.BootMode =
					 FSP_BOOT_WITH_FULL_CONFIGURATION;
			}
		}
		update_cache = true;
	}

	if (!dimms_changed) {
		printk(BIOS_INFO, "Use the SPD cache data\n");
		spd_fill_from_cache(spd_cache, blk);
	} else {
		/* Access memory info through SMBUS. */
		get_spd_smbus(blk);

		if (update_cache && update_spd_cache(blk) == CB_ERR)
			printk(BIOS_ERR, "update SPD cache failed\n");
	}
}

/*
 * Fill in SPD data for DIMM-module topologies by probing the SMBus addresses
 * supplied by the mainboard (optionally short-circuited by the SPD cache).
 * Channels whose DIMMs all fail to read stay unpopulated. Returns true when
 * at least one DIMM was found. Note: half_populated is not consulted here;
 * for DIMMs, population is discovered, not configured.
 */
static bool read_spd_dimm(FSPM_UPD *memupd, const struct soc_mem_cfg *soc_mem_cfg,
			const struct mem_spd *info, bool half_populated,
			struct mem_channel_data *channel_data,
			size_t *spd_len)
{
	size_t phys_ch, slot;
	struct spd_block blk = { 0 };
	const size_t num_phys_ch = soc_mem_cfg->num_phys_channels;

	/*
	 * For DIMM modules, start with mask set to no channels populated. If
	 * SPD is read successfully from EEPROM for any channel, then that
	 * channel is marked as populated.
	 */
	uint32_t pop_mask = 0;

	if (!(info->topo & MEM_TOPO_DIMM_MODULE))
		return false;

	/* Collect the SMBus address of every possible DIMM slot. */
	for (phys_ch = 0; phys_ch < num_phys_ch; phys_ch++)
		for (slot = 0; slot < CONFIG_DIMMS_PER_CHANNEL; slot++)
			blk.addr_map[CH_DIMM_OFFSET(phys_ch, slot)] =
				info->smbus[phys_ch].addr_dimm[slot];

	get_spd_data_for_dimms(memupd, &blk);

	*spd_len = blk.len;

	for (phys_ch = 0; phys_ch < num_phys_ch; phys_ch++) {
		size_t mrc_ch = soc_mem_cfg->phys_to_mrc_map[phys_ch];

		for (slot = 0; slot < CONFIG_DIMMS_PER_CHANNEL; slot++) {
			uint8_t *spd = blk.spd_array[CH_DIMM_OFFSET(phys_ch, slot)];

			if (spd == NULL)
				continue;

			print_spd_info(spd);

			channel_data->spd[mrc_ch][slot] = (uintptr_t)(void *)spd;
			pop_mask |= BIT(phys_ch);
		}
	}

	channel_data->ch_population_flags |= populated_mask_to_flag(pop_mask, num_phys_ch);

	return pop_mask != 0;
}
185 
/*
 * Entry point: populate *data (SPD pointers, population flags, SPD length)
 * for the configured memory topology — memory-down, DIMM modules, or mixed.
 * Dies if no channel ends up populated, or if CBFS and EEPROM SPD lengths
 * disagree in a mixed topology with DIMMs present.
 */
void mem_populate_channel_data(FSPM_UPD *memupd, const struct soc_mem_cfg *soc_mem_cfg,
				const struct mem_spd *spd_info,
				bool half_populated,
				struct mem_channel_data *data)
{
	size_t md_len = 0, dimm_len = 0;
	bool dimms_found;

	memset(data, 0, sizeof(*data));

	read_spd_md(soc_mem_cfg, spd_info, half_populated, data, &md_len);
	dimms_found = read_spd_dimm(memupd, soc_mem_cfg, spd_info, half_populated, data,
				&dimm_len);

	if (data->ch_population_flags == NO_CHANNEL_POPULATED)
		die("No channels are populated. Incorrect memory configuration!\n");

	switch (spd_info->topo) {
	case MEM_TOPO_MEMORY_DOWN:
		data->spd_len = md_len;
		break;
	case MEM_TOPO_DIMM_MODULE:
		data->spd_len = dimm_len;
		break;
	default:
		/*
		 * SPD lengths must match for CBFS and EEPROM SPD for mixed
		 * topology. Skip this check when no DIMMs are installed.
		 */
		if (dimms_found && md_len != dimm_len)
			die("Length of SPD does not match for mixed topology!\n");

		/* Use memory-down SPD length in case there are no DIMMs installed */
		data->spd_len = md_len;
		break;
	}
}
219