/* SPDX-License-Identifier: GPL-2.0-or-later */

#include <console/console.h>
#include <fsp/util.h>
#include <soc/meminit.h>
#include <string.h>

#define LPX_PHYSICAL_CH_WIDTH		16
#define LPX_CHANNELS			CHANNEL_COUNT(LPX_PHYSICAL_CH_WIDTH)

#define DDR5_PHYSICAL_CH_WIDTH		32
#define DDR5_CHANNELS			CHANNEL_COUNT(DDR5_PHYSICAL_CH_WIDTH)

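/*
 * Apply board-specific Rcomp overrides: a zero resistor or target value in
 * the mainboard config means "keep the FSP default", so only non-zero
 * entries are copied into the UPDs.
 */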
static void set_rcomp_config(FSP_M_CONFIG *mem_cfg, const struct mb_cfg *mb_cfg)
{
	if (mb_cfg->rcomp.resistor != 0)
		mem_cfg->RcompResistor = mb_cfg->rcomp.resistor;

	for (size_t i = 0; i < ARRAY_SIZE(mem_cfg->RcompTarget); i++) {
		if (mb_cfg->rcomp.targets[i] != 0)
			mem_cfg->RcompTarget[i] = mb_cfg->rcomp.targets[i];
	}
}

static void meminit_lp5x(FSP_M_CONFIG *mem_cfg, const struct mem_lp5x_config *lp5x_config)
{
	mem_cfg->DqPinsInterleaved = 0;
	mem_cfg->Lp5CccConfig = lp5x_config->ccc_config;
}

static void meminit_ddr(FSP_M_CONFIG *mem_cfg, const struct mem_ddr_config *ddr_config)
{
	mem_cfg->DqPinsInterleaved = ddr_config->dq_pins_interleaved;
}

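/*
 * Per-memory-type channel layout consumed by mem_populate_channel_data():
 * the number of physical channels, the physical-to-MRC channel mapping, and
 * the masks selecting which physical channels carry memory in half-populated
 * and mixed (memory-down) topologies.
 */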
static const struct soc_mem_cfg soc_mem_cfg[] = {
	[MEM_TYPE_DDR5] = {
		.num_phys_channels = DDR5_CHANNELS,
		.phys_to_mrc_map = {
			[0] = 0,
			[1] = 1,
			[2] = 4,
			[3] = 5,
		},
		.md_phy_masks = {
			/*
			 * Physical channels 0 and 1 are populated in case of
			 * half-populated configurations.
			 */
			.half_channel = BIT(0) | BIT(1),
			/* In mixed topologies, channels 2 and 3 are always memory-down. */
			.mixed_topo = BIT(2) | BIT(3),
		},
	},
	[MEM_TYPE_LP5X] = {
		.num_phys_channels = LPX_CHANNELS,
		.phys_to_mrc_map = {
			[0] = 0,
			[1] = 1,
			[2] = 2,
			[3] = 3,
			[4] = 4,
			[5] = 5,
			[6] = 6,
			[7] = 7,
		},
		.md_phy_masks = {
			/*
			 * Physical channels 0, 1, 2 and 3 are populated in case of
			 * half-populated configurations.
			 */
			.half_channel = BIT(0) | BIT(1) | BIT(2) | BIT(3),
			/* LP5x does not support mixed topologies. */
		},
	},
};

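/*
 * Program the per-DIMM SPD pointer UPDs from the resolved channel data and
 * disable every MRC channel that has no SPD data on any of its DIMMs.
 */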
static void mem_init_spd_upds(FSP_M_CONFIG *mem_cfg, const struct mem_channel_data *data)
{
	efi_uintn_t *spd_upds[MRC_CHANNELS][CONFIG_DIMMS_PER_CHANNEL] = {
		[0] = { &mem_cfg->MemorySpdPtr000, &mem_cfg->MemorySpdPtr001, },
		[1] = { &mem_cfg->MemorySpdPtr010, &mem_cfg->MemorySpdPtr011, },
		[2] = { &mem_cfg->MemorySpdPtr020, &mem_cfg->MemorySpdPtr021, },
		[3] = { &mem_cfg->MemorySpdPtr030, &mem_cfg->MemorySpdPtr031, },
		[4] = { &mem_cfg->MemorySpdPtr100, &mem_cfg->MemorySpdPtr101, },
		[5] = { &mem_cfg->MemorySpdPtr110, &mem_cfg->MemorySpdPtr111, },
		[6] = { &mem_cfg->MemorySpdPtr120, &mem_cfg->MemorySpdPtr121, },
		[7] = { &mem_cfg->MemorySpdPtr130, &mem_cfg->MemorySpdPtr131, },
	};
	uint8_t *disable_channel_upds[MRC_CHANNELS] = {
		&mem_cfg->DisableMc0Ch0,
		&mem_cfg->DisableMc0Ch1,
		&mem_cfg->DisableMc0Ch2,
		&mem_cfg->DisableMc0Ch3,
		&mem_cfg->DisableMc1Ch0,
		&mem_cfg->DisableMc1Ch1,
		&mem_cfg->DisableMc1Ch2,
		&mem_cfg->DisableMc1Ch3,
	};
	size_t ch, dimm;

	mem_cfg->MemorySpdDataLen = data->spd_len;

	for (ch = 0; ch < MRC_CHANNELS; ch++) {
		uint8_t *disable_channel_ptr = disable_channel_upds[ch];
		bool enable_channel = false;

		for (dimm = 0; dimm < CONFIG_DIMMS_PER_CHANNEL; dimm++) {
			efi_uintn_t *spd_ptr = spd_upds[ch][dimm];

			*spd_ptr = data->spd[ch][dimm];
			if (*spd_ptr)
				enable_channel = true;
		}
		*disable_channel_ptr = !enable_channel;
	}
}

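/*
 * Common helper for the DQ and DQS map UPDs: for each MRC channel, copy one
 * upd_size-sized map entry, or clear the UPD when the channel is unpopulated
 * or auto-detection of the maps is requested.
 */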
static void mem_init_dq_dqs_upds(void *upds[MRC_CHANNELS], const void *map, size_t upd_size,
				const struct mem_channel_data *data, bool auto_detect)
{
	size_t i;

	for (i = 0; i < MRC_CHANNELS; i++, map += upd_size) {
		if (auto_detect ||
			!channel_is_populated(i, MRC_CHANNELS, data->ch_population_flags))
			memset(upds[i], 0, upd_size);
		else
			memcpy(upds[i], map, upd_size);
	}
}

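/* DQ maps: one CONFIG_MRC_CHANNEL_WIDTH-byte CPU-to-DRAM map per MRC channel. */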
static void mem_init_dq_upds(FSP_M_CONFIG *mem_cfg, const struct mem_channel_data *data,
				const struct mb_cfg *mb_cfg, bool auto_detect)
{
	void *dq_upds[MRC_CHANNELS] = {
		&mem_cfg->DqMapCpu2DramMc0Ch0,
		&mem_cfg->DqMapCpu2DramMc0Ch1,
		&mem_cfg->DqMapCpu2DramMc0Ch2,
		&mem_cfg->DqMapCpu2DramMc0Ch3,
		&mem_cfg->DqMapCpu2DramMc1Ch0,
		&mem_cfg->DqMapCpu2DramMc1Ch1,
		&mem_cfg->DqMapCpu2DramMc1Ch2,
		&mem_cfg->DqMapCpu2DramMc1Ch3,
	};

	const size_t upd_size = sizeof(mem_cfg->DqMapCpu2DramMc0Ch0);

	_Static_assert(sizeof(mem_cfg->DqMapCpu2DramMc0Ch0) == CONFIG_MRC_CHANNEL_WIDTH,
		       "Incorrect DQ UPD size!");

	mem_init_dq_dqs_upds(dq_upds, mb_cfg->dq_map, upd_size, data, auto_detect);
}

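/* DQS maps: CONFIG_MRC_CHANNEL_WIDTH / 8 bytes (one per DQ byte lane) per MRC channel. */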
static void mem_init_dqs_upds(FSP_M_CONFIG *mem_cfg, const struct mem_channel_data *data,
				const struct mb_cfg *mb_cfg, bool auto_detect)
{
	void *dqs_upds[MRC_CHANNELS] = {
		&mem_cfg->DqsMapCpu2DramMc0Ch0,
		&mem_cfg->DqsMapCpu2DramMc0Ch1,
		&mem_cfg->DqsMapCpu2DramMc0Ch2,
		&mem_cfg->DqsMapCpu2DramMc0Ch3,
		&mem_cfg->DqsMapCpu2DramMc1Ch0,
		&mem_cfg->DqsMapCpu2DramMc1Ch1,
		&mem_cfg->DqsMapCpu2DramMc1Ch2,
		&mem_cfg->DqsMapCpu2DramMc1Ch3,
	};

	const size_t upd_size = sizeof(mem_cfg->DqsMapCpu2DramMc0Ch0);

	_Static_assert(sizeof(mem_cfg->DqsMapCpu2DramMc0Ch0) == CONFIG_MRC_CHANNEL_WIDTH / 8,
		       "Incorrect DQS UPD size!");

	mem_init_dq_dqs_upds(dqs_upds, mb_cfg->dqs_map, upd_size, data, auto_detect);
}

#define DDR5_CH_DIMM_OFFSET(ch, dimm)        ((ch) * CONFIG_DIMMS_PER_CHANNEL + (dimm))

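/*
 * DDR5 DIMM-module workaround: pass the SMBus SPD addresses (shifted into
 * 8-bit form) to the MRC via SpdAddressTable so it reads the SPD EEPROMs
 * itself, and let it auto-detect the DQ/DQS maps.
 */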
static void ddr5_fill_dimm_module_info(FSP_M_CONFIG *mem_cfg, const struct mb_cfg *mb_cfg,
					const struct mem_spd *spd_info)
{
	for (size_t ch = 0; ch < soc_mem_cfg[MEM_TYPE_DDR5].num_phys_channels; ch++) {
		for (size_t dimm = 0; dimm < CONFIG_DIMMS_PER_CHANNEL; dimm++) {
			size_t mrc_ch = soc_mem_cfg[MEM_TYPE_DDR5].phys_to_mrc_map[ch];
			mem_cfg->SpdAddressTable[DDR5_CH_DIMM_OFFSET(mrc_ch, dimm)] =
				spd_info->smbus[ch].addr_dimm[dimm] << 1;
		}
	}
	mem_init_dq_upds(mem_cfg, NULL, mb_cfg, true);
	mem_init_dqs_upds(mem_cfg, NULL, mb_cfg, true);
}

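/*
 * Entry point: program the FSP-M memory UPDs from the mainboard description.
 * DDR5 DIMM-module boards take the SpdAddressTable shortcut above; all other
 * topologies go through mem_populate_channel_data() and the per-channel UPD
 * helpers. A minimal caller sketch (board-specific fields elided, typically
 * invoked from the mainboard's romstage FSP-M parameter hook):
 *
 *	static const struct mb_cfg board_cfg = {
 *		.type = MEM_TYPE_LP5X,
 *		// rcomp, dq_map, dqs_map, lp5x_config, ect, UserBd ...
 *	};
 *	memcfg_init(memupd, &board_cfg, &spd_info, half_populated);
 */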
void memcfg_init(FSPM_UPD *memupd, const struct mb_cfg *mb_cfg,
		 const struct mem_spd *spd_info, bool half_populated)
{
	struct mem_channel_data data;
	bool dq_dqs_auto_detect = false;
	FSP_M_CONFIG *mem_cfg = &memupd->FspmConfig;

	mem_cfg->ECT = mb_cfg->ect;
	mem_cfg->UserBd = mb_cfg->UserBd;
	set_rcomp_config(mem_cfg, mb_cfg);

	switch (mb_cfg->type) {
	case MEM_TYPE_DDR5:
		meminit_ddr(mem_cfg, &mb_cfg->ddr_config);
		dq_dqs_auto_detect = true;
		/*
		 * TODO: Drop this workaround once SMBus driver in coreboot is updated to
		 * support DDR5 EEPROM reading.
		 */
		if (spd_info->topo == MEM_TOPO_DIMM_MODULE) {
			ddr5_fill_dimm_module_info(mem_cfg, mb_cfg, spd_info);
			return;
		}
		break;
	case MEM_TYPE_LP5X:
		meminit_lp5x(mem_cfg, &mb_cfg->lp5x_config);
		break;
	default:
		die("Unsupported memory type(%d)\n", mb_cfg->type);
	}

	mem_populate_channel_data(memupd, &soc_mem_cfg[mb_cfg->type], spd_info,
				  half_populated, &data);
	mem_init_spd_upds(mem_cfg, &data);
	mem_init_dq_upds(mem_cfg, &data, mb_cfg, dq_dqs_auto_detect);
	mem_init_dqs_upds(mem_cfg, &data, mb_cfg, dq_dqs_auto_detect);
}