1 /* SPDX-License-Identifier: Apache-2.0 */
2 /* SPDX-License-Identifier: GPL-2.0-or-later */
3 /* See the file LICENSE for further information */
4
5 #ifndef _SIFIVE_SDRAM_H
6 #define _SIFIVE_SDRAM_H
7
8 #include <arch/mmio.h>
9 #include <console/console.h>
10 #include <device/mmio.h>
11 #include <soc/addressmap.h>
12 #include <soc/sdram.h>
13 #include <stdint.h>
14 #include <stddef.h>
15
/* Bit offsets / flags for the Denali DDR controller registers
   (register indices are u32 word offsets from FU740_DDRCTRL). */
#define DRAM_CLASS_OFFSET 8
#define DRAM_CLASS_DDR4 0xA
#define OPTIMAL_RMODW_EN_OFFSET 0
#define DISABLE_RD_INTERLEAVE_OFFSET 16
/* INT_STATUS / INT_MASK (DENALI_CTL_132 / DENALI_CTL_136) bits */
#define OUT_OF_RANGE_FLAG (1 << 1)
#define MULTIPLE_OUT_OF_RANGE_FLAG (1 << 2)
#define PORT_COMMAND_CHANNEL_ERROR_FLAG (1 << 7)
#define MC_INIT_COMPLETE_FLAG (1 << 8) // Memory Controller init complete
#define LEVELING_OPERATION_COMPLETED_FLAG (1 << 22)
#define DFI_PHY_WRLELV_MODE_OFFSET 24
#define DFI_PHY_RDLVL_MODE_OFFSET 24
#define DFI_PHY_RDLVL_GATE_MODE_OFFSET 0
#define VREF_EN_OFFSET 24
#define PORT_ADDR_PROTECTION_EN_OFFSET 0
#define AXI0_ADDRESS_RANGE_ENABLE_OFFSET 8
#define AXI0_RANGE_PROT_BITS_0_OFFSET 24
#define RDLVL_EN_OFFSET 16
#define RDLVL_GATE_EN_OFFSET 24
#define WRLVL_EN_OFFSET 0

/* PHY_RX_CAL_DQn fields: two DQ lanes per 32-bit register */
#define PHY_RX_CAL_DQ0_0_OFFSET 0
#define PHY_RX_CAL_DQ1_0_OFFSET 16

// reference: fu740-c000 manual chapter 32: DDR Subsystem
// Chapter 32.2: Memory Map
#define FU740_DDRCTRL 0x100b0000
#define FU740_DDRPHY 0x100b2000
#define FU740_PHYSICAL_FILTER 0x100b8000 // formerly called DDRBUSBLOCKER (FU540)
#define FU740_DDRMGMT 0x100c0000
45
/*
 * Load the DDR PHY configuration registers.
 *
 * The upper window (indices 1152..1214) is written before the lower
 * one (0..1151); keep this ordering — it mirrors the vendor init flow.
 */
static void phy_reset(u32 *ddrphyreg, const u32 *physettings)
{
	int i;

	for (i = 1152; i < 1215; i++)
		write32(&ddrphyreg[i], physettings[i]);

	for (i = 0; i < 1152; i++)
		write32(&ddrphyreg[i], physettings[i]);
}
54
/* Program the Denali memory-controller register file, then the PHY. */
static void ux00ddr_writeregmap(u32 *ahbregaddr, const u32 *ctlsettings, const u32 *physettings)
{
	u32 *ctlreg = ahbregaddr;
	/* The PHY window sits 0x2000 bytes above the controller base.
	   TODO: use FU740_DDRPHY instead of the hard-coded offset. */
	u32 *phyreg = ahbregaddr + 0x2000 / sizeof(u32);

	for (size_t i = 0; i < 265; i++)
		write32(&ctlreg[i], ctlsettings[i]);

	phy_reset(phyreg, physettings);
}
65
/*
 * Start DDR calibration/training and open up the memory region.
 *
 * Busy-waits on the controller's init-complete status bit, then
 * reprograms the physical filter (BusBlocker) so the AXI slave ports
 * accept accesses up to ddrend.
 */
static void ux00ddr_start(u32 *ahbregaddr, u64 *filteraddr, uint64_t ddrend)
{
	/* START bit of DENALI_CTL_0 kicks off calibration and training. */
	setbits32(ahbregaddr, 0x1);

	/* Poll bit 8 of INT_STATUS (DENALI_CTL_132, offset 0x210). */
	do {
	} while (!(read32(&ahbregaddr[132]) & MC_INIT_COMPLETE_FLAG));

	/* PMP-style filter entry: RWX permissions + TOR mode in the top
	   byte, top-of-range address in units of 4 bytes below. */
	write64(filteraddr, 0x0f00000000000000UL | (ddrend >> 2));
}
80
/* Mask bit 8 of INT_MASK (index 136): MC initialization complete. */
static void ux00ddr_mask_mc_init_complete_interrupt(u32 *ahbregaddr)
{
	u32 *int_mask = &ahbregaddr[136];

	setbits32(int_mask, MC_INIT_COMPLETE_FLAG);
}
87
/*
 * Mask bits 1 and 2 of INT_MASK (index 136):
 * bit [2] multiple accesses outside the defined PHYSICAL memory space,
 * bit [1] a single access outside the defined PHYSICAL memory space.
 */
static void ux00ddr_mask_outofrange_interrupts(u32 *ahbregaddr)
{
	const u32 mask = OUT_OF_RANGE_FLAG | MULTIPLE_OUT_OF_RANGE_FLAG;

	setbits32(&ahbregaddr[136], mask);
}
95
/* Mask bit 7 of INT_MASK (index 136): port command channel error. */
static void ux00ddr_mask_port_command_error_interrupt(u32 *ahbregaddr)
{
	u32 *int_mask = &ahbregaddr[136];

	setbits32(int_mask, PORT_COMMAND_CHANNEL_ERROR_FLAG);
}
102
/* Mask bit 22 of INT_MASK (index 136): leveling operation completed. */
static void ux00ddr_mask_leveling_completed_interrupt(u32 *ahbregaddr)
{
	u32 *int_mask = &ahbregaddr[136];

	setbits32(int_mask, LEVELING_OPERATION_COMPLETED_FLAG);
}
109
/*
 * Configure AXI0 address-range protection to cover [0, size).
 *
 * The range-end register holds the number of 16 KiB blocks minus one;
 * only the low 23 bits of the block count are used.
 */
static void ux00ddr_setuprangeprotection(u32 *ahbregaddr, size_t size)
{
	u32 blocks_16k = (u32)((size >> 14) & 0x7FFFFF) - 1;

	write32(&ahbregaddr[209], 0x0);        /* range start = 0 */
	write32(&ahbregaddr[210], blocks_16k); /* range end (16 KiB blocks - 1) */
	write32(&ahbregaddr[212], 0x0);
	write32(&ahbregaddr[214], 0x0);
	write32(&ahbregaddr[216], 0x0);
	setbits32(&ahbregaddr[224], 0x3 << AXI0_RANGE_PROT_BITS_0_OFFSET);
	write32(&ahbregaddr[225], 0xFFFFFFFF);
	setbits32(&ahbregaddr[208], 1 << AXI0_ADDRESS_RANGE_ENABLE_OFFSET);
	setbits32(&ahbregaddr[208], 1 << PORT_ADDR_PROTECTION_EN_OFFSET);
}
123
/* Set DISABLE_RD_INTERLEAVE in controller register index 120. */
static void ux00ddr_disableaxireadinterleave(u32 *ahbregaddr)
{
	setbits32(&ahbregaddr[120], 1 << DISABLE_RD_INTERLEAVE_OFFSET);
}
128
/* Clear OPTIMAL_RMODW_EN in controller register index 21. */
static void ux00ddr_disableoptimalrmodw(u32 *ahbregaddr)
{
	clrbits32(&ahbregaddr[21], 1 << OPTIMAL_RMODW_EN_OFFSET);
}
133
/* Enable write leveling: WRLVL_EN and the DFI PHY write-level mode bit. */
static void ux00ddr_enablewriteleveling(u32 *ahbregaddr)
{
	const u32 bits = (1 << WRLVL_EN_OFFSET) | (1 << DFI_PHY_WRLELV_MODE_OFFSET);

	setbits32(&ahbregaddr[170], bits);
}
138
/* Enable read leveling: DFI PHY read-level mode plus RDLVL_EN. */
static void ux00ddr_enablereadleveling(u32 *ahbregaddr)
{
	u32 *rdlvl_mode_reg = &ahbregaddr[181];
	u32 *rdlvl_en_reg = &ahbregaddr[260];

	setbits32(rdlvl_mode_reg, 1 << DFI_PHY_RDLVL_MODE_OFFSET);
	setbits32(rdlvl_en_reg, 1 << RDLVL_EN_OFFSET);
}
144
/* Enable read-leveling gate training: RDLVL_GATE_EN plus DFI gate mode. */
static void ux00ddr_enablereadlevelinggate(u32 *ahbregaddr)
{
	u32 *gate_en_reg = &ahbregaddr[260];
	u32 *gate_mode_reg = &ahbregaddr[182];

	setbits32(gate_en_reg, 1 << RDLVL_GATE_EN_OFFSET);
	setbits32(gate_mode_reg, 1 << DFI_PHY_RDLVL_GATE_MODE_OFFSET);
}
150
/* Enable VREF training (DDR4 only; see caller) in register index 184. */
static void ux00ddr_enablevreftraining(u32 *ahbregaddr)
{
	setbits32(&ahbregaddr[184], 1 << VREF_EN_OFFSET);
}
155
/* Return the DRAM_CLASS field (bits [11:8] of DENALI_CTL_0). */
static u32 ux00ddr_getdramclass(u32 *ahbregaddr)
{
	u32 ctl0 = read32(ahbregaddr);

	return (ctl0 >> DRAM_CLASS_OFFSET) & 0xF;
}
160
ux00ddr_phy_fixup(void * ahbregaddr)161 static void ux00ddr_phy_fixup(void *ahbregaddr)
162 {
163 void *ddrphyreg = ahbregaddr + 0x2000;
164
165 // bitmask of failed lanes
166 uint64_t fails = 0;
167 u32 slicebase = 0;
168 u32 dq = 0;
169
170 // check errata condition
171 for (u32 slice = 0; slice < 8; slice++) {
172 u32 regbase = slicebase + 34;
173 for (u32 reg = 0 ; reg < 4; reg++) {
174 u32 updownreg = read32(ddrphyreg + ((regbase+reg) << 2));
175 for (u32 bit = 0; bit < 2; bit++) {
176 u32 phy_rx_cal_dqn_0_offset;
177
178 if (bit == 0)
179 phy_rx_cal_dqn_0_offset = PHY_RX_CAL_DQ0_0_OFFSET;
180 else
181 phy_rx_cal_dqn_0_offset = PHY_RX_CAL_DQ1_0_OFFSET;
182
183 u32 down = (updownreg >> phy_rx_cal_dqn_0_offset) & 0x3F;
184 u32 up = (updownreg >> (phy_rx_cal_dqn_0_offset + 6)) & 0x3F;
185
186 uint8_t failc0 = ((down == 0) && (up == 0x3F));
187 uint8_t failc1 = ((up == 0) && (down == 0x3F));
188
189 // print error message on failure
190 if (failc0 || failc1) {
191 fails |= (1 << dq);
192
193 char slicelsc = '0';
194 char slicemsc = '0';
195 slicelsc += (dq % 10);
196 slicemsc += (dq / 10);
197 printk(BIOS_ERR, "S %c%c%c\n", slicelsc, slicemsc, failc0 ? 'U' : 'D');
198 }
199 dq++;
200 }
201 }
202 slicebase += 128;
203 }
204 if (fails)
205 printk(BIOS_ERR, "DDR error in fixing up: %llx\n", fails);
206 }
207
208 extern const u32 denali_ddr_phy_data[1215];
209 extern const u32 denali_ddr_ctl_data[265];
210
sdram_init(size_t dram_size)211 void sdram_init(size_t dram_size)
212 {
213 u32 *ddrctrl = (u32 *)FU740_DDRCTRL;
214 u64 *ddr_physical_filter = (u64 *)FU740_PHYSICAL_FILTER;
215 ux00ddr_writeregmap(ddrctrl, denali_ddr_ctl_data, denali_ddr_phy_data);
216 ux00ddr_disableaxireadinterleave(ddrctrl);
217
218 ux00ddr_disableoptimalrmodw(ddrctrl);
219
220 ux00ddr_enablewriteleveling(ddrctrl);
221 ux00ddr_enablereadleveling(ddrctrl);
222 ux00ddr_enablereadlevelinggate(ddrctrl);
223 if (ux00ddr_getdramclass(ddrctrl) == DRAM_CLASS_DDR4)
224 ux00ddr_enablevreftraining(ddrctrl);
225
226 // mask off interrupts for leveling completion
227 ux00ddr_mask_leveling_completed_interrupt(ddrctrl);
228
229 ux00ddr_mask_mc_init_complete_interrupt(ddrctrl);
230 ux00ddr_mask_outofrange_interrupts(ddrctrl);
231 ux00ddr_setuprangeprotection(ddrctrl, dram_size);
232 ux00ddr_mask_port_command_error_interrupt(ddrctrl);
233
234 ux00ddr_start(ddrctrl, ddr_physical_filter, FU740_DRAM + dram_size);
235
236 ux00ddr_phy_fixup(ddrctrl);
237 }
238
239 // sdram_init MUST be called before sdram_size
sdram_size(void)240 size_t sdram_size(void)
241 {
242 u64 devicepmp0 = read64((u64 *)FU740_PHYSICAL_FILTER);
243 return ((devicepmp0 & 0xFFFFFFFFFFFFFF) << 2) - FU740_DRAM;
244 }
245
246 #endif
247