1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 /*
3  * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
4  * stmmac XGMAC support.
5  */
6 
7 #include <linux/bitrev.h>
8 #include <linux/crc32.h>
9 #include <linux/iopoll.h>
10 #include "stmmac.h"
11 #include "stmmac_ptp.h"
12 #include "dwxlgmac2.h"
13 #include "dwxgmac2.h"
14 
dwxgmac2_core_init(struct mac_device_info * hw,struct net_device * dev)15 static void dwxgmac2_core_init(struct mac_device_info *hw,
16 			       struct net_device *dev)
17 {
18 	void __iomem *ioaddr = hw->pcsr;
19 	u32 tx, rx;
20 
21 	tx = readl(ioaddr + XGMAC_TX_CONFIG);
22 	rx = readl(ioaddr + XGMAC_RX_CONFIG);
23 
24 	tx |= XGMAC_CORE_INIT_TX;
25 	rx |= XGMAC_CORE_INIT_RX;
26 
27 	if (hw->ps) {
28 		tx |= XGMAC_CONFIG_TE;
29 		tx &= ~hw->link.speed_mask;
30 
31 		switch (hw->ps) {
32 		case SPEED_10000:
33 			tx |= hw->link.xgmii.speed10000;
34 			break;
35 		case SPEED_2500:
36 			tx |= hw->link.speed2500;
37 			break;
38 		case SPEED_1000:
39 		default:
40 			tx |= hw->link.speed1000;
41 			break;
42 		}
43 	}
44 
45 	writel(tx, ioaddr + XGMAC_TX_CONFIG);
46 	writel(rx, ioaddr + XGMAC_RX_CONFIG);
47 	writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
48 }
49 
dwxgmac2_update_caps(struct stmmac_priv * priv)50 static void dwxgmac2_update_caps(struct stmmac_priv *priv)
51 {
52 	if (!priv->dma_cap.mbps_10_100)
53 		priv->hw->link.caps &= ~(MAC_10 | MAC_100);
54 	else if (!priv->dma_cap.half_duplex)
55 		priv->hw->link.caps &= ~(MAC_10HD | MAC_100HD);
56 }
57 
/* Enable or disable both the transmitter and the receiver. */
static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
{
	u32 tx_cfg = readl(ioaddr + XGMAC_TX_CONFIG);
	u32 rx_cfg = readl(ioaddr + XGMAC_RX_CONFIG);

	if (enable) {
		tx_cfg |= XGMAC_CONFIG_TE;
		rx_cfg |= XGMAC_CONFIG_RE;
	} else {
		tx_cfg &= ~XGMAC_CONFIG_TE;
		rx_cfg &= ~XGMAC_CONFIG_RE;
	}

	writel(tx_cfg, ioaddr + XGMAC_TX_CONFIG);
	writel(rx_cfg, ioaddr + XGMAC_RX_CONFIG);
}
74 
dwxgmac2_rx_ipc(struct mac_device_info * hw)75 static int dwxgmac2_rx_ipc(struct mac_device_info *hw)
76 {
77 	void __iomem *ioaddr = hw->pcsr;
78 	u32 value;
79 
80 	value = readl(ioaddr + XGMAC_RX_CONFIG);
81 	if (hw->rx_csum)
82 		value |= XGMAC_CONFIG_IPC;
83 	else
84 		value &= ~XGMAC_CONFIG_IPC;
85 	writel(value, ioaddr + XGMAC_RX_CONFIG);
86 
87 	return !!(readl(ioaddr + XGMAC_RX_CONFIG) & XGMAC_CONFIG_IPC);
88 }
89 
/* Enable Rx @queue in AVB (0x1) or DCB (0x2) mode; any other mode
 * leaves the queue disabled.
 */
static void dwxgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
				     u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 ctrl;

	ctrl = readl(ioaddr + XGMAC_RXQ_CTRL0);
	ctrl &= ~XGMAC_RXQEN(queue);

	switch (mode) {
	case MTL_QUEUE_AVB:
		ctrl |= 0x1 << XGMAC_RXQEN_SHIFT(queue);
		break;
	case MTL_QUEUE_DCB:
		ctrl |= 0x2 << XGMAC_RXQEN_SHIFT(queue);
		break;
	default:
		break;
	}

	writel(ctrl, ioaddr + XGMAC_RXQ_CTRL0);
}
103 
/* Map the priorities in @prio to Rx @queue. CTRL2 covers queues 0-3,
 * CTRL3 covers queues 4-7. The register holding the target queue is
 * written first so the priority is never left unmapped in between.
 */
static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
				   u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 clear_mask = 0;
	u32 ctrl2, ctrl3;
	int i;

	ctrl2 = readl(ioaddr + XGMAC_RXQ_CTRL2);
	ctrl3 = readl(ioaddr + XGMAC_RXQ_CTRL3);

	/* The software must ensure that the same priority
	 * is not mapped to multiple Rx queues
	 */
	for (i = 0; i < 4; i++)
		clear_mask |= ((prio << XGMAC_PSRQ_SHIFT(i)) &
						XGMAC_PSRQ(i));

	ctrl2 &= ~clear_mask;
	ctrl3 &= ~clear_mask;

	/* First assign new priorities to a queue, then
	 * clear them from others queues
	 */
	if (queue < 4) {
		ctrl2 |= (prio << XGMAC_PSRQ_SHIFT(queue)) &
						XGMAC_PSRQ(queue);

		writel(ctrl2, ioaddr + XGMAC_RXQ_CTRL2);
		writel(ctrl3, ioaddr + XGMAC_RXQ_CTRL3);
	} else {
		/* Queues 4-7 live in CTRL3 at field index (queue - 4) */
		queue -= 4;

		ctrl3 |= (prio << XGMAC_PSRQ_SHIFT(queue)) &
						XGMAC_PSRQ(queue);

		writel(ctrl3, ioaddr + XGMAC_RXQ_CTRL3);
		writel(ctrl2, ioaddr + XGMAC_RXQ_CTRL2);
	}
}
144 
/* Program the priority map of Tx traffic class @queue. MAP0 holds
 * classes 0-3, MAP1 holds classes 4-7.
 */
static void dwxgmac2_tx_queue_prio(struct mac_device_info *hw, u32 prio,
				   u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 reg, map;

	if (queue < 4) {
		reg = XGMAC_TC_PRTY_MAP0;
	} else {
		reg = XGMAC_TC_PRTY_MAP1;
		queue -= 4;
	}

	map = readl(ioaddr + reg);
	map &= ~XGMAC_PSTC(queue);
	map |= (prio << XGMAC_PSTC_SHIFT(queue)) & XGMAC_PSTC(queue);
	writel(map, ioaddr + reg);
}
161 
/* Route special packet type @packet (PACKET_* value, 1-based) to Rx
 * queue @queue via the matching field of XGMAC_RXQ_CTRL1.
 */
static void dwxgmac2_rx_queue_routing(struct mac_device_info *hw,
				      u8 packet, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	/* Indexed by (packet - 1): mask and shift of the queue field */
	static const struct stmmac_rx_routing dwxgmac2_route_possibilities[] = {
		{ XGMAC_AVCPQ, XGMAC_AVCPQ_SHIFT },
		{ XGMAC_PTPQ, XGMAC_PTPQ_SHIFT },
		{ XGMAC_DCBCPQ, XGMAC_DCBCPQ_SHIFT },
		{ XGMAC_UPQ, XGMAC_UPQ_SHIFT },
		{ XGMAC_MCBCQ, XGMAC_MCBCQ_SHIFT },
	};

	value = readl(ioaddr + XGMAC_RXQ_CTRL1);

	/* routing configuration */
	value &= ~dwxgmac2_route_possibilities[packet - 1].reg_mask;
	value |= (queue << dwxgmac2_route_possibilities[packet - 1].reg_shift) &
		 dwxgmac2_route_possibilities[packet - 1].reg_mask;

	/* some packets require extra ops */
	if (packet == PACKET_AVCPQ)
		value |= FIELD_PREP(XGMAC_TACPQE, 1);
	else if (packet == PACKET_MCBCQ)
		value |= FIELD_PREP(XGMAC_MCBCQEN, 1);

	writel(value, ioaddr + XGMAC_RXQ_CTRL1);
}
191 
/* Select the MTL Rx arbitration algorithm: WSP sets the RAA bit,
 * everything else (including SP) leaves it clear.
 */
static void dwxgmac2_prog_mtl_rx_algorithms(struct mac_device_info *hw,
					    u32 rx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 opmode = readl(ioaddr + XGMAC_MTL_OPMODE);

	opmode &= ~XGMAC_RAA;
	if (rx_alg == MTL_RX_ALGORITHM_WSP)
		opmode |= XGMAC_RAA;

	writel(opmode, ioaddr + XGMAC_MTL_OPMODE);
}
213 
/* Select the MTL Tx scheduling algorithm and program the per-TC
 * transmission-selection bits accordingly.
 */
static void dwxgmac2_prog_mtl_tx_algorithms(struct mac_device_info *hw,
					    u32 tx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	bool use_ets = true;
	u32 opmode;
	int tc;

	opmode = readl(ioaddr + XGMAC_MTL_OPMODE);
	opmode &= ~XGMAC_ETSALG;

	if (tx_alg == MTL_TX_ALGORITHM_WRR)
		opmode |= XGMAC_WRR;
	else if (tx_alg == MTL_TX_ALGORITHM_WFQ)
		opmode |= XGMAC_WFQ;
	else if (tx_alg == MTL_TX_ALGORITHM_DWRR)
		opmode |= XGMAC_DWRR;
	else
		use_ets = false;

	writel(opmode, ioaddr + XGMAC_MTL_OPMODE);

	/* Set ETS if desired */
	for (tc = 0; tc < MTL_MAX_TX_QUEUES; tc++) {
		u32 ctrl = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(tc));

		ctrl &= ~XGMAC_TSA;
		if (use_ets)
			ctrl |= XGMAC_ETS;
		writel(ctrl, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(tc));
	}
}
251 
/* Program the quantum/weight of Tx traffic class @queue. */
static void dwxgmac2_set_mtl_tx_queue_weight(struct stmmac_priv *priv,
					     struct mac_device_info *hw,
					     u32 weight, u32 queue)
{
	void __iomem *base = hw->pcsr;

	writel(weight, base + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
}
260 
/* Map MTL Rx @queue onto DMA channel @chan. MAP0 holds queues 0-3,
 * MAP1 holds queues 4-7.
 */
static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
				    u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 reg, map;

	if (queue < 4) {
		reg = XGMAC_MTL_RXQ_DMA_MAP0;
	} else {
		reg = XGMAC_MTL_RXQ_DMA_MAP1;
		queue -= 4;
	}

	map = readl(ioaddr + reg);
	map &= ~XGMAC_QxMDMACH(queue);
	map |= (chan << XGMAC_QxMDMACH_SHIFT(queue)) & XGMAC_QxMDMACH(queue);
	writel(map, ioaddr + reg);
}
277 
/* Program the Credit-Based Shaper parameters of traffic class @queue
 * and switch its transmission-selection algorithm to CBS.
 */
static void dwxgmac2_config_cbs(struct stmmac_priv *priv,
				struct mac_device_info *hw,
				u32 send_slope, u32 idle_slope,
				u32 high_credit, u32 low_credit, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 ctrl;

	writel(send_slope, ioaddr + XGMAC_MTL_TCx_SENDSLOPE(queue));
	writel(idle_slope, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
	writel(high_credit, ioaddr + XGMAC_MTL_TCx_HICREDIT(queue));
	writel(low_credit, ioaddr + XGMAC_MTL_TCx_LOCREDIT(queue));

	ctrl = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
	ctrl &= ~XGMAC_TSA;
	ctrl |= XGMAC_CC | XGMAC_CBS;
	writel(ctrl, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
}
296 
/* Snapshot the first XGMAC_MAC_REGSIZE 32-bit registers into @reg_space. */
static void dwxgmac2_dump_regs(struct mac_device_info *hw, u32 *reg_space)
{
	void __iomem *ioaddr = hw->pcsr;
	int off;

	for (off = 0; off < XGMAC_MAC_REGSIZE; off++)
		reg_space[off] = readl(ioaddr + off * 4);
}
305 
/* Decode the MAC-level interrupt sources. Only sources that are both
 * pending and enabled are considered. Returns a CORE_IRQ_* bitmask for
 * the Tx LPI transitions the caller must act on.
 */
static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
				    struct stmmac_extra_stats *x)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 stat, en;
	int ret = 0;

	en = readl(ioaddr + XGMAC_INT_EN);
	stat = readl(ioaddr + XGMAC_INT_STATUS);

	/* Mask off pending sources that are not enabled */
	stat &= en;

	if (stat & XGMAC_PMTIS) {
		x->irq_receive_pmt_irq_n++;
		/* Read for side effect only — presumably clears the PMT
		 * status (clear-on-read); result intentionally discarded.
		 */
		readl(ioaddr + XGMAC_PMT);
	}

	if (stat & XGMAC_LPIIS) {
		u32 lpi = readl(ioaddr + XGMAC_LPI_CTRL);

		if (lpi & XGMAC_TLPIEN) {
			ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
			x->irq_tx_path_in_lpi_mode_n++;
		}
		if (lpi & XGMAC_TLPIEX) {
			ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
			x->irq_tx_path_exit_lpi_mode_n++;
		}
		/* Rx LPI transitions are only counted, not reported */
		if (lpi & XGMAC_RLPIEN)
			x->irq_rx_path_in_lpi_mode_n++;
		if (lpi & XGMAC_RLPIEX)
			x->irq_rx_path_exit_lpi_mode_n++;
	}

	return ret;
}
342 
/* Check the MTL interrupt status for @chan. Returns
 * CORE_IRQ_MTL_RX_OVERFLOW if an Rx overflow was latched, 0 otherwise.
 * All queue interrupt sources are cleared on the way out.
 */
static int dwxgmac2_host_mtl_irq_status(struct stmmac_priv *priv,
					struct mac_device_info *hw, u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 qstatus;
	int ret = 0;

	if (!(readl(ioaddr + XGMAC_MTL_INT_STATUS) & BIT(chan)))
		return 0;

	qstatus = readl(ioaddr + XGMAC_MTL_QINT_STATUS(chan));
	if (qstatus & XGMAC_RXOVFIS)
		ret |= CORE_IRQ_MTL_RX_OVERFLOW;

	writel(~0x0, ioaddr + XGMAC_MTL_QINT_STATUS(chan));

	return ret;
}
362 
/* Configure pause-frame handling: Rx flow control globally, Tx flow
 * control per Tx queue with the pause time applied only in full duplex.
 */
static void dwxgmac2_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
			       unsigned int fc, unsigned int pause_time,
			       u32 tx_cnt)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 q;

	if (fc & FLOW_RX)
		writel(XGMAC_RFE, ioaddr + XGMAC_RX_FLOW_CTRL);

	if (!(fc & FLOW_TX))
		return;

	for (q = 0; q < tx_cnt; q++) {
		u32 ctrl = XGMAC_TFE;

		if (duplex)
			ctrl |= pause_time << XGMAC_PT_SHIFT;

		writel(ctrl, ioaddr + XGMAC_Qx_TX_FLOW_CTRL(q));
	}
}
383 
dwxgmac2_pmt(struct mac_device_info * hw,unsigned long mode)384 static void dwxgmac2_pmt(struct mac_device_info *hw, unsigned long mode)
385 {
386 	void __iomem *ioaddr = hw->pcsr;
387 	u32 val = 0x0;
388 
389 	if (mode & WAKE_MAGIC)
390 		val |= XGMAC_PWRDWN | XGMAC_MGKPKTEN;
391 	if (mode & WAKE_UCAST)
392 		val |= XGMAC_PWRDWN | XGMAC_GLBLUCAST | XGMAC_RWKPKTEN;
393 	if (val) {
394 		u32 cfg = readl(ioaddr + XGMAC_RX_CONFIG);
395 		cfg |= XGMAC_CONFIG_RE;
396 		writel(cfg, ioaddr + XGMAC_RX_CONFIG);
397 	}
398 
399 	writel(val, ioaddr + XGMAC_PMT);
400 }
401 
dwxgmac2_set_umac_addr(struct mac_device_info * hw,const unsigned char * addr,unsigned int reg_n)402 static void dwxgmac2_set_umac_addr(struct mac_device_info *hw,
403 				   const unsigned char *addr,
404 				   unsigned int reg_n)
405 {
406 	void __iomem *ioaddr = hw->pcsr;
407 	u32 value;
408 
409 	value = (addr[5] << 8) | addr[4];
410 	writel(value | XGMAC_AE, ioaddr + XGMAC_ADDRx_HIGH(reg_n));
411 
412 	value = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
413 	writel(value, ioaddr + XGMAC_ADDRx_LOW(reg_n));
414 }
415 
dwxgmac2_get_umac_addr(struct mac_device_info * hw,unsigned char * addr,unsigned int reg_n)416 static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
417 				   unsigned char *addr, unsigned int reg_n)
418 {
419 	void __iomem *ioaddr = hw->pcsr;
420 	u32 hi_addr, lo_addr;
421 
422 	/* Read the MAC address from the hardware */
423 	hi_addr = readl(ioaddr + XGMAC_ADDRx_HIGH(reg_n));
424 	lo_addr = readl(ioaddr + XGMAC_ADDRx_LOW(reg_n));
425 
426 	/* Extract the MAC address from the high and low words */
427 	addr[0] = lo_addr & 0xff;
428 	addr[1] = (lo_addr >> 8) & 0xff;
429 	addr[2] = (lo_addr >> 16) & 0xff;
430 	addr[3] = (lo_addr >> 24) & 0xff;
431 	addr[4] = hi_addr & 0xff;
432 	addr[5] = (hi_addr >> 8) & 0xff;
433 }
434 
/* Turn on LPI (EEE) transmission, optionally with Tx clock gating. */
static void dwxgmac2_set_eee_mode(struct mac_device_info *hw,
				  bool en_tx_lpi_clockgating)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 lpi_ctrl = readl(ioaddr + XGMAC_LPI_CTRL);

	lpi_ctrl |= XGMAC_LPITXEN | XGMAC_LPITXA;
	if (en_tx_lpi_clockgating)
		lpi_ctrl |= XGMAC_TXCGE;

	writel(lpi_ctrl, ioaddr + XGMAC_LPI_CTRL);
}
449 
dwxgmac2_reset_eee_mode(struct mac_device_info * hw)450 static void dwxgmac2_reset_eee_mode(struct mac_device_info *hw)
451 {
452 	void __iomem *ioaddr = hw->pcsr;
453 	u32 value;
454 
455 	value = readl(ioaddr + XGMAC_LPI_CTRL);
456 	value &= ~(XGMAC_LPITXEN | XGMAC_LPITXA | XGMAC_TXCGE);
457 	writel(value, ioaddr + XGMAC_LPI_CTRL);
458 }
459 
dwxgmac2_set_eee_pls(struct mac_device_info * hw,int link)460 static void dwxgmac2_set_eee_pls(struct mac_device_info *hw, int link)
461 {
462 	void __iomem *ioaddr = hw->pcsr;
463 	u32 value;
464 
465 	value = readl(ioaddr + XGMAC_LPI_CTRL);
466 	if (link)
467 		value |= XGMAC_PLS;
468 	else
469 		value &= ~XGMAC_PLS;
470 	writel(value, ioaddr + XGMAC_LPI_CTRL);
471 }
472 
dwxgmac2_set_eee_timer(struct mac_device_info * hw,int ls,int tw)473 static void dwxgmac2_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
474 {
475 	void __iomem *ioaddr = hw->pcsr;
476 	u32 value;
477 
478 	value = (tw & 0xffff) | ((ls & 0x3ff) << 16);
479 	writel(value, ioaddr + XGMAC_LPI_TIMER_CTRL);
480 }
481 
/* Write the multicast hash table. The register count follows the hash
 * width: 2^mcbitslog2 bits split into 32-bit registers; unsupported
 * widths are silently ignored.
 */
static void dwxgmac2_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits,
				int mcbitslog2)
{
	int nregs, i;

	if (mcbitslog2 == 6)
		nregs = 2;
	else if (mcbitslog2 == 7)
		nregs = 4;
	else if (mcbitslog2 == 8)
		nregs = 8;
	else
		return;

	for (i = 0; i < nregs; i++)
		writel(mcfilterbits[i], ioaddr + XGMAC_HASH_TABLE(i));
}
504 
/* Program the packet filter from the net_device state: promiscuous,
 * all-multicast, hash-based multicast filtering, and the perfect
 * unicast filter entries. Falls back to promiscuous mode when there are
 * more unicast addresses than hardware filter entries.
 */
static void dwxgmac2_set_filter(struct mac_device_info *hw,
				struct net_device *dev)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
	int mcbitslog2 = hw->mcast_bits_log2;
	u32 mc_filter[8];
	int i;

	value &= ~(XGMAC_FILTER_PR | XGMAC_FILTER_HMC | XGMAC_FILTER_PM);
	value |= XGMAC_FILTER_HPF;

	memset(mc_filter, 0, sizeof(mc_filter));

	if (dev->flags & IFF_PROMISC) {
		value |= XGMAC_FILTER_PR;
		value |= XGMAC_FILTER_PCF;
	} else if ((dev->flags & IFF_ALLMULTI) ||
		   (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
		/* Pass all multicast; also saturate the hash table */
		value |= XGMAC_FILTER_PM;

		for (i = 0; i < XGMAC_MAX_HASH_TABLE; i++)
			writel(~0x0, ioaddr + XGMAC_HASH_TABLE(i));
	} else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
		struct netdev_hw_addr *ha;

		value |= XGMAC_FILTER_HMC;

		netdev_for_each_mc_addr(ha, dev) {
			/* Hash index = top mcbitslog2 bits of the
			 * bit-reversed CRC32 of the MAC address
			 */
			u32 nr = (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
					(32 - mcbitslog2));
			mc_filter[nr >> 5] |= (1 << (nr & 0x1F));
		}
	}

	dwxgmac2_set_mchash(ioaddr, mc_filter, mcbitslog2);

	/* Handle multiple unicast addresses */
	if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
		value |= XGMAC_FILTER_PR;
	} else {
		struct netdev_hw_addr *ha;
		int reg = 1;	/* entry 0 skipped — presumably the own MAC */

		netdev_for_each_uc_addr(ha, dev) {
			dwxgmac2_set_umac_addr(hw, ha->addr, reg);
			reg++;
		}

		/* Zero out (invalidate) the unused filter entries */
		for ( ; reg < XGMAC_ADDR_MAX; reg++) {
			writel(0, ioaddr + XGMAC_ADDRx_HIGH(reg));
			writel(0, ioaddr + XGMAC_ADDRx_LOW(reg));
		}
	}

	writel(value, ioaddr + XGMAC_PACKET_FILTER);
}
562 
/* Toggle the MAC-level loopback (LM) bit in the Rx configuration. */
static void dwxgmac2_set_mac_loopback(void __iomem *ioaddr, bool enable)
{
	u32 rx_cfg = readl(ioaddr + XGMAC_RX_CONFIG);

	if (enable)
		rx_cfg |= XGMAC_CONFIG_LM;
	else
		rx_cfg &= ~XGMAC_CONFIG_LM;

	writel(rx_cfg, ioaddr + XGMAC_RX_CONFIG);
}
574 
/* Write one word of RSS state (key word when @is_key, indirection-table
 * entry otherwise) at index @idx, then wait for the OB bit to clear.
 * Returns 0 or the poll's -ETIMEDOUT.
 */
static int dwxgmac2_rss_write_reg(void __iomem *ioaddr, bool is_key, int idx,
				  u32 val)
{
	u32 ctrl;

	writel(val, ioaddr + XGMAC_RSS_DATA);

	ctrl = (idx << XGMAC_RSSIA_SHIFT) | XGMAC_OB;
	if (is_key)
		ctrl |= XGMAC_ADDRT;
	writel(ctrl, ioaddr + XGMAC_RSS_ADDR);

	return readl_poll_timeout(ioaddr + XGMAC_RSS_ADDR, ctrl,
				  !(ctrl & XGMAC_OB), 100, 10000);
}
589 
/* Enable or disable RSS. When enabling: program the hash key and the
 * indirection table, route every Rx queue through the dynamic DMA
 * channel selection, then turn on IP/TCP/UDP hashing. Returns 0 on
 * success or the error from a register-write poll.
 */
static int dwxgmac2_rss_configure(struct mac_device_info *hw,
				  struct stmmac_rss *cfg, u32 num_rxq)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value, *key;
	int i, ret;

	value = readl(ioaddr + XGMAC_RSS_CTRL);
	if (!cfg || !cfg->enable) {
		/* Disable RSS and leave everything else untouched */
		value &= ~XGMAC_RSSE;
		writel(value, ioaddr + XGMAC_RSS_CTRL);
		return 0;
	}

	/* The key byte array is written one u32 at a time */
	key = (u32 *)cfg->key;
	for (i = 0; i < (ARRAY_SIZE(cfg->key) / sizeof(u32)); i++) {
		ret = dwxgmac2_rss_write_reg(ioaddr, true, i, key[i]);
		if (ret)
			return ret;
	}

	for (i = 0; i < ARRAY_SIZE(cfg->table); i++) {
		ret = dwxgmac2_rss_write_reg(ioaddr, false, i, cfg->table[i]);
		if (ret)
			return ret;
	}

	for (i = 0; i < num_rxq; i++)
		dwxgmac2_map_mtl_to_dma(hw, i, XGMAC_QDDMACH);

	value |= XGMAC_UDP4TE | XGMAC_TCP4TE | XGMAC_IP2TE | XGMAC_RSSE;
	writel(value, ioaddr + XGMAC_RSS_CTRL);
	return 0;
}
624 
/* Configure VLAN filtering. @hash selects hash-table matching;
 * otherwise @perfect_match (a VLAN ID) selects perfect filtering; with
 * both zero, VLAN filtering is disabled. @is_double controls the
 * double-tag (EDVLP/ESVL/DOVLTC) matching bits in all enabled modes.
 */
static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
				      u16 perfect_match, bool is_double)
{
	void __iomem *ioaddr = hw->pcsr;

	writel(hash, ioaddr + XGMAC_VLAN_HASH_TABLE);

	if (hash) {
		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);

		/* Enable VLAN tag filtering in the packet filter */
		value |= XGMAC_FILTER_VTFE;

		writel(value, ioaddr + XGMAC_PACKET_FILTER);

		value = readl(ioaddr + XGMAC_VLAN_TAG);

		/* Hash-table based matching */
		value |= XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
		if (is_double) {
			value |= XGMAC_VLAN_EDVLP;
			value |= XGMAC_VLAN_ESVL;
			value |= XGMAC_VLAN_DOVLTC;
		} else {
			value &= ~XGMAC_VLAN_EDVLP;
			value &= ~XGMAC_VLAN_ESVL;
			value &= ~XGMAC_VLAN_DOVLTC;
		}

		value &= ~XGMAC_VLAN_VID;
		writel(value, ioaddr + XGMAC_VLAN_TAG);
	} else if (perfect_match) {
		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);

		value |= XGMAC_FILTER_VTFE;

		writel(value, ioaddr + XGMAC_PACKET_FILTER);

		value = readl(ioaddr + XGMAC_VLAN_TAG);

		/* Perfect filtering: hash matching off, tag compare on */
		value &= ~XGMAC_VLAN_VTHM;
		value |= XGMAC_VLAN_ETV;
		if (is_double) {
			value |= XGMAC_VLAN_EDVLP;
			value |= XGMAC_VLAN_ESVL;
			value |= XGMAC_VLAN_DOVLTC;
		} else {
			value &= ~XGMAC_VLAN_EDVLP;
			value &= ~XGMAC_VLAN_ESVL;
			value &= ~XGMAC_VLAN_DOVLTC;
		}

		/* Replace the VID field with the perfect-match value */
		value &= ~XGMAC_VLAN_VID;
		writel(value | perfect_match, ioaddr + XGMAC_VLAN_TAG);
	} else {
		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);

		/* Disable VLAN filtering entirely */
		value &= ~XGMAC_FILTER_VTFE;

		writel(value, ioaddr + XGMAC_PACKET_FILTER);

		value = readl(ioaddr + XGMAC_VLAN_TAG);

		value &= ~(XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV);
		value &= ~(XGMAC_VLAN_EDVLP | XGMAC_VLAN_ESVL);
		value &= ~XGMAC_VLAN_DOVLTC;
		value &= ~XGMAC_VLAN_VID;

		writel(value, ioaddr + XGMAC_VLAN_TAG);
	}
}
694 
/* Human-readable description of one bit position in a safety interrupt
 * status register; valid is false for reserved/unknown bits.
 */
struct dwxgmac3_error_desc {
	bool valid;
	const char *desc;
	const char *detailed_desc;
};

/* Byte offset of a counter array inside struct stmmac_safety_stats */
#define STAT_OFF(field)		offsetof(struct stmmac_safety_stats, field)
702 
/* Log one message per set bit in @value and bump the matching counter.
 * @field_offset (built with STAT_OFF()) locates the per-module counter
 * array inside @stats; the counter index is the bit position.
 */
static void dwxgmac3_log_error(struct net_device *ndev, u32 value, bool corr,
			       const char *module_name,
			       const struct dwxgmac3_error_desc *desc,
			       unsigned long field_offset,
			       struct stmmac_safety_stats *stats)
{
	unsigned long loc, mask;
	u8 *bptr = (u8 *)stats;
	unsigned long *ptr;

	/* Counter array for this module, addressed by byte offset */
	ptr = (unsigned long *)(bptr + field_offset);

	mask = value;
	for_each_set_bit(loc, &mask, 32) {
		netdev_err(ndev, "Found %s error in %s: '%s: %s'\n", corr ?
				"correctable" : "uncorrectable", module_name,
				desc[loc].desc, desc[loc].detailed_desc);

		/* Update counters */
		ptr[loc]++;
	}
}
725 
/* Bit-indexed descriptions for XGMAC_MAC_DPP_FSM_INT_STATUS (see
 * dwxgmac3_handle_mac_err()).
 */
static const struct dwxgmac3_error_desc dwxgmac3_mac_errors[32]= {
	{ true, "ATPES", "Application Transmit Interface Parity Check Error" },
	{ true, "DPES", "Descriptor Cache Data Path Parity Check Error" },
	{ true, "TPES", "TSO Data Path Parity Check Error" },
	{ true, "TSOPES", "TSO Header Data Path Parity Check Error" },
	{ true, "MTPES", "MTL Data Path Parity Check Error" },
	{ true, "MTSPES", "MTL TX Status Data Path Parity Check Error" },
	{ true, "MTBUPES", "MAC TBU Data Path Parity Check Error" },
	{ true, "MTFCPES", "MAC TFC Data Path Parity Check Error" },
	{ true, "ARPES", "Application Receive Interface Data Path Parity Check Error" },
	{ true, "MRWCPES", "MTL RWC Data Path Parity Check Error" },
	{ true, "MRRCPES", "MTL RCC Data Path Parity Check Error" },
	{ true, "CWPES", "CSR Write Data Path Parity Check Error" },
	{ true, "ASRPES", "AXI Slave Read Data Path Parity Check Error" },
	{ true, "TTES", "TX FSM Timeout Error" },
	{ true, "RTES", "RX FSM Timeout Error" },
	{ true, "CTES", "CSR FSM Timeout Error" },
	{ true, "ATES", "APP FSM Timeout Error" },
	{ true, "PTES", "PTP FSM Timeout Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ true, "MSTTES", "Master Read/Write Timeout Error" },
	{ true, "SLVTES", "Slave Read/Write Timeout Error" },
	{ true, "ATITES", "Application Timeout on ATI Interface Error" },
	{ true, "ARITES", "Application Timeout on ARI Interface Error" },
	{ true, "FSMPES", "FSM State Parity Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ true, "CPI", "Control Register Parity Check Error" },
};
760 
/* Fetch, clear and log the MAC DPP/FSM safety error sources. */
static void dwxgmac3_handle_mac_err(struct net_device *ndev,
				    void __iomem *ioaddr, bool correctable,
				    struct stmmac_safety_stats *stats)
{
	u32 status;

	/* Read the latched sources and write them back (presumably W1C) */
	status = readl(ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);
	writel(status, ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);

	dwxgmac3_log_error(ndev, status, correctable, "MAC",
			   dwxgmac3_mac_errors, STAT_OFF(mac_errors), stats);
}
773 
/* Bit-indexed descriptions for XGMAC_MTL_ECC_INT_STATUS (see
 * dwxgmac3_handle_mtl_err()).
 */
static const struct dwxgmac3_error_desc dwxgmac3_mtl_errors[32]= {
	{ true, "TXCES", "MTL TX Memory Error" },
	{ true, "TXAMS", "MTL TX Memory Address Mismatch Error" },
	{ true, "TXUES", "MTL TX Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 3 */
	{ true, "RXCES", "MTL RX Memory Error" },
	{ true, "RXAMS", "MTL RX Memory Address Mismatch Error" },
	{ true, "RXUES", "MTL RX Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 7 */
	{ true, "ECES", "MTL EST Memory Error" },
	{ true, "EAMS", "MTL EST Memory Address Mismatch Error" },
	{ true, "EUES", "MTL EST Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 11 */
	{ true, "RPCES", "MTL RX Parser Memory Error" },
	{ true, "RPAMS", "MTL RX Parser Memory Address Mismatch Error" },
	{ true, "RPUES", "MTL RX Parser Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 15 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 16 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 17 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 21 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 22 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 23 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 24 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 25 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};
808 
/* Fetch, clear and log the MTL ECC safety error sources. */
static void dwxgmac3_handle_mtl_err(struct net_device *ndev,
				    void __iomem *ioaddr, bool correctable,
				    struct stmmac_safety_stats *stats)
{
	u32 status;

	/* Read the latched sources and write them back (presumably W1C) */
	status = readl(ioaddr + XGMAC_MTL_ECC_INT_STATUS);
	writel(status, ioaddr + XGMAC_MTL_ECC_INT_STATUS);

	dwxgmac3_log_error(ndev, status, correctable, "MTL",
			   dwxgmac3_mtl_errors, STAT_OFF(mtl_errors), stats);
}
821 
/* Bit-indexed descriptions for XGMAC_DMA_ECC_INT_STATUS (see
 * dwxgmac3_handle_dma_err()).
 */
static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32]= {
	{ true, "TCES", "DMA TSO Memory Error" },
	{ true, "TAMS", "DMA TSO Memory Address Mismatch Error" },
	{ true, "TUES", "DMA TSO Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 3 */
	{ true, "DCES", "DMA DCACHE Memory Error" },
	{ true, "DAMS", "DMA DCACHE Address Mismatch Error" },
	{ true, "DUES", "DMA DCACHE Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 7 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 8 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 9 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 10 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 11 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 12 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 13 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 14 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 15 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 16 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 17 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 21 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 22 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 23 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 24 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 25 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};
856 
/* Bit-indexed descriptions for XGMAC_DMA_DPP_INT_STATUS: one Tx and one
 * Rx descriptor-parity bit per DMA channel (16 each).
 */
static const char dpp_rx_err[] = "Read Rx Descriptor Parity checker Error";
static const char dpp_tx_err[] = "Read Tx Descriptor Parity checker Error";
static const struct dwxgmac3_error_desc dwxgmac3_dma_dpp_errors[32] = {
	{ true, "TDPES0", dpp_tx_err },
	{ true, "TDPES1", dpp_tx_err },
	{ true, "TDPES2", dpp_tx_err },
	{ true, "TDPES3", dpp_tx_err },
	{ true, "TDPES4", dpp_tx_err },
	{ true, "TDPES5", dpp_tx_err },
	{ true, "TDPES6", dpp_tx_err },
	{ true, "TDPES7", dpp_tx_err },
	{ true, "TDPES8", dpp_tx_err },
	{ true, "TDPES9", dpp_tx_err },
	{ true, "TDPES10", dpp_tx_err },
	{ true, "TDPES11", dpp_tx_err },
	{ true, "TDPES12", dpp_tx_err },
	{ true, "TDPES13", dpp_tx_err },
	{ true, "TDPES14", dpp_tx_err },
	{ true, "TDPES15", dpp_tx_err },
	{ true, "RDPES0", dpp_rx_err },
	{ true, "RDPES1", dpp_rx_err },
	{ true, "RDPES2", dpp_rx_err },
	{ true, "RDPES3", dpp_rx_err },
	{ true, "RDPES4", dpp_rx_err },
	{ true, "RDPES5", dpp_rx_err },
	{ true, "RDPES6", dpp_rx_err },
	{ true, "RDPES7", dpp_rx_err },
	{ true, "RDPES8", dpp_rx_err },
	{ true, "RDPES9", dpp_rx_err },
	{ true, "RDPES10", dpp_rx_err },
	{ true, "RDPES11", dpp_rx_err },
	{ true, "RDPES12", dpp_rx_err },
	{ true, "RDPES13", dpp_rx_err },
	{ true, "RDPES14", dpp_rx_err },
	{ true, "RDPES15", dpp_rx_err },
};
893 
/* Fetch, clear and log the DMA ECC and descriptor-parity (DPP) safety
 * error sources.
 */
static void dwxgmac3_handle_dma_err(struct net_device *ndev,
				    void __iomem *ioaddr, bool correctable,
				    struct stmmac_safety_stats *stats)
{
	u32 status;

	/* Read the latched sources and write them back (presumably W1C) */
	status = readl(ioaddr + XGMAC_DMA_ECC_INT_STATUS);
	writel(status, ioaddr + XGMAC_DMA_ECC_INT_STATUS);

	dwxgmac3_log_error(ndev, status, correctable, "DMA",
			   dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats);

	status = readl(ioaddr + XGMAC_DMA_DPP_INT_STATUS);
	writel(status, ioaddr + XGMAC_DMA_DPP_INT_STATUS);

	/* DPP errors are always logged as uncorrectable */
	dwxgmac3_log_error(ndev, status, false, "DMA_DPP",
			   dwxgmac3_dma_dpp_errors,
			   STAT_OFF(dma_dpp_errors), stats);
}
913 
/* Enable the ECC, parity and FSM-timeout safety features according to
 * safety-package level @asp. Returns 0 on success, -EINVAL when no
 * safety package is present. @safety_cfg is currently unused here.
 */
static int
dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp,
			    struct stmmac_safety_feature_cfg *safety_cfg)
{
	u32 value;

	if (!asp)
		return -EINVAL;

	/* 1. Enable Safety Features */
	writel(0x0, ioaddr + XGMAC_MTL_ECC_CONTROL);

	/* 2. Enable MTL Safety Interrupts */
	value = readl(ioaddr + XGMAC_MTL_ECC_INT_ENABLE);
	value |= XGMAC_RPCEIE; /* RX Parser Memory Correctable Error */
	value |= XGMAC_ECEIE; /* EST Memory Correctable Error */
	value |= XGMAC_RXCEIE; /* RX Memory Correctable Error */
	value |= XGMAC_TXCEIE; /* TX Memory Correctable Error */
	writel(value, ioaddr + XGMAC_MTL_ECC_INT_ENABLE);

	/* 3. Enable DMA Safety Interrupts */
	value = readl(ioaddr + XGMAC_DMA_ECC_INT_ENABLE);
	value |= XGMAC_DCEIE; /* Descriptor Cache Memory Correctable Error */
	value |= XGMAC_TCEIE; /* TSO Memory Correctable Error */
	writel(value, ioaddr + XGMAC_DMA_ECC_INT_ENABLE);

	/* 0x2: Without ECC or Parity Ports on External Application Interface
	 * 0x4: Only ECC Protection for External Memory feature is selected
	 */
	if (asp == 0x2 || asp == 0x4)
		return 0;

	/* 4. Enable Parity and Timeout for FSM */
	value = readl(ioaddr + XGMAC_MAC_FSM_CONTROL);
	value |= XGMAC_PRTYEN; /* FSM Parity Feature */
	value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */
	writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL);

	/* 5. Enable Data Path Parity Protection */
	value = readl(ioaddr + XGMAC_MTL_DPP_CONTROL);
	/* already enabled by default, explicit enable it again */
	value &= ~XGMAC_DPP_DISABLE;
	writel(value, ioaddr + XGMAC_MTL_DPP_CONTROL);

	return 0;
}
960 
/* Check and dispatch pending safety interrupts for the MAC, MTL and DMA
 * blocks. Returns -EINVAL for @asp == 0, 0 when nothing was pending or
 * only correctable errors were found, and a positive value when at least
 * one uncorrectable error was handled.
 */
static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev,
					   void __iomem *ioaddr,
					   unsigned int asp,
					   struct stmmac_safety_stats *stats)
{
	bool err, corr;
	u32 mtl, dma;
	int ret = 0;

	if (!asp)
		return -EINVAL;

	mtl = readl(ioaddr + XGMAC_MTL_SAFETY_INT_STATUS);
	dma = readl(ioaddr + XGMAC_DMA_SAFETY_INT_STATUS);

	/* MAC-level errors are always treated as uncorrectable */
	err = (mtl & XGMAC_MCSIS) || (dma & XGMAC_MCSIS);
	corr = false;
	if (err) {
		dwxgmac3_handle_mac_err(ndev, ioaddr, corr, stats);
		ret |= !corr;	/* flag uncorrectable error to the caller */
	}

	err = (mtl & (XGMAC_MEUIS | XGMAC_MECIS)) ||
	      (dma & (XGMAC_MSUIS | XGMAC_MSCIS));
	corr = (mtl & XGMAC_MECIS) || (dma & XGMAC_MSCIS);
	if (err) {
		dwxgmac3_handle_mtl_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	/* DMA_DPP_Interrupt_Status is indicated by MCSIS bit in
	 * DMA_Safety_Interrupt_Status, so we handle DMA Data Path
	 * Parity Errors here
	 */
	err = dma & (XGMAC_DEUIS | XGMAC_DECIS | XGMAC_MCSIS);
	corr = dma & XGMAC_DECIS;
	if (err) {
		dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	return ret;
}
1004 
/* Per-module error descriptor tables, indexed by (dump index / 32).
 * The order must match the field layout of struct stmmac_safety_stats as
 * it is addressed linearly by dwxgmac3_safety_feat_dump().
 */
static const struct dwxgmac3_error {
	const struct dwxgmac3_error_desc *desc;
} dwxgmac3_all_errors[] = {
	{ dwxgmac3_mac_errors },
	{ dwxgmac3_mtl_errors },
	{ dwxgmac3_dma_errors },
	{ dwxgmac3_dma_dpp_errors },
};
1013 
dwxgmac3_safety_feat_dump(struct stmmac_safety_stats * stats,int index,unsigned long * count,const char ** desc)1014 static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats,
1015 				     int index, unsigned long *count,
1016 				     const char **desc)
1017 {
1018 	int module = index / 32, offset = index % 32;
1019 	unsigned long *ptr = (unsigned long *)stats;
1020 
1021 	if (module >= ARRAY_SIZE(dwxgmac3_all_errors))
1022 		return -EINVAL;
1023 	if (!dwxgmac3_all_errors[module].desc[offset].valid)
1024 		return -EINVAL;
1025 	if (count)
1026 		*count = *(ptr + index);
1027 	if (desc)
1028 		*desc = dwxgmac3_all_errors[module].desc[offset].desc;
1029 	return 0;
1030 }
1031 
/* Disable the Flexible RX Parser by clearing FRPE in MTL_OPMODE.
 * Always succeeds; returns 0 to fit the stmmac callback signature.
 */
static int dwxgmac3_rxp_disable(void __iomem *ioaddr)
{
	u32 opmode;

	opmode = readl(ioaddr + XGMAC_MTL_OPMODE);
	opmode &= ~XGMAC_FRPE;
	writel(opmode, ioaddr + XGMAC_MTL_OPMODE);

	return 0;
}
1041 
/* Enable the Flexible RX Parser by setting FRPE in MTL_OPMODE */
static void dwxgmac3_rxp_enable(void __iomem *ioaddr)
{
	u32 opmode = readl(ioaddr + XGMAC_MTL_OPMODE);

	writel(opmode | XGMAC_FRPE, ioaddr + XGMAC_MTL_OPMODE);
}
1050 
/* Program one RX Parser table entry through the indirect access registers.
 * Each entry spans sizeof(entry->val)/4 32-bit words; @pos is the entry
 * index, converted to a word address (real_pos) per iteration. The control
 * register is written cumulatively: address first, then the write opcode,
 * then the start/busy bit which kicks off the transfer.
 * Returns 0 on success or the poll-timeout error.
 */
static int dwxgmac3_rxp_update_single_entry(void __iomem *ioaddr,
					    struct stmmac_tc_entry *entry,
					    int pos)
{
	int ret, i;

	for (i = 0; i < (sizeof(entry->val) / sizeof(u32)); i++) {
		int real_pos = pos * (sizeof(entry->val) / sizeof(u32)) + i;
		u32 val;

		/* Wait for ready */
		ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
					 val, !(val & XGMAC_STARTBUSY), 1, 10000);
		if (ret)
			return ret;

		/* Write data */
		val = *((u32 *)&entry->val + i);
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_DATA);

		/* Write pos */
		val = real_pos & XGMAC_ADDR;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Write OP */
		val |= XGMAC_WRRDN;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Start Write */
		val |= XGMAC_STARTBUSY;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Wait for done */
		ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
					 val, !(val & XGMAC_STARTBUSY), 1, 10000);
		if (ret)
			return ret;
	}

	return 0;
}
1092 
1093 static struct stmmac_tc_entry *
dwxgmac3_rxp_get_next_entry(struct stmmac_tc_entry * entries,unsigned int count,u32 curr_prio)1094 dwxgmac3_rxp_get_next_entry(struct stmmac_tc_entry *entries,
1095 			    unsigned int count, u32 curr_prio)
1096 {
1097 	struct stmmac_tc_entry *entry;
1098 	u32 min_prio = ~0x0;
1099 	int i, min_prio_idx;
1100 	bool found = false;
1101 
1102 	for (i = count - 1; i >= 0; i--) {
1103 		entry = &entries[i];
1104 
1105 		/* Do not update unused entries */
1106 		if (!entry->in_use)
1107 			continue;
1108 		/* Do not update already updated entries (i.e. fragments) */
1109 		if (entry->in_hw)
1110 			continue;
1111 		/* Let last entry be updated last */
1112 		if (entry->is_last)
1113 			continue;
1114 		/* Do not return fragments */
1115 		if (entry->is_frag)
1116 			continue;
1117 		/* Check if we already checked this prio */
1118 		if (entry->prio < curr_prio)
1119 			continue;
1120 		/* Check if this is the minimum prio */
1121 		if (entry->prio < min_prio) {
1122 			min_prio = entry->prio;
1123 			min_prio_idx = i;
1124 			found = true;
1125 		}
1126 	}
1127 
1128 	if (found)
1129 		return &entries[min_prio_idx];
1130 	return NULL;
1131 }
1132 
/* (Re)program the whole RX Parser instruction table from @entries.
 * RX and the parser are force-disabled during the update; entries are
 * written in ascending priority order with each fragment placed directly
 * after its parent, followed by the catch-all "last" entries. RX is always
 * re-enabled on exit. Returns 0 on success or the first error from the
 * table-write helpers.
 */
static int dwxgmac3_rxp_config(void __iomem *ioaddr,
			       struct stmmac_tc_entry *entries,
			       unsigned int count)
{
	struct stmmac_tc_entry *entry, *frag;
	int i, ret, nve = 0;	/* nve counts entries actually written to HW */
	u32 curr_prio = 0;
	u32 old_val, val;

	/* Force disable RX */
	old_val = readl(ioaddr + XGMAC_RX_CONFIG);
	val = old_val & ~XGMAC_CONFIG_RE;
	writel(val, ioaddr + XGMAC_RX_CONFIG);

	/* Disable RX Parser */
	ret = dwxgmac3_rxp_disable(ioaddr);
	if (ret)
		goto re_enable;

	/* Set all entries as NOT in HW */
	for (i = 0; i < count; i++) {
		entry = &entries[i];
		entry->in_hw = false;
	}

	/* Update entries by reverse order */
	while (1) {
		entry = dwxgmac3_rxp_get_next_entry(entries, count, curr_prio);
		if (!entry)
			break;

		curr_prio = entry->prio;
		frag = entry->frag_ptr;

		/* Set special fragment requirements */
		if (frag) {
			entry->val.af = 0;
			entry->val.rf = 0;
			entry->val.nc = 1;
			/* ok_index skips over the fragment written next */
			entry->val.ok_index = nve + 2;
		}

		ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
		if (ret)
			goto re_enable;

		entry->table_pos = nve++;
		entry->in_hw = true;

		if (frag && !frag->in_hw) {
			ret = dwxgmac3_rxp_update_single_entry(ioaddr, frag, nve);
			if (ret)
				goto re_enable;
			frag->table_pos = nve++;
			frag->in_hw = true;
		}
	}

	/* Nothing was programmed: leave the parser disabled (ret is 0 here) */
	if (!nve)
		goto re_enable;

	/* Update all pass entry */
	for (i = 0; i < count; i++) {
		entry = &entries[i];
		if (!entry->is_last)
			continue;

		ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
		if (ret)
			goto re_enable;

		entry->table_pos = nve++;
	}

	/* Assume n. of parsable entries == n. of valid entries */
	val = (nve << 16) & XGMAC_NPE;
	val |= nve & XGMAC_NVE;
	writel(val, ioaddr + XGMAC_MTL_RXP_CONTROL_STATUS);

	/* Enable RX Parser */
	dwxgmac3_rxp_enable(ioaddr);

re_enable:
	/* Re-enable RX */
	writel(old_val, ioaddr + XGMAC_RX_CONFIG);
	return ret;
}
1220 
/* Fetch the MAC-captured TX timestamp into @ts (nanoseconds).
 * Returns -EBUSY if no capture becomes available within 10ms.
 */
static int dwxgmac2_get_mac_tx_timestamp(struct mac_device_info *hw, u64 *ts)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 status;
	int ret;

	/* Wait for the TX timestamp capture flag */
	ret = readl_poll_timeout_atomic(ioaddr + XGMAC_TIMESTAMP_STATUS,
					status, status & XGMAC_TXTSC,
					100, 10000);
	if (ret)
		return -EBUSY;

	/* Combine the nanoseconds and seconds snapshot registers */
	*ts = readl(ioaddr + XGMAC_TXTIMESTAMP_NSEC) & XGMAC_TXTSSTSLO;
	*ts += readl(ioaddr + XGMAC_TXTIMESTAMP_SEC) * 1000000000ULL;

	return 0;
}
1234 
/* Configure flexible PPS output @index: program start time, interval and
 * pulse width (50% duty cycle) derived from @cfg, or issue a STOP command
 * when @enable is false. Returns 0 on success, -EINVAL for unusable
 * arguments or a period too short for the clock, -EBUSY while a previously
 * programmed target time is still pending in hardware.
 */
static int dwxgmac2_flex_pps_config(void __iomem *ioaddr, int index,
				    struct stmmac_pps_cfg *cfg, bool enable,
				    u32 sub_second_inc, u32 systime_flags)
{
	u32 tnsec = readl(ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));
	u32 val = readl(ioaddr + XGMAC_PPS_CONTROL);
	u64 period;

	if (!cfg->available)
		return -EINVAL;
	/* HW is still latching the previously programmed target time */
	if (tnsec & XGMAC_TRGTBUSY0)
		return -EBUSY;
	if (!sub_second_inc || !systime_flags)
		return -EINVAL;

	val &= ~XGMAC_PPSx_MASK(index);

	if (!enable) {
		val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_STOP);
		writel(val, ioaddr + XGMAC_PPS_CONTROL);
		return 0;
	}

	val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_START);
	val |= XGMAC_TRGTMODSELx(index, XGMAC_PPSCMD_START);

	/* XGMAC Core has 4 PPS outputs at most.
	 *
	 * Prior XGMAC Core 3.20, Fixed mode or Flexible mode are selectable for
	 * PPS0 only via PPSEN0. PPS{1,2,3} are in Flexible mode by default,
	 * and can not be switched to Fixed mode, since PPSEN{1,2,3} are
	 * read-only reserved to 0.
	 * But we always set PPSEN{1,2,3} do not make things worse ;-)
	 *
	 * From XGMAC Core 3.20 and later, PPSEN{0,1,2,3} are writable and must
	 * be set, or the PPS outputs stay in Fixed PPS mode by default.
	 */
	val |= XGMAC_PPSENx(index);

	writel(cfg->start.tv_sec, ioaddr + XGMAC_PPSx_TARGET_TIME_SEC(index));

	/* In binary rollover mode the subsecond register counts in ~0.465ns
	 * units (hence * 1000 / 465), so rescale the nanosecond value.
	 */
	if (!(systime_flags & PTP_TCR_TSCTRLSSR))
		cfg->start.tv_nsec = (cfg->start.tv_nsec * 1000) / 465;
	writel(cfg->start.tv_nsec, ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));

	/* Express the requested period in units of the subsecond increment */
	period = cfg->period.tv_sec * 1000000000;
	period += cfg->period.tv_nsec;

	do_div(period, sub_second_inc);

	if (period <= 1)
		return -EINVAL;

	writel(period - 1, ioaddr + XGMAC_PPSx_INTERVAL(index));

	/* Pulse width is half the period, i.e. a 50% duty cycle */
	period >>= 1;
	if (period <= 1)
		return -EINVAL;

	writel(period - 1, ioaddr + XGMAC_PPSx_WIDTH(index));

	/* Finally, activate it */
	writel(val, ioaddr + XGMAC_PPS_CONTROL);
	return 0;
}
1300 
/* Program the Source Address insertion/replacement Control (SARC) field
 * of the TX configuration register with @val.
 */
static void dwxgmac2_sarc_configure(void __iomem *ioaddr, int val)
{
	u32 cfg = readl(ioaddr + XGMAC_TX_CONFIG);

	cfg = (cfg & ~XGMAC_CONFIG_SARC) | (val << XGMAC_CONFIG_SARC_SHIFT);

	writel(cfg, ioaddr + XGMAC_TX_CONFIG);
}
1310 
/* Enable VLAN tag insertion on TX and program the tag control field
 * from @type into the VLAN inclusion register.
 */
static void dwxgmac2_enable_vlan(struct mac_device_info *hw, u32 type)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 incl;

	incl = readl(ioaddr + XGMAC_VLAN_INCL);
	incl |= XGMAC_VLAN_VLTI;
	incl |= XGMAC_VLAN_CSVL; /* Only use SVLAN */
	incl = (incl & ~XGMAC_VLAN_VLC) |
	       ((type << XGMAC_VLAN_VLC_SHIFT) & XGMAC_VLAN_VLC);
	writel(incl, ioaddr + XGMAC_VLAN_INCL);
}
1323 
dwxgmac2_filter_wait(struct mac_device_info * hw)1324 static int dwxgmac2_filter_wait(struct mac_device_info *hw)
1325 {
1326 	void __iomem *ioaddr = hw->pcsr;
1327 	u32 value;
1328 
1329 	if (readl_poll_timeout(ioaddr + XGMAC_L3L4_ADDR_CTRL, value,
1330 			       !(value & XGMAC_XB), 100, 10000))
1331 		return -EBUSY;
1332 	return 0;
1333 }
1334 
/* Indirectly read L3/L4 filter register @reg of filter @filter_no into
 * *@data. Returns 0 on success or -EBUSY if the access engine stays busy.
 */
static int dwxgmac2_filter_read(struct mac_device_info *hw, u32 filter_no,
				u8 reg, u32 *data)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 ctrl;
	int ret;

	/* Make sure no indirect access is already in flight */
	ret = dwxgmac2_filter_wait(hw);
	if (ret)
		return ret;

	/* Select filter/register; TT marks a read, XB kicks it off */
	ctrl = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
	ctrl |= XGMAC_TT | XGMAC_XB;
	writel(ctrl, ioaddr + XGMAC_L3L4_ADDR_CTRL);

	ret = dwxgmac2_filter_wait(hw);
	if (ret)
		return ret;

	*data = readl(ioaddr + XGMAC_L3L4_DATA);

	return 0;
}
1357 
/* Indirectly write @data to L3/L4 filter register @reg of filter
 * @filter_no. Returns 0 on success or -EBUSY if the engine stays busy.
 */
static int dwxgmac2_filter_write(struct mac_device_info *hw, u32 filter_no,
				 u8 reg, u32 data)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 ctrl;
	int ret;

	/* Make sure no indirect access is already in flight */
	ret = dwxgmac2_filter_wait(hw);
	if (ret)
		return ret;

	/* Stage the payload, then select filter/register and start (XB).
	 * TT is left clear, which selects a write transfer.
	 */
	writel(data, ioaddr + XGMAC_L3L4_DATA);

	ctrl = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
	ctrl |= XGMAC_XB;
	writel(ctrl, ioaddr + XGMAC_L3L4_ADDR_CTRL);

	return dwxgmac2_filter_wait(hw);
}
1377 
/* Configure L3 (IP address) filter @filter_no.
 * @en:    enable; when false the control word is cleared again at the end,
 *         leaving the filter programmed but inactive.
 * @ipv6:  match IPv6 instead of IPv4; for IPv6 only one of SA/DA matching
 *         can be active at a time, so both are cleared before choosing.
 * @sa:    match on source address (otherwise destination).
 * @inv:   invert the match.
 * @match: address (word) to match against.
 *
 * NOTE(review): XGMAC_FILTER_IPFE is set unconditionally and never cleared
 * here, even on the disable path — confirm this is intended.
 */
static int dwxgmac2_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
				     bool en, bool ipv6, bool sa, bool inv,
				     u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	int ret;

	/* Globally enable IP (L3/L4) filtering in the packet filter */
	value = readl(ioaddr + XGMAC_PACKET_FILTER);
	value |= XGMAC_FILTER_IPFE;
	writel(value, ioaddr + XGMAC_PACKET_FILTER);

	ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value);
	if (ret)
		return ret;

	/* For IPv6 not both SA/DA filters can be active */
	if (ipv6) {
		value |= XGMAC_L3PEN0;
		value &= ~(XGMAC_L3SAM0 | XGMAC_L3SAIM0);
		value &= ~(XGMAC_L3DAM0 | XGMAC_L3DAIM0);
		if (sa) {
			value |= XGMAC_L3SAM0;
			if (inv)
				value |= XGMAC_L3SAIM0;
		} else {
			value |= XGMAC_L3DAM0;
			if (inv)
				value |= XGMAC_L3DAIM0;
		}
	} else {
		value &= ~XGMAC_L3PEN0;
		if (sa) {
			value |= XGMAC_L3SAM0;
			if (inv)
				value |= XGMAC_L3SAIM0;
		} else {
			value |= XGMAC_L3DAM0;
			if (inv)
				value |= XGMAC_L3DAIM0;
		}
	}

	ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value);
	if (ret)
		return ret;

	/* ADDR0 holds the source-match word, ADDR1 the destination-match */
	if (sa) {
		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR0, match);
		if (ret)
			return ret;
	} else {
		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR1, match);
		if (ret)
			return ret;
	}

	/* Disable requested: clear the control word programmed above */
	if (!en)
		return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0);

	return 0;
}
1440 
/* Configure L4 (TCP/UDP port) filter @filter_no.
 * @en:    enable; when false the control word is cleared again at the end.
 * @udp:   match UDP instead of TCP.
 * @sa:    match the source port (otherwise destination).
 * @inv:   invert the match.
 * @match: 16-bit port number to match.
 *
 * NOTE(review): as in the L3 variant, XGMAC_FILTER_IPFE is set
 * unconditionally and never cleared here.
 */
static int dwxgmac2_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
				     bool en, bool udp, bool sa, bool inv,
				     u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	int ret;

	/* Globally enable IP (L3/L4) filtering in the packet filter */
	value = readl(ioaddr + XGMAC_PACKET_FILTER);
	value |= XGMAC_FILTER_IPFE;
	writel(value, ioaddr + XGMAC_PACKET_FILTER);

	ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value);
	if (ret)
		return ret;

	/* L4PEN0 selects UDP matching; cleared means TCP */
	if (udp) {
		value |= XGMAC_L4PEN0;
	} else {
		value &= ~XGMAC_L4PEN0;
	}

	/* Reset both port-match enables, then pick source or destination */
	value &= ~(XGMAC_L4SPM0 | XGMAC_L4SPIM0);
	value &= ~(XGMAC_L4DPM0 | XGMAC_L4DPIM0);
	if (sa) {
		value |= XGMAC_L4SPM0;
		if (inv)
			value |= XGMAC_L4SPIM0;
	} else {
		value |= XGMAC_L4DPM0;
		if (inv)
			value |= XGMAC_L4DPIM0;
	}

	ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value);
	if (ret)
		return ret;

	/* Source port sits in the low half of L4_ADDR, destination in the
	 * high half.
	 */
	if (sa) {
		value = match & XGMAC_L4SP0;

		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
		if (ret)
			return ret;
	} else {
		value = (match << XGMAC_L4DP0_SHIFT) & XGMAC_L4DP0;

		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
		if (ret)
			return ret;
	}

	/* Disable requested: clear the control word programmed above */
	if (!en)
		return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0);

	return 0;
}
1498 
/* Program the ARP offload target address register with @addr and toggle
 * the ARP offload enable bit in the RX configuration per @en.
 */
static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en,
				     u32 addr)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 cfg;

	writel(addr, ioaddr + XGMAC_ARP_ADDR);

	cfg = readl(ioaddr + XGMAC_RX_CONFIG);
	if (en)
		cfg |= XGMAC_CONFIG_ARPEN;
	else
		cfg &= ~XGMAC_CONFIG_ARPEN;
	writel(cfg, ioaddr + XGMAC_RX_CONFIG);
}
1514 
/* Enable or disable Frame Preemption (FPE) on the TX path. When enabling,
 * the RQ field of RXQ_CTRL1 is also programmed from @num_rxq before EFPE
 * is set.
 *
 * NOTE(review): @cfg, @num_txq and @pmac_enable are accepted for interface
 * compatibility with other stmmac cores but are not used here.
 */
static void dwxgmac3_fpe_configure(void __iomem *ioaddr,
				   struct stmmac_fpe_cfg *cfg,
				   u32 num_txq, u32 num_rxq,
				   bool tx_enable, bool pmac_enable)
{
	u32 value;

	if (!tx_enable) {
		value = readl(ioaddr + XGMAC_FPE_CTRL_STS);

		value &= ~XGMAC_EFPE;

		writel(value, ioaddr + XGMAC_FPE_CTRL_STS);
		return;
	}

	value = readl(ioaddr + XGMAC_RXQ_CTRL1);
	value &= ~XGMAC_RQ;
	value |= (num_rxq - 1) << XGMAC_RQ_SHIFT;
	writel(value, ioaddr + XGMAC_RXQ_CTRL1);

	value = readl(ioaddr + XGMAC_FPE_CTRL_STS);
	value |= XGMAC_EFPE;
	writel(value, ioaddr + XGMAC_FPE_CTRL_STS);
}
1540 
/* MAC callback table for DWC XGMAC 2.10 cores, plugged into the generic
 * stmmac HW abstraction (struct stmmac_ops).
 */
const struct stmmac_ops dwxgmac210_ops = {
	.core_init = dwxgmac2_core_init,
	.update_caps = dwxgmac2_update_caps,
	.set_mac = dwxgmac2_set_mac,
	.rx_ipc = dwxgmac2_rx_ipc,
	.rx_queue_enable = dwxgmac2_rx_queue_enable,
	.rx_queue_prio = dwxgmac2_rx_queue_prio,
	.tx_queue_prio = dwxgmac2_tx_queue_prio,
	.rx_queue_routing = dwxgmac2_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
	.config_cbs = dwxgmac2_config_cbs,
	.dump_regs = dwxgmac2_dump_regs,
	.host_irq_status = dwxgmac2_host_irq_status,
	.host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
	.flow_ctrl = dwxgmac2_flow_ctrl,
	.pmt = dwxgmac2_pmt,
	.set_umac_addr = dwxgmac2_set_umac_addr,
	.get_umac_addr = dwxgmac2_get_umac_addr,
	.set_eee_mode = dwxgmac2_set_eee_mode,
	.reset_eee_mode = dwxgmac2_reset_eee_mode,
	.set_eee_timer = dwxgmac2_set_eee_timer,
	.set_eee_pls = dwxgmac2_set_eee_pls,
	.debug = NULL,
	.set_filter = dwxgmac2_set_filter,
	.safety_feat_config = dwxgmac3_safety_feat_config,
	.safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
	.safety_feat_dump = dwxgmac3_safety_feat_dump,
	.set_mac_loopback = dwxgmac2_set_mac_loopback,
	.rss_configure = dwxgmac2_rss_configure,
	.update_vlan_hash = dwxgmac2_update_vlan_hash,
	.rxp_config = dwxgmac3_rxp_config,
	.get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
	.flex_pps_config = dwxgmac2_flex_pps_config,
	.sarc_configure = dwxgmac2_sarc_configure,
	.enable_vlan = dwxgmac2_enable_vlan,
	.config_l3_filter = dwxgmac2_config_l3_filter,
	.config_l4_filter = dwxgmac2_config_l4_filter,
	.set_arp_offload = dwxgmac2_set_arp_offload,
	.fpe_configure = dwxgmac3_fpe_configure,
};
1584 
/* Enable RX queue @queue on an XLGMAC core by programming its two-bit
 * enable field: 0x1 for AV traffic, 0x2 for DCB traffic, 0x0 disables
 * the queue (any other @mode leaves it cleared).
 */
static void dwxlgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
				      u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 ctrl;

	ctrl = readl(ioaddr + XLGMAC_RXQ_ENABLE_CTRL0);
	ctrl &= ~XGMAC_RXQEN(queue);

	switch (mode) {
	case MTL_QUEUE_AVB:
		ctrl |= 0x1 << XGMAC_RXQEN_SHIFT(queue);
		break;
	case MTL_QUEUE_DCB:
		ctrl |= 0x2 << XGMAC_RXQEN_SHIFT(queue);
		break;
	default:
		break;
	}

	writel(ctrl, ioaddr + XLGMAC_RXQ_ENABLE_CTRL0);
}
1598 
/* MAC callback table for DWC XLGMAC cores; shares most handlers with the
 * XGMAC 2.10 table but overrides rx_queue_enable for the XLGMAC register
 * layout and omits update_caps.
 */
const struct stmmac_ops dwxlgmac2_ops = {
	.core_init = dwxgmac2_core_init,
	.set_mac = dwxgmac2_set_mac,
	.rx_ipc = dwxgmac2_rx_ipc,
	.rx_queue_enable = dwxlgmac2_rx_queue_enable,
	.rx_queue_prio = dwxgmac2_rx_queue_prio,
	.tx_queue_prio = dwxgmac2_tx_queue_prio,
	.rx_queue_routing = dwxgmac2_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
	.config_cbs = dwxgmac2_config_cbs,
	.dump_regs = dwxgmac2_dump_regs,
	.host_irq_status = dwxgmac2_host_irq_status,
	.host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
	.flow_ctrl = dwxgmac2_flow_ctrl,
	.pmt = dwxgmac2_pmt,
	.set_umac_addr = dwxgmac2_set_umac_addr,
	.get_umac_addr = dwxgmac2_get_umac_addr,
	.set_eee_mode = dwxgmac2_set_eee_mode,
	.reset_eee_mode = dwxgmac2_reset_eee_mode,
	.set_eee_timer = dwxgmac2_set_eee_timer,
	.set_eee_pls = dwxgmac2_set_eee_pls,
	.debug = NULL,
	.set_filter = dwxgmac2_set_filter,
	.safety_feat_config = dwxgmac3_safety_feat_config,
	.safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
	.safety_feat_dump = dwxgmac3_safety_feat_dump,
	.set_mac_loopback = dwxgmac2_set_mac_loopback,
	.rss_configure = dwxgmac2_rss_configure,
	.update_vlan_hash = dwxgmac2_update_vlan_hash,
	.rxp_config = dwxgmac3_rxp_config,
	.get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
	.flex_pps_config = dwxgmac2_flex_pps_config,
	.sarc_configure = dwxgmac2_sarc_configure,
	.enable_vlan = dwxgmac2_enable_vlan,
	.config_l3_filter = dwxgmac2_config_l3_filter,
	.config_l4_filter = dwxgmac2_config_l4_filter,
	.set_arp_offload = dwxgmac2_set_arp_offload,
	.fpe_configure = dwxgmac3_fpe_configure,
};
1641 
/* One-time setup of a mac_device_info for an XGMAC 2.10 core: filter
 * sizing from platform data, supported link modes and per-speed register
 * values, and the MDIO register field layout. Always returns 0.
 */
int dwxgmac2_setup(struct stmmac_priv *priv)
{
	struct mac_device_info *mac = priv->hw;

	dev_info(priv->device, "\tXGMAC2\n");

	priv->dev->priv_flags |= IFF_UNICAST_FLT;
	mac->pcsr = priv->ioaddr;
	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
	mac->mcast_bits_log2 = 0;

	/* Hash-filter bin count must be a power of two; log2 gives the
	 * number of hash bits to use (0 when hash filtering is absent).
	 */
	if (mac->multicast_filter_bins)
		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);

	mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
			 MAC_10 | MAC_100 | MAC_1000FD |
			 MAC_2500FD | MAC_5000FD | MAC_10000FD;
	mac->link.duplex = 0;
	/* Per-speed values programmed into the TX config speed field */
	mac->link.speed10 = XGMAC_CONFIG_SS_10_MII;
	mac->link.speed100 = XGMAC_CONFIG_SS_100_MII;
	mac->link.speed1000 = XGMAC_CONFIG_SS_1000_GMII;
	mac->link.speed2500 = XGMAC_CONFIG_SS_2500_GMII;
	mac->link.xgmii.speed2500 = XGMAC_CONFIG_SS_2500;
	mac->link.xgmii.speed5000 = XGMAC_CONFIG_SS_5000;
	mac->link.xgmii.speed10000 = XGMAC_CONFIG_SS_10000;
	mac->link.speed_mask = XGMAC_CONFIG_SS_MASK;

	/* MDIO register layout: PA/DA in bits 20:16, register in 15:0,
	 * CSR clock range in bits 21:19.
	 */
	mac->mii.addr = XGMAC_MDIO_ADDR;
	mac->mii.data = XGMAC_MDIO_DATA;
	mac->mii.addr_shift = 16;
	mac->mii.addr_mask = GENMASK(20, 16);
	mac->mii.reg_shift = 0;
	mac->mii.reg_mask = GENMASK(15, 0);
	mac->mii.clk_csr_shift = 19;
	mac->mii.clk_csr_mask = GENMASK(21, 19);

	return 0;
}
1681 
/* One-time setup of a mac_device_info for an XLGMAC core; mirrors
 * dwxgmac2_setup() but with the XLGMAC speed set (1G up to 100G, full
 * duplex only) and XLGMAC speed-select register values. Always returns 0.
 */
int dwxlgmac2_setup(struct stmmac_priv *priv)
{
	struct mac_device_info *mac = priv->hw;

	dev_info(priv->device, "\tXLGMAC\n");

	priv->dev->priv_flags |= IFF_UNICAST_FLT;
	mac->pcsr = priv->ioaddr;
	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
	mac->mcast_bits_log2 = 0;

	/* Number of hash bits used by the multicast hash filter */
	if (mac->multicast_filter_bins)
		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);

	mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
			 MAC_1000FD | MAC_2500FD | MAC_5000FD |
			 MAC_10000FD | MAC_25000FD |
			 MAC_40000FD | MAC_50000FD |
			 MAC_100000FD;
	mac->link.duplex = 0;
	/* Per-speed values programmed into the TX config speed field */
	mac->link.speed1000 = XLGMAC_CONFIG_SS_1000;
	mac->link.speed2500 = XLGMAC_CONFIG_SS_2500;
	mac->link.xgmii.speed10000 = XLGMAC_CONFIG_SS_10G;
	mac->link.xlgmii.speed25000 = XLGMAC_CONFIG_SS_25G;
	mac->link.xlgmii.speed40000 = XLGMAC_CONFIG_SS_40G;
	mac->link.xlgmii.speed50000 = XLGMAC_CONFIG_SS_50G;
	mac->link.xlgmii.speed100000 = XLGMAC_CONFIG_SS_100G;
	mac->link.speed_mask = XLGMAC_CONFIG_SS;

	/* MDIO register layout: PA/DA in bits 20:16, register in 15:0,
	 * CSR clock range in bits 21:19.
	 */
	mac->mii.addr = XGMAC_MDIO_ADDR;
	mac->mii.data = XGMAC_MDIO_DATA;
	mac->mii.addr_shift = 16;
	mac->mii.addr_mask = GENMASK(20, 16);
	mac->mii.reg_shift = 0;
	mac->mii.reg_mask = GENMASK(15, 0);
	mac->mii.clk_csr_shift = 19;
	mac->mii.clk_csr_mask = GENMASK(21, 19);

	return 0;
}
1723