1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
4  * DWC Ether MAC version 4.00 has been used for developing this code.
5  *
6  * This only implements the mac core functions for this chip.
7  *
8  * Copyright (C) 2015  STMicroelectronics Ltd
9  *
10  * Author: Alexandre Torgue <alexandre.torgue@st.com>
11  */
12 
13 #include <linux/crc32.h>
14 #include <linux/slab.h>
15 #include <linux/ethtool.h>
16 #include <linux/io.h>
17 #include <net/dsa.h>
18 #include "stmmac.h"
19 #include "stmmac_pcs.h"
20 #include "dwmac4.h"
21 #include "dwmac5.h"
22 
23 static void dwmac4_core_init(struct mac_device_info *hw,
24 			     struct net_device *dev)
25 {
26 	void __iomem *ioaddr = hw->pcsr;
27 	u32 value = readl(ioaddr + GMAC_CONFIG);
28 
29 	value |= GMAC_CORE_INIT;
30 
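	/* hw->ps holds a fixed speed to force; when set, enable the
	 * transmitter and program the matching speed bits below.
	 */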
31 	if (hw->ps) {
32 		value |= GMAC_CONFIG_TE;
33 
34 		value &= hw->link.speed_mask;
35 		switch (hw->ps) {
36 		case SPEED_1000:
37 			value |= hw->link.speed1000;
38 			break;
39 		case SPEED_100:
40 			value |= hw->link.speed100;
41 			break;
42 		case SPEED_10:
43 			value |= hw->link.speed10;
44 			break;
45 		}
46 	}
47 
48 	writel(value, ioaddr + GMAC_CONFIG);
49 
50 	/* Enable GMAC interrupts */
51 	value = GMAC_INT_DEFAULT_ENABLE;
52 
53 	if (hw->pcs)
54 		value |= GMAC_PCS_IRQ_DEFAULT;
55 
56 	writel(value, ioaddr + GMAC_INT_EN);
57 }
58 
59 static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
60 				   u8 mode, u32 queue)
61 {
62 	void __iomem *ioaddr = hw->pcsr;
63 	u32 value = readl(ioaddr + GMAC_RXQ_CTRL0);
64 
65 	value &= GMAC_RX_QUEUE_CLEAR(queue);
66 	if (mode == MTL_QUEUE_AVB)
67 		value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
68 	else if (mode == MTL_QUEUE_DCB)
69 		value |= GMAC_RX_DCB_QUEUE_ENABLE(queue);
70 
71 	writel(value, ioaddr + GMAC_RXQ_CTRL0);
72 }
73 
74 static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
75 				     u32 prio, u32 queue)
76 {
77 	void __iomem *ioaddr = hw->pcsr;
78 	u32 base_register;
79 	u32 value;
80 
81 	base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
82 	if (queue >= 4)
83 		queue -= 4;
84 
85 	value = readl(ioaddr + base_register);
86 
87 	value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue);
88 	value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
89 						GMAC_RXQCTRL_PSRQX_MASK(queue);
90 	writel(value, ioaddr + base_register);
91 }
92 
93 static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
94 				     u32 prio, u32 queue)
95 {
96 	void __iomem *ioaddr = hw->pcsr;
97 	u32 base_register;
98 	u32 value;
99 
100 	base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
101 	if (queue >= 4)
102 		queue -= 4;
103 
104 	value = readl(ioaddr + base_register);
105 
106 	value &= ~GMAC_TXQCTRL_PSTQX_MASK(queue);
107 	value |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) &
108 						GMAC_TXQCTRL_PSTQX_MASK(queue);
109 
110 	writel(value, ioaddr + base_register);
111 }
112 
113 static void dwmac4_rx_queue_routing(struct mac_device_info *hw,
114 				    u8 packet, u32 queue)
115 {
116 	void __iomem *ioaddr = hw->pcsr;
117 	u32 value;
118 
119 	static const struct stmmac_rx_routing route_possibilities[] = {
120 		{ GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT },
121 		{ GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT },
122 		{ GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT },
123 		{ GMAC_RXQCTRL_UPQ_MASK, GMAC_RXQCTRL_UPQ_SHIFT },
124 		{ GMAC_RXQCTRL_MCBCQ_MASK, GMAC_RXQCTRL_MCBCQ_SHIFT },
125 	};
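	/* Note: the (packet - 1) indexing below assumes the packet type
	 * constants (PACKET_AVCPQ ... PACKET_MCBCQ) are numbered from 1.
	 */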
126 
127 	value = readl(ioaddr + GMAC_RXQ_CTRL1);
128 
129 	/* routing configuration */
130 	value &= ~route_possibilities[packet - 1].reg_mask;
131 		value |= (queue << route_possibilities[packet - 1].reg_shift) &
132 		 route_possibilities[packet - 1].reg_mask;
133 
134 	/* some packets require extra ops */
135 	if (packet == PACKET_AVCPQ) {
136 		value &= ~GMAC_RXQCTRL_TACPQE;
137 		value |= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT;
138 	} else if (packet == PACKET_MCBCQ) {
139 		value &= ~GMAC_RXQCTRL_MCBCQEN;
140 		value |= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT;
141 	}
142 
143 	writel(value, ioaddr + GMAC_RXQ_CTRL1);
144 }
145 
146 static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw,
147 					  u32 rx_alg)
148 {
149 	void __iomem *ioaddr = hw->pcsr;
150 	u32 value = readl(ioaddr + MTL_OPERATION_MODE);
151 
152 	value &= ~MTL_OPERATION_RAA;
153 	switch (rx_alg) {
154 	case MTL_RX_ALGORITHM_SP:
155 		value |= MTL_OPERATION_RAA_SP;
156 		break;
157 	case MTL_RX_ALGORITHM_WSP:
158 		value |= MTL_OPERATION_RAA_WSP;
159 		break;
160 	default:
161 		break;
162 	}
163 
164 	writel(value, ioaddr + MTL_OPERATION_MODE);
165 }
166 
167 static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw,
168 					  u32 tx_alg)
169 {
170 	void __iomem *ioaddr = hw->pcsr;
171 	u32 value = readl(ioaddr + MTL_OPERATION_MODE);
172 
173 	value &= ~MTL_OPERATION_SCHALG_MASK;
174 	switch (tx_alg) {
175 	case MTL_TX_ALGORITHM_WRR:
176 		value |= MTL_OPERATION_SCHALG_WRR;
177 		break;
178 	case MTL_TX_ALGORITHM_WFQ:
179 		value |= MTL_OPERATION_SCHALG_WFQ;
180 		break;
181 	case MTL_TX_ALGORITHM_DWRR:
182 		value |= MTL_OPERATION_SCHALG_DWRR;
183 		break;
184 	case MTL_TX_ALGORITHM_SP:
185 		value |= MTL_OPERATION_SCHALG_SP;
186 		break;
187 	default:
188 		break;
189 	}
190 
191 	writel(value, ioaddr + MTL_OPERATION_MODE);
192 }
193 
194 static void dwmac4_set_mtl_tx_queue_weight(struct mac_device_info *hw,
195 					   u32 weight, u32 queue)
196 {
197 	void __iomem *ioaddr = hw->pcsr;
198 	u32 value = readl(ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
199 
200 	value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK;
201 	value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK;
202 	writel(value, ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
203 }
204 
205 static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
206 {
207 	void __iomem *ioaddr = hw->pcsr;
208 	u32 value;
209 
210 	if (queue < 4)
211 		value = readl(ioaddr + MTL_RXQ_DMA_MAP0);
212 	else
213 		value = readl(ioaddr + MTL_RXQ_DMA_MAP1);
214 
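	/* Queues 0 and 4 are mapped through the dedicated Q04MDMACH field of
	 * MAP0/MAP1 respectively; the other queues use their per-queue
	 * QXMDMACH field.
	 */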
215 	if (queue == 0 || queue == 4) {
216 		value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK;
217 		value |= MTL_RXQ_DMA_Q04MDMACH(chan);
218 	} else if (queue > 4) {
219 		value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue - 4);
220 		value |= MTL_RXQ_DMA_QXMDMACH(chan, queue - 4);
221 	} else {
222 		value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
223 		value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
224 	}
225 
226 	if (queue < 4)
227 		writel(value, ioaddr + MTL_RXQ_DMA_MAP0);
228 	else
229 		writel(value, ioaddr + MTL_RXQ_DMA_MAP1);
230 }
231 
232 static void dwmac4_config_cbs(struct mac_device_info *hw,
233 			      u32 send_slope, u32 idle_slope,
234 			      u32 high_credit, u32 low_credit, u32 queue)
235 {
236 	void __iomem *ioaddr = hw->pcsr;
237 	u32 value;
238 
239 	pr_debug("Queue %d configured as AVB. Parameters:\n", queue);
240 	pr_debug("\tsend_slope: 0x%08x\n", send_slope);
241 	pr_debug("\tidle_slope: 0x%08x\n", idle_slope);
242 	pr_debug("\thigh_credit: 0x%08x\n", high_credit);
243 	pr_debug("\tlow_credit: 0x%08x\n", low_credit);
244 
245 	/* enable AV algorithm */
246 	value = readl(ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
247 	value |= MTL_ETS_CTRL_AVALG;
248 	value |= MTL_ETS_CTRL_CC;
249 	writel(value, ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
250 
251 	/* configure send slope */
252 	value = readl(ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
253 	value &= ~MTL_SEND_SLP_CRED_SSC_MASK;
254 	value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK;
255 	writel(value, ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
256 
257 	/* configure idle slope (same register as tx weight) */
258 	dwmac4_set_mtl_tx_queue_weight(hw, idle_slope, queue);
259 
260 	/* configure high credit */
261 	value = readl(ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
262 	value &= ~MTL_HIGH_CRED_HC_MASK;
263 	value |= high_credit & MTL_HIGH_CRED_HC_MASK;
264 	writel(value, ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
265 
266 	/* configure low credit */
267 	value = readl(ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
268 	value &= ~MTL_HIGH_CRED_LC_MASK;
269 	value |= low_credit & MTL_HIGH_CRED_LC_MASK;
270 	writel(value, ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
271 }
272 
273 static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space)
274 {
275 	void __iomem *ioaddr = hw->pcsr;
276 	int i;
277 
278 	for (i = 0; i < GMAC_REG_NUM; i++)
279 		reg_space[i] = readl(ioaddr + i * 4);
280 }
281 
282 static int dwmac4_rx_ipc_enable(struct mac_device_info *hw)
283 {
284 	void __iomem *ioaddr = hw->pcsr;
285 	u32 value = readl(ioaddr + GMAC_CONFIG);
286 
287 	if (hw->rx_csum)
288 		value |= GMAC_CONFIG_IPC;
289 	else
290 		value &= ~GMAC_CONFIG_IPC;
291 
292 	writel(value, ioaddr + GMAC_CONFIG);
293 
294 	value = readl(ioaddr + GMAC_CONFIG);
295 
296 	return !!(value & GMAC_CONFIG_IPC);
297 }
298 
299 static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
300 {
301 	void __iomem *ioaddr = hw->pcsr;
302 	unsigned int pmt = 0;
303 	u32 config;
304 
305 	if (mode & WAKE_MAGIC) {
306 		pr_debug("GMAC: WOL Magic frame\n");
307 		pmt |= power_down | magic_pkt_en;
308 	}
309 	if (mode & WAKE_UCAST) {
310 		pr_debug("GMAC: WOL on global unicast\n");
311 		pmt |= power_down | global_unicast | wake_up_frame_en;
312 	}
313 
314 	if (pmt) {
315 		/* The receiver must be enabled for WOL before powering down */
316 		config = readl(ioaddr + GMAC_CONFIG);
317 		config |= GMAC_CONFIG_RE;
318 		writel(config, ioaddr + GMAC_CONFIG);
319 	}
320 	writel(pmt, ioaddr + GMAC_PMT);
321 }
322 
323 static void dwmac4_set_umac_addr(struct mac_device_info *hw,
324 				 unsigned char *addr, unsigned int reg_n)
325 {
326 	void __iomem *ioaddr = hw->pcsr;
327 
328 	stmmac_dwmac4_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
329 				   GMAC_ADDR_LOW(reg_n));
330 }
331 
332 static void dwmac4_get_umac_addr(struct mac_device_info *hw,
333 				 unsigned char *addr, unsigned int reg_n)
334 {
335 	void __iomem *ioaddr = hw->pcsr;
336 
337 	stmmac_dwmac4_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
338 				   GMAC_ADDR_LOW(reg_n));
339 }
340 
341 static void dwmac4_set_eee_mode(struct mac_device_info *hw,
342 				bool en_tx_lpi_clockgating)
343 {
344 	void __iomem *ioaddr = hw->pcsr;
345 	u32 value;
346 
347 	/* Enable the link status receive on the RGMII, SGMII or SMII
348 	 * receive path and instruct the transmitter to enter the LPI
349 	 * state.
350 	 */
351 	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
352 	value |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;
353 
354 	if (en_tx_lpi_clockgating)
355 		value |= GMAC4_LPI_CTRL_STATUS_LPITCSE;
356 
357 	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
358 }
359 
360 static void dwmac4_reset_eee_mode(struct mac_device_info *hw)
361 {
362 	void __iomem *ioaddr = hw->pcsr;
363 	u32 value;
364 
365 	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
366 	value &= ~(GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA);
367 	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
368 }
369 
370 static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
371 {
372 	void __iomem *ioaddr = hw->pcsr;
373 	u32 value;
374 
375 	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
376 
377 	if (link)
378 		value |= GMAC4_LPI_CTRL_STATUS_PLS;
379 	else
380 		value &= ~GMAC4_LPI_CTRL_STATUS_PLS;
381 
382 	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
383 }
384 
385 static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
386 {
387 	void __iomem *ioaddr = hw->pcsr;
388 	int value = (tw & 0xffff) | ((ls & 0x3ff) << 16);
389 
390 	/* Program the timers in the LPI timer control register:
391 	 * LS: minimum time (ms) for which the link
392 	 *  status from PHY should be ok before transmitting
393 	 *  the LPI pattern.
394 	 * TW: minimum time (us) for which the core waits
395 	 *  after it has stopped transmitting the LPI pattern.
396 	 */
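	/* Illustrative example (hypothetical values): ls = 1000 and tw = 30
	 * pack as (30 & 0xffff) | ((1000 & 0x3ff) << 16) = 0x03e8001e.
	 */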
397 	writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL);
398 }
399 
400 static void dwmac4_write_single_vlan(struct net_device *dev, u16 vid)
401 {
402 	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
403 	u32 val;
404 
405 	val = readl(ioaddr + GMAC_VLAN_TAG);
406 	val &= ~GMAC_VLAN_TAG_VID;
407 	val |= GMAC_VLAN_TAG_ETV | vid;
408 
409 	writel(val, ioaddr + GMAC_VLAN_TAG);
410 }
411 
412 static int dwmac4_write_vlan_filter(struct net_device *dev,
413 				    struct mac_device_info *hw,
414 				    u8 index, u32 data)
415 {
416 	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
417 	int i, timeout = 10;
418 	u32 val;
419 
420 	if (index >= hw->num_vlan)
421 		return -EINVAL;
422 
423 	writel(data, ioaddr + GMAC_VLAN_TAG_DATA);
424 
425 	val = readl(ioaddr + GMAC_VLAN_TAG);
426 	val &= ~(GMAC_VLAN_TAG_CTRL_OFS_MASK |
427 		GMAC_VLAN_TAG_CTRL_CT |
428 		GMAC_VLAN_TAG_CTRL_OB);
429 	val |= (index << GMAC_VLAN_TAG_CTRL_OFS_SHIFT) | GMAC_VLAN_TAG_CTRL_OB;
430 
431 	writel(val, ioaddr + GMAC_VLAN_TAG);
432 
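	/* Writing the OB bit kicks off the indirect filter access; poll until
	 * the hardware clears it, giving up after roughly 10 us.
	 */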
433 	for (i = 0; i < timeout; i++) {
434 		val = readl(ioaddr + GMAC_VLAN_TAG);
435 		if (!(val & GMAC_VLAN_TAG_CTRL_OB))
436 			return 0;
437 		udelay(1);
438 	}
439 
440 	netdev_err(dev, "Timeout accessing MAC_VLAN_Tag_Filter\n");
441 
442 	return -EBUSY;
443 }
444 
445 static int dwmac4_add_hw_vlan_rx_fltr(struct net_device *dev,
446 				      struct mac_device_info *hw,
447 				      __be16 proto, u16 vid)
448 {
449 	int index = -1;
450 	u32 val = 0;
451 	int i, ret;
452 
453 	if (vid > 4095)
454 		return -EINVAL;
455 
456 	/* Single Rx VLAN Filter */
457 	if (hw->num_vlan == 1) {
458 		/* For single VLAN filter, VID 0 means VLAN promiscuous */
459 		if (vid == 0) {
460 			netdev_warn(dev, "Adding VLAN ID 0 is not supported\n");
461 			return -EPERM;
462 		}
463 
464 		if (hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) {
465 			netdev_err(dev, "Only single VLAN ID supported\n");
466 			return -EPERM;
467 		}
468 
469 		hw->vlan_filter[0] = vid;
470 		dwmac4_write_single_vlan(dev, vid);
471 
472 		return 0;
473 	}
474 
475 	/* Extended Rx VLAN Filter Enable */
476 	val |= GMAC_VLAN_TAG_DATA_ETV | GMAC_VLAN_TAG_DATA_VEN | vid;
477 
478 	for (i = 0; i < hw->num_vlan; i++) {
479 		if (hw->vlan_filter[i] == val)
480 			return 0;
481 		else if (!(hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN))
482 			index = i;
483 	}
484 
485 	if (index == -1) {
486 		netdev_err(dev, "MAC_VLAN_Tag_Filter full (size: %0u)\n",
487 			   hw->num_vlan);
488 		return -EPERM;
489 	}
490 
491 	ret = dwmac4_write_vlan_filter(dev, hw, index, val);
492 
493 	if (!ret)
494 		hw->vlan_filter[index] = val;
495 
496 	return ret;
497 }
498 
499 static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev,
500 				      struct mac_device_info *hw,
501 				      __be16 proto, u16 vid)
502 {
503 	int i, ret = 0;
504 
505 	/* Single Rx VLAN Filter */
506 	if (hw->num_vlan == 1) {
507 		if ((hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) == vid) {
508 			hw->vlan_filter[0] = 0;
509 			dwmac4_write_single_vlan(dev, 0);
510 		}
511 		return 0;
512 	}
513 
514 	/* Extended Rx VLAN Filter Enable */
515 	for (i = 0; i < hw->num_vlan; i++) {
516 		if ((hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VID) == vid) {
517 			ret = dwmac4_write_vlan_filter(dev, hw, i, 0);
518 
519 			if (!ret)
520 				hw->vlan_filter[i] = 0;
521 			else
522 				return ret;
523 		}
524 	}
525 
526 	return ret;
527 }
528 
529 static void dwmac4_restore_hw_vlan_rx_fltr(struct net_device *dev,
530 					   struct mac_device_info *hw)
531 {
532 	void __iomem *ioaddr = hw->pcsr;
533 	u32 value;
534 	u32 hash;
535 	u32 val;
536 	int i;
537 
538 	/* Single Rx VLAN Filter */
539 	if (hw->num_vlan == 1) {
540 		dwmac4_write_single_vlan(dev, hw->vlan_filter[0]);
541 		return;
542 	}
543 
544 	/* Extended Rx VLAN Filter Enable */
545 	for (i = 0; i < hw->num_vlan; i++) {
546 		if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) {
547 			val = hw->vlan_filter[i];
548 			dwmac4_write_vlan_filter(dev, hw, i, val);
549 		}
550 	}
551 
552 	hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE);
553 	if (hash & GMAC_VLAN_VLHT) {
554 		value = readl(ioaddr + GMAC_VLAN_TAG);
555 		value |= GMAC_VLAN_VTHM;
556 		writel(value, ioaddr + GMAC_VLAN_TAG);
557 	}
558 }
559 
560 static void dwmac4_set_filter(struct mac_device_info *hw,
561 			      struct net_device *dev)
562 {
563 	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
564 	int numhashregs = (hw->multicast_filter_bins >> 5);
565 	int mcbitslog2 = hw->mcast_bits_log2;
566 	unsigned int value;
567 	u32 mc_filter[8];
568 	int i;
569 
570 	memset(mc_filter, 0, sizeof(mc_filter));
571 
572 	value = readl(ioaddr + GMAC_PACKET_FILTER);
573 	value &= ~GMAC_PACKET_FILTER_HMC;
574 	value &= ~GMAC_PACKET_FILTER_HPF;
575 	value &= ~GMAC_PACKET_FILTER_PCF;
576 	value &= ~GMAC_PACKET_FILTER_PM;
577 	value &= ~GMAC_PACKET_FILTER_PR;
578 	value &= ~GMAC_PACKET_FILTER_RA;
579 	if (dev->flags & IFF_PROMISC) {
580 		/* VLAN Tag Filter Fail Packets Queuing */
581 		if (hw->vlan_fail_q_en) {
582 			value = readl(ioaddr + GMAC_RXQ_CTRL4);
583 			value &= ~GMAC_RXQCTRL_VFFQ_MASK;
584 			value |= GMAC_RXQCTRL_VFFQE |
585 				 (hw->vlan_fail_q << GMAC_RXQCTRL_VFFQ_SHIFT);
586 			writel(value, ioaddr + GMAC_RXQ_CTRL4);
587 			value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_RA;
588 		} else {
589 			value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_PCF;
590 		}
591 
592 	} else if ((dev->flags & IFF_ALLMULTI) ||
593 		   (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
594 		/* Pass all multi */
595 		value |= GMAC_PACKET_FILTER_PM;
596 		/* Set all the bits of the HASH tab */
597 		memset(mc_filter, 0xff, sizeof(mc_filter));
598 	} else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
599 		struct netdev_hw_addr *ha;
600 
601 		/* Hash filter for multicast */
602 		value |= GMAC_PACKET_FILTER_HMC;
603 
604 		netdev_for_each_mc_addr(ha, dev) {
605 			/* The upper n bits of the calculated CRC are used to
606 			 * index the contents of the hash table. The number of
607 			 * bits used depends on the hardware configuration
608 			 * selected at core configuration time.
609 			 */
610 			u32 bit_nr = bitrev32(~crc32_le(~0, ha->addr,
611 					ETH_ALEN)) >> (32 - mcbitslog2);
612 			/* The most significant bit determines the register to
613 			 * use (H/L) while the other 5 bits determine the bit
614 			 * within the register.
615 			 */
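			/* Worked example: with mcbitslog2 == 6, bit_nr == 35
			 * sets bit 3 of mc_filter[1] (35 >> 5 == 1,
			 * 35 & 0x1f == 3).
			 */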
616 			mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1f));
617 		}
618 	}
619 
620 	for (i = 0; i < numhashregs; i++)
621 		writel(mc_filter[i], ioaddr + GMAC_HASH_TAB(i));
622 
623 	value |= GMAC_PACKET_FILTER_HPF;
624 
625 	/* Handle multiple unicast addresses */
626 	if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
627 		/* Switch to promiscuous mode if more than 128 addrs
628 		 * are required
629 		 */
630 		value |= GMAC_PACKET_FILTER_PR;
631 	} else {
632 		struct netdev_hw_addr *ha;
633 		int reg = 1;
634 
635 		netdev_for_each_uc_addr(ha, dev) {
636 			dwmac4_set_umac_addr(hw, ha->addr, reg);
637 			reg++;
638 		}
639 
640 		while (reg < GMAC_MAX_PERFECT_ADDRESSES) {
641 			writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
642 			writel(0, ioaddr + GMAC_ADDR_LOW(reg));
643 			reg++;
644 		}
645 	}
646 
647 	/* VLAN filtering */
648 	if (dev->flags & IFF_PROMISC && !hw->vlan_fail_q_en)
649 		value &= ~GMAC_PACKET_FILTER_VTFE;
650 	else if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
651 		value |= GMAC_PACKET_FILTER_VTFE;
652 
653 	writel(value, ioaddr + GMAC_PACKET_FILTER);
654 }
655 
656 static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
657 			     unsigned int fc, unsigned int pause_time,
658 			     u32 tx_cnt)
659 {
660 	void __iomem *ioaddr = hw->pcsr;
661 	unsigned int flow = 0;
662 	u32 queue = 0;
663 
664 	pr_debug("GMAC Flow-Control:\n");
665 	if (fc & FLOW_RX) {
666 		pr_debug("\tReceive Flow-Control ON\n");
667 		flow |= GMAC_RX_FLOW_CTRL_RFE;
668 	} else {
669 		pr_debug("\tReceive Flow-Control OFF\n");
670 	}
671 	writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);
672 
673 	if (fc & FLOW_TX) {
674 		pr_debug("\tTransmit Flow-Control ON\n");
675 
676 		if (duplex)
677 			pr_debug("\tduplex mode: PAUSE %d\n", pause_time);
678 
679 		for (queue = 0; queue < tx_cnt; queue++) {
680 			flow = GMAC_TX_FLOW_CTRL_TFE;
681 
682 			if (duplex)
683 				flow |=
684 				(pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);
685 
686 			writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
687 		}
688 	} else {
689 		for (queue = 0; queue < tx_cnt; queue++)
690 			writel(0, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
691 	}
692 }
693 
694 static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
695 			    bool loopback)
696 {
697 	dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
698 }
699 
700 static void dwmac4_rane(void __iomem *ioaddr, bool restart)
701 {
702 	dwmac_rane(ioaddr, GMAC_PCS_BASE, restart);
703 }
704 
705 static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
706 {
707 	dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
708 }
709 
710 /* RGMII or SMII interface */
711 static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x)
712 {
713 	u32 status;
714 
715 	status = readl(ioaddr + GMAC_PHYIF_CONTROL_STATUS);
716 	x->irq_rgmii_n++;
717 
718 	/* Check the link status */
719 	if (status & GMAC_PHYIF_CTRLSTATUS_LNKSTS) {
720 		int speed_value;
721 
722 		x->pcs_link = 1;
723 
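		/* The SPEED field reports the interface clock (125 MHz or
		 * 25 MHz encodings; presumably 2.5 MHz otherwise), mapped to
		 * 1000/100/10 Mb/s below.
		 */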
724 		speed_value = ((status & GMAC_PHYIF_CTRLSTATUS_SPEED) >>
725 			       GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT);
726 		if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_125)
727 			x->pcs_speed = SPEED_1000;
728 		else if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_25)
729 			x->pcs_speed = SPEED_100;
730 		else
731 			x->pcs_speed = SPEED_10;
732 
733 		x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK);
734 
735 		pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
736 			x->pcs_duplex ? "Full" : "Half");
737 	} else {
738 		x->pcs_link = 0;
739 		pr_info("Link is Down\n");
740 	}
741 }
742 
743 static int dwmac4_irq_mtl_status(struct mac_device_info *hw, u32 chan)
744 {
745 	void __iomem *ioaddr = hw->pcsr;
746 	u32 mtl_int_qx_status;
747 	int ret = 0;
748 
749 	mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);
750 
751 	/* Check MTL Interrupt */
752 	if (mtl_int_qx_status & MTL_INT_QX(chan)) {
753 		/* read Queue x Interrupt status */
754 		u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(chan));
755 
756 		if (status & MTL_RX_OVERFLOW_INT) {
757 			/*  clear Interrupt */
758 			writel(status | MTL_RX_OVERFLOW_INT,
759 			       ioaddr + MTL_CHAN_INT_CTRL(chan));
760 			ret = CORE_IRQ_MTL_RX_OVERFLOW;
761 		}
762 	}
763 
764 	return ret;
765 }
766 
767 static int dwmac4_irq_status(struct mac_device_info *hw,
768 			     struct stmmac_extra_stats *x)
769 {
770 	void __iomem *ioaddr = hw->pcsr;
771 	u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
772 	u32 intr_enable = readl(ioaddr + GMAC_INT_EN);
773 	int ret = 0;
774 
775 	/* Discard disabled bits */
776 	intr_status &= intr_enable;
777 
778 	/* Unused events (e.g. MMC interrupts) are not handled. */
779 	if (unlikely(intr_status & mmc_tx_irq))
780 		x->mmc_tx_irq_n++;
781 	if (unlikely(intr_status & mmc_rx_irq))
782 		x->mmc_rx_irq_n++;
783 	if (unlikely(intr_status & mmc_rx_csum_offload_irq))
784 		x->mmc_rx_csum_offload_irq_n++;
785 	/* Clear the PMT bits 5 and 6 by reading the PMT status reg */
786 	if (unlikely(intr_status & pmt_irq)) {
787 		readl(ioaddr + GMAC_PMT);
788 		x->irq_receive_pmt_irq_n++;
789 	}
790 
791 	/* MAC tx/rx EEE LPI entry/exit interrupts */
792 	if (intr_status & lpi_irq) {
793 		/* Clear LPI interrupt by reading MAC_LPI_Control_Status */
794 		u32 status = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
795 
796 		if (status & GMAC4_LPI_CTRL_STATUS_TLPIEN) {
797 			ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
798 			x->irq_tx_path_in_lpi_mode_n++;
799 		}
800 		if (status & GMAC4_LPI_CTRL_STATUS_TLPIEX) {
801 			ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
802 			x->irq_tx_path_exit_lpi_mode_n++;
803 		}
804 		if (status & GMAC4_LPI_CTRL_STATUS_RLPIEN)
805 			x->irq_rx_path_in_lpi_mode_n++;
806 		if (status & GMAC4_LPI_CTRL_STATUS_RLPIEX)
807 			x->irq_rx_path_exit_lpi_mode_n++;
808 	}
809 
810 	dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
811 	if (intr_status & PCS_RGSMIIIS_IRQ)
812 		dwmac4_phystatus(ioaddr, x);
813 
814 	return ret;
815 }
816 
817 static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
818 			 u32 rx_queues, u32 tx_queues)
819 {
820 	u32 value;
821 	u32 queue;
822 
823 	for (queue = 0; queue < tx_queues; queue++) {
824 		value = readl(ioaddr + MTL_CHAN_TX_DEBUG(queue));
825 
826 		if (value & MTL_DEBUG_TXSTSFSTS)
827 			x->mtl_tx_status_fifo_full++;
828 		if (value & MTL_DEBUG_TXFSTS)
829 			x->mtl_tx_fifo_not_empty++;
830 		if (value & MTL_DEBUG_TWCSTS)
831 			x->mmtl_fifo_ctrl++;
832 		if (value & MTL_DEBUG_TRCSTS_MASK) {
833 			u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
834 				     >> MTL_DEBUG_TRCSTS_SHIFT;
835 			if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
836 				x->mtl_tx_fifo_read_ctrl_write++;
837 			else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
838 				x->mtl_tx_fifo_read_ctrl_wait++;
839 			else if (trcsts == MTL_DEBUG_TRCSTS_READ)
840 				x->mtl_tx_fifo_read_ctrl_read++;
841 			else
842 				x->mtl_tx_fifo_read_ctrl_idle++;
843 		}
844 		if (value & MTL_DEBUG_TXPAUSED)
845 			x->mac_tx_in_pause++;
846 	}
847 
848 	for (queue = 0; queue < rx_queues; queue++) {
849 		value = readl(ioaddr + MTL_CHAN_RX_DEBUG(queue));
850 
851 		if (value & MTL_DEBUG_RXFSTS_MASK) {
852 			u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
853 				     >> MTL_DEBUG_RXFSTS_SHIFT;
854 
855 			if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
856 				x->mtl_rx_fifo_fill_level_full++;
857 			else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
858 				x->mtl_rx_fifo_fill_above_thresh++;
859 			else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
860 				x->mtl_rx_fifo_fill_below_thresh++;
861 			else
862 				x->mtl_rx_fifo_fill_level_empty++;
863 		}
864 		if (value & MTL_DEBUG_RRCSTS_MASK) {
865 			u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
866 				     MTL_DEBUG_RRCSTS_SHIFT;
867 
868 			if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
869 				x->mtl_rx_fifo_read_ctrl_flush++;
870 			else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
871 				x->mtl_rx_fifo_read_ctrl_read_data++;
872 			else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
873 				x->mtl_rx_fifo_read_ctrl_status++;
874 			else
875 				x->mtl_rx_fifo_read_ctrl_idle++;
876 		}
877 		if (value & MTL_DEBUG_RWCSTS)
878 			x->mtl_rx_fifo_ctrl_active++;
879 	}
880 
881 	/* GMAC debug */
882 	value = readl(ioaddr + GMAC_DEBUG);
883 
884 	if (value & GMAC_DEBUG_TFCSTS_MASK) {
885 		u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK)
886 			      >> GMAC_DEBUG_TFCSTS_SHIFT;
887 
888 		if (tfcsts == GMAC_DEBUG_TFCSTS_XFER)
889 			x->mac_tx_frame_ctrl_xfer++;
890 		else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE)
891 			x->mac_tx_frame_ctrl_pause++;
892 		else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT)
893 			x->mac_tx_frame_ctrl_wait++;
894 		else
895 			x->mac_tx_frame_ctrl_idle++;
896 	}
897 	if (value & GMAC_DEBUG_TPESTS)
898 		x->mac_gmii_tx_proto_engine++;
899 	if (value & GMAC_DEBUG_RFCFCSTS_MASK)
900 		x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK)
901 					    >> GMAC_DEBUG_RFCFCSTS_SHIFT;
902 	if (value & GMAC_DEBUG_RPESTS)
903 		x->mac_gmii_rx_proto_engine++;
904 }
905 
906 static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable)
907 {
908 	u32 value = readl(ioaddr + GMAC_CONFIG);
909 
910 	if (enable)
911 		value |= GMAC_CONFIG_LM;
912 	else
913 		value &= ~GMAC_CONFIG_LM;
914 
915 	writel(value, ioaddr + GMAC_CONFIG);
916 }
917 
918 static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
919 				    __le16 perfect_match, bool is_double)
920 {
921 	void __iomem *ioaddr = hw->pcsr;
922 	u32 value;
923 
924 	writel(hash, ioaddr + GMAC_VLAN_HASH_TABLE);
925 
926 	value = readl(ioaddr + GMAC_VLAN_TAG);
927 
928 	if (hash) {
929 		value |= GMAC_VLAN_VTHM | GMAC_VLAN_ETV;
930 		if (is_double) {
931 			value |= GMAC_VLAN_EDVLP;
932 			value |= GMAC_VLAN_ESVL;
933 			value |= GMAC_VLAN_DOVLTC;
934 		}
935 
936 		writel(value, ioaddr + GMAC_VLAN_TAG);
937 	} else if (perfect_match) {
938 		u32 value = GMAC_VLAN_ETV;
939 
940 		if (is_double) {
941 			value |= GMAC_VLAN_EDVLP;
942 			value |= GMAC_VLAN_ESVL;
943 			value |= GMAC_VLAN_DOVLTC;
944 		}
945 
946 		writel(value | perfect_match, ioaddr + GMAC_VLAN_TAG);
947 	} else {
948 		value &= ~(GMAC_VLAN_VTHM | GMAC_VLAN_ETV);
949 		value &= ~(GMAC_VLAN_EDVLP | GMAC_VLAN_ESVL);
950 		value &= ~GMAC_VLAN_DOVLTC;
951 		value &= ~GMAC_VLAN_VID;
952 
953 		writel(value, ioaddr + GMAC_VLAN_TAG);
954 	}
955 }
956 
957 static void dwmac4_sarc_configure(void __iomem *ioaddr, int val)
958 {
959 	u32 value = readl(ioaddr + GMAC_CONFIG);
960 
961 	value &= ~GMAC_CONFIG_SARC;
962 	value |= val << GMAC_CONFIG_SARC_SHIFT;
963 
964 	writel(value, ioaddr + GMAC_CONFIG);
965 }
966 
967 static void dwmac4_enable_vlan(struct mac_device_info *hw, u32 type)
968 {
969 	void __iomem *ioaddr = hw->pcsr;
970 	u32 value;
971 
972 	value = readl(ioaddr + GMAC_VLAN_INCL);
973 	value |= GMAC_VLAN_VLTI;
974 	value |= GMAC_VLAN_CSVL; /* Only use SVLAN */
975 	value &= ~GMAC_VLAN_VLC;
976 	value |= (type << GMAC_VLAN_VLC_SHIFT) & GMAC_VLAN_VLC;
977 	writel(value, ioaddr + GMAC_VLAN_INCL);
978 }
979 
980 static void dwmac4_set_arp_offload(struct mac_device_info *hw, bool en,
981 				   u32 addr)
982 {
983 	void __iomem *ioaddr = hw->pcsr;
984 	u32 value;
985 
986 	writel(addr, ioaddr + GMAC_ARP_ADDR);
987 
988 	value = readl(ioaddr + GMAC_CONFIG);
989 	if (en)
990 		value |= GMAC_CONFIG_ARPEN;
991 	else
992 		value &= ~GMAC_CONFIG_ARPEN;
993 	writel(value, ioaddr + GMAC_CONFIG);
994 }
995 
996 static int dwmac4_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
997 				   bool en, bool ipv6, bool sa, bool inv,
998 				   u32 match)
999 {
1000 	void __iomem *ioaddr = hw->pcsr;
1001 	u32 value;
1002 
1003 	value = readl(ioaddr + GMAC_PACKET_FILTER);
1004 	value |= GMAC_PACKET_FILTER_IPFE;
1005 	writel(value, ioaddr + GMAC_PACKET_FILTER);
1006 
1007 	value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));
1008 
1009 	/* For IPv6, the SA and DA filters cannot both be active */
1010 	if (ipv6) {
1011 		value |= GMAC_L3PEN0;
1012 		value &= ~(GMAC_L3SAM0 | GMAC_L3SAIM0);
1013 		value &= ~(GMAC_L3DAM0 | GMAC_L3DAIM0);
1014 		if (sa) {
1015 			value |= GMAC_L3SAM0;
1016 			if (inv)
1017 				value |= GMAC_L3SAIM0;
1018 		} else {
1019 			value |= GMAC_L3DAM0;
1020 			if (inv)
1021 				value |= GMAC_L3DAIM0;
1022 		}
1023 	} else {
1024 		value &= ~GMAC_L3PEN0;
1025 		if (sa) {
1026 			value |= GMAC_L3SAM0;
1027 			if (inv)
1028 				value |= GMAC_L3SAIM0;
1029 		} else {
1030 			value |= GMAC_L3DAM0;
1031 			if (inv)
1032 				value |= GMAC_L3DAIM0;
1033 		}
1034 	}
1035 
1036 	writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));
1037 
1038 	if (sa) {
1039 		writel(match, ioaddr + GMAC_L3_ADDR0(filter_no));
1040 	} else {
1041 		writel(match, ioaddr + GMAC_L3_ADDR1(filter_no));
1042 	}
1043 
1044 	if (!en)
1045 		writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));
1046 
1047 	return 0;
1048 }
1049 
1050 static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
1051 				   bool en, bool udp, bool sa, bool inv,
1052 				   u32 match)
1053 {
1054 	void __iomem *ioaddr = hw->pcsr;
1055 	u32 value;
1056 
1057 	value = readl(ioaddr + GMAC_PACKET_FILTER);
1058 	value |= GMAC_PACKET_FILTER_IPFE;
1059 	writel(value, ioaddr + GMAC_PACKET_FILTER);
1060 
1061 	value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));
1062 	if (udp) {
1063 		value |= GMAC_L4PEN0;
1064 	} else {
1065 		value &= ~GMAC_L4PEN0;
1066 	}
1067 
1068 	value &= ~(GMAC_L4SPM0 | GMAC_L4SPIM0);
1069 	value &= ~(GMAC_L4DPM0 | GMAC_L4DPIM0);
1070 	if (sa) {
1071 		value |= GMAC_L4SPM0;
1072 		if (inv)
1073 			value |= GMAC_L4SPIM0;
1074 	} else {
1075 		value |= GMAC_L4DPM0;
1076 		if (inv)
1077 			value |= GMAC_L4DPIM0;
1078 	}
1079 
1080 	writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));
1081 
1082 	if (sa) {
1083 		value = match & GMAC_L4SP0;
1084 	} else {
1085 		value = (match << GMAC_L4DP0_SHIFT) & GMAC_L4DP0;
1086 	}
1087 
1088 	writel(value, ioaddr + GMAC_L4_ADDR(filter_no));
1089 
1090 	if (!en)
1091 		writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));
1092 
1093 	return 0;
1094 }
1095 
1096 const struct stmmac_ops dwmac4_ops = {
1097 	.core_init = dwmac4_core_init,
1098 	.set_mac = stmmac_set_mac,
1099 	.rx_ipc = dwmac4_rx_ipc_enable,
1100 	.rx_queue_enable = dwmac4_rx_queue_enable,
1101 	.rx_queue_prio = dwmac4_rx_queue_priority,
1102 	.tx_queue_prio = dwmac4_tx_queue_priority,
1103 	.rx_queue_routing = dwmac4_rx_queue_routing,
1104 	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
1105 	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
1106 	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
1107 	.map_mtl_to_dma = dwmac4_map_mtl_dma,
1108 	.config_cbs = dwmac4_config_cbs,
1109 	.dump_regs = dwmac4_dump_regs,
1110 	.host_irq_status = dwmac4_irq_status,
1111 	.host_mtl_irq_status = dwmac4_irq_mtl_status,
1112 	.flow_ctrl = dwmac4_flow_ctrl,
1113 	.pmt = dwmac4_pmt,
1114 	.set_umac_addr = dwmac4_set_umac_addr,
1115 	.get_umac_addr = dwmac4_get_umac_addr,
1116 	.set_eee_mode = dwmac4_set_eee_mode,
1117 	.reset_eee_mode = dwmac4_reset_eee_mode,
1118 	.set_eee_timer = dwmac4_set_eee_timer,
1119 	.set_eee_pls = dwmac4_set_eee_pls,
1120 	.pcs_ctrl_ane = dwmac4_ctrl_ane,
1121 	.pcs_rane = dwmac4_rane,
1122 	.pcs_get_adv_lp = dwmac4_get_adv_lp,
1123 	.debug = dwmac4_debug,
1124 	.set_filter = dwmac4_set_filter,
1125 	.set_mac_loopback = dwmac4_set_mac_loopback,
1126 	.update_vlan_hash = dwmac4_update_vlan_hash,
1127 	.sarc_configure = dwmac4_sarc_configure,
1128 	.enable_vlan = dwmac4_enable_vlan,
1129 	.set_arp_offload = dwmac4_set_arp_offload,
1130 	.config_l3_filter = dwmac4_config_l3_filter,
1131 	.config_l4_filter = dwmac4_config_l4_filter,
1132 	.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
1133 	.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
1134 	.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
1135 };
1136 
1137 const struct stmmac_ops dwmac410_ops = {
1138 	.core_init = dwmac4_core_init,
1139 	.set_mac = stmmac_dwmac4_set_mac,
1140 	.rx_ipc = dwmac4_rx_ipc_enable,
1141 	.rx_queue_enable = dwmac4_rx_queue_enable,
1142 	.rx_queue_prio = dwmac4_rx_queue_priority,
1143 	.tx_queue_prio = dwmac4_tx_queue_priority,
1144 	.rx_queue_routing = dwmac4_rx_queue_routing,
1145 	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
1146 	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
1147 	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
1148 	.map_mtl_to_dma = dwmac4_map_mtl_dma,
1149 	.config_cbs = dwmac4_config_cbs,
1150 	.dump_regs = dwmac4_dump_regs,
1151 	.host_irq_status = dwmac4_irq_status,
1152 	.host_mtl_irq_status = dwmac4_irq_mtl_status,
1153 	.flow_ctrl = dwmac4_flow_ctrl,
1154 	.pmt = dwmac4_pmt,
1155 	.set_umac_addr = dwmac4_set_umac_addr,
1156 	.get_umac_addr = dwmac4_get_umac_addr,
1157 	.set_eee_mode = dwmac4_set_eee_mode,
1158 	.reset_eee_mode = dwmac4_reset_eee_mode,
1159 	.set_eee_timer = dwmac4_set_eee_timer,
1160 	.set_eee_pls = dwmac4_set_eee_pls,
1161 	.pcs_ctrl_ane = dwmac4_ctrl_ane,
1162 	.pcs_rane = dwmac4_rane,
1163 	.pcs_get_adv_lp = dwmac4_get_adv_lp,
1164 	.debug = dwmac4_debug,
1165 	.set_filter = dwmac4_set_filter,
1166 	.flex_pps_config = dwmac5_flex_pps_config,
1167 	.set_mac_loopback = dwmac4_set_mac_loopback,
1168 	.update_vlan_hash = dwmac4_update_vlan_hash,
1169 	.sarc_configure = dwmac4_sarc_configure,
1170 	.enable_vlan = dwmac4_enable_vlan,
1171 	.set_arp_offload = dwmac4_set_arp_offload,
1172 	.config_l3_filter = dwmac4_config_l3_filter,
1173 	.config_l4_filter = dwmac4_config_l4_filter,
1174 	.est_configure = dwmac5_est_configure,
1175 	.fpe_configure = dwmac5_fpe_configure,
1176 	.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
1177 	.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
1178 	.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
1179 };
1180 
1181 const struct stmmac_ops dwmac510_ops = {
1182 	.core_init = dwmac4_core_init,
1183 	.set_mac = stmmac_dwmac4_set_mac,
1184 	.rx_ipc = dwmac4_rx_ipc_enable,
1185 	.rx_queue_enable = dwmac4_rx_queue_enable,
1186 	.rx_queue_prio = dwmac4_rx_queue_priority,
1187 	.tx_queue_prio = dwmac4_tx_queue_priority,
1188 	.rx_queue_routing = dwmac4_rx_queue_routing,
1189 	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
1190 	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
1191 	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
1192 	.map_mtl_to_dma = dwmac4_map_mtl_dma,
1193 	.config_cbs = dwmac4_config_cbs,
1194 	.dump_regs = dwmac4_dump_regs,
1195 	.host_irq_status = dwmac4_irq_status,
1196 	.host_mtl_irq_status = dwmac4_irq_mtl_status,
1197 	.flow_ctrl = dwmac4_flow_ctrl,
1198 	.pmt = dwmac4_pmt,
1199 	.set_umac_addr = dwmac4_set_umac_addr,
1200 	.get_umac_addr = dwmac4_get_umac_addr,
1201 	.set_eee_mode = dwmac4_set_eee_mode,
1202 	.reset_eee_mode = dwmac4_reset_eee_mode,
1203 	.set_eee_timer = dwmac4_set_eee_timer,
1204 	.set_eee_pls = dwmac4_set_eee_pls,
1205 	.pcs_ctrl_ane = dwmac4_ctrl_ane,
1206 	.pcs_rane = dwmac4_rane,
1207 	.pcs_get_adv_lp = dwmac4_get_adv_lp,
1208 	.debug = dwmac4_debug,
1209 	.set_filter = dwmac4_set_filter,
1210 	.safety_feat_config = dwmac5_safety_feat_config,
1211 	.safety_feat_irq_status = dwmac5_safety_feat_irq_status,
1212 	.safety_feat_dump = dwmac5_safety_feat_dump,
1213 	.rxp_config = dwmac5_rxp_config,
1214 	.flex_pps_config = dwmac5_flex_pps_config,
1215 	.set_mac_loopback = dwmac4_set_mac_loopback,
1216 	.update_vlan_hash = dwmac4_update_vlan_hash,
1217 	.sarc_configure = dwmac4_sarc_configure,
1218 	.enable_vlan = dwmac4_enable_vlan,
1219 	.set_arp_offload = dwmac4_set_arp_offload,
1220 	.config_l3_filter = dwmac4_config_l3_filter,
1221 	.config_l4_filter = dwmac4_config_l4_filter,
1222 	.est_configure = dwmac5_est_configure,
1223 	.fpe_configure = dwmac5_fpe_configure,
1224 	.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
1225 	.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
1226 	.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
1227 };
1228 
1229 static u32 dwmac4_get_num_vlan(void __iomem *ioaddr)
1230 {
1231 	u32 val, num_vlan;
1232 
1233 	val = readl(ioaddr + GMAC_HW_FEATURE3);
1234 	switch (val & GMAC_HW_FEAT_NRVF) {
1235 	case 0:
1236 		num_vlan = 1;
1237 		break;
1238 	case 1:
1239 		num_vlan = 4;
1240 		break;
1241 	case 2:
1242 		num_vlan = 8;
1243 		break;
1244 	case 3:
1245 		num_vlan = 16;
1246 		break;
1247 	case 4:
1248 		num_vlan = 24;
1249 		break;
1250 	case 5:
1251 		num_vlan = 32;
1252 		break;
1253 	default:
1254 		num_vlan = 1;
1255 	}
1256 
1257 	return num_vlan;
1258 }
1259 
1260 int dwmac4_setup(struct stmmac_priv *priv)
1261 {
1262 	struct mac_device_info *mac = priv->hw;
1263 
1264 	dev_info(priv->device, "\tDWMAC4/5\n");
1265 
1266 	priv->dev->priv_flags |= IFF_UNICAST_FLT;
1267 	mac->pcsr = priv->ioaddr;
1268 	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
1269 	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
1270 	mac->mcast_bits_log2 = 0;
1271 
1272 	if (mac->multicast_filter_bins)
1273 		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
1274 
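	/* Speed is encoded in GMAC_CONFIG: PS alone selects 10 Mb/s,
	 * PS | FES selects 100 Mb/s, and clearing both selects 1000 Mb/s.
	 */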
1275 	mac->link.duplex = GMAC_CONFIG_DM;
1276 	mac->link.speed10 = GMAC_CONFIG_PS;
1277 	mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
1278 	mac->link.speed1000 = 0;
1279 	mac->link.speed_mask = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
1280 	mac->mii.addr = GMAC_MDIO_ADDR;
1281 	mac->mii.data = GMAC_MDIO_DATA;
1282 	mac->mii.addr_shift = 21;
1283 	mac->mii.addr_mask = GENMASK(25, 21);
1284 	mac->mii.reg_shift = 16;
1285 	mac->mii.reg_mask = GENMASK(20, 16);
1286 	mac->mii.clk_csr_shift = 8;
1287 	mac->mii.clk_csr_mask = GENMASK(11, 8);
1288 	mac->num_vlan = dwmac4_get_num_vlan(priv->ioaddr);
1289 
1290 	return 0;
1291 }
1292