1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
4  * DWC Ether MAC version 4.00 has been used for developing this code.
5  *
6  * This only implements the mac core functions for this chip.
7  *
8  * Copyright (C) 2015  STMicroelectronics Ltd
9  *
10  * Author: Alexandre Torgue <alexandre.torgue@st.com>
11  */
12 
13 #include <linux/crc32.h>
14 #include <linux/slab.h>
15 #include <linux/ethtool.h>
16 #include <linux/io.h>
17 #include <net/dsa.h>
18 #include "stmmac.h"
19 #include "stmmac_pcs.h"
20 #include "dwmac4.h"
21 #include "dwmac5.h"
22 
23 static void dwmac4_core_init(struct mac_device_info *hw,
24 			     struct net_device *dev)
25 {
26 	struct stmmac_priv *priv = netdev_priv(dev);
27 	void __iomem *ioaddr = hw->pcsr;
28 	u32 value = readl(ioaddr + GMAC_CONFIG);
29 	u32 clk_rate;
30 
31 	value |= GMAC_CORE_INIT;
32 
33 	if (hw->ps) {
34 		value |= GMAC_CONFIG_TE;
35 
36 		value &= hw->link.speed_mask;
37 		switch (hw->ps) {
38 		case SPEED_1000:
39 			value |= hw->link.speed1000;
40 			break;
41 		case SPEED_100:
42 			value |= hw->link.speed100;
43 			break;
44 		case SPEED_10:
45 			value |= hw->link.speed10;
46 			break;
47 		}
48 	}
49 
50 	writel(value, ioaddr + GMAC_CONFIG);
51 
52 	/* Configure LPI 1us counter to number of CSR clock ticks in 1us - 1 */
53 	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
54 	writel((clk_rate / 1000000) - 1, ioaddr + GMAC4_MAC_ONEUS_TIC_COUNTER);
55 
56 	/* Enable GMAC interrupts */
57 	value = GMAC_INT_DEFAULT_ENABLE;
58 
59 	if (hw->pcs)
60 		value |= GMAC_PCS_IRQ_DEFAULT;
61 
62 	/* Enable FPE interrupt */
63 	if ((GMAC_HW_FEAT_FPESEL & readl(ioaddr + GMAC_HW_FEATURE3)) >> 26)
64 		value |= GMAC_INT_FPE_EN;
65 
66 	writel(value, ioaddr + GMAC_INT_EN);
67 
68 	if (GMAC_INT_DEFAULT_ENABLE & GMAC_INT_TSIE)
69 		init_waitqueue_head(&priv->tstamp_busy_wait);
70 }
71 
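/*
 * Worked example for the 1us tick counter programmed above (illustrative
 * value, not taken from a real platform): with a 250 MHz CSR clock,
 * clk_get_rate() returns 250000000, so the value written is
 * (250000000 / 1000000) - 1 = 249, i.e. the number of CSR clock ticks in
 * one microsecond, minus one.
 */
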
72 static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
73 				   u8 mode, u32 queue)
74 {
75 	void __iomem *ioaddr = hw->pcsr;
76 	u32 value = readl(ioaddr + GMAC_RXQ_CTRL0);
77 
78 	value &= GMAC_RX_QUEUE_CLEAR(queue);
79 	if (mode == MTL_QUEUE_AVB)
80 		value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
81 	else if (mode == MTL_QUEUE_DCB)
82 		value |= GMAC_RX_DCB_QUEUE_ENABLE(queue);
83 
84 	writel(value, ioaddr + GMAC_RXQ_CTRL0);
85 }
86 
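/*
 * Note on the register layout assumed above: MAC_RxQ_Ctrl0 carries a 2-bit
 * enable field per RX queue (not enabled / enabled for AV traffic / enabled
 * for DCB or generic traffic), which is why the queue's field is cleared
 * with GMAC_RX_QUEUE_CLEAR() before one of the two enable bits is set.
 */
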
87 static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
88 				     u32 prio, u32 queue)
89 {
90 	void __iomem *ioaddr = hw->pcsr;
91 	u32 base_register;
92 	u32 value;
93 
94 	base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
95 	if (queue >= 4)
96 		queue -= 4;
97 
98 	value = readl(ioaddr + base_register);
99 
100 	value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue);
101 	value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
102 						GMAC_RXQCTRL_PSRQX_MASK(queue);
103 	writel(value, ioaddr + base_register);
104 }
105 
106 static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
107 				     u32 prio, u32 queue)
108 {
109 	void __iomem *ioaddr = hw->pcsr;
110 	u32 base_register;
111 	u32 value;
112 
113 	base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
114 	if (queue >= 4)
115 		queue -= 4;
116 
117 	value = readl(ioaddr + base_register);
118 
119 	value &= ~GMAC_TXQCTRL_PSTQX_MASK(queue);
120 	value |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) &
121 						GMAC_TXQCTRL_PSTQX_MASK(queue);
122 
123 	writel(value, ioaddr + base_register);
124 }
125 
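/*
 * Sketch of the priority mapping used by the two helpers above, assuming
 * the usual 8-bit PSRQ/PSTQ field per queue: queues 0-3 live in the first
 * register and queues 4-7 in the second. For example, RX queue 5 with
 * priority bitmap 0x03 becomes field index 1 of the second register:
 *
 *	value &= ~GMAC_RXQCTRL_PSRQX_MASK(1);
 *	value |= 0x03 << GMAC_RXQCTRL_PSRQX_SHIFT(1);	// bits 15:8
 */
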
126 static void dwmac4_rx_queue_routing(struct mac_device_info *hw,
127 				    u8 packet, u32 queue)
128 {
129 	void __iomem *ioaddr = hw->pcsr;
130 	u32 value;
131 
132 	static const struct stmmac_rx_routing route_possibilities[] = {
133 		{ GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT },
134 		{ GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT },
135 		{ GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT },
136 		{ GMAC_RXQCTRL_UPQ_MASK, GMAC_RXQCTRL_UPQ_SHIFT },
137 		{ GMAC_RXQCTRL_MCBCQ_MASK, GMAC_RXQCTRL_MCBCQ_SHIFT },
138 	};
139 
140 	value = readl(ioaddr + GMAC_RXQ_CTRL1);
141 
142 	/* routing configuration */
143 	value &= ~route_possibilities[packet - 1].reg_mask;
144 	value |= (queue << route_possibilities[packet - 1].reg_shift) &
145 		 route_possibilities[packet - 1].reg_mask;
146 
147 	/* some packets require extra ops */
148 	if (packet == PACKET_AVCPQ) {
149 		value &= ~GMAC_RXQCTRL_TACPQE;
150 		value |= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT;
151 	} else if (packet == PACKET_MCBCQ) {
152 		value &= ~GMAC_RXQCTRL_MCBCQEN;
153 		value |= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT;
154 	}
155 
156 	writel(value, ioaddr + GMAC_RXQ_CTRL1);
157 }
158 
159 static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw,
160 					  u32 rx_alg)
161 {
162 	void __iomem *ioaddr = hw->pcsr;
163 	u32 value = readl(ioaddr + MTL_OPERATION_MODE);
164 
165 	value &= ~MTL_OPERATION_RAA;
166 	switch (rx_alg) {
167 	case MTL_RX_ALGORITHM_SP:
168 		value |= MTL_OPERATION_RAA_SP;
169 		break;
170 	case MTL_RX_ALGORITHM_WSP:
171 		value |= MTL_OPERATION_RAA_WSP;
172 		break;
173 	default:
174 		break;
175 	}
176 
177 	writel(value, ioaddr + MTL_OPERATION_MODE);
178 }
179 
180 static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw,
181 					  u32 tx_alg)
182 {
183 	void __iomem *ioaddr = hw->pcsr;
184 	u32 value = readl(ioaddr + MTL_OPERATION_MODE);
185 
186 	value &= ~MTL_OPERATION_SCHALG_MASK;
187 	switch (tx_alg) {
188 	case MTL_TX_ALGORITHM_WRR:
189 		value |= MTL_OPERATION_SCHALG_WRR;
190 		break;
191 	case MTL_TX_ALGORITHM_WFQ:
192 		value |= MTL_OPERATION_SCHALG_WFQ;
193 		break;
194 	case MTL_TX_ALGORITHM_DWRR:
195 		value |= MTL_OPERATION_SCHALG_DWRR;
196 		break;
197 	case MTL_TX_ALGORITHM_SP:
198 		value |= MTL_OPERATION_SCHALG_SP;
199 		break;
200 	default:
201 		break;
202 	}
203 
204 	writel(value, ioaddr + MTL_OPERATION_MODE);
205 }
206 
207 static void dwmac4_set_mtl_tx_queue_weight(struct mac_device_info *hw,
208 					   u32 weight, u32 queue)
209 {
210 	void __iomem *ioaddr = hw->pcsr;
211 	u32 value = readl(ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
212 
213 	value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK;
214 	value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK;
215 	writel(value, ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
216 }
217 
218 static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
219 {
220 	void __iomem *ioaddr = hw->pcsr;
221 	u32 value;
222 
223 	if (queue < 4)
224 		value = readl(ioaddr + MTL_RXQ_DMA_MAP0);
225 	else
226 		value = readl(ioaddr + MTL_RXQ_DMA_MAP1);
227 
228 	if (queue == 0 || queue == 4) {
229 		value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK;
230 		value |= MTL_RXQ_DMA_Q04MDMACH(chan);
231 	} else if (queue > 4) {
232 		value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue - 4);
233 		value |= MTL_RXQ_DMA_QXMDMACH(chan, queue - 4);
234 	} else {
235 		value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
236 		value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
237 	}
238 
239 	if (queue < 4)
240 		writel(value, ioaddr + MTL_RXQ_DMA_MAP0);
241 	else
242 		writel(value, ioaddr + MTL_RXQ_DMA_MAP1);
243 }
244 
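/*
 * Illustrative reading of the mapping above: MTL_RXQ_DMA_MAP0 holds the
 * DMA-channel fields for RX queues 0-3 and MTL_RXQ_DMA_MAP1 those for
 * queues 4-7. Queues 0 and 4 sit in the lowest field of their respective
 * register and therefore use the dedicated Q04MDMACH mask; the remaining
 * queues use the generic per-queue QXMDMACH mask/shift relative to their
 * register.
 */
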
245 static void dwmac4_config_cbs(struct mac_device_info *hw,
246 			      u32 send_slope, u32 idle_slope,
247 			      u32 high_credit, u32 low_credit, u32 queue)
248 {
249 	void __iomem *ioaddr = hw->pcsr;
250 	u32 value;
251 
252 	pr_debug("Queue %d configured as AVB. Parameters:\n", queue);
253 	pr_debug("\tsend_slope: 0x%08x\n", send_slope);
254 	pr_debug("\tidle_slope: 0x%08x\n", idle_slope);
255 	pr_debug("\thigh_credit: 0x%08x\n", high_credit);
256 	pr_debug("\tlow_credit: 0x%08x\n", low_credit);
257 
258 	/* enable AV algorithm */
259 	value = readl(ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
260 	value |= MTL_ETS_CTRL_AVALG;
261 	value |= MTL_ETS_CTRL_CC;
262 	writel(value, ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
263 
264 	/* configure send slope */
265 	value = readl(ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
266 	value &= ~MTL_SEND_SLP_CRED_SSC_MASK;
267 	value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK;
268 	writel(value, ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
269 
270 	/* configure idle slope (same register as tx weight) */
271 	dwmac4_set_mtl_tx_queue_weight(hw, idle_slope, queue);
272 
273 	/* configure high credit */
274 	value = readl(ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
275 	value &= ~MTL_HIGH_CRED_HC_MASK;
276 	value |= high_credit & MTL_HIGH_CRED_HC_MASK;
277 	writel(value, ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
278 
279 	/* configure low credit */
280 	value = readl(ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
281 	value &= ~MTL_HIGH_CRED_LC_MASK;
282 	value |= low_credit & MTL_HIGH_CRED_LC_MASK;
283 	writel(value, ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
284 }
285 
286 static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space)
287 {
288 	void __iomem *ioaddr = hw->pcsr;
289 	int i;
290 
291 	for (i = 0; i < GMAC_REG_NUM; i++)
292 		reg_space[i] = readl(ioaddr + i * 4);
293 }
294 
295 static int dwmac4_rx_ipc_enable(struct mac_device_info *hw)
296 {
297 	void __iomem *ioaddr = hw->pcsr;
298 	u32 value = readl(ioaddr + GMAC_CONFIG);
299 
300 	if (hw->rx_csum)
301 		value |= GMAC_CONFIG_IPC;
302 	else
303 		value &= ~GMAC_CONFIG_IPC;
304 
305 	writel(value, ioaddr + GMAC_CONFIG);
306 
307 	value = readl(ioaddr + GMAC_CONFIG);
308 
309 	return !!(value & GMAC_CONFIG_IPC);
310 }
311 
312 static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
313 {
314 	void __iomem *ioaddr = hw->pcsr;
315 	unsigned int pmt = 0;
316 	u32 config;
317 
318 	if (mode & WAKE_MAGIC) {
319 		pr_debug("GMAC: WOL Magic frame\n");
320 		pmt |= power_down | magic_pkt_en;
321 	}
322 	if (mode & WAKE_UCAST) {
323 		pr_debug("GMAC: WOL on global unicast\n");
324 		pmt |= power_down | global_unicast | wake_up_frame_en;
325 	}
326 
327 	if (pmt) {
328 		/* The receiver must be enabled for WOL before powering down */
329 		config = readl(ioaddr + GMAC_CONFIG);
330 		config |= GMAC_CONFIG_RE;
331 		writel(config, ioaddr + GMAC_CONFIG);
332 	}
333 	writel(pmt, ioaddr + GMAC_PMT);
334 }
335 
336 static void dwmac4_set_umac_addr(struct mac_device_info *hw,
337 				 const unsigned char *addr, unsigned int reg_n)
338 {
339 	void __iomem *ioaddr = hw->pcsr;
340 
341 	stmmac_dwmac4_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
342 				   GMAC_ADDR_LOW(reg_n));
343 }
344 
345 static void dwmac4_get_umac_addr(struct mac_device_info *hw,
346 				 unsigned char *addr, unsigned int reg_n)
347 {
348 	void __iomem *ioaddr = hw->pcsr;
349 
350 	stmmac_dwmac4_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
351 				   GMAC_ADDR_LOW(reg_n));
352 }
353 
354 static void dwmac4_set_eee_mode(struct mac_device_info *hw,
355 				bool en_tx_lpi_clockgating)
356 {
357 	void __iomem *ioaddr = hw->pcsr;
358 	u32 value;
359 
360 	/* Enable the link status receive on RGMII, SGMII or SMII
361 	 * receive path and instruct the transmit to enter the LPI
362 	 * state.
363 	 */
364 	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
365 	value |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;
366 
367 	if (en_tx_lpi_clockgating)
368 		value |= GMAC4_LPI_CTRL_STATUS_LPITCSE;
369 
370 	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
371 }
372 
373 static void dwmac4_reset_eee_mode(struct mac_device_info *hw)
374 {
375 	void __iomem *ioaddr = hw->pcsr;
376 	u32 value;
377 
378 	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
379 	value &= ~(GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA);
380 	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
381 }
382 
383 static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
384 {
385 	void __iomem *ioaddr = hw->pcsr;
386 	u32 value;
387 
388 	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
389 
390 	if (link)
391 		value |= GMAC4_LPI_CTRL_STATUS_PLS;
392 	else
393 		value &= ~GMAC4_LPI_CTRL_STATUS_PLS;
394 
395 	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
396 }
397 
398 static void dwmac4_set_eee_lpi_entry_timer(struct mac_device_info *hw, int et)
399 {
400 	void __iomem *ioaddr = hw->pcsr;
401 	int value = et & STMMAC_ET_MAX;
402 	int regval;
403 
404 	/* Program LPI entry timer value into register */
405 	writel(value, ioaddr + GMAC4_LPI_ENTRY_TIMER);
406 
407 	/* Enable/disable LPI entry timer */
408 	regval = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
409 	regval |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;
410 
411 	if (et)
412 		regval |= GMAC4_LPI_CTRL_STATUS_LPIATE;
413 	else
414 		regval &= ~GMAC4_LPI_CTRL_STATUS_LPIATE;
415 
416 	writel(regval, ioaddr + GMAC4_LPI_CTRL_STATUS);
417 }
418 
419 static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
420 {
421 	void __iomem *ioaddr = hw->pcsr;
422 	int value = ((tw & 0xffff)) | ((ls & 0x3ff) << 16);
423 
424 	/* Program the timers in the LPI timer control register:
425 	 * LS: minimum time (ms) for which the link
426 	 *  status from PHY should be ok before transmitting
427 	 *  the LPI pattern.
428 	 * TW: minimum time (us) for which the core waits
429 	 *  after it has stopped transmitting the LPI pattern.
430 	 */
431 	writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL);
432 }
433 
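/*
 * Worked example for the LPI timer packing above (illustrative numbers):
 * ls = 1000 and tw = 30 give
 *
 *	value = (30 & 0xffff) | ((1000 & 0x3ff) << 16) = 0x03e8001e
 *
 * i.e. the TW value lands in bits 15:0 and the LS value in bits 25:16 of
 * the LPI timers control register.
 */
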
434 static void dwmac4_write_single_vlan(struct net_device *dev, u16 vid)
435 {
436 	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
437 	u32 val;
438 
439 	val = readl(ioaddr + GMAC_VLAN_TAG);
440 	val &= ~GMAC_VLAN_TAG_VID;
441 	val |= GMAC_VLAN_TAG_ETV | vid;
442 
443 	writel(val, ioaddr + GMAC_VLAN_TAG);
444 }
445 
446 static int dwmac4_write_vlan_filter(struct net_device *dev,
447 				    struct mac_device_info *hw,
448 				    u8 index, u32 data)
449 {
450 	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
451 	int i, timeout = 10;
452 	u32 val;
453 
454 	if (index >= hw->num_vlan)
455 		return -EINVAL;
456 
457 	writel(data, ioaddr + GMAC_VLAN_TAG_DATA);
458 
459 	val = readl(ioaddr + GMAC_VLAN_TAG);
460 	val &= ~(GMAC_VLAN_TAG_CTRL_OFS_MASK |
461 		GMAC_VLAN_TAG_CTRL_CT |
462 		GMAC_VLAN_TAG_CTRL_OB);
463 	val |= (index << GMAC_VLAN_TAG_CTRL_OFS_SHIFT) | GMAC_VLAN_TAG_CTRL_OB;
464 
465 	writel(val, ioaddr + GMAC_VLAN_TAG);
466 
467 	for (i = 0; i < timeout; i++) {
468 		val = readl(ioaddr + GMAC_VLAN_TAG);
469 		if (!(val & GMAC_VLAN_TAG_CTRL_OB))
470 			return 0;
471 		udelay(1);
472 	}
473 
474 	netdev_err(dev, "Timeout accessing MAC_VLAN_Tag_Filter\n");
475 
476 	return -EBUSY;
477 }
478 
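/*
 * The helper above implements the indirect VLAN-filter access sequence:
 * the filter entry is staged in MAC_VLAN_Tag_Data, the target offset plus
 * the "operation busy" (OB) bit are written to the VLAN tag control
 * register, and OB is then polled (here for up to ~10us) until the
 * hardware has committed the entry to the selected filter slot.
 */
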
479 static int dwmac4_add_hw_vlan_rx_fltr(struct net_device *dev,
480 				      struct mac_device_info *hw,
481 				      __be16 proto, u16 vid)
482 {
483 	int index = -1;
484 	u32 val = 0;
485 	int i, ret;
486 
487 	if (vid > 4095)
488 		return -EINVAL;
489 
490 	/* Single Rx VLAN Filter */
491 	if (hw->num_vlan == 1) {
492 		/* For single VLAN filter, VID 0 means VLAN promiscuous */
493 		if (vid == 0) {
494 			netdev_warn(dev, "Adding VLAN ID 0 is not supported\n");
495 			return -EPERM;
496 		}
497 
498 		if (hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) {
499 			netdev_err(dev, "Only single VLAN ID supported\n");
500 			return -EPERM;
501 		}
502 
503 		hw->vlan_filter[0] = vid;
504 		dwmac4_write_single_vlan(dev, vid);
505 
506 		return 0;
507 	}
508 
509 	/* Extended Rx VLAN Filter Enable */
510 	val |= GMAC_VLAN_TAG_DATA_ETV | GMAC_VLAN_TAG_DATA_VEN | vid;
511 
512 	for (i = 0; i < hw->num_vlan; i++) {
513 		if (hw->vlan_filter[i] == val)
514 			return 0;
515 		else if (!(hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN))
516 			index = i;
517 	}
518 
519 	if (index == -1) {
520 		netdev_err(dev, "MAC_VLAN_Tag_Filter full (size: %0u)\n",
521 			   hw->num_vlan);
522 		return -EPERM;
523 	}
524 
525 	ret = dwmac4_write_vlan_filter(dev, hw, index, val);
526 
527 	if (!ret)
528 		hw->vlan_filter[index] = val;
529 
530 	return ret;
531 }
532 
533 static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev,
534 				      struct mac_device_info *hw,
535 				      __be16 proto, u16 vid)
536 {
537 	int i, ret = 0;
538 
539 	/* Single Rx VLAN Filter */
540 	if (hw->num_vlan == 1) {
541 		if ((hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) == vid) {
542 			hw->vlan_filter[0] = 0;
543 			dwmac4_write_single_vlan(dev, 0);
544 		}
545 		return 0;
546 	}
547 
548 	/* Extended Rx VLAN Filter Enable */
549 	for (i = 0; i < hw->num_vlan; i++) {
550 		if ((hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VID) == vid) {
551 			ret = dwmac4_write_vlan_filter(dev, hw, i, 0);
552 
553 			if (!ret)
554 				hw->vlan_filter[i] = 0;
555 			else
556 				return ret;
557 		}
558 	}
559 
560 	return ret;
561 }
562 
563 static void dwmac4_restore_hw_vlan_rx_fltr(struct net_device *dev,
564 					   struct mac_device_info *hw)
565 {
566 	void __iomem *ioaddr = hw->pcsr;
567 	u32 value;
568 	u32 hash;
569 	u32 val;
570 	int i;
571 
572 	/* Single Rx VLAN Filter */
573 	if (hw->num_vlan == 1) {
574 		dwmac4_write_single_vlan(dev, hw->vlan_filter[0]);
575 		return;
576 	}
577 
578 	/* Extended Rx VLAN Filter Enable */
579 	for (i = 0; i < hw->num_vlan; i++) {
580 		if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) {
581 			val = hw->vlan_filter[i];
582 			dwmac4_write_vlan_filter(dev, hw, i, val);
583 		}
584 	}
585 
586 	hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE);
587 	if (hash & GMAC_VLAN_VLHT) {
588 		value = readl(ioaddr + GMAC_VLAN_TAG);
589 		value |= GMAC_VLAN_VTHM;
590 		writel(value, ioaddr + GMAC_VLAN_TAG);
591 	}
592 }
593 
594 static void dwmac4_set_filter(struct mac_device_info *hw,
595 			      struct net_device *dev)
596 {
597 	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
598 	int numhashregs = (hw->multicast_filter_bins >> 5);
599 	int mcbitslog2 = hw->mcast_bits_log2;
600 	unsigned int value;
601 	u32 mc_filter[8];
602 	int i;
603 
604 	memset(mc_filter, 0, sizeof(mc_filter));
605 
606 	value = readl(ioaddr + GMAC_PACKET_FILTER);
607 	value &= ~GMAC_PACKET_FILTER_HMC;
608 	value &= ~GMAC_PACKET_FILTER_HPF;
609 	value &= ~GMAC_PACKET_FILTER_PCF;
610 	value &= ~GMAC_PACKET_FILTER_PM;
611 	value &= ~GMAC_PACKET_FILTER_PR;
612 	value &= ~GMAC_PACKET_FILTER_RA;
613 	if (dev->flags & IFF_PROMISC) {
614 		/* VLAN Tag Filter Fail Packets Queuing */
615 		if (hw->vlan_fail_q_en) {
616 			value = readl(ioaddr + GMAC_RXQ_CTRL4);
617 			value &= ~GMAC_RXQCTRL_VFFQ_MASK;
618 			value |= GMAC_RXQCTRL_VFFQE |
619 				 (hw->vlan_fail_q << GMAC_RXQCTRL_VFFQ_SHIFT);
620 			writel(value, ioaddr + GMAC_RXQ_CTRL4);
621 			value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_RA;
622 		} else {
623 			value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_PCF;
624 		}
625 
626 	} else if ((dev->flags & IFF_ALLMULTI) ||
627 		   (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
628 		/* Pass all multi */
629 		value |= GMAC_PACKET_FILTER_PM;
630 		/* Set all the bits of the HASH tab */
631 		memset(mc_filter, 0xff, sizeof(mc_filter));
632 	} else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
633 		struct netdev_hw_addr *ha;
634 
635 		/* Hash filter for multicast */
636 		value |= GMAC_PACKET_FILTER_HMC;
637 
638 		netdev_for_each_mc_addr(ha, dev) {
639 			/* The upper n bits of the calculated CRC are used to
640 			 * index the contents of the hash table. The number of
641 			 * bits used depends on the hardware configuration
642 			 * selected at core configuration time.
643 			 */
644 			u32 bit_nr = bitrev32(~crc32_le(~0, ha->addr,
645 					ETH_ALEN)) >> (32 - mcbitslog2);
646 			/* The most significant bit determines the register to
647 			 * use (H/L) while the other 5 bits determine the bit
648 			 * within the register.
649 			 */
650 			mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1f));
651 		}
652 	}
653 
654 	for (i = 0; i < numhashregs; i++)
655 		writel(mc_filter[i], ioaddr + GMAC_HASH_TAB(i));
656 
657 	value |= GMAC_PACKET_FILTER_HPF;
658 
659 	/* Handle multiple unicast addresses */
660 	if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
661 		/* Switch to promiscuous mode if more than 128 addrs
662 		 * are required
663 		 */
664 		value |= GMAC_PACKET_FILTER_PR;
665 	} else {
666 		struct netdev_hw_addr *ha;
667 		int reg = 1;
668 
669 		netdev_for_each_uc_addr(ha, dev) {
670 			dwmac4_set_umac_addr(hw, ha->addr, reg);
671 			reg++;
672 		}
673 
674 		while (reg < GMAC_MAX_PERFECT_ADDRESSES) {
675 			writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
676 			writel(0, ioaddr + GMAC_ADDR_LOW(reg));
677 			reg++;
678 		}
679 	}
680 
681 	/* VLAN filtering */
682 	if (dev->flags & IFF_PROMISC && !hw->vlan_fail_q_en)
683 		value &= ~GMAC_PACKET_FILTER_VTFE;
684 	else if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
685 		value |= GMAC_PACKET_FILTER_VTFE;
686 
687 	writel(value, ioaddr + GMAC_PACKET_FILTER);
688 }
689 
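/*
 * Worked example for the multicast hash above (illustrative, assuming a
 * 256-bin filter so mcbitslog2 == 8): if the bit-reversed CRC of an
 * address yields bit_nr == 156, then 156 >> 5 == 4 selects GMAC_HASH_TAB(4)
 * and 156 & 0x1f == 28 is the bit set within that register.
 */
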
690 static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
691 			     unsigned int fc, unsigned int pause_time,
692 			     u32 tx_cnt)
693 {
694 	void __iomem *ioaddr = hw->pcsr;
695 	unsigned int flow = 0;
696 	u32 queue = 0;
697 
698 	pr_debug("GMAC Flow-Control:\n");
699 	if (fc & FLOW_RX) {
700 		pr_debug("\tReceive Flow-Control ON\n");
701 		flow |= GMAC_RX_FLOW_CTRL_RFE;
702 	} else {
703 		pr_debug("\tReceive Flow-Control OFF\n");
704 	}
705 	writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);
706 
707 	if (fc & FLOW_TX) {
708 		pr_debug("\tTransmit Flow-Control ON\n");
709 
710 		if (duplex)
711 			pr_debug("\tduplex mode: PAUSE %d\n", pause_time);
712 
713 		for (queue = 0; queue < tx_cnt; queue++) {
714 			flow = GMAC_TX_FLOW_CTRL_TFE;
715 
716 			if (duplex)
717 				flow |=
718 				(pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);
719 
720 			writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
721 		}
722 	} else {
723 		for (queue = 0; queue < tx_cnt; queue++)
724 			writel(0, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
725 	}
726 }
727 
728 static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
729 			    bool loopback)
730 {
731 	dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
732 }
733 
734 static void dwmac4_rane(void __iomem *ioaddr, bool restart)
735 {
736 	dwmac_rane(ioaddr, GMAC_PCS_BASE, restart);
737 }
738 
739 static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
740 {
741 	dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
742 }
743 
744 /* RGMII or SMII interface */
745 static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x)
746 {
747 	u32 status;
748 
749 	status = readl(ioaddr + GMAC_PHYIF_CONTROL_STATUS);
750 	x->irq_rgmii_n++;
751 
752 	/* Check the link status */
753 	if (status & GMAC_PHYIF_CTRLSTATUS_LNKSTS) {
754 		int speed_value;
755 
756 		x->pcs_link = 1;
757 
758 		speed_value = ((status & GMAC_PHYIF_CTRLSTATUS_SPEED) >>
759 			       GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT);
760 		if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_125)
761 			x->pcs_speed = SPEED_1000;
762 		else if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_25)
763 			x->pcs_speed = SPEED_100;
764 		else
765 			x->pcs_speed = SPEED_10;
766 
767 		x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK);
768 
769 		pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
770 			x->pcs_duplex ? "Full" : "Half");
771 	} else {
772 		x->pcs_link = 0;
773 		pr_info("Link is Down\n");
774 	}
775 }
776 
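/*
 * The SPEED field decoded above reflects the clock selected on the
 * RGMII/SMII interface: the "125" encoding corresponds to a 125 MHz clock
 * and hence 1000 Mb/s, "25" to 25 MHz / 100 Mb/s, and the remaining
 * encoding to 2.5 MHz / 10 Mb/s.
 */
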
777 static int dwmac4_irq_mtl_status(struct mac_device_info *hw, u32 chan)
778 {
779 	void __iomem *ioaddr = hw->pcsr;
780 	u32 mtl_int_qx_status;
781 	int ret = 0;
782 
783 	mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);
784 
785 	/* Check MTL Interrupt */
786 	if (mtl_int_qx_status & MTL_INT_QX(chan)) {
787 		/* read Queue x Interrupt status */
788 		u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(chan));
789 
790 		if (status & MTL_RX_OVERFLOW_INT) {
791 			/*  clear Interrupt */
792 			writel(status | MTL_RX_OVERFLOW_INT,
793 			       ioaddr + MTL_CHAN_INT_CTRL(chan));
794 			ret = CORE_IRQ_MTL_RX_OVERFLOW;
795 		}
796 	}
797 
798 	return ret;
799 }
800 
801 static int dwmac4_irq_status(struct mac_device_info *hw,
802 			     struct stmmac_extra_stats *x)
803 {
804 	void __iomem *ioaddr = hw->pcsr;
805 	u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
806 	u32 intr_enable = readl(ioaddr + GMAC_INT_EN);
807 	int ret = 0;
808 
809 	/* Discard disabled bits */
810 	intr_status &= intr_enable;
811 
812 	/* Unused events (e.g. MMC interrupts) are not handled. */
813 	if (unlikely(intr_status & mmc_tx_irq))
814 		x->mmc_tx_irq_n++;
815 	if (unlikely(intr_status & mmc_rx_irq))
816 		x->mmc_rx_irq_n++;
817 	if (unlikely(intr_status & mmc_rx_csum_offload_irq))
818 		x->mmc_rx_csum_offload_irq_n++;
819 	/* Clear the PMT bits 5 and 6 by reading the PMT status reg */
820 	if (unlikely(intr_status & pmt_irq)) {
821 		readl(ioaddr + GMAC_PMT);
822 		x->irq_receive_pmt_irq_n++;
823 	}
824 
825 	/* MAC tx/rx EEE LPI entry/exit interrupts */
826 	if (intr_status & lpi_irq) {
827 		/* Clear LPI interrupt by reading MAC_LPI_Control_Status */
828 		u32 status = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
829 
830 		if (status & GMAC4_LPI_CTRL_STATUS_TLPIEN) {
831 			ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
832 			x->irq_tx_path_in_lpi_mode_n++;
833 		}
834 		if (status & GMAC4_LPI_CTRL_STATUS_TLPIEX) {
835 			ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
836 			x->irq_tx_path_exit_lpi_mode_n++;
837 		}
838 		if (status & GMAC4_LPI_CTRL_STATUS_RLPIEN)
839 			x->irq_rx_path_in_lpi_mode_n++;
840 		if (status & GMAC4_LPI_CTRL_STATUS_RLPIEX)
841 			x->irq_rx_path_exit_lpi_mode_n++;
842 	}
843 
844 	dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
845 	if (intr_status & PCS_RGSMIIIS_IRQ)
846 		dwmac4_phystatus(ioaddr, x);
847 
848 	return ret;
849 }
850 
851 static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
852 			 u32 rx_queues, u32 tx_queues)
853 {
854 	u32 value;
855 	u32 queue;
856 
857 	for (queue = 0; queue < tx_queues; queue++) {
858 		value = readl(ioaddr + MTL_CHAN_TX_DEBUG(queue));
859 
860 		if (value & MTL_DEBUG_TXSTSFSTS)
861 			x->mtl_tx_status_fifo_full++;
862 		if (value & MTL_DEBUG_TXFSTS)
863 			x->mtl_tx_fifo_not_empty++;
864 		if (value & MTL_DEBUG_TWCSTS)
865 			x->mmtl_fifo_ctrl++;
866 		if (value & MTL_DEBUG_TRCSTS_MASK) {
867 			u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
868 				     >> MTL_DEBUG_TRCSTS_SHIFT;
869 			if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
870 				x->mtl_tx_fifo_read_ctrl_write++;
871 			else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
872 				x->mtl_tx_fifo_read_ctrl_wait++;
873 			else if (trcsts == MTL_DEBUG_TRCSTS_READ)
874 				x->mtl_tx_fifo_read_ctrl_read++;
875 			else
876 				x->mtl_tx_fifo_read_ctrl_idle++;
877 		}
878 		if (value & MTL_DEBUG_TXPAUSED)
879 			x->mac_tx_in_pause++;
880 	}
881 
882 	for (queue = 0; queue < rx_queues; queue++) {
883 		value = readl(ioaddr + MTL_CHAN_RX_DEBUG(queue));
884 
885 		if (value & MTL_DEBUG_RXFSTS_MASK) {
886 			u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
887 				     >> MTL_DEBUG_RRCSTS_SHIFT;
888 
889 			if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
890 				x->mtl_rx_fifo_fill_level_full++;
891 			else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
892 				x->mtl_rx_fifo_fill_above_thresh++;
893 			else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
894 				x->mtl_rx_fifo_fill_below_thresh++;
895 			else
896 				x->mtl_rx_fifo_fill_level_empty++;
897 		}
898 		if (value & MTL_DEBUG_RRCSTS_MASK) {
899 			u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
900 				     MTL_DEBUG_RRCSTS_SHIFT;
901 
902 			if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
903 				x->mtl_rx_fifo_read_ctrl_flush++;
904 			else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
905 				x->mtl_rx_fifo_read_ctrl_read_data++;
906 			else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
907 				x->mtl_rx_fifo_read_ctrl_status++;
908 			else
909 				x->mtl_rx_fifo_read_ctrl_idle++;
910 		}
911 		if (value & MTL_DEBUG_RWCSTS)
912 			x->mtl_rx_fifo_ctrl_active++;
913 	}
914 
915 	/* GMAC debug */
916 	value = readl(ioaddr + GMAC_DEBUG);
917 
918 	if (value & GMAC_DEBUG_TFCSTS_MASK) {
919 		u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK)
920 			      >> GMAC_DEBUG_TFCSTS_SHIFT;
921 
922 		if (tfcsts == GMAC_DEBUG_TFCSTS_XFER)
923 			x->mac_tx_frame_ctrl_xfer++;
924 		else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE)
925 			x->mac_tx_frame_ctrl_pause++;
926 		else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT)
927 			x->mac_tx_frame_ctrl_wait++;
928 		else
929 			x->mac_tx_frame_ctrl_idle++;
930 	}
931 	if (value & GMAC_DEBUG_TPESTS)
932 		x->mac_gmii_tx_proto_engine++;
933 	if (value & GMAC_DEBUG_RFCFCSTS_MASK)
934 		x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK)
935 					    >> GMAC_DEBUG_RFCFCSTS_SHIFT;
936 	if (value & GMAC_DEBUG_RPESTS)
937 		x->mac_gmii_rx_proto_engine++;
938 }
939 
940 static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable)
941 {
942 	u32 value = readl(ioaddr + GMAC_CONFIG);
943 
944 	if (enable)
945 		value |= GMAC_CONFIG_LM;
946 	else
947 		value &= ~GMAC_CONFIG_LM;
948 
949 	writel(value, ioaddr + GMAC_CONFIG);
950 }
951 
952 static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
953 				    __le16 perfect_match, bool is_double)
954 {
955 	void __iomem *ioaddr = hw->pcsr;
956 	u32 value;
957 
958 	writel(hash, ioaddr + GMAC_VLAN_HASH_TABLE);
959 
960 	value = readl(ioaddr + GMAC_VLAN_TAG);
961 
962 	if (hash) {
963 		value |= GMAC_VLAN_VTHM | GMAC_VLAN_ETV;
964 		if (is_double) {
965 			value |= GMAC_VLAN_EDVLP;
966 			value |= GMAC_VLAN_ESVL;
967 			value |= GMAC_VLAN_DOVLTC;
968 		}
969 
970 		writel(value, ioaddr + GMAC_VLAN_TAG);
971 	} else if (perfect_match) {
972 		u32 value = GMAC_VLAN_ETV;
973 
974 		if (is_double) {
975 			value |= GMAC_VLAN_EDVLP;
976 			value |= GMAC_VLAN_ESVL;
977 			value |= GMAC_VLAN_DOVLTC;
978 		}
979 
980 		writel(value | perfect_match, ioaddr + GMAC_VLAN_TAG);
981 	} else {
982 		value &= ~(GMAC_VLAN_VTHM | GMAC_VLAN_ETV);
983 		value &= ~(GMAC_VLAN_EDVLP | GMAC_VLAN_ESVL);
984 		value &= ~GMAC_VLAN_DOVLTC;
985 		value &= ~GMAC_VLAN_VID;
986 
987 		writel(value, ioaddr + GMAC_VLAN_TAG);
988 	}
989 }
990 
991 static void dwmac4_sarc_configure(void __iomem *ioaddr, int val)
992 {
993 	u32 value = readl(ioaddr + GMAC_CONFIG);
994 
995 	value &= ~GMAC_CONFIG_SARC;
996 	value |= val << GMAC_CONFIG_SARC_SHIFT;
997 
998 	writel(value, ioaddr + GMAC_CONFIG);
999 }
1000 
1001 static void dwmac4_enable_vlan(struct mac_device_info *hw, u32 type)
1002 {
1003 	void __iomem *ioaddr = hw->pcsr;
1004 	u32 value;
1005 
1006 	value = readl(ioaddr + GMAC_VLAN_INCL);
1007 	value |= GMAC_VLAN_VLTI;
1008 	value |= GMAC_VLAN_CSVL; /* Only use SVLAN */
1009 	value &= ~GMAC_VLAN_VLC;
1010 	value |= (type << GMAC_VLAN_VLC_SHIFT) & GMAC_VLAN_VLC;
1011 	writel(value, ioaddr + GMAC_VLAN_INCL);
1012 }
1013 
1014 static void dwmac4_set_arp_offload(struct mac_device_info *hw, bool en,
1015 				   u32 addr)
1016 {
1017 	void __iomem *ioaddr = hw->pcsr;
1018 	u32 value;
1019 
1020 	writel(addr, ioaddr + GMAC_ARP_ADDR);
1021 
1022 	value = readl(ioaddr + GMAC_CONFIG);
1023 	if (en)
1024 		value |= GMAC_CONFIG_ARPEN;
1025 	else
1026 		value &= ~GMAC_CONFIG_ARPEN;
1027 	writel(value, ioaddr + GMAC_CONFIG);
1028 }
1029 
1030 static int dwmac4_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
1031 				   bool en, bool ipv6, bool sa, bool inv,
1032 				   u32 match)
1033 {
1034 	void __iomem *ioaddr = hw->pcsr;
1035 	u32 value;
1036 
1037 	value = readl(ioaddr + GMAC_PACKET_FILTER);
1038 	value |= GMAC_PACKET_FILTER_IPFE;
1039 	writel(value, ioaddr + GMAC_PACKET_FILTER);
1040 
1041 	value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));
1042 
1043 	/* For IPv6, the SA and DA filters cannot both be active */
1044 	if (ipv6) {
1045 		value |= GMAC_L3PEN0;
1046 		value &= ~(GMAC_L3SAM0 | GMAC_L3SAIM0);
1047 		value &= ~(GMAC_L3DAM0 | GMAC_L3DAIM0);
1048 		if (sa) {
1049 			value |= GMAC_L3SAM0;
1050 			if (inv)
1051 				value |= GMAC_L3SAIM0;
1052 		} else {
1053 			value |= GMAC_L3DAM0;
1054 			if (inv)
1055 				value |= GMAC_L3DAIM0;
1056 		}
1057 	} else {
1058 		value &= ~GMAC_L3PEN0;
1059 		if (sa) {
1060 			value |= GMAC_L3SAM0;
1061 			if (inv)
1062 				value |= GMAC_L3SAIM0;
1063 		} else {
1064 			value |= GMAC_L3DAM0;
1065 			if (inv)
1066 				value |= GMAC_L3DAIM0;
1067 		}
1068 	}
1069 
1070 	writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));
1071 
1072 	if (sa) {
1073 		writel(match, ioaddr + GMAC_L3_ADDR0(filter_no));
1074 	} else {
1075 		writel(match, ioaddr + GMAC_L3_ADDR1(filter_no));
1076 	}
1077 
1078 	if (!en)
1079 		writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));
1080 
1081 	return 0;
1082 }
1083 
1084 static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
1085 				   bool en, bool udp, bool sa, bool inv,
1086 				   u32 match)
1087 {
1088 	void __iomem *ioaddr = hw->pcsr;
1089 	u32 value;
1090 
1091 	value = readl(ioaddr + GMAC_PACKET_FILTER);
1092 	value |= GMAC_PACKET_FILTER_IPFE;
1093 	writel(value, ioaddr + GMAC_PACKET_FILTER);
1094 
1095 	value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));
1096 	if (udp) {
1097 		value |= GMAC_L4PEN0;
1098 	} else {
1099 		value &= ~GMAC_L4PEN0;
1100 	}
1101 
1102 	value &= ~(GMAC_L4SPM0 | GMAC_L4SPIM0);
1103 	value &= ~(GMAC_L4DPM0 | GMAC_L4DPIM0);
1104 	if (sa) {
1105 		value |= GMAC_L4SPM0;
1106 		if (inv)
1107 			value |= GMAC_L4SPIM0;
1108 	} else {
1109 		value |= GMAC_L4DPM0;
1110 		if (inv)
1111 			value |= GMAC_L4DPIM0;
1112 	}
1113 
1114 	writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));
1115 
1116 	if (sa) {
1117 		value = match & GMAC_L4SP0;
1118 	} else {
1119 		value = (match << GMAC_L4DP0_SHIFT) & GMAC_L4DP0;
1120 	}
1121 
1122 	writel(value, ioaddr + GMAC_L4_ADDR(filter_no));
1123 
1124 	if (!en)
1125 		writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));
1126 
1127 	return 0;
1128 }
1129 
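/*
 * Illustrative example for the L4 address packing above, assuming the
 * usual 16-bit source/destination port fields: matching destination port
 * 5001 programs GMAC_L4_ADDR(filter_no) with (5001 << GMAC_L4DP0_SHIFT),
 * while a source-port match of 5001 simply writes 5001 into the low
 * 16 bits.
 */
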
1130 const struct stmmac_ops dwmac4_ops = {
1131 	.core_init = dwmac4_core_init,
1132 	.set_mac = stmmac_set_mac,
1133 	.rx_ipc = dwmac4_rx_ipc_enable,
1134 	.rx_queue_enable = dwmac4_rx_queue_enable,
1135 	.rx_queue_prio = dwmac4_rx_queue_priority,
1136 	.tx_queue_prio = dwmac4_tx_queue_priority,
1137 	.rx_queue_routing = dwmac4_rx_queue_routing,
1138 	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
1139 	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
1140 	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
1141 	.map_mtl_to_dma = dwmac4_map_mtl_dma,
1142 	.config_cbs = dwmac4_config_cbs,
1143 	.dump_regs = dwmac4_dump_regs,
1144 	.host_irq_status = dwmac4_irq_status,
1145 	.host_mtl_irq_status = dwmac4_irq_mtl_status,
1146 	.flow_ctrl = dwmac4_flow_ctrl,
1147 	.pmt = dwmac4_pmt,
1148 	.set_umac_addr = dwmac4_set_umac_addr,
1149 	.get_umac_addr = dwmac4_get_umac_addr,
1150 	.set_eee_mode = dwmac4_set_eee_mode,
1151 	.reset_eee_mode = dwmac4_reset_eee_mode,
1152 	.set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
1153 	.set_eee_timer = dwmac4_set_eee_timer,
1154 	.set_eee_pls = dwmac4_set_eee_pls,
1155 	.pcs_ctrl_ane = dwmac4_ctrl_ane,
1156 	.pcs_rane = dwmac4_rane,
1157 	.pcs_get_adv_lp = dwmac4_get_adv_lp,
1158 	.debug = dwmac4_debug,
1159 	.set_filter = dwmac4_set_filter,
1160 	.set_mac_loopback = dwmac4_set_mac_loopback,
1161 	.update_vlan_hash = dwmac4_update_vlan_hash,
1162 	.sarc_configure = dwmac4_sarc_configure,
1163 	.enable_vlan = dwmac4_enable_vlan,
1164 	.set_arp_offload = dwmac4_set_arp_offload,
1165 	.config_l3_filter = dwmac4_config_l3_filter,
1166 	.config_l4_filter = dwmac4_config_l4_filter,
1167 	.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
1168 	.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
1169 	.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
1170 };
1171 
1172 const struct stmmac_ops dwmac410_ops = {
1173 	.core_init = dwmac4_core_init,
1174 	.set_mac = stmmac_dwmac4_set_mac,
1175 	.rx_ipc = dwmac4_rx_ipc_enable,
1176 	.rx_queue_enable = dwmac4_rx_queue_enable,
1177 	.rx_queue_prio = dwmac4_rx_queue_priority,
1178 	.tx_queue_prio = dwmac4_tx_queue_priority,
1179 	.rx_queue_routing = dwmac4_rx_queue_routing,
1180 	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
1181 	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
1182 	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
1183 	.map_mtl_to_dma = dwmac4_map_mtl_dma,
1184 	.config_cbs = dwmac4_config_cbs,
1185 	.dump_regs = dwmac4_dump_regs,
1186 	.host_irq_status = dwmac4_irq_status,
1187 	.host_mtl_irq_status = dwmac4_irq_mtl_status,
1188 	.flow_ctrl = dwmac4_flow_ctrl,
1189 	.pmt = dwmac4_pmt,
1190 	.set_umac_addr = dwmac4_set_umac_addr,
1191 	.get_umac_addr = dwmac4_get_umac_addr,
1192 	.set_eee_mode = dwmac4_set_eee_mode,
1193 	.reset_eee_mode = dwmac4_reset_eee_mode,
1194 	.set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
1195 	.set_eee_timer = dwmac4_set_eee_timer,
1196 	.set_eee_pls = dwmac4_set_eee_pls,
1197 	.pcs_ctrl_ane = dwmac4_ctrl_ane,
1198 	.pcs_rane = dwmac4_rane,
1199 	.pcs_get_adv_lp = dwmac4_get_adv_lp,
1200 	.debug = dwmac4_debug,
1201 	.set_filter = dwmac4_set_filter,
1202 	.flex_pps_config = dwmac5_flex_pps_config,
1203 	.set_mac_loopback = dwmac4_set_mac_loopback,
1204 	.update_vlan_hash = dwmac4_update_vlan_hash,
1205 	.sarc_configure = dwmac4_sarc_configure,
1206 	.enable_vlan = dwmac4_enable_vlan,
1207 	.set_arp_offload = dwmac4_set_arp_offload,
1208 	.config_l3_filter = dwmac4_config_l3_filter,
1209 	.config_l4_filter = dwmac4_config_l4_filter,
1210 	.est_configure = dwmac5_est_configure,
1211 	.est_irq_status = dwmac5_est_irq_status,
1212 	.fpe_configure = dwmac5_fpe_configure,
1213 	.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
1214 	.fpe_irq_status = dwmac5_fpe_irq_status,
1215 	.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
1216 	.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
1217 	.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
1218 };
1219 
1220 const struct stmmac_ops dwmac510_ops = {
1221 	.core_init = dwmac4_core_init,
1222 	.set_mac = stmmac_dwmac4_set_mac,
1223 	.rx_ipc = dwmac4_rx_ipc_enable,
1224 	.rx_queue_enable = dwmac4_rx_queue_enable,
1225 	.rx_queue_prio = dwmac4_rx_queue_priority,
1226 	.tx_queue_prio = dwmac4_tx_queue_priority,
1227 	.rx_queue_routing = dwmac4_rx_queue_routing,
1228 	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
1229 	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
1230 	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
1231 	.map_mtl_to_dma = dwmac4_map_mtl_dma,
1232 	.config_cbs = dwmac4_config_cbs,
1233 	.dump_regs = dwmac4_dump_regs,
1234 	.host_irq_status = dwmac4_irq_status,
1235 	.host_mtl_irq_status = dwmac4_irq_mtl_status,
1236 	.flow_ctrl = dwmac4_flow_ctrl,
1237 	.pmt = dwmac4_pmt,
1238 	.set_umac_addr = dwmac4_set_umac_addr,
1239 	.get_umac_addr = dwmac4_get_umac_addr,
1240 	.set_eee_mode = dwmac4_set_eee_mode,
1241 	.reset_eee_mode = dwmac4_reset_eee_mode,
1242 	.set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
1243 	.set_eee_timer = dwmac4_set_eee_timer,
1244 	.set_eee_pls = dwmac4_set_eee_pls,
1245 	.pcs_ctrl_ane = dwmac4_ctrl_ane,
1246 	.pcs_rane = dwmac4_rane,
1247 	.pcs_get_adv_lp = dwmac4_get_adv_lp,
1248 	.debug = dwmac4_debug,
1249 	.set_filter = dwmac4_set_filter,
1250 	.safety_feat_config = dwmac5_safety_feat_config,
1251 	.safety_feat_irq_status = dwmac5_safety_feat_irq_status,
1252 	.safety_feat_dump = dwmac5_safety_feat_dump,
1253 	.rxp_config = dwmac5_rxp_config,
1254 	.flex_pps_config = dwmac5_flex_pps_config,
1255 	.set_mac_loopback = dwmac4_set_mac_loopback,
1256 	.update_vlan_hash = dwmac4_update_vlan_hash,
1257 	.sarc_configure = dwmac4_sarc_configure,
1258 	.enable_vlan = dwmac4_enable_vlan,
1259 	.set_arp_offload = dwmac4_set_arp_offload,
1260 	.config_l3_filter = dwmac4_config_l3_filter,
1261 	.config_l4_filter = dwmac4_config_l4_filter,
1262 	.est_configure = dwmac5_est_configure,
1263 	.est_irq_status = dwmac5_est_irq_status,
1264 	.fpe_configure = dwmac5_fpe_configure,
1265 	.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
1266 	.fpe_irq_status = dwmac5_fpe_irq_status,
1267 	.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
1268 	.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
1269 	.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
1270 };
1271 
1272 static u32 dwmac4_get_num_vlan(void __iomem *ioaddr)
1273 {
1274 	u32 val, num_vlan;
1275 
1276 	val = readl(ioaddr + GMAC_HW_FEATURE3);
1277 	switch (val & GMAC_HW_FEAT_NRVF) {
1278 	case 0:
1279 		num_vlan = 1;
1280 		break;
1281 	case 1:
1282 		num_vlan = 4;
1283 		break;
1284 	case 2:
1285 		num_vlan = 8;
1286 		break;
1287 	case 3:
1288 		num_vlan = 16;
1289 		break;
1290 	case 4:
1291 		num_vlan = 24;
1292 		break;
1293 	case 5:
1294 		num_vlan = 32;
1295 		break;
1296 	default:
1297 		num_vlan = 1;
1298 	}
1299 
1300 	return num_vlan;
1301 }
1302 
1303 int dwmac4_setup(struct stmmac_priv *priv)
1304 {
1305 	struct mac_device_info *mac = priv->hw;
1306 
1307 	dev_info(priv->device, "\tDWMAC4/5\n");
1308 
1309 	priv->dev->priv_flags |= IFF_UNICAST_FLT;
1310 	mac->pcsr = priv->ioaddr;
1311 	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
1312 	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
1313 	mac->mcast_bits_log2 = 0;
1314 
1315 	if (mac->multicast_filter_bins)
1316 		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
1317 
1318 	mac->link.duplex = GMAC_CONFIG_DM;
1319 	mac->link.speed10 = GMAC_CONFIG_PS;
1320 	mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
1321 	mac->link.speed1000 = 0;
1322 	mac->link.speed2500 = GMAC_CONFIG_FES;
1323 	mac->link.speed_mask = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
1324 	mac->mii.addr = GMAC_MDIO_ADDR;
1325 	mac->mii.data = GMAC_MDIO_DATA;
1326 	mac->mii.addr_shift = 21;
1327 	mac->mii.addr_mask = GENMASK(25, 21);
1328 	mac->mii.reg_shift = 16;
1329 	mac->mii.reg_mask = GENMASK(20, 16);
1330 	mac->mii.clk_csr_shift = 8;
1331 	mac->mii.clk_csr_mask = GENMASK(11, 8);
1332 	mac->num_vlan = dwmac4_get_num_vlan(priv->ioaddr);
1333 
1334 	return 0;
1335 }
1336
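
/*
 * Sketch of how the MDIO fields initialised above are consumed (assuming
 * the generic stmmac MDIO path, and ignoring the command/clause control
 * bits it also sets): a clause-22 read of register 2 on PHY address 1
 * would compose the address word roughly as
 *
 *	(1 << mac->mii.addr_shift) | (2 << mac->mii.reg_shift) |
 *	(csr_clk_sel << mac->mii.clk_csr_shift)
 *
 * with each field limited by the corresponding GENMASK above.
 */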