1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
4 * DWC Ether MAC version 4.00 has been used for developing this code.
5 *
6 * This only implements the MAC core functions for this chip.
7 *
8 * Copyright (C) 2015 STMicroelectronics Ltd
9 *
10 * Author: Alexandre Torgue <alexandre.torgue@st.com>
11 */
12
13 #include <linux/crc32.h>
14 #include <linux/slab.h>
15 #include <linux/ethtool.h>
16 #include <linux/io.h>
17 #include <net/dsa.h>
18 #include "stmmac.h"
19 #include "stmmac_pcs.h"
20 #include "dwmac4.h"
21 #include "dwmac5.h"
22
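/* Program the MAC core defaults into GMAC_CONFIG, force the speed
 * selection bits when a fixed speed is requested through hw->ps and
 * enable the default MAC (and, if present, PCS) interrupts.
 */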
23 static void dwmac4_core_init(struct mac_device_info *hw,
24 struct net_device *dev)
25 {
26 void __iomem *ioaddr = hw->pcsr;
27 u32 value = readl(ioaddr + GMAC_CONFIG);
28
29 value |= GMAC_CORE_INIT;
30
31 if (hw->ps) {
32 value |= GMAC_CONFIG_TE;
33
34 value &= hw->link.speed_mask;
35 switch (hw->ps) {
36 case SPEED_1000:
37 value |= hw->link.speed1000;
38 break;
39 case SPEED_100:
40 value |= hw->link.speed100;
41 break;
42 case SPEED_10:
43 value |= hw->link.speed10;
44 break;
45 }
46 }
47
48 writel(value, ioaddr + GMAC_CONFIG);
49
50 /* Enable GMAC interrupts */
51 value = GMAC_INT_DEFAULT_ENABLE;
52
53 if (hw->pcs)
54 value |= GMAC_PCS_IRQ_DEFAULT;
55
56 writel(value, ioaddr + GMAC_INT_EN);
57 }
58
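/* Enable RX queue @queue in GMAC_RXQ_CTRL0 and mark it as AVB or DCB. */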
59 static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
60 u8 mode, u32 queue)
61 {
62 void __iomem *ioaddr = hw->pcsr;
63 u32 value = readl(ioaddr + GMAC_RXQ_CTRL0);
64
65 value &= GMAC_RX_QUEUE_CLEAR(queue);
66 if (mode == MTL_QUEUE_AVB)
67 value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
68 else if (mode == MTL_QUEUE_DCB)
69 value |= GMAC_RX_DCB_QUEUE_ENABLE(queue);
70
71 writel(value, ioaddr + GMAC_RXQ_CTRL0);
72 }
73
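/* Map the priorities in @prio to RX queue @queue. Queues 0-3 are
 * programmed through GMAC_RXQ_CTRL2, queues 4-7 through GMAC_RXQ_CTRL3.
 */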
74 static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
75 u32 prio, u32 queue)
76 {
77 void __iomem *ioaddr = hw->pcsr;
78 u32 base_register;
79 u32 value;
80
81 base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
82 if (queue >= 4)
83 queue -= 4;
84
85 value = readl(ioaddr + base_register);
86
87 value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue);
88 value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
89 GMAC_RXQCTRL_PSRQX_MASK(queue);
90 writel(value, ioaddr + base_register);
91 }
92
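/* Map the priorities in @prio to TX queue @queue through the
 * GMAC_TXQ_PRTY_MAP0/1 registers.
 */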
93 static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
94 u32 prio, u32 queue)
95 {
96 void __iomem *ioaddr = hw->pcsr;
97 u32 base_register;
98 u32 value;
99
100 base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
101 if (queue >= 4)
102 queue -= 4;
103
104 value = readl(ioaddr + base_register);
105
106 value &= ~GMAC_TXQCTRL_PSTQX_MASK(queue);
107 value |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) &
108 GMAC_TXQCTRL_PSTQX_MASK(queue);
109
110 writel(value, ioaddr + base_register);
111 }
112
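/* Route a packet class (AV control, PTP, DCB control, untagged or
 * multicast/broadcast) to RX queue @queue through GMAC_RXQ_CTRL1.
 * AV control and multicast/broadcast routing also need their own
 * enable bits.
 */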
113 static void dwmac4_rx_queue_routing(struct mac_device_info *hw,
114 u8 packet, u32 queue)
115 {
116 void __iomem *ioaddr = hw->pcsr;
117 u32 value;
118
119 static const struct stmmac_rx_routing route_possibilities[] = {
120 { GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT },
121 { GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT },
122 { GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT },
123 { GMAC_RXQCTRL_UPQ_MASK, GMAC_RXQCTRL_UPQ_SHIFT },
124 { GMAC_RXQCTRL_MCBCQ_MASK, GMAC_RXQCTRL_MCBCQ_SHIFT },
125 };
126
127 value = readl(ioaddr + GMAC_RXQ_CTRL1);
128
129 /* routing configuration */
130 value &= ~route_possibilities[packet - 1].reg_mask;
131 value |= (queue << route_possibilities[packet - 1].reg_shift) &
132 route_possibilities[packet - 1].reg_mask;
133
134 /* some packets require extra ops */
135 if (packet == PACKET_AVCPQ) {
136 value &= ~GMAC_RXQCTRL_TACPQE;
137 value |= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT;
138 } else if (packet == PACKET_MCBCQ) {
139 value &= ~GMAC_RXQCTRL_MCBCQEN;
140 value |= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT;
141 }
142
143 writel(value, ioaddr + GMAC_RXQ_CTRL1);
144 }
145
146 static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw,
147 u32 rx_alg)
148 {
149 void __iomem *ioaddr = hw->pcsr;
150 u32 value = readl(ioaddr + MTL_OPERATION_MODE);
151
152 value &= ~MTL_OPERATION_RAA;
153 switch (rx_alg) {
154 case MTL_RX_ALGORITHM_SP:
155 value |= MTL_OPERATION_RAA_SP;
156 break;
157 case MTL_RX_ALGORITHM_WSP:
158 value |= MTL_OPERATION_RAA_WSP;
159 break;
160 default:
161 break;
162 }
163
164 writel(value, ioaddr + MTL_OPERATION_MODE);
165 }
166
167 static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw,
168 u32 tx_alg)
169 {
170 void __iomem *ioaddr = hw->pcsr;
171 u32 value = readl(ioaddr + MTL_OPERATION_MODE);
172
173 value &= ~MTL_OPERATION_SCHALG_MASK;
174 switch (tx_alg) {
175 case MTL_TX_ALGORITHM_WRR:
176 value |= MTL_OPERATION_SCHALG_WRR;
177 break;
178 case MTL_TX_ALGORITHM_WFQ:
179 value |= MTL_OPERATION_SCHALG_WFQ;
180 break;
181 case MTL_TX_ALGORITHM_DWRR:
182 value |= MTL_OPERATION_SCHALG_DWRR;
183 break;
184 case MTL_TX_ALGORITHM_SP:
185 value |= MTL_OPERATION_SCHALG_SP;
186 break;
187 default:
188 break;
189 }
190
191 writel(value, ioaddr + MTL_OPERATION_MODE);
192 }
193
194 static void dwmac4_set_mtl_tx_queue_weight(struct mac_device_info *hw,
195 u32 weight, u32 queue)
196 {
197 void __iomem *ioaddr = hw->pcsr;
198 u32 value = readl(ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
199
200 value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK;
201 value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK;
202 writel(value, ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
203 }
204
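/* Map RX queue @queue to DMA channel @chan. Queues 0-3 live in
 * MTL_RXQ_DMA_MAP0, queues 4-7 in MTL_RXQ_DMA_MAP1.
 */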
205 static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
206 {
207 void __iomem *ioaddr = hw->pcsr;
208 u32 value;
209
210 if (queue < 4)
211 value = readl(ioaddr + MTL_RXQ_DMA_MAP0);
212 else
213 value = readl(ioaddr + MTL_RXQ_DMA_MAP1);
214
215 if (queue == 0 || queue == 4) {
216 value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK;
217 value |= MTL_RXQ_DMA_Q04MDMACH(chan);
218 } else {
219 value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
220 value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
221 }
222
223 if (queue < 4)
224 writel(value, ioaddr + MTL_RXQ_DMA_MAP0);
225 else
226 writel(value, ioaddr + MTL_RXQ_DMA_MAP1);
227 }
228
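/* Configure the Credit Based Shaper of an AVB TX queue: enable the AV
 * algorithm and credit control, then program the send slope, the idle
 * slope (shared with the TX queue weight register) and the high/low
 * credits.
 */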
229 static void dwmac4_config_cbs(struct mac_device_info *hw,
230 u32 send_slope, u32 idle_slope,
231 u32 high_credit, u32 low_credit, u32 queue)
232 {
233 void __iomem *ioaddr = hw->pcsr;
234 u32 value;
235
236 pr_debug("Queue %d configured as AVB. Parameters:\n", queue);
237 pr_debug("\tsend_slope: 0x%08x\n", send_slope);
238 pr_debug("\tidle_slope: 0x%08x\n", idle_slope);
239 pr_debug("\thigh_credit: 0x%08x\n", high_credit);
240 pr_debug("\tlow_credit: 0x%08x\n", low_credit);
241
242 /* enable AV algorithm */
243 value = readl(ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
244 value |= MTL_ETS_CTRL_AVALG;
245 value |= MTL_ETS_CTRL_CC;
246 writel(value, ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
247
248 /* configure send slope */
249 value = readl(ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
250 value &= ~MTL_SEND_SLP_CRED_SSC_MASK;
251 value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK;
252 writel(value, ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
253
254 /* configure idle slope (same register as tx weight) */
255 dwmac4_set_mtl_tx_queue_weight(hw, idle_slope, queue);
256
257 /* configure high credit */
258 value = readl(ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
259 value &= ~MTL_HIGH_CRED_HC_MASK;
260 value |= high_credit & MTL_HIGH_CRED_HC_MASK;
261 writel(value, ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
262
263 /* configure low credit */
264 value = readl(ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
265 value &= ~MTL_HIGH_CRED_LC_MASK;
266 value |= low_credit & MTL_HIGH_CRED_LC_MASK;
267 writel(value, ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
268 }
269
270 static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space)
271 {
272 void __iomem *ioaddr = hw->pcsr;
273 int i;
274
275 for (i = 0; i < GMAC_REG_NUM; i++)
276 reg_space[i] = readl(ioaddr + i * 4);
277 }
278
279 static int dwmac4_rx_ipc_enable(struct mac_device_info *hw)
280 {
281 void __iomem *ioaddr = hw->pcsr;
282 u32 value = readl(ioaddr + GMAC_CONFIG);
283
284 if (hw->rx_csum)
285 value |= GMAC_CONFIG_IPC;
286 else
287 value &= ~GMAC_CONFIG_IPC;
288
289 writel(value, ioaddr + GMAC_CONFIG);
290
291 value = readl(ioaddr + GMAC_CONFIG);
292
293 return !!(value & GMAC_CONFIG_IPC);
294 }
295
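/* Set up Wake-on-LAN through the PMT register: magic packet and/or
 * global unicast wake-up. The receiver is kept enabled so that wake-up
 * frames can still be detected while powered down.
 */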
296 static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
297 {
298 void __iomem *ioaddr = hw->pcsr;
299 unsigned int pmt = 0;
300 u32 config;
301
302 if (mode & WAKE_MAGIC) {
303 pr_debug("GMAC: WOL Magic frame\n");
304 pmt |= power_down | magic_pkt_en;
305 }
306 if (mode & WAKE_UCAST) {
307 pr_debug("GMAC: WOL on global unicast\n");
308 pmt |= power_down | global_unicast | wake_up_frame_en;
309 }
310
311 if (pmt) {
312 /* The receiver must be enabled for WOL before powering down */
313 config = readl(ioaddr + GMAC_CONFIG);
314 config |= GMAC_CONFIG_RE;
315 writel(config, ioaddr + GMAC_CONFIG);
316 }
317 writel(pmt, ioaddr + GMAC_PMT);
318 }
319
320 static void dwmac4_set_umac_addr(struct mac_device_info *hw,
321 unsigned char *addr, unsigned int reg_n)
322 {
323 void __iomem *ioaddr = hw->pcsr;
324
325 stmmac_dwmac4_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
326 GMAC_ADDR_LOW(reg_n));
327 }
328
329 static void dwmac4_get_umac_addr(struct mac_device_info *hw,
330 unsigned char *addr, unsigned int reg_n)
331 {
332 void __iomem *ioaddr = hw->pcsr;
333
334 stmmac_dwmac4_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
335 GMAC_ADDR_LOW(reg_n));
336 }
337
338 static void dwmac4_set_eee_mode(struct mac_device_info *hw,
339 bool en_tx_lpi_clockgating)
340 {
341 void __iomem *ioaddr = hw->pcsr;
342 u32 value;
343
344 /* Enable the link status receive on the RGMII, SGMII or SMII
345 * receive path and instruct the transmitter to enter the LPI
346 * state.
347 */
348 value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
349 value |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;
350
351 if (en_tx_lpi_clockgating)
352 value |= GMAC4_LPI_CTRL_STATUS_LPITCSE;
353
354 writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
355 }
356
357 static void dwmac4_reset_eee_mode(struct mac_device_info *hw)
358 {
359 void __iomem *ioaddr = hw->pcsr;
360 u32 value;
361
362 value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
363 value &= ~(GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA);
364 writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
365 }
366
367 static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
368 {
369 void __iomem *ioaddr = hw->pcsr;
370 u32 value;
371
372 value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
373
374 if (link)
375 value |= GMAC4_LPI_CTRL_STATUS_PLS;
376 else
377 value &= ~GMAC4_LPI_CTRL_STATUS_PLS;
378
379 writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
380 }
381
382 static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
383 {
384 void __iomem *ioaddr = hw->pcsr;
385 int value = ((tw & 0xffff)) | ((ls & 0x3ff) << 16);
386
387 /* Program the timers in the LPI timer control register:
388 * LS: minimum time (ms) for which the link
389 * status from PHY should be ok before transmitting
390 * the LPI pattern.
391 * TW: minimum time (us) for which the core waits
392 * after it has stopped transmitting the LPI pattern.
393 */
394 writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL);
395 }
396
397 static void dwmac4_write_single_vlan(struct net_device *dev, u16 vid)
398 {
399 void __iomem *ioaddr = (void __iomem *)dev->base_addr;
400 u32 val;
401
402 val = readl(ioaddr + GMAC_VLAN_TAG);
403 val &= ~GMAC_VLAN_TAG_VID;
404 val |= GMAC_VLAN_TAG_ETV | vid;
405
406 writel(val, ioaddr + GMAC_VLAN_TAG);
407 }
408
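/* Write one entry of the extended VLAN filter through the indirect
 * GMAC_VLAN_TAG_DATA/GMAC_VLAN_TAG interface and poll the OB
 * (operation busy) bit until the hardware completes the access.
 */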
409 static int dwmac4_write_vlan_filter(struct net_device *dev,
410 struct mac_device_info *hw,
411 u8 index, u32 data)
412 {
413 void __iomem *ioaddr = (void __iomem *)dev->base_addr;
414 int i, timeout = 10;
415 u32 val;
416
417 if (index >= hw->num_vlan)
418 return -EINVAL;
419
420 writel(data, ioaddr + GMAC_VLAN_TAG_DATA);
421
422 val = readl(ioaddr + GMAC_VLAN_TAG);
423 val &= ~(GMAC_VLAN_TAG_CTRL_OFS_MASK |
424 GMAC_VLAN_TAG_CTRL_CT |
425 GMAC_VLAN_TAG_CTRL_OB);
426 val |= (index << GMAC_VLAN_TAG_CTRL_OFS_SHIFT) | GMAC_VLAN_TAG_CTRL_OB;
427
428 writel(val, ioaddr + GMAC_VLAN_TAG);
429
430 for (i = 0; i < timeout; i++) {
431 val = readl(ioaddr + GMAC_VLAN_TAG);
432 if (!(val & GMAC_VLAN_TAG_CTRL_OB))
433 return 0;
434 udelay(1);
435 }
436
437 netdev_err(dev, "Timeout accessing MAC_VLAN_Tag_Filter\n");
438
439 return -EBUSY;
440 }
441
442 static int dwmac4_add_hw_vlan_rx_fltr(struct net_device *dev,
443 struct mac_device_info *hw,
444 __be16 proto, u16 vid)
445 {
446 int index = -1;
447 u32 val = 0;
448 int i, ret;
449
450 if (vid > 4095)
451 return -EINVAL;
452
453 if (hw->promisc) {
454 netdev_err(dev,
455 "Adding VLAN in promisc mode not supported\n");
456 return -EPERM;
457 }
458
459 /* Single Rx VLAN Filter */
460 if (hw->num_vlan == 1) {
461 /* For single VLAN filter, VID 0 means VLAN promiscuous */
462 if (vid == 0) {
463 netdev_warn(dev, "Adding VLAN ID 0 is not supported\n");
464 return -EPERM;
465 }
466
467 if (hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) {
468 netdev_err(dev, "Only single VLAN ID supported\n");
469 return -EPERM;
470 }
471
472 hw->vlan_filter[0] = vid;
473 dwmac4_write_single_vlan(dev, vid);
474
475 return 0;
476 }
477
478 /* Extended Rx VLAN Filter Enable */
479 val |= GMAC_VLAN_TAG_DATA_ETV | GMAC_VLAN_TAG_DATA_VEN | vid;
480
481 for (i = 0; i < hw->num_vlan; i++) {
482 if (hw->vlan_filter[i] == val)
483 return 0;
484 else if (!(hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN))
485 index = i;
486 }
487
488 if (index == -1) {
489 netdev_err(dev, "MAC_VLAN_Tag_Filter full (size: %0u)\n",
490 hw->num_vlan);
491 return -EPERM;
492 }
493
494 ret = dwmac4_write_vlan_filter(dev, hw, index, val);
495
496 if (!ret)
497 hw->vlan_filter[index] = val;
498
499 return ret;
500 }
501
502 static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev,
503 struct mac_device_info *hw,
504 __be16 proto, u16 vid)
505 {
506 int i, ret = 0;
507
508 if (hw->promisc) {
509 netdev_err(dev,
510 "Deleting VLAN in promisc mode not supported\n");
511 return -EPERM;
512 }
513
514 /* Single Rx VLAN Filter */
515 if (hw->num_vlan == 1) {
516 if ((hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) == vid) {
517 hw->vlan_filter[0] = 0;
518 dwmac4_write_single_vlan(dev, 0);
519 }
520 return 0;
521 }
522
523 /* Extended Rx VLAN Filter Enable */
524 for (i = 0; i < hw->num_vlan; i++) {
525 if ((hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VID) == vid) {
526 ret = dwmac4_write_vlan_filter(dev, hw, i, 0);
527
528 if (!ret)
529 hw->vlan_filter[i] = 0;
530 else
531 return ret;
532 }
533 }
534
535 return ret;
536 }
537
538 static void dwmac4_vlan_promisc_enable(struct net_device *dev,
539 struct mac_device_info *hw)
540 {
541 void __iomem *ioaddr = hw->pcsr;
542 u32 value;
543 u32 hash;
544 u32 val;
545 int i;
546
547 /* Single Rx VLAN Filter */
548 if (hw->num_vlan == 1) {
549 dwmac4_write_single_vlan(dev, 0);
550 return;
551 }
552
553 /* Extended Rx VLAN Filter Enable */
554 for (i = 0; i < hw->num_vlan; i++) {
555 if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) {
556 val = hw->vlan_filter[i] & ~GMAC_VLAN_TAG_DATA_VEN;
557 dwmac4_write_vlan_filter(dev, hw, i, val);
558 }
559 }
560
561 hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE);
562 if (hash & GMAC_VLAN_VLHT) {
563 value = readl(ioaddr + GMAC_VLAN_TAG);
564 if (value & GMAC_VLAN_VTHM) {
565 value &= ~GMAC_VLAN_VTHM;
566 writel(value, ioaddr + GMAC_VLAN_TAG);
567 }
568 }
569 }
570
571 static void dwmac4_restore_hw_vlan_rx_fltr(struct net_device *dev,
572 struct mac_device_info *hw)
573 {
574 void __iomem *ioaddr = hw->pcsr;
575 u32 value;
576 u32 hash;
577 u32 val;
578 int i;
579
580 /* Single Rx VLAN Filter */
581 if (hw->num_vlan == 1) {
582 dwmac4_write_single_vlan(dev, hw->vlan_filter[0]);
583 return;
584 }
585
586 /* Extended Rx VLAN Filter Enable */
587 for (i = 0; i < hw->num_vlan; i++) {
588 if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) {
589 val = hw->vlan_filter[i];
590 dwmac4_write_vlan_filter(dev, hw, i, val);
591 }
592 }
593
594 hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE);
595 if (hash & GMAC_VLAN_VLHT) {
596 value = readl(ioaddr + GMAC_VLAN_TAG);
597 value |= GMAC_VLAN_VTHM;
598 writel(value, ioaddr + GMAC_VLAN_TAG);
599 }
600 }
601
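/* Program the RX packet filter: promiscuous and all-multicast modes,
 * CRC32 based multicast hash filtering, perfect unicast filtering and
 * VLAN tag filtering.
 */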
602 static void dwmac4_set_filter(struct mac_device_info *hw,
603 struct net_device *dev)
604 {
605 void __iomem *ioaddr = (void __iomem *)dev->base_addr;
606 int numhashregs = (hw->multicast_filter_bins >> 5);
607 int mcbitslog2 = hw->mcast_bits_log2;
608 unsigned int value;
609 u32 mc_filter[8];
610 int i;
611
612 memset(mc_filter, 0, sizeof(mc_filter));
613
614 value = readl(ioaddr + GMAC_PACKET_FILTER);
615 value &= ~GMAC_PACKET_FILTER_HMC;
616 value &= ~GMAC_PACKET_FILTER_HPF;
617 value &= ~GMAC_PACKET_FILTER_PCF;
618 value &= ~GMAC_PACKET_FILTER_PM;
619 value &= ~GMAC_PACKET_FILTER_PR;
620 value &= ~GMAC_PACKET_FILTER_RA;
621 if (dev->flags & IFF_PROMISC) {
622 /* VLAN Tag Filter Fail Packets Queuing */
623 if (hw->vlan_fail_q_en) {
624 value = readl(ioaddr + GMAC_RXQ_CTRL4);
625 value &= ~GMAC_RXQCTRL_VFFQ_MASK;
626 value |= GMAC_RXQCTRL_VFFQE |
627 (hw->vlan_fail_q << GMAC_RXQCTRL_VFFQ_SHIFT);
628 writel(value, ioaddr + GMAC_RXQ_CTRL4);
629 value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_RA;
630 } else {
631 value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_PCF;
632 }
633
634 } else if ((dev->flags & IFF_ALLMULTI) ||
635 (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
636 /* Pass all multi */
637 value |= GMAC_PACKET_FILTER_PM;
638 /* Set all the bits of the HASH tab */
639 memset(mc_filter, 0xff, sizeof(mc_filter));
640 } else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
641 struct netdev_hw_addr *ha;
642
643 /* Hash filter for multicast */
644 value |= GMAC_PACKET_FILTER_HMC;
645
646 netdev_for_each_mc_addr(ha, dev) {
647 /* The upper n bits of the calculated CRC are used to
648 * index the contents of the hash table. The number of
649 * bits used depends on the hardware configuration
650 * selected at core configuration time.
651 */
652 u32 bit_nr = bitrev32(~crc32_le(~0, ha->addr,
653 ETH_ALEN)) >> (32 - mcbitslog2);
654 /* The most significant bit determines the register to
655 * use (H/L) while the other 5 bits determine the bit
656 * within the register.
657 */
658 mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1f));
659 }
660 }
661
662 for (i = 0; i < numhashregs; i++)
663 writel(mc_filter[i], ioaddr + GMAC_HASH_TAB(i));
664
665 value |= GMAC_PACKET_FILTER_HPF;
666
667 /* Handle multiple unicast addresses */
668 if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
669 /* Switch to promiscuous mode if more than 128 addrs
670 * are required
671 */
672 value |= GMAC_PACKET_FILTER_PR;
673 } else {
674 struct netdev_hw_addr *ha;
675 int reg = 1;
676
677 netdev_for_each_uc_addr(ha, dev) {
678 dwmac4_set_umac_addr(hw, ha->addr, reg);
679 reg++;
680 }
681
682 while (reg < GMAC_MAX_PERFECT_ADDRESSES) {
683 writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
684 writel(0, ioaddr + GMAC_ADDR_LOW(reg));
685 reg++;
686 }
687 }
688
689 /* VLAN filtering */
690 if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
691 value |= GMAC_PACKET_FILTER_VTFE;
692
693 writel(value, ioaddr + GMAC_PACKET_FILTER);
694
695 if (dev->flags & IFF_PROMISC && !hw->vlan_fail_q_en) {
696 if (!hw->promisc) {
697 hw->promisc = 1;
698 dwmac4_vlan_promisc_enable(dev, hw);
699 }
700 } else {
701 if (hw->promisc) {
702 hw->promisc = 0;
703 dwmac4_restore_hw_vlan_rx_fltr(dev, hw);
704 }
705 }
706 }
707
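/* Configure 802.3x flow control: RX pause acceptance in
 * GMAC_RX_FLOW_CTRL and per-TX-queue pause frame generation with the
 * given pause time.
 */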
708 static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
709 unsigned int fc, unsigned int pause_time,
710 u32 tx_cnt)
711 {
712 void __iomem *ioaddr = hw->pcsr;
713 unsigned int flow = 0;
714 u32 queue = 0;
715
716 pr_debug("GMAC Flow-Control:\n");
717 if (fc & FLOW_RX) {
718 pr_debug("\tReceive Flow-Control ON\n");
719 flow |= GMAC_RX_FLOW_CTRL_RFE;
720 }
721 writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);
722
723 if (fc & FLOW_TX) {
724 pr_debug("\tTransmit Flow-Control ON\n");
725
726 if (duplex)
727 pr_debug("\tduplex mode: PAUSE %d\n", pause_time);
728
729 for (queue = 0; queue < tx_cnt; queue++) {
730 flow = GMAC_TX_FLOW_CTRL_TFE;
731
732 if (duplex)
733 flow |=
734 (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);
735
736 writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
737 }
738 } else {
739 for (queue = 0; queue < tx_cnt; queue++)
740 writel(0, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
741 }
742 }
743
744 static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
745 bool loopback)
746 {
747 dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
748 }
749
750 static void dwmac4_rane(void __iomem *ioaddr, bool restart)
751 {
752 dwmac_rane(ioaddr, GMAC_PCS_BASE, restart);
753 }
754
755 static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
756 {
757 dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
758 }
759
760 /* RGMII or SMII interface */
761 static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x)
762 {
763 u32 status;
764
765 status = readl(ioaddr + GMAC_PHYIF_CONTROL_STATUS);
766 x->irq_rgmii_n++;
767
768 /* Check the link status */
769 if (status & GMAC_PHYIF_CTRLSTATUS_LNKSTS) {
770 int speed_value;
771
772 x->pcs_link = 1;
773
774 speed_value = ((status & GMAC_PHYIF_CTRLSTATUS_SPEED) >>
775 GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT);
776 if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_125)
777 x->pcs_speed = SPEED_1000;
778 else if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_25)
779 x->pcs_speed = SPEED_100;
780 else
781 x->pcs_speed = SPEED_10;
782
783 x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK);
784
785 pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
786 x->pcs_duplex ? "Full" : "Half");
787 } else {
788 x->pcs_link = 0;
789 pr_info("Link is Down\n");
790 }
791 }
792
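/* Check the per-queue MTL interrupt status and acknowledge RX overflow
 * events on channel @chan.
 */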
793 static int dwmac4_irq_mtl_status(struct mac_device_info *hw, u32 chan)
794 {
795 void __iomem *ioaddr = hw->pcsr;
796 u32 mtl_int_qx_status;
797 int ret = 0;
798
799 mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);
800
801 /* Check MTL Interrupt */
802 if (mtl_int_qx_status & MTL_INT_QX(chan)) {
803 /* read Queue x Interrupt status */
804 u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(chan));
805
806 if (status & MTL_RX_OVERFLOW_INT) {
807 /* clear Interrupt */
808 writel(status | MTL_RX_OVERFLOW_INT,
809 ioaddr + MTL_CHAN_INT_CTRL(chan));
810 ret = CORE_IRQ_MTL_RX_OVERFLOW;
811 }
812 }
813
814 return ret;
815 }
816
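/* Check and acknowledge MAC level interrupts: MMC, PMT wake-up,
 * EEE/LPI state changes and PCS/RGMII link events.
 */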
817 static int dwmac4_irq_status(struct mac_device_info *hw,
818 struct stmmac_extra_stats *x)
819 {
820 void __iomem *ioaddr = hw->pcsr;
821 u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
822 u32 intr_enable = readl(ioaddr + GMAC_INT_EN);
823 int ret = 0;
824
825 /* Discard disabled bits */
826 intr_status &= intr_enable;
827
828 /* Unused events (e.g. MMC interrupts) are only counted, not handled. */
829 if (unlikely(intr_status & mmc_tx_irq))
830 x->mmc_tx_irq_n++;
831 if (unlikely(intr_status & mmc_rx_irq))
832 x->mmc_rx_irq_n++;
833 if (unlikely(intr_status & mmc_rx_csum_offload_irq))
834 x->mmc_rx_csum_offload_irq_n++;
835 /* Clear the PMT bits 5 and 6 by reading the PMT status reg */
836 if (unlikely(intr_status & pmt_irq)) {
837 readl(ioaddr + GMAC_PMT);
838 x->irq_receive_pmt_irq_n++;
839 }
840
841 /* MAC tx/rx EEE LPI entry/exit interrupts */
842 if (intr_status & lpi_irq) {
843 /* Clear LPI interrupt by reading MAC_LPI_Control_Status */
844 u32 status = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
845
846 if (status & GMAC4_LPI_CTRL_STATUS_TLPIEN) {
847 ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
848 x->irq_tx_path_in_lpi_mode_n++;
849 }
850 if (status & GMAC4_LPI_CTRL_STATUS_TLPIEX) {
851 ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
852 x->irq_tx_path_exit_lpi_mode_n++;
853 }
854 if (status & GMAC4_LPI_CTRL_STATUS_RLPIEN)
855 x->irq_rx_path_in_lpi_mode_n++;
856 if (status & GMAC4_LPI_CTRL_STATUS_RLPIEX)
857 x->irq_rx_path_exit_lpi_mode_n++;
858 }
859
860 dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
861 if (intr_status & PCS_RGSMIIIS_IRQ)
862 dwmac4_phystatus(ioaddr, x);
863
864 return ret;
865 }
866
867 static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
868 u32 rx_queues, u32 tx_queues)
869 {
870 u32 value;
871 u32 queue;
872
873 for (queue = 0; queue < tx_queues; queue++) {
874 value = readl(ioaddr + MTL_CHAN_TX_DEBUG(queue));
875
876 if (value & MTL_DEBUG_TXSTSFSTS)
877 x->mtl_tx_status_fifo_full++;
878 if (value & MTL_DEBUG_TXFSTS)
879 x->mtl_tx_fifo_not_empty++;
880 if (value & MTL_DEBUG_TWCSTS)
881 x->mmtl_fifo_ctrl++;
882 if (value & MTL_DEBUG_TRCSTS_MASK) {
883 u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
884 >> MTL_DEBUG_TRCSTS_SHIFT;
885 if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
886 x->mtl_tx_fifo_read_ctrl_write++;
887 else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
888 x->mtl_tx_fifo_read_ctrl_wait++;
889 else if (trcsts == MTL_DEBUG_TRCSTS_READ)
890 x->mtl_tx_fifo_read_ctrl_read++;
891 else
892 x->mtl_tx_fifo_read_ctrl_idle++;
893 }
894 if (value & MTL_DEBUG_TXPAUSED)
895 x->mac_tx_in_pause++;
896 }
897
898 for (queue = 0; queue < rx_queues; queue++) {
899 value = readl(ioaddr + MTL_CHAN_RX_DEBUG(queue));
900
901 if (value & MTL_DEBUG_RXFSTS_MASK) {
902 u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
903 >> MTL_DEBUG_RRCSTS_SHIFT;
904
905 if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
906 x->mtl_rx_fifo_fill_level_full++;
907 else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
908 x->mtl_rx_fifo_fill_above_thresh++;
909 else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
910 x->mtl_rx_fifo_fill_below_thresh++;
911 else
912 x->mtl_rx_fifo_fill_level_empty++;
913 }
914 if (value & MTL_DEBUG_RRCSTS_MASK) {
915 u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
916 MTL_DEBUG_RRCSTS_SHIFT;
917
918 if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
919 x->mtl_rx_fifo_read_ctrl_flush++;
920 else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
921 x->mtl_rx_fifo_read_ctrl_read_data++;
922 else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
923 x->mtl_rx_fifo_read_ctrl_status++;
924 else
925 x->mtl_rx_fifo_read_ctrl_idle++;
926 }
927 if (value & MTL_DEBUG_RWCSTS)
928 x->mtl_rx_fifo_ctrl_active++;
929 }
930
931 /* GMAC debug */
932 value = readl(ioaddr + GMAC_DEBUG);
933
934 if (value & GMAC_DEBUG_TFCSTS_MASK) {
935 u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK)
936 >> GMAC_DEBUG_TFCSTS_SHIFT;
937
938 if (tfcsts == GMAC_DEBUG_TFCSTS_XFER)
939 x->mac_tx_frame_ctrl_xfer++;
940 else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE)
941 x->mac_tx_frame_ctrl_pause++;
942 else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT)
943 x->mac_tx_frame_ctrl_wait++;
944 else
945 x->mac_tx_frame_ctrl_idle++;
946 }
947 if (value & GMAC_DEBUG_TPESTS)
948 x->mac_gmii_tx_proto_engine++;
949 if (value & GMAC_DEBUG_RFCFCSTS_MASK)
950 x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK)
951 >> GMAC_DEBUG_RFCFCSTS_SHIFT;
952 if (value & GMAC_DEBUG_RPESTS)
953 x->mac_gmii_rx_proto_engine++;
954 }
955
956 static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable)
957 {
958 u32 value = readl(ioaddr + GMAC_CONFIG);
959
960 if (enable)
961 value |= GMAC_CONFIG_LM;
962 else
963 value &= ~GMAC_CONFIG_LM;
964
965 writel(value, ioaddr + GMAC_CONFIG);
966 }
967
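/* Set up VLAN filtering in GMAC_VLAN_TAG: hash based matching when
 * @hash is set, otherwise perfect matching on @perfect_match,
 * optionally for double (S-VLAN + C-VLAN) tagged packets.
 */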
968 static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
969 __le16 perfect_match, bool is_double)
970 {
971 void __iomem *ioaddr = hw->pcsr;
972 u32 value;
973
974 writel(hash, ioaddr + GMAC_VLAN_HASH_TABLE);
975
976 value = readl(ioaddr + GMAC_VLAN_TAG);
977
978 if (hash) {
979 value |= GMAC_VLAN_VTHM | GMAC_VLAN_ETV;
980 if (is_double) {
981 value |= GMAC_VLAN_EDVLP;
982 value |= GMAC_VLAN_ESVL;
983 value |= GMAC_VLAN_DOVLTC;
984 }
985
986 writel(value, ioaddr + GMAC_VLAN_TAG);
987 } else if (perfect_match) {
988 u32 value = GMAC_VLAN_ETV;
989
990 if (is_double) {
991 value |= GMAC_VLAN_EDVLP;
992 value |= GMAC_VLAN_ESVL;
993 value |= GMAC_VLAN_DOVLTC;
994 }
995
996 writel(value | perfect_match, ioaddr + GMAC_VLAN_TAG);
997 } else {
998 value &= ~(GMAC_VLAN_VTHM | GMAC_VLAN_ETV);
999 value &= ~(GMAC_VLAN_EDVLP | GMAC_VLAN_ESVL);
1000 value &= ~GMAC_VLAN_DOVLTC;
1001 value &= ~GMAC_VLAN_VID;
1002
1003 writel(value, ioaddr + GMAC_VLAN_TAG);
1004 }
1005 }
1006
1007 static void dwmac4_sarc_configure(void __iomem *ioaddr, int val)
1008 {
1009 u32 value = readl(ioaddr + GMAC_CONFIG);
1010
1011 value &= ~GMAC_CONFIG_SARC;
1012 value |= val << GMAC_CONFIG_SARC_SHIFT;
1013
1014 writel(value, ioaddr + GMAC_CONFIG);
1015 }
1016
1017 static void dwmac4_enable_vlan(struct mac_device_info *hw, u32 type)
1018 {
1019 void __iomem *ioaddr = hw->pcsr;
1020 u32 value;
1021
1022 value = readl(ioaddr + GMAC_VLAN_INCL);
1023 value |= GMAC_VLAN_VLTI;
1024 value |= GMAC_VLAN_CSVL; /* Only use SVLAN */
1025 value &= ~GMAC_VLAN_VLC;
1026 value |= (type << GMAC_VLAN_VLC_SHIFT) & GMAC_VLAN_VLC;
1027 writel(value, ioaddr + GMAC_VLAN_INCL);
1028 }
1029
1030 static void dwmac4_set_arp_offload(struct mac_device_info *hw, bool en,
1031 u32 addr)
1032 {
1033 void __iomem *ioaddr = hw->pcsr;
1034 u32 value;
1035
1036 writel(addr, ioaddr + GMAC_ARP_ADDR);
1037
1038 value = readl(ioaddr + GMAC_CONFIG);
1039 if (en)
1040 value |= GMAC_CONFIG_ARPEN;
1041 else
1042 value &= ~GMAC_CONFIG_ARPEN;
1043 writel(value, ioaddr + GMAC_CONFIG);
1044 }
1045
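/* Configure L3 (IPv4/IPv6) source or destination address filtering for
 * filter @filter_no, with optional inverse matching. The control
 * register is cleared again when @en is false.
 */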
1046 static int dwmac4_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
1047 bool en, bool ipv6, bool sa, bool inv,
1048 u32 match)
1049 {
1050 void __iomem *ioaddr = hw->pcsr;
1051 u32 value;
1052
1053 value = readl(ioaddr + GMAC_PACKET_FILTER);
1054 value |= GMAC_PACKET_FILTER_IPFE;
1055 writel(value, ioaddr + GMAC_PACKET_FILTER);
1056
1057 value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));
1058
1059 /* For IPv6, the SA and DA filters cannot both be active */
1060 if (ipv6) {
1061 value |= GMAC_L3PEN0;
1062 value &= ~(GMAC_L3SAM0 | GMAC_L3SAIM0);
1063 value &= ~(GMAC_L3DAM0 | GMAC_L3DAIM0);
1064 if (sa) {
1065 value |= GMAC_L3SAM0;
1066 if (inv)
1067 value |= GMAC_L3SAIM0;
1068 } else {
1069 value |= GMAC_L3DAM0;
1070 if (inv)
1071 value |= GMAC_L3DAIM0;
1072 }
1073 } else {
1074 value &= ~GMAC_L3PEN0;
1075 if (sa) {
1076 value |= GMAC_L3SAM0;
1077 if (inv)
1078 value |= GMAC_L3SAIM0;
1079 } else {
1080 value |= GMAC_L3DAM0;
1081 if (inv)
1082 value |= GMAC_L3DAIM0;
1083 }
1084 }
1085
1086 writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));
1087
1088 if (sa) {
1089 writel(match, ioaddr + GMAC_L3_ADDR0(filter_no));
1090 } else {
1091 writel(match, ioaddr + GMAC_L3_ADDR1(filter_no));
1092 }
1093
1094 if (!en)
1095 writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));
1096
1097 return 0;
1098 }
1099
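/* Configure L4 (TCP/UDP) source or destination port filtering for
 * filter @filter_no, with optional inverse matching. The control
 * register is cleared again when @en is false.
 */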
1100 static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
1101 bool en, bool udp, bool sa, bool inv,
1102 u32 match)
1103 {
1104 void __iomem *ioaddr = hw->pcsr;
1105 u32 value;
1106
1107 value = readl(ioaddr + GMAC_PACKET_FILTER);
1108 value |= GMAC_PACKET_FILTER_IPFE;
1109 writel(value, ioaddr + GMAC_PACKET_FILTER);
1110
1111 value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));
1112 if (udp) {
1113 value |= GMAC_L4PEN0;
1114 } else {
1115 value &= ~GMAC_L4PEN0;
1116 }
1117
1118 value &= ~(GMAC_L4SPM0 | GMAC_L4SPIM0);
1119 value &= ~(GMAC_L4DPM0 | GMAC_L4DPIM0);
1120 if (sa) {
1121 value |= GMAC_L4SPM0;
1122 if (inv)
1123 value |= GMAC_L4SPIM0;
1124 } else {
1125 value |= GMAC_L4DPM0;
1126 if (inv)
1127 value |= GMAC_L4DPIM0;
1128 }
1129
1130 writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));
1131
1132 if (sa) {
1133 value = match & GMAC_L4SP0;
1134 } else {
1135 value = (match << GMAC_L4DP0_SHIFT) & GMAC_L4DP0;
1136 }
1137
1138 writel(value, ioaddr + GMAC_L4_ADDR(filter_no));
1139
1140 if (!en)
1141 writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));
1142
1143 return 0;
1144 }
1145
1146 const struct stmmac_ops dwmac4_ops = {
1147 .core_init = dwmac4_core_init,
1148 .set_mac = stmmac_set_mac,
1149 .rx_ipc = dwmac4_rx_ipc_enable,
1150 .rx_queue_enable = dwmac4_rx_queue_enable,
1151 .rx_queue_prio = dwmac4_rx_queue_priority,
1152 .tx_queue_prio = dwmac4_tx_queue_priority,
1153 .rx_queue_routing = dwmac4_rx_queue_routing,
1154 .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
1155 .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
1156 .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
1157 .map_mtl_to_dma = dwmac4_map_mtl_dma,
1158 .config_cbs = dwmac4_config_cbs,
1159 .dump_regs = dwmac4_dump_regs,
1160 .host_irq_status = dwmac4_irq_status,
1161 .host_mtl_irq_status = dwmac4_irq_mtl_status,
1162 .flow_ctrl = dwmac4_flow_ctrl,
1163 .pmt = dwmac4_pmt,
1164 .set_umac_addr = dwmac4_set_umac_addr,
1165 .get_umac_addr = dwmac4_get_umac_addr,
1166 .set_eee_mode = dwmac4_set_eee_mode,
1167 .reset_eee_mode = dwmac4_reset_eee_mode,
1168 .set_eee_timer = dwmac4_set_eee_timer,
1169 .set_eee_pls = dwmac4_set_eee_pls,
1170 .pcs_ctrl_ane = dwmac4_ctrl_ane,
1171 .pcs_rane = dwmac4_rane,
1172 .pcs_get_adv_lp = dwmac4_get_adv_lp,
1173 .debug = dwmac4_debug,
1174 .set_filter = dwmac4_set_filter,
1175 .set_mac_loopback = dwmac4_set_mac_loopback,
1176 .update_vlan_hash = dwmac4_update_vlan_hash,
1177 .sarc_configure = dwmac4_sarc_configure,
1178 .enable_vlan = dwmac4_enable_vlan,
1179 .set_arp_offload = dwmac4_set_arp_offload,
1180 .config_l3_filter = dwmac4_config_l3_filter,
1181 .config_l4_filter = dwmac4_config_l4_filter,
1182 .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
1183 .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
1184 .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
1185 };
1186
1187 const struct stmmac_ops dwmac410_ops = {
1188 .core_init = dwmac4_core_init,
1189 .set_mac = stmmac_dwmac4_set_mac,
1190 .rx_ipc = dwmac4_rx_ipc_enable,
1191 .rx_queue_enable = dwmac4_rx_queue_enable,
1192 .rx_queue_prio = dwmac4_rx_queue_priority,
1193 .tx_queue_prio = dwmac4_tx_queue_priority,
1194 .rx_queue_routing = dwmac4_rx_queue_routing,
1195 .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
1196 .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
1197 .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
1198 .map_mtl_to_dma = dwmac4_map_mtl_dma,
1199 .config_cbs = dwmac4_config_cbs,
1200 .dump_regs = dwmac4_dump_regs,
1201 .host_irq_status = dwmac4_irq_status,
1202 .host_mtl_irq_status = dwmac4_irq_mtl_status,
1203 .flow_ctrl = dwmac4_flow_ctrl,
1204 .pmt = dwmac4_pmt,
1205 .set_umac_addr = dwmac4_set_umac_addr,
1206 .get_umac_addr = dwmac4_get_umac_addr,
1207 .set_eee_mode = dwmac4_set_eee_mode,
1208 .reset_eee_mode = dwmac4_reset_eee_mode,
1209 .set_eee_timer = dwmac4_set_eee_timer,
1210 .set_eee_pls = dwmac4_set_eee_pls,
1211 .pcs_ctrl_ane = dwmac4_ctrl_ane,
1212 .pcs_rane = dwmac4_rane,
1213 .pcs_get_adv_lp = dwmac4_get_adv_lp,
1214 .debug = dwmac4_debug,
1215 .set_filter = dwmac4_set_filter,
1216 .flex_pps_config = dwmac5_flex_pps_config,
1217 .set_mac_loopback = dwmac4_set_mac_loopback,
1218 .update_vlan_hash = dwmac4_update_vlan_hash,
1219 .sarc_configure = dwmac4_sarc_configure,
1220 .enable_vlan = dwmac4_enable_vlan,
1221 .set_arp_offload = dwmac4_set_arp_offload,
1222 .config_l3_filter = dwmac4_config_l3_filter,
1223 .config_l4_filter = dwmac4_config_l4_filter,
1224 .est_configure = dwmac5_est_configure,
1225 .fpe_configure = dwmac5_fpe_configure,
1226 .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
1227 .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
1228 .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
1229 };
1230
1231 const struct stmmac_ops dwmac510_ops = {
1232 .core_init = dwmac4_core_init,
1233 .set_mac = stmmac_dwmac4_set_mac,
1234 .rx_ipc = dwmac4_rx_ipc_enable,
1235 .rx_queue_enable = dwmac4_rx_queue_enable,
1236 .rx_queue_prio = dwmac4_rx_queue_priority,
1237 .tx_queue_prio = dwmac4_tx_queue_priority,
1238 .rx_queue_routing = dwmac4_rx_queue_routing,
1239 .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
1240 .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
1241 .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
1242 .map_mtl_to_dma = dwmac4_map_mtl_dma,
1243 .config_cbs = dwmac4_config_cbs,
1244 .dump_regs = dwmac4_dump_regs,
1245 .host_irq_status = dwmac4_irq_status,
1246 .host_mtl_irq_status = dwmac4_irq_mtl_status,
1247 .flow_ctrl = dwmac4_flow_ctrl,
1248 .pmt = dwmac4_pmt,
1249 .set_umac_addr = dwmac4_set_umac_addr,
1250 .get_umac_addr = dwmac4_get_umac_addr,
1251 .set_eee_mode = dwmac4_set_eee_mode,
1252 .reset_eee_mode = dwmac4_reset_eee_mode,
1253 .set_eee_timer = dwmac4_set_eee_timer,
1254 .set_eee_pls = dwmac4_set_eee_pls,
1255 .pcs_ctrl_ane = dwmac4_ctrl_ane,
1256 .pcs_rane = dwmac4_rane,
1257 .pcs_get_adv_lp = dwmac4_get_adv_lp,
1258 .debug = dwmac4_debug,
1259 .set_filter = dwmac4_set_filter,
1260 .safety_feat_config = dwmac5_safety_feat_config,
1261 .safety_feat_irq_status = dwmac5_safety_feat_irq_status,
1262 .safety_feat_dump = dwmac5_safety_feat_dump,
1263 .rxp_config = dwmac5_rxp_config,
1264 .flex_pps_config = dwmac5_flex_pps_config,
1265 .set_mac_loopback = dwmac4_set_mac_loopback,
1266 .update_vlan_hash = dwmac4_update_vlan_hash,
1267 .sarc_configure = dwmac4_sarc_configure,
1268 .enable_vlan = dwmac4_enable_vlan,
1269 .set_arp_offload = dwmac4_set_arp_offload,
1270 .config_l3_filter = dwmac4_config_l3_filter,
1271 .config_l4_filter = dwmac4_config_l4_filter,
1272 .est_configure = dwmac5_est_configure,
1273 .fpe_configure = dwmac5_fpe_configure,
1274 .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
1275 .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
1276 .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
1277 };
1278
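/* Read the number of VLAN filter entries from the NRVF field of
 * GMAC_HW_FEATURE3.
 */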
1279 static u32 dwmac4_get_num_vlan(void __iomem *ioaddr)
1280 {
1281 u32 val, num_vlan;
1282
1283 val = readl(ioaddr + GMAC_HW_FEATURE3);
1284 switch (val & GMAC_HW_FEAT_NRVF) {
1285 case 0:
1286 num_vlan = 1;
1287 break;
1288 case 1:
1289 num_vlan = 4;
1290 break;
1291 case 2:
1292 num_vlan = 8;
1293 break;
1294 case 3:
1295 num_vlan = 16;
1296 break;
1297 case 4:
1298 num_vlan = 24;
1299 break;
1300 case 5:
1301 num_vlan = 32;
1302 break;
1303 default:
1304 num_vlan = 1;
1305 }
1306
1307 return num_vlan;
1308 }
1309
1310 int dwmac4_setup(struct stmmac_priv *priv)
1311 {
1312 struct mac_device_info *mac = priv->hw;
1313
1314 dev_info(priv->device, "\tDWMAC4/5\n");
1315
1316 priv->dev->priv_flags |= IFF_UNICAST_FLT;
1317 mac->pcsr = priv->ioaddr;
1318 mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
1319 mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
1320 mac->mcast_bits_log2 = 0;
1321
1322 if (mac->multicast_filter_bins)
1323 mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
1324
1325 mac->link.duplex = GMAC_CONFIG_DM;
1326 mac->link.speed10 = GMAC_CONFIG_PS;
1327 mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
1328 mac->link.speed1000 = 0;
1329 mac->link.speed_mask = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
1330 mac->mii.addr = GMAC_MDIO_ADDR;
1331 mac->mii.data = GMAC_MDIO_DATA;
1332 mac->mii.addr_shift = 21;
1333 mac->mii.addr_mask = GENMASK(25, 21);
1334 mac->mii.reg_shift = 16;
1335 mac->mii.reg_mask = GENMASK(20, 16);
1336 mac->mii.clk_csr_shift = 8;
1337 mac->mii.clk_csr_mask = GENMASK(11, 8);
1338 mac->num_vlan = dwmac4_get_num_vlan(priv->ioaddr);
1339
1340 return 0;
1341 }
1342