Lines matching refs: bp (Cadence MACB/GEM Ethernet driver)
54 #define RX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \ argument
55 * (bp)->rx_ring_size)
60 #define TX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \ argument
61 * (bp)->tx_ring_size)
64 #define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4) argument
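A worked sketch of the sizing math above (ring size hypothetical; the wrap helpers further down additionally require power-of-two ring sizes):

    /* Sketch: with the basic 8-byte macb_dma_desc and tx_ring_size = 512,
     * TX_RING_BYTES(bp) = 8 * 512 = 4096 bytes of descriptor memory, and
     * MACB_TX_WAKEUP_THRESH(bp) = 3 * 512 / 4 = 384, i.e. a stopped TX
     * queue is only woken once no more than 384 entries remain in flight.
     */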
127 static unsigned int macb_dma_desc_get_size(struct macb *bp) in macb_dma_desc_get_size() argument
132 switch (bp->hw_dma_cap) { in macb_dma_desc_get_size()
154 static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx) in macb_adj_dma_desc_idx() argument
157 switch (bp->hw_dma_cap) { in macb_adj_dma_desc_idx()
173 static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc) in macb_64b_desc() argument
181 static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index) in macb_tx_ring_wrap() argument
183 return index & (bp->tx_ring_size - 1); in macb_tx_ring_wrap()
189 index = macb_tx_ring_wrap(queue->bp, index); in macb_tx_desc()
190 index = macb_adj_dma_desc_idx(queue->bp, index); in macb_tx_desc()
197 return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)]; in macb_tx_skb()
204 offset = macb_tx_ring_wrap(queue->bp, index) * in macb_tx_dma()
205 macb_dma_desc_get_size(queue->bp); in macb_tx_dma()
210 static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index) in macb_rx_ring_wrap() argument
212 return index & (bp->rx_ring_size - 1); in macb_rx_ring_wrap()
217 index = macb_rx_ring_wrap(queue->bp, index); in macb_rx_desc()
218 index = macb_adj_dma_desc_idx(queue->bp, index); in macb_rx_desc()
224 return queue->rx_buffers + queue->bp->rx_buffer_size * in macb_rx_buffer()
225 macb_rx_ring_wrap(queue->bp, index); in macb_rx_buffer()
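The wrap helpers above rely on power-of-two ring sizes so that modulo reduces to a mask; a standalone sketch of the same idiom (names hypothetical):

    static unsigned int ring_wrap(unsigned int index, unsigned int ring_size)
    {
            /* equivalent to index % ring_size, but only valid when
             * ring_size is a power of two (mask 511 for a 512-entry ring)
             */
            return index & (ring_size - 1);
    }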
229 static u32 hw_readl_native(struct macb *bp, int offset) in hw_readl_native() argument
231 return __raw_readl(bp->regs + offset); in hw_readl_native()
234 static void hw_writel_native(struct macb *bp, int offset, u32 value) in hw_writel_native() argument
236 __raw_writel(value, bp->regs + offset); in hw_writel_native()
239 static u32 hw_readl(struct macb *bp, int offset) in hw_readl() argument
241 return readl_relaxed(bp->regs + offset); in hw_readl()
244 static void hw_writel(struct macb *bp, int offset, u32 value) in hw_writel() argument
246 writel_relaxed(value, bp->regs + offset); in hw_writel()
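These two accessor pairs are not called directly on the fast path: macb_probe() below installs one pair into bp->macb_reg_readl/writel depending on bp->native_io, and the macb_readl()/macb_writel() wrappers dispatch through those pointers. Sketch of the dispatch (register macro from macb.h):

    u32 ncr = bp->macb_reg_readl(bp, MACB_NCR);   /* resolves to hw_readl()
                                                   * or hw_readl_native() */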
278 static void macb_set_hwaddr(struct macb *bp) in macb_set_hwaddr() argument
283 bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr)); in macb_set_hwaddr()
284 macb_or_gem_writel(bp, SA1B, bottom); in macb_set_hwaddr()
285 top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4))); in macb_set_hwaddr()
286 macb_or_gem_writel(bp, SA1T, top); in macb_set_hwaddr()
289 macb_or_gem_writel(bp, SA2B, 0); in macb_set_hwaddr()
290 macb_or_gem_writel(bp, SA2T, 0); in macb_set_hwaddr()
291 macb_or_gem_writel(bp, SA3B, 0); in macb_set_hwaddr()
292 macb_or_gem_writel(bp, SA3T, 0); in macb_set_hwaddr()
293 macb_or_gem_writel(bp, SA4B, 0); in macb_set_hwaddr()
294 macb_or_gem_writel(bp, SA4T, 0); in macb_set_hwaddr()
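How a MAC address packs into the SA1B/SA1T pair, assuming a little-endian CPU (worked example, address hypothetical):

    /* For 00:11:22:33:44:55: SA1B = 0x33221100 (first four bytes),
     * SA1T = 0x5544 (last two bytes). SA2..SA4 are cleared above so
     * stale specific-address filters cannot keep matching old addresses.
     */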
297 static void macb_get_hwaddr(struct macb *bp) in macb_get_hwaddr() argument
306 bottom = macb_or_gem_readl(bp, SA1B + i * 8); in macb_get_hwaddr()
307 top = macb_or_gem_readl(bp, SA1T + i * 8); in macb_get_hwaddr()
317 memcpy(bp->dev->dev_addr, addr, sizeof(addr)); in macb_get_hwaddr()
322 dev_info(&bp->pdev->dev, "invalid hw address, using random\n"); in macb_get_hwaddr()
323 eth_hw_addr_random(bp->dev); in macb_get_hwaddr()
326 static int macb_mdio_wait_for_idle(struct macb *bp) in macb_mdio_wait_for_idle() argument
330 return readx_poll_timeout(MACB_READ_NSR, bp, val, val & MACB_BIT(IDLE), in macb_mdio_wait_for_idle()
336 struct macb *bp = bus->priv; in macb_mdio_read() local
339 status = pm_runtime_get_sync(&bp->pdev->dev); in macb_mdio_read()
341 pm_runtime_put_noidle(&bp->pdev->dev); in macb_mdio_read()
345 status = macb_mdio_wait_for_idle(bp); in macb_mdio_read()
350 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) in macb_mdio_read()
357 status = macb_mdio_wait_for_idle(bp); in macb_mdio_read()
361 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) in macb_mdio_read()
367 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF) in macb_mdio_read()
374 status = macb_mdio_wait_for_idle(bp); in macb_mdio_read()
378 status = MACB_BFEXT(DATA, macb_readl(bp, MAN)); in macb_mdio_read()
381 pm_runtime_mark_last_busy(&bp->pdev->dev); in macb_mdio_read()
382 pm_runtime_put_autosuspend(&bp->pdev->dev); in macb_mdio_read()
390 struct macb *bp = bus->priv; in macb_mdio_write() local
393 status = pm_runtime_get_sync(&bp->pdev->dev); in macb_mdio_write()
395 pm_runtime_put_noidle(&bp->pdev->dev); in macb_mdio_write()
399 status = macb_mdio_wait_for_idle(bp); in macb_mdio_write()
404 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) in macb_mdio_write()
411 status = macb_mdio_wait_for_idle(bp); in macb_mdio_write()
415 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) in macb_mdio_write()
422 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF) in macb_mdio_write()
430 status = macb_mdio_wait_for_idle(bp); in macb_mdio_write()
435 pm_runtime_mark_last_busy(&bp->pdev->dev); in macb_mdio_write()
436 pm_runtime_put_autosuspend(&bp->pdev->dev); in macb_mdio_write()
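Both MDIO paths wrap the register access in the usual runtime-PM bracket; a generic sketch of that pattern (dev stands for &bp->pdev->dev):

    status = pm_runtime_get_sync(dev);
    if (status < 0) {
            pm_runtime_put_noidle(dev);   /* drop the reference the
                                           * failed resume still took */
            return status;
    }
    /* ... wait for IDLE, program the MAN register, wait again ... */
    pm_runtime_mark_last_busy(dev);
    pm_runtime_put_autosuspend(dev);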
441 static void macb_init_buffers(struct macb *bp) in macb_init_buffers() argument
446 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_init_buffers()
449 if (bp->hw_dma_cap & HW_DMA_CAP_64B) in macb_init_buffers()
455 if (bp->hw_dma_cap & HW_DMA_CAP_64B) in macb_init_buffers()
467 static void macb_set_tx_clk(struct macb *bp, int speed) in macb_set_tx_clk() argument
471 if (!bp->tx_clk || (bp->caps & MACB_CAPS_CLK_HW_CHG)) in macb_set_tx_clk()
475 if (bp->phy_interface == PHY_INTERFACE_MODE_MII) in macb_set_tx_clk()
492 rate_rounded = clk_round_rate(bp->tx_clk, rate); in macb_set_tx_clk()
502 netdev_warn(bp->dev, in macb_set_tx_clk()
506 if (clk_set_rate(bp->tx_clk, rate_rounded)) in macb_set_tx_clk()
507 netdev_err(bp->dev, "adjusting tx_clk failed.\n"); in macb_set_tx_clk()
516 struct macb *bp = netdev_priv(ndev); in macb_validate() local
530 if (!macb_is_gem(bp) && in macb_validate()
538 !(bp->caps & MACB_CAPS_HIGH_SPEED && in macb_validate()
539 bp->caps & MACB_CAPS_PCS)) { in macb_validate()
548 if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE && in macb_validate()
567 if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE && in macb_validate()
575 if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF)) in macb_validate()
588 struct macb *bp = container_of(pcs, struct macb, phylink_pcs); in macb_usx_pcs_link_up() local
591 config = gem_readl(bp, USX_CONTROL); in macb_usx_pcs_link_up()
596 gem_writel(bp, USX_CONTROL, config); in macb_usx_pcs_link_up()
602 struct macb *bp = container_of(pcs, struct macb, phylink_pcs); in macb_usx_pcs_get_state() local
609 val = gem_readl(bp, USX_STATUS); in macb_usx_pcs_get_state()
611 val = gem_readl(bp, NCFGR); in macb_usx_pcs_get_state()
622 struct macb *bp = container_of(pcs, struct macb, phylink_pcs); in macb_usx_pcs_config() local
624 gem_writel(bp, USX_CONTROL, gem_readl(bp, USX_CONTROL) | in macb_usx_pcs_config()
666 struct macb *bp = netdev_priv(ndev); in macb_mac_config() local
671 spin_lock_irqsave(&bp->lock, flags); in macb_mac_config()
673 old_ctrl = ctrl = macb_or_gem_readl(bp, NCFGR); in macb_mac_config()
674 old_ncr = ncr = macb_or_gem_readl(bp, NCR); in macb_mac_config()
676 if (bp->caps & MACB_CAPS_MACB_IS_EMAC) { in macb_mac_config()
679 } else if (macb_is_gem(bp)) { in macb_mac_config()
693 macb_or_gem_writel(bp, NCFGR, ctrl); in macb_mac_config()
696 macb_or_gem_writel(bp, NCR, ncr); in macb_mac_config()
702 if (macb_is_gem(bp) && state->interface == PHY_INTERFACE_MODE_SGMII) { in macb_mac_config()
705 old_pcsctrl = gem_readl(bp, PCSCNTRL); in macb_mac_config()
711 gem_writel(bp, PCSCNTRL, pcsctrl); in macb_mac_config()
714 spin_unlock_irqrestore(&bp->lock, flags); in macb_mac_config()
721 struct macb *bp = netdev_priv(ndev); in macb_mac_link_down() local
726 if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) in macb_mac_link_down()
727 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_mac_link_down()
729 bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); in macb_mac_link_down()
732 ctrl = macb_readl(bp, NCR) & ~(MACB_BIT(RE) | MACB_BIT(TE)); in macb_mac_link_down()
733 macb_writel(bp, NCR, ctrl); in macb_mac_link_down()
745 struct macb *bp = netdev_priv(ndev); in macb_mac_link_up() local
751 spin_lock_irqsave(&bp->lock, flags); in macb_mac_link_up()
753 ctrl = macb_or_gem_readl(bp, NCFGR); in macb_mac_link_up()
763 if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) { in macb_mac_link_up()
765 if (macb_is_gem(bp)) { in macb_mac_link_up()
775 macb_set_tx_clk(bp, speed); in macb_mac_link_up()
780 bp->macbgem_ops.mog_init_rings(bp); in macb_mac_link_up()
781 macb_init_buffers(bp); in macb_mac_link_up()
783 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_mac_link_up()
785 bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); in macb_mac_link_up()
788 macb_or_gem_writel(bp, NCFGR, ctrl); in macb_mac_link_up()
790 if (bp->phy_interface == PHY_INTERFACE_MODE_10GBASER) in macb_mac_link_up()
791 gem_writel(bp, HS_MAC_CONFIG, GEM_BFINS(HS_MAC_SPEED, HS_SPEED_10000M, in macb_mac_link_up()
792 gem_readl(bp, HS_MAC_CONFIG))); in macb_mac_link_up()
794 spin_unlock_irqrestore(&bp->lock, flags); in macb_mac_link_up()
797 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE)); in macb_mac_link_up()
806 struct macb *bp = netdev_priv(ndev); in macb_mac_prepare() local
809 bp->phylink_pcs.ops = &macb_phylink_usx_pcs_ops; in macb_mac_prepare()
811 bp->phylink_pcs.ops = &macb_phylink_pcs_ops; in macb_mac_prepare()
813 bp->phylink_pcs.ops = NULL; in macb_mac_prepare()
815 if (bp->phylink_pcs.ops) in macb_mac_prepare()
816 phylink_set_pcs(bp->phylink, &bp->phylink_pcs); in macb_mac_prepare()
836 static int macb_phylink_connect(struct macb *bp) in macb_phylink_connect() argument
838 struct device_node *dn = bp->pdev->dev.of_node; in macb_phylink_connect()
839 struct net_device *dev = bp->dev; in macb_phylink_connect()
844 ret = phylink_of_phy_connect(bp->phylink, dn, 0); in macb_phylink_connect()
847 phydev = phy_find_first(bp->mii_bus); in macb_phylink_connect()
854 ret = phylink_connect_phy(bp->phylink, phydev); in macb_phylink_connect()
862 phylink_start(bp->phylink); in macb_phylink_connect()
871 struct macb *bp = netdev_priv(ndev); in macb_get_pcs_fixed_state() local
873 state->link = (macb_readl(bp, NSR) & MACB_BIT(NSR_LINK)) != 0; in macb_get_pcs_fixed_state()
879 struct macb *bp = netdev_priv(dev); in macb_mii_probe() local
881 bp->phylink_config.dev = &dev->dev; in macb_mii_probe()
882 bp->phylink_config.type = PHYLINK_NETDEV; in macb_mii_probe()
883 bp->phylink_config.mac_managed_pm = true; in macb_mii_probe()
885 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) { in macb_mii_probe()
886 bp->phylink_config.poll_fixed_state = true; in macb_mii_probe()
887 bp->phylink_config.get_fixed_state = macb_get_pcs_fixed_state; in macb_mii_probe()
890 bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode, in macb_mii_probe()
891 bp->phy_interface, &macb_phylink_ops); in macb_mii_probe()
892 if (IS_ERR(bp->phylink)) { in macb_mii_probe()
894 PTR_ERR(bp->phylink)); in macb_mii_probe()
895 return PTR_ERR(bp->phylink); in macb_mii_probe()
901 static int macb_mdiobus_register(struct macb *bp) in macb_mdiobus_register() argument
903 struct device_node *child, *np = bp->pdev->dev.of_node; in macb_mdiobus_register()
906 return mdiobus_register(bp->mii_bus); in macb_mdiobus_register()
920 return of_mdiobus_register(bp->mii_bus, np); in macb_mdiobus_register()
923 return mdiobus_register(bp->mii_bus); in macb_mdiobus_register()
926 static int macb_mii_init(struct macb *bp) in macb_mii_init() argument
931 macb_writel(bp, NCR, MACB_BIT(MPE)); in macb_mii_init()
933 bp->mii_bus = mdiobus_alloc(); in macb_mii_init()
934 if (!bp->mii_bus) { in macb_mii_init()
939 bp->mii_bus->name = "MACB_mii_bus"; in macb_mii_init()
940 bp->mii_bus->read = &macb_mdio_read; in macb_mii_init()
941 bp->mii_bus->write = &macb_mdio_write; in macb_mii_init()
942 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", in macb_mii_init()
943 bp->pdev->name, bp->pdev->id); in macb_mii_init()
944 bp->mii_bus->priv = bp; in macb_mii_init()
945 bp->mii_bus->parent = &bp->pdev->dev; in macb_mii_init()
947 dev_set_drvdata(&bp->dev->dev, bp->mii_bus); in macb_mii_init()
949 err = macb_mdiobus_register(bp); in macb_mii_init()
953 err = macb_mii_probe(bp->dev); in macb_mii_init()
960 mdiobus_unregister(bp->mii_bus); in macb_mii_init()
962 mdiobus_free(bp->mii_bus); in macb_mii_init()
967 static void macb_update_stats(struct macb *bp) in macb_update_stats() argument
969 u32 *p = &bp->hw_stats.macb.rx_pause_frames; in macb_update_stats()
970 u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1; in macb_update_stats()
976 *p += bp->macb_reg_readl(bp, offset); in macb_update_stats()
979 static int macb_halt_tx(struct macb *bp) in macb_halt_tx() argument
984 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT)); in macb_halt_tx()
989 status = macb_readl(bp, TSR); in macb_halt_tx()
999 static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb) in macb_tx_unmap() argument
1003 dma_unmap_page(&bp->pdev->dev, tx_skb->mapping, in macb_tx_unmap()
1006 dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, in macb_tx_unmap()
1017 static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr) in macb_set_addr() argument
1022 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { in macb_set_addr()
1023 desc_64 = macb_64b_desc(bp, desc); in macb_set_addr()
1035 static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc) in macb_get_addr() argument
1041 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { in macb_get_addr()
1042 desc_64 = macb_64b_desc(bp, desc); in macb_get_addr()
1048 if (bp->hw_dma_cap & HW_DMA_CAP_PTP) in macb_get_addr()
1058 struct macb *bp = queue->bp; in macb_tx_error_task() local
1065 netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n", in macb_tx_error_task()
1066 (unsigned int)(queue - bp->queues), in macb_tx_error_task()
1075 spin_lock_irqsave(&bp->lock, flags); in macb_tx_error_task()
1078 netif_tx_stop_all_queues(bp->dev); in macb_tx_error_task()
1084 if (macb_halt_tx(bp)) in macb_tx_error_task()
1086 netdev_err(bp->dev, "BUG: halt tx timed out\n"); in macb_tx_error_task()
1102 macb_tx_unmap(bp, tx_skb); in macb_tx_error_task()
1112 netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n", in macb_tx_error_task()
1113 macb_tx_ring_wrap(bp, tail), in macb_tx_error_task()
1115 bp->dev->stats.tx_packets++; in macb_tx_error_task()
1117 bp->dev->stats.tx_bytes += skb->len; in macb_tx_error_task()
1126 netdev_err(bp->dev, in macb_tx_error_task()
1132 macb_tx_unmap(bp, tx_skb); in macb_tx_error_task()
1137 macb_set_addr(bp, desc, 0); in macb_tx_error_task()
1146 if (bp->hw_dma_cap & HW_DMA_CAP_64B) in macb_tx_error_task()
1154 macb_writel(bp, TSR, macb_readl(bp, TSR)); in macb_tx_error_task()
1158 netif_tx_start_all_queues(bp->dev); in macb_tx_error_task()
1159 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); in macb_tx_error_task()
1161 spin_unlock_irqrestore(&bp->lock, flags); in macb_tx_error_task()
1199 struct macb *bp = queue->bp; in macb_tx_interrupt() local
1200 u16 queue_index = queue - bp->queues; in macb_tx_interrupt()
1202 status = macb_readl(bp, TSR); in macb_tx_interrupt()
1203 macb_writel(bp, TSR, status); in macb_tx_interrupt()
1205 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_tx_interrupt()
1208 netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n", in macb_tx_interrupt()
1246 netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n", in macb_tx_interrupt()
1247 macb_tx_ring_wrap(bp, tail), in macb_tx_interrupt()
1249 bp->dev->stats.tx_packets++; in macb_tx_interrupt()
1251 bp->dev->stats.tx_bytes += skb->len; in macb_tx_interrupt()
1256 macb_tx_unmap(bp, tx_skb); in macb_tx_interrupt()
1268 if (__netif_subqueue_stopped(bp->dev, queue_index) && in macb_tx_interrupt()
1270 bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp)) in macb_tx_interrupt()
1271 netif_wake_subqueue(bp->dev, queue_index); in macb_tx_interrupt()
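The wake check above implements hysteresis; a worked example with a hypothetical 512-entry ring:

    /* The queue stops when the ring cannot hold the next skb and is only
     * woken once CIRC_CNT() drops to MACB_TX_WAKEUP_THRESH = 384, so at
     * least 128 entries are free on wake, avoiding stop/wake ping-pong.
     */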
1279 struct macb *bp = queue->bp; in gem_rx_refill() local
1283 bp->rx_ring_size) > 0) { in gem_rx_refill()
1284 entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head); in gem_rx_refill()
1293 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size); in gem_rx_refill()
1295 netdev_err(bp->dev, in gem_rx_refill()
1301 paddr = dma_map_single(&bp->pdev->dev, skb->data, in gem_rx_refill()
1302 bp->rx_buffer_size, in gem_rx_refill()
1304 if (dma_mapping_error(&bp->pdev->dev, paddr)) { in gem_rx_refill()
1311 if (entry == bp->rx_ring_size - 1) in gem_rx_refill()
1318 macb_set_addr(bp, desc, paddr); in gem_rx_refill()
1333 netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n", in gem_rx_refill()
1361 struct macb *bp = queue->bp; in gem_rx() local
1373 entry = macb_rx_ring_wrap(bp, queue->rx_tail); in gem_rx()
1380 addr = macb_get_addr(bp, desc); in gem_rx()
1394 netdev_err(bp->dev, in gem_rx()
1396 bp->dev->stats.rx_dropped++; in gem_rx()
1402 netdev_err(bp->dev, in gem_rx()
1404 bp->dev->stats.rx_dropped++; in gem_rx()
1410 len = ctrl & bp->rx_frm_len_mask; in gem_rx()
1412 netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len); in gem_rx()
1415 dma_unmap_single(&bp->pdev->dev, addr, in gem_rx()
1416 bp->rx_buffer_size, DMA_FROM_DEVICE); in gem_rx()
1418 skb->protocol = eth_type_trans(skb, bp->dev); in gem_rx()
1420 if (bp->dev->features & NETIF_F_RXCSUM && in gem_rx()
1421 !(bp->dev->flags & IFF_PROMISC) && in gem_rx()
1425 bp->dev->stats.rx_packets++; in gem_rx()
1427 bp->dev->stats.rx_bytes += skb->len; in gem_rx()
1430 gem_ptp_do_rxstamp(bp, skb, desc); in gem_rx()
1433 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", in gem_rx()
1457 struct macb *bp = queue->bp; in macb_rx_frame() local
1460 len = desc->ctrl & bp->rx_frm_len_mask; in macb_rx_frame()
1462 netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n", in macb_rx_frame()
1463 macb_rx_ring_wrap(bp, first_frag), in macb_rx_frame()
1464 macb_rx_ring_wrap(bp, last_frag), len); in macb_rx_frame()
1474 skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN); in macb_rx_frame()
1476 bp->dev->stats.rx_dropped++; in macb_rx_frame()
1496 unsigned int frag_len = bp->rx_buffer_size; in macb_rx_frame()
1508 offset += bp->rx_buffer_size; in macb_rx_frame()
1520 skb->protocol = eth_type_trans(skb, bp->dev); in macb_rx_frame()
1522 bp->dev->stats.rx_packets++; in macb_rx_frame()
1523 bp->dev->stats.rx_bytes += skb->len; in macb_rx_frame()
1524 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", in macb_rx_frame()
1533 struct macb *bp = queue->bp; in macb_init_rx_ring() local
1539 for (i = 0; i < bp->rx_ring_size; i++) { in macb_init_rx_ring()
1541 macb_set_addr(bp, desc, addr); in macb_init_rx_ring()
1543 addr += bp->rx_buffer_size; in macb_init_rx_ring()
1552 struct macb *bp = queue->bp; in macb_rx() local
1604 netdev_err(bp->dev, "RX queue corruption: reset it\n"); in macb_rx()
1606 spin_lock_irqsave(&bp->lock, flags); in macb_rx()
1608 ctrl = macb_readl(bp, NCR); in macb_rx()
1609 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); in macb_rx()
1614 macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); in macb_rx()
1616 spin_unlock_irqrestore(&bp->lock, flags); in macb_rx()
1631 struct macb *bp = queue->bp; in macb_poll() local
1635 status = macb_readl(bp, RSR); in macb_poll()
1636 macb_writel(bp, RSR, status); in macb_poll()
1638 netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n", in macb_poll()
1641 work_done = bp->macbgem_ops.mog_rx(queue, napi, budget); in macb_poll()
1653 status = macb_readl(bp, RSR); in macb_poll()
1655 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_poll()
1659 queue_writel(queue, IER, bp->rx_intr_mask); in macb_poll()
1669 status = macb_readl(bp, RSR); in macb_poll()
1671 queue_writel(queue, IDR, bp->rx_intr_mask); in macb_poll()
1672 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_poll()
1686 struct macb *bp = from_tasklet(bp, t, hresp_err_tasklet); in macb_hresp_error_task() local
1687 struct net_device *dev = bp->dev; in macb_hresp_error_task()
1692 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_hresp_error_task()
1693 queue_writel(queue, IDR, bp->rx_intr_mask | in macb_hresp_error_task()
1697 ctrl = macb_readl(bp, NCR); in macb_hresp_error_task()
1699 macb_writel(bp, NCR, ctrl); in macb_hresp_error_task()
1704 bp->macbgem_ops.mog_init_rings(bp); in macb_hresp_error_task()
1707 macb_init_buffers(bp); in macb_hresp_error_task()
1710 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_hresp_error_task()
1712 bp->rx_intr_mask | in macb_hresp_error_task()
1717 macb_writel(bp, NCR, ctrl); in macb_hresp_error_task()
1727 struct macb *bp = queue->bp; in macb_tx_restart() local
1730 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_tx_restart()
1736 tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp); in macb_tx_restart()
1737 tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp)); in macb_tx_restart()
1738 head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, head)); in macb_tx_restart()
1743 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); in macb_tx_restart()
1749 struct macb *bp = queue->bp; in macb_wol_interrupt() local
1757 spin_lock(&bp->lock); in macb_wol_interrupt()
1761 macb_writel(bp, WOL, 0); in macb_wol_interrupt()
1762 netdev_vdbg(bp->dev, "MACB WoL: queue = %u, isr = 0x%08lx\n", in macb_wol_interrupt()
1763 (unsigned int)(queue - bp->queues), in macb_wol_interrupt()
1765 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_wol_interrupt()
1767 pm_wakeup_event(&bp->pdev->dev, 0); in macb_wol_interrupt()
1770 spin_unlock(&bp->lock); in macb_wol_interrupt()
1778 struct macb *bp = queue->bp; in gem_wol_interrupt() local
1786 spin_lock(&bp->lock); in gem_wol_interrupt()
1790 gem_writel(bp, WOL, 0); in gem_wol_interrupt()
1791 netdev_vdbg(bp->dev, "GEM WoL: queue = %u, isr = 0x%08lx\n", in gem_wol_interrupt()
1792 (unsigned int)(queue - bp->queues), in gem_wol_interrupt()
1794 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in gem_wol_interrupt()
1796 pm_wakeup_event(&bp->pdev->dev, 0); in gem_wol_interrupt()
1799 spin_unlock(&bp->lock); in gem_wol_interrupt()
1807 struct macb *bp = queue->bp; in macb_interrupt() local
1808 struct net_device *dev = bp->dev; in macb_interrupt()
1816 spin_lock(&bp->lock); in macb_interrupt()
1822 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1827 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n", in macb_interrupt()
1828 (unsigned int)(queue - bp->queues), in macb_interrupt()
1831 if (status & bp->rx_intr_mask) { in macb_interrupt()
1838 queue_writel(queue, IDR, bp->rx_intr_mask); in macb_interrupt()
1839 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1843 netdev_vdbg(bp->dev, "scheduling RX softirq\n"); in macb_interrupt()
1852 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1876 ctrl = macb_readl(bp, NCR); in macb_interrupt()
1877 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); in macb_interrupt()
1879 macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); in macb_interrupt()
1881 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1887 if (macb_is_gem(bp)) in macb_interrupt()
1888 bp->hw_stats.gem.rx_overruns++; in macb_interrupt()
1890 bp->hw_stats.macb.rx_overruns++; in macb_interrupt()
1892 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1897 tasklet_schedule(&bp->hresp_err_tasklet); in macb_interrupt()
1900 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1906 spin_unlock(&bp->lock); in macb_interrupt()
1917 struct macb *bp = netdev_priv(dev); in macb_poll_controller() local
1923 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_poll_controller()
1929 static unsigned int macb_tx_map(struct macb *bp, in macb_tx_map() argument
1961 entry = macb_tx_ring_wrap(bp, tx_head); in macb_tx_map()
1964 mapping = dma_map_single(&bp->pdev->dev, in macb_tx_map()
1967 if (dma_mapping_error(&bp->pdev->dev, mapping)) in macb_tx_map()
1981 size = min(len, bp->max_tx_length); in macb_tx_map()
1991 size = min(len, bp->max_tx_length); in macb_tx_map()
1992 entry = macb_tx_ring_wrap(bp, tx_head); in macb_tx_map()
1995 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, in macb_tx_map()
1997 if (dma_mapping_error(&bp->pdev->dev, mapping)) in macb_tx_map()
2015 netdev_err(bp->dev, "BUG! empty skb!\n"); in macb_tx_map()
2030 entry = macb_tx_ring_wrap(bp, i); in macb_tx_map()
2052 entry = macb_tx_ring_wrap(bp, i); in macb_tx_map()
2061 if (unlikely(entry == (bp->tx_ring_size - 1))) in macb_tx_map()
2068 if ((bp->dev->features & NETIF_F_HW_CSUM) && in macb_tx_map()
2079 macb_set_addr(bp, desc, tx_skb->mapping); in macb_tx_map()
2092 netdev_err(bp->dev, "TX DMA map failed\n"); in macb_tx_map()
2097 macb_tx_unmap(bp, tx_skb); in macb_tx_map()
2210 struct macb *bp = netdev_priv(dev); in macb_start_xmit() local
2211 struct macb_queue *queue = &bp->queues[queue_index]; in macb_start_xmit()
2238 netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n"); in macb_start_xmit()
2243 hdrlen = min(skb_headlen(skb), bp->max_tx_length); in macb_start_xmit()
2246 netdev_vdbg(bp->dev, in macb_start_xmit()
2260 desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1; in macb_start_xmit()
2262 desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length); in macb_start_xmit()
2266 desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length); in macb_start_xmit()
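Worked descriptor-count example for the arithmetic above (sizes hypothetical):

    /* With bp->max_tx_length = 1518, a 4000-byte linear area plus one
     * 2000-byte fragment needs DIV_ROUND_UP(4000, 1518) +
     * DIV_ROUND_UP(2000, 1518) = 3 + 2 = 5 free descriptors before
     * macb_tx_map() is attempted.
     */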
2269 spin_lock_irqsave(&bp->lock, flags); in macb_start_xmit()
2273 bp->tx_ring_size) < desc_cnt) { in macb_start_xmit()
2275 spin_unlock_irqrestore(&bp->lock, flags); in macb_start_xmit()
2276 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n", in macb_start_xmit()
2282 if (!macb_tx_map(bp, queue, skb, hdrlen)) { in macb_start_xmit()
2291 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); in macb_start_xmit()
2293 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1) in macb_start_xmit()
2297 spin_unlock_irqrestore(&bp->lock, flags); in macb_start_xmit()
2302 static void macb_init_rx_buffer_size(struct macb *bp, size_t size) in macb_init_rx_buffer_size() argument
2304 if (!macb_is_gem(bp)) { in macb_init_rx_buffer_size()
2305 bp->rx_buffer_size = MACB_RX_BUFFER_SIZE; in macb_init_rx_buffer_size()
2307 bp->rx_buffer_size = size; in macb_init_rx_buffer_size()
2309 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) { in macb_init_rx_buffer_size()
2310 netdev_dbg(bp->dev, in macb_init_rx_buffer_size()
2313 bp->rx_buffer_size = in macb_init_rx_buffer_size()
2314 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE); in macb_init_rx_buffer_size()
2318 netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n", in macb_init_rx_buffer_size()
2319 bp->dev->mtu, bp->rx_buffer_size); in macb_init_rx_buffer_size()
2322 static void gem_free_rx_buffers(struct macb *bp) in gem_free_rx_buffers() argument
2331 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in gem_free_rx_buffers()
2335 for (i = 0; i < bp->rx_ring_size; i++) { in gem_free_rx_buffers()
2342 addr = macb_get_addr(bp, desc); in gem_free_rx_buffers()
2344 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, in gem_free_rx_buffers()
2355 static void macb_free_rx_buffers(struct macb *bp) in macb_free_rx_buffers() argument
2357 struct macb_queue *queue = &bp->queues[0]; in macb_free_rx_buffers()
2360 dma_free_coherent(&bp->pdev->dev, in macb_free_rx_buffers()
2361 bp->rx_ring_size * bp->rx_buffer_size, in macb_free_rx_buffers()
2367 static void macb_free_consistent(struct macb *bp) in macb_free_consistent() argument
2373 bp->macbgem_ops.mog_free_rx_buffers(bp); in macb_free_consistent()
2375 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_free_consistent()
2379 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch; in macb_free_consistent()
2380 dma_free_coherent(&bp->pdev->dev, size, in macb_free_consistent()
2385 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch; in macb_free_consistent()
2386 dma_free_coherent(&bp->pdev->dev, size, in macb_free_consistent()
2393 static int gem_alloc_rx_buffers(struct macb *bp) in gem_alloc_rx_buffers() argument
2399 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in gem_alloc_rx_buffers()
2400 size = bp->rx_ring_size * sizeof(struct sk_buff *); in gem_alloc_rx_buffers()
2405 netdev_dbg(bp->dev, in gem_alloc_rx_buffers()
2407 bp->rx_ring_size, queue->rx_skbuff); in gem_alloc_rx_buffers()
2412 static int macb_alloc_rx_buffers(struct macb *bp) in macb_alloc_rx_buffers() argument
2414 struct macb_queue *queue = &bp->queues[0]; in macb_alloc_rx_buffers()
2417 size = bp->rx_ring_size * bp->rx_buffer_size; in macb_alloc_rx_buffers()
2418 queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, in macb_alloc_rx_buffers()
2423 netdev_dbg(bp->dev, in macb_alloc_rx_buffers()
2429 static int macb_alloc_consistent(struct macb *bp) in macb_alloc_consistent() argument
2435 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_alloc_consistent()
2436 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch; in macb_alloc_consistent()
2437 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, in macb_alloc_consistent()
2442 netdev_dbg(bp->dev, in macb_alloc_consistent()
2447 size = bp->tx_ring_size * sizeof(struct macb_tx_skb); in macb_alloc_consistent()
2452 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch; in macb_alloc_consistent()
2453 queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, in macb_alloc_consistent()
2457 netdev_dbg(bp->dev, in macb_alloc_consistent()
2461 if (bp->macbgem_ops.mog_alloc_rx_buffers(bp)) in macb_alloc_consistent()
2467 macb_free_consistent(bp); in macb_alloc_consistent()
2471 static void gem_init_rings(struct macb *bp) in gem_init_rings() argument
2478 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in gem_init_rings()
2479 for (i = 0; i < bp->tx_ring_size; i++) { in gem_init_rings()
2481 macb_set_addr(bp, desc, 0); in gem_init_rings()
2496 static void macb_init_rings(struct macb *bp) in macb_init_rings() argument
2501 macb_init_rx_ring(&bp->queues[0]); in macb_init_rings()
2503 for (i = 0; i < bp->tx_ring_size; i++) { in macb_init_rings()
2504 desc = macb_tx_desc(&bp->queues[0], i); in macb_init_rings()
2505 macb_set_addr(bp, desc, 0); in macb_init_rings()
2508 bp->queues[0].tx_head = 0; in macb_init_rings()
2509 bp->queues[0].tx_tail = 0; in macb_init_rings()
2513 static void macb_reset_hw(struct macb *bp) in macb_reset_hw() argument
2517 u32 ctrl = macb_readl(bp, NCR); in macb_reset_hw()
2527 macb_writel(bp, NCR, ctrl); in macb_reset_hw()
2530 macb_writel(bp, TSR, -1); in macb_reset_hw()
2531 macb_writel(bp, RSR, -1); in macb_reset_hw()
2534 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_reset_hw()
2537 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_reset_hw()
2542 static u32 gem_mdc_clk_div(struct macb *bp) in gem_mdc_clk_div() argument
2545 unsigned long pclk_hz = clk_get_rate(bp->pclk); in gem_mdc_clk_div()
2563 static u32 macb_mdc_clk_div(struct macb *bp) in macb_mdc_clk_div() argument
2568 if (macb_is_gem(bp)) in macb_mdc_clk_div()
2569 return gem_mdc_clk_div(bp); in macb_mdc_clk_div()
2571 pclk_hz = clk_get_rate(bp->pclk); in macb_mdc_clk_div()
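Both divider helpers pick an MDC divisor from the pclk rate so the management clock stays within the IEEE 802.3 2.5 MHz ceiling; worked examples (rates hypothetical):

    /* pclk = 50 MHz  -> divide by 32 -> MDC ~1.56 MHz
     * pclk = 133 MHz -> divide by 64 -> MDC ~2.08 MHz
     * (GEM adds finer steps such as /48 and /96 for faster pclk)
     */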
2588 static u32 macb_dbw(struct macb *bp) in macb_dbw() argument
2590 if (!macb_is_gem(bp)) in macb_dbw()
2593 switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) { in macb_dbw()
2611 static void macb_configure_dma(struct macb *bp) in macb_configure_dma() argument
2618 buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE; in macb_configure_dma()
2619 if (macb_is_gem(bp)) { in macb_configure_dma()
2620 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); in macb_configure_dma()
2621 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_configure_dma()
2627 if (bp->dma_burst_length) in macb_configure_dma()
2628 dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg); in macb_configure_dma()
2632 if (bp->native_io) in macb_configure_dma()
2637 if (bp->dev->features & NETIF_F_HW_CSUM) in macb_configure_dma()
2644 if (bp->hw_dma_cap & HW_DMA_CAP_64B) in macb_configure_dma()
2648 if (bp->hw_dma_cap & HW_DMA_CAP_PTP) in macb_configure_dma()
2651 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n", in macb_configure_dma()
2653 gem_writel(bp, DMACFG, dmacfg); in macb_configure_dma()
2657 static void macb_init_hw(struct macb *bp) in macb_init_hw() argument
2661 macb_reset_hw(bp); in macb_init_hw()
2662 macb_set_hwaddr(bp); in macb_init_hw()
2664 config = macb_mdc_clk_div(bp); in macb_init_hw()
2667 if (bp->caps & MACB_CAPS_JUMBO) in macb_init_hw()
2671 if (bp->dev->flags & IFF_PROMISC) in macb_init_hw()
2673 else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM) in macb_init_hw()
2675 if (!(bp->dev->flags & IFF_BROADCAST)) in macb_init_hw()
2677 config |= macb_dbw(bp); in macb_init_hw()
2678 macb_writel(bp, NCFGR, config); in macb_init_hw()
2679 if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len) in macb_init_hw()
2680 gem_writel(bp, JML, bp->jumbo_max_len); in macb_init_hw()
2681 bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK; in macb_init_hw()
2682 if (bp->caps & MACB_CAPS_JUMBO) in macb_init_hw()
2683 bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK; in macb_init_hw()
2685 macb_configure_dma(bp); in macb_init_hw()
2750 struct macb *bp = netdev_priv(dev); in macb_sethashtable() local
2760 macb_or_gem_writel(bp, HRB, mc_filter[0]); in macb_sethashtable()
2761 macb_or_gem_writel(bp, HRT, mc_filter[1]); in macb_sethashtable()
2768 struct macb *bp = netdev_priv(dev); in macb_set_rx_mode() local
2770 cfg = macb_readl(bp, NCFGR); in macb_set_rx_mode()
2777 if (macb_is_gem(bp)) in macb_set_rx_mode()
2784 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM) in macb_set_rx_mode()
2790 macb_or_gem_writel(bp, HRB, -1); in macb_set_rx_mode()
2791 macb_or_gem_writel(bp, HRT, -1); in macb_set_rx_mode()
2799 macb_or_gem_writel(bp, HRB, 0); in macb_set_rx_mode()
2800 macb_or_gem_writel(bp, HRT, 0); in macb_set_rx_mode()
2804 macb_writel(bp, NCFGR, cfg); in macb_set_rx_mode()
2810 struct macb *bp = netdev_priv(dev); in macb_open() local
2815 netdev_dbg(bp->dev, "open\n"); in macb_open()
2817 err = pm_runtime_get_sync(&bp->pdev->dev); in macb_open()
2822 macb_init_rx_buffer_size(bp, bufsz); in macb_open()
2824 err = macb_alloc_consistent(bp); in macb_open()
2831 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_open()
2834 macb_init_hw(bp); in macb_open()
2836 err = macb_phylink_connect(bp); in macb_open()
2842 if (bp->ptp_info) in macb_open()
2843 bp->ptp_info->ptp_init(dev); in macb_open()
2848 macb_reset_hw(bp); in macb_open()
2849 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_open()
2851 macb_free_consistent(bp); in macb_open()
2853 pm_runtime_put_sync(&bp->pdev->dev); in macb_open()
2859 struct macb *bp = netdev_priv(dev); in macb_close() local
2866 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_close()
2869 phylink_stop(bp->phylink); in macb_close()
2870 phylink_disconnect_phy(bp->phylink); in macb_close()
2872 spin_lock_irqsave(&bp->lock, flags); in macb_close()
2873 macb_reset_hw(bp); in macb_close()
2875 spin_unlock_irqrestore(&bp->lock, flags); in macb_close()
2877 macb_free_consistent(bp); in macb_close()
2879 if (bp->ptp_info) in macb_close()
2880 bp->ptp_info->ptp_remove(dev); in macb_close()
2882 pm_runtime_put(&bp->pdev->dev); in macb_close()
2897 static void gem_update_stats(struct macb *bp) in gem_update_stats() argument
2903 u32 *p = &bp->hw_stats.gem.tx_octets_31_0; in gem_update_stats()
2907 u64 val = bp->macb_reg_readl(bp, offset); in gem_update_stats()
2909 bp->ethtool_stats[i] += val; in gem_update_stats()
2914 val = bp->macb_reg_readl(bp, offset + 4); in gem_update_stats()
2915 bp->ethtool_stats[i] += ((u64)val) << 32; in gem_update_stats()
2921 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in gem_update_stats()
2923 bp->ethtool_stats[idx++] = *stat; in gem_update_stats()
2926 static struct net_device_stats *gem_get_stats(struct macb *bp) in gem_get_stats() argument
2928 struct gem_stats *hwstat = &bp->hw_stats.gem; in gem_get_stats()
2929 struct net_device_stats *nstat = &bp->dev->stats; in gem_get_stats()
2931 if (!netif_running(bp->dev)) in gem_get_stats()
2934 gem_update_stats(bp); in gem_get_stats()
2970 struct macb *bp; in gem_get_ethtool_stats() local
2972 bp = netdev_priv(dev); in gem_get_ethtool_stats()
2973 gem_update_stats(bp); in gem_get_ethtool_stats()
2974 memcpy(data, &bp->ethtool_stats, sizeof(u64) in gem_get_ethtool_stats()
2980 struct macb *bp = netdev_priv(dev); in gem_get_sset_count() local
2984 return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN; in gem_get_sset_count()
2993 struct macb *bp = netdev_priv(dev); in gem_get_ethtool_strings() local
3004 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in gem_get_ethtool_strings()
3017 struct macb *bp = netdev_priv(dev); in macb_get_stats() local
3018 struct net_device_stats *nstat = &bp->dev->stats; in macb_get_stats()
3019 struct macb_stats *hwstat = &bp->hw_stats.macb; in macb_get_stats()
3021 if (macb_is_gem(bp)) in macb_get_stats()
3022 return gem_get_stats(bp); in macb_get_stats()
3025 macb_update_stats(bp); in macb_get_stats()
3070 struct macb *bp = netdev_priv(dev); in macb_get_regs() local
3074 regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1)) in macb_get_regs()
3077 tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail); in macb_get_regs()
3078 head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head); in macb_get_regs()
3080 regs_buff[0] = macb_readl(bp, NCR); in macb_get_regs()
3081 regs_buff[1] = macb_or_gem_readl(bp, NCFGR); in macb_get_regs()
3082 regs_buff[2] = macb_readl(bp, NSR); in macb_get_regs()
3083 regs_buff[3] = macb_readl(bp, TSR); in macb_get_regs()
3084 regs_buff[4] = macb_readl(bp, RBQP); in macb_get_regs()
3085 regs_buff[5] = macb_readl(bp, TBQP); in macb_get_regs()
3086 regs_buff[6] = macb_readl(bp, RSR); in macb_get_regs()
3087 regs_buff[7] = macb_readl(bp, IMR); in macb_get_regs()
3091 regs_buff[10] = macb_tx_dma(&bp->queues[0], tail); in macb_get_regs()
3092 regs_buff[11] = macb_tx_dma(&bp->queues[0], head); in macb_get_regs()
3094 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) in macb_get_regs()
3095 regs_buff[12] = macb_or_gem_readl(bp, USRIO); in macb_get_regs()
3096 if (macb_is_gem(bp)) in macb_get_regs()
3097 regs_buff[13] = gem_readl(bp, DMACFG); in macb_get_regs()
3102 struct macb *bp = netdev_priv(netdev); in macb_get_wol() local
3104 if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) { in macb_get_wol()
3105 phylink_ethtool_get_wol(bp->phylink, wol); in macb_get_wol()
3108 if (bp->wol & MACB_WOL_ENABLED) in macb_get_wol()
3115 struct macb *bp = netdev_priv(netdev); in macb_set_wol() local
3119 ret = phylink_ethtool_set_wol(bp->phylink, wol); in macb_set_wol()
3126 if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) || in macb_set_wol()
3131 bp->wol |= MACB_WOL_ENABLED; in macb_set_wol()
3133 bp->wol &= ~MACB_WOL_ENABLED; in macb_set_wol()
3135 device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED); in macb_set_wol()
3143 struct macb *bp = netdev_priv(netdev); in macb_get_link_ksettings() local
3145 return phylink_ethtool_ksettings_get(bp->phylink, kset); in macb_get_link_ksettings()
3151 struct macb *bp = netdev_priv(netdev); in macb_set_link_ksettings() local
3153 return phylink_ethtool_ksettings_set(bp->phylink, kset); in macb_set_link_ksettings()
3159 struct macb *bp = netdev_priv(netdev); in macb_get_ringparam() local
3164 ring->rx_pending = bp->rx_ring_size; in macb_get_ringparam()
3165 ring->tx_pending = bp->tx_ring_size; in macb_get_ringparam()
3171 struct macb *bp = netdev_priv(netdev); in macb_set_ringparam() local
3186 if ((new_tx_size == bp->tx_ring_size) && in macb_set_ringparam()
3187 (new_rx_size == bp->rx_ring_size)) { in macb_set_ringparam()
3192 if (netif_running(bp->dev)) { in macb_set_ringparam()
3194 macb_close(bp->dev); in macb_set_ringparam()
3197 bp->rx_ring_size = new_rx_size; in macb_set_ringparam()
3198 bp->tx_ring_size = new_tx_size; in macb_set_ringparam()
3201 macb_open(bp->dev); in macb_set_ringparam()
3207 static unsigned int gem_get_tsu_rate(struct macb *bp) in gem_get_tsu_rate() argument
3212 tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk"); in gem_get_tsu_rate()
3216 else if (!IS_ERR(bp->pclk)) { in gem_get_tsu_rate()
3217 tsu_clk = bp->pclk; in gem_get_tsu_rate()
3232 struct macb *bp = netdev_priv(dev); in gem_get_ts_info() local
3234 if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) { in gem_get_ts_info()
3254 info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1; in gem_get_ts_info()
3273 struct macb *bp = netdev_priv(netdev); in macb_get_ts_info() local
3275 if (bp->ptp_info) in macb_get_ts_info()
3276 return bp->ptp_info->get_ts_info(netdev, info); in macb_get_ts_info()
3281 static void gem_enable_flow_filters(struct macb *bp, bool enable) in gem_enable_flow_filters() argument
3283 struct net_device *netdev = bp->dev; in gem_enable_flow_filters()
3291 num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8)); in gem_enable_flow_filters()
3293 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_enable_flow_filters()
3300 t2_scr = gem_readl_n(bp, SCRT2, fs->location); in gem_enable_flow_filters()
3323 gem_writel_n(bp, SCRT2, fs->location, t2_scr); in gem_enable_flow_filters()
3327 static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs) in gem_prog_cmp_regs() argument
3336 if (!macb_is_gem(bp)) in gem_prog_cmp_regs()
3351 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0); in gem_prog_cmp_regs()
3352 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1); in gem_prog_cmp_regs()
3365 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0); in gem_prog_cmp_regs()
3366 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1); in gem_prog_cmp_regs()
3393 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0); in gem_prog_cmp_regs()
3394 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1); in gem_prog_cmp_regs()
3407 gem_writel_n(bp, SCRT2, index, t2_scr); in gem_prog_cmp_regs()
3413 struct macb *bp = netdev_priv(netdev); in gem_add_flow_filter() local
3432 spin_lock_irqsave(&bp->rx_fs_lock, flags); in gem_add_flow_filter()
3435 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_add_flow_filter()
3448 list_add_tail(&newfs->list, &bp->rx_fs_list.list); in gem_add_flow_filter()
3450 gem_prog_cmp_regs(bp, fs); in gem_add_flow_filter()
3451 bp->rx_fs_list.count++; in gem_add_flow_filter()
3453 gem_enable_flow_filters(bp, 1); in gem_add_flow_filter()
3455 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); in gem_add_flow_filter()
3459 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); in gem_add_flow_filter()
3467 struct macb *bp = netdev_priv(netdev); in gem_del_flow_filter() local
3472 spin_lock_irqsave(&bp->rx_fs_lock, flags); in gem_del_flow_filter()
3474 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_del_flow_filter()
3486 gem_writel_n(bp, SCRT2, fs->location, 0); in gem_del_flow_filter()
3489 bp->rx_fs_list.count--; in gem_del_flow_filter()
3490 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); in gem_del_flow_filter()
3496 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); in gem_del_flow_filter()
3503 struct macb *bp = netdev_priv(netdev); in gem_get_flow_entry() local
3506 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_get_flow_entry()
3518 struct macb *bp = netdev_priv(netdev); in gem_get_all_flow_entries() local
3522 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_get_all_flow_entries()
3528 cmd->data = bp->max_tuples; in gem_get_all_flow_entries()
3537 struct macb *bp = netdev_priv(netdev); in gem_get_rxnfc() local
3542 cmd->data = bp->num_queues; in gem_get_rxnfc()
3545 cmd->rule_cnt = bp->rx_fs_list.count; in gem_get_rxnfc()
3564 struct macb *bp = netdev_priv(netdev); in gem_set_rxnfc() local
3569 if ((cmd->fs.location >= bp->max_tuples) in gem_set_rxnfc()
3570 || (cmd->fs.ring_cookie >= bp->num_queues)) { in gem_set_rxnfc()
3621 struct macb *bp = netdev_priv(dev); in macb_ioctl() local
3626 if (bp->ptp_info) { in macb_ioctl()
3629 return bp->ptp_info->set_hwtst(dev, rq, cmd); in macb_ioctl()
3631 return bp->ptp_info->get_hwtst(dev, rq); in macb_ioctl()
3635 return phylink_mii_ioctl(bp->phylink, rq, cmd); in macb_ioctl()
3638 static inline void macb_set_txcsum_feature(struct macb *bp, in macb_set_txcsum_feature() argument
3643 if (!macb_is_gem(bp)) in macb_set_txcsum_feature()
3646 val = gem_readl(bp, DMACFG); in macb_set_txcsum_feature()
3652 gem_writel(bp, DMACFG, val); in macb_set_txcsum_feature()
3655 static inline void macb_set_rxcsum_feature(struct macb *bp, in macb_set_rxcsum_feature() argument
3658 struct net_device *netdev = bp->dev; in macb_set_rxcsum_feature()
3661 if (!macb_is_gem(bp)) in macb_set_rxcsum_feature()
3664 val = gem_readl(bp, NCFGR); in macb_set_rxcsum_feature()
3670 gem_writel(bp, NCFGR, val); in macb_set_rxcsum_feature()
3673 static inline void macb_set_rxflow_feature(struct macb *bp, in macb_set_rxflow_feature() argument
3676 if (!macb_is_gem(bp)) in macb_set_rxflow_feature()
3679 gem_enable_flow_filters(bp, !!(features & NETIF_F_NTUPLE)); in macb_set_rxflow_feature()
3685 struct macb *bp = netdev_priv(netdev); in macb_set_features() local
3690 macb_set_txcsum_feature(bp, features); in macb_set_features()
3694 macb_set_rxcsum_feature(bp, features); in macb_set_features()
3698 macb_set_rxflow_feature(bp, features); in macb_set_features()
3703 static void macb_restore_features(struct macb *bp) in macb_restore_features() argument
3705 struct net_device *netdev = bp->dev; in macb_restore_features()
3710 macb_set_txcsum_feature(bp, features); in macb_restore_features()
3713 macb_set_rxcsum_feature(bp, features); in macb_restore_features()
3716 list_for_each_entry(item, &bp->rx_fs_list.list, list) in macb_restore_features()
3717 gem_prog_cmp_regs(bp, &item->fs); in macb_restore_features()
3719 macb_set_rxflow_feature(bp, features); in macb_restore_features()
3742 static void macb_configure_caps(struct macb *bp, in macb_configure_caps() argument
3748 bp->caps = dt_conf->caps; in macb_configure_caps()
3750 if (hw_is_gem(bp->regs, bp->native_io)) { in macb_configure_caps()
3751 bp->caps |= MACB_CAPS_MACB_IS_GEM; in macb_configure_caps()
3753 dcfg = gem_readl(bp, DCFG1); in macb_configure_caps()
3755 bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE; in macb_configure_caps()
3757 bp->caps |= MACB_CAPS_PCS; in macb_configure_caps()
3758 dcfg = gem_readl(bp, DCFG12); in macb_configure_caps()
3760 bp->caps |= MACB_CAPS_HIGH_SPEED; in macb_configure_caps()
3761 dcfg = gem_readl(bp, DCFG2); in macb_configure_caps()
3763 bp->caps |= MACB_CAPS_FIFO_MODE; in macb_configure_caps()
3765 if (gem_has_ptp(bp)) { in macb_configure_caps()
3766 if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5))) in macb_configure_caps()
3767 dev_err(&bp->pdev->dev, in macb_configure_caps()
3770 bp->hw_dma_cap |= HW_DMA_CAP_PTP; in macb_configure_caps()
3771 bp->ptp_info = &gem_ptp_info; in macb_configure_caps()
3777 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps); in macb_configure_caps()
3905 struct macb *bp = netdev_priv(dev); in macb_init() local
3910 bp->tx_ring_size = DEFAULT_TX_RING_SIZE; in macb_init()
3911 bp->rx_ring_size = DEFAULT_RX_RING_SIZE; in macb_init()
3918 if (!(bp->queue_mask & (1 << hw_q))) in macb_init()
3921 queue = &bp->queues[q]; in macb_init()
3922 queue->bp = bp; in macb_init()
3933 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { in macb_init()
3947 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { in macb_init()
3976 if (macb_is_gem(bp)) { in macb_init()
3977 bp->max_tx_length = GEM_MAX_TX_LEN; in macb_init()
3978 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers; in macb_init()
3979 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers; in macb_init()
3980 bp->macbgem_ops.mog_init_rings = gem_init_rings; in macb_init()
3981 bp->macbgem_ops.mog_rx = gem_rx; in macb_init()
3984 bp->max_tx_length = MACB_MAX_TX_LEN; in macb_init()
3985 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers; in macb_init()
3986 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers; in macb_init()
3987 bp->macbgem_ops.mog_init_rings = macb_init_rings; in macb_init()
3988 bp->macbgem_ops.mog_rx = macb_rx; in macb_init()
3996 if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6))) in macb_init()
4000 if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE)) in macb_init()
4002 if (bp->caps & MACB_CAPS_SG_DISABLED) in macb_init()
4010 reg = gem_readl(bp, DCFG8); in macb_init()
4011 bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3), in macb_init()
4013 INIT_LIST_HEAD(&bp->rx_fs_list.list); in macb_init()
4014 if (bp->max_tuples > 0) { in macb_init()
4020 gem_writel_n(bp, ETHT, SCRT2_ETHT, reg); in macb_init()
4024 bp->rx_fs_list.count = 0; in macb_init()
4025 spin_lock_init(&bp->rx_fs_lock); in macb_init()
4027 bp->max_tuples = 0; in macb_init()
4030 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) { in macb_init()
4032 if (phy_interface_mode_is_rgmii(bp->phy_interface)) in macb_init()
4033 val = bp->usrio->rgmii; in macb_init()
4034 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && in macb_init()
4035 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) in macb_init()
4036 val = bp->usrio->rmii; in macb_init()
4037 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) in macb_init()
4038 val = bp->usrio->mii; in macb_init()
4040 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN) in macb_init()
4041 val |= bp->usrio->refclk; in macb_init()
4043 macb_or_gem_writel(bp, USRIO, val); in macb_init()
4047 val = macb_mdc_clk_div(bp); in macb_init()
4048 val |= macb_dbw(bp); in macb_init()
4049 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) in macb_init()
4051 macb_writel(bp, NCFGR, val); in macb_init()
4421 struct macb *bp = netdev_priv(dev); in at91ether_init() local
4424 bp->queues[0].bp = bp; in at91ether_init()
4434 macb_writel(bp, NCR, 0); in at91ether_init()
4436 macb_writel(bp, NCFGR, MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG)); in at91ether_init()
4725 struct macb *bp; in macb_probe() local
4755 dev = alloc_etherdev_mq(sizeof(*bp), num_queues); in macb_probe()
4765 bp = netdev_priv(dev); in macb_probe()
4766 bp->pdev = pdev; in macb_probe()
4767 bp->dev = dev; in macb_probe()
4768 bp->regs = mem; in macb_probe()
4769 bp->native_io = native_io; in macb_probe()
4771 bp->macb_reg_readl = hw_readl_native; in macb_probe()
4772 bp->macb_reg_writel = hw_writel_native; in macb_probe()
4774 bp->macb_reg_readl = hw_readl; in macb_probe()
4775 bp->macb_reg_writel = hw_writel; in macb_probe()
4777 bp->num_queues = num_queues; in macb_probe()
4778 bp->queue_mask = queue_mask; in macb_probe()
4780 bp->dma_burst_length = macb_config->dma_burst_length; in macb_probe()
4781 bp->pclk = pclk; in macb_probe()
4782 bp->hclk = hclk; in macb_probe()
4783 bp->tx_clk = tx_clk; in macb_probe()
4784 bp->rx_clk = rx_clk; in macb_probe()
4785 bp->tsu_clk = tsu_clk; in macb_probe()
4787 bp->jumbo_max_len = macb_config->jumbo_max_len; in macb_probe()
4789 bp->wol = 0; in macb_probe()
4791 bp->wol |= MACB_WOL_HAS_MAGIC_PACKET; in macb_probe()
4792 device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); in macb_probe()
4794 bp->usrio = macb_config->usrio; in macb_probe()
4796 spin_lock_init(&bp->lock); in macb_probe()
4799 macb_configure_caps(bp, macb_config); in macb_probe()
4802 if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) { in macb_probe()
4804 bp->hw_dma_cap |= HW_DMA_CAP_64B; in macb_probe()
4817 if (bp->caps & MACB_CAPS_JUMBO) in macb_probe()
4818 dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN; in macb_probe()
4822 if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) { in macb_probe()
4823 val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10)); in macb_probe()
4825 bp->rx_bd_rd_prefetch = (2 << (val - 1)) * in macb_probe()
4826 macb_dma_desc_get_size(bp); in macb_probe()
4828 val = GEM_BFEXT(TXBD_RDBUFF, gem_readl(bp, DCFG10)); in macb_probe()
4830 bp->tx_bd_rd_prefetch = (2 << (val - 1)) * in macb_probe()
4831 macb_dma_desc_get_size(bp); in macb_probe()
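Decoding of the DCFG10 prefetch fields used above: the register stores a power-of-two descriptor count, so val = 1, 2, 3 means 2, 4, 8 descriptors of read-ahead. Worked example:

    /* val = 2 with the basic 8-byte descriptor reserves
     * (2 << (2 - 1)) * 8 = 32 extra bytes per ring, so the controller's
     * descriptor prefetch never walks off the end of the ring memory.
     */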
4834 bp->rx_intr_mask = MACB_RX_INT_FLAGS; in macb_probe()
4835 if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR) in macb_probe()
4836 bp->rx_intr_mask |= MACB_BIT(RXUBR); in macb_probe()
4838 err = of_get_ethdev_address(np, bp->dev); in macb_probe()
4842 macb_get_hwaddr(bp); in macb_probe()
4847 bp->phy_interface = PHY_INTERFACE_MODE_MII; in macb_probe()
4849 bp->phy_interface = interface; in macb_probe()
4856 err = macb_mii_init(bp); in macb_probe()
4868 tasklet_setup(&bp->hresp_err_tasklet, macb_hresp_error_task); in macb_probe()
4871 macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID), in macb_probe()
4874 pm_runtime_mark_last_busy(&bp->pdev->dev); in macb_probe()
4875 pm_runtime_put_autosuspend(&bp->pdev->dev); in macb_probe()
4880 mdiobus_unregister(bp->mii_bus); in macb_probe()
4881 mdiobus_free(bp->mii_bus); in macb_probe()
4898 struct macb *bp; in macb_remove() local
4903 bp = netdev_priv(dev); in macb_remove()
4904 mdiobus_unregister(bp->mii_bus); in macb_remove()
4905 mdiobus_free(bp->mii_bus); in macb_remove()
4908 tasklet_kill(&bp->hresp_err_tasklet); in macb_remove()
4912 macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk, in macb_remove()
4913 bp->rx_clk, bp->tsu_clk); in macb_remove()
4916 phylink_destroy(bp->phylink); in macb_remove()
4926 struct macb *bp = netdev_priv(netdev); in macb_suspend() local
4935 if (bp->wol & MACB_WOL_ENABLED) { in macb_suspend()
4936 spin_lock_irqsave(&bp->lock, flags); in macb_suspend()
4938 macb_writel(bp, TSR, -1); in macb_suspend()
4939 macb_writel(bp, RSR, -1); in macb_suspend()
4940 for (q = 0, queue = bp->queues; q < bp->num_queues; in macb_suspend()
4945 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_suspend()
4951 devm_free_irq(dev, bp->queues[0].irq, bp->queues); in macb_suspend()
4952 if (macb_is_gem(bp)) { in macb_suspend()
4953 err = devm_request_irq(dev, bp->queues[0].irq, gem_wol_interrupt, in macb_suspend()
4954 IRQF_SHARED, netdev->name, bp->queues); in macb_suspend()
4958 bp->queues[0].irq, err); in macb_suspend()
4959 spin_unlock_irqrestore(&bp->lock, flags); in macb_suspend()
4962 queue_writel(bp->queues, IER, GEM_BIT(WOL)); in macb_suspend()
4963 gem_writel(bp, WOL, MACB_BIT(MAG)); in macb_suspend()
4965 err = devm_request_irq(dev, bp->queues[0].irq, macb_wol_interrupt, in macb_suspend()
4966 IRQF_SHARED, netdev->name, bp->queues); in macb_suspend()
4970 bp->queues[0].irq, err); in macb_suspend()
4971 spin_unlock_irqrestore(&bp->lock, flags); in macb_suspend()
4974 queue_writel(bp->queues, IER, MACB_BIT(WOL)); in macb_suspend()
4975 macb_writel(bp, WOL, MACB_BIT(MAG)); in macb_suspend()
4977 spin_unlock_irqrestore(&bp->lock, flags); in macb_suspend()
4979 enable_irq_wake(bp->queues[0].irq); in macb_suspend()
4983 for (q = 0, queue = bp->queues; q < bp->num_queues; in macb_suspend()
4987 if (!(bp->wol & MACB_WOL_ENABLED)) { in macb_suspend()
4989 phylink_stop(bp->phylink); in macb_suspend()
4991 spin_lock_irqsave(&bp->lock, flags); in macb_suspend()
4992 macb_reset_hw(bp); in macb_suspend()
4993 spin_unlock_irqrestore(&bp->lock, flags); in macb_suspend()
4996 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) in macb_suspend()
4997 bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO); in macb_suspend()
5000 bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT); in macb_suspend()
5002 if (bp->ptp_info) in macb_suspend()
5003 bp->ptp_info->ptp_remove(netdev); in macb_suspend()
5013 struct macb *bp = netdev_priv(netdev); in macb_resume() local
5025 if (bp->wol & MACB_WOL_ENABLED) { in macb_resume()
5026 spin_lock_irqsave(&bp->lock, flags); in macb_resume()
5028 if (macb_is_gem(bp)) { in macb_resume()
5029 queue_writel(bp->queues, IDR, GEM_BIT(WOL)); in macb_resume()
5030 gem_writel(bp, WOL, 0); in macb_resume()
5032 queue_writel(bp->queues, IDR, MACB_BIT(WOL)); in macb_resume()
5033 macb_writel(bp, WOL, 0); in macb_resume()
5036 queue_readl(bp->queues, ISR); in macb_resume()
5037 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_resume()
5038 queue_writel(bp->queues, ISR, -1); in macb_resume()
5040 devm_free_irq(dev, bp->queues[0].irq, bp->queues); in macb_resume()
5041 err = devm_request_irq(dev, bp->queues[0].irq, macb_interrupt, in macb_resume()
5042 IRQF_SHARED, netdev->name, bp->queues); in macb_resume()
5046 bp->queues[0].irq, err); in macb_resume()
5047 spin_unlock_irqrestore(&bp->lock, flags); in macb_resume()
5050 spin_unlock_irqrestore(&bp->lock, flags); in macb_resume()
5052 disable_irq_wake(bp->queues[0].irq); in macb_resume()
5058 phylink_stop(bp->phylink); in macb_resume()
5062 for (q = 0, queue = bp->queues; q < bp->num_queues; in macb_resume()
5067 gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2); in macb_resume()
5069 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) in macb_resume()
5070 macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio); in macb_resume()
5072 macb_writel(bp, NCR, MACB_BIT(MPE)); in macb_resume()
5073 macb_init_hw(bp); in macb_resume()
5075 macb_restore_features(bp); in macb_resume()
5077 phylink_start(bp->phylink); in macb_resume()
5081 if (bp->ptp_info) in macb_resume()
5082 bp->ptp_info->ptp_init(netdev); in macb_resume()
5090 struct macb *bp = netdev_priv(netdev); in macb_runtime_suspend() local
5093 macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk, bp->rx_clk, bp->tsu_clk); in macb_runtime_suspend()
5095 macb_clks_disable(NULL, NULL, NULL, NULL, bp->tsu_clk); in macb_runtime_suspend()
5103 struct macb *bp = netdev_priv(netdev); in macb_runtime_resume() local
5106 clk_prepare_enable(bp->pclk); in macb_runtime_resume()
5107 clk_prepare_enable(bp->hclk); in macb_runtime_resume()
5108 clk_prepare_enable(bp->tx_clk); in macb_runtime_resume()
5109 clk_prepare_enable(bp->rx_clk); in macb_runtime_resume()
5111 clk_prepare_enable(bp->tsu_clk); in macb_runtime_resume()