Lines matching refs: bp — uses of the struct macb *bp driver-state pointer in the Cadence MACB/GEM Ethernet driver. Each hit shows the source line number, the matching code, the enclosing function, and whether bp is an argument or a local.

54 #define RX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\  argument
55 * (bp)->rx_ring_size)
60 #define TX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \ argument
61 * (bp)->tx_ring_size)
64 #define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4) argument
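The three macros above size and police the DMA rings: the *_RING_BYTES macros multiply the per-descriptor size (which depends on hw_dma_cap) by the ring length, and MACB_TX_WAKEUP_THRESH is the in-flight watermark (three quarters of the TX ring) at or below which a stopped queue is woken, as used in macb_tx_interrupt() further down. A standalone sketch of the arithmetic, with a 16-byte descriptor and a 512-entry ring assumed purely for illustration:

	#include <stdio.h>

	#define DESC_SIZE    16u   /* assumed; the driver reads it via macb_dma_desc_get_size() */
	#define TX_RING_SIZE 512u  /* assumed ring length */

	int main(void)
	{
		unsigned int ring_bytes = DESC_SIZE * TX_RING_SIZE; /* TX_RING_BYTES */
		unsigned int wakeup = 3 * TX_RING_SIZE / 4;         /* MACB_TX_WAKEUP_THRESH */

		printf("ring = %u bytes, wake queue at <= %u in-flight\n",
		       ring_bytes, wakeup);
		return 0;
	}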
127 static unsigned int macb_dma_desc_get_size(struct macb *bp) in macb_dma_desc_get_size() argument
132 switch (bp->hw_dma_cap) { in macb_dma_desc_get_size()
154 static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx) in macb_adj_dma_desc_idx() argument
157 switch (bp->hw_dma_cap) { in macb_adj_dma_desc_idx()
173 static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc) in macb_64b_desc() argument
181 static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index) in macb_tx_ring_wrap() argument
183 return index & (bp->tx_ring_size - 1); in macb_tx_ring_wrap()
189 index = macb_tx_ring_wrap(queue->bp, index); in macb_tx_desc()
190 index = macb_adj_dma_desc_idx(queue->bp, index); in macb_tx_desc()
197 return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)]; in macb_tx_skb()
204 offset = macb_tx_ring_wrap(queue->bp, index) * in macb_tx_dma()
205 macb_dma_desc_get_size(queue->bp); in macb_tx_dma()
210 static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index) in macb_rx_ring_wrap() argument
212 return index & (bp->rx_ring_size - 1); in macb_rx_ring_wrap()
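Both wrap helpers fold a free-running producer/consumer index into a ring slot with a mask; index & (size - 1) equals index % size exactly when size is a power of two, which the driver's ring sizes are. A minimal standalone sketch:

	#include <assert.h>

	/* Valid only for power-of-two ring sizes. */
	static unsigned int ring_wrap(unsigned int index, unsigned int ring_size)
	{
		assert((ring_size & (ring_size - 1)) == 0);
		return index & (ring_size - 1);
	}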
217 index = macb_rx_ring_wrap(queue->bp, index); in macb_rx_desc()
218 index = macb_adj_dma_desc_idx(queue->bp, index); in macb_rx_desc()
224 return queue->rx_buffers + queue->bp->rx_buffer_size * in macb_rx_buffer()
225 macb_rx_ring_wrap(queue->bp, index); in macb_rx_buffer()
229 static u32 hw_readl_native(struct macb *bp, int offset) in hw_readl_native() argument
231 return __raw_readl(bp->regs + offset); in hw_readl_native()
234 static void hw_writel_native(struct macb *bp, int offset, u32 value) in hw_writel_native() argument
236 __raw_writel(value, bp->regs + offset); in hw_writel_native()
239 static u32 hw_readl(struct macb *bp, int offset) in hw_readl() argument
241 return readl_relaxed(bp->regs + offset); in hw_readl()
244 static void hw_writel(struct macb *bp, int offset, u32 value) in hw_writel() argument
246 writel_relaxed(value, bp->regs + offset); in hw_writel()
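Two accessor flavors coexist: __raw_readl()/__raw_writel() keep native CPU byte order (for big-endian integrations of the IP), while readl_relaxed()/writel_relaxed() are little-endian and skip the heavier barriers of readl()/writel(). One pair is selected at probe time and stored in function pointers (see the macb_probe() hits near the end of this listing); a sketch of that dispatch:

	/* Chosen once in macb_probe(), used everywhere afterwards. */
	if (native_io) {
		bp->macb_reg_readl  = hw_readl_native;
		bp->macb_reg_writel = hw_writel_native;
	} else {
		bp->macb_reg_readl  = hw_readl;
		bp->macb_reg_writel = hw_writel;
	}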
278 static void macb_set_hwaddr(struct macb *bp) in macb_set_hwaddr() argument
283 bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr)); in macb_set_hwaddr()
284 macb_or_gem_writel(bp, SA1B, bottom); in macb_set_hwaddr()
285 top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4))); in macb_set_hwaddr()
286 macb_or_gem_writel(bp, SA1T, top); in macb_set_hwaddr()
289 macb_or_gem_writel(bp, SA2B, 0); in macb_set_hwaddr()
290 macb_or_gem_writel(bp, SA2T, 0); in macb_set_hwaddr()
291 macb_or_gem_writel(bp, SA3B, 0); in macb_set_hwaddr()
292 macb_or_gem_writel(bp, SA3T, 0); in macb_set_hwaddr()
293 macb_or_gem_writel(bp, SA4B, 0); in macb_set_hwaddr()
294 macb_or_gem_writel(bp, SA4T, 0); in macb_set_hwaddr()
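macb_set_hwaddr() loads specific-address pair 1 with the six-byte station address, the low 32 bits in SA1B ("bottom") and the remaining 16 in SA1T ("top"), then clears pairs 2-4 so stale filters cannot match. The byte packing, as a standalone little-endian sketch (example address only):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

		/* Same layout as cpu_to_le32() over bytes 0-3 and
		 * cpu_to_le16() over bytes 4-5. */
		uint32_t bottom = mac[0] | mac[1] << 8 | mac[2] << 16 |
				  (uint32_t)mac[3] << 24;
		uint16_t top = mac[4] | mac[5] << 8;

		printf("SA1B=0x%08x SA1T=0x%04x\n", bottom, top);
		return 0;
	}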
297 static void macb_get_hwaddr(struct macb *bp) in macb_get_hwaddr() argument
306 bottom = macb_or_gem_readl(bp, SA1B + i * 8); in macb_get_hwaddr()
307 top = macb_or_gem_readl(bp, SA1T + i * 8); in macb_get_hwaddr()
317 memcpy(bp->dev->dev_addr, addr, sizeof(addr)); in macb_get_hwaddr()
322 dev_info(&bp->pdev->dev, "invalid hw address, using random\n"); in macb_get_hwaddr()
323 eth_hw_addr_random(bp->dev); in macb_get_hwaddr()
326 static int macb_mdio_wait_for_idle(struct macb *bp) in macb_mdio_wait_for_idle() argument
330 return readx_poll_timeout(MACB_READ_NSR, bp, val, val & MACB_BIT(IDLE), in macb_mdio_wait_for_idle()
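macb_mdio_wait_for_idle() polls the network status register until the IDLE bit sets, via readx_poll_timeout() from <linux/iopoll.h>: op(addr) is re-read into val until cond becomes true or the timeout (in microseconds) expires, returning 0 or -ETIMEDOUT. The call shape in outline; the 1 us poll interval and 1000 us budget below are illustrative, not the driver's constants:

	u32 val;
	int ret;

	ret = readx_poll_timeout(MACB_READ_NSR, bp, val,
				 val & MACB_BIT(IDLE),
				 1, 1000);
	if (ret)
		return ret;	/* -ETIMEDOUT */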
336 struct macb *bp = bus->priv; in macb_mdio_read() local
339 status = pm_runtime_get_sync(&bp->pdev->dev); in macb_mdio_read()
341 pm_runtime_put_noidle(&bp->pdev->dev); in macb_mdio_read()
345 status = macb_mdio_wait_for_idle(bp); in macb_mdio_read()
350 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) in macb_mdio_read()
357 status = macb_mdio_wait_for_idle(bp); in macb_mdio_read()
361 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) in macb_mdio_read()
367 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF) in macb_mdio_read()
374 status = macb_mdio_wait_for_idle(bp); in macb_mdio_read()
378 status = MACB_BFEXT(DATA, macb_readl(bp, MAN)); in macb_mdio_read()
381 pm_runtime_mark_last_busy(&bp->pdev->dev); in macb_mdio_read()
382 pm_runtime_put_autosuspend(&bp->pdev->dev); in macb_mdio_read()
390 struct macb *bp = bus->priv; in macb_mdio_write() local
393 status = pm_runtime_get_sync(&bp->pdev->dev); in macb_mdio_write()
395 pm_runtime_put_noidle(&bp->pdev->dev); in macb_mdio_write()
399 status = macb_mdio_wait_for_idle(bp); in macb_mdio_write()
404 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) in macb_mdio_write()
411 status = macb_mdio_wait_for_idle(bp); in macb_mdio_write()
415 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF) in macb_mdio_write()
422 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF) in macb_mdio_write()
430 status = macb_mdio_wait_for_idle(bp); in macb_mdio_write()
435 pm_runtime_mark_last_busy(&bp->pdev->dev); in macb_mdio_write()
436 pm_runtime_put_autosuspend(&bp->pdev->dev); in macb_mdio_write()
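Both MDIO accessors wrap the bus transaction in the same runtime-PM bracket: take a synchronous reference (balancing with _noidle on failure), do the I/O, then mark the device busy and release with autosuspend so bursts of MDIO traffic do not repeatedly gate the clocks. The shared skeleton of macb_mdio_read()/macb_mdio_write():

	status = pm_runtime_get_sync(&bp->pdev->dev);
	if (status < 0) {
		pm_runtime_put_noidle(&bp->pdev->dev);
		return status;
	}

	/* ... wait for IDLE, program the MAN register, wait again ... */

	pm_runtime_mark_last_busy(&bp->pdev->dev);
	pm_runtime_put_autosuspend(&bp->pdev->dev);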
441 static void macb_init_buffers(struct macb *bp) in macb_init_buffers() argument
446 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_init_buffers()
449 if (bp->hw_dma_cap & HW_DMA_CAP_64B) in macb_init_buffers()
455 if (bp->hw_dma_cap & HW_DMA_CAP_64B) in macb_init_buffers()
467 static void macb_set_tx_clk(struct macb *bp, int speed) in macb_set_tx_clk() argument
471 if (!bp->tx_clk || (bp->caps & MACB_CAPS_CLK_HW_CHG)) in macb_set_tx_clk()
475 if (bp->phy_interface == PHY_INTERFACE_MODE_MII) in macb_set_tx_clk()
492 rate_rounded = clk_round_rate(bp->tx_clk, rate); in macb_set_tx_clk()
502 netdev_warn(bp->dev, in macb_set_tx_clk()
506 if (clk_set_rate(bp->tx_clk, rate_rounded)) in macb_set_tx_clk()
507 netdev_err(bp->dev, "adjusting tx_clk failed.\n"); in macb_set_tx_clk()
516 struct macb *bp = netdev_priv(ndev); in macb_validate() local
530 if (!macb_is_gem(bp) && in macb_validate()
538 !(bp->caps & MACB_CAPS_HIGH_SPEED && in macb_validate()
539 bp->caps & MACB_CAPS_PCS)) { in macb_validate()
548 if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE && in macb_validate()
567 if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE && in macb_validate()
575 if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF)) in macb_validate()
588 struct macb *bp = container_of(pcs, struct macb, phylink_pcs); in macb_usx_pcs_link_up() local
591 config = gem_readl(bp, USX_CONTROL); in macb_usx_pcs_link_up()
596 gem_writel(bp, USX_CONTROL, config); in macb_usx_pcs_link_up()
602 struct macb *bp = container_of(pcs, struct macb, phylink_pcs); in macb_usx_pcs_get_state() local
609 val = gem_readl(bp, USX_STATUS); in macb_usx_pcs_get_state()
611 val = gem_readl(bp, NCFGR); in macb_usx_pcs_get_state()
622 struct macb *bp = container_of(pcs, struct macb, phylink_pcs); in macb_usx_pcs_config() local
624 gem_writel(bp, USX_CONTROL, gem_readl(bp, USX_CONTROL) | in macb_usx_pcs_config()
666 struct macb *bp = netdev_priv(ndev); in macb_mac_config() local
671 spin_lock_irqsave(&bp->lock, flags); in macb_mac_config()
673 old_ctrl = ctrl = macb_or_gem_readl(bp, NCFGR); in macb_mac_config()
674 old_ncr = ncr = macb_or_gem_readl(bp, NCR); in macb_mac_config()
676 if (bp->caps & MACB_CAPS_MACB_IS_EMAC) { in macb_mac_config()
679 } else if (macb_is_gem(bp)) { in macb_mac_config()
693 macb_or_gem_writel(bp, NCFGR, ctrl); in macb_mac_config()
696 macb_or_gem_writel(bp, NCR, ncr); in macb_mac_config()
702 if (macb_is_gem(bp) && state->interface == PHY_INTERFACE_MODE_SGMII) { in macb_mac_config()
705 old_pcsctrl = gem_readl(bp, PCSCNTRL); in macb_mac_config()
711 gem_writel(bp, PCSCNTRL, pcsctrl); in macb_mac_config()
714 spin_unlock_irqrestore(&bp->lock, flags); in macb_mac_config()
721 struct macb *bp = netdev_priv(ndev); in macb_mac_link_down() local
726 if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) in macb_mac_link_down()
727 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_mac_link_down()
729 bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); in macb_mac_link_down()
732 ctrl = macb_readl(bp, NCR) & ~(MACB_BIT(RE) | MACB_BIT(TE)); in macb_mac_link_down()
733 macb_writel(bp, NCR, ctrl); in macb_mac_link_down()
745 struct macb *bp = netdev_priv(ndev); in macb_mac_link_up() local
751 spin_lock_irqsave(&bp->lock, flags); in macb_mac_link_up()
753 ctrl = macb_or_gem_readl(bp, NCFGR); in macb_mac_link_up()
763 if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) { in macb_mac_link_up()
765 if (macb_is_gem(bp)) { in macb_mac_link_up()
775 macb_set_tx_clk(bp, speed); in macb_mac_link_up()
780 bp->macbgem_ops.mog_init_rings(bp); in macb_mac_link_up()
781 macb_init_buffers(bp); in macb_mac_link_up()
783 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_mac_link_up()
785 bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP)); in macb_mac_link_up()
788 macb_or_gem_writel(bp, NCFGR, ctrl); in macb_mac_link_up()
790 if (bp->phy_interface == PHY_INTERFACE_MODE_10GBASER) in macb_mac_link_up()
791 gem_writel(bp, HS_MAC_CONFIG, GEM_BFINS(HS_MAC_SPEED, HS_SPEED_10000M, in macb_mac_link_up()
792 gem_readl(bp, HS_MAC_CONFIG))); in macb_mac_link_up()
794 spin_unlock_irqrestore(&bp->lock, flags); in macb_mac_link_up()
797 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE)); in macb_mac_link_up()
806 struct macb *bp = netdev_priv(ndev); in macb_mac_prepare() local
809 bp->phylink_pcs.ops = &macb_phylink_usx_pcs_ops; in macb_mac_prepare()
811 bp->phylink_pcs.ops = &macb_phylink_pcs_ops; in macb_mac_prepare()
813 bp->phylink_pcs.ops = NULL; in macb_mac_prepare()
815 if (bp->phylink_pcs.ops) in macb_mac_prepare()
816 phylink_set_pcs(bp->phylink, &bp->phylink_pcs); in macb_mac_prepare()
836 static int macb_phylink_connect(struct macb *bp) in macb_phylink_connect() argument
838 struct device_node *dn = bp->pdev->dev.of_node; in macb_phylink_connect()
839 struct net_device *dev = bp->dev; in macb_phylink_connect()
844 ret = phylink_of_phy_connect(bp->phylink, dn, 0); in macb_phylink_connect()
847 phydev = phy_find_first(bp->mii_bus); in macb_phylink_connect()
854 ret = phylink_connect_phy(bp->phylink, phydev); in macb_phylink_connect()
862 phylink_start(bp->phylink); in macb_phylink_connect()
871 struct macb *bp = netdev_priv(ndev); in macb_get_pcs_fixed_state() local
873 state->link = (macb_readl(bp, NSR) & MACB_BIT(NSR_LINK)) != 0; in macb_get_pcs_fixed_state()
879 struct macb *bp = netdev_priv(dev); in macb_mii_probe() local
881 bp->phylink_config.dev = &dev->dev; in macb_mii_probe()
882 bp->phylink_config.type = PHYLINK_NETDEV; in macb_mii_probe()
884 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) { in macb_mii_probe()
885 bp->phylink_config.poll_fixed_state = true; in macb_mii_probe()
886 bp->phylink_config.get_fixed_state = macb_get_pcs_fixed_state; in macb_mii_probe()
889 bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode, in macb_mii_probe()
890 bp->phy_interface, &macb_phylink_ops); in macb_mii_probe()
891 if (IS_ERR(bp->phylink)) { in macb_mii_probe()
893 PTR_ERR(bp->phylink)); in macb_mii_probe()
894 return PTR_ERR(bp->phylink); in macb_mii_probe()
900 static int macb_mdiobus_register(struct macb *bp) in macb_mdiobus_register() argument
902 struct device_node *child, *np = bp->pdev->dev.of_node; in macb_mdiobus_register()
905 return mdiobus_register(bp->mii_bus); in macb_mdiobus_register()
919 return of_mdiobus_register(bp->mii_bus, np); in macb_mdiobus_register()
922 return mdiobus_register(bp->mii_bus); in macb_mdiobus_register()
925 static int macb_mii_init(struct macb *bp) in macb_mii_init() argument
930 macb_writel(bp, NCR, MACB_BIT(MPE)); in macb_mii_init()
932 bp->mii_bus = mdiobus_alloc(); in macb_mii_init()
933 if (!bp->mii_bus) { in macb_mii_init()
938 bp->mii_bus->name = "MACB_mii_bus"; in macb_mii_init()
939 bp->mii_bus->read = &macb_mdio_read; in macb_mii_init()
940 bp->mii_bus->write = &macb_mdio_write; in macb_mii_init()
941 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", in macb_mii_init()
942 bp->pdev->name, bp->pdev->id); in macb_mii_init()
943 bp->mii_bus->priv = bp; in macb_mii_init()
944 bp->mii_bus->parent = &bp->pdev->dev; in macb_mii_init()
946 dev_set_drvdata(&bp->dev->dev, bp->mii_bus); in macb_mii_init()
948 err = macb_mdiobus_register(bp); in macb_mii_init()
952 err = macb_mii_probe(bp->dev); in macb_mii_init()
959 mdiobus_unregister(bp->mii_bus); in macb_mii_init()
961 mdiobus_free(bp->mii_bus); in macb_mii_init()
966 static void macb_update_stats(struct macb *bp) in macb_update_stats() argument
968 u32 *p = &bp->hw_stats.macb.rx_pause_frames; in macb_update_stats()
969 u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1; in macb_update_stats()
975 *p += bp->macb_reg_readl(bp, offset); in macb_update_stats()
978 static int macb_halt_tx(struct macb *bp) in macb_halt_tx() argument
983 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT)); in macb_halt_tx()
988 status = macb_readl(bp, TSR); in macb_halt_tx()
998 static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb) in macb_tx_unmap() argument
1002 dma_unmap_page(&bp->pdev->dev, tx_skb->mapping, in macb_tx_unmap()
1005 dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, in macb_tx_unmap()
1016 static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr) in macb_set_addr() argument
1021 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { in macb_set_addr()
1022 desc_64 = macb_64b_desc(bp, desc); in macb_set_addr()
1034 static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc) in macb_get_addr() argument
1040 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { in macb_get_addr()
1041 desc_64 = macb_64b_desc(bp, desc); in macb_get_addr()
1047 if (bp->hw_dma_cap & HW_DMA_CAP_PTP) in macb_get_addr()
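macb_set_addr()/macb_get_addr() hide the descriptor layout split: with HW_DMA_CAP_64B each descriptor gains an extra word pair, and the upper 32 address bits live there. A sketch of the store side, assuming the addrh field of struct macb_dma_desc_64 as declared in the driver headers:

	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
		struct macb_dma_desc_64 *desc_64 = macb_64b_desc(bp, desc);

		desc_64->addrh = upper_32_bits(addr);
		/* Publish the high word before the low word, since
		 * writing addr may hand the descriptor to hardware. */
		dma_wmb();
	}
	desc->addr = lower_32_bits(addr);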
1057 struct macb *bp = queue->bp; in macb_tx_error_task() local
1064 netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n", in macb_tx_error_task()
1065 (unsigned int)(queue - bp->queues), in macb_tx_error_task()
1074 spin_lock_irqsave(&bp->lock, flags); in macb_tx_error_task()
1077 netif_tx_stop_all_queues(bp->dev); in macb_tx_error_task()
1083 if (macb_halt_tx(bp)) in macb_tx_error_task()
1085 netdev_err(bp->dev, "BUG: halt tx timed out\n"); in macb_tx_error_task()
1101 macb_tx_unmap(bp, tx_skb); in macb_tx_error_task()
1111 netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n", in macb_tx_error_task()
1112 macb_tx_ring_wrap(bp, tail), in macb_tx_error_task()
1114 bp->dev->stats.tx_packets++; in macb_tx_error_task()
1116 bp->dev->stats.tx_bytes += skb->len; in macb_tx_error_task()
1125 netdev_err(bp->dev, in macb_tx_error_task()
1131 macb_tx_unmap(bp, tx_skb); in macb_tx_error_task()
1136 macb_set_addr(bp, desc, 0); in macb_tx_error_task()
1145 if (bp->hw_dma_cap & HW_DMA_CAP_64B) in macb_tx_error_task()
1153 macb_writel(bp, TSR, macb_readl(bp, TSR)); in macb_tx_error_task()
1157 netif_tx_start_all_queues(bp->dev); in macb_tx_error_task()
1158 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); in macb_tx_error_task()
1160 spin_unlock_irqrestore(&bp->lock, flags); in macb_tx_error_task()
1198 struct macb *bp = queue->bp; in macb_tx_interrupt() local
1199 u16 queue_index = queue - bp->queues; in macb_tx_interrupt()
1201 status = macb_readl(bp, TSR); in macb_tx_interrupt()
1202 macb_writel(bp, TSR, status); in macb_tx_interrupt()
1204 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_tx_interrupt()
1207 netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n", in macb_tx_interrupt()
1245 netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n", in macb_tx_interrupt()
1246 macb_tx_ring_wrap(bp, tail), in macb_tx_interrupt()
1248 bp->dev->stats.tx_packets++; in macb_tx_interrupt()
1250 bp->dev->stats.tx_bytes += skb->len; in macb_tx_interrupt()
1255 macb_tx_unmap(bp, tx_skb); in macb_tx_interrupt()
1267 if (__netif_subqueue_stopped(bp->dev, queue_index) && in macb_tx_interrupt()
1269 bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp)) in macb_tx_interrupt()
1270 netif_wake_subqueue(bp->dev, queue_index); in macb_tx_interrupt()
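TX completion walks the ring from tx_tail, unmapping and counting each finished skb, then re-arms a stopped subqueue once occupancy drops to the wakeup threshold. CIRC_CNT() is the <linux/circ_buf.h> helper, effectively (head - tail) & (size - 1) for power-of-two rings:

	/* Wake once at most 3/4 of the TX ring is still in flight. */
	if (__netif_subqueue_stopped(bp->dev, queue_index) &&
	    CIRC_CNT(queue->tx_head, queue->tx_tail,
		     bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
		netif_wake_subqueue(bp->dev, queue_index);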
1278 struct macb *bp = queue->bp; in gem_rx_refill() local
1282 bp->rx_ring_size) > 0) { in gem_rx_refill()
1283 entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head); in gem_rx_refill()
1292 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size); in gem_rx_refill()
1294 netdev_err(bp->dev, in gem_rx_refill()
1300 paddr = dma_map_single(&bp->pdev->dev, skb->data, in gem_rx_refill()
1301 bp->rx_buffer_size, in gem_rx_refill()
1303 if (dma_mapping_error(&bp->pdev->dev, paddr)) { in gem_rx_refill()
1310 if (entry == bp->rx_ring_size - 1) in gem_rx_refill()
1317 macb_set_addr(bp, desc, paddr); in gem_rx_refill()
1332 netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n", in gem_rx_refill()
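gem_rx_refill() keeps the RX ring stocked: allocate an skb of rx_buffer_size, DMA-map it, and only on success publish the bus address into the descriptor, OR-ing in the wrap bit on the final slot. The allocate-map-publish sequence in outline:

	skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
	if (!skb)
		break;				/* retry on a later refill */

	paddr = dma_map_single(&bp->pdev->dev, skb->data,
			       bp->rx_buffer_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(&bp->pdev->dev, paddr)) {
		dev_kfree_skb(skb);
		break;
	}

	if (entry == bp->rx_ring_size - 1)
		paddr |= MACB_BIT(RX_WRAP);	/* last descriptor wraps */
	macb_set_addr(bp, desc, paddr);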
1360 struct macb *bp = queue->bp; in gem_rx() local
1372 entry = macb_rx_ring_wrap(bp, queue->rx_tail); in gem_rx()
1379 addr = macb_get_addr(bp, desc); in gem_rx()
1393 netdev_err(bp->dev, in gem_rx()
1395 bp->dev->stats.rx_dropped++; in gem_rx()
1401 netdev_err(bp->dev, in gem_rx()
1403 bp->dev->stats.rx_dropped++; in gem_rx()
1409 len = ctrl & bp->rx_frm_len_mask; in gem_rx()
1411 netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len); in gem_rx()
1414 dma_unmap_single(&bp->pdev->dev, addr, in gem_rx()
1415 bp->rx_buffer_size, DMA_FROM_DEVICE); in gem_rx()
1417 skb->protocol = eth_type_trans(skb, bp->dev); in gem_rx()
1419 if (bp->dev->features & NETIF_F_RXCSUM && in gem_rx()
1420 !(bp->dev->flags & IFF_PROMISC) && in gem_rx()
1424 bp->dev->stats.rx_packets++; in gem_rx()
1426 bp->dev->stats.rx_bytes += skb->len; in gem_rx()
1429 gem_ptp_do_rxstamp(bp, skb, desc); in gem_rx()
1432 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", in gem_rx()
1456 struct macb *bp = queue->bp; in macb_rx_frame() local
1459 len = desc->ctrl & bp->rx_frm_len_mask; in macb_rx_frame()
1461 netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n", in macb_rx_frame()
1462 macb_rx_ring_wrap(bp, first_frag), in macb_rx_frame()
1463 macb_rx_ring_wrap(bp, last_frag), len); in macb_rx_frame()
1473 skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN); in macb_rx_frame()
1475 bp->dev->stats.rx_dropped++; in macb_rx_frame()
1495 unsigned int frag_len = bp->rx_buffer_size; in macb_rx_frame()
1507 offset += bp->rx_buffer_size; in macb_rx_frame()
1519 skb->protocol = eth_type_trans(skb, bp->dev); in macb_rx_frame()
1521 bp->dev->stats.rx_packets++; in macb_rx_frame()
1522 bp->dev->stats.rx_bytes += skb->len; in macb_rx_frame()
1523 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", in macb_rx_frame()
1532 struct macb *bp = queue->bp; in macb_init_rx_ring() local
1538 for (i = 0; i < bp->rx_ring_size; i++) { in macb_init_rx_ring()
1540 macb_set_addr(bp, desc, addr); in macb_init_rx_ring()
1542 addr += bp->rx_buffer_size; in macb_init_rx_ring()
1551 struct macb *bp = queue->bp; in macb_rx() local
1603 netdev_err(bp->dev, "RX queue corruption: reset it\n"); in macb_rx()
1605 spin_lock_irqsave(&bp->lock, flags); in macb_rx()
1607 ctrl = macb_readl(bp, NCR); in macb_rx()
1608 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); in macb_rx()
1613 macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); in macb_rx()
1615 spin_unlock_irqrestore(&bp->lock, flags); in macb_rx()
1630 struct macb *bp = queue->bp; in macb_poll() local
1634 status = macb_readl(bp, RSR); in macb_poll()
1635 macb_writel(bp, RSR, status); in macb_poll()
1637 netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n", in macb_poll()
1640 work_done = bp->macbgem_ops.mog_rx(queue, napi, budget); in macb_poll()
1652 status = macb_readl(bp, RSR); in macb_poll()
1654 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_poll()
1658 queue_writel(queue, IER, bp->rx_intr_mask); in macb_poll()
1668 status = macb_readl(bp, RSR); in macb_poll()
1670 queue_writel(queue, IDR, bp->rx_intr_mask); in macb_poll()
1671 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_poll()
1685 struct macb *bp = from_tasklet(bp, t, hresp_err_tasklet); in macb_hresp_error_task() local
1686 struct net_device *dev = bp->dev; in macb_hresp_error_task()
1691 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_hresp_error_task()
1692 queue_writel(queue, IDR, bp->rx_intr_mask | in macb_hresp_error_task()
1696 ctrl = macb_readl(bp, NCR); in macb_hresp_error_task()
1698 macb_writel(bp, NCR, ctrl); in macb_hresp_error_task()
1703 bp->macbgem_ops.mog_init_rings(bp); in macb_hresp_error_task()
1706 macb_init_buffers(bp); in macb_hresp_error_task()
1709 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_hresp_error_task()
1711 bp->rx_intr_mask | in macb_hresp_error_task()
1716 macb_writel(bp, NCR, ctrl); in macb_hresp_error_task()
1726 struct macb *bp = queue->bp; in macb_tx_restart() local
1729 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_tx_restart()
1735 tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp); in macb_tx_restart()
1736 tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp)); in macb_tx_restart()
1737 head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, head)); in macb_tx_restart()
1742 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); in macb_tx_restart()
1748 struct macb *bp = queue->bp; in macb_wol_interrupt() local
1756 spin_lock(&bp->lock); in macb_wol_interrupt()
1760 macb_writel(bp, WOL, 0); in macb_wol_interrupt()
1761 netdev_vdbg(bp->dev, "MACB WoL: queue = %u, isr = 0x%08lx\n", in macb_wol_interrupt()
1762 (unsigned int)(queue - bp->queues), in macb_wol_interrupt()
1764 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_wol_interrupt()
1766 pm_wakeup_event(&bp->pdev->dev, 0); in macb_wol_interrupt()
1769 spin_unlock(&bp->lock); in macb_wol_interrupt()
1777 struct macb *bp = queue->bp; in gem_wol_interrupt() local
1785 spin_lock(&bp->lock); in gem_wol_interrupt()
1789 gem_writel(bp, WOL, 0); in gem_wol_interrupt()
1790 netdev_vdbg(bp->dev, "GEM WoL: queue = %u, isr = 0x%08lx\n", in gem_wol_interrupt()
1791 (unsigned int)(queue - bp->queues), in gem_wol_interrupt()
1793 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in gem_wol_interrupt()
1795 pm_wakeup_event(&bp->pdev->dev, 0); in gem_wol_interrupt()
1798 spin_unlock(&bp->lock); in gem_wol_interrupt()
1806 struct macb *bp = queue->bp; in macb_interrupt() local
1807 struct net_device *dev = bp->dev; in macb_interrupt()
1815 spin_lock(&bp->lock); in macb_interrupt()
1821 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1826 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n", in macb_interrupt()
1827 (unsigned int)(queue - bp->queues), in macb_interrupt()
1830 if (status & bp->rx_intr_mask) { in macb_interrupt()
1837 queue_writel(queue, IDR, bp->rx_intr_mask); in macb_interrupt()
1838 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1842 netdev_vdbg(bp->dev, "scheduling RX softirq\n"); in macb_interrupt()
1851 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1875 ctrl = macb_readl(bp, NCR); in macb_interrupt()
1876 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE)); in macb_interrupt()
1878 macb_writel(bp, NCR, ctrl | MACB_BIT(RE)); in macb_interrupt()
1880 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1886 if (macb_is_gem(bp)) in macb_interrupt()
1887 bp->hw_stats.gem.rx_overruns++; in macb_interrupt()
1889 bp->hw_stats.macb.rx_overruns++; in macb_interrupt()
1891 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1896 tasklet_schedule(&bp->hresp_err_tasklet); in macb_interrupt()
1899 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_interrupt()
1905 spin_unlock(&bp->lock); in macb_interrupt()
1916 struct macb *bp = netdev_priv(dev); in macb_poll_controller() local
1922 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_poll_controller()
1928 static unsigned int macb_tx_map(struct macb *bp, in macb_tx_map() argument
1960 entry = macb_tx_ring_wrap(bp, tx_head); in macb_tx_map()
1963 mapping = dma_map_single(&bp->pdev->dev, in macb_tx_map()
1966 if (dma_mapping_error(&bp->pdev->dev, mapping)) in macb_tx_map()
1980 size = min(len, bp->max_tx_length); in macb_tx_map()
1990 size = min(len, bp->max_tx_length); in macb_tx_map()
1991 entry = macb_tx_ring_wrap(bp, tx_head); in macb_tx_map()
1994 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, in macb_tx_map()
1996 if (dma_mapping_error(&bp->pdev->dev, mapping)) in macb_tx_map()
2014 netdev_err(bp->dev, "BUG! empty skb!\n"); in macb_tx_map()
2029 entry = macb_tx_ring_wrap(bp, i); in macb_tx_map()
2051 entry = macb_tx_ring_wrap(bp, i); in macb_tx_map()
2060 if (unlikely(entry == (bp->tx_ring_size - 1))) in macb_tx_map()
2067 if ((bp->dev->features & NETIF_F_HW_CSUM) && in macb_tx_map()
2078 macb_set_addr(bp, desc, tx_skb->mapping); in macb_tx_map()
2091 netdev_err(bp->dev, "TX DMA map failed\n"); in macb_tx_map()
2096 macb_tx_unmap(bp, tx_skb); in macb_tx_map()
2209 struct macb *bp = netdev_priv(dev); in macb_start_xmit() local
2210 struct macb_queue *queue = &bp->queues[queue_index]; in macb_start_xmit()
2237 netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n"); in macb_start_xmit()
2242 hdrlen = min(skb_headlen(skb), bp->max_tx_length); in macb_start_xmit()
2245 netdev_vdbg(bp->dev, in macb_start_xmit()
2259 desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1; in macb_start_xmit()
2261 desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length); in macb_start_xmit()
2265 desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length); in macb_start_xmit()
2268 spin_lock_irqsave(&bp->lock, flags); in macb_start_xmit()
2272 bp->tx_ring_size) < desc_cnt) { in macb_start_xmit()
2274 spin_unlock_irqrestore(&bp->lock, flags); in macb_start_xmit()
2275 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n", in macb_start_xmit()
2281 if (!macb_tx_map(bp, queue, skb, hdrlen)) { in macb_start_xmit()
2290 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); in macb_start_xmit()
2292 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1) in macb_start_xmit()
2296 spin_unlock_irqrestore(&bp->lock, flags); in macb_start_xmit()
2301 static void macb_init_rx_buffer_size(struct macb *bp, size_t size) in macb_init_rx_buffer_size() argument
2303 if (!macb_is_gem(bp)) { in macb_init_rx_buffer_size()
2304 bp->rx_buffer_size = MACB_RX_BUFFER_SIZE; in macb_init_rx_buffer_size()
2306 bp->rx_buffer_size = size; in macb_init_rx_buffer_size()
2308 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) { in macb_init_rx_buffer_size()
2309 netdev_dbg(bp->dev, in macb_init_rx_buffer_size()
2312 bp->rx_buffer_size = in macb_init_rx_buffer_size()
2313 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE); in macb_init_rx_buffer_size()
2317 netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n", in macb_init_rx_buffer_size()
2318 bp->dev->mtu, bp->rx_buffer_size); in macb_init_rx_buffer_size()
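On GEM the RX buffer size tracks the MTU but must stay a multiple of RX_BUFFER_MULTIPLE (the controller's buffer granularity), so odd sizes are rounded up; plain MACB always uses the fixed MACB_RX_BUFFER_SIZE. A worked example of the rounding, with a 64-byte granularity assumed for illustration:

	/* roundup(x, y): smallest multiple of y that is >= x. */
	size_t size = 1538;		/* example MTU-derived size */
	size = roundup(size, 64);	/* -> 1600 */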
2321 static void gem_free_rx_buffers(struct macb *bp) in gem_free_rx_buffers() argument
2330 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in gem_free_rx_buffers()
2334 for (i = 0; i < bp->rx_ring_size; i++) { in gem_free_rx_buffers()
2341 addr = macb_get_addr(bp, desc); in gem_free_rx_buffers()
2343 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, in gem_free_rx_buffers()
2354 static void macb_free_rx_buffers(struct macb *bp) in macb_free_rx_buffers() argument
2356 struct macb_queue *queue = &bp->queues[0]; in macb_free_rx_buffers()
2359 dma_free_coherent(&bp->pdev->dev, in macb_free_rx_buffers()
2360 bp->rx_ring_size * bp->rx_buffer_size, in macb_free_rx_buffers()
2366 static void macb_free_consistent(struct macb *bp) in macb_free_consistent() argument
2372 bp->macbgem_ops.mog_free_rx_buffers(bp); in macb_free_consistent()
2374 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_free_consistent()
2378 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch; in macb_free_consistent()
2379 dma_free_coherent(&bp->pdev->dev, size, in macb_free_consistent()
2384 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch; in macb_free_consistent()
2385 dma_free_coherent(&bp->pdev->dev, size, in macb_free_consistent()
2392 static int gem_alloc_rx_buffers(struct macb *bp) in gem_alloc_rx_buffers() argument
2398 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in gem_alloc_rx_buffers()
2399 size = bp->rx_ring_size * sizeof(struct sk_buff *); in gem_alloc_rx_buffers()
2404 netdev_dbg(bp->dev, in gem_alloc_rx_buffers()
2406 bp->rx_ring_size, queue->rx_skbuff); in gem_alloc_rx_buffers()
2411 static int macb_alloc_rx_buffers(struct macb *bp) in macb_alloc_rx_buffers() argument
2413 struct macb_queue *queue = &bp->queues[0]; in macb_alloc_rx_buffers()
2416 size = bp->rx_ring_size * bp->rx_buffer_size; in macb_alloc_rx_buffers()
2417 queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, in macb_alloc_rx_buffers()
2422 netdev_dbg(bp->dev, in macb_alloc_rx_buffers()
2428 static int macb_alloc_consistent(struct macb *bp) in macb_alloc_consistent() argument
2434 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_alloc_consistent()
2435 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch; in macb_alloc_consistent()
2436 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, in macb_alloc_consistent()
2441 netdev_dbg(bp->dev, in macb_alloc_consistent()
2446 size = bp->tx_ring_size * sizeof(struct macb_tx_skb); in macb_alloc_consistent()
2451 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch; in macb_alloc_consistent()
2452 queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, in macb_alloc_consistent()
2456 netdev_dbg(bp->dev, in macb_alloc_consistent()
2460 if (bp->macbgem_ops.mog_alloc_rx_buffers(bp)) in macb_alloc_consistent()
2466 macb_free_consistent(bp); in macb_alloc_consistent()
2470 static void gem_init_rings(struct macb *bp) in gem_init_rings() argument
2477 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in gem_init_rings()
2478 for (i = 0; i < bp->tx_ring_size; i++) { in gem_init_rings()
2480 macb_set_addr(bp, desc, 0); in gem_init_rings()
2495 static void macb_init_rings(struct macb *bp) in macb_init_rings() argument
2500 macb_init_rx_ring(&bp->queues[0]); in macb_init_rings()
2502 for (i = 0; i < bp->tx_ring_size; i++) { in macb_init_rings()
2503 desc = macb_tx_desc(&bp->queues[0], i); in macb_init_rings()
2504 macb_set_addr(bp, desc, 0); in macb_init_rings()
2507 bp->queues[0].tx_head = 0; in macb_init_rings()
2508 bp->queues[0].tx_tail = 0; in macb_init_rings()
2512 static void macb_reset_hw(struct macb *bp) in macb_reset_hw() argument
2516 u32 ctrl = macb_readl(bp, NCR); in macb_reset_hw()
2526 macb_writel(bp, NCR, ctrl); in macb_reset_hw()
2529 macb_writel(bp, TSR, -1); in macb_reset_hw()
2530 macb_writel(bp, RSR, -1); in macb_reset_hw()
2533 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_reset_hw()
2536 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_reset_hw()
2541 static u32 gem_mdc_clk_div(struct macb *bp) in gem_mdc_clk_div() argument
2544 unsigned long pclk_hz = clk_get_rate(bp->pclk); in gem_mdc_clk_div()
2562 static u32 macb_mdc_clk_div(struct macb *bp) in macb_mdc_clk_div() argument
2567 if (macb_is_gem(bp)) in macb_mdc_clk_div()
2568 return gem_mdc_clk_div(bp); in macb_mdc_clk_div()
2570 pclk_hz = clk_get_rate(bp->pclk); in macb_mdc_clk_div()
2587 static u32 macb_dbw(struct macb *bp) in macb_dbw() argument
2589 if (!macb_is_gem(bp)) in macb_dbw()
2592 switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) { in macb_dbw()
2610 static void macb_configure_dma(struct macb *bp) in macb_configure_dma() argument
2617 buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE; in macb_configure_dma()
2618 if (macb_is_gem(bp)) { in macb_configure_dma()
2619 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); in macb_configure_dma()
2620 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_configure_dma()
2626 if (bp->dma_burst_length) in macb_configure_dma()
2627 dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg); in macb_configure_dma()
2631 if (bp->native_io) in macb_configure_dma()
2636 if (bp->dev->features & NETIF_F_HW_CSUM) in macb_configure_dma()
2643 if (bp->hw_dma_cap & HW_DMA_CAP_64B) in macb_configure_dma()
2647 if (bp->hw_dma_cap & HW_DMA_CAP_PTP) in macb_configure_dma()
2650 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n", in macb_configure_dma()
2652 gem_writel(bp, DMACFG, dmacfg); in macb_configure_dma()
2656 static void macb_init_hw(struct macb *bp) in macb_init_hw() argument
2660 macb_reset_hw(bp); in macb_init_hw()
2661 macb_set_hwaddr(bp); in macb_init_hw()
2663 config = macb_mdc_clk_div(bp); in macb_init_hw()
2666 if (bp->caps & MACB_CAPS_JUMBO) in macb_init_hw()
2670 if (bp->dev->flags & IFF_PROMISC) in macb_init_hw()
2672 else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM) in macb_init_hw()
2674 if (!(bp->dev->flags & IFF_BROADCAST)) in macb_init_hw()
2676 config |= macb_dbw(bp); in macb_init_hw()
2677 macb_writel(bp, NCFGR, config); in macb_init_hw()
2678 if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len) in macb_init_hw()
2679 gem_writel(bp, JML, bp->jumbo_max_len); in macb_init_hw()
2680 bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK; in macb_init_hw()
2681 if (bp->caps & MACB_CAPS_JUMBO) in macb_init_hw()
2682 bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK; in macb_init_hw()
2684 macb_configure_dma(bp); in macb_init_hw()
2749 struct macb *bp = netdev_priv(dev); in macb_sethashtable() local
2759 macb_or_gem_writel(bp, HRB, mc_filter[0]); in macb_sethashtable()
2760 macb_or_gem_writel(bp, HRT, mc_filter[1]); in macb_sethashtable()
2767 struct macb *bp = netdev_priv(dev); in macb_set_rx_mode() local
2769 cfg = macb_readl(bp, NCFGR); in macb_set_rx_mode()
2776 if (macb_is_gem(bp)) in macb_set_rx_mode()
2783 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM) in macb_set_rx_mode()
2789 macb_or_gem_writel(bp, HRB, -1); in macb_set_rx_mode()
2790 macb_or_gem_writel(bp, HRT, -1); in macb_set_rx_mode()
2798 macb_or_gem_writel(bp, HRB, 0); in macb_set_rx_mode()
2799 macb_or_gem_writel(bp, HRT, 0); in macb_set_rx_mode()
2803 macb_writel(bp, NCFGR, cfg); in macb_set_rx_mode()
2809 struct macb *bp = netdev_priv(dev); in macb_open() local
2814 netdev_dbg(bp->dev, "open\n"); in macb_open()
2816 err = pm_runtime_get_sync(&bp->pdev->dev); in macb_open()
2821 macb_init_rx_buffer_size(bp, bufsz); in macb_open()
2823 err = macb_alloc_consistent(bp); in macb_open()
2830 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_open()
2833 macb_init_hw(bp); in macb_open()
2835 err = macb_phylink_connect(bp); in macb_open()
2841 if (bp->ptp_info) in macb_open()
2842 bp->ptp_info->ptp_init(dev); in macb_open()
2847 macb_reset_hw(bp); in macb_open()
2848 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_open()
2850 macb_free_consistent(bp); in macb_open()
2852 pm_runtime_put_sync(&bp->pdev->dev); in macb_open()
2858 struct macb *bp = netdev_priv(dev); in macb_close() local
2865 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_close()
2868 phylink_stop(bp->phylink); in macb_close()
2869 phylink_disconnect_phy(bp->phylink); in macb_close()
2871 spin_lock_irqsave(&bp->lock, flags); in macb_close()
2872 macb_reset_hw(bp); in macb_close()
2874 spin_unlock_irqrestore(&bp->lock, flags); in macb_close()
2876 macb_free_consistent(bp); in macb_close()
2878 if (bp->ptp_info) in macb_close()
2879 bp->ptp_info->ptp_remove(dev); in macb_close()
2881 pm_runtime_put(&bp->pdev->dev); in macb_close()
2896 static void gem_update_stats(struct macb *bp) in gem_update_stats() argument
2902 u32 *p = &bp->hw_stats.gem.tx_octets_31_0; in gem_update_stats()
2906 u64 val = bp->macb_reg_readl(bp, offset); in gem_update_stats()
2908 bp->ethtool_stats[i] += val; in gem_update_stats()
2913 val = bp->macb_reg_readl(bp, offset + 4); in gem_update_stats()
2914 bp->ethtool_stats[i] += ((u64)val) << 32; in gem_update_stats()
2920 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in gem_update_stats()
2922 bp->ethtool_stats[idx++] = *stat; in gem_update_stats()
2925 static struct net_device_stats *gem_get_stats(struct macb *bp) in gem_get_stats() argument
2927 struct gem_stats *hwstat = &bp->hw_stats.gem; in gem_get_stats()
2928 struct net_device_stats *nstat = &bp->dev->stats; in gem_get_stats()
2930 if (!netif_running(bp->dev)) in gem_get_stats()
2933 gem_update_stats(bp); in gem_get_stats()
2969 struct macb *bp; in gem_get_ethtool_stats() local
2971 bp = netdev_priv(dev); in gem_get_ethtool_stats()
2972 gem_update_stats(bp); in gem_get_ethtool_stats()
2973 memcpy(data, &bp->ethtool_stats, sizeof(u64) in gem_get_ethtool_stats()
2979 struct macb *bp = netdev_priv(dev); in gem_get_sset_count() local
2983 return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN; in gem_get_sset_count()
2992 struct macb *bp = netdev_priv(dev); in gem_get_ethtool_strings() local
3003 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in gem_get_ethtool_strings()
3016 struct macb *bp = netdev_priv(dev); in macb_get_stats() local
3017 struct net_device_stats *nstat = &bp->dev->stats; in macb_get_stats()
3018 struct macb_stats *hwstat = &bp->hw_stats.macb; in macb_get_stats()
3020 if (macb_is_gem(bp)) in macb_get_stats()
3021 return gem_get_stats(bp); in macb_get_stats()
3024 macb_update_stats(bp); in macb_get_stats()
3069 struct macb *bp = netdev_priv(dev); in macb_get_regs() local
3073 regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1)) in macb_get_regs()
3076 tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail); in macb_get_regs()
3077 head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head); in macb_get_regs()
3079 regs_buff[0] = macb_readl(bp, NCR); in macb_get_regs()
3080 regs_buff[1] = macb_or_gem_readl(bp, NCFGR); in macb_get_regs()
3081 regs_buff[2] = macb_readl(bp, NSR); in macb_get_regs()
3082 regs_buff[3] = macb_readl(bp, TSR); in macb_get_regs()
3083 regs_buff[4] = macb_readl(bp, RBQP); in macb_get_regs()
3084 regs_buff[5] = macb_readl(bp, TBQP); in macb_get_regs()
3085 regs_buff[6] = macb_readl(bp, RSR); in macb_get_regs()
3086 regs_buff[7] = macb_readl(bp, IMR); in macb_get_regs()
3090 regs_buff[10] = macb_tx_dma(&bp->queues[0], tail); in macb_get_regs()
3091 regs_buff[11] = macb_tx_dma(&bp->queues[0], head); in macb_get_regs()
3093 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) in macb_get_regs()
3094 regs_buff[12] = macb_or_gem_readl(bp, USRIO); in macb_get_regs()
3095 if (macb_is_gem(bp)) in macb_get_regs()
3096 regs_buff[13] = gem_readl(bp, DMACFG); in macb_get_regs()
3101 struct macb *bp = netdev_priv(netdev); in macb_get_wol() local
3103 if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) { in macb_get_wol()
3104 phylink_ethtool_get_wol(bp->phylink, wol); in macb_get_wol()
3107 if (bp->wol & MACB_WOL_ENABLED) in macb_get_wol()
3114 struct macb *bp = netdev_priv(netdev); in macb_set_wol() local
3118 ret = phylink_ethtool_set_wol(bp->phylink, wol); in macb_set_wol()
3125 if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) || in macb_set_wol()
3130 bp->wol |= MACB_WOL_ENABLED; in macb_set_wol()
3132 bp->wol &= ~MACB_WOL_ENABLED; in macb_set_wol()
3134 device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED); in macb_set_wol()
3142 struct macb *bp = netdev_priv(netdev); in macb_get_link_ksettings() local
3144 return phylink_ethtool_ksettings_get(bp->phylink, kset); in macb_get_link_ksettings()
3150 struct macb *bp = netdev_priv(netdev); in macb_set_link_ksettings() local
3152 return phylink_ethtool_ksettings_set(bp->phylink, kset); in macb_set_link_ksettings()
3158 struct macb *bp = netdev_priv(netdev); in macb_get_ringparam() local
3163 ring->rx_pending = bp->rx_ring_size; in macb_get_ringparam()
3164 ring->tx_pending = bp->tx_ring_size; in macb_get_ringparam()
3170 struct macb *bp = netdev_priv(netdev); in macb_set_ringparam() local
3185 if ((new_tx_size == bp->tx_ring_size) && in macb_set_ringparam()
3186 (new_rx_size == bp->rx_ring_size)) { in macb_set_ringparam()
3191 if (netif_running(bp->dev)) { in macb_set_ringparam()
3193 macb_close(bp->dev); in macb_set_ringparam()
3196 bp->rx_ring_size = new_rx_size; in macb_set_ringparam()
3197 bp->tx_ring_size = new_tx_size; in macb_set_ringparam()
3200 macb_open(bp->dev); in macb_set_ringparam()
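Because the rings are allocated at open time, macb_set_ringparam() cannot resize them live: it bounces a running interface around the size change. The restart pattern in outline:

	unsigned int reset = 0;

	if (netif_running(bp->dev)) {
		reset = 1;
		macb_close(bp->dev);
	}

	bp->rx_ring_size = new_rx_size;
	bp->tx_ring_size = new_tx_size;

	if (reset)
		macb_open(bp->dev);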
3206 static unsigned int gem_get_tsu_rate(struct macb *bp) in gem_get_tsu_rate() argument
3211 tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk"); in gem_get_tsu_rate()
3215 else if (!IS_ERR(bp->pclk)) { in gem_get_tsu_rate()
3216 tsu_clk = bp->pclk; in gem_get_tsu_rate()
3231 struct macb *bp = netdev_priv(dev); in gem_get_ts_info() local
3233 if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) { in gem_get_ts_info()
3253 info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1; in gem_get_ts_info()
3272 struct macb *bp = netdev_priv(netdev); in macb_get_ts_info() local
3274 if (bp->ptp_info) in macb_get_ts_info()
3275 return bp->ptp_info->get_ts_info(netdev, info); in macb_get_ts_info()
3280 static void gem_enable_flow_filters(struct macb *bp, bool enable) in gem_enable_flow_filters() argument
3282 struct net_device *netdev = bp->dev; in gem_enable_flow_filters()
3290 num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8)); in gem_enable_flow_filters()
3292 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_enable_flow_filters()
3299 t2_scr = gem_readl_n(bp, SCRT2, fs->location); in gem_enable_flow_filters()
3322 gem_writel_n(bp, SCRT2, fs->location, t2_scr); in gem_enable_flow_filters()
3326 static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs) in gem_prog_cmp_regs() argument
3335 if (!macb_is_gem(bp)) in gem_prog_cmp_regs()
3350 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0); in gem_prog_cmp_regs()
3351 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1); in gem_prog_cmp_regs()
3364 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0); in gem_prog_cmp_regs()
3365 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1); in gem_prog_cmp_regs()
3392 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0); in gem_prog_cmp_regs()
3393 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1); in gem_prog_cmp_regs()
3406 gem_writel_n(bp, SCRT2, index, t2_scr); in gem_prog_cmp_regs()
3412 struct macb *bp = netdev_priv(netdev); in gem_add_flow_filter() local
3431 spin_lock_irqsave(&bp->rx_fs_lock, flags); in gem_add_flow_filter()
3434 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_add_flow_filter()
3447 list_add_tail(&newfs->list, &bp->rx_fs_list.list); in gem_add_flow_filter()
3449 gem_prog_cmp_regs(bp, fs); in gem_add_flow_filter()
3450 bp->rx_fs_list.count++; in gem_add_flow_filter()
3452 gem_enable_flow_filters(bp, 1); in gem_add_flow_filter()
3454 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); in gem_add_flow_filter()
3458 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); in gem_add_flow_filter()
3466 struct macb *bp = netdev_priv(netdev); in gem_del_flow_filter() local
3471 spin_lock_irqsave(&bp->rx_fs_lock, flags); in gem_del_flow_filter()
3473 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_del_flow_filter()
3485 gem_writel_n(bp, SCRT2, fs->location, 0); in gem_del_flow_filter()
3488 bp->rx_fs_list.count--; in gem_del_flow_filter()
3489 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); in gem_del_flow_filter()
3495 spin_unlock_irqrestore(&bp->rx_fs_lock, flags); in gem_del_flow_filter()
3502 struct macb *bp = netdev_priv(netdev); in gem_get_flow_entry() local
3505 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_get_flow_entry()
3517 struct macb *bp = netdev_priv(netdev); in gem_get_all_flow_entries() local
3521 list_for_each_entry(item, &bp->rx_fs_list.list, list) { in gem_get_all_flow_entries()
3527 cmd->data = bp->max_tuples; in gem_get_all_flow_entries()
3536 struct macb *bp = netdev_priv(netdev); in gem_get_rxnfc() local
3541 cmd->data = bp->num_queues; in gem_get_rxnfc()
3544 cmd->rule_cnt = bp->rx_fs_list.count; in gem_get_rxnfc()
3563 struct macb *bp = netdev_priv(netdev); in gem_set_rxnfc() local
3568 if ((cmd->fs.location >= bp->max_tuples) in gem_set_rxnfc()
3569 || (cmd->fs.ring_cookie >= bp->num_queues)) { in gem_set_rxnfc()
3620 struct macb *bp = netdev_priv(dev); in macb_ioctl() local
3625 if (bp->ptp_info) { in macb_ioctl()
3628 return bp->ptp_info->set_hwtst(dev, rq, cmd); in macb_ioctl()
3630 return bp->ptp_info->get_hwtst(dev, rq); in macb_ioctl()
3634 return phylink_mii_ioctl(bp->phylink, rq, cmd); in macb_ioctl()
3637 static inline void macb_set_txcsum_feature(struct macb *bp, in macb_set_txcsum_feature() argument
3642 if (!macb_is_gem(bp)) in macb_set_txcsum_feature()
3645 val = gem_readl(bp, DMACFG); in macb_set_txcsum_feature()
3651 gem_writel(bp, DMACFG, val); in macb_set_txcsum_feature()
3654 static inline void macb_set_rxcsum_feature(struct macb *bp, in macb_set_rxcsum_feature() argument
3657 struct net_device *netdev = bp->dev; in macb_set_rxcsum_feature()
3660 if (!macb_is_gem(bp)) in macb_set_rxcsum_feature()
3663 val = gem_readl(bp, NCFGR); in macb_set_rxcsum_feature()
3669 gem_writel(bp, NCFGR, val); in macb_set_rxcsum_feature()
3672 static inline void macb_set_rxflow_feature(struct macb *bp, in macb_set_rxflow_feature() argument
3675 if (!macb_is_gem(bp)) in macb_set_rxflow_feature()
3678 gem_enable_flow_filters(bp, !!(features & NETIF_F_NTUPLE)); in macb_set_rxflow_feature()
3684 struct macb *bp = netdev_priv(netdev); in macb_set_features() local
3689 macb_set_txcsum_feature(bp, features); in macb_set_features()
3693 macb_set_rxcsum_feature(bp, features); in macb_set_features()
3697 macb_set_rxflow_feature(bp, features); in macb_set_features()
3702 static void macb_restore_features(struct macb *bp) in macb_restore_features() argument
3704 struct net_device *netdev = bp->dev; in macb_restore_features()
3709 macb_set_txcsum_feature(bp, features); in macb_restore_features()
3712 macb_set_rxcsum_feature(bp, features); in macb_restore_features()
3715 list_for_each_entry(item, &bp->rx_fs_list.list, list) in macb_restore_features()
3716 gem_prog_cmp_regs(bp, &item->fs); in macb_restore_features()
3718 macb_set_rxflow_feature(bp, features); in macb_restore_features()
3741 static void macb_configure_caps(struct macb *bp, in macb_configure_caps() argument
3747 bp->caps = dt_conf->caps; in macb_configure_caps()
3749 if (hw_is_gem(bp->regs, bp->native_io)) { in macb_configure_caps()
3750 bp->caps |= MACB_CAPS_MACB_IS_GEM; in macb_configure_caps()
3752 dcfg = gem_readl(bp, DCFG1); in macb_configure_caps()
3754 bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE; in macb_configure_caps()
3756 bp->caps |= MACB_CAPS_PCS; in macb_configure_caps()
3757 dcfg = gem_readl(bp, DCFG12); in macb_configure_caps()
3759 bp->caps |= MACB_CAPS_HIGH_SPEED; in macb_configure_caps()
3760 dcfg = gem_readl(bp, DCFG2); in macb_configure_caps()
3762 bp->caps |= MACB_CAPS_FIFO_MODE; in macb_configure_caps()
3764 if (gem_has_ptp(bp)) { in macb_configure_caps()
3765 if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5))) in macb_configure_caps()
3766 dev_err(&bp->pdev->dev, in macb_configure_caps()
3769 bp->hw_dma_cap |= HW_DMA_CAP_PTP; in macb_configure_caps()
3770 bp->ptp_info = &gem_ptp_info; in macb_configure_caps()
3776 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps); in macb_configure_caps()
3904 struct macb *bp = netdev_priv(dev); in macb_init() local
3909 bp->tx_ring_size = DEFAULT_TX_RING_SIZE; in macb_init()
3910 bp->rx_ring_size = DEFAULT_RX_RING_SIZE; in macb_init()
3917 if (!(bp->queue_mask & (1 << hw_q))) in macb_init()
3920 queue = &bp->queues[q]; in macb_init()
3921 queue->bp = bp; in macb_init()
3932 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { in macb_init()
3946 if (bp->hw_dma_cap & HW_DMA_CAP_64B) { in macb_init()
3975 if (macb_is_gem(bp)) { in macb_init()
3976 bp->max_tx_length = GEM_MAX_TX_LEN; in macb_init()
3977 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers; in macb_init()
3978 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers; in macb_init()
3979 bp->macbgem_ops.mog_init_rings = gem_init_rings; in macb_init()
3980 bp->macbgem_ops.mog_rx = gem_rx; in macb_init()
3983 bp->max_tx_length = MACB_MAX_TX_LEN; in macb_init()
3984 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers; in macb_init()
3985 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers; in macb_init()
3986 bp->macbgem_ops.mog_init_rings = macb_init_rings; in macb_init()
3987 bp->macbgem_ops.mog_rx = macb_rx; in macb_init()
3995 if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6))) in macb_init()
3999 if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE)) in macb_init()
4001 if (bp->caps & MACB_CAPS_SG_DISABLED) in macb_init()
4009 reg = gem_readl(bp, DCFG8); in macb_init()
4010 bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3), in macb_init()
4012 INIT_LIST_HEAD(&bp->rx_fs_list.list); in macb_init()
4013 if (bp->max_tuples > 0) { in macb_init()
4019 gem_writel_n(bp, ETHT, SCRT2_ETHT, reg); in macb_init()
4023 bp->rx_fs_list.count = 0; in macb_init()
4024 spin_lock_init(&bp->rx_fs_lock); in macb_init()
4026 bp->max_tuples = 0; in macb_init()
4029 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) { in macb_init()
4031 if (phy_interface_mode_is_rgmii(bp->phy_interface)) in macb_init()
4032 val = bp->usrio->rgmii; in macb_init()
4033 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && in macb_init()
4034 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) in macb_init()
4035 val = bp->usrio->rmii; in macb_init()
4036 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) in macb_init()
4037 val = bp->usrio->mii; in macb_init()
4039 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN) in macb_init()
4040 val |= bp->usrio->refclk; in macb_init()
4042 macb_or_gem_writel(bp, USRIO, val); in macb_init()
4046 val = macb_mdc_clk_div(bp); in macb_init()
4047 val |= macb_dbw(bp); in macb_init()
4048 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) in macb_init()
4050 macb_writel(bp, NCFGR, val); in macb_init()
4420 struct macb *bp = netdev_priv(dev); in at91ether_init() local
4423 bp->queues[0].bp = bp; in at91ether_init()
4433 macb_writel(bp, NCR, 0); in at91ether_init()
4435 macb_writel(bp, NCFGR, MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG)); in at91ether_init()
4724 struct macb *bp; in macb_probe() local
4754 dev = alloc_etherdev_mq(sizeof(*bp), num_queues); in macb_probe()
4764 bp = netdev_priv(dev); in macb_probe()
4765 bp->pdev = pdev; in macb_probe()
4766 bp->dev = dev; in macb_probe()
4767 bp->regs = mem; in macb_probe()
4768 bp->native_io = native_io; in macb_probe()
4770 bp->macb_reg_readl = hw_readl_native; in macb_probe()
4771 bp->macb_reg_writel = hw_writel_native; in macb_probe()
4773 bp->macb_reg_readl = hw_readl; in macb_probe()
4774 bp->macb_reg_writel = hw_writel; in macb_probe()
4776 bp->num_queues = num_queues; in macb_probe()
4777 bp->queue_mask = queue_mask; in macb_probe()
4779 bp->dma_burst_length = macb_config->dma_burst_length; in macb_probe()
4780 bp->pclk = pclk; in macb_probe()
4781 bp->hclk = hclk; in macb_probe()
4782 bp->tx_clk = tx_clk; in macb_probe()
4783 bp->rx_clk = rx_clk; in macb_probe()
4784 bp->tsu_clk = tsu_clk; in macb_probe()
4786 bp->jumbo_max_len = macb_config->jumbo_max_len; in macb_probe()
4788 bp->wol = 0; in macb_probe()
4790 bp->wol |= MACB_WOL_HAS_MAGIC_PACKET; in macb_probe()
4791 device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); in macb_probe()
4793 bp->usrio = macb_config->usrio; in macb_probe()
4795 spin_lock_init(&bp->lock); in macb_probe()
4798 macb_configure_caps(bp, macb_config); in macb_probe()
4801 if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) { in macb_probe()
4803 bp->hw_dma_cap |= HW_DMA_CAP_64B; in macb_probe()
4816 if (bp->caps & MACB_CAPS_JUMBO) in macb_probe()
4817 dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN; in macb_probe()
4821 if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) { in macb_probe()
4822 val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10)); in macb_probe()
4824 bp->rx_bd_rd_prefetch = (2 << (val - 1)) * in macb_probe()
4825 macb_dma_desc_get_size(bp); in macb_probe()
4827 val = GEM_BFEXT(TXBD_RDBUFF, gem_readl(bp, DCFG10)); in macb_probe()
4829 bp->tx_bd_rd_prefetch = (2 << (val - 1)) * in macb_probe()
4830 macb_dma_desc_get_size(bp); in macb_probe()
4833 bp->rx_intr_mask = MACB_RX_INT_FLAGS; in macb_probe()
4834 if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR) in macb_probe()
4835 bp->rx_intr_mask |= MACB_BIT(RXUBR); in macb_probe()
4837 err = of_get_ethdev_address(np, bp->dev); in macb_probe()
4841 macb_get_hwaddr(bp); in macb_probe()
4846 bp->phy_interface = PHY_INTERFACE_MODE_MII; in macb_probe()
4848 bp->phy_interface = interface; in macb_probe()
4855 err = macb_mii_init(bp); in macb_probe()
4867 tasklet_setup(&bp->hresp_err_tasklet, macb_hresp_error_task); in macb_probe()
4870 macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID), in macb_probe()
4873 pm_runtime_mark_last_busy(&bp->pdev->dev); in macb_probe()
4874 pm_runtime_put_autosuspend(&bp->pdev->dev); in macb_probe()
4879 mdiobus_unregister(bp->mii_bus); in macb_probe()
4880 mdiobus_free(bp->mii_bus); in macb_probe()
4897 struct macb *bp; in macb_remove() local
4902 bp = netdev_priv(dev); in macb_remove()
4903 mdiobus_unregister(bp->mii_bus); in macb_remove()
4904 mdiobus_free(bp->mii_bus); in macb_remove()
4907 tasklet_kill(&bp->hresp_err_tasklet); in macb_remove()
4911 macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk, in macb_remove()
4912 bp->rx_clk, bp->tsu_clk); in macb_remove()
4915 phylink_destroy(bp->phylink); in macb_remove()
4925 struct macb *bp = netdev_priv(netdev); in macb_suspend() local
4934 if (bp->wol & MACB_WOL_ENABLED) { in macb_suspend()
4935 spin_lock_irqsave(&bp->lock, flags); in macb_suspend()
4937 macb_writel(bp, TSR, -1); in macb_suspend()
4938 macb_writel(bp, RSR, -1); in macb_suspend()
4939 for (q = 0, queue = bp->queues; q < bp->num_queues; in macb_suspend()
4944 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_suspend()
4950 devm_free_irq(dev, bp->queues[0].irq, bp->queues); in macb_suspend()
4951 if (macb_is_gem(bp)) { in macb_suspend()
4952 err = devm_request_irq(dev, bp->queues[0].irq, gem_wol_interrupt, in macb_suspend()
4953 IRQF_SHARED, netdev->name, bp->queues); in macb_suspend()
4957 bp->queues[0].irq, err); in macb_suspend()
4958 spin_unlock_irqrestore(&bp->lock, flags); in macb_suspend()
4961 queue_writel(bp->queues, IER, GEM_BIT(WOL)); in macb_suspend()
4962 gem_writel(bp, WOL, MACB_BIT(MAG)); in macb_suspend()
4964 err = devm_request_irq(dev, bp->queues[0].irq, macb_wol_interrupt, in macb_suspend()
4965 IRQF_SHARED, netdev->name, bp->queues); in macb_suspend()
4969 bp->queues[0].irq, err); in macb_suspend()
4970 spin_unlock_irqrestore(&bp->lock, flags); in macb_suspend()
4973 queue_writel(bp->queues, IER, MACB_BIT(WOL)); in macb_suspend()
4974 macb_writel(bp, WOL, MACB_BIT(MAG)); in macb_suspend()
4976 spin_unlock_irqrestore(&bp->lock, flags); in macb_suspend()
4978 enable_irq_wake(bp->queues[0].irq); in macb_suspend()
4982 for (q = 0, queue = bp->queues; q < bp->num_queues; in macb_suspend()
4986 if (!(bp->wol & MACB_WOL_ENABLED)) { in macb_suspend()
4988 phylink_stop(bp->phylink); in macb_suspend()
4990 spin_lock_irqsave(&bp->lock, flags); in macb_suspend()
4991 macb_reset_hw(bp); in macb_suspend()
4992 spin_unlock_irqrestore(&bp->lock, flags); in macb_suspend()
4995 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) in macb_suspend()
4996 bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO); in macb_suspend()
4999 bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT); in macb_suspend()
5001 if (bp->ptp_info) in macb_suspend()
5002 bp->ptp_info->ptp_remove(netdev); in macb_suspend()
5012 struct macb *bp = netdev_priv(netdev); in macb_resume() local
5024 if (bp->wol & MACB_WOL_ENABLED) { in macb_resume()
5025 spin_lock_irqsave(&bp->lock, flags); in macb_resume()
5027 if (macb_is_gem(bp)) { in macb_resume()
5028 queue_writel(bp->queues, IDR, GEM_BIT(WOL)); in macb_resume()
5029 gem_writel(bp, WOL, 0); in macb_resume()
5031 queue_writel(bp->queues, IDR, MACB_BIT(WOL)); in macb_resume()
5032 macb_writel(bp, WOL, 0); in macb_resume()
5035 queue_readl(bp->queues, ISR); in macb_resume()
5036 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) in macb_resume()
5037 queue_writel(bp->queues, ISR, -1); in macb_resume()
5039 devm_free_irq(dev, bp->queues[0].irq, bp->queues); in macb_resume()
5040 err = devm_request_irq(dev, bp->queues[0].irq, macb_interrupt, in macb_resume()
5041 IRQF_SHARED, netdev->name, bp->queues); in macb_resume()
5045 bp->queues[0].irq, err); in macb_resume()
5046 spin_unlock_irqrestore(&bp->lock, flags); in macb_resume()
5049 spin_unlock_irqrestore(&bp->lock, flags); in macb_resume()
5051 disable_irq_wake(bp->queues[0].irq); in macb_resume()
5057 phylink_stop(bp->phylink); in macb_resume()
5061 for (q = 0, queue = bp->queues; q < bp->num_queues; in macb_resume()
5066 gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2); in macb_resume()
5068 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) in macb_resume()
5069 macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio); in macb_resume()
5071 macb_writel(bp, NCR, MACB_BIT(MPE)); in macb_resume()
5072 macb_init_hw(bp); in macb_resume()
5074 macb_restore_features(bp); in macb_resume()
5076 phylink_start(bp->phylink); in macb_resume()
5080 if (bp->ptp_info) in macb_resume()
5081 bp->ptp_info->ptp_init(netdev); in macb_resume()
5089 struct macb *bp = netdev_priv(netdev); in macb_runtime_suspend() local
5092 macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk, bp->rx_clk, bp->tsu_clk); in macb_runtime_suspend()
5094 macb_clks_disable(NULL, NULL, NULL, NULL, bp->tsu_clk); in macb_runtime_suspend()
5102 struct macb *bp = netdev_priv(netdev); in macb_runtime_resume() local
5105 clk_prepare_enable(bp->pclk); in macb_runtime_resume()
5106 clk_prepare_enable(bp->hclk); in macb_runtime_resume()
5107 clk_prepare_enable(bp->tx_clk); in macb_runtime_resume()
5108 clk_prepare_enable(bp->rx_clk); in macb_runtime_resume()
5110 clk_prepare_enable(bp->tsu_clk); in macb_runtime_resume()
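Runtime suspend and resume are symmetric around the clock tree: resume re-enables pclk, hclk, tx_clk and rx_clk plus the TSU clock, while suspend disables them all unless the device is wakeup-armed, in which case only the TSU clock is cut so the wake logic stays powered. The pairing in outline:

	/* Runtime resume (non-wakeup path): */
	clk_prepare_enable(bp->pclk);
	clk_prepare_enable(bp->hclk);
	clk_prepare_enable(bp->tx_clk);
	clk_prepare_enable(bp->rx_clk);
	clk_prepare_enable(bp->tsu_clk);

	/* Runtime suspend mirrors it; when wakeup is armed, only the
	 * TSU clock is dropped: */
	macb_clks_disable(NULL, NULL, NULL, NULL, bp->tsu_clk);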