Lines Matching refs:ndev
43 int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value) in ravb_wait() argument
48 if ((ravb_read(ndev, reg) & mask) == value) in ravb_wait()
55 static int ravb_config(struct net_device *ndev) in ravb_config() argument
60 ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_CONFIG, in ravb_config()
63 error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG); in ravb_config()
65 netdev_err(ndev, "failed to switch device to config mode\n"); in ravb_config()
70 static void ravb_set_duplex(struct net_device *ndev) in ravb_set_duplex() argument
72 struct ravb_private *priv = netdev_priv(ndev); in ravb_set_duplex()
73 u32 ecmr = ravb_read(ndev, ECMR); in ravb_set_duplex()
79 ravb_write(ndev, ecmr, ECMR); in ravb_set_duplex()
82 static void ravb_set_rate(struct net_device *ndev) in ravb_set_rate() argument
84 struct ravb_private *priv = netdev_priv(ndev); in ravb_set_rate()
88 ravb_write(ndev, GECMR_SPEED_100, GECMR); in ravb_set_rate()
91 ravb_write(ndev, GECMR_SPEED_1000, GECMR); in ravb_set_rate()
111 static void ravb_read_mac_address(struct net_device *ndev, const u8 *mac) in ravb_read_mac_address() argument
114 ether_addr_copy(ndev->dev_addr, mac); in ravb_read_mac_address()
116 ndev->dev_addr[0] = (ravb_read(ndev, MAHR) >> 24); in ravb_read_mac_address()
117 ndev->dev_addr[1] = (ravb_read(ndev, MAHR) >> 16) & 0xFF; in ravb_read_mac_address()
118 ndev->dev_addr[2] = (ravb_read(ndev, MAHR) >> 8) & 0xFF; in ravb_read_mac_address()
119 ndev->dev_addr[3] = (ravb_read(ndev, MAHR) >> 0) & 0xFF; in ravb_read_mac_address()
120 ndev->dev_addr[4] = (ravb_read(ndev, MALR) >> 8) & 0xFF; in ravb_read_mac_address()
121 ndev->dev_addr[5] = (ravb_read(ndev, MALR) >> 0) & 0xFF; in ravb_read_mac_address()
129 u32 pir = ravb_read(priv->ndev, PIR); in ravb_mdio_ctrl()
135 ravb_write(priv->ndev, pir, PIR); in ravb_mdio_ctrl()
162 return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0; in ravb_get_mdio_data()
175 static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only) in ravb_tx_free() argument
177 struct ravb_private *priv = netdev_priv(ndev); in ravb_tx_free()
198 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), in ravb_tx_free()
218 static void ravb_ring_free(struct net_device *ndev, int q) in ravb_ring_free() argument
220 struct ravb_private *priv = netdev_priv(ndev); in ravb_ring_free()
228 if (!dma_mapping_error(ndev->dev.parent, in ravb_ring_free()
230 dma_unmap_single(ndev->dev.parent, in ravb_ring_free()
237 dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q], in ravb_ring_free()
243 ravb_tx_free(ndev, q, false); in ravb_ring_free()
247 dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q], in ravb_ring_free()
272 static void ravb_ring_format(struct net_device *ndev, int q) in ravb_ring_format() argument
274 struct ravb_private *priv = netdev_priv(ndev); in ravb_ring_format()
296 dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data, in ravb_ring_format()
302 if (dma_mapping_error(ndev->dev.parent, dma_addr)) in ravb_ring_format()
334 static int ravb_ring_init(struct net_device *ndev, int q) in ravb_ring_init() argument
336 struct ravb_private *priv = netdev_priv(ndev); in ravb_ring_init()
350 skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1); in ravb_ring_init()
365 priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size, in ravb_ring_init()
376 priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size, in ravb_ring_init()
385 ravb_ring_free(ndev, q); in ravb_ring_init()
391 static void ravb_emac_init(struct net_device *ndev) in ravb_emac_init() argument
393 struct ravb_private *priv = netdev_priv(ndev); in ravb_emac_init()
397 ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR); in ravb_emac_init()
400 ecmr = ravb_read(ndev, ECMR); in ravb_emac_init()
403 ravb_write(ndev, ecmr, ECMR); in ravb_emac_init()
405 ravb_set_rate(ndev); in ravb_emac_init()
408 ravb_write(ndev, in ravb_emac_init()
409 (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) | in ravb_emac_init()
410 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR); in ravb_emac_init()
411 ravb_write(ndev, in ravb_emac_init()
412 (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR); in ravb_emac_init()
414 ravb_write(ndev, 1, MPR); in ravb_emac_init()
417 ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR); in ravb_emac_init()
420 ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR); in ravb_emac_init()
424 static int ravb_dmac_init(struct net_device *ndev) in ravb_dmac_init() argument
429 error = ravb_config(ndev); in ravb_dmac_init()
433 error = ravb_ring_init(ndev, RAVB_BE); in ravb_dmac_init()
436 error = ravb_ring_init(ndev, RAVB_NC); in ravb_dmac_init()
438 ravb_ring_free(ndev, RAVB_BE); in ravb_dmac_init()
443 ravb_ring_format(ndev, RAVB_BE); in ravb_dmac_init()
444 ravb_ring_format(ndev, RAVB_NC); in ravb_dmac_init()
447 ravb_write(ndev, ravb_read(ndev, CCC) & ~CCC_BOC, CCC); in ravb_dmac_init()
449 ravb_write(ndev, ravb_read(ndev, CCC) | CCC_BOC, CCC); in ravb_dmac_init()
453 ravb_write(ndev, RCR_EFFS | RCR_ENCF | RCR_ETS0 | 0x18000000, RCR); in ravb_dmac_init()
456 ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00112200, TGC); in ravb_dmac_init()
459 ravb_write(ndev, TCCR_TFEN, TCCR); in ravb_dmac_init()
463 ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0); in ravb_dmac_init()
465 ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2); in ravb_dmac_init()
467 ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC); in ravb_dmac_init()
470 ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_OPERATION, in ravb_dmac_init()
476 static void ravb_get_tx_tstamp(struct net_device *ndev) in ravb_get_tx_tstamp() argument
478 struct ravb_private *priv = netdev_priv(ndev); in ravb_get_tx_tstamp()
487 count = (ravb_read(ndev, TSR) & TSR_TFFL) >> 8; in ravb_get_tx_tstamp()
489 tfa2 = ravb_read(ndev, TFA2); in ravb_get_tx_tstamp()
491 ts.tv_nsec = (u64)ravb_read(ndev, TFA0); in ravb_get_tx_tstamp()
493 ravb_read(ndev, TFA1); in ravb_get_tx_tstamp()
510 ravb_write(ndev, ravb_read(ndev, TCCR) | TCCR_TFR, TCCR); in ravb_get_tx_tstamp()
515 static bool ravb_rx(struct net_device *ndev, int *quota, int q) in ravb_rx() argument
517 struct ravb_private *priv = netdev_priv(ndev); in ravb_rx()
565 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), in ravb_rx()
582 skb->protocol = eth_type_trans(skb, ndev); in ravb_rx()
600 skb = netdev_alloc_skb(ndev, in ravb_rx()
605 dma_addr = dma_map_single(ndev->dev.parent, skb->data, in ravb_rx()
612 if (dma_mapping_error(ndev->dev.parent, dma_addr)) in ravb_rx()
627 static void ravb_rcv_snd_disable(struct net_device *ndev) in ravb_rcv_snd_disable() argument
630 ravb_write(ndev, ravb_read(ndev, ECMR) & ~(ECMR_RE | ECMR_TE), ECMR); in ravb_rcv_snd_disable()
633 static void ravb_rcv_snd_enable(struct net_device *ndev) in ravb_rcv_snd_enable() argument
636 ravb_write(ndev, ravb_read(ndev, ECMR) | ECMR_RE | ECMR_TE, ECMR); in ravb_rcv_snd_enable()
640 static int ravb_stop_dma(struct net_device *ndev) in ravb_stop_dma() argument
645 error = ravb_wait(ndev, TCCR, in ravb_stop_dma()
650 error = ravb_wait(ndev, CSR, CSR_TPO0 | CSR_TPO1 | CSR_TPO2 | CSR_TPO3, in ravb_stop_dma()
656 ravb_rcv_snd_disable(ndev); in ravb_stop_dma()
659 error = ravb_wait(ndev, CSR, CSR_RPO, 0); in ravb_stop_dma()
664 return ravb_config(ndev); in ravb_stop_dma()
668 static void ravb_emac_interrupt(struct net_device *ndev) in ravb_emac_interrupt() argument
670 struct ravb_private *priv = netdev_priv(ndev); in ravb_emac_interrupt()
673 ecsr = ravb_read(ndev, ECSR); in ravb_emac_interrupt()
674 ravb_write(ndev, ecsr, ECSR); /* clear interrupt */ in ravb_emac_interrupt()
676 ndev->stats.tx_carrier_errors++; in ravb_emac_interrupt()
681 psr = ravb_read(ndev, PSR); in ravb_emac_interrupt()
686 ravb_rcv_snd_disable(ndev); in ravb_emac_interrupt()
689 ravb_rcv_snd_enable(ndev); in ravb_emac_interrupt()
695 static void ravb_error_interrupt(struct net_device *ndev) in ravb_error_interrupt() argument
697 struct ravb_private *priv = netdev_priv(ndev); in ravb_error_interrupt()
700 eis = ravb_read(ndev, EIS); in ravb_error_interrupt()
701 ravb_write(ndev, ~EIS_QFS, EIS); in ravb_error_interrupt()
703 ris2 = ravb_read(ndev, RIS2); in ravb_error_interrupt()
704 ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF), RIS2); in ravb_error_interrupt()
722 struct net_device *ndev = dev_id; in ravb_interrupt() local
723 struct ravb_private *priv = netdev_priv(ndev); in ravb_interrupt()
729 iss = ravb_read(ndev, ISS); in ravb_interrupt()
733 u32 ris0 = ravb_read(ndev, RIS0); in ravb_interrupt()
734 u32 ric0 = ravb_read(ndev, RIC0); in ravb_interrupt()
735 u32 tis = ravb_read(ndev, TIS); in ravb_interrupt()
736 u32 tic = ravb_read(ndev, TIC); in ravb_interrupt()
741 ravb_write(ndev, ~TIS_TFUF, TIS); in ravb_interrupt()
742 ravb_get_tx_tstamp(ndev); in ravb_interrupt()
754 ravb_write(ndev, ric0, RIC0); in ravb_interrupt()
755 ravb_write(ndev, tic, TIC); in ravb_interrupt()
758 netdev_warn(ndev, in ravb_interrupt()
761 netdev_warn(ndev, in ravb_interrupt()
772 ravb_emac_interrupt(ndev); in ravb_interrupt()
778 ravb_error_interrupt(ndev); in ravb_interrupt()
783 result = ravb_ptp_interrupt(ndev); in ravb_interrupt()
792 struct net_device *ndev = napi->dev; in ravb_poll() local
793 struct ravb_private *priv = netdev_priv(ndev); in ravb_poll()
801 tis = ravb_read(ndev, TIS); in ravb_poll()
802 ris0 = ravb_read(ndev, RIS0); in ravb_poll()
809 ravb_write(ndev, ~mask, RIS0); in ravb_poll()
810 if (ravb_rx(ndev, &quota, q)) in ravb_poll()
817 ravb_write(ndev, ~mask, TIS); in ravb_poll()
818 ravb_tx_free(ndev, q, true); in ravb_poll()
819 netif_wake_subqueue(ndev, q); in ravb_poll()
829 ravb_write(ndev, ravb_read(ndev, RIC0) | mask, RIC0); in ravb_poll()
830 ravb_write(ndev, ravb_read(ndev, TIC) | mask, TIC); in ravb_poll()
837 if (priv->rx_over_errors != ndev->stats.rx_over_errors) in ravb_poll()
838 ndev->stats.rx_over_errors = priv->rx_over_errors; in ravb_poll()
839 if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) in ravb_poll()
840 ndev->stats.rx_fifo_errors = priv->rx_fifo_errors; in ravb_poll()
846 static void ravb_adjust_link(struct net_device *ndev) in ravb_adjust_link() argument
848 struct ravb_private *priv = netdev_priv(ndev); in ravb_adjust_link()
856 ravb_set_duplex(ndev); in ravb_adjust_link()
862 ravb_set_rate(ndev); in ravb_adjust_link()
865 ravb_write(ndev, ravb_read(ndev, ECMR) & ~ECMR_TXF, in ravb_adjust_link()
870 ravb_rcv_snd_enable(ndev); in ravb_adjust_link()
878 ravb_rcv_snd_disable(ndev); in ravb_adjust_link()
886 static int ravb_phy_init(struct net_device *ndev) in ravb_phy_init() argument
888 struct device_node *np = ndev->dev.parent->of_node; in ravb_phy_init()
889 struct ravb_private *priv = netdev_priv(ndev); in ravb_phy_init()
899 phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0, in ravb_phy_init()
902 netdev_err(ndev, "failed to connect PHY\n"); in ravb_phy_init()
914 netdev_err(ndev, "failed to limit PHY to 100Mbit/s\n"); in ravb_phy_init()
919 netdev_info(ndev, "limited PHY to 100Mbit/s\n"); in ravb_phy_init()
925 netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n", in ravb_phy_init()
934 static int ravb_phy_start(struct net_device *ndev) in ravb_phy_start() argument
936 struct ravb_private *priv = netdev_priv(ndev); in ravb_phy_start()
939 error = ravb_phy_init(ndev); in ravb_phy_start()
948 static int ravb_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) in ravb_get_settings() argument
950 struct ravb_private *priv = netdev_priv(ndev); in ravb_get_settings()
963 static int ravb_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) in ravb_set_settings() argument
965 struct ravb_private *priv = netdev_priv(ndev); in ravb_set_settings()
975 ravb_rcv_snd_disable(ndev); in ravb_set_settings()
986 ravb_set_duplex(ndev); in ravb_set_settings()
992 ravb_rcv_snd_enable(ndev); in ravb_set_settings()
1000 static int ravb_nway_reset(struct net_device *ndev) in ravb_nway_reset() argument
1002 struct ravb_private *priv = netdev_priv(ndev); in ravb_nway_reset()
1015 static u32 ravb_get_msglevel(struct net_device *ndev) in ravb_get_msglevel() argument
1017 struct ravb_private *priv = netdev_priv(ndev); in ravb_get_msglevel()
1022 static void ravb_set_msglevel(struct net_device *ndev, u32 value) in ravb_set_msglevel() argument
1024 struct ravb_private *priv = netdev_priv(ndev); in ravb_set_msglevel()
1075 static void ravb_get_ethtool_stats(struct net_device *ndev, in ravb_get_ethtool_stats() argument
1078 struct ravb_private *priv = netdev_priv(ndev); in ravb_get_ethtool_stats()
1104 static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data) in ravb_get_strings() argument
1113 static void ravb_get_ringparam(struct net_device *ndev, in ravb_get_ringparam() argument
1116 struct ravb_private *priv = netdev_priv(ndev); in ravb_get_ringparam()
1124 static int ravb_set_ringparam(struct net_device *ndev, in ravb_set_ringparam() argument
1127 struct ravb_private *priv = netdev_priv(ndev); in ravb_set_ringparam()
1138 if (netif_running(ndev)) { in ravb_set_ringparam()
1139 netif_device_detach(ndev); in ravb_set_ringparam()
1141 ravb_ptp_stop(ndev); in ravb_set_ringparam()
1143 error = ravb_stop_dma(ndev); in ravb_set_ringparam()
1145 netdev_err(ndev, in ravb_set_ringparam()
1149 synchronize_irq(ndev->irq); in ravb_set_ringparam()
1152 ravb_ring_free(ndev, RAVB_BE); in ravb_set_ringparam()
1153 ravb_ring_free(ndev, RAVB_NC); in ravb_set_ringparam()
1160 if (netif_running(ndev)) { in ravb_set_ringparam()
1161 error = ravb_dmac_init(ndev); in ravb_set_ringparam()
1163 netdev_err(ndev, in ravb_set_ringparam()
1169 ravb_emac_init(ndev); in ravb_set_ringparam()
1172 ravb_ptp_init(ndev, priv->pdev); in ravb_set_ringparam()
1174 netif_device_attach(ndev); in ravb_set_ringparam()
1180 static int ravb_get_ts_info(struct net_device *ndev, in ravb_get_ts_info() argument
1183 struct ravb_private *priv = netdev_priv(ndev); in ravb_get_ts_info()
1218 static int ravb_open(struct net_device *ndev) in ravb_open() argument
1220 struct ravb_private *priv = netdev_priv(ndev); in ravb_open()
1226 error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED, ndev->name, in ravb_open()
1227 ndev); in ravb_open()
1229 netdev_err(ndev, "cannot request IRQ\n"); in ravb_open()
1235 IRQF_SHARED, ndev->name, ndev); in ravb_open()
1237 netdev_err(ndev, "cannot request IRQ\n"); in ravb_open()
1243 error = ravb_dmac_init(ndev); in ravb_open()
1246 ravb_emac_init(ndev); in ravb_open()
1249 ravb_ptp_init(ndev, priv->pdev); in ravb_open()
1251 netif_tx_start_all_queues(ndev); in ravb_open()
1254 error = ravb_phy_start(ndev); in ravb_open()
1262 ravb_ptp_stop(ndev); in ravb_open()
1265 free_irq(priv->emac_irq, ndev); in ravb_open()
1267 free_irq(ndev->irq, ndev); in ravb_open()
1275 static void ravb_tx_timeout(struct net_device *ndev) in ravb_tx_timeout() argument
1277 struct ravb_private *priv = netdev_priv(ndev); in ravb_tx_timeout()
1279 netif_err(priv, tx_err, ndev, in ravb_tx_timeout()
1281 ravb_read(ndev, ISS)); in ravb_tx_timeout()
1284 ndev->stats.tx_errors++; in ravb_tx_timeout()
1293 struct net_device *ndev = priv->ndev; in ravb_tx_timeout_work() local
1296 netif_tx_stop_all_queues(ndev); in ravb_tx_timeout_work()
1299 ravb_ptp_stop(ndev); in ravb_tx_timeout_work()
1302 if (ravb_stop_dma(ndev)) { in ravb_tx_timeout_work()
1311 ravb_rcv_snd_enable(ndev); in ravb_tx_timeout_work()
1315 ravb_ring_free(ndev, RAVB_BE); in ravb_tx_timeout_work()
1316 ravb_ring_free(ndev, RAVB_NC); in ravb_tx_timeout_work()
1319 error = ravb_dmac_init(ndev); in ravb_tx_timeout_work()
1325 netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n", in ravb_tx_timeout_work()
1329 ravb_emac_init(ndev); in ravb_tx_timeout_work()
1333 ravb_ptp_init(ndev, priv->pdev); in ravb_tx_timeout_work()
1335 netif_tx_start_all_queues(ndev); in ravb_tx_timeout_work()
1339 static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) in ravb_start_xmit() argument
1341 struct ravb_private *priv = netdev_priv(ndev); in ravb_start_xmit()
1354 netif_err(priv, tx_queued, ndev, in ravb_start_xmit()
1356 netif_stop_subqueue(ndev, q); in ravb_start_xmit()
1383 dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE); in ravb_start_xmit()
1384 if (dma_mapping_error(ndev->dev.parent, dma_addr)) in ravb_start_xmit()
1393 dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE); in ravb_start_xmit()
1394 if (dma_mapping_error(ndev->dev.parent, dma_addr)) in ravb_start_xmit()
1406 dma_unmap_single(ndev->dev.parent, dma_addr, len, in ravb_start_xmit()
1428 ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR); in ravb_start_xmit()
1433 !ravb_tx_free(ndev, q, true)) in ravb_start_xmit()
1434 netif_stop_subqueue(ndev, q); in ravb_start_xmit()
1442 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), in ravb_start_xmit()
1450 static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb, in ravb_select_queue() argument
1459 static struct net_device_stats *ravb_get_stats(struct net_device *ndev) in ravb_get_stats() argument
1461 struct ravb_private *priv = netdev_priv(ndev); in ravb_get_stats()
1464 nstats = &ndev->stats; in ravb_get_stats()
1468 nstats->tx_dropped += ravb_read(ndev, TROCR); in ravb_get_stats()
1469 ravb_write(ndev, 0, TROCR); /* (write clear) */ in ravb_get_stats()
1470 nstats->collisions += ravb_read(ndev, CDCR); in ravb_get_stats()
1471 ravb_write(ndev, 0, CDCR); /* (write clear) */ in ravb_get_stats()
1472 nstats->tx_carrier_errors += ravb_read(ndev, LCCR); in ravb_get_stats()
1473 ravb_write(ndev, 0, LCCR); /* (write clear) */ in ravb_get_stats()
1475 nstats->tx_carrier_errors += ravb_read(ndev, CERCR); in ravb_get_stats()
1476 ravb_write(ndev, 0, CERCR); /* (write clear) */ in ravb_get_stats()
1477 nstats->tx_carrier_errors += ravb_read(ndev, CEECR); in ravb_get_stats()
1478 ravb_write(ndev, 0, CEECR); /* (write clear) */ in ravb_get_stats()
1500 static void ravb_set_rx_mode(struct net_device *ndev) in ravb_set_rx_mode() argument
1502 struct ravb_private *priv = netdev_priv(ndev); in ravb_set_rx_mode()
1507 ecmr = ravb_read(ndev, ECMR); in ravb_set_rx_mode()
1508 if (ndev->flags & IFF_PROMISC) in ravb_set_rx_mode()
1512 ravb_write(ndev, ecmr, ECMR); in ravb_set_rx_mode()
1518 static int ravb_close(struct net_device *ndev) in ravb_close() argument
1520 struct ravb_private *priv = netdev_priv(ndev); in ravb_close()
1523 netif_tx_stop_all_queues(ndev); in ravb_close()
1526 ravb_write(ndev, 0, RIC0); in ravb_close()
1527 ravb_write(ndev, 0, RIC1); in ravb_close()
1528 ravb_write(ndev, 0, RIC2); in ravb_close()
1529 ravb_write(ndev, 0, TIC); in ravb_close()
1532 ravb_ptp_stop(ndev); in ravb_close()
1535 if (ravb_stop_dma(ndev) < 0) in ravb_close()
1536 netdev_err(ndev, in ravb_close()
1554 free_irq(priv->emac_irq, ndev); in ravb_close()
1555 free_irq(ndev->irq, ndev); in ravb_close()
1561 ravb_ring_free(ndev, RAVB_BE); in ravb_close()
1562 ravb_ring_free(ndev, RAVB_NC); in ravb_close()
1567 static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req) in ravb_hwtstamp_get() argument
1569 struct ravb_private *priv = netdev_priv(ndev); in ravb_hwtstamp_get()
1591 static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req) in ravb_hwtstamp_set() argument
1593 struct ravb_private *priv = netdev_priv(ndev); in ravb_hwtstamp_set()
1636 static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd) in ravb_do_ioctl() argument
1638 struct ravb_private *priv = netdev_priv(ndev); in ravb_do_ioctl()
1641 if (!netif_running(ndev)) in ravb_do_ioctl()
1649 return ravb_hwtstamp_get(ndev, req); in ravb_do_ioctl()
1651 return ravb_hwtstamp_set(ndev, req); in ravb_do_ioctl()
1730 struct net_device *ndev; in ravb_probe() local
1747 ndev = alloc_etherdev_mqs(sizeof(struct ravb_private), in ravb_probe()
1749 if (!ndev) in ravb_probe()
1756 ndev->base_addr = res->start; in ravb_probe()
1757 ndev->dma = -1; in ravb_probe()
1770 ndev->irq = irq; in ravb_probe()
1772 SET_NETDEV_DEV(ndev, &pdev->dev); in ravb_probe()
1774 priv = netdev_priv(ndev); in ravb_probe()
1775 priv->ndev = ndev; in ravb_probe()
1808 ndev->netdev_ops = &ravb_netdev_ops; in ravb_probe()
1809 ndev->ethtool_ops = &ravb_ethtool_ops; in ravb_probe()
1812 ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_CONFIG, in ravb_probe()
1816 ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_CSEL) | CCC_CSEL_HPB, in ravb_probe()
1820 ravb_write(ndev, ((1000 << 20) / 130) & GTI_TIV, GTI); in ravb_probe()
1823 ravb_write(ndev, ravb_read(ndev, GCCR) | GCCR_LTI, GCCR); in ravb_probe()
1827 priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size, in ravb_probe()
1838 ravb_write(ndev, priv->desc_bat_dma, DBAT); in ravb_probe()
1847 ravb_read_mac_address(ndev, of_get_mac_address(np)); in ravb_probe()
1848 if (!is_valid_ether_addr(ndev->dev_addr)) { in ravb_probe()
1851 eth_hw_addr_random(ndev); in ravb_probe()
1861 netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64); in ravb_probe()
1862 netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64); in ravb_probe()
1865 error = register_netdev(ndev); in ravb_probe()
1870 netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n", in ravb_probe()
1871 (u32)ndev->base_addr, ndev->dev_addr, ndev->irq); in ravb_probe()
1873 platform_set_drvdata(pdev, ndev); in ravb_probe()
1882 dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, in ravb_probe()
1885 if (ndev) in ravb_probe()
1886 free_netdev(ndev); in ravb_probe()
1895 struct net_device *ndev = platform_get_drvdata(pdev); in ravb_remove() local
1896 struct ravb_private *priv = netdev_priv(ndev); in ravb_remove()
1898 dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, in ravb_remove()
1901 ravb_write(ndev, CCC_OPC_RESET, CCC); in ravb_remove()
1903 unregister_netdev(ndev); in ravb_remove()
1908 free_netdev(ndev); in ravb_remove()