Lines Matching refs:ndev
51 void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear, in ravb_modify() argument
54 ravb_write(ndev, (ravb_read(ndev, reg) & ~clear) | set, reg); in ravb_modify()
57 int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value) in ravb_wait() argument
62 if ((ravb_read(ndev, reg) & mask) == value) in ravb_wait()
69 static int ravb_set_opmode(struct net_device *ndev, u32 opmode) in ravb_set_opmode() argument
83 ravb_modify(ndev, CCC, ccc_mask, opmode); in ravb_set_opmode()
85 error = ravb_wait(ndev, CSR, CSR_OPS, csr_ops); in ravb_set_opmode()
87 netdev_err(ndev, "failed to switch device to requested mode (%u)\n", in ravb_set_opmode()
94 static void ravb_set_rate_gbeth(struct net_device *ndev) in ravb_set_rate_gbeth() argument
96 struct ravb_private *priv = netdev_priv(ndev); in ravb_set_rate_gbeth()
100 ravb_write(ndev, GBETH_GECMR_SPEED_10, GECMR); in ravb_set_rate_gbeth()
103 ravb_write(ndev, GBETH_GECMR_SPEED_100, GECMR); in ravb_set_rate_gbeth()
106 ravb_write(ndev, GBETH_GECMR_SPEED_1000, GECMR); in ravb_set_rate_gbeth()
111 static void ravb_set_rate_rcar(struct net_device *ndev) in ravb_set_rate_rcar() argument
113 struct ravb_private *priv = netdev_priv(ndev); in ravb_set_rate_rcar()
117 ravb_write(ndev, GECMR_SPEED_100, GECMR); in ravb_set_rate_rcar()
120 ravb_write(ndev, GECMR_SPEED_1000, GECMR); in ravb_set_rate_rcar()
139 struct net_device *ndev) in ravb_read_mac_address() argument
143 ret = of_get_ethdev_address(np, ndev); in ravb_read_mac_address()
145 u32 mahr = ravb_read(ndev, MAHR); in ravb_read_mac_address()
146 u32 malr = ravb_read(ndev, MALR); in ravb_read_mac_address()
155 eth_hw_addr_set(ndev, addr); in ravb_read_mac_address()
164 ravb_modify(priv->ndev, PIR, mask, set ? mask : 0); in ravb_mdio_ctrl()
191 return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0; in ravb_get_mdio_data()
204 static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only) in ravb_tx_free() argument
206 struct ravb_private *priv = netdev_priv(ndev); in ravb_tx_free()
228 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), in ravb_tx_free()
247 static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q) in ravb_rx_ring_free_gbeth() argument
249 struct ravb_private *priv = netdev_priv(ndev); in ravb_rx_ring_free_gbeth()
259 if (!dma_mapping_error(ndev->dev.parent, in ravb_rx_ring_free_gbeth()
261 dma_unmap_single(ndev->dev.parent, in ravb_rx_ring_free_gbeth()
267 dma_free_coherent(ndev->dev.parent, ring_size, priv->gbeth_rx_ring, in ravb_rx_ring_free_gbeth()
272 static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q) in ravb_rx_ring_free_rcar() argument
274 struct ravb_private *priv = netdev_priv(ndev); in ravb_rx_ring_free_rcar()
284 if (!dma_mapping_error(ndev->dev.parent, in ravb_rx_ring_free_rcar()
286 dma_unmap_single(ndev->dev.parent, in ravb_rx_ring_free_rcar()
293 dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q], in ravb_rx_ring_free_rcar()
299 static void ravb_ring_free(struct net_device *ndev, int q) in ravb_ring_free() argument
301 struct ravb_private *priv = netdev_priv(ndev); in ravb_ring_free()
307 info->rx_ring_free(ndev, q); in ravb_ring_free()
310 ravb_tx_free(ndev, q, false); in ravb_ring_free()
314 dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q], in ravb_ring_free()
338 static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q) in ravb_rx_ring_format_gbeth() argument
340 struct ravb_private *priv = netdev_priv(ndev); in ravb_rx_ring_format_gbeth()
353 dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data, in ravb_rx_ring_format_gbeth()
359 if (dma_mapping_error(ndev->dev.parent, dma_addr)) in ravb_rx_ring_format_gbeth()
369 static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q) in ravb_rx_ring_format_rcar() argument
371 struct ravb_private *priv = netdev_priv(ndev); in ravb_rx_ring_format_rcar()
383 dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data, in ravb_rx_ring_format_rcar()
389 if (dma_mapping_error(ndev->dev.parent, dma_addr)) in ravb_rx_ring_format_rcar()
400 static void ravb_ring_format(struct net_device *ndev, int q) in ravb_ring_format() argument
402 struct ravb_private *priv = netdev_priv(ndev); in ravb_ring_format()
416 info->rx_ring_format(ndev, q); in ravb_ring_format()
442 static void *ravb_alloc_rx_desc_gbeth(struct net_device *ndev, int q) in ravb_alloc_rx_desc_gbeth() argument
444 struct ravb_private *priv = netdev_priv(ndev); in ravb_alloc_rx_desc_gbeth()
449 priv->gbeth_rx_ring = dma_alloc_coherent(ndev->dev.parent, ring_size, in ravb_alloc_rx_desc_gbeth()
455 static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q) in ravb_alloc_rx_desc_rcar() argument
457 struct ravb_private *priv = netdev_priv(ndev); in ravb_alloc_rx_desc_rcar()
462 priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size, in ravb_alloc_rx_desc_rcar()
469 static int ravb_ring_init(struct net_device *ndev, int q) in ravb_ring_init() argument
471 struct ravb_private *priv = netdev_priv(ndev); in ravb_ring_init()
487 skb = __netdev_alloc_skb(ndev, info->max_rx_len, GFP_KERNEL); in ravb_ring_init()
503 if (!info->alloc_rx_desc(ndev, q)) in ravb_ring_init()
511 priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size, in ravb_ring_init()
520 ravb_ring_free(ndev, q); in ravb_ring_init()
525 static void ravb_emac_init_gbeth(struct net_device *ndev) in ravb_emac_init_gbeth() argument
527 struct ravb_private *priv = netdev_priv(ndev); in ravb_emac_init_gbeth()
530 ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_MII, CXR35); in ravb_emac_init_gbeth()
531 ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, 0); in ravb_emac_init_gbeth()
533 ravb_write(ndev, (1000 << 16) | CXR35_SEL_XMII_RGMII, CXR35); in ravb_emac_init_gbeth()
534 ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, in ravb_emac_init_gbeth()
539 ravb_write(ndev, GBETH_RX_BUFF_MAX + ETH_FCS_LEN, RFLR); in ravb_emac_init_gbeth()
542 ravb_write(ndev, ECMR_ZPF | ((priv->duplex > 0) ? ECMR_DM : 0) | in ravb_emac_init_gbeth()
546 ravb_set_rate_gbeth(ndev); in ravb_emac_init_gbeth()
549 ravb_write(ndev, in ravb_emac_init_gbeth()
550 (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) | in ravb_emac_init_gbeth()
551 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR); in ravb_emac_init_gbeth()
552 ravb_write(ndev, (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR); in ravb_emac_init_gbeth()
555 ravb_write(ndev, ECSR_ICD | ECSR_LCHNG | ECSR_PFRI, ECSR); in ravb_emac_init_gbeth()
556 ravb_write(ndev, CSR0_TPE | CSR0_RPE, CSR0); in ravb_emac_init_gbeth()
559 ravb_write(ndev, ECSIPR_ICDIP, ECSIPR); in ravb_emac_init_gbeth()
562 static void ravb_emac_init_rcar(struct net_device *ndev) in ravb_emac_init_rcar() argument
565 ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR); in ravb_emac_init_rcar()
568 ravb_write(ndev, ECMR_ZPF | ECMR_DM | in ravb_emac_init_rcar()
569 (ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) | in ravb_emac_init_rcar()
572 ravb_set_rate_rcar(ndev); in ravb_emac_init_rcar()
575 ravb_write(ndev, in ravb_emac_init_rcar()
576 (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) | in ravb_emac_init_rcar()
577 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR); in ravb_emac_init_rcar()
578 ravb_write(ndev, in ravb_emac_init_rcar()
579 (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR); in ravb_emac_init_rcar()
582 ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR); in ravb_emac_init_rcar()
585 ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR); in ravb_emac_init_rcar()
589 static void ravb_emac_init(struct net_device *ndev) in ravb_emac_init() argument
591 struct ravb_private *priv = netdev_priv(ndev); in ravb_emac_init()
594 info->emac_init(ndev); in ravb_emac_init()
597 static int ravb_dmac_init_gbeth(struct net_device *ndev) in ravb_dmac_init_gbeth() argument
601 error = ravb_ring_init(ndev, RAVB_BE); in ravb_dmac_init_gbeth()
606 ravb_ring_format(ndev, RAVB_BE); in ravb_dmac_init_gbeth()
609 ravb_write(ndev, 0x60000000, RCR); in ravb_dmac_init_gbeth()
612 ravb_write(ndev, 0x7ffc0000 | GBETH_RX_BUFF_MAX, RTC); in ravb_dmac_init_gbeth()
615 ravb_write(ndev, 0x00222200, TGC); in ravb_dmac_init_gbeth()
617 ravb_write(ndev, 0, TCCR); in ravb_dmac_init_gbeth()
620 ravb_write(ndev, RIC0_FRE0, RIC0); in ravb_dmac_init_gbeth()
622 ravb_write(ndev, 0x0, RIC1); in ravb_dmac_init_gbeth()
624 ravb_write(ndev, RIC2_QFE0 | RIC2_RFFE, RIC2); in ravb_dmac_init_gbeth()
626 ravb_write(ndev, TIC_FTE0, TIC); in ravb_dmac_init_gbeth()
631 static int ravb_dmac_init_rcar(struct net_device *ndev) in ravb_dmac_init_rcar() argument
633 struct ravb_private *priv = netdev_priv(ndev); in ravb_dmac_init_rcar()
637 error = ravb_ring_init(ndev, RAVB_BE); in ravb_dmac_init_rcar()
640 error = ravb_ring_init(ndev, RAVB_NC); in ravb_dmac_init_rcar()
642 ravb_ring_free(ndev, RAVB_BE); in ravb_dmac_init_rcar()
647 ravb_ring_format(ndev, RAVB_BE); in ravb_dmac_init_rcar()
648 ravb_ring_format(ndev, RAVB_NC); in ravb_dmac_init_rcar()
651 ravb_write(ndev, in ravb_dmac_init_rcar()
655 ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00112200, TGC); in ravb_dmac_init_rcar()
658 ravb_write(ndev, TCCR_TFEN, TCCR); in ravb_dmac_init_rcar()
663 ravb_write(ndev, 0, DIL); in ravb_dmac_init_rcar()
665 ravb_write(ndev, CIE_CRIE | CIE_CTIE | CIE_CL0M, CIE); in ravb_dmac_init_rcar()
668 ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0); in ravb_dmac_init_rcar()
670 ravb_write(ndev, 0, RIC1); in ravb_dmac_init_rcar()
672 ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2); in ravb_dmac_init_rcar()
674 ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC); in ravb_dmac_init_rcar()
680 static int ravb_dmac_init(struct net_device *ndev) in ravb_dmac_init() argument
682 struct ravb_private *priv = netdev_priv(ndev); in ravb_dmac_init()
687 error = ravb_set_opmode(ndev, CCC_OPC_CONFIG); in ravb_dmac_init()
691 error = info->dmac_init(ndev); in ravb_dmac_init()
696 return ravb_set_opmode(ndev, CCC_OPC_OPERATION); in ravb_dmac_init()
699 static void ravb_get_tx_tstamp(struct net_device *ndev) in ravb_get_tx_tstamp() argument
701 struct ravb_private *priv = netdev_priv(ndev); in ravb_get_tx_tstamp()
710 count = (ravb_read(ndev, TSR) & TSR_TFFL) >> 8; in ravb_get_tx_tstamp()
712 tfa2 = ravb_read(ndev, TFA2); in ravb_get_tx_tstamp()
714 ts.tv_nsec = (u64)ravb_read(ndev, TFA0); in ravb_get_tx_tstamp()
716 ravb_read(ndev, TFA1); in ravb_get_tx_tstamp()
733 ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR); in ravb_get_tx_tstamp()
752 static struct sk_buff *ravb_get_skb_gbeth(struct net_device *ndev, int entry, in ravb_get_skb_gbeth() argument
755 struct ravb_private *priv = netdev_priv(ndev); in ravb_get_skb_gbeth()
760 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), in ravb_get_skb_gbeth()
767 static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q) in ravb_rx_gbeth() argument
769 struct ravb_private *priv = netdev_priv(ndev); in ravb_rx_gbeth()
819 skb = ravb_get_skb_gbeth(ndev, entry, desc); in ravb_rx_gbeth()
821 skb->protocol = eth_type_trans(skb, ndev); in ravb_rx_gbeth()
827 priv->rx_1st_skb = ravb_get_skb_gbeth(ndev, entry, desc); in ravb_rx_gbeth()
831 skb = ravb_get_skb_gbeth(ndev, entry, desc); in ravb_rx_gbeth()
840 skb = ravb_get_skb_gbeth(ndev, entry, desc); in ravb_rx_gbeth()
848 eth_type_trans(priv->rx_1st_skb, ndev); in ravb_rx_gbeth()
868 skb = netdev_alloc_skb(ndev, info->max_rx_len); in ravb_rx_gbeth()
872 dma_addr = dma_map_single(ndev->dev.parent, in ravb_rx_gbeth()
880 if (dma_mapping_error(ndev->dev.parent, dma_addr)) in ravb_rx_gbeth()
896 static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q) in ravb_rx_rcar() argument
898 struct ravb_private *priv = netdev_priv(ndev); in ravb_rx_rcar()
947 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), in ravb_rx_rcar()
965 skb->protocol = eth_type_trans(skb, ndev); in ravb_rx_rcar()
966 if (ndev->features & NETIF_F_RXCSUM) in ravb_rx_rcar()
984 skb = netdev_alloc_skb(ndev, info->max_rx_len); in ravb_rx_rcar()
988 dma_addr = dma_map_single(ndev->dev.parent, skb->data, in ravb_rx_rcar()
995 if (dma_mapping_error(ndev->dev.parent, dma_addr)) in ravb_rx_rcar()
1011 static bool ravb_rx(struct net_device *ndev, int *quota, int q) in ravb_rx() argument
1013 struct ravb_private *priv = netdev_priv(ndev); in ravb_rx()
1016 return info->receive(ndev, quota, q); in ravb_rx()
1019 static void ravb_rcv_snd_disable(struct net_device *ndev) in ravb_rcv_snd_disable() argument
1022 ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0); in ravb_rcv_snd_disable()
1025 static void ravb_rcv_snd_enable(struct net_device *ndev) in ravb_rcv_snd_enable() argument
1028 ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE); in ravb_rcv_snd_enable()
1032 static int ravb_stop_dma(struct net_device *ndev) in ravb_stop_dma() argument
1034 struct ravb_private *priv = netdev_priv(ndev); in ravb_stop_dma()
1039 error = ravb_wait(ndev, TCCR, info->tccr_mask, 0); in ravb_stop_dma()
1044 error = ravb_wait(ndev, CSR, CSR_TPO0 | CSR_TPO1 | CSR_TPO2 | CSR_TPO3, in ravb_stop_dma()
1050 ravb_rcv_snd_disable(ndev); in ravb_stop_dma()
1053 error = ravb_wait(ndev, CSR, CSR_RPO, 0); in ravb_stop_dma()
1058 return ravb_set_opmode(ndev, CCC_OPC_CONFIG); in ravb_stop_dma()
1062 static void ravb_emac_interrupt_unlocked(struct net_device *ndev) in ravb_emac_interrupt_unlocked() argument
1064 struct ravb_private *priv = netdev_priv(ndev); in ravb_emac_interrupt_unlocked()
1067 ecsr = ravb_read(ndev, ECSR); in ravb_emac_interrupt_unlocked()
1068 ravb_write(ndev, ecsr, ECSR); /* clear interrupt */ in ravb_emac_interrupt_unlocked()
1073 ndev->stats.tx_carrier_errors++; in ravb_emac_interrupt_unlocked()
1078 psr = ravb_read(ndev, PSR); in ravb_emac_interrupt_unlocked()
1083 ravb_rcv_snd_disable(ndev); in ravb_emac_interrupt_unlocked()
1086 ravb_rcv_snd_enable(ndev); in ravb_emac_interrupt_unlocked()
1093 struct net_device *ndev = dev_id; in ravb_emac_interrupt() local
1094 struct ravb_private *priv = netdev_priv(ndev); in ravb_emac_interrupt()
1097 ravb_emac_interrupt_unlocked(ndev); in ravb_emac_interrupt()
1103 static void ravb_error_interrupt(struct net_device *ndev) in ravb_error_interrupt() argument
1105 struct ravb_private *priv = netdev_priv(ndev); in ravb_error_interrupt()
1108 eis = ravb_read(ndev, EIS); in ravb_error_interrupt()
1109 ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS); in ravb_error_interrupt()
1111 ris2 = ravb_read(ndev, RIS2); in ravb_error_interrupt()
1112 ravb_write(ndev, ~(RIS2_QFF0 | RIS2_QFF1 | RIS2_RFFF | RIS2_RESERVED), in ravb_error_interrupt()
1129 static bool ravb_queue_interrupt(struct net_device *ndev, int q) in ravb_queue_interrupt() argument
1131 struct ravb_private *priv = netdev_priv(ndev); in ravb_queue_interrupt()
1133 u32 ris0 = ravb_read(ndev, RIS0); in ravb_queue_interrupt()
1134 u32 ric0 = ravb_read(ndev, RIC0); in ravb_queue_interrupt()
1135 u32 tis = ravb_read(ndev, TIS); in ravb_queue_interrupt()
1136 u32 tic = ravb_read(ndev, TIC); in ravb_queue_interrupt()
1142 ravb_write(ndev, ric0 & ~BIT(q), RIC0); in ravb_queue_interrupt()
1143 ravb_write(ndev, tic & ~BIT(q), TIC); in ravb_queue_interrupt()
1145 ravb_write(ndev, BIT(q), RID0); in ravb_queue_interrupt()
1146 ravb_write(ndev, BIT(q), TID); in ravb_queue_interrupt()
1150 netdev_warn(ndev, in ravb_queue_interrupt()
1153 netdev_warn(ndev, in ravb_queue_interrupt()
1162 static bool ravb_timestamp_interrupt(struct net_device *ndev) in ravb_timestamp_interrupt() argument
1164 u32 tis = ravb_read(ndev, TIS); in ravb_timestamp_interrupt()
1167 ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS); in ravb_timestamp_interrupt()
1168 ravb_get_tx_tstamp(ndev); in ravb_timestamp_interrupt()
1176 struct net_device *ndev = dev_id; in ravb_interrupt() local
1177 struct ravb_private *priv = netdev_priv(ndev); in ravb_interrupt()
1184 iss = ravb_read(ndev, ISS); in ravb_interrupt()
1191 if (ravb_timestamp_interrupt(ndev)) in ravb_interrupt()
1197 if (ravb_queue_interrupt(ndev, q)) in ravb_interrupt()
1201 if (ravb_queue_interrupt(ndev, RAVB_BE)) in ravb_interrupt()
1208 ravb_emac_interrupt_unlocked(ndev); in ravb_interrupt()
1214 ravb_error_interrupt(ndev); in ravb_interrupt()
1220 ravb_ptp_interrupt(ndev); in ravb_interrupt()
1231 struct net_device *ndev = dev_id; in ravb_multi_interrupt() local
1232 struct ravb_private *priv = netdev_priv(ndev); in ravb_multi_interrupt()
1238 iss = ravb_read(ndev, ISS); in ravb_multi_interrupt()
1241 if ((iss & ISS_TFUS) && ravb_timestamp_interrupt(ndev)) in ravb_multi_interrupt()
1246 ravb_error_interrupt(ndev); in ravb_multi_interrupt()
1252 ravb_ptp_interrupt(ndev); in ravb_multi_interrupt()
1262 struct net_device *ndev = dev_id; in ravb_dma_interrupt() local
1263 struct ravb_private *priv = netdev_priv(ndev); in ravb_dma_interrupt()
1269 if (ravb_queue_interrupt(ndev, q)) in ravb_dma_interrupt()
1288 struct net_device *ndev = napi->dev; in ravb_poll() local
1289 struct ravb_private *priv = netdev_priv(ndev); in ravb_poll()
1299 ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0); in ravb_poll()
1300 unmask = !ravb_rx(ndev, &quota, q); in ravb_poll()
1305 ravb_write(ndev, ~(mask | TIS_RESERVED), TIS); in ravb_poll()
1306 ravb_tx_free(ndev, q, true); in ravb_poll()
1307 netif_wake_subqueue(ndev, q); in ravb_poll()
1314 if (priv->rx_over_errors != ndev->stats.rx_over_errors) in ravb_poll()
1315 ndev->stats.rx_over_errors = priv->rx_over_errors; in ravb_poll()
1316 if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) in ravb_poll()
1317 ndev->stats.rx_fifo_errors = priv->rx_fifo_errors; in ravb_poll()
1327 ravb_modify(ndev, RIC0, mask, mask); in ravb_poll()
1328 ravb_modify(ndev, TIC, mask, mask); in ravb_poll()
1330 ravb_write(ndev, mask, RIE0); in ravb_poll()
1331 ravb_write(ndev, mask, TIE); in ravb_poll()
1339 static void ravb_set_duplex_gbeth(struct net_device *ndev) in ravb_set_duplex_gbeth() argument
1341 struct ravb_private *priv = netdev_priv(ndev); in ravb_set_duplex_gbeth()
1343 ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex > 0 ? ECMR_DM : 0); in ravb_set_duplex_gbeth()
1347 static void ravb_adjust_link(struct net_device *ndev) in ravb_adjust_link() argument
1349 struct ravb_private *priv = netdev_priv(ndev); in ravb_adjust_link()
1351 struct phy_device *phydev = ndev->phydev; in ravb_adjust_link()
1359 ravb_rcv_snd_disable(ndev); in ravb_adjust_link()
1365 ravb_set_duplex_gbeth(ndev); in ravb_adjust_link()
1371 info->set_rate(ndev); in ravb_adjust_link()
1374 ravb_modify(ndev, ECMR, ECMR_TXF, 0); in ravb_adjust_link()
1388 ravb_rcv_snd_enable(ndev); in ravb_adjust_link()
1397 static int ravb_phy_init(struct net_device *ndev) in ravb_phy_init() argument
1399 struct device_node *np = ndev->dev.parent->of_node; in ravb_phy_init()
1400 struct ravb_private *priv = netdev_priv(ndev); in ravb_phy_init()
1427 phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0, iface); in ravb_phy_init()
1430 netdev_err(ndev, "failed to connect PHY\n"); in ravb_phy_init()
1459 static int ravb_phy_start(struct net_device *ndev) in ravb_phy_start() argument
1463 error = ravb_phy_init(ndev); in ravb_phy_start()
1467 phy_start(ndev->phydev); in ravb_phy_start()
1472 static u32 ravb_get_msglevel(struct net_device *ndev) in ravb_get_msglevel() argument
1474 struct ravb_private *priv = netdev_priv(ndev); in ravb_get_msglevel()
1479 static void ravb_set_msglevel(struct net_device *ndev, u32 value) in ravb_set_msglevel() argument
1481 struct ravb_private *priv = netdev_priv(ndev); in ravb_set_msglevel()
1551 static void ravb_get_ethtool_stats(struct net_device *ndev, in ravb_get_ethtool_stats() argument
1554 struct ravb_private *priv = netdev_priv(ndev); in ravb_get_ethtool_stats()
1583 static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data) in ravb_get_strings() argument
1585 struct ravb_private *priv = netdev_priv(ndev); in ravb_get_strings()
1595 static void ravb_get_ringparam(struct net_device *ndev, in ravb_get_ringparam() argument
1600 struct ravb_private *priv = netdev_priv(ndev); in ravb_get_ringparam()
1608 static int ravb_set_ringparam(struct net_device *ndev, in ravb_set_ringparam() argument
1613 struct ravb_private *priv = netdev_priv(ndev); in ravb_set_ringparam()
1625 if (netif_running(ndev)) { in ravb_set_ringparam()
1626 netif_device_detach(ndev); in ravb_set_ringparam()
1629 ravb_ptp_stop(ndev); in ravb_set_ringparam()
1631 error = ravb_stop_dma(ndev); in ravb_set_ringparam()
1633 netdev_err(ndev, in ravb_set_ringparam()
1637 synchronize_irq(ndev->irq); in ravb_set_ringparam()
1640 ravb_ring_free(ndev, RAVB_BE); in ravb_set_ringparam()
1642 ravb_ring_free(ndev, RAVB_NC); in ravb_set_ringparam()
1649 if (netif_running(ndev)) { in ravb_set_ringparam()
1650 error = ravb_dmac_init(ndev); in ravb_set_ringparam()
1652 netdev_err(ndev, in ravb_set_ringparam()
1658 ravb_emac_init(ndev); in ravb_set_ringparam()
1662 ravb_ptp_init(ndev, priv->pdev); in ravb_set_ringparam()
1664 netif_device_attach(ndev); in ravb_set_ringparam()
1670 static int ravb_get_ts_info(struct net_device *ndev, in ravb_get_ts_info() argument
1673 struct ravb_private *priv = netdev_priv(ndev); in ravb_get_ts_info()
1694 static void ravb_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) in ravb_get_wol() argument
1696 struct ravb_private *priv = netdev_priv(ndev); in ravb_get_wol()
1702 static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) in ravb_set_wol() argument
1704 struct ravb_private *priv = netdev_priv(ndev); in ravb_set_wol()
1735 struct net_device *ndev, struct device *dev, in ravb_hook_irq() argument
1741 name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch); in ravb_hook_irq()
1744 error = request_irq(irq, handler, 0, name, ndev); in ravb_hook_irq()
1746 netdev_err(ndev, "cannot request IRQ %s\n", name); in ravb_hook_irq()
1752 static int ravb_open(struct net_device *ndev) in ravb_open() argument
1754 struct ravb_private *priv = netdev_priv(ndev); in ravb_open()
1765 error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED, in ravb_open()
1766 ndev->name, ndev); in ravb_open()
1768 netdev_err(ndev, "cannot request IRQ\n"); in ravb_open()
1772 error = ravb_hook_irq(ndev->irq, ravb_multi_interrupt, ndev, in ravb_open()
1776 error = ravb_hook_irq(priv->emac_irq, ravb_emac_interrupt, ndev, in ravb_open()
1781 ndev, dev, "ch0:rx_be"); in ravb_open()
1785 ndev, dev, "ch18:tx_be"); in ravb_open()
1789 ndev, dev, "ch1:rx_nc"); in ravb_open()
1793 ndev, dev, "ch19:tx_nc"); in ravb_open()
1799 ndev, dev, "err_a"); in ravb_open()
1803 ndev, dev, "mgmt_a"); in ravb_open()
1810 error = ravb_dmac_init(ndev); in ravb_open()
1813 ravb_emac_init(ndev); in ravb_open()
1817 ravb_ptp_init(ndev, priv->pdev); in ravb_open()
1820 error = ravb_phy_start(ndev); in ravb_open()
1824 netif_tx_start_all_queues(ndev); in ravb_open()
1831 ravb_ptp_stop(ndev); in ravb_open()
1832 ravb_stop_dma(ndev); in ravb_open()
1837 free_irq(priv->mgmta_irq, ndev); in ravb_open()
1840 free_irq(priv->erra_irq, ndev); in ravb_open()
1842 free_irq(priv->tx_irqs[RAVB_NC], ndev); in ravb_open()
1844 free_irq(priv->rx_irqs[RAVB_NC], ndev); in ravb_open()
1846 free_irq(priv->tx_irqs[RAVB_BE], ndev); in ravb_open()
1848 free_irq(priv->rx_irqs[RAVB_BE], ndev); in ravb_open()
1850 free_irq(priv->emac_irq, ndev); in ravb_open()
1852 free_irq(ndev->irq, ndev); in ravb_open()
1861 static void ravb_tx_timeout(struct net_device *ndev, unsigned int txqueue) in ravb_tx_timeout() argument
1863 struct ravb_private *priv = netdev_priv(ndev); in ravb_tx_timeout()
1865 netif_err(priv, tx_err, ndev, in ravb_tx_timeout()
1867 ravb_read(ndev, ISS)); in ravb_tx_timeout()
1870 ndev->stats.tx_errors++; in ravb_tx_timeout()
1880 struct net_device *ndev = priv->ndev; in ravb_tx_timeout_work() local
1889 netif_tx_stop_all_queues(ndev); in ravb_tx_timeout_work()
1893 ravb_ptp_stop(ndev); in ravb_tx_timeout_work()
1896 if (ravb_stop_dma(ndev)) { in ravb_tx_timeout_work()
1905 ravb_rcv_snd_enable(ndev); in ravb_tx_timeout_work()
1909 ravb_ring_free(ndev, RAVB_BE); in ravb_tx_timeout_work()
1911 ravb_ring_free(ndev, RAVB_NC); in ravb_tx_timeout_work()
1914 error = ravb_dmac_init(ndev); in ravb_tx_timeout_work()
1920 netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n", in ravb_tx_timeout_work()
1924 ravb_emac_init(ndev); in ravb_tx_timeout_work()
1929 ravb_ptp_init(ndev, priv->pdev); in ravb_tx_timeout_work()
1931 netif_tx_start_all_queues(ndev); in ravb_tx_timeout_work()
1938 static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) in ravb_start_xmit() argument
1940 struct ravb_private *priv = netdev_priv(ndev); in ravb_start_xmit()
1955 netif_err(priv, tx_queued, ndev, in ravb_start_xmit()
1957 netif_stop_subqueue(ndev, q); in ravb_start_xmit()
1989 dma_addr = dma_map_single(ndev->dev.parent, buffer, len, in ravb_start_xmit()
1991 if (dma_mapping_error(ndev->dev.parent, dma_addr)) in ravb_start_xmit()
2000 dma_addr = dma_map_single(ndev->dev.parent, buffer, len, in ravb_start_xmit()
2002 if (dma_mapping_error(ndev->dev.parent, dma_addr)) in ravb_start_xmit()
2009 dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, in ravb_start_xmit()
2011 if (dma_mapping_error(ndev->dev.parent, dma_addr)) in ravb_start_xmit()
2024 dma_unmap_single(ndev->dev.parent, dma_addr, in ravb_start_xmit()
2051 ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q); in ravb_start_xmit()
2056 !ravb_tx_free(ndev, q, true)) in ravb_start_xmit()
2057 netif_stop_subqueue(ndev, q); in ravb_start_xmit()
2064 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), in ravb_start_xmit()
2072 static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb, in ravb_select_queue() argument
2081 static struct net_device_stats *ravb_get_stats(struct net_device *ndev) in ravb_get_stats() argument
2083 struct ravb_private *priv = netdev_priv(ndev); in ravb_get_stats()
2087 nstats = &ndev->stats; in ravb_get_stats()
2091 nstats->tx_dropped += ravb_read(ndev, TROCR); in ravb_get_stats()
2092 ravb_write(ndev, 0, TROCR); /* (write clear) */ in ravb_get_stats()
2096 nstats->collisions += ravb_read(ndev, CXR41); in ravb_get_stats()
2097 ravb_write(ndev, 0, CXR41); /* (write clear) */ in ravb_get_stats()
2098 nstats->tx_carrier_errors += ravb_read(ndev, CXR42); in ravb_get_stats()
2099 ravb_write(ndev, 0, CXR42); /* (write clear) */ in ravb_get_stats()
2133 static void ravb_set_rx_mode(struct net_device *ndev) in ravb_set_rx_mode() argument
2135 struct ravb_private *priv = netdev_priv(ndev); in ravb_set_rx_mode()
2139 ravb_modify(ndev, ECMR, ECMR_PRM, in ravb_set_rx_mode()
2140 ndev->flags & IFF_PROMISC ? ECMR_PRM : 0); in ravb_set_rx_mode()
2145 static int ravb_close(struct net_device *ndev) in ravb_close() argument
2147 struct device_node *np = ndev->dev.parent->of_node; in ravb_close()
2148 struct ravb_private *priv = netdev_priv(ndev); in ravb_close()
2152 netif_tx_stop_all_queues(ndev); in ravb_close()
2155 ravb_write(ndev, 0, RIC0); in ravb_close()
2156 ravb_write(ndev, 0, RIC2); in ravb_close()
2157 ravb_write(ndev, 0, TIC); in ravb_close()
2161 ravb_ptp_stop(ndev); in ravb_close()
2164 if (ravb_stop_dma(ndev) < 0) in ravb_close()
2165 netdev_err(ndev, in ravb_close()
2178 if (ndev->phydev) { in ravb_close()
2179 phy_stop(ndev->phydev); in ravb_close()
2180 phy_disconnect(ndev->phydev); in ravb_close()
2188 free_irq(priv->tx_irqs[RAVB_NC], ndev); in ravb_close()
2189 free_irq(priv->rx_irqs[RAVB_NC], ndev); in ravb_close()
2190 free_irq(priv->tx_irqs[RAVB_BE], ndev); in ravb_close()
2191 free_irq(priv->rx_irqs[RAVB_BE], ndev); in ravb_close()
2192 free_irq(priv->emac_irq, ndev); in ravb_close()
2194 free_irq(priv->erra_irq, ndev); in ravb_close()
2195 free_irq(priv->mgmta_irq, ndev); in ravb_close()
2198 free_irq(ndev->irq, ndev); in ravb_close()
2205 ravb_ring_free(ndev, RAVB_BE); in ravb_close()
2207 ravb_ring_free(ndev, RAVB_NC); in ravb_close()
2212 static int ravb_hwtstamp_get(struct net_device *ndev, struct ifreq *req) in ravb_hwtstamp_get() argument
2214 struct ravb_private *priv = netdev_priv(ndev); in ravb_hwtstamp_get()
2236 static int ravb_hwtstamp_set(struct net_device *ndev, struct ifreq *req) in ravb_hwtstamp_set() argument
2238 struct ravb_private *priv = netdev_priv(ndev); in ravb_hwtstamp_set()
2277 static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd) in ravb_do_ioctl() argument
2279 struct phy_device *phydev = ndev->phydev; in ravb_do_ioctl()
2281 if (!netif_running(ndev)) in ravb_do_ioctl()
2289 return ravb_hwtstamp_get(ndev, req); in ravb_do_ioctl()
2291 return ravb_hwtstamp_set(ndev, req); in ravb_do_ioctl()
2297 static int ravb_change_mtu(struct net_device *ndev, int new_mtu) in ravb_change_mtu() argument
2299 struct ravb_private *priv = netdev_priv(ndev); in ravb_change_mtu()
2301 ndev->mtu = new_mtu; in ravb_change_mtu()
2303 if (netif_running(ndev)) { in ravb_change_mtu()
2305 ravb_emac_init(ndev); in ravb_change_mtu()
2308 netdev_update_features(ndev); in ravb_change_mtu()
2313 static void ravb_set_rx_csum(struct net_device *ndev, bool enable) in ravb_set_rx_csum() argument
2315 struct ravb_private *priv = netdev_priv(ndev); in ravb_set_rx_csum()
2321 ravb_rcv_snd_disable(ndev); in ravb_set_rx_csum()
2324 ravb_modify(ndev, ECMR, ECMR_RCSC, enable ? ECMR_RCSC : 0); in ravb_set_rx_csum()
2327 ravb_rcv_snd_enable(ndev); in ravb_set_rx_csum()
2332 static int ravb_set_features_gbeth(struct net_device *ndev, in ravb_set_features_gbeth() argument
2339 static int ravb_set_features_rcar(struct net_device *ndev, in ravb_set_features_rcar() argument
2342 netdev_features_t changed = ndev->features ^ features; in ravb_set_features_rcar()
2345 ravb_set_rx_csum(ndev, features & NETIF_F_RXCSUM); in ravb_set_features_rcar()
2347 ndev->features = features; in ravb_set_features_rcar()
2352 static int ravb_set_features(struct net_device *ndev, in ravb_set_features() argument
2355 struct ravb_private *priv = netdev_priv(ndev); in ravb_set_features()
2358 return info->set_feature(ndev, features); in ravb_set_features()
2539 static int ravb_set_gti(struct net_device *ndev) in ravb_set_gti() argument
2541 struct ravb_private *priv = netdev_priv(ndev); in ravb_set_gti()
2543 struct device *dev = ndev->dev.parent; in ravb_set_gti()
2562 ravb_write(ndev, inc, GTI); in ravb_set_gti()
2567 static int ravb_set_config_mode(struct net_device *ndev) in ravb_set_config_mode() argument
2569 struct ravb_private *priv = netdev_priv(ndev); in ravb_set_config_mode()
2574 error = ravb_set_opmode(ndev, CCC_OPC_CONFIG); in ravb_set_config_mode()
2578 ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB); in ravb_set_config_mode()
2580 error = ravb_set_opmode(ndev, CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB); in ravb_set_config_mode()
2582 error = ravb_set_opmode(ndev, CCC_OPC_CONFIG); in ravb_set_config_mode()
2589 static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev) in ravb_parse_delay_mode() argument
2591 struct ravb_private *priv = netdev_priv(ndev); in ravb_parse_delay_mode()
2623 static void ravb_set_delay_mode(struct net_device *ndev) in ravb_set_delay_mode() argument
2625 struct ravb_private *priv = netdev_priv(ndev); in ravb_set_delay_mode()
2632 ravb_modify(ndev, APSR, APSR_RDM | APSR_TDM, set); in ravb_set_delay_mode()
2641 struct net_device *ndev; in ravb_probe() local
2657 ndev = alloc_etherdev_mqs(sizeof(struct ravb_private), in ravb_probe()
2659 if (!ndev) in ravb_probe()
2664 ndev->features = info->net_features; in ravb_probe()
2665 ndev->hw_features = info->net_hw_features; in ravb_probe()
2688 ndev->irq = irq; in ravb_probe()
2690 SET_NETDEV_DEV(ndev, &pdev->dev); in ravb_probe()
2692 priv = netdev_priv(ndev); in ravb_probe()
2695 priv->ndev = ndev; in ravb_probe()
2711 ndev->base_addr = res->start; in ravb_probe()
2790 ndev->max_mtu = info->rx_max_buf_size - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); in ravb_probe()
2791 ndev->min_mtu = ETH_MIN_MTU; in ravb_probe()
2801 ndev->netdev_ops = &ravb_netdev_ops; in ravb_probe()
2802 ndev->ethtool_ops = &ravb_ethtool_ops; in ravb_probe()
2805 error = ravb_set_config_mode(ndev); in ravb_probe()
2811 error = ravb_set_gti(ndev); in ravb_probe()
2816 ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI); in ravb_probe()
2820 ravb_parse_delay_mode(np, ndev); in ravb_probe()
2821 ravb_set_delay_mode(ndev); in ravb_probe()
2826 priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size, in ravb_probe()
2837 ravb_write(ndev, priv->desc_bat_dma, DBAT); in ravb_probe()
2844 ravb_ptp_init(ndev, pdev); in ravb_probe()
2850 ravb_read_mac_address(np, ndev); in ravb_probe()
2851 if (!is_valid_ether_addr(ndev->dev_addr)) { in ravb_probe()
2854 eth_hw_addr_random(ndev); in ravb_probe()
2864 netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll); in ravb_probe()
2866 netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll); in ravb_probe()
2869 error = register_netdev(ndev); in ravb_probe()
2876 netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n", in ravb_probe()
2877 (u32)ndev->base_addr, ndev->dev_addr, ndev->irq); in ravb_probe()
2879 platform_set_drvdata(pdev, ndev); in ravb_probe()
2890 dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, in ravb_probe()
2895 ravb_ptp_stop(ndev); in ravb_probe()
2906 free_netdev(ndev); in ravb_probe()
2912 struct net_device *ndev = platform_get_drvdata(pdev); in ravb_remove() local
2913 struct ravb_private *priv = netdev_priv(ndev); in ravb_remove()
2916 unregister_netdev(ndev); in ravb_remove()
2925 ravb_ptp_stop(ndev); in ravb_remove()
2927 dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, in ravb_remove()
2930 ravb_set_opmode(ndev, CCC_OPC_RESET); in ravb_remove()
2938 free_netdev(ndev); in ravb_remove()
2944 static int ravb_wol_setup(struct net_device *ndev) in ravb_wol_setup() argument
2946 struct ravb_private *priv = netdev_priv(ndev); in ravb_wol_setup()
2950 ravb_write(ndev, 0, RIC0); in ravb_wol_setup()
2951 ravb_write(ndev, 0, RIC2); in ravb_wol_setup()
2952 ravb_write(ndev, 0, TIC); in ravb_wol_setup()
2959 ravb_write(ndev, ECSIPR_MPDIP, ECSIPR); in ravb_wol_setup()
2962 ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE); in ravb_wol_setup()
2967 static int ravb_wol_restore(struct net_device *ndev) in ravb_wol_restore() argument
2969 struct ravb_private *priv = netdev_priv(ndev); in ravb_wol_restore()
2977 ravb_modify(ndev, ECMR, ECMR_MPDE, 0); in ravb_wol_restore()
2979 ravb_close(ndev); in ravb_wol_restore()
2986 struct net_device *ndev = dev_get_drvdata(dev); in ravb_suspend() local
2987 struct ravb_private *priv = netdev_priv(ndev); in ravb_suspend()
2990 if (!netif_running(ndev)) in ravb_suspend()
2993 netif_device_detach(ndev); in ravb_suspend()
2996 ret = ravb_wol_setup(ndev); in ravb_suspend()
2998 ret = ravb_close(ndev); in ravb_suspend()
3001 ravb_ptp_stop(ndev); in ravb_suspend()
3008 struct net_device *ndev = dev_get_drvdata(dev); in ravb_resume() local
3009 struct ravb_private *priv = netdev_priv(ndev); in ravb_resume()
3015 ret = ravb_set_opmode(ndev, CCC_OPC_RESET); in ravb_resume()
3026 ret = ravb_set_config_mode(ndev); in ravb_resume()
3032 ret = ravb_set_gti(ndev); in ravb_resume()
3037 ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI); in ravb_resume()
3041 ravb_set_delay_mode(ndev); in ravb_resume()
3044 ravb_write(ndev, priv->desc_bat_dma, DBAT); in ravb_resume()
3047 ravb_ptp_init(ndev, priv->pdev); in ravb_resume()
3049 if (netif_running(ndev)) { in ravb_resume()
3051 ret = ravb_wol_restore(ndev); in ravb_resume()
3055 ret = ravb_open(ndev); in ravb_resume()
3058 ravb_set_rx_mode(ndev); in ravb_resume()
3059 netif_device_attach(ndev); in ravb_resume()