Lines matching refs: mdp (the struct sh_eth_private * driver-private pointer in the Renesas SuperH Ethernet driver, sh_eth.c)
351 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_write() local
352 u16 offset = mdp->reg_offset[enum_index]; in sh_eth_write()
357 iowrite32(data, mdp->addr + offset); in sh_eth_write()
362 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_read() local
363 u16 offset = mdp->reg_offset[enum_index]; in sh_eth_read()
368 return ioread32(mdp->addr + offset); in sh_eth_read()
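
The two accessors above (lines 351-368) are the driver's only path to the MAC registers: the enum index is translated through mdp->reg_offset[], a per-SoC offset table selected at probe time (line 3257), before the MMIO access. A minimal sketch of the write side, assuming the elided lines 353-356 hold the usual SH_ETH_OFFSET_INVALID guard (that constant is visible later in this listing, at line 2090):

static void sh_eth_write(struct net_device *ndev, u32 data, int enum_index)
{
    struct sh_eth_private *mdp = netdev_priv(ndev);
    u16 offset = mdp->reg_offset[enum_index];

    /* Registers absent on this SoC map to SH_ETH_OFFSET_INVALID */
    if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
        return;

    iowrite32(data, mdp->addr + offset);
}

The TSU accessors at lines 383-401 repeat the same pattern against the second register block at mdp->tsu_addr.
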
378 static u16 sh_eth_tsu_get_offset(struct sh_eth_private *mdp, int enum_index) in sh_eth_tsu_get_offset() argument
380 return mdp->reg_offset[enum_index]; in sh_eth_tsu_get_offset()
383 static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data, in sh_eth_tsu_write() argument
386 u16 offset = sh_eth_tsu_get_offset(mdp, enum_index); in sh_eth_tsu_write()
391 iowrite32(data, mdp->tsu_addr + offset); in sh_eth_tsu_write()
394 static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index) in sh_eth_tsu_read() argument
396 u16 offset = sh_eth_tsu_get_offset(mdp, enum_index); in sh_eth_tsu_read()
401 return ioread32(mdp->tsu_addr + offset); in sh_eth_tsu_read()
417 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_select_mii() local
420 switch (mdp->phy_interface) { in sh_eth_select_mii()
445 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_duplex() local
447 sh_eth_modify(ndev, ECMR, ECMR_DM, mdp->duplex ? ECMR_DM : 0); in sh_eth_set_duplex()
452 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_chip_reset() local
455 sh_eth_tsu_write(mdp, ARSTR_ARST, ARSTR); in sh_eth_chip_reset()
484 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_soft_reset_gether() local
505 if (mdp->cd->csmr) in sh_eth_soft_reset_gether()
509 if (mdp->cd->select_mii) in sh_eth_soft_reset_gether()
517 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_rate_gether() local
519 if (WARN_ON(!mdp->cd->gecmr)) in sh_eth_set_rate_gether()
522 switch (mdp->speed) { in sh_eth_set_rate_gether()
636 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_rate_rcar() local
638 switch (mdp->speed) { in sh_eth_set_rate_rcar()
799 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_rate_sh7724() local
801 switch (mdp->speed) { in sh_eth_set_rate_sh7724()
843 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_rate_sh7757() local
845 switch (mdp->speed) { in sh_eth_set_rate_sh7757()
914 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_rate_giga() local
916 if (WARN_ON(!mdp->cd->gecmr)) in sh_eth_set_rate_giga()
919 switch (mdp->speed) { in sh_eth_set_rate_giga()
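
Lines 517-919 match five per-SoC set_rate callbacks (gether, rcar, sh7724, sh7757, giga); each switches on the link speed cached in mdp->speed by sh_eth_adjust_link() (line 1962) and programs the matching mode bits. A sketch of the gigabit variant, assuming the GECMR register and its GECMR_* speed values from the driver's headers:

static void sh_eth_set_rate_gether(struct net_device *ndev)
{
    struct sh_eth_private *mdp = netdev_priv(ndev);

    /* GECMR only exists on parts that set the gecmr capability flag */
    if (WARN_ON(!mdp->cd->gecmr))
        return;

    switch (mdp->speed) {
    case 10:
        sh_eth_write(ndev, GECMR_10, GECMR);
        break;
    case 100:
        sh_eth_write(ndev, GECMR_100, GECMR);
        break;
    case 1000:
        sh_eth_write(ndev, GECMR_1000, GECMR);
        break;
    }
}
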
1233 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tx_free() local
1239 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) { in sh_eth_tx_free()
1240 entry = mdp->dirty_tx % mdp->num_tx_ring; in sh_eth_tx_free()
1241 txdesc = &mdp->tx_ring[entry]; in sh_eth_tx_free()
1247 netif_info(mdp, tx_done, ndev, in sh_eth_tx_free()
1251 if (mdp->tx_skbuff[entry]) { in sh_eth_tx_free()
1252 dma_unmap_single(&mdp->pdev->dev, in sh_eth_tx_free()
1256 dev_kfree_skb_irq(mdp->tx_skbuff[entry]); in sh_eth_tx_free()
1257 mdp->tx_skbuff[entry] = NULL; in sh_eth_tx_free()
1261 if (entry >= mdp->num_tx_ring - 1) in sh_eth_tx_free()
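
sh_eth_tx_free() (lines 1233-1261) is the TX reclaim loop: cur_tx and dirty_tx are free-running counters, so cur_tx - dirty_tx is the number of in-flight descriptors and the ring slot is recovered with a modulo. A condensed sketch, assuming TD_TACT is the hardware's "descriptor active" bit (it is cleared in exactly that role at line 1545), TD_TDLE marks the ring end, and the buffer length sits in the upper 16 bits of the descriptor's len field:

for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
    entry = mdp->dirty_tx % mdp->num_tx_ring;
    txdesc = &mdp->tx_ring[entry];

    /* Stop at the first descriptor the hardware still owns */
    if (txdesc->status & cpu_to_le32(TD_TACT))
        break;

    if (mdp->tx_skbuff[entry]) {
        dma_unmap_single(&mdp->pdev->dev,
                 le32_to_cpu(txdesc->addr),
                 le32_to_cpu(txdesc->len) >> 16,
                 DMA_TO_DEVICE);
        dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
        mdp->tx_skbuff[entry] = NULL;
    }

    txdesc->status = cpu_to_le32(TD_TFP);
    if (entry >= mdp->num_tx_ring - 1)  /* last slot keeps the link bit */
        txdesc->status |= cpu_to_le32(TD_TDLE);
}
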
1275 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_ring_free() local
1278 if (mdp->rx_ring) { in sh_eth_ring_free()
1279 for (i = 0; i < mdp->num_rx_ring; i++) { in sh_eth_ring_free()
1280 if (mdp->rx_skbuff[i]) { in sh_eth_ring_free()
1281 struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i]; in sh_eth_ring_free()
1283 dma_unmap_single(&mdp->pdev->dev, in sh_eth_ring_free()
1285 ALIGN(mdp->rx_buf_sz, 32), in sh_eth_ring_free()
1289 ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; in sh_eth_ring_free()
1290 dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->rx_ring, in sh_eth_ring_free()
1291 mdp->rx_desc_dma); in sh_eth_ring_free()
1292 mdp->rx_ring = NULL; in sh_eth_ring_free()
1296 if (mdp->rx_skbuff) { in sh_eth_ring_free()
1297 for (i = 0; i < mdp->num_rx_ring; i++) in sh_eth_ring_free()
1298 dev_kfree_skb(mdp->rx_skbuff[i]); in sh_eth_ring_free()
1300 kfree(mdp->rx_skbuff); in sh_eth_ring_free()
1301 mdp->rx_skbuff = NULL; in sh_eth_ring_free()
1303 if (mdp->tx_ring) { in sh_eth_ring_free()
1306 ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; in sh_eth_ring_free()
1307 dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->tx_ring, in sh_eth_ring_free()
1308 mdp->tx_desc_dma); in sh_eth_ring_free()
1309 mdp->tx_ring = NULL; in sh_eth_ring_free()
1313 kfree(mdp->tx_skbuff); in sh_eth_ring_free()
1314 mdp->tx_skbuff = NULL; in sh_eth_ring_free()
1320 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_ring_format() local
1325 int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring; in sh_eth_ring_format()
1326 int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring; in sh_eth_ring_format()
1327 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1; in sh_eth_ring_format()
1331 mdp->cur_rx = 0; in sh_eth_ring_format()
1332 mdp->cur_tx = 0; in sh_eth_ring_format()
1333 mdp->dirty_rx = 0; in sh_eth_ring_format()
1334 mdp->dirty_tx = 0; in sh_eth_ring_format()
1336 memset(mdp->rx_ring, 0, rx_ringsize); in sh_eth_ring_format()
1339 for (i = 0; i < mdp->num_rx_ring; i++) { in sh_eth_ring_format()
1341 mdp->rx_skbuff[i] = NULL; in sh_eth_ring_format()
1348 buf_len = ALIGN(mdp->rx_buf_sz, 32); in sh_eth_ring_format()
1349 dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, buf_len, in sh_eth_ring_format()
1351 if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) { in sh_eth_ring_format()
1355 mdp->rx_skbuff[i] = skb; in sh_eth_ring_format()
1358 rxdesc = &mdp->rx_ring[i]; in sh_eth_ring_format()
1365 sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR); in sh_eth_ring_format()
1366 if (mdp->cd->xdfar_rw) in sh_eth_ring_format()
1367 sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR); in sh_eth_ring_format()
1371 mdp->dirty_rx = (u32) (i - mdp->num_rx_ring); in sh_eth_ring_format()
1377 memset(mdp->tx_ring, 0, tx_ringsize); in sh_eth_ring_format()
1380 for (i = 0; i < mdp->num_tx_ring; i++) { in sh_eth_ring_format()
1381 mdp->tx_skbuff[i] = NULL; in sh_eth_ring_format()
1382 txdesc = &mdp->tx_ring[i]; in sh_eth_ring_format()
1387 sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR); in sh_eth_ring_format()
1388 if (mdp->cd->xdfar_rw) in sh_eth_ring_format()
1389 sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR); in sh_eth_ring_format()
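
sh_eth_ring_format() (lines 1320-1389) resets the ring counters, populates each RX slot with a freshly mapped skb, then points the hardware at the ring bases (RDLAR/TDLAR, plus the RDFAR/TDFAR mirrors on parts with xdfar_rw). A sketch of one RX slot, with RD_RACT/RD_RFP assumed to be the ownership/frame-position descriptor flags and the length packed into the upper half of len:

for (i = 0; i < mdp->num_rx_ring; i++) {
    mdp->rx_skbuff[i] = NULL;
    skb = netdev_alloc_skb(ndev, skbuff_size);
    if (!skb)
        break;

    /* Buffers must be a multiple of 32 bytes (line 1348) */
    buf_len = ALIGN(mdp->rx_buf_sz, 32);
    dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, buf_len,
                  DMA_FROM_DEVICE);
    if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
        kfree_skb(skb);
        break;
    }
    mdp->rx_skbuff[i] = skb;

    rxdesc = &mdp->rx_ring[i];
    rxdesc->len = cpu_to_le32(buf_len << 16);
    rxdesc->addr = cpu_to_le32(dma_addr);
    rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP);  /* hand it to the NIC */
}
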
1399 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_ring_init() local
1407 mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : in sh_eth_ring_init()
1409 if (mdp->cd->rpadir) in sh_eth_ring_init()
1410 mdp->rx_buf_sz += NET_IP_ALIGN; in sh_eth_ring_init()
1413 mdp->rx_skbuff = kcalloc(mdp->num_rx_ring, sizeof(*mdp->rx_skbuff), in sh_eth_ring_init()
1415 if (!mdp->rx_skbuff) in sh_eth_ring_init()
1418 mdp->tx_skbuff = kcalloc(mdp->num_tx_ring, sizeof(*mdp->tx_skbuff), in sh_eth_ring_init()
1420 if (!mdp->tx_skbuff) in sh_eth_ring_init()
1424 rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; in sh_eth_ring_init()
1425 mdp->rx_ring = dma_alloc_coherent(&mdp->pdev->dev, rx_ringsize, in sh_eth_ring_init()
1426 &mdp->rx_desc_dma, GFP_KERNEL); in sh_eth_ring_init()
1427 if (!mdp->rx_ring) in sh_eth_ring_init()
1430 mdp->dirty_rx = 0; in sh_eth_ring_init()
1433 tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; in sh_eth_ring_init()
1434 mdp->tx_ring = dma_alloc_coherent(&mdp->pdev->dev, tx_ringsize, in sh_eth_ring_init()
1435 &mdp->tx_desc_dma, GFP_KERNEL); in sh_eth_ring_init()
1436 if (!mdp->tx_ring) in sh_eth_ring_init()
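
A detail worth noting in sh_eth_ring_init() (lines 1399-1436): the skb pointer arrays are ordinary kcalloc() memory, but the descriptor rings come from dma_alloc_coherent(), so ownership-bit updates are visible to the DMAC without per-access sync calls, while the packet buffers themselves are streaming-mapped one skb at a time. Sketch of the RX half, with the unwind label assumed from the usual goto-cleanup style:

/* Rx descriptor ring: coherent, so status bits need no dma_sync */
rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
mdp->rx_ring = dma_alloc_coherent(&mdp->pdev->dev, rx_ringsize,
                  &mdp->rx_desc_dma, GFP_KERNEL);
if (!mdp->rx_ring)
    goto ring_free;  /* assumed: unwinds via sh_eth_ring_free() */
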
1449 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_dev_init() local
1453 ret = mdp->cd->soft_reset(ndev); in sh_eth_dev_init()
1457 if (mdp->cd->rmiimode) in sh_eth_dev_init()
1462 if (mdp->cd->rpadir) in sh_eth_dev_init()
1469 if (mdp->cd->hw_swap) in sh_eth_dev_init()
1476 sh_eth_write(ndev, mdp->cd->fdr_value, FDR); in sh_eth_dev_init()
1482 sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER); in sh_eth_dev_init()
1485 if (mdp->cd->nbst) in sh_eth_dev_init()
1489 if (mdp->cd->bculr) in sh_eth_dev_init()
1492 sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR); in sh_eth_dev_init()
1494 if (!mdp->cd->no_trimd) in sh_eth_dev_init()
1502 mdp->irq_enabled = true; in sh_eth_dev_init()
1503 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); in sh_eth_dev_init()
1506 sh_eth_write(ndev, ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | in sh_eth_dev_init()
1510 if (mdp->cd->set_rate) in sh_eth_dev_init()
1511 mdp->cd->set_rate(ndev); in sh_eth_dev_init()
1514 sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR); in sh_eth_dev_init()
1517 sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR); in sh_eth_dev_init()
1523 if (mdp->cd->apr) in sh_eth_dev_init()
1525 if (mdp->cd->mpr) in sh_eth_dev_init()
1527 if (mdp->cd->tpauser) in sh_eth_dev_init()
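
sh_eth_dev_init() (lines 1449-1527) is a straight-line register program in which optional steps are gated by sh_eth_cpu_data capability flags (rmiimode, rpadir, hw_swap, nbst, bculr, apr, mpr, tpauser) and per-SoC values (fdr_value, trscer_err_mask, eesipr_value, ecsr_value, ecsipr_value) come from the same structure. The shape, sketched with illustrative register values only:

ret = mdp->cd->soft_reset(ndev);  /* per-SoC reset callback */
if (ret)
    return ret;

if (mdp->cd->rmiimode)            /* only some SoCs have RMIIMODE */
    sh_eth_write(ndev, 0x1, RMIIMODE);

sh_eth_write(ndev, mdp->cd->fdr_value, FDR);  /* FIFO depths */
sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER);

mdp->irq_enabled = true;
sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);  /* unmask interrupts */
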
1538 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_dev_exit() local
1544 for (i = 0; i < mdp->num_tx_ring; i++) in sh_eth_dev_exit()
1545 mdp->tx_ring[i].status &= ~cpu_to_le32(TD_TACT); in sh_eth_dev_exit()
1560 mdp->cd->soft_reset(ndev); in sh_eth_dev_exit()
1563 if (mdp->cd->rmiimode) in sh_eth_dev_exit()
1586 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_rx() local
1589 int entry = mdp->cur_rx % mdp->num_rx_ring; in sh_eth_rx()
1590 int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx; in sh_eth_rx()
1594 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1; in sh_eth_rx()
1601 rxdesc = &mdp->rx_ring[entry]; in sh_eth_rx()
1611 netif_info(mdp, rx_status, ndev, in sh_eth_rx()
1624 if (mdp->cd->csmr) in sh_eth_rx()
1627 skb = mdp->rx_skbuff[entry]; in sh_eth_rx()
1645 if (!mdp->cd->hw_swap) in sh_eth_rx()
1649 mdp->rx_skbuff[entry] = NULL; in sh_eth_rx()
1650 if (mdp->cd->rpadir) in sh_eth_rx()
1652 dma_unmap_single(&mdp->pdev->dev, dma_addr, in sh_eth_rx()
1653 ALIGN(mdp->rx_buf_sz, 32), in sh_eth_rx()
1665 entry = (++mdp->cur_rx) % mdp->num_rx_ring; in sh_eth_rx()
1666 rxdesc = &mdp->rx_ring[entry]; in sh_eth_rx()
1670 for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) { in sh_eth_rx()
1671 entry = mdp->dirty_rx % mdp->num_rx_ring; in sh_eth_rx()
1672 rxdesc = &mdp->rx_ring[entry]; in sh_eth_rx()
1674 buf_len = ALIGN(mdp->rx_buf_sz, 32); in sh_eth_rx()
1677 if (mdp->rx_skbuff[entry] == NULL) { in sh_eth_rx()
1682 dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, in sh_eth_rx()
1684 if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) { in sh_eth_rx()
1688 mdp->rx_skbuff[entry] = skb; in sh_eth_rx()
1694 if (entry >= mdp->num_rx_ring - 1) in sh_eth_rx()
1705 if (intr_status & EESR_RDE && !mdp->cd->no_xdfar) { in sh_eth_rx()
1709 mdp->cur_rx = count; in sh_eth_rx()
1710 mdp->dirty_rx = count; in sh_eth_rx()
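
Lines 1705-1710 are the receive-descriptor-empty (RDE) recovery at the end of sh_eth_rx(): when the DMAC runs out of descriptors it stops, so cur_rx/dirty_rx are re-derived from where the hardware got to and reception is restarted. A sketch, assuming RDFAR/RDLAR expose the current fetch address and ring base as their names suggest, 16-byte descriptors (hence the >> 4), and EDRRR_R as the receive-request bit:

if (intr_status & EESR_RDE && !mdp->cd->no_xdfar) {
    /* How far did the DMAC get, relative to the ring base? */
    u32 count = (sh_eth_read(ndev, RDFAR) -
             sh_eth_read(ndev, RDLAR)) >> 4;

    mdp->cur_rx = count;
    mdp->dirty_rx = count;
}
sh_eth_write(ndev, EDRRR_R, EDRRR);  /* restart reception */
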
1735 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_emac_interrupt() local
1744 pm_wakeup_event(&mdp->pdev->dev, 0); in sh_eth_emac_interrupt()
1747 if (mdp->cd->no_psr || mdp->no_ether_link) in sh_eth_emac_interrupt()
1750 if (mdp->ether_link_active_low) in sh_eth_emac_interrupt()
1769 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_error() local
1776 netif_err(mdp, tx_err, ndev, "Transmit Abort\n"); in sh_eth_error()
1791 netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n"); in sh_eth_error()
1797 netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n"); in sh_eth_error()
1810 if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) { in sh_eth_error()
1813 netif_err(mdp, tx_err, ndev, "Address Error\n"); in sh_eth_error()
1817 if (mdp->cd->no_ade) in sh_eth_error()
1825 intr_status, mdp->cur_tx, mdp->dirty_tx, in sh_eth_error()
1831 if (edtrr ^ mdp->cd->edtrr_trns) { in sh_eth_error()
1833 sh_eth_write(ndev, mdp->cd->edtrr_trns, EDTRR); in sh_eth_error()
1843 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_interrupt() local
1844 struct sh_eth_cpu_data *cd = mdp->cd; in sh_eth_interrupt()
1848 spin_lock(&mdp->lock); in sh_eth_interrupt()
1866 if (unlikely(!mdp->irq_enabled)) { in sh_eth_interrupt()
1872 if (napi_schedule_prep(&mdp->napi)) { in sh_eth_interrupt()
1876 __napi_schedule(&mdp->napi); in sh_eth_interrupt()
1905 spin_unlock(&mdp->lock); in sh_eth_interrupt()
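
The hard-IRQ handler (lines 1843-1905) does no packet work itself: under mdp->lock it reads EESR and, for RX events, masks the RX sources in EESIPR and hands off to NAPI; sh_eth_poll() re-opens the mask once the budget is not exhausted (lines 1932-1933). A sketch of the handoff, with EESR_RX_CHECK assumed to collect the RX-related status bits:

if (unlikely(!mdp->irq_enabled)) {  /* e.g. masked by set_ringparam */
    sh_eth_write(ndev, 0, EESIPR);
    goto out;
}

if (intr_status & EESR_RX_CHECK) {
    if (napi_schedule_prep(&mdp->napi)) {
        /* Mask Rx interrupts until the poll is done */
        sh_eth_write(ndev, intr_status & ~EESR_RX_CHECK, EESIPR);
        __napi_schedule(&mdp->napi);
    }
}
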
1912 struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private, in sh_eth_poll() local
1932 if (mdp->irq_enabled) in sh_eth_poll()
1933 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); in sh_eth_poll()
1941 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_adjust_link() local
1946 spin_lock_irqsave(&mdp->lock, flags); in sh_eth_adjust_link()
1949 if (mdp->cd->no_psr || mdp->no_ether_link) in sh_eth_adjust_link()
1953 if (phydev->duplex != mdp->duplex) { in sh_eth_adjust_link()
1955 mdp->duplex = phydev->duplex; in sh_eth_adjust_link()
1956 if (mdp->cd->set_duplex) in sh_eth_adjust_link()
1957 mdp->cd->set_duplex(ndev); in sh_eth_adjust_link()
1960 if (phydev->speed != mdp->speed) { in sh_eth_adjust_link()
1962 mdp->speed = phydev->speed; in sh_eth_adjust_link()
1963 if (mdp->cd->set_rate) in sh_eth_adjust_link()
1964 mdp->cd->set_rate(ndev); in sh_eth_adjust_link()
1966 if (!mdp->link) { in sh_eth_adjust_link()
1969 mdp->link = phydev->link; in sh_eth_adjust_link()
1971 } else if (mdp->link) { in sh_eth_adjust_link()
1973 mdp->link = 0; in sh_eth_adjust_link()
1974 mdp->speed = 0; in sh_eth_adjust_link()
1975 mdp->duplex = -1; in sh_eth_adjust_link()
1979 if ((mdp->cd->no_psr || mdp->no_ether_link) && phydev->link) in sh_eth_adjust_link()
1982 spin_unlock_irqrestore(&mdp->lock, flags); in sh_eth_adjust_link()
1984 if (new_state && netif_msg_link(mdp)) in sh_eth_adjust_link()
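
sh_eth_adjust_link() (lines 1941-1984) is the phylib callback: under mdp->lock it compares phydev->duplex/speed/link against the cached copies and, on change, re-runs the per-SoC set_duplex/set_rate callbacks seen earlier. Condensed:

spin_lock_irqsave(&mdp->lock, flags);
if (phydev->link) {
    if (phydev->duplex != mdp->duplex) {
        new_state = 1;
        mdp->duplex = phydev->duplex;
        if (mdp->cd->set_duplex)
            mdp->cd->set_duplex(ndev);
    }
    if (phydev->speed != mdp->speed) {
        new_state = 1;
        mdp->speed = phydev->speed;
        if (mdp->cd->set_rate)
            mdp->cd->set_rate(ndev);
    }
    if (!mdp->link) {
        new_state = 1;
        mdp->link = phydev->link;
    }
} else if (mdp->link) {
    new_state = 1;
    mdp->link = 0;
    mdp->speed = 0;
    mdp->duplex = -1;  /* unknown */
}
spin_unlock_irqrestore(&mdp->lock, flags);
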
1992 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_phy_init() local
1995 mdp->link = 0; in sh_eth_phy_init()
1996 mdp->speed = 0; in sh_eth_phy_init()
1997 mdp->duplex = -1; in sh_eth_phy_init()
2006 mdp->phy_interface); in sh_eth_phy_init()
2015 mdp->mii_bus->id, mdp->phy_id); in sh_eth_phy_init()
2018 mdp->phy_interface); in sh_eth_phy_init()
2027 if (mdp->cd->register_type != SH_ETH_REG_GIGABIT) { in sh_eth_phy_init()
2065 struct sh_eth_private *mdp = netdev_priv(ndev); in __sh_eth_get_regs() local
2066 struct sh_eth_cpu_data *cd = mdp->cd; in __sh_eth_get_regs()
2090 if (mdp->reg_offset[reg] != SH_ETH_OFFSET_INVALID) { \ in __sh_eth_get_regs()
2099 #define add_tsu_reg(reg) add_reg_from(reg, sh_eth_tsu_read(mdp, reg)) in __sh_eth_get_regs()
2213 *buf++ = ioread32(mdp->tsu_addr + in __sh_eth_get_regs()
2214 mdp->reg_offset[TSU_ADRH0] + in __sh_eth_get_regs()
2236 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_get_regs() local
2240 pm_runtime_get_sync(&mdp->pdev->dev); in sh_eth_get_regs()
2242 pm_runtime_put_sync(&mdp->pdev->dev); in sh_eth_get_regs()
2247 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_get_msglevel() local
2248 return mdp->msg_enable; in sh_eth_get_msglevel()
2253 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_msglevel() local
2254 mdp->msg_enable = value; in sh_eth_set_msglevel()
2276 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_get_ethtool_stats() local
2280 data[i++] = mdp->cur_rx; in sh_eth_get_ethtool_stats()
2281 data[i++] = mdp->cur_tx; in sh_eth_get_ethtool_stats()
2282 data[i++] = mdp->dirty_rx; in sh_eth_get_ethtool_stats()
2283 data[i++] = mdp->dirty_tx; in sh_eth_get_ethtool_stats()
2299 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_get_ringparam() local
2303 ring->rx_pending = mdp->num_rx_ring; in sh_eth_get_ringparam()
2304 ring->tx_pending = mdp->num_tx_ring; in sh_eth_get_ringparam()
2310 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_ringparam() local
2330 mdp->irq_enabled = false; in sh_eth_set_ringparam()
2332 napi_synchronize(&mdp->napi); in sh_eth_set_ringparam()
2342 mdp->num_rx_ring = ring->rx_pending; in sh_eth_set_ringparam()
2343 mdp->num_tx_ring = ring->tx_pending; in sh_eth_set_ringparam()
2367 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_get_wol() local
2372 if (mdp->cd->magic) { in sh_eth_get_wol()
2374 wol->wolopts = mdp->wol_enabled ? WAKE_MAGIC : 0; in sh_eth_get_wol()
2380 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_wol() local
2382 if (!mdp->cd->magic || wol->wolopts & ~WAKE_MAGIC) in sh_eth_set_wol()
2385 mdp->wol_enabled = !!(wol->wolopts & WAKE_MAGIC); in sh_eth_set_wol()
2387 device_set_wakeup_enable(&mdp->pdev->dev, mdp->wol_enabled); in sh_eth_set_wol()
2413 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_open() local
2416 pm_runtime_get_sync(&mdp->pdev->dev); in sh_eth_open()
2418 napi_enable(&mdp->napi); in sh_eth_open()
2421 mdp->cd->irq_flags, ndev->name, ndev); in sh_eth_open()
2444 mdp->is_opened = 1; in sh_eth_open()
2451 napi_disable(&mdp->napi); in sh_eth_open()
2452 pm_runtime_put_sync(&mdp->pdev->dev); in sh_eth_open()
2459 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tx_timeout() local
2465 netif_err(mdp, timer, ndev, in sh_eth_tx_timeout()
2473 for (i = 0; i < mdp->num_rx_ring; i++) { in sh_eth_tx_timeout()
2474 rxdesc = &mdp->rx_ring[i]; in sh_eth_tx_timeout()
2477 dev_kfree_skb(mdp->rx_skbuff[i]); in sh_eth_tx_timeout()
2478 mdp->rx_skbuff[i] = NULL; in sh_eth_tx_timeout()
2480 for (i = 0; i < mdp->num_tx_ring; i++) { in sh_eth_tx_timeout()
2481 dev_kfree_skb(mdp->tx_skbuff[i]); in sh_eth_tx_timeout()
2482 mdp->tx_skbuff[i] = NULL; in sh_eth_tx_timeout()
2495 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_start_xmit() local
2501 spin_lock_irqsave(&mdp->lock, flags); in sh_eth_start_xmit()
2502 if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) { in sh_eth_start_xmit()
2504 netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n"); in sh_eth_start_xmit()
2506 spin_unlock_irqrestore(&mdp->lock, flags); in sh_eth_start_xmit()
2510 spin_unlock_irqrestore(&mdp->lock, flags); in sh_eth_start_xmit()
2515 entry = mdp->cur_tx % mdp->num_tx_ring; in sh_eth_start_xmit()
2516 mdp->tx_skbuff[entry] = skb; in sh_eth_start_xmit()
2517 txdesc = &mdp->tx_ring[entry]; in sh_eth_start_xmit()
2519 if (!mdp->cd->hw_swap) in sh_eth_start_xmit()
2521 dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, skb->len, in sh_eth_start_xmit()
2523 if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) { in sh_eth_start_xmit()
2531 if (entry >= mdp->num_tx_ring - 1) in sh_eth_start_xmit()
2537 mdp->cur_tx++; in sh_eth_start_xmit()
2539 if (!(sh_eth_read(ndev, EDTRR) & mdp->cd->edtrr_trns)) in sh_eth_start_xmit()
2540 sh_eth_write(ndev, mdp->cd->edtrr_trns, EDTRR); in sh_eth_start_xmit()
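
sh_eth_start_xmit() (lines 2495-2540) keeps four descriptors of headroom: if cur_tx - dirty_tx reaches num_tx_ring - 4 it first tries to reclaim via sh_eth_tx_free() and otherwise stops the queue. The enqueue itself maps the skb, publishes the descriptor, and only kicks EDTRR when the transmitter has gone idle (its transfer-request bits read back clear). Sketched, with the len-in-upper-16-bits packing and TD_* flags assumed as before:

entry = mdp->cur_tx % mdp->num_tx_ring;
mdp->tx_skbuff[entry] = skb;
txdesc = &mdp->tx_ring[entry];
dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, skb->len,
              DMA_TO_DEVICE);
if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
    kfree_skb(skb);
    return NETDEV_TX_OK;
}
txdesc->addr = cpu_to_le32(dma_addr);
txdesc->len = cpu_to_le32(skb->len << 16);

dma_wmb();  /* descriptor contents before ownership */
if (entry >= mdp->num_tx_ring - 1)
    txdesc->status |= cpu_to_le32(TD_TACT | TD_TDLE);
else
    txdesc->status |= cpu_to_le32(TD_TACT);

mdp->cur_tx++;

if (!(sh_eth_read(ndev, EDTRR) & mdp->cd->edtrr_trns))
    sh_eth_write(ndev, mdp->cd->edtrr_trns, EDTRR);  /* start TX DMA */
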
2563 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_get_stats() local
2565 if (mdp->cd->no_tx_cntrs) in sh_eth_get_stats()
2568 if (!mdp->is_opened) in sh_eth_get_stats()
2575 if (mdp->cd->cexcr) { in sh_eth_get_stats()
2591 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_close() local
2599 mdp->irq_enabled = false; in sh_eth_close()
2601 napi_disable(&mdp->napi); in sh_eth_close()
2617 mdp->is_opened = 0; in sh_eth_close()
2619 pm_runtime_put(&mdp->pdev->dev); in sh_eth_close()
2641 static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry) in sh_eth_tsu_get_post_bit() argument
2643 return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4)); in sh_eth_tsu_get_post_bit()
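
The bit arithmetic at line 2643 is dense enough to deserve a worked example. Read together with the dual-port masking at lines 2660-2668: each TSU_POSTn register packs eight 4-bit fields, entry 0 in the top nibble (shift 28), and within a field each port owns separate bits, port 0 getting 0x8 and port 1 getting 0x2 via 0x08 >> (port << 1):

/* sh_eth_tsu_get_post_bit(mdp, entry), evaluated by hand: */
/* port 0, entry 0: (0x08 >> 0) << 28 = 0x80000000 */
/* port 0, entry 2: (0x08 >> 0) << 20 = 0x00800000 */
/* port 1, entry 2: (0x08 >> 2) << 20 = 0x00200000 */
/* port 0, entry 7: (0x08 >> 0) <<  0 = 0x00000008 */
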
2649 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_enable_cam_entry_post() local
2653 tmp = sh_eth_tsu_read(mdp, reg); in sh_eth_tsu_enable_cam_entry_post()
2654 sh_eth_tsu_write(mdp, tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg); in sh_eth_tsu_enable_cam_entry_post()
2660 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_disable_cam_entry_post() local
2665 ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask; in sh_eth_tsu_disable_cam_entry_post()
2667 tmp = sh_eth_tsu_read(mdp, reg); in sh_eth_tsu_disable_cam_entry_post()
2668 sh_eth_tsu_write(mdp, tmp & ~post_mask, reg); in sh_eth_tsu_disable_cam_entry_post()
2677 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_busy() local
2679 while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) { in sh_eth_tsu_busy()
2694 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_write_entry() local
2698 iowrite32(val, mdp->tsu_addr + offset); in sh_eth_tsu_write_entry()
2703 iowrite32(val, mdp->tsu_addr + offset + 4); in sh_eth_tsu_write_entry()
2712 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_read_entry() local
2715 val = ioread32(mdp->tsu_addr + offset); in sh_eth_tsu_read_entry()
2720 val = ioread32(mdp->tsu_addr + offset + 4); in sh_eth_tsu_read_entry()
2728 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_find_entry() local
2729 u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); in sh_eth_tsu_find_entry()
2755 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_disable_cam_entry_table() local
2756 u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); in sh_eth_tsu_disable_cam_entry_table()
2760 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) & in sh_eth_tsu_disable_cam_entry_table()
2772 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_add_entry() local
2773 u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); in sh_eth_tsu_add_entry()
2776 if (!mdp->cd->tsu) in sh_eth_tsu_add_entry()
2790 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) | in sh_eth_tsu_add_entry()
2802 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_del_entry() local
2805 if (!mdp->cd->tsu) in sh_eth_tsu_del_entry()
2825 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_purge_all() local
2828 if (!mdp->cd->tsu) in sh_eth_tsu_purge_all()
2846 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_purge_mcast() local
2847 u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); in sh_eth_tsu_purge_mcast()
2851 if (!mdp->cd->tsu) in sh_eth_tsu_purge_mcast()
2864 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_rx_mode() local
2869 spin_lock_irqsave(&mdp->lock, flags); in sh_eth_set_rx_mode()
2874 if (mdp->cd->tsu) in sh_eth_set_rx_mode()
2890 } else if (mdp->cd->tsu) { in sh_eth_set_rx_mode()
2909 spin_unlock_irqrestore(&mdp->lock, flags); in sh_eth_set_rx_mode()
2914 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_rx_csum() local
2917 spin_lock_irqsave(&mdp->lock, flags); in sh_eth_set_rx_csum()
2928 spin_unlock_irqrestore(&mdp->lock, flags); in sh_eth_set_rx_csum()
2935 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_features() local
2937 if (changed & NETIF_F_RXCSUM && mdp->cd->rx_csum) in sh_eth_set_features()
2945 static int sh_eth_get_vtag_index(struct sh_eth_private *mdp) in sh_eth_get_vtag_index() argument
2947 if (!mdp->port) in sh_eth_get_vtag_index()
2956 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_vlan_rx_add_vid() local
2957 int vtag_reg_index = sh_eth_get_vtag_index(mdp); in sh_eth_vlan_rx_add_vid()
2959 if (unlikely(!mdp->cd->tsu)) in sh_eth_vlan_rx_add_vid()
2966 mdp->vlan_num_ids++; in sh_eth_vlan_rx_add_vid()
2971 if (mdp->vlan_num_ids > 1) { in sh_eth_vlan_rx_add_vid()
2973 sh_eth_tsu_write(mdp, 0, vtag_reg_index); in sh_eth_vlan_rx_add_vid()
2977 sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK), in sh_eth_vlan_rx_add_vid()
2986 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_vlan_rx_kill_vid() local
2987 int vtag_reg_index = sh_eth_get_vtag_index(mdp); in sh_eth_vlan_rx_kill_vid()
2989 if (unlikely(!mdp->cd->tsu)) in sh_eth_vlan_rx_kill_vid()
2996 mdp->vlan_num_ids--; in sh_eth_vlan_rx_kill_vid()
2997 sh_eth_tsu_write(mdp, 0, vtag_reg_index); in sh_eth_vlan_rx_kill_vid()
3003 static void sh_eth_tsu_init(struct sh_eth_private *mdp) in sh_eth_tsu_init() argument
3005 if (!mdp->cd->dual_port) { in sh_eth_tsu_init()
3006 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */ in sh_eth_tsu_init()
3007 sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, in sh_eth_tsu_init()
3012 sh_eth_tsu_write(mdp, 0, TSU_FWEN0); /* Disable forward(0->1) */ in sh_eth_tsu_init()
3013 sh_eth_tsu_write(mdp, 0, TSU_FWEN1); /* Disable forward(1->0) */ in sh_eth_tsu_init()
3014 sh_eth_tsu_write(mdp, 0, TSU_FCM); /* forward fifo 3k-3k */ in sh_eth_tsu_init()
3015 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0); in sh_eth_tsu_init()
3016 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1); in sh_eth_tsu_init()
3017 sh_eth_tsu_write(mdp, 0, TSU_PRISL0); in sh_eth_tsu_init()
3018 sh_eth_tsu_write(mdp, 0, TSU_PRISL1); in sh_eth_tsu_init()
3019 sh_eth_tsu_write(mdp, 0, TSU_FWSL0); in sh_eth_tsu_init()
3020 sh_eth_tsu_write(mdp, 0, TSU_FWSL1); in sh_eth_tsu_init()
3021 sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC); in sh_eth_tsu_init()
3022 sh_eth_tsu_write(mdp, 0, TSU_QTAGM0); /* Disable QTAG(0->1) */ in sh_eth_tsu_init()
3023 sh_eth_tsu_write(mdp, 0, TSU_QTAGM1); /* Disable QTAG(1->0) */ in sh_eth_tsu_init()
3024 sh_eth_tsu_write(mdp, 0, TSU_FWSR); /* all interrupt status clear */ in sh_eth_tsu_init()
3025 sh_eth_tsu_write(mdp, 0, TSU_FWINMK); /* Disable all interrupt */ in sh_eth_tsu_init()
3026 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */ in sh_eth_tsu_init()
3027 sh_eth_tsu_write(mdp, 0, TSU_POST1); /* Disable CAM entry [ 0- 7] */ in sh_eth_tsu_init()
3028 sh_eth_tsu_write(mdp, 0, TSU_POST2); /* Disable CAM entry [ 8-15] */ in sh_eth_tsu_init()
3029 sh_eth_tsu_write(mdp, 0, TSU_POST3); /* Disable CAM entry [16-23] */ in sh_eth_tsu_init()
3030 sh_eth_tsu_write(mdp, 0, TSU_POST4); /* Disable CAM entry [24-31] */ in sh_eth_tsu_init()
3034 static int sh_mdio_release(struct sh_eth_private *mdp) in sh_mdio_release() argument
3037 mdiobus_unregister(mdp->mii_bus); in sh_mdio_release()
3040 free_mdio_bitbang(mdp->mii_bus); in sh_mdio_release()
3046 static int sh_mdio_init(struct sh_eth_private *mdp, in sh_mdio_init() argument
3051 struct platform_device *pdev = mdp->pdev; in sh_mdio_init()
3052 struct device *dev = &mdp->pdev->dev; in sh_mdio_init()
3060 bitbang->addr = mdp->addr + mdp->reg_offset[PIR]; in sh_mdio_init()
3065 mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl); in sh_mdio_init()
3066 if (!mdp->mii_bus) in sh_mdio_init()
3070 mdp->mii_bus->name = "sh_mii"; in sh_mdio_init()
3071 mdp->mii_bus->parent = dev; in sh_mdio_init()
3072 snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", in sh_mdio_init()
3077 mdp->mii_bus->irq[pd->phy] = pd->phy_irq; in sh_mdio_init()
3079 ret = of_mdiobus_register(mdp->mii_bus, dev->of_node); in sh_mdio_init()
3086 free_mdio_bitbang(mdp->mii_bus); in sh_mdio_init()
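
sh_mdio_init() (lines 3046-3086) builds a bit-banged MDIO bus over the PIR register using the kernel's mdio-bitbang helpers (linux/mdio-bitbang.h, linux/of_mdio.h). A sketch under stated assumptions: the bb_info layout and the sh_mdio_ops name are illustrative, standing in for a real mdiobb_ops implementation that toggles the MDC/MDIO bits of PIR:

struct bb_info {
    struct mdiobb_ctrl ctrl;  /* must come first for container_of() */
    void __iomem *addr;       /* the PIR register */
};

static int sh_mdio_init(struct sh_eth_private *mdp,
            struct sh_eth_plat_data *pd)
{
    struct platform_device *pdev = mdp->pdev;
    struct device *dev = &mdp->pdev->dev;
    struct bb_info *bitbang;
    int ret;

    bitbang = devm_kzalloc(dev, sizeof(*bitbang), GFP_KERNEL);
    if (!bitbang)
        return -ENOMEM;

    /* MDC/MDIO are bit-banged through PIR */
    bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
    bitbang->ctrl.ops = &sh_mdio_ops;  /* hypothetical ops name */

    mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
    if (!mdp->mii_bus)
        return -ENOMEM;

    mdp->mii_bus->name = "sh_mii";
    mdp->mii_bus->parent = dev;
    snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
         pdev->name, pdev->id);

    ret = of_mdiobus_register(mdp->mii_bus, dev->of_node);
    if (ret)
        free_mdio_bitbang(mdp->mii_bus);
    return ret;
}
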
3202 struct sh_eth_private *mdp; in sh_eth_drv_probe() local
3223 mdp = netdev_priv(ndev); in sh_eth_drv_probe()
3224 mdp->num_tx_ring = TX_RING_SIZE; in sh_eth_drv_probe()
3225 mdp->num_rx_ring = RX_RING_SIZE; in sh_eth_drv_probe()
3226 mdp->addr = devm_ioremap_resource(&pdev->dev, res); in sh_eth_drv_probe()
3227 if (IS_ERR(mdp->addr)) { in sh_eth_drv_probe()
3228 ret = PTR_ERR(mdp->addr); in sh_eth_drv_probe()
3234 spin_lock_init(&mdp->lock); in sh_eth_drv_probe()
3235 mdp->pdev = pdev; in sh_eth_drv_probe()
3246 mdp->phy_id = pd->phy; in sh_eth_drv_probe()
3247 mdp->phy_interface = pd->phy_interface; in sh_eth_drv_probe()
3248 mdp->no_ether_link = pd->no_ether_link; in sh_eth_drv_probe()
3249 mdp->ether_link_active_low = pd->ether_link_active_low; in sh_eth_drv_probe()
3253 mdp->cd = (struct sh_eth_cpu_data *)id->driver_data; in sh_eth_drv_probe()
3255 mdp->cd = (struct sh_eth_cpu_data *)of_device_get_match_data(&pdev->dev); in sh_eth_drv_probe()
3257 mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type); in sh_eth_drv_probe()
3258 if (!mdp->reg_offset) { in sh_eth_drv_probe()
3260 mdp->cd->register_type); in sh_eth_drv_probe()
3264 sh_eth_set_default_cpu_data(mdp->cd); in sh_eth_drv_probe()
3273 if (mdp->cd->rx_csum) { in sh_eth_drv_probe()
3279 if (mdp->cd->tsu) in sh_eth_drv_probe()
3287 mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE; in sh_eth_drv_probe()
3297 if (mdp->cd->tsu) { in sh_eth_drv_probe()
3319 mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start, in sh_eth_drv_probe()
3321 if (!mdp->tsu_addr) { in sh_eth_drv_probe()
3326 mdp->port = port; in sh_eth_drv_probe()
3331 if (mdp->cd->chip_reset) in sh_eth_drv_probe()
3332 mdp->cd->chip_reset(ndev); in sh_eth_drv_probe()
3335 sh_eth_tsu_init(mdp); in sh_eth_drv_probe()
3339 if (mdp->cd->rmiimode) in sh_eth_drv_probe()
3343 ret = sh_mdio_init(mdp, pd); in sh_eth_drv_probe()
3350 netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64); in sh_eth_drv_probe()
3357 if (mdp->cd->magic) in sh_eth_drv_probe()
3370 netif_napi_del(&mdp->napi); in sh_eth_drv_probe()
3371 sh_mdio_release(mdp); in sh_eth_drv_probe()
3385 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_drv_remove() local
3388 netif_napi_del(&mdp->napi); in sh_eth_drv_remove()
3389 sh_mdio_release(mdp); in sh_eth_drv_remove()
3400 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_wol_setup() local
3404 napi_disable(&mdp->napi); in sh_eth_wol_setup()
3415 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_wol_restore() local
3418 napi_enable(&mdp->napi); in sh_eth_wol_restore()
3441 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_suspend() local
3449 if (mdp->wol_enabled) in sh_eth_suspend()
3460 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_resume() local
3466 if (mdp->wol_enabled) in sh_eth_resume()
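
Finally, the PM hooks (lines 3441-3466) route through the Wake-on-LAN state chosen in sh_eth_set_wol(): with wol_enabled set, suspend arms magic-packet detection via sh_eth_wol_setup() instead of fully closing the device, and resume undoes whichever path was taken. A sketch of the suspend side, assuming the standard dev_get_drvdata()/netif_device_detach() frame around the matched lines:

static int sh_eth_suspend(struct device *dev)
{
    struct net_device *ndev = dev_get_drvdata(dev);
    struct sh_eth_private *mdp = netdev_priv(ndev);
    int ret;

    if (!netif_running(ndev))
        return 0;

    netif_device_detach(ndev);

    if (mdp->wol_enabled)
        ret = sh_eth_wol_setup(ndev);  /* keep the MagicPacket detector armed */
    else
        ret = sh_eth_close(ndev);

    return ret;
}
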