Lines matching full:rp (identifier search over the via-rhine Ethernet driver, drivers/net/ethernet/via/via-rhine.c)
521 static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low) in rhine_wait_bit() argument
523 void __iomem *ioaddr = rp->base; in rhine_wait_bit()
534 netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle " in rhine_wait_bit()
539 static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask) in rhine_wait_bit_high() argument
541 rhine_wait_bit(rp, reg, mask, false); in rhine_wait_bit_high()
544 static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask) in rhine_wait_bit_low() argument
546 rhine_wait_bit(rp, reg, mask, true); in rhine_wait_bit_low()
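rhine_wait_bit() is the driver's generic register-poll helper; the _high/_low wrappers only pick the polarity. A minimal sketch of the loop follows (the iteration budget and udelay() step are assumptions; this and all later sketches assume the driver's own includes, register enums and struct rhine_private):

    static void wait_bit_sketch(void __iomem *ioaddr, u8 reg, u8 mask, bool low)
    {
            int i;

            for (i = 0; i < 1024; i++) {
                    bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);

                    if (low ^ has_mask_bits)
                            break;          /* bit reached the wanted level */
                    udelay(10);
            }
            /* the real helper emits the netif_dbg() "bit wait" message
             * (line 534 above) when the wait ran suspiciously long */
    }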
549 static u32 rhine_get_events(struct rhine_private *rp) in rhine_get_events() argument
551 void __iomem *ioaddr = rp->base; in rhine_get_events()
556 if (rp->quirks & rqStatusWBRace) in rhine_get_events()
561 static void rhine_ack_events(struct rhine_private *rp, u32 mask) in rhine_ack_events() argument
563 void __iomem *ioaddr = rp->base; in rhine_ack_events()
565 if (rp->quirks & rqStatusWBRace) in rhine_ack_events()
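rhine_get_events()/rhine_ack_events() pair a status read with a write-one-to-clear acknowledge; chips with the rqStatusWBRace quirk add a second 8-bit status byte that gets folded into bits 23:16. A hedged sketch of the pattern (register offsets are illustrative assumptions):

    enum { IntrStatus = 0x0c, IntrStatus2 = 0x7c };     /* assumed offsets */

    static u32 get_events_sketch(void __iomem *ioaddr, bool wb_race)
    {
            u32 intr_status = ioread16(ioaddr + IntrStatus);

            if (wb_race)
                    intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
            return intr_status;
    }

    static void ack_events_sketch(void __iomem *ioaddr, bool wb_race, u32 mask)
    {
            if (wb_race)
                    iowrite8(mask >> 16, ioaddr + IntrStatus2);
            iowrite16(mask, ioaddr + IntrStatus);       /* write-one-to-clear */
    }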
576 struct rhine_private *rp = netdev_priv(dev); in rhine_power_init() local
577 void __iomem *ioaddr = rp->base; in rhine_power_init()
580 if (rp->quirks & rqWOL) { in rhine_power_init()
590 if (rp->quirks & rq6patterns) in rhine_power_init()
595 if (rp->quirks & rq6patterns) in rhine_power_init()
600 if (rp->quirks & rq6patterns) in rhine_power_init()
632 struct rhine_private *rp = netdev_priv(dev); in rhine_chip_reset() local
633 void __iomem *ioaddr = rp->base; in rhine_chip_reset()
643 if (rp->quirks & rqForceReset) in rhine_chip_reset()
647 rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset); in rhine_chip_reset()
651 netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ? in rhine_chip_reset()
702 struct rhine_private *rp = netdev_priv(dev); in rhine_reload_eeprom() local
703 void __iomem *ioaddr = rp->base; in rhine_reload_eeprom()
719 enable_mmio(pioaddr, rp->quirks); in rhine_reload_eeprom()
722 if (rp->quirks & rqWOL) in rhine_reload_eeprom()
730 struct rhine_private *rp = netdev_priv(dev); in rhine_poll() local
731 const int irq = rp->irq; in rhine_poll()
739 static void rhine_kick_tx_threshold(struct rhine_private *rp) in rhine_kick_tx_threshold() argument
741 if (rp->tx_thresh < 0xe0) { in rhine_kick_tx_threshold()
742 void __iomem *ioaddr = rp->base; in rhine_kick_tx_threshold()
744 rp->tx_thresh += 0x20; in rhine_kick_tx_threshold()
745 BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig); in rhine_kick_tx_threshold()
749 static void rhine_tx_err(struct rhine_private *rp, u32 status) in rhine_tx_err() argument
751 struct net_device *dev = rp->dev; in rhine_tx_err()
754 netif_info(rp, tx_err, dev, in rhine_tx_err()
759 rhine_kick_tx_threshold(rp); in rhine_tx_err()
760 netif_info(rp, tx_err, dev, "Transmitter underrun, " in rhine_tx_err()
761 "Tx threshold now %02x\n", rp->tx_thresh); in rhine_tx_err()
765 netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n"); in rhine_tx_err()
769 rhine_kick_tx_threshold(rp); in rhine_tx_err()
770 netif_info(rp, tx_err, dev, "Unspecified error. " in rhine_tx_err()
771 "Tx threshold now %02x\n", rp->tx_thresh); in rhine_tx_err()
777 static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp) in rhine_update_rx_crc_and_missed_errord() argument
779 void __iomem *ioaddr = rp->base; in rhine_update_rx_crc_and_missed_errord()
780 struct net_device_stats *stats = &rp->dev->stats; in rhine_update_rx_crc_and_missed_errord()
818 struct rhine_private *rp = container_of(napi, struct rhine_private, napi); in rhine_napipoll() local
819 struct net_device *dev = rp->dev; in rhine_napipoll()
820 void __iomem *ioaddr = rp->base; in rhine_napipoll()
825 status = rhine_get_events(rp); in rhine_napipoll()
826 rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW); in rhine_napipoll()
834 rhine_wait_bit_low(rp, ChipCmd, CmdTxOn); in rhine_napipoll()
836 netif_warn(rp, tx_err, dev, "Tx still on\n"); in rhine_napipoll()
842 rhine_tx_err(rp, status); in rhine_napipoll()
846 spin_lock(&rp->lock); in rhine_napipoll()
847 rhine_update_rx_crc_and_missed_errord(rp); in rhine_napipoll()
848 spin_unlock(&rp->lock); in rhine_napipoll()
853 schedule_work(&rp->slow_event_task); in rhine_napipoll()
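The NAPI handler shows the usual fast/slow split: rx/tx events are acknowledged and handled inside the poll, while RHINE_EVENT_SLOW is left pending and pushed to slow_event_task, which re-enables the slow sources itself. A condensed sketch of that control flow (helper names are the driver's, the structure is a sketch):

    static int napipoll_sketch(struct napi_struct *napi, int budget)
    {
            struct rhine_private *rp =
                    container_of(napi, struct rhine_private, napi);
            u32 status = rhine_get_events(rp);
            u16 enable_mask = RHINE_EVENT & 0xffff;
            int work_done = 0;

            rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);

            if (status & RHINE_EVENT_RX)
                    work_done += rhine_rx(rp->dev, budget);
            if (status & RHINE_EVENT_TX)
                    rhine_tx(rp->dev);
            if (status & RHINE_EVENT_SLOW) {
                    enable_mask &= ~RHINE_EVENT_SLOW;       /* stay masked */
                    schedule_work(&rp->slow_event_task);
            }
            if (work_done < budget) {
                    napi_complete_done(napi, work_done);
                    iowrite16(enable_mask, rp->base + IntrEnable);
            }
            return work_done;
    }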
865 struct rhine_private *rp = netdev_priv(dev); in rhine_hw_init() local
871 if (rp->quirks & rqRhineI) in rhine_hw_init()
900 struct rhine_private *rp; in rhine_init_one_common() local
918 rp = netdev_priv(dev); in rhine_init_one_common()
919 rp->dev = dev; in rhine_init_one_common()
920 rp->quirks = quirks; in rhine_init_one_common()
921 rp->pioaddr = pioaddr; in rhine_init_one_common()
922 rp->base = ioaddr; in rhine_init_one_common()
923 rp->irq = irq; in rhine_init_one_common()
924 rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT); in rhine_init_one_common()
926 phy_id = rp->quirks & rqIntPHY ? 1 : 0; in rhine_init_one_common()
928 u64_stats_init(&rp->tx_stats.syncp); in rhine_init_one_common()
929 u64_stats_init(&rp->rx_stats.syncp); in rhine_init_one_common()
950 spin_lock_init(&rp->lock); in rhine_init_one_common()
951 mutex_init(&rp->task_lock); in rhine_init_one_common()
952 INIT_WORK(&rp->reset_task, rhine_reset_task); in rhine_init_one_common()
953 INIT_WORK(&rp->slow_event_task, rhine_slow_event_task); in rhine_init_one_common()
955 rp->mii_if.dev = dev; in rhine_init_one_common()
956 rp->mii_if.mdio_read = mdio_read; in rhine_init_one_common()
957 rp->mii_if.mdio_write = mdio_write; in rhine_init_one_common()
958 rp->mii_if.phy_id_mask = 0x1f; in rhine_init_one_common()
959 rp->mii_if.reg_num_mask = 0x1f; in rhine_init_one_common()
966 netif_napi_add(dev, &rp->napi, rhine_napipoll, 64); in rhine_init_one_common()
968 if (rp->quirks & rqRhineI) in rhine_init_one_common()
971 if (rp->quirks & rqMgmt) in rhine_init_one_common()
981 if (rp->quirks & rqRhineI) in rhine_init_one_common()
983 else if (rp->quirks & rqStatusWBRace) in rhine_init_one_common()
985 else if (rp->quirks & rqMgmt) in rhine_init_one_common()
991 name, ioaddr, dev->dev_addr, rp->irq); in rhine_init_one_common()
1001 rp->mii_if.advertising = mdio_read(dev, phy_id, 4); in rhine_init_one_common()
1005 mii_status, rp->mii_if.advertising, in rhine_init_one_common()
1016 rp->mii_if.phy_id = phy_id; in rhine_init_one_common()
1018 netif_info(rp, probe, dev, "No D3 power state at shutdown\n"); in rhine_init_one_common()
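The probe path is the standard netdev_priv() idiom: the private struct lives in the same allocation as the net_device, so probe only has to fill it in. A minimal sketch of that wiring, with error unwinding and MAC/PHY setup trimmed:

    static int probe_sketch(struct device *hwdev, void __iomem *ioaddr,
                            long pioaddr, u32 quirks, int irq)
    {
            struct net_device *dev;
            struct rhine_private *rp;

            dev = alloc_etherdev(sizeof(struct rhine_private));
            if (!dev)
                    return -ENOMEM;
            SET_NETDEV_DEV(dev, hwdev);

            rp = netdev_priv(dev);          /* storage trails the net_device */
            rp->dev = dev;
            rp->quirks = quirks;
            rp->pioaddr = pioaddr;
            rp->base = ioaddr;
            rp->irq = irq;

            spin_lock_init(&rp->lock);
            mutex_init(&rp->task_lock);
            INIT_WORK(&rp->reset_task, rhine_reset_task);
            INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);
            netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);

            return register_netdev(dev);
    }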
1143 struct rhine_private *rp = netdev_priv(dev); in alloc_ring() local
1157 if (rp->quirks & rqRhineI) { in alloc_ring()
1158 rp->tx_bufs = dma_alloc_coherent(hwdev, in alloc_ring()
1160 &rp->tx_bufs_dma, in alloc_ring()
1162 if (rp->tx_bufs == NULL) { in alloc_ring()
1171 rp->rx_ring = ring; in alloc_ring()
1172 rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc); in alloc_ring()
1173 rp->rx_ring_dma = ring_dma; in alloc_ring()
1174 rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc); in alloc_ring()
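alloc_ring() makes a single coherent DMA allocation and carves it up: RX descriptors first, TX descriptors immediately behind, with the CPU and DMA views split at the same offset. A sketch of the layout arithmetic (GFP flag and error paths simplified):

    static int alloc_ring_sketch(struct device *hwdev, struct rhine_private *rp)
    {
            size_t size = RX_RING_SIZE * sizeof(struct rx_desc) +
                          TX_RING_SIZE * sizeof(struct tx_desc);
            dma_addr_t ring_dma;
            void *ring;

            ring = dma_alloc_coherent(hwdev, size, &ring_dma, GFP_KERNEL);
            if (!ring)
                    return -ENOMEM;

            rp->rx_ring = ring;                     /* [0, rx area) */
            rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
            rp->rx_ring_dma = ring_dma;             /* same split, DMA view */
            rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
            return 0;
    }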
1181 struct rhine_private *rp = netdev_priv(dev); in free_ring() local
1187 rp->rx_ring, rp->rx_ring_dma); in free_ring()
1188 rp->tx_ring = NULL; in free_ring()
1190 if (rp->tx_bufs) in free_ring()
1192 rp->tx_bufs, rp->tx_bufs_dma); in free_ring()
1194 rp->tx_bufs = NULL; in free_ring()
1206 struct rhine_private *rp = netdev_priv(dev); in rhine_skb_dma_init() local
1208 const int size = rp->rx_buf_sz; in rhine_skb_dma_init()
1216 netif_err(rp, drv, dev, "Rx DMA mapping failure\n"); in rhine_skb_dma_init()
1224 static void rhine_reset_rbufs(struct rhine_private *rp) in rhine_reset_rbufs() argument
1228 rp->cur_rx = 0; in rhine_reset_rbufs()
1231 rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn); in rhine_reset_rbufs()
1234 static inline void rhine_skb_dma_nic_store(struct rhine_private *rp, in rhine_skb_dma_nic_store() argument
1237 rp->rx_skbuff_dma[entry] = sd->dma; in rhine_skb_dma_nic_store()
1238 rp->rx_skbuff[entry] = sd->skb; in rhine_skb_dma_nic_store()
1240 rp->rx_ring[entry].addr = cpu_to_le32(sd->dma); in rhine_skb_dma_nic_store()
1248 struct rhine_private *rp = netdev_priv(dev); in alloc_rbufs() local
1252 rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); in alloc_rbufs()
1253 next = rp->rx_ring_dma; in alloc_rbufs()
1257 rp->rx_ring[i].rx_status = 0; in alloc_rbufs()
1258 rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz); in alloc_rbufs()
1260 rp->rx_ring[i].next_desc = cpu_to_le32(next); in alloc_rbufs()
1261 rp->rx_skbuff[i] = NULL; in alloc_rbufs()
1264 rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma); in alloc_rbufs()
1276 rhine_skb_dma_nic_store(rp, &sd, i); in alloc_rbufs()
1279 rhine_reset_rbufs(rp); in alloc_rbufs()
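alloc_rbufs() plus rhine_reset_rbufs() build a hardware-circular ring: each next_desc holds the DMA address of the following descriptor, the last one wraps back to rx_ring_dma, and setting DescOwn hands each buffer to the NIC. A sketch of the chaining, assuming the rx_desc layout visible in the matches:

    static void chain_rx_ring_sketch(struct rhine_private *rp)
    {
            dma_addr_t next = rp->rx_ring_dma;
            int i;

            for (i = 0; i < RX_RING_SIZE; i++) {
                    rp->rx_ring[i].rx_status = 0;
                    rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
                    next += sizeof(struct rx_desc);
                    rp->rx_ring[i].next_desc = cpu_to_le32(next);
            }
            /* close the ring: last descriptor points at the first */
            rp->rx_ring[i - 1].next_desc = cpu_to_le32(rp->rx_ring_dma);

            /* once buffers are mapped, give the whole ring to the NIC */
            for (i = 0; i < RX_RING_SIZE; i++)
                    rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
    }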
1286 struct rhine_private *rp = netdev_priv(dev); in free_rbufs() local
1292 rp->rx_ring[i].rx_status = 0; in free_rbufs()
1293 rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */ in free_rbufs()
1294 if (rp->rx_skbuff[i]) { in free_rbufs()
1296 rp->rx_skbuff_dma[i], in free_rbufs()
1297 rp->rx_buf_sz, DMA_FROM_DEVICE); in free_rbufs()
1298 dev_kfree_skb(rp->rx_skbuff[i]); in free_rbufs()
1300 rp->rx_skbuff[i] = NULL; in free_rbufs()
1306 struct rhine_private *rp = netdev_priv(dev); in alloc_tbufs() local
1310 rp->dirty_tx = rp->cur_tx = 0; in alloc_tbufs()
1311 next = rp->tx_ring_dma; in alloc_tbufs()
1313 rp->tx_skbuff[i] = NULL; in alloc_tbufs()
1314 rp->tx_ring[i].tx_status = 0; in alloc_tbufs()
1315 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC); in alloc_tbufs()
1317 rp->tx_ring[i].next_desc = cpu_to_le32(next); in alloc_tbufs()
1318 if (rp->quirks & rqRhineI) in alloc_tbufs()
1319 rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ]; in alloc_tbufs()
1321 rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma); in alloc_tbufs()
1328 struct rhine_private *rp = netdev_priv(dev); in free_tbufs() local
1333 rp->tx_ring[i].tx_status = 0; in free_tbufs()
1334 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC); in free_tbufs()
1335 rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */ in free_tbufs()
1336 if (rp->tx_skbuff[i]) { in free_tbufs()
1337 if (rp->tx_skbuff_dma[i]) { in free_tbufs()
1339 rp->tx_skbuff_dma[i], in free_tbufs()
1340 rp->tx_skbuff[i]->len, in free_tbufs()
1343 dev_kfree_skb(rp->tx_skbuff[i]); in free_tbufs()
1345 rp->tx_skbuff[i] = NULL; in free_tbufs()
1346 rp->tx_buf[i] = NULL; in free_tbufs()
1352 struct rhine_private *rp = netdev_priv(dev); in rhine_check_media() local
1353 void __iomem *ioaddr = rp->base; in rhine_check_media()
1355 if (!rp->mii_if.force_media) in rhine_check_media()
1356 mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media); in rhine_check_media()
1358 if (rp->mii_if.full_duplex) in rhine_check_media()
1365 netif_info(rp, link, dev, "force_media %d, carrier %d\n", in rhine_check_media()
1366 rp->mii_if.force_media, netif_carrier_ok(dev)); in rhine_check_media()
1373 struct rhine_private *rp = netdev_priv(dev); in rhine_set_carrier() local
1383 netif_info(rp, link, dev, "force_media %d, carrier %d\n", in rhine_set_carrier()
1493 struct rhine_private *rp = netdev_priv(dev); in rhine_init_cam_filter() local
1494 void __iomem *ioaddr = rp->base; in rhine_init_cam_filter()
1513 struct rhine_private *rp = netdev_priv(dev); in rhine_update_vcam() local
1514 void __iomem *ioaddr = rp->base; in rhine_update_vcam()
1519 for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) { in rhine_update_vcam()
1530 struct rhine_private *rp = netdev_priv(dev); in rhine_vlan_rx_add_vid() local
1532 spin_lock_bh(&rp->lock); in rhine_vlan_rx_add_vid()
1533 set_bit(vid, rp->active_vlans); in rhine_vlan_rx_add_vid()
1535 spin_unlock_bh(&rp->lock); in rhine_vlan_rx_add_vid()
1541 struct rhine_private *rp = netdev_priv(dev); in rhine_vlan_rx_kill_vid() local
1543 spin_lock_bh(&rp->lock); in rhine_vlan_rx_kill_vid()
1544 clear_bit(vid, rp->active_vlans); in rhine_vlan_rx_kill_vid()
1546 spin_unlock_bh(&rp->lock); in rhine_vlan_rx_kill_vid()
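The VLAN hooks keep a VLAN_N_VID-wide bitmap (active_vlans) in the private struct; add/kill just flip one bit under the BH-safe spinlock, after which rhine_update_vcam() walks the set bits with for_each_set_bit() and rewrites the hardware VLAN CAM. Sketch of the add side:

    static int vlan_add_sketch(struct net_device *dev, __be16 proto, u16 vid)
    {
            struct rhine_private *rp = netdev_priv(dev);

            spin_lock_bh(&rp->lock);        /* lock is also taken in BH */
            set_bit(vid, rp->active_vlans);
            rhine_update_vcam(dev);         /* reprogram the VLAN CAM */
            spin_unlock_bh(&rp->lock);
            return 0;
    }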
1552 struct rhine_private *rp = netdev_priv(dev); in init_registers() local
1553 void __iomem *ioaddr = rp->base; in init_registers()
1563 rp->tx_thresh = 0x20; in init_registers()
1564 rp->rx_thresh = 0x60; /* Written in rhine_set_rx_mode(). */ in init_registers()
1566 iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr); in init_registers()
1567 iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr); in init_registers()
1571 if (rp->quirks & rqMgmt) in init_registers()
1574 napi_enable(&rp->napi); in init_registers()
1584 static void rhine_enable_linkmon(struct rhine_private *rp) in rhine_enable_linkmon() argument
1586 void __iomem *ioaddr = rp->base; in rhine_enable_linkmon()
1592 rhine_wait_bit_high(rp, MIIRegAddr, 0x20); in rhine_enable_linkmon()
1598 static void rhine_disable_linkmon(struct rhine_private *rp) in rhine_disable_linkmon() argument
1600 void __iomem *ioaddr = rp->base; in rhine_disable_linkmon()
1604 if (rp->quirks & rqRhineI) { in rhine_disable_linkmon()
1613 rhine_wait_bit_high(rp, MIIRegAddr, 0x20); in rhine_disable_linkmon()
1619 rhine_wait_bit_high(rp, MIIRegAddr, 0x80); in rhine_disable_linkmon()
1626 struct rhine_private *rp = netdev_priv(dev); in mdio_read() local
1627 void __iomem *ioaddr = rp->base; in mdio_read()
1630 rhine_disable_linkmon(rp); in mdio_read()
1636 rhine_wait_bit_low(rp, MIICmd, 0x40); in mdio_read()
1639 rhine_enable_linkmon(rp); in mdio_read()
1645 struct rhine_private *rp = netdev_priv(dev); in mdio_write() local
1646 void __iomem *ioaddr = rp->base; in mdio_write()
1648 rhine_disable_linkmon(rp); in mdio_write()
1655 rhine_wait_bit_low(rp, MIICmd, 0x20); in mdio_write()
1657 rhine_enable_linkmon(rp); in mdio_write()
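MII access must not race the chip's autonomous link monitor, so every mdio_read()/mdio_write() is bracketed by rhine_disable_linkmon()/rhine_enable_linkmon(); the access itself is "program the address registers, set the go bit in MIICmd, poll it low". A hedged sketch of the read side (MIIPhyAddr/MIIData and the 0x40 read-command bit are recalled from the driver, not shown in the matches; treat them as assumptions):

    static int mdio_read_sketch(struct net_device *dev, int phy_id, int regnum)
    {
            struct rhine_private *rp = netdev_priv(dev);
            void __iomem *ioaddr = rp->base;
            int result;

            rhine_disable_linkmon(rp);              /* stop autopolling */

            iowrite8(phy_id, ioaddr + MIIPhyAddr);
            iowrite8(regnum, ioaddr + MIIRegAddr);
            iowrite8(0x40, ioaddr + MIICmd);        /* trigger the read */
            rhine_wait_bit_low(rp, MIICmd, 0x40);   /* wait for completion */
            result = ioread16(ioaddr + MIIData);

            rhine_enable_linkmon(rp);               /* resume autopolling */
            return result;
    }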
1660 static void rhine_task_disable(struct rhine_private *rp) in rhine_task_disable() argument
1662 mutex_lock(&rp->task_lock); in rhine_task_disable()
1663 rp->task_enable = false; in rhine_task_disable()
1664 mutex_unlock(&rp->task_lock); in rhine_task_disable()
1666 cancel_work_sync(&rp->slow_event_task); in rhine_task_disable()
1667 cancel_work_sync(&rp->reset_task); in rhine_task_disable()
1670 static void rhine_task_enable(struct rhine_private *rp) in rhine_task_enable() argument
1672 mutex_lock(&rp->task_lock); in rhine_task_enable()
1673 rp->task_enable = true; in rhine_task_enable()
1674 mutex_unlock(&rp->task_lock); in rhine_task_enable()
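task_enable plus task_lock is the quiesce protocol for the two work items: disable clears the flag under the mutex and then cancel_work_sync()s both, while every handler re-checks the flag under the same mutex before touching hardware, so nothing runs after rhine_task_disable() returns. Sketch of the handler side of that handshake:

    static void task_sketch(struct work_struct *work)
    {
            struct rhine_private *rp =
                    container_of(work, struct rhine_private, reset_task);

            mutex_lock(&rp->task_lock);
            if (!rp->task_enable)           /* interface going down */
                    goto out;

            /* ... device work that must not race ifdown ... */
    out:
            mutex_unlock(&rp->task_lock);
    }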
1679 struct rhine_private *rp = netdev_priv(dev); in rhine_open() local
1680 void __iomem *ioaddr = rp->base; in rhine_open()
1683 rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev); in rhine_open()
1687 netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq); in rhine_open()
1698 enable_mmio(rp->pioaddr, rp->quirks); in rhine_open()
1701 rhine_task_enable(rp); in rhine_open()
1704 netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n", in rhine_open()
1706 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR)); in rhine_open()
1716 free_irq(rp->irq, dev); in rhine_open()
1722 struct rhine_private *rp = container_of(work, struct rhine_private, in rhine_reset_task() local
1724 struct net_device *dev = rp->dev; in rhine_reset_task()
1726 mutex_lock(&rp->task_lock); in rhine_reset_task()
1728 if (!rp->task_enable) in rhine_reset_task()
1731 napi_disable(&rp->napi); in rhine_reset_task()
1733 spin_lock_bh(&rp->lock); in rhine_reset_task()
1739 rhine_reset_rbufs(rp); in rhine_reset_task()
1745 spin_unlock_bh(&rp->lock); in rhine_reset_task()
1752 mutex_unlock(&rp->task_lock); in rhine_reset_task()
1757 struct rhine_private *rp = netdev_priv(dev); in rhine_tx_timeout() local
1758 void __iomem *ioaddr = rp->base; in rhine_tx_timeout()
1762 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR)); in rhine_tx_timeout()
1764 schedule_work(&rp->reset_task); in rhine_tx_timeout()
1767 static inline bool rhine_tx_queue_full(struct rhine_private *rp) in rhine_tx_queue_full() argument
1769 return (rp->cur_tx - rp->dirty_tx) >= TX_QUEUE_LEN; in rhine_tx_queue_full()
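cur_tx and dirty_tx are free-running 32-bit counters: the ring slot is cur_tx % TX_RING_SIZE, and the unsigned difference counts in-flight descriptors correctly even across overflow, with TX_QUEUE_LEN capping how many slots are used at once. A sketch with a worked wrap-around example:

    static inline bool tx_queue_full_sketch(u32 cur_tx, u32 dirty_tx)
    {
            return (cur_tx - dirty_tx) >= TX_QUEUE_LEN;
    }

    /* e.g. cur_tx = 0x00000002 after wrapping, dirty_tx = 0xfffffffe:
     * 0x00000002 - 0xfffffffe == 4 descriptors in flight. */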
1775 struct rhine_private *rp = netdev_priv(dev); in rhine_start_tx() local
1777 void __iomem *ioaddr = rp->base; in rhine_start_tx()
1784 entry = rp->cur_tx % TX_RING_SIZE; in rhine_start_tx()
1789 rp->tx_skbuff[entry] = skb; in rhine_start_tx()
1791 if ((rp->quirks & rqRhineI) && in rhine_start_tx()
1797 rp->tx_skbuff[entry] = NULL; in rhine_start_tx()
1803 skb_copy_and_csum_dev(skb, rp->tx_buf[entry]); in rhine_start_tx()
1805 memset(rp->tx_buf[entry] + skb->len, 0, in rhine_start_tx()
1807 rp->tx_skbuff_dma[entry] = 0; in rhine_start_tx()
1808 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma + in rhine_start_tx()
1809 (rp->tx_buf[entry] - in rhine_start_tx()
1810 rp->tx_bufs)); in rhine_start_tx()
1812 rp->tx_skbuff_dma[entry] = in rhine_start_tx()
1815 if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) { in rhine_start_tx()
1817 rp->tx_skbuff_dma[entry] = 0; in rhine_start_tx()
1821 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]); in rhine_start_tx()
1824 rp->tx_ring[entry].desc_length = in rhine_start_tx()
1833 rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16); in rhine_start_tx()
1835 rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000); in rhine_start_tx()
1838 rp->tx_ring[entry].tx_status = 0; in rhine_start_tx()
1843 rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn); in rhine_start_tx()
1846 rp->cur_tx++; in rhine_start_tx()
1866 if (rhine_tx_queue_full(rp)) { in rhine_start_tx()
1870 if (!rhine_tx_queue_full(rp)) in rhine_start_tx()
1874 netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n", in rhine_start_tx()
1875 rp->cur_tx - 1, entry); in rhine_start_tx()
1880 static void rhine_irq_disable(struct rhine_private *rp) in rhine_irq_disable() argument
1882 iowrite16(0x0000, rp->base + IntrEnable); in rhine_irq_disable()
1890 struct rhine_private *rp = netdev_priv(dev); in rhine_interrupt() local
1894 status = rhine_get_events(rp); in rhine_interrupt()
1896 netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status); in rhine_interrupt()
1901 rhine_irq_disable(rp); in rhine_interrupt()
1902 napi_schedule(&rp->napi); in rhine_interrupt()
1906 netif_err(rp, intr, dev, "Something Wicked happened! %08x\n", in rhine_interrupt()
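The hard IRQ path is deliberately thin: read the event bits, and if any are set, mask the chip (rhine_irq_disable() writes 0x0000 to IntrEnable) and defer everything to NAPI. A sketch, with the driver's "Something Wicked" unknown-source logging trimmed:

    static irqreturn_t interrupt_sketch(int irq, void *dev_instance)
    {
            struct net_device *dev = dev_instance;
            struct rhine_private *rp = netdev_priv(dev);
            u32 status = rhine_get_events(rp);

            if (!status)
                    return IRQ_NONE;        /* shared line, not ours */

            rhine_irq_disable(rp);          /* masked until NAPI re-enables */
            napi_schedule(&rp->napi);
            return IRQ_HANDLED;
    }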
1917 struct rhine_private *rp = netdev_priv(dev); in rhine_tx() local
1920 unsigned int dirty_tx = rp->dirty_tx; in rhine_tx()
1931 cur_tx = rp->cur_tx; in rhine_tx()
1935 u32 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status); in rhine_tx()
1937 netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n", in rhine_tx()
1941 skb = rp->tx_skbuff[entry]; in rhine_tx()
1943 netif_dbg(rp, tx_done, dev, in rhine_tx()
1954 if (((rp->quirks & rqRhineI) && txstatus & 0x0002) || in rhine_tx()
1957 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn); in rhine_tx()
1962 if (rp->quirks & rqRhineI) in rhine_tx()
1966 netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n", in rhine_tx()
1969 u64_stats_update_begin(&rp->tx_stats.syncp); in rhine_tx()
1970 rp->tx_stats.bytes += skb->len; in rhine_tx()
1971 rp->tx_stats.packets++; in rhine_tx()
1972 u64_stats_update_end(&rp->tx_stats.syncp); in rhine_tx()
1975 if (rp->tx_skbuff_dma[entry]) { in rhine_tx()
1977 rp->tx_skbuff_dma[entry], in rhine_tx()
1984 rp->tx_skbuff[entry] = NULL; in rhine_tx()
1988 rp->dirty_tx = dirty_tx; in rhine_tx()
1995 if (!rhine_tx_queue_full(rp) && netif_queue_stopped(dev)) { in rhine_tx()
1999 if (rhine_tx_queue_full(rp)) in rhine_tx()
2034 struct rhine_private *rp = netdev_priv(dev); in rhine_rx() local
2036 int entry = rp->cur_rx % RX_RING_SIZE; in rhine_rx()
2039 netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__, in rhine_rx()
2040 entry, le32_to_cpu(rp->rx_ring[entry].rx_status)); in rhine_rx()
2044 struct rx_desc *desc = rp->rx_ring + entry; in rhine_rx()
2051 netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__, in rhine_rx()
2064 netif_dbg(rp, rx_err, dev, in rhine_rx()
2076 spin_lock(&rp->lock); in rhine_rx()
2078 spin_unlock(&rp->lock); in rhine_rx()
2094 rp->rx_skbuff_dma[entry], in rhine_rx()
2095 rp->rx_buf_sz, in rhine_rx()
2099 rp->rx_skbuff[entry]->data, in rhine_rx()
2103 rp->rx_skbuff_dma[entry], in rhine_rx()
2104 rp->rx_buf_sz, in rhine_rx()
2112 skb = rp->rx_skbuff[entry]; in rhine_rx()
2115 rp->rx_skbuff_dma[entry], in rhine_rx()
2116 rp->rx_buf_sz, in rhine_rx()
2118 rhine_skb_dma_nic_store(rp, &sd, entry); in rhine_rx()
2129 u64_stats_update_begin(&rp->rx_stats.syncp); in rhine_rx()
2130 rp->rx_stats.bytes += pkt_len; in rhine_rx()
2131 rp->rx_stats.packets++; in rhine_rx()
2132 u64_stats_update_end(&rp->rx_stats.syncp); in rhine_rx()
2136 entry = (++rp->cur_rx) % RX_RING_SIZE; in rhine_rx()
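The receive loop uses the classic copybreak split: frames below the rx_copybreak module parameter are copied into a fresh skb so the mapped buffer can be recycled (sync for CPU, copy, sync back for the device), while larger frames are unmapped and passed up whole, with a newly mapped buffer installed in their place. A sketch of that decision (error checks trimmed; 'sd' is the driver's pre-mapped skb/DMA replacement pair):

    static struct sk_buff *rx_copybreak_sketch(struct net_device *dev,
                                               struct rhine_private *rp,
                                               struct device *hwdev,
                                               struct rhine_skb_dma *sd,
                                               int entry, int pkt_len)
    {
            struct sk_buff *skb;

            if (pkt_len < rx_copybreak) {
                    /* small frame: copy out, keep the mapped buffer */
                    skb = netdev_alloc_skb_ip_align(dev, pkt_len);
                    dma_sync_single_for_cpu(hwdev, rp->rx_skbuff_dma[entry],
                                            rp->rx_buf_sz, DMA_FROM_DEVICE);
                    skb_copy_to_linear_data(skb, rp->rx_skbuff[entry]->data,
                                            pkt_len);
                    dma_sync_single_for_device(hwdev, rp->rx_skbuff_dma[entry],
                                               rp->rx_buf_sz, DMA_FROM_DEVICE);
            } else {
                    /* large frame: pass the buffer up, install a fresh one */
                    skb = rp->rx_skbuff[entry];
                    dma_unmap_single(hwdev, rp->rx_skbuff_dma[entry],
                                     rp->rx_buf_sz, DMA_FROM_DEVICE);
                    rhine_skb_dma_nic_store(rp, sd, entry);
            }
            return skb;
    }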
2147 struct rhine_private *rp = netdev_priv(dev); in rhine_restart_tx() local
2148 void __iomem *ioaddr = rp->base; in rhine_restart_tx()
2149 int entry = rp->dirty_tx % TX_RING_SIZE; in rhine_restart_tx()
2156 intr_status = rhine_get_events(rp); in rhine_restart_tx()
2161 iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc), in rhine_restart_tx()
2167 if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000)) in rhine_restart_tx()
2177 netif_warn(rp, tx_err, dev, "another error occurred %08x\n", in rhine_restart_tx()
2185 struct rhine_private *rp = in rhine_slow_event_task() local
2187 struct net_device *dev = rp->dev; in rhine_slow_event_task()
2190 mutex_lock(&rp->task_lock); in rhine_slow_event_task()
2192 if (!rp->task_enable) in rhine_slow_event_task()
2195 intr_status = rhine_get_events(rp); in rhine_slow_event_task()
2196 rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW); in rhine_slow_event_task()
2202 netif_warn(rp, hw, dev, "PCI error\n"); in rhine_slow_event_task()
2204 iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable); in rhine_slow_event_task()
2207 mutex_unlock(&rp->task_lock); in rhine_slow_event_task()
2213 struct rhine_private *rp = netdev_priv(dev); in rhine_get_stats64() local
2216 spin_lock_bh(&rp->lock); in rhine_get_stats64()
2217 rhine_update_rx_crc_and_missed_errord(rp); in rhine_get_stats64()
2218 spin_unlock_bh(&rp->lock); in rhine_get_stats64()
2223 start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp); in rhine_get_stats64()
2224 stats->rx_packets = rp->rx_stats.packets; in rhine_get_stats64()
2225 stats->rx_bytes = rp->rx_stats.bytes; in rhine_get_stats64()
2226 } while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start)); in rhine_get_stats64()
2229 start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp); in rhine_get_stats64()
2230 stats->tx_packets = rp->tx_stats.packets; in rhine_get_stats64()
2231 stats->tx_bytes = rp->tx_stats.bytes; in rhine_get_stats64()
2232 } while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start)); in rhine_get_stats64()
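rhine_get_stats64() first folds the hardware CRC/missed counters into dev->stats under the spinlock, then snapshots the 64-bit soft counters with the u64_stats seqcount loop: the reader retries until the writer side was quiet for the whole read, so the hot path needs no lock. Sketch of the reader side:

    static void read_rx_stats_sketch(struct rhine_private *rp,
                                     struct rtnl_link_stats64 *stats)
    {
            unsigned int start;

            do {
                    start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp);
                    stats->rx_packets = rp->rx_stats.packets;
                    stats->rx_bytes = rp->rx_stats.bytes;
            } while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start));
    }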
2237 struct rhine_private *rp = netdev_priv(dev); in rhine_set_rx_mode() local
2238 void __iomem *ioaddr = rp->base; in rhine_set_rx_mode()
2252 } else if (rp->quirks & rqMgmt) { in rhine_set_rx_mode()
2274 if (rp->quirks & rqMgmt) { in rhine_set_rx_mode()
2294 struct rhine_private *rp = netdev_priv(dev); in netdev_get_link_ksettings() local
2296 mutex_lock(&rp->task_lock); in netdev_get_link_ksettings()
2297 mii_ethtool_get_link_ksettings(&rp->mii_if, cmd); in netdev_get_link_ksettings()
2298 mutex_unlock(&rp->task_lock); in netdev_get_link_ksettings()
2306 struct rhine_private *rp = netdev_priv(dev); in netdev_set_link_ksettings() local
2309 mutex_lock(&rp->task_lock); in netdev_set_link_ksettings()
2310 rc = mii_ethtool_set_link_ksettings(&rp->mii_if, cmd); in netdev_set_link_ksettings()
2311 rhine_set_carrier(&rp->mii_if); in netdev_set_link_ksettings()
2312 mutex_unlock(&rp->task_lock); in netdev_set_link_ksettings()
2319 struct rhine_private *rp = netdev_priv(dev); in netdev_nway_reset() local
2321 return mii_nway_restart(&rp->mii_if); in netdev_nway_reset()
2326 struct rhine_private *rp = netdev_priv(dev); in netdev_get_link() local
2328 return mii_link_ok(&rp->mii_if); in netdev_get_link()
2333 struct rhine_private *rp = netdev_priv(dev); in netdev_get_msglevel() local
2335 return rp->msg_enable; in netdev_get_msglevel()
2340 struct rhine_private *rp = netdev_priv(dev); in netdev_set_msglevel() local
2342 rp->msg_enable = value; in netdev_set_msglevel()
2347 struct rhine_private *rp = netdev_priv(dev); in rhine_get_wol() local
2349 if (!(rp->quirks & rqWOL)) in rhine_get_wol()
2352 spin_lock_irq(&rp->lock); in rhine_get_wol()
2355 wol->wolopts = rp->wolopts; in rhine_get_wol()
2356 spin_unlock_irq(&rp->lock); in rhine_get_wol()
2361 struct rhine_private *rp = netdev_priv(dev); in rhine_set_wol() local
2365 if (!(rp->quirks & rqWOL)) in rhine_set_wol()
2371 spin_lock_irq(&rp->lock); in rhine_set_wol()
2372 rp->wolopts = wol->wolopts; in rhine_set_wol()
2373 spin_unlock_irq(&rp->lock); in rhine_set_wol()
2392 struct rhine_private *rp = netdev_priv(dev); in netdev_ioctl() local
2398 mutex_lock(&rp->task_lock); in netdev_ioctl()
2399 rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL); in netdev_ioctl()
2400 rhine_set_carrier(&rp->mii_if); in netdev_ioctl()
2401 mutex_unlock(&rp->task_lock); in netdev_ioctl()
2408 struct rhine_private *rp = netdev_priv(dev); in rhine_close() local
2409 void __iomem *ioaddr = rp->base; in rhine_close()
2411 rhine_task_disable(rp); in rhine_close()
2412 napi_disable(&rp->napi); in rhine_close()
2415 netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n", in rhine_close()
2419 iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig); in rhine_close()
2421 rhine_irq_disable(rp); in rhine_close()
2426 free_irq(rp->irq, dev); in rhine_close()
2438 struct rhine_private *rp = netdev_priv(dev); in rhine_remove_one_pci() local
2442 pci_iounmap(pdev, rp->base); in rhine_remove_one_pci()
2452 struct rhine_private *rp = netdev_priv(dev); in rhine_remove_one_platform() local
2456 iounmap(rp->base); in rhine_remove_one_platform()
2466 struct rhine_private *rp = netdev_priv(dev); in rhine_shutdown_pci() local
2467 void __iomem *ioaddr = rp->base; in rhine_shutdown_pci()
2469 if (!(rp->quirks & rqWOL)) in rhine_shutdown_pci()
2475 if (rp->quirks & rq6patterns) in rhine_shutdown_pci()
2478 spin_lock(&rp->lock); in rhine_shutdown_pci()
2480 if (rp->wolopts & WAKE_MAGIC) { in rhine_shutdown_pci()
2489 if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST)) in rhine_shutdown_pci()
2492 if (rp->wolopts & WAKE_PHY) in rhine_shutdown_pci()
2495 if (rp->wolopts & WAKE_UCAST) in rhine_shutdown_pci()
2498 if (rp->wolopts) { in rhine_shutdown_pci()
2504 spin_unlock(&rp->lock); in rhine_shutdown_pci()
2518 struct rhine_private *rp = netdev_priv(dev); in rhine_suspend() local
2523 rhine_task_disable(rp); in rhine_suspend()
2524 rhine_irq_disable(rp); in rhine_suspend()
2525 napi_disable(&rp->napi); in rhine_suspend()
2538 struct rhine_private *rp = netdev_priv(dev); in rhine_resume() local
2543 enable_mmio(rp->pioaddr, rp->quirks); in rhine_resume()
2547 rhine_reset_rbufs(rp); in rhine_resume()
2548 rhine_task_enable(rp); in rhine_resume()
2549 spin_lock_bh(&rp->lock); in rhine_resume()
2551 spin_unlock_bh(&rp->lock); in rhine_resume()