Lines Matching refs:cp

355 #define cpr8(reg)	readb(cp->regs + (reg))
356 #define cpr16(reg) readw(cp->regs + (reg))
357 #define cpr32(reg) readl(cp->regs + (reg))
358 #define cpw8(reg,val) writeb((val), cp->regs + (reg))
359 #define cpw16(reg,val) writew((val), cp->regs + (reg))
360 #define cpw32(reg,val) writel((val), cp->regs + (reg))
362 writeb((val), cp->regs + (reg)); \
363 readb(cp->regs + (reg)); \
366 writew((val), cp->regs + (reg)); \
367 readw(cp->regs + (reg)); \
370 writel((val), cp->regs + (reg)); \
371 readl(cp->regs + (reg)); \
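The write/read-back pairs at lines 362-371 are the bodies of the _f ("flush") variants of the write macros (cpw16_f and cpw32_f are used further down, at lines 1484 and 922): each MMIO write is immediately followed by a read of the same register, which forces the posted write out to the chip before the macro returns. A minimal sketch of that pattern, assuming the usual do { ... } while (0) wrapper that the listing elides:

    /* Sketch of the write-then-flush accessor. Only the writeb()/readb()
     * lines appear in the listing above; the wrapper is assumed. */
    #define cpw8_f(reg, val) do {                                          \
            writeb((val), cp->regs + (reg)); /* post the MMIO write   */   \
            readb(cp->regs + (reg));         /* read back to flush it */   \
    } while (0)
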
376 static void cp_tx (struct cp_private *cp);
377 static void cp_clean_rings (struct cp_private *cp);
407 static inline void cp_set_rxbufsize (struct cp_private *cp) in cp_set_rxbufsize() argument
409 unsigned int mtu = cp->dev->mtu; in cp_set_rxbufsize()
413 cp->rx_buf_sz = mtu + ETH_HLEN + 8; in cp_set_rxbufsize()
415 cp->rx_buf_sz = PKT_BUF_SZ; in cp_set_rxbufsize()
418 static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb, in cp_rx_skb() argument
423 skb->protocol = eth_type_trans (skb, cp->dev); in cp_rx_skb()
425 cp->dev->stats.rx_packets++; in cp_rx_skb()
426 cp->dev->stats.rx_bytes += skb->len; in cp_rx_skb()
431 napi_gro_receive(&cp->napi, skb); in cp_rx_skb()
434 static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail, in cp_rx_err_acct() argument
437 netif_dbg(cp, rx_err, cp->dev, "rx err, slot %d status 0x%x len %d\n", in cp_rx_err_acct()
439 cp->dev->stats.rx_errors++; in cp_rx_err_acct()
441 cp->dev->stats.rx_frame_errors++; in cp_rx_err_acct()
443 cp->dev->stats.rx_crc_errors++; in cp_rx_err_acct()
445 cp->dev->stats.rx_length_errors++; in cp_rx_err_acct()
447 cp->dev->stats.rx_length_errors++; in cp_rx_err_acct()
449 cp->dev->stats.rx_fifo_errors++; in cp_rx_err_acct()
465 struct cp_private *cp = container_of(napi, struct cp_private, napi); in cp_rx_poll() local
466 struct net_device *dev = cp->dev; in cp_rx_poll()
467 unsigned int rx_tail = cp->rx_tail; in cp_rx_poll()
479 const unsigned buflen = cp->rx_buf_sz; in cp_rx_poll()
481 skb = cp->rx_skb[rx_tail]; in cp_rx_poll()
484 desc = &cp->rx_ring[rx_tail]; in cp_rx_poll()
498 cp_rx_err_acct(cp, rx_tail, status, len); in cp_rx_poll()
500 cp->cp_stats.rx_frags++; in cp_rx_poll()
505 cp_rx_err_acct(cp, rx_tail, status, len); in cp_rx_poll()
509 netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n", in cp_rx_poll()
518 new_mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen, in cp_rx_poll()
520 if (dma_mapping_error(&cp->pdev->dev, new_mapping)) { in cp_rx_poll()
526 dma_unmap_single(&cp->pdev->dev, mapping, in cp_rx_poll()
537 cp->rx_skb[rx_tail] = new_skb; in cp_rx_poll()
539 cp_rx_skb(cp, skb, desc); in cp_rx_poll()
544 cp->rx_ring[rx_tail].opts2 = 0; in cp_rx_poll()
545 cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping); in cp_rx_poll()
548 cp->rx_buf_sz); in cp_rx_poll()
550 desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz); in cp_rx_poll()
554 cp->rx_tail = rx_tail; in cp_rx_poll()
566 spin_lock_irqsave(&cp->lock, flags); in cp_rx_poll()
569 spin_unlock_irqrestore(&cp->lock, flags); in cp_rx_poll()
578 struct cp_private *cp; in cp_interrupt() local
584 cp = netdev_priv(dev); in cp_interrupt()
586 spin_lock(&cp->lock); in cp_interrupt()
594 netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n", in cp_interrupt()
606 if (napi_schedule_prep(&cp->napi)) { in cp_interrupt()
608 __napi_schedule(&cp->napi); in cp_interrupt()
612 cp_tx(cp); in cp_interrupt()
614 mii_check_media(&cp->mii_if, netif_msg_link(cp), false); in cp_interrupt()
620 pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status); in cp_interrupt()
621 pci_write_config_word(cp->pdev, PCI_STATUS, pci_status); in cp_interrupt()
629 spin_unlock(&cp->lock); in cp_interrupt()
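The block at lines 606-608 is the usual interrupt-to-NAPI hand-off: if polling is not already scheduled, the handler schedules cp_rx_poll and lets it drain the RX ring outside hard-interrupt context, and cp_rx_poll re-enables the RX interrupt sources once it completes within its budget (presumably inside the locked region at lines 566-569). A rough sketch of the ISR side of that pattern; rx_intr_bits, IntrMask and intr_mask_without_rx stand in for the driver's own names and are not taken from the listing:

    /* Sketch of the ISR side of the NAPI hand-off (placeholder constants). */
    if (status & rx_intr_bits) {
            if (napi_schedule_prep(&cp->napi)) {
                    cpw16(IntrMask, intr_mask_without_rx); /* mask RX sources      */
                    __napi_schedule(&cp->napi);            /* cp_rx_poll runs soon */
            }
    }
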
641 struct cp_private *cp = netdev_priv(dev); in cp_poll_controller() local
642 const int irq = cp->pdev->irq; in cp_poll_controller()
650 static void cp_tx (struct cp_private *cp) in cp_tx() argument
652 unsigned tx_head = cp->tx_head; in cp_tx()
653 unsigned tx_tail = cp->tx_tail; in cp_tx()
657 struct cp_desc *txd = cp->tx_ring + tx_tail; in cp_tx()
666 skb = cp->tx_skb[tx_tail]; in cp_tx()
669 dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr), in cp_tx()
670 cp->tx_opts[tx_tail] & 0xffff, in cp_tx()
675 netif_dbg(cp, tx_err, cp->dev, in cp_tx()
677 cp->dev->stats.tx_errors++; in cp_tx()
679 cp->dev->stats.tx_window_errors++; in cp_tx()
681 cp->dev->stats.tx_aborted_errors++; in cp_tx()
683 cp->dev->stats.tx_carrier_errors++; in cp_tx()
685 cp->dev->stats.tx_fifo_errors++; in cp_tx()
687 cp->dev->stats.collisions += in cp_tx()
689 cp->dev->stats.tx_packets++; in cp_tx()
690 cp->dev->stats.tx_bytes += skb->len; in cp_tx()
691 netif_dbg(cp, tx_done, cp->dev, in cp_tx()
699 cp->tx_skb[tx_tail] = NULL; in cp_tx()
704 cp->tx_tail = tx_tail; in cp_tx()
706 netdev_completed_queue(cp->dev, pkts_compl, bytes_compl); in cp_tx()
707 if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1)) in cp_tx()
708 netif_wake_queue(cp->dev); in cp_tx()
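The wake-up test at line 707 (and the queue-full checks at lines 746 and 872 below) relies on free-slot accounting for the circular TX ring: tx_head is the producer index advanced by cp_start_xmit, tx_tail is the consumer index advanced here in cp_tx, and one slot is always left unused so that head == tail unambiguously means "empty". A self-contained sketch of that arithmetic with hypothetical names (the ring size and helper names below are illustrative, not copied from the driver):

    /* Illustrative circular-ring accounting (hypothetical names). */
    #define RING_SIZE 64u                         /* assumed power-of-two size */

    static unsigned int ring_next(unsigned int i)
    {
            return (i + 1) & (RING_SIZE - 1);     /* wrap around the ring */
    }

    /* Slots the producer may still fill before it would catch the consumer. */
    static unsigned int ring_free(unsigned int head, unsigned int tail)
    {
            return (tail <= head) ? tail + RING_SIZE - 1 - head
                                  : tail - head - 1;
    }
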
717 static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb, in unwind_tx_frag_mapping() argument
725 cp->tx_skb[index] = NULL; in unwind_tx_frag_mapping()
726 txd = &cp->tx_ring[index]; in unwind_tx_frag_mapping()
728 dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr), in unwind_tx_frag_mapping()
736 struct cp_private *cp = netdev_priv(dev); in cp_start_xmit() local
743 spin_lock_irqsave(&cp->lock, intr_flags); in cp_start_xmit()
746 if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) { in cp_start_xmit()
748 spin_unlock_irqrestore(&cp->lock, intr_flags); in cp_start_xmit()
753 entry = cp->tx_head; in cp_start_xmit()
781 struct cp_desc *txd = &cp->tx_ring[entry]; in cp_start_xmit()
786 mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE); in cp_start_xmit()
787 if (dma_mapping_error(&cp->pdev->dev, mapping)) in cp_start_xmit()
799 cp->tx_skb[entry] = skb; in cp_start_xmit()
800 cp->tx_opts[entry] = opts1; in cp_start_xmit()
801 netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n", in cp_start_xmit()
814 first_mapping = dma_map_single(&cp->pdev->dev, skb->data, in cp_start_xmit()
816 if (dma_mapping_error(&cp->pdev->dev, first_mapping)) in cp_start_xmit()
819 cp->tx_skb[entry] = skb; in cp_start_xmit()
829 mapping = dma_map_single(&cp->pdev->dev, in cp_start_xmit()
832 if (dma_mapping_error(&cp->pdev->dev, mapping)) { in cp_start_xmit()
833 unwind_tx_frag_mapping(cp, skb, first_entry, entry); in cp_start_xmit()
844 txd = &cp->tx_ring[entry]; in cp_start_xmit()
852 cp->tx_opts[entry] = ctrl; in cp_start_xmit()
853 cp->tx_skb[entry] = skb; in cp_start_xmit()
856 txd = &cp->tx_ring[first_entry]; in cp_start_xmit()
865 cp->tx_opts[first_entry] = ctrl; in cp_start_xmit()
866 netif_dbg(cp, tx_queued, cp->dev, "tx queued, slots %d-%d, skblen %d\n", in cp_start_xmit()
869 cp->tx_head = NEXT_TX(entry); in cp_start_xmit()
872 if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1)) in cp_start_xmit()
876 spin_unlock_irqrestore(&cp->lock, intr_flags); in cp_start_xmit()
883 cp->dev->stats.tx_dropped++; in cp_start_xmit()
892 struct cp_private *cp = netdev_priv(dev); in __cp_set_rx_mode() local
921 cp->rx_config = cp_rx_config | rx_mode; in __cp_set_rx_mode()
922 cpw32_f(RxConfig, cp->rx_config); in __cp_set_rx_mode()
931 struct cp_private *cp = netdev_priv(dev); in cp_set_rx_mode() local
933 spin_lock_irqsave (&cp->lock, flags); in cp_set_rx_mode()
935 spin_unlock_irqrestore (&cp->lock, flags); in cp_set_rx_mode()
938 static void __cp_get_stats(struct cp_private *cp) in __cp_get_stats() argument
941 cp->dev->stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff); in __cp_get_stats()
947 struct cp_private *cp = netdev_priv(dev); in cp_get_stats() local
951 spin_lock_irqsave(&cp->lock, flags); in cp_get_stats()
953 __cp_get_stats(cp); in cp_get_stats()
954 spin_unlock_irqrestore(&cp->lock, flags); in cp_get_stats()
959 static void cp_stop_hw (struct cp_private *cp) in cp_stop_hw() argument
967 cp->rx_tail = 0; in cp_stop_hw()
968 cp->tx_head = cp->tx_tail = 0; in cp_stop_hw()
970 netdev_reset_queue(cp->dev); in cp_stop_hw()
973 static void cp_reset_hw (struct cp_private *cp) in cp_reset_hw() argument
986 netdev_err(cp->dev, "hardware reset timeout\n"); in cp_reset_hw()
989 static inline void cp_start_hw (struct cp_private *cp) in cp_start_hw() argument
993 cpw16(CpCmd, cp->cpcmd); in cp_start_hw()
1006 ring_dma = cp->ring_dma; in cp_start_hw()
1022 netdev_reset_queue(cp->dev); in cp_start_hw()
1025 static void cp_enable_irq(struct cp_private *cp) in cp_enable_irq() argument
1030 static void cp_init_hw (struct cp_private *cp) in cp_init_hw() argument
1032 struct net_device *dev = cp->dev; in cp_init_hw()
1034 cp_reset_hw(cp); in cp_init_hw()
1042 cp_start_hw(cp); in cp_init_hw()
1051 cp->wol_enabled = 0; in cp_init_hw()
1060 static int cp_refill_rx(struct cp_private *cp) in cp_refill_rx() argument
1062 struct net_device *dev = cp->dev; in cp_refill_rx()
1069 skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz); in cp_refill_rx()
1073 mapping = dma_map_single(&cp->pdev->dev, skb->data, in cp_refill_rx()
1074 cp->rx_buf_sz, PCI_DMA_FROMDEVICE); in cp_refill_rx()
1075 if (dma_mapping_error(&cp->pdev->dev, mapping)) { in cp_refill_rx()
1079 cp->rx_skb[i] = skb; in cp_refill_rx()
1081 cp->rx_ring[i].opts2 = 0; in cp_refill_rx()
1082 cp->rx_ring[i].addr = cpu_to_le64(mapping); in cp_refill_rx()
1084 cp->rx_ring[i].opts1 = in cp_refill_rx()
1085 cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz); in cp_refill_rx()
1087 cp->rx_ring[i].opts1 = in cp_refill_rx()
1088 cpu_to_le32(DescOwn | cp->rx_buf_sz); in cp_refill_rx()
1094 cp_clean_rings(cp); in cp_refill_rx()
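The refill loop at lines 1079-1088 shows the RX descriptor hand-off: the buffer address goes into the descriptor first, and only then is opts1 written with DescOwn set (plus RingEnd on the last slot so the chip wraps back to descriptor 0), which is what gives the slot to the NIC; cp_rx_poll performs the same re-arm per received packet at lines 544-550. A condensed sketch of one refill step, assuming the surrounding loop over the RX ring that the listing elides:

    /* Sketch: publish one RX descriptor to the NIC (loop body; loop assumed). */
    cp->rx_skb[i] = skb;
    cp->rx_ring[i].opts2 = 0;
    cp->rx_ring[i].addr  = cpu_to_le64(mapping);           /* buffer first   */
    cp->rx_ring[i].opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz |
            ((i == CP_RX_RING_SIZE - 1) ? RingEnd : 0));   /* ownership last */
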
1098 static void cp_init_rings_index (struct cp_private *cp) in cp_init_rings_index() argument
1100 cp->rx_tail = 0; in cp_init_rings_index()
1101 cp->tx_head = cp->tx_tail = 0; in cp_init_rings_index()
1104 static int cp_init_rings (struct cp_private *cp) in cp_init_rings() argument
1106 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); in cp_init_rings()
1107 cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd); in cp_init_rings()
1108 memset(cp->tx_opts, 0, sizeof(cp->tx_opts)); in cp_init_rings()
1110 cp_init_rings_index(cp); in cp_init_rings()
1112 return cp_refill_rx (cp); in cp_init_rings()
1115 static int cp_alloc_rings (struct cp_private *cp) in cp_alloc_rings() argument
1117 struct device *d = &cp->pdev->dev; in cp_alloc_rings()
1121 mem = dma_alloc_coherent(d, CP_RING_BYTES, &cp->ring_dma, GFP_KERNEL); in cp_alloc_rings()
1125 cp->rx_ring = mem; in cp_alloc_rings()
1126 cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE]; in cp_alloc_rings()
1128 rc = cp_init_rings(cp); in cp_alloc_rings()
1130 dma_free_coherent(d, CP_RING_BYTES, cp->rx_ring, cp->ring_dma); in cp_alloc_rings()
1135 static void cp_clean_rings (struct cp_private *cp) in cp_clean_rings() argument
1141 if (cp->rx_skb[i]) { in cp_clean_rings()
1142 desc = cp->rx_ring + i; in cp_clean_rings()
1143 dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr), in cp_clean_rings()
1144 cp->rx_buf_sz, PCI_DMA_FROMDEVICE); in cp_clean_rings()
1145 dev_kfree_skb_any(cp->rx_skb[i]); in cp_clean_rings()
1150 if (cp->tx_skb[i]) { in cp_clean_rings()
1151 struct sk_buff *skb = cp->tx_skb[i]; in cp_clean_rings()
1153 desc = cp->tx_ring + i; in cp_clean_rings()
1154 dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr), in cp_clean_rings()
1159 cp->dev->stats.tx_dropped++; in cp_clean_rings()
1162 netdev_reset_queue(cp->dev); in cp_clean_rings()
1164 memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE); in cp_clean_rings()
1165 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); in cp_clean_rings()
1166 memset(cp->tx_opts, 0, sizeof(cp->tx_opts)); in cp_clean_rings()
1168 memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE); in cp_clean_rings()
1169 memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE); in cp_clean_rings()
1172 static void cp_free_rings (struct cp_private *cp) in cp_free_rings() argument
1174 cp_clean_rings(cp); in cp_free_rings()
1175 dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring, in cp_free_rings()
1176 cp->ring_dma); in cp_free_rings()
1177 cp->rx_ring = NULL; in cp_free_rings()
1178 cp->tx_ring = NULL; in cp_free_rings()
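Lines 1121-1126 and 1175-1176 show that both descriptor rings live in one coherent DMA allocation: the RX descriptors sit at the start of the buffer and the TX descriptors immediately after them, so a single dma_free_coherent() releases both. Roughly, under the assumption that CP_RING_BYTES covers just the two descriptor arrays:

    /* Sketch of the shared ring allocation (the size expression is assumed). */
    void *mem = dma_alloc_coherent(&cp->pdev->dev,
                    sizeof(struct cp_desc) * (CP_RX_RING_SIZE + CP_TX_RING_SIZE),
                    &cp->ring_dma, GFP_KERNEL);
    if (!mem)
            return -ENOMEM;
    cp->rx_ring = mem;                            /* RX ring at offset 0    */
    cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];  /* TX ring right after it */
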
1183 struct cp_private *cp = netdev_priv(dev); in cp_open() local
1184 const int irq = cp->pdev->irq; in cp_open()
1187 netif_dbg(cp, ifup, dev, "enabling interface\n"); in cp_open()
1189 rc = cp_alloc_rings(cp); in cp_open()
1193 napi_enable(&cp->napi); in cp_open()
1195 cp_init_hw(cp); in cp_open()
1201 cp_enable_irq(cp); in cp_open()
1204 mii_check_media(&cp->mii_if, netif_msg_link(cp), true); in cp_open()
1210 napi_disable(&cp->napi); in cp_open()
1211 cp_stop_hw(cp); in cp_open()
1212 cp_free_rings(cp); in cp_open()
1218 struct cp_private *cp = netdev_priv(dev); in cp_close() local
1221 napi_disable(&cp->napi); in cp_close()
1223 netif_dbg(cp, ifdown, dev, "disabling interface\n"); in cp_close()
1225 spin_lock_irqsave(&cp->lock, flags); in cp_close()
1230 cp_stop_hw(cp); in cp_close()
1232 spin_unlock_irqrestore(&cp->lock, flags); in cp_close()
1234 free_irq(cp->pdev->irq, dev); in cp_close()
1236 cp_free_rings(cp); in cp_close()
1242 struct cp_private *cp = netdev_priv(dev); in cp_tx_timeout() local
1250 spin_lock_irqsave(&cp->lock, flags); in cp_tx_timeout()
1252 netif_dbg(cp, tx_err, cp->dev, "TX ring head %d tail %d desc %x\n", in cp_tx_timeout()
1253 cp->tx_head, cp->tx_tail, cpr16(TxDmaOkLowDesc)); in cp_tx_timeout()
1255 netif_dbg(cp, tx_err, cp->dev, in cp_tx_timeout()
1257 i, &cp->tx_ring[i], le32_to_cpu(cp->tx_ring[i].opts1), in cp_tx_timeout()
1258 cp->tx_opts[i], le32_to_cpu(cp->tx_ring[i].opts2), in cp_tx_timeout()
1259 le64_to_cpu(cp->tx_ring[i].addr), in cp_tx_timeout()
1260 cp->tx_skb[i]); in cp_tx_timeout()
1263 cp_stop_hw(cp); in cp_tx_timeout()
1264 cp_clean_rings(cp); in cp_tx_timeout()
1265 rc = cp_init_rings(cp); in cp_tx_timeout()
1266 cp_start_hw(cp); in cp_tx_timeout()
1271 napi_schedule_irqoff(&cp->napi); in cp_tx_timeout()
1273 spin_unlock_irqrestore(&cp->lock, flags); in cp_tx_timeout()
1278 struct cp_private *cp = netdev_priv(dev); in cp_change_mtu() local
1287 cp_set_rxbufsize(cp); /* set new rx buf size */ in cp_change_mtu()
1294 cp_set_rxbufsize(cp); in cp_change_mtu()
1311 struct cp_private *cp = netdev_priv(dev); in mdio_read() local
1314 readw(cp->regs + mii_2_8139_map[location]) : 0; in mdio_read()
1321 struct cp_private *cp = netdev_priv(dev); in mdio_write() local
1332 static int netdev_set_wol (struct cp_private *cp, in netdev_set_wol() argument
1359 cp->wol_enabled = (wol->wolopts) ? 1 : 0; in netdev_set_wol()
1365 static void netdev_get_wol (struct cp_private *cp, in netdev_get_wol() argument
1374 if (!cp->wol_enabled) return; in netdev_get_wol()
1389 struct cp_private *cp = netdev_priv(dev); in cp_get_drvinfo() local
1393 strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info)); in cp_get_drvinfo()
1422 struct cp_private *cp = netdev_priv(dev); in cp_get_settings() local
1426 spin_lock_irqsave(&cp->lock, flags); in cp_get_settings()
1427 rc = mii_ethtool_gset(&cp->mii_if, cmd); in cp_get_settings()
1428 spin_unlock_irqrestore(&cp->lock, flags); in cp_get_settings()
1435 struct cp_private *cp = netdev_priv(dev); in cp_set_settings() local
1439 spin_lock_irqsave(&cp->lock, flags); in cp_set_settings()
1440 rc = mii_ethtool_sset(&cp->mii_if, cmd); in cp_set_settings()
1441 spin_unlock_irqrestore(&cp->lock, flags); in cp_set_settings()
1448 struct cp_private *cp = netdev_priv(dev); in cp_nway_reset() local
1449 return mii_nway_restart(&cp->mii_if); in cp_nway_reset()
1454 struct cp_private *cp = netdev_priv(dev); in cp_get_msglevel() local
1455 return cp->msg_enable; in cp_get_msglevel()
1460 struct cp_private *cp = netdev_priv(dev); in cp_set_msglevel() local
1461 cp->msg_enable = value; in cp_set_msglevel()
1466 struct cp_private *cp = netdev_priv(dev); in cp_set_features() local
1472 spin_lock_irqsave(&cp->lock, flags); in cp_set_features()
1475 cp->cpcmd |= RxChkSum; in cp_set_features()
1477 cp->cpcmd &= ~RxChkSum; in cp_set_features()
1480 cp->cpcmd |= RxVlanOn; in cp_set_features()
1482 cp->cpcmd &= ~RxVlanOn; in cp_set_features()
1484 cpw16_f(CpCmd, cp->cpcmd); in cp_set_features()
1485 spin_unlock_irqrestore(&cp->lock, flags); in cp_set_features()
1493 struct cp_private *cp = netdev_priv(dev); in cp_get_regs() local
1501 spin_lock_irqsave(&cp->lock, flags); in cp_get_regs()
1502 memcpy_fromio(p, cp->regs, CP_REGS_SIZE); in cp_get_regs()
1503 spin_unlock_irqrestore(&cp->lock, flags); in cp_get_regs()
1508 struct cp_private *cp = netdev_priv(dev); in cp_get_wol() local
1511 spin_lock_irqsave (&cp->lock, flags); in cp_get_wol()
1512 netdev_get_wol (cp, wol); in cp_get_wol()
1513 spin_unlock_irqrestore (&cp->lock, flags); in cp_get_wol()
1518 struct cp_private *cp = netdev_priv(dev); in cp_set_wol() local
1522 spin_lock_irqsave (&cp->lock, flags); in cp_set_wol()
1523 rc = netdev_set_wol (cp, wol); in cp_set_wol()
1524 spin_unlock_irqrestore (&cp->lock, flags); in cp_set_wol()
1544 struct cp_private *cp = netdev_priv(dev); in cp_get_ethtool_stats() local
1549 nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats), in cp_get_ethtool_stats()
1582 tmp_stats[i++] = cp->cp_stats.rx_frags; in cp_get_ethtool_stats()
1585 dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma); in cp_get_ethtool_stats()
1611 struct cp_private *cp = netdev_priv(dev); in cp_ioctl() local
1618 spin_lock_irqsave(&cp->lock, flags); in cp_ioctl()
1619 rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL); in cp_ioctl()
1620 spin_unlock_irqrestore(&cp->lock, flags); in cp_ioctl()
1626 struct cp_private *cp = netdev_priv(dev); in cp_set_mac_address() local
1634 spin_lock_irq(&cp->lock); in cp_set_mac_address()
1641 spin_unlock_irq(&cp->lock); in cp_set_mac_address()
1765 struct cp_private *cp = netdev_priv(dev); in cp_get_eeprom_len() local
1768 spin_lock_irq(&cp->lock); in cp_get_eeprom_len()
1769 size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128; in cp_get_eeprom_len()
1770 spin_unlock_irq(&cp->lock); in cp_get_eeprom_len()
1778 struct cp_private *cp = netdev_priv(dev); in cp_get_eeprom() local
1787 spin_lock_irq(&cp->lock); in cp_get_eeprom()
1789 addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6; in cp_get_eeprom()
1792 val = read_eeprom(cp->regs, offset, addr_len); in cp_get_eeprom()
1798 val = read_eeprom(cp->regs, offset, addr_len); in cp_get_eeprom()
1805 val = read_eeprom(cp->regs, offset, addr_len); in cp_get_eeprom()
1809 spin_unlock_irq(&cp->lock); in cp_get_eeprom()
1816 struct cp_private *cp = netdev_priv(dev); in cp_set_eeprom() local
1826 spin_lock_irq(&cp->lock); in cp_set_eeprom()
1828 addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6; in cp_set_eeprom()
1831 val = read_eeprom(cp->regs, offset, addr_len) & 0xff; in cp_set_eeprom()
1833 write_eeprom(cp->regs, offset, val, addr_len); in cp_set_eeprom()
1840 write_eeprom(cp->regs, offset, val, addr_len); in cp_set_eeprom()
1845 val = read_eeprom(cp->regs, offset, addr_len) & 0xff00; in cp_set_eeprom()
1847 write_eeprom(cp->regs, offset, val, addr_len); in cp_set_eeprom()
1850 spin_unlock_irq(&cp->lock); in cp_set_eeprom()
1855 static void cp_set_d3_state (struct cp_private *cp) in cp_set_d3_state() argument
1857 pci_enable_wake(cp->pdev, PCI_D0, 1); /* Enable PME# generation */ in cp_set_d3_state()
1858 pci_set_power_state (cp->pdev, PCI_D3hot); in cp_set_d3_state()
1892 struct cp_private *cp; in cp_init_one() local
1913 cp = netdev_priv(dev); in cp_init_one()
1914 cp->pdev = pdev; in cp_init_one()
1915 cp->dev = dev; in cp_init_one()
1916 cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug); in cp_init_one()
1917 spin_lock_init (&cp->lock); in cp_init_one()
1918 cp->mii_if.dev = dev; in cp_init_one()
1919 cp->mii_if.mdio_read = mdio_read; in cp_init_one()
1920 cp->mii_if.mdio_write = mdio_write; in cp_init_one()
1921 cp->mii_if.phy_id = CP_INTERNAL_PHY; in cp_init_one()
1922 cp->mii_if.phy_id_mask = 0x1f; in cp_init_one()
1923 cp->mii_if.reg_num_mask = 0x1f; in cp_init_one()
1924 cp_set_rxbufsize(cp); in cp_init_one()
1973 cp->cpcmd = (pci_using_dac ? PCIDAC : 0) | in cp_init_one()
1987 cp->regs = regs; in cp_init_one()
1989 cp_stop_hw(cp); in cp_init_one()
1998 netif_napi_add(dev, &cp->napi, cp_rx_poll, 16); in cp_init_one()
2025 if (cp->wol_enabled) in cp_init_one()
2026 cp_set_d3_state (cp); in cp_init_one()
2046 struct cp_private *cp = netdev_priv(dev); in cp_remove_one() local
2049 iounmap(cp->regs); in cp_remove_one()
2050 if (cp->wol_enabled) in cp_remove_one()
2062 struct cp_private *cp = netdev_priv(dev); in cp_suspend() local
2071 spin_lock_irqsave (&cp->lock, flags); in cp_suspend()
2077 spin_unlock_irqrestore (&cp->lock, flags); in cp_suspend()
2080 pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled); in cp_suspend()
2089 struct cp_private *cp = netdev_priv(dev); in cp_resume() local
2102 cp_init_rings_index (cp); in cp_resume()
2103 cp_init_hw (cp); in cp_resume()
2104 cp_enable_irq(cp); in cp_resume()
2107 spin_lock_irqsave (&cp->lock, flags); in cp_resume()
2109 mii_check_media(&cp->mii_if, netif_msg_link(cp), false); in cp_resume()
2111 spin_unlock_irqrestore (&cp->lock, flags); in cp_resume()