Lines Matching full:yp

183 the 'yp->tx_full' flag.
187 empty by incrementing the dirty_tx mark. Iff the 'yp->tx_full' flag is set, it
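The two comment fragments above (source lines 183 and 187) describe the driver's Tx bookkeeping: the transmit path advances yp->cur_tx and raises yp->tx_full when the queue fills, while the interrupt handler retires entries by advancing dirty_tx and clears tx_full once enough slots have drained (see the matches in yellowfin_start_xmit() and yellowfin_interrupt() below). A minimal standalone sketch of that handshake follows; the struct, function names, and thresholds are illustrative assumptions, not the driver's exact code.

/*
 * Sketch (not driver code) of the cur_tx/dirty_tx/tx_full handshake
 * described above.  Names, ring sizes, and hysteresis values are
 * illustrative; only the counter-based full/empty logic mirrors the
 * matched driver lines.
 */
#include <stdio.h>

#define TX_RING_SIZE  16
#define TX_QUEUE_SIZE 12   /* stop queuing before the ring is completely full */

struct tx_ring_state {
	unsigned int cur_tx;    /* next slot to fill (transmit path) */
	unsigned int dirty_tx;  /* oldest unreaped slot (interrupt path) */
	int tx_full;            /* set when no more packets may be queued */
};

/* Transmit path: claim one slot, mark the queue full if no room remains. */
static int queue_packet(struct tx_ring_state *s)
{
	if (s->tx_full)
		return -1;                        /* caller must back off */
	s->cur_tx++;                              /* entry = cur_tx % TX_RING_SIZE */
	if (s->cur_tx - s->dirty_tx >= TX_QUEUE_SIZE)
		s->tx_full = 1;
	return 0;
}

/* Reap path: retire one completed slot, reopen the queue when space frees. */
static void reap_packet(struct tx_ring_state *s)
{
	if (s->cur_tx == s->dirty_tx)
		return;                           /* nothing outstanding */
	s->dirty_tx++;
	if (s->tx_full && s->cur_tx - s->dirty_tx < TX_QUEUE_SIZE - 4)
		s->tx_full = 0;                   /* hysteresis before restarting */
}

int main(void)
{
	struct tx_ring_state s = { 0, 0, 0 };
	int i;

	for (i = 0; i < TX_QUEUE_SIZE + 2; i++)
		if (queue_packet(&s) < 0)
			printf("queue full at packet %d\n", i);
	while (s.cur_tx != s.dirty_tx)
		reap_packet(&s);
	printf("drained: cur_tx=%u dirty_tx=%u tx_full=%d\n",
	       s.cur_tx, s.dirty_tx, s.tx_full);
	return 0;
}

The TX_QUEUE_SIZE - 4 slack in reap_packet() echoes the hysteresis visible in the yellowfin_interrupt() matches below (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4), so the queue is not reopened the instant a single slot frees.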
566 struct yellowfin_private *yp = netdev_priv(dev); in yellowfin_open() local
567 const int irq = yp->pci_dev->irq; in yellowfin_open()
568 void __iomem *ioaddr = yp->base; in yellowfin_open()
582 iowrite32(yp->rx_ring_dma, ioaddr + RxPtr); in yellowfin_open()
583 iowrite32(yp->tx_ring_dma, ioaddr + TxPtr); in yellowfin_open()
604 yp->tx_threshold = 32; in yellowfin_open()
605 iowrite32(yp->tx_threshold, ioaddr + TxThreshold); in yellowfin_open()
608 dev->if_port = yp->default_port; in yellowfin_open()
613 if (yp->drv_flags & IsGigabit) { in yellowfin_open()
615 yp->full_duplex = 1; in yellowfin_open()
620 iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg); in yellowfin_open()
635 timer_setup(&yp->timer, yellowfin_timer, 0); in yellowfin_open()
636 yp->timer.expires = jiffies + 3*HZ; in yellowfin_open()
637 add_timer(&yp->timer); in yellowfin_open()
648 struct yellowfin_private *yp = from_timer(yp, t, timer); in yellowfin_timer() local
649 struct net_device *dev = pci_get_drvdata(yp->pci_dev); in yellowfin_timer()
650 void __iomem *ioaddr = yp->base; in yellowfin_timer()
658 if (yp->mii_cnt) { in yellowfin_timer()
659 int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR); in yellowfin_timer()
660 int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA); in yellowfin_timer()
661 int negotiated = lpa & yp->advertising; in yellowfin_timer()
664 yp->phys[0], bmsr, lpa); in yellowfin_timer()
666 yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated); in yellowfin_timer()
668 iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg); in yellowfin_timer()
676 yp->timer.expires = jiffies + next_tick; in yellowfin_timer()
677 add_timer(&yp->timer); in yellowfin_timer()
682 struct yellowfin_private *yp = netdev_priv(dev); in yellowfin_tx_timeout() local
683 void __iomem *ioaddr = yp->base; in yellowfin_tx_timeout()
686 yp->cur_tx, yp->dirty_tx, in yellowfin_tx_timeout()
693 pr_warn(" Rx ring %p: ", yp->rx_ring); in yellowfin_tx_timeout()
695 pr_cont(" %08x", yp->rx_ring[i].result_status); in yellowfin_tx_timeout()
697 pr_warn(" Tx ring %p: ", yp->tx_ring); in yellowfin_tx_timeout()
700 yp->tx_status[i].tx_errs, in yellowfin_tx_timeout()
701 yp->tx_ring[i].result_status); in yellowfin_tx_timeout()
710 iowrite32(0x10001000, yp->base + TxCtrl); in yellowfin_tx_timeout()
711 if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE) in yellowfin_tx_timeout()
721 struct yellowfin_private *yp = netdev_priv(dev); in yellowfin_init_ring() local
724 yp->tx_full = 0; in yellowfin_init_ring()
725 yp->cur_rx = yp->cur_tx = 0; in yellowfin_init_ring()
726 yp->dirty_tx = 0; in yellowfin_init_ring()
728 yp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); in yellowfin_init_ring()
731 yp->rx_ring[i].dbdma_cmd = in yellowfin_init_ring()
732 cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz); in yellowfin_init_ring()
733 yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma + in yellowfin_init_ring()
738 struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2); in yellowfin_init_ring()
739 yp->rx_skbuff[i] = skb; in yellowfin_init_ring()
743 yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev, in yellowfin_init_ring()
744 skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE)); in yellowfin_init_ring()
748 dev_kfree_skb(yp->rx_skbuff[j]); in yellowfin_init_ring()
751 yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP); in yellowfin_init_ring()
752 yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE); in yellowfin_init_ring()
758 yp->tx_skbuff[i] = NULL; in yellowfin_init_ring()
759 yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP); in yellowfin_init_ring()
760 yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma + in yellowfin_init_ring()
764 yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS); in yellowfin_init_ring()
770 yp->tx_skbuff[i] = 0; in yellowfin_init_ring()
772 yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP); in yellowfin_init_ring()
773 yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma + in yellowfin_init_ring()
776 if (yp->flags & FullTxStatus) { in yellowfin_init_ring()
777 yp->tx_ring[j].dbdma_cmd = in yellowfin_init_ring()
778 cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status)); in yellowfin_init_ring()
779 yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status); in yellowfin_init_ring()
780 yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma + in yellowfin_init_ring()
784 yp->tx_ring[j].dbdma_cmd = in yellowfin_init_ring()
786 yp->tx_ring[j].request_cnt = 2; in yellowfin_init_ring()
788 yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma + in yellowfin_init_ring()
790 &(yp->tx_status[0].tx_errs) - in yellowfin_init_ring()
791 &(yp->tx_status[0])); in yellowfin_init_ring()
793 yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma + in yellowfin_init_ring()
797 yp->tx_ring[++j].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS); in yellowfin_init_ring()
800 yp->tx_tail_desc = &yp->tx_status[0]; in yellowfin_init_ring()
807 struct yellowfin_private *yp = netdev_priv(dev); in yellowfin_start_xmit() local
817 entry = yp->cur_tx % TX_RING_SIZE; in yellowfin_start_xmit()
825 yp->tx_skbuff[entry] = NULL; in yellowfin_start_xmit()
831 yp->tx_skbuff[entry] = skb; in yellowfin_start_xmit()
834 yp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev, in yellowfin_start_xmit()
836 yp->tx_ring[entry].result_status = 0; in yellowfin_start_xmit()
839 yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP); in yellowfin_start_xmit()
840 yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd = in yellowfin_start_xmit()
843 yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP); in yellowfin_start_xmit()
844 yp->tx_ring[entry].dbdma_cmd = in yellowfin_start_xmit()
847 yp->cur_tx++; in yellowfin_start_xmit()
849 yp->tx_ring[entry<<1].request_cnt = len; in yellowfin_start_xmit()
850 yp->tx_ring[entry<<1].addr = cpu_to_le32(pci_map_single(yp->pci_dev, in yellowfin_start_xmit()
855 yp->cur_tx++; in yellowfin_start_xmit()
857 unsigned next_entry = yp->cur_tx % TX_RING_SIZE; in yellowfin_start_xmit()
858 yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP); in yellowfin_start_xmit()
862 yp->tx_ring[entry<<1].dbdma_cmd = in yellowfin_start_xmit()
870 iowrite32(0x10001000, yp->base + TxCtrl); in yellowfin_start_xmit()
872 if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE) in yellowfin_start_xmit()
875 yp->tx_full = 1; in yellowfin_start_xmit()
879 yp->cur_tx, entry); in yellowfin_start_xmit()
889 struct yellowfin_private *yp; in yellowfin_interrupt() local
894 yp = netdev_priv(dev); in yellowfin_interrupt()
895 ioaddr = yp->base; in yellowfin_interrupt()
897 spin_lock (&yp->lock); in yellowfin_interrupt()
916 for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) { in yellowfin_interrupt()
917 int entry = yp->dirty_tx % TX_RING_SIZE; in yellowfin_interrupt()
920 if (yp->tx_ring[entry].result_status == 0) in yellowfin_interrupt()
922 skb = yp->tx_skbuff[entry]; in yellowfin_interrupt()
926 pci_unmap_single(yp->pci_dev, le32_to_cpu(yp->tx_ring[entry].addr), in yellowfin_interrupt()
929 yp->tx_skbuff[entry] = NULL; in yellowfin_interrupt()
931 if (yp->tx_full && in yellowfin_interrupt()
932 yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) { in yellowfin_interrupt()
934 yp->tx_full = 0; in yellowfin_interrupt()
938 if ((intr_status & IntrTxDone) || (yp->tx_tail_desc->tx_errs)) { in yellowfin_interrupt()
939 unsigned dirty_tx = yp->dirty_tx; in yellowfin_interrupt()
941 for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0; in yellowfin_interrupt()
945 u16 tx_errs = yp->tx_status[entry].tx_errs; in yellowfin_interrupt()
952 yp->tx_status[entry].tx_cnt, in yellowfin_interrupt()
953 yp->tx_status[entry].tx_errs, in yellowfin_interrupt()
954 yp->tx_status[entry].total_tx_cnt, in yellowfin_interrupt()
955 yp->tx_status[entry].paused); in yellowfin_interrupt()
959 skb = yp->tx_skbuff[entry]; in yellowfin_interrupt()
983 pci_unmap_single(yp->pci_dev, in yellowfin_interrupt()
984 yp->tx_ring[entry<<1].addr, skb->len, in yellowfin_interrupt()
987 yp->tx_skbuff[entry] = 0; in yellowfin_interrupt()
989 yp->tx_status[entry].tx_errs = 0; in yellowfin_interrupt()
993 if (yp->cur_tx - dirty_tx > TX_RING_SIZE) { in yellowfin_interrupt()
995 dirty_tx, yp->cur_tx, yp->tx_full); in yellowfin_interrupt()
1000 if (yp->tx_full && in yellowfin_interrupt()
1001 yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) { in yellowfin_interrupt()
1003 yp->tx_full = 0; in yellowfin_interrupt()
1007 yp->dirty_tx = dirty_tx; in yellowfin_interrupt()
1008 yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE]; in yellowfin_interrupt()
1027 spin_unlock (&yp->lock); in yellowfin_interrupt()
1035 struct yellowfin_private *yp = netdev_priv(dev); in yellowfin_rx() local
1036 int entry = yp->cur_rx % RX_RING_SIZE; in yellowfin_rx()
1037 int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx; in yellowfin_rx()
1041 entry, yp->rx_ring[entry].result_status); in yellowfin_rx()
1043 entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr, in yellowfin_rx()
1044 yp->rx_ring[entry].result_status); in yellowfin_rx()
1049 struct yellowfin_desc *desc = &yp->rx_ring[entry]; in yellowfin_rx()
1050 struct sk_buff *rx_skb = yp->rx_skbuff[entry]; in yellowfin_rx()
1058 pci_dma_sync_single_for_cpu(yp->pci_dev, le32_to_cpu(desc->addr), in yellowfin_rx()
1059 yp->rx_buf_sz, PCI_DMA_FROMDEVICE); in yellowfin_rx()
1078 } else if ((yp->drv_flags & IsGigabit) && (frame_status & 0x0038)) { in yellowfin_rx()
1088 } else if ( !(yp->drv_flags & IsGigabit) && in yellowfin_rx()
1098 } else if ((yp->flags & HasMACAddrBug) && in yellowfin_rx()
1099 !ether_addr_equal(le32_to_cpu(yp->rx_ring_dma + in yellowfin_rx()
1102 !ether_addr_equal(le32_to_cpu(yp->rx_ring_dma + in yellowfin_rx()
1112 (yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]); in yellowfin_rx()
1124 pci_unmap_single(yp->pci_dev, in yellowfin_rx()
1125 le32_to_cpu(yp->rx_ring[entry].addr), in yellowfin_rx()
1126 yp->rx_buf_sz, in yellowfin_rx()
1128 yp->rx_skbuff[entry] = NULL; in yellowfin_rx()
1136 pci_dma_sync_single_for_device(yp->pci_dev, in yellowfin_rx()
1138 yp->rx_buf_sz, in yellowfin_rx()
1146 entry = (++yp->cur_rx) % RX_RING_SIZE; in yellowfin_rx()
1150 for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) { in yellowfin_rx()
1151 entry = yp->dirty_rx % RX_RING_SIZE; in yellowfin_rx()
1152 if (yp->rx_skbuff[entry] == NULL) { in yellowfin_rx()
1153 struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2); in yellowfin_rx()
1156 yp->rx_skbuff[entry] = skb; in yellowfin_rx()
1158 yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev, in yellowfin_rx()
1159 skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE)); in yellowfin_rx()
1161 yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP); in yellowfin_rx()
1162 yp->rx_ring[entry].result_status = 0; /* Clear complete bit. */ in yellowfin_rx()
1164 yp->rx_ring[entry - 1].dbdma_cmd = in yellowfin_rx()
1165 cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz); in yellowfin_rx()
1167 yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd = in yellowfin_rx()
1169 | yp->rx_buf_sz); in yellowfin_rx()
1187 struct yellowfin_private *yp = netdev_priv(dev); in yellowfin_close() local
1188 void __iomem *ioaddr = yp->base; in yellowfin_close()
1199 yp->cur_tx, yp->dirty_tx, in yellowfin_close()
1200 yp->cur_rx, yp->dirty_rx); in yellowfin_close()
1210 del_timer(&yp->timer); in yellowfin_close()
1215 (unsigned long long)yp->tx_ring_dma); in yellowfin_close()
1218 ioread32(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ', in yellowfin_close()
1219 i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr, in yellowfin_close()
1220 yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status); in yellowfin_close()
1221 printk(KERN_DEBUG " Tx status %p:\n", yp->tx_status); in yellowfin_close()
1224 i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs, in yellowfin_close()
1225 yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused); in yellowfin_close()
1228 (unsigned long long)yp->rx_ring_dma); in yellowfin_close()
1231 ioread32(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ', in yellowfin_close()
1232 i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr, in yellowfin_close()
1233 yp->rx_ring[i].result_status); in yellowfin_close()
1235 if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) { in yellowfin_close()
1241 get_unaligned(((u16*)yp->rx_ring[i].addr) + j)); in yellowfin_close()
1249 free_irq(yp->pci_dev->irq, dev); in yellowfin_close()
1253 yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP); in yellowfin_close()
1254 yp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */ in yellowfin_close()
1255 if (yp->rx_skbuff[i]) { in yellowfin_close()
1256 dev_kfree_skb(yp->rx_skbuff[i]); in yellowfin_close()
1258 yp->rx_skbuff[i] = NULL; in yellowfin_close()
1261 if (yp->tx_skbuff[i]) in yellowfin_close()
1262 dev_kfree_skb(yp->tx_skbuff[i]); in yellowfin_close()
1263 yp->tx_skbuff[i] = NULL; in yellowfin_close()
1280 struct yellowfin_private *yp = netdev_priv(dev); in set_rx_mode() local
1281 void __iomem *ioaddr = yp->base; in set_rx_mode()
1303 if (yp->drv_flags & HasMulticastBug) { in set_rx_mode()