Lines Matching refs:mp
89 static inline void mace_clean_rings(struct mace_data *mp);
101 struct mace_data *mp; in mace_probe() local
146 mp = netdev_priv(dev); in mace_probe()
147 mp->mdev = mdev; in mace_probe()
151 mp->mace = ioremap(dev->base_addr, 0x1000); in mace_probe()
152 if (mp->mace == NULL) { in mace_probe()
163 mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) | in mace_probe()
164 in_8(&mp->mace->chipid_lo); in mace_probe()
167 mp = netdev_priv(dev); in mace_probe()
168 mp->maccc = ENXMT | ENRCV; in mace_probe()
170 mp->tx_dma = ioremap(macio_resource_start(mdev, 1), 0x1000); in mace_probe()
171 if (mp->tx_dma == NULL) { in mace_probe()
176 mp->tx_dma_intr = macio_irq(mdev, 1); in mace_probe()
178 mp->rx_dma = ioremap(macio_resource_start(mdev, 2), 0x1000); in mace_probe()
179 if (mp->rx_dma == NULL) { in mace_probe()
184 mp->rx_dma_intr = macio_irq(mdev, 2); in mace_probe()
186 mp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(mp + 1); in mace_probe()
187 mp->rx_cmds = mp->tx_cmds + NCMDS_TX * N_TX_RING + 1; in mace_probe()
189 memset((char *) mp->tx_cmds, 0, in mace_probe()
191 init_timer(&mp->tx_timeout); in mace_probe()
192 spin_lock_init(&mp->lock); in mace_probe()
193 mp->timeout_active = 0; in mace_probe()
196 mp->port_aaui = port_aaui; in mace_probe()
200 mp->port_aaui = 1; in mace_probe()
203 mp->port_aaui = 1; in mace_probe()
205 mp->port_aaui = 0; in mace_probe()
226 rc = request_irq(mp->tx_dma_intr, mace_txdma_intr, 0, "MACE-txdma", dev); in mace_probe()
228 printk(KERN_ERR "MACE: can't get irq %d\n", mp->tx_dma_intr); in mace_probe()
231 rc = request_irq(mp->rx_dma_intr, mace_rxdma_intr, 0, "MACE-rxdma", dev); in mace_probe()
233 printk(KERN_ERR "MACE: can't get irq %d\n", mp->rx_dma_intr); in mace_probe()
245 mp->chipid >> 8, mp->chipid & 0xff); in mace_probe()
256 iounmap(mp->rx_dma); in mace_probe()
258 iounmap(mp->tx_dma); in mace_probe()
260 iounmap(mp->mace); in mace_probe()
272 struct mace_data *mp; in mace_remove() local
278 mp = netdev_priv(dev); in mace_remove()
283 free_irq(mp->tx_dma_intr, dev); in mace_remove()
284 free_irq(mp->rx_dma_intr, dev); in mace_remove()
286 iounmap(mp->rx_dma); in mace_remove()
287 iounmap(mp->tx_dma); in mace_remove()
288 iounmap(mp->mace); in mace_remove()
314 struct mace_data *mp = netdev_priv(dev); in mace_reset() local
315 volatile struct mace __iomem *mb = mp->mace; in mace_reset()
347 if (mp->chipid == BROKEN_ADDRCHG_REV) in mace_reset()
358 if (mp->chipid != BROKEN_ADDRCHG_REV) in mace_reset()
361 if (mp->port_aaui) in mace_reset()
369 struct mace_data *mp = netdev_priv(dev); in __mace_set_address() local
370 volatile struct mace __iomem *mb = mp->mace; in __mace_set_address()
375 if (mp->chipid == BROKEN_ADDRCHG_REV) in __mace_set_address()
384 if (mp->chipid != BROKEN_ADDRCHG_REV) in __mace_set_address()
390 struct mace_data *mp = netdev_priv(dev); in mace_set_address() local
391 volatile struct mace __iomem *mb = mp->mace; in mace_set_address()
394 spin_lock_irqsave(&mp->lock, flags); in mace_set_address()
399 out_8(&mb->maccc, mp->maccc); in mace_set_address()
401 spin_unlock_irqrestore(&mp->lock, flags); in mace_set_address()
405 static inline void mace_clean_rings(struct mace_data *mp) in mace_clean_rings() argument
411 if (mp->rx_bufs[i] != NULL) { in mace_clean_rings()
412 dev_kfree_skb(mp->rx_bufs[i]); in mace_clean_rings()
413 mp->rx_bufs[i] = NULL; in mace_clean_rings()
416 for (i = mp->tx_empty; i != mp->tx_fill; ) { in mace_clean_rings()
417 dev_kfree_skb(mp->tx_bufs[i]); in mace_clean_rings()
425 struct mace_data *mp = netdev_priv(dev); in mace_open() local
426 volatile struct mace __iomem *mb = mp->mace; in mace_open()
427 volatile struct dbdma_regs __iomem *rd = mp->rx_dma; in mace_open()
428 volatile struct dbdma_regs __iomem *td = mp->tx_dma; in mace_open()
438 mace_clean_rings(mp); in mace_open()
439 memset((char *)mp->rx_cmds, 0, N_RX_RING * sizeof(struct dbdma_cmd)); in mace_open()
440 cp = mp->rx_cmds; in mace_open()
449 mp->rx_bufs[i] = skb; in mace_open()
456 mp->rx_bufs[i] = NULL; in mace_open()
458 mp->rx_fill = i; in mace_open()
459 mp->rx_empty = 0; in mace_open()
464 st_le32(&cp->cmd_dep, virt_to_bus(mp->rx_cmds)); in mace_open()
468 out_le32(&rd->cmdptr, virt_to_bus(mp->rx_cmds)); in mace_open()
472 cp = mp->tx_cmds + NCMDS_TX * N_TX_RING; in mace_open()
474 st_le32(&cp->cmd_dep, virt_to_bus(mp->tx_cmds)); in mace_open()
478 out_le32(&td->cmdptr, virt_to_bus(mp->tx_cmds)); in mace_open()
479 mp->tx_fill = 0; in mace_open()
480 mp->tx_empty = 0; in mace_open()
481 mp->tx_fullup = 0; in mace_open()
482 mp->tx_active = 0; in mace_open()
483 mp->tx_bad_runt = 0; in mace_open()
486 out_8(&mb->maccc, mp->maccc); in mace_open()
495 struct mace_data *mp = netdev_priv(dev); in mace_close() local
496 volatile struct mace __iomem *mb = mp->mace; in mace_close()
497 volatile struct dbdma_regs __iomem *rd = mp->rx_dma; in mace_close()
498 volatile struct dbdma_regs __iomem *td = mp->tx_dma; in mace_close()
508 mace_clean_rings(mp); in mace_close()
515 struct mace_data *mp = netdev_priv(dev); in mace_set_timeout() local
517 if (mp->timeout_active) in mace_set_timeout()
518 del_timer(&mp->tx_timeout); in mace_set_timeout()
519 mp->tx_timeout.expires = jiffies + TX_TIMEOUT; in mace_set_timeout()
520 mp->tx_timeout.function = mace_tx_timeout; in mace_set_timeout()
521 mp->tx_timeout.data = (unsigned long) dev; in mace_set_timeout()
522 add_timer(&mp->tx_timeout); in mace_set_timeout()
523 mp->timeout_active = 1; in mace_set_timeout()
528 struct mace_data *mp = netdev_priv(dev); in mace_xmit_start() local
529 volatile struct dbdma_regs __iomem *td = mp->tx_dma; in mace_xmit_start()
535 spin_lock_irqsave(&mp->lock, flags); in mace_xmit_start()
536 fill = mp->tx_fill; in mace_xmit_start()
540 if (next == mp->tx_empty) { in mace_xmit_start()
542 mp->tx_fullup = 1; in mace_xmit_start()
543 spin_unlock_irqrestore(&mp->lock, flags); in mace_xmit_start()
546 spin_unlock_irqrestore(&mp->lock, flags); in mace_xmit_start()
554 mp->tx_bufs[fill] = skb; in mace_xmit_start()
555 cp = mp->tx_cmds + NCMDS_TX * fill; in mace_xmit_start()
559 np = mp->tx_cmds + NCMDS_TX * next; in mace_xmit_start()
563 spin_lock_irqsave(&mp->lock, flags); in mace_xmit_start()
564 mp->tx_fill = next; in mace_xmit_start()
565 if (!mp->tx_bad_runt && mp->tx_active < MAX_TX_ACTIVE) { in mace_xmit_start()
569 ++mp->tx_active; in mace_xmit_start()
574 if (next == mp->tx_empty) in mace_xmit_start()
576 spin_unlock_irqrestore(&mp->lock, flags); in mace_xmit_start()
583 struct mace_data *mp = netdev_priv(dev); in mace_set_multicast() local
584 volatile struct mace __iomem *mb = mp->mace; in mace_set_multicast()
589 spin_lock_irqsave(&mp->lock, flags); in mace_set_multicast()
590 mp->maccc &= ~PROM; in mace_set_multicast()
592 mp->maccc |= PROM; in mace_set_multicast()
617 if (mp->chipid == BROKEN_ADDRCHG_REV) in mace_set_multicast()
626 if (mp->chipid != BROKEN_ADDRCHG_REV) in mace_set_multicast()
630 out_8(&mb->maccc, mp->maccc); in mace_set_multicast()
631 spin_unlock_irqrestore(&mp->lock, flags); in mace_set_multicast()
634 static void mace_handle_misc_intrs(struct mace_data *mp, int intr, struct net_device *dev) in mace_handle_misc_intrs() argument
636 volatile struct mace __iomem *mb = mp->mace; in mace_handle_misc_intrs()
658 struct mace_data *mp = netdev_priv(dev); in mace_interrupt() local
659 volatile struct mace __iomem *mb = mp->mace; in mace_interrupt()
660 volatile struct dbdma_regs __iomem *td = mp->tx_dma; in mace_interrupt()
667 spin_lock_irqsave(&mp->lock, flags); in mace_interrupt()
670 mace_handle_misc_intrs(mp, intr, dev); in mace_interrupt()
672 i = mp->tx_empty; in mace_interrupt()
674 del_timer(&mp->tx_timeout); in mace_interrupt()
675 mp->timeout_active = 0; in mace_interrupt()
683 mace_handle_misc_intrs(mp, intr, dev); in mace_interrupt()
684 if (mp->tx_bad_runt) { in mace_interrupt()
686 mp->tx_bad_runt = 0; in mace_interrupt()
722 cp = mp->tx_cmds + NCMDS_TX * i; in mace_interrupt()
733 mp->tx_bad_runt = 1; in mace_interrupt()
750 if (i == mp->tx_fill) { in mace_interrupt()
763 dev->stats.tx_bytes += mp->tx_bufs[i]->len; in mace_interrupt()
766 dev_kfree_skb_irq(mp->tx_bufs[i]); in mace_interrupt()
767 --mp->tx_active; in mace_interrupt()
776 if (i != mp->tx_empty) { in mace_interrupt()
777 mp->tx_fullup = 0; in mace_interrupt()
780 mp->tx_empty = i; in mace_interrupt()
781 i += mp->tx_active; in mace_interrupt()
784 if (!mp->tx_bad_runt && i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE) { in mace_interrupt()
787 cp = mp->tx_cmds + NCMDS_TX * i; in mace_interrupt()
790 ++mp->tx_active; in mace_interrupt()
793 } while (i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE); in mace_interrupt()
797 spin_unlock_irqrestore(&mp->lock, flags); in mace_interrupt()
804 struct mace_data *mp = netdev_priv(dev); in mace_tx_timeout() local
805 volatile struct mace __iomem *mb = mp->mace; in mace_tx_timeout()
806 volatile struct dbdma_regs __iomem *td = mp->tx_dma; in mace_tx_timeout()
807 volatile struct dbdma_regs __iomem *rd = mp->rx_dma; in mace_tx_timeout()
812 spin_lock_irqsave(&mp->lock, flags); in mace_tx_timeout()
813 mp->timeout_active = 0; in mace_tx_timeout()
814 if (mp->tx_active == 0 && !mp->tx_bad_runt) in mace_tx_timeout()
818 mace_handle_misc_intrs(mp, in_8(&mb->ir), dev); in mace_tx_timeout()
820 cp = mp->tx_cmds + NCMDS_TX * mp->tx_empty; in mace_tx_timeout()
836 i = mp->tx_empty; in mace_tx_timeout()
837 mp->tx_active = 0; in mace_tx_timeout()
839 if (mp->tx_bad_runt) { in mace_tx_timeout()
840 mp->tx_bad_runt = 0; in mace_tx_timeout()
841 } else if (i != mp->tx_fill) { in mace_tx_timeout()
842 dev_kfree_skb(mp->tx_bufs[i]); in mace_tx_timeout()
845 mp->tx_empty = i; in mace_tx_timeout()
847 mp->tx_fullup = 0; in mace_tx_timeout()
849 if (i != mp->tx_fill) { in mace_tx_timeout()
850 cp = mp->tx_cmds + NCMDS_TX * i; in mace_tx_timeout()
855 ++mp->tx_active; in mace_tx_timeout()
861 out_8(&mb->maccc, mp->maccc); in mace_tx_timeout()
864 spin_unlock_irqrestore(&mp->lock, flags); in mace_tx_timeout()
875 struct mace_data *mp = netdev_priv(dev); in mace_rxdma_intr() local
876 volatile struct dbdma_regs __iomem *rd = mp->rx_dma; in mace_rxdma_intr()
885 spin_lock_irqsave(&mp->lock, flags); in mace_rxdma_intr()
886 for (i = mp->rx_empty; i != mp->rx_fill; ) { in mace_rxdma_intr()
887 cp = mp->rx_cmds + i; in mace_rxdma_intr()
893 np = mp->rx_cmds + next; in mace_rxdma_intr()
894 if (next != mp->rx_fill in mace_rxdma_intr()
904 skb = mp->rx_bufs[i]; in mace_rxdma_intr()
931 mp->rx_bufs[i] = NULL; in mace_rxdma_intr()
943 mp->rx_empty = i; in mace_rxdma_intr()
945 i = mp->rx_fill; in mace_rxdma_intr()
950 if (next == mp->rx_empty) in mace_rxdma_intr()
952 cp = mp->rx_cmds + i; in mace_rxdma_intr()
953 skb = mp->rx_bufs[i]; in mace_rxdma_intr()
958 mp->rx_bufs[i] = skb; in mace_rxdma_intr()
975 if (i != mp->rx_fill) { in mace_rxdma_intr()
977 mp->rx_fill = i; in mace_rxdma_intr()
979 spin_unlock_irqrestore(&mp->lock, flags); in mace_rxdma_intr()
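
Taken together, these references outline the per-device private state the MACE driver keeps behind netdev_priv(dev). The sketch below reconstructs the struct mace_data fields implied by the lines above; it is an inference only, with types guessed from usage (the authoritative definition lives in the driver's own header), and N_RX_RING, N_TX_RING and NCMDS_TX are the ring-size constants already used in the listing.

/* Sketch only: fields reconstructed from the references above; types are
 * inferred from usage and may differ from the real driver header. */
struct mace_data {
	volatile struct mace __iomem *mace;          /* chip registers, ioremap(dev->base_addr, 0x1000) */
	volatile struct dbdma_regs __iomem *tx_dma;  /* TX DBDMA registers (macio resource 1) */
	int tx_dma_intr;                             /* TX DBDMA interrupt line, macio_irq(mdev, 1) */
	volatile struct dbdma_regs __iomem *rx_dma;  /* RX DBDMA registers (macio resource 2) */
	int rx_dma_intr;                             /* RX DBDMA interrupt line, macio_irq(mdev, 2) */
	volatile struct dbdma_cmd *tx_cmds;          /* DBDMA_ALIGN(mp + 1): command ring follows the struct */
	volatile struct dbdma_cmd *rx_cmds;          /* tx_cmds + NCMDS_TX * N_TX_RING + 1 */
	struct sk_buff *rx_bufs[N_RX_RING];          /* receive buffers, refilled in mace_rxdma_intr() */
	int rx_fill;
	int rx_empty;
	struct sk_buff *tx_bufs[N_TX_RING];          /* pending transmit skbs, freed on completion */
	int tx_fill;
	int tx_empty;
	unsigned char maccc;                         /* cached MACCC value written back after resets */
	unsigned char tx_fullup;                     /* transmit ring full, queue stopped */
	unsigned char tx_active;                     /* frames handed to the chip, bounded by MAX_TX_ACTIVE */
	unsigned char tx_bad_runt;                   /* runt-frame recovery flag */
	struct timer_list tx_timeout;                /* armed by mace_set_timeout() */
	int timeout_active;
	int port_aaui;                               /* 1 = AAUI port selected */
	int chipid;                                  /* revision, compared against BROKEN_ADDRCHG_REV */
	struct macio_dev *mdev;
	spinlock_t lock;                             /* protects ring indices and register access */
};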