
Searched refs:tx_new (Results 1 – 18 of 18) sorted by relevance

/drivers/net/ethernet/amd/
7990.h
108 int rx_new, tx_new; member
237 #define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
238 lp->tx_old+lp->tx_ring_mod_mask-lp->tx_new:\
239 lp->tx_old - lp->tx_new-1)
a2065.c
110 int rx_new, tx_new; member
159 lp->rx_new = lp->tx_new = 0; in lance_init_ring()
336 for (i = j; i != lp->tx_new; i = j) { in lance_tx()
407 if (lp->tx_old <= lp->tx_new) in lance_tx_buffs_avail()
408 return lp->tx_old + lp->tx_ring_mod_mask - lp->tx_new; in lance_tx_buffs_avail()
409 return lp->tx_old - lp->tx_new - 1; in lance_tx_buffs_avail()
559 entry = lp->tx_new & lp->tx_ring_mod_mask; in lance_start_xmit()
567 lp->tx_new = (lp->tx_new+1) & lp->tx_ring_mod_mask; in lance_start_xmit()
618 if (lp->tx_old != lp->tx_new) { in lance_set_multicast()
sunlance.c
249 int rx_new, tx_new; member
272 #define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
273 lp->tx_old+TX_RING_MOD_MASK-lp->tx_new:\
274 lp->tx_old - lp->tx_new-1)
331 lp->rx_new = lp->tx_new = 0; in lance_init_ring_dvma()
387 lp->rx_new = lp->tx_new = 0; in lance_init_ring_pio()
576 for (i = j; i != lp->tx_new; i = j) { in lance_tx_dvma()
744 for (i = j; i != lp->tx_new; i = j) { in lance_tx_pio()
884 entry = lp->tx_new & TX_RING_MOD_MASK; in build_fake_packet()
911 lp->tx_new = TX_NEXT(entry); in build_fake_packet()
[all …]
7990.c
147 lp->rx_new = lp->tx_new = 0; in lance_init_ring()
363 for (i = j; i != lp->tx_new; i = j) { in lance_tx()
559 entry = lp->tx_new & lp->tx_ring_mod_mask; in lance_start_xmit()
569 lp->tx_new = (lp->tx_new+1) & lp->tx_ring_mod_mask; in lance_start_xmit()
625 while (lp->tx_old != lp->tx_new) in lance_set_multicast()
declance.c
257 int rx_new, tx_new; member
273 #define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
274 lp->tx_old+TX_RING_MOD_MASK-lp->tx_new:\
275 lp->tx_old - lp->tx_new-1)
457 lp->rx_new = lp->tx_new = 0; in lance_init_ring()
654 for (i = j; i != lp->tx_new; i = j) { in lance_tx()
916 entry = lp->tx_new; in lance_start_xmit()
926 lp->tx_new = (entry + 1) & TX_RING_MOD_MASK; in lance_start_xmit()
979 if (lp->tx_old != lp->tx_new) { in lance_set_multicast()
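Note: the TX_BUFFS_AVAIL hits above (7990.h, sunlance.c, declance.c) are all the same ring arithmetic: tx_new is the producer index (next descriptor to fill), tx_old the consumer index (oldest unreclaimed descriptor), and one slot is deliberately kept unused so that tx_old == tx_new unambiguously means "ring empty". A minimal standalone sketch of that calculation, with assumed names (tx_buffs_avail, ring_mod_mask) rather than the driver macros:

    #include <stdio.h>

    /*
     * Sketch (not the kernel code) of the TX_BUFFS_AVAIL arithmetic:
     * ring_mod_mask is ring_size - 1 for a power-of-two ring.
     */
    static unsigned int tx_buffs_avail(unsigned int tx_old, unsigned int tx_new,
                                       unsigned int ring_mod_mask)
    {
            if (tx_old <= tx_new)
                    return tx_old + ring_mod_mask - tx_new;
            return tx_old - tx_new - 1;
    }

    int main(void)
    {
            printf("%u\n", tx_buffs_avail(0, 0, 15));   /* empty 16-slot ring: 15 free */
            printf("%u\n", tx_buffs_avail(3, 10, 15));  /* 7 packets in flight: 8 free */
            printf("%u\n", tx_buffs_avail(10, 3, 15));  /* producer has wrapped: 6 free */
            return 0;
    }

The sunbmac.h, sunqe.h, sunhme.h, sungem.h and cassini.h hits below spell the same formula with (TX_RING_SIZE - 1) in place of a mod mask, and de4x5.c's variant (tx_old + txRingSize - tx_new - 1) is algebraically identical.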
/drivers/net/ethernet/seeq/
sgiseeq.c
53 #define TX_BUFFS_AVAIL(sp) ((sp->tx_old <= sp->tx_new) ? \
54 sp->tx_old + (SEEQ_TX_BUFFERS - 1) - sp->tx_new : \
55 sp->tx_old - sp->tx_new - 1)
103 unsigned int rx_new, tx_new; member
185 sp->rx_new = sp->tx_new = 0; in seeq_init_ring()
276 gpriv->rx_new, gpriv->rx_old, gpriv->tx_new, gpriv->tx_old); in sgiseeq_dump_rings()
472 for (j = sp->tx_old; j != sp->tx_new; j = NEXT_TX(j)) { in sgiseeq_tx()
513 if (sp->tx_old != sp->tx_new) in sgiseeq_interrupt()
603 entry = sp->tx_new; in sgiseeq_start_xmit()
626 if (sp->tx_old != sp->tx_new) { in sgiseeq_start_xmit()
[all …]
/drivers/net/ethernet/sun/
sunbmac.h
259 (((bp)->tx_old <= (bp)->tx_new) ? \
260 (bp)->tx_old + (TX_RING_SIZE - 1) - (bp)->tx_new : \
261 (bp)->tx_old - (bp)->tx_new - 1)
301 int rx_new, tx_new, rx_old, tx_old; member
sunqe.h
299 (((qp)->tx_old <= (qp)->tx_new) ? \
300 (qp)->tx_old + (TX_RING_SIZE - 1) - (qp)->tx_new : \
301 (qp)->tx_old - (qp)->tx_new - 1)
340 int tx_new, tx_old; /* TX ring extents */ member
sunhme.h
365 (((hp)->tx_old <= (hp)->tx_new) ? \
366 (hp)->tx_old + (TX_RING_SIZE - 1) - (hp)->tx_new : \
367 (hp)->tx_old - (hp)->tx_new - 1)
419 int rx_new, tx_new, rx_old, tx_old; member
sungem.h
933 (((GP)->tx_old <= (GP)->tx_new) ? \
934 (GP)->tx_old + (TX_RING_SIZE - 1) - (GP)->tx_new : \
935 (GP)->tx_old - (GP)->tx_new - 1)
978 int tx_new, tx_old; member
sunqe.c
130 qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0; in qe_init_rings()
535 while (elem != qep->tx_new) { in qe_tx_reclaim()
584 entry = qep->tx_new; in qe_start_xmit()
598 qep->tx_new = NEXT_TX(entry); in qe_start_xmit()
cassini.h
2761 int tx_new[N_TX_RINGS], tx_old[N_TX_RINGS]; member
2888 #define TX_BUFFS_AVAIL(cp, i) ((cp)->tx_old[(i)] <= (cp)->tx_new[(i)] ? \
2889 (cp)->tx_old[(i)] + (TX_DESC_RINGN_SIZE(i) - 1) - (cp)->tx_new[(i)] : \
2890 (cp)->tx_old[(i)] - (cp)->tx_new[(i)] - 1)
sunbmac.c
221 bp->rx_new = bp->rx_old = bp->tx_new = bp->tx_old = 0; in bigmac_init_rings()
767 while (elem != bp->tx_new) { in bigmac_tx()
966 entry = bp->tx_new; in bigmac_start_xmit()
973 bp->tx_new = NEXT_TX(entry); in bigmac_start_xmit()
sunhme.c
99 int tx_new, tx_old; member
118 tlp->tx_new = hp->tx_new; in tx_add_log()
133 tx_log[this].tx_new, tx_log[this].tx_old, in tx_dump_log()
1255 hp->rx_new = hp->rx_old = hp->tx_new = hp->tx_old = 0; in happy_meal_init_rings()
1921 while (elem != hp->tx_new) { in happy_meal_tx()
2278 entry = hp->tx_new; in happy_meal_start_xmit()
2324 hp->tx_new = entry; in happy_meal_start_xmit()
sungem.c
1025 entry = gp->tx_new; in gem_start_xmit()
1095 gp->tx_new = entry; in gem_start_xmit()
1112 writel(gp->tx_new, gp->regs + TXDMA_KICK); in gem_start_xmit()
1625 gp->rx_new = gp->rx_old = gp->tx_new = gp->tx_old = 0; in gem_init_rings()
cassini.c
2798 entry = cp->tx_new[ring]; in cas_xmit_tx_ringN()
2856 cp->tx_new[ring] = entry; in cas_xmit_tx_ringN()
3974 memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS); in cas_clean_rings()
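Note: the Sun driver hits above (sunqe, sunbmac, sunhme, sungem) share one producer/consumer convention: the xmit path fills the descriptor at tx_new and advances it (qe_start_xmit, bigmac_start_xmit, happy_meal_start_xmit, gem_start_xmit), while the completion path walks tx_old forward until it catches up with tx_new (qe_tx_reclaim, bigmac_tx, happy_meal_tx). A self-contained sketch of that pattern, with illustrative names only (struct tx_ring, NEXT_TX, tx_ring_queue, tx_ring_reclaim are not the driver structures):

    #include <stdio.h>

    #define TX_RING_SIZE 32                      /* assumed power-of-two ring */
    #define NEXT_TX(i)   (((i) + 1) & (TX_RING_SIZE - 1))

    struct tx_ring {
            unsigned int tx_new;                 /* producer: next slot to fill */
            unsigned int tx_old;                 /* consumer: oldest in-flight slot */
            const char  *pkt[TX_RING_SIZE];      /* per-slot bookkeeping (stand-in for skbs) */
    };

    /* xmit side: claim the slot at tx_new, then advance the producer. */
    static void tx_ring_queue(struct tx_ring *r, const char *pkt)
    {
            unsigned int entry = r->tx_new;

            r->pkt[entry] = pkt;
            r->tx_new = NEXT_TX(entry);
    }

    /* completion side: reclaim finished slots until tx_old catches tx_new. */
    static void tx_ring_reclaim(struct tx_ring *r)
    {
            unsigned int elem = r->tx_old;

            while (elem != r->tx_new) {
                    printf("reclaim slot %u (%s)\n", elem, r->pkt[elem]);
                    r->pkt[elem] = NULL;
                    elem = NEXT_TX(elem);
            }
            r->tx_old = elem;
    }

    int main(void)
    {
            struct tx_ring ring = { 0 };

            tx_ring_queue(&ring, "pkt A");
            tx_ring_queue(&ring, "pkt B");
            tx_ring_reclaim(&ring);              /* afterwards tx_old == tx_new again */
            return 0;
    }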
/drivers/net/ethernet/dec/tulip/
de4x5.c
795 int tx_new, tx_old; /* TX descriptor ring pointers */ member
888 #define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
889 lp->tx_old+lp->txRingSize-lp->tx_new-1:\
890 lp->tx_old -lp->tx_new-1)
892 #define TX_PKT_PENDING (lp->tx_old != lp->tx_new)
1419 lp->tx_new = lp->tx_old = 0; in de4x5_sw_reset()
1441 if ((s32)le32_to_cpu(lp->tx_ring[lp->tx_new].status) >= 0) j=1; in de4x5_sw_reset()
1451 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize; in de4x5_sw_reset()
1452 lp->tx_old = lp->tx_new; in de4x5_sw_reset()
1485 if (netif_queue_stopped(dev) || (u_long) lp->tx_skb[lp->tx_new] > 1) { in de4x5_queue_pkt()
[all …]
/drivers/net/ethernet/intel/ixgb/
ixgb_ethtool.c
508 struct ixgb_desc_ring tx_old, tx_new, rx_old, rx_new; in ixgb_set_ringparam() local
539 tx_new = adapter->tx_ring; in ixgb_set_ringparam()
545 adapter->tx_ring = tx_new; in ixgb_set_ringparam()
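Note: the ixgb hit is a different use of the name: here tx_new is a local struct ixgb_desc_ring that stages the new ring geometry in ixgb_set_ringparam() before it is installed into the adapter, so the old ring can be restored if setup fails. A rough sketch of that stage-then-swap pattern; the types and the setup_tx_resources() helper below are stand-ins, not the ixgb API beyond what the listing shows:

    #include <string.h>

    struct desc_ring {                    /* stand-in for struct ixgb_desc_ring */
            unsigned int count;
            void *desc;
    };

    struct adapter {                      /* stand-in for struct ixgb_adapter */
            struct desc_ring tx_ring;
    };

    /* Stubbed allocator: a real driver would allocate DMA descriptors here. */
    static int setup_tx_resources(struct adapter *a)
    {
            return a->tx_ring.count ? 0 : -1;
    }

    static int set_ringparam(struct adapter *a, unsigned int new_tx_count)
    {
            struct desc_ring tx_old = a->tx_ring;   /* remember the old ring */
            struct desc_ring tx_new;

            memset(&tx_new, 0, sizeof(tx_new));
            tx_new.count = new_tx_count;

            a->tx_ring = tx_new;                    /* try the new geometry */
            if (setup_tx_resources(a)) {
                    a->tx_ring = tx_old;            /* roll back on failure */
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            struct adapter a = { .tx_ring = { .count = 256, .desc = NULL } };

            return set_ringparam(&a, 512);
    }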