Lines Matching refs:tx_ring
111 static int c2_tx_ring_alloc(struct c2_ring *tx_ring, void *vaddr, in c2_tx_ring_alloc() argument
119 tx_ring->start = kmalloc(sizeof(*elem) * tx_ring->count, GFP_KERNEL); in c2_tx_ring_alloc()
120 if (!tx_ring->start) in c2_tx_ring_alloc()
123 elem = tx_ring->start; in c2_tx_ring_alloc()
126 for (i = 0; i < tx_ring->count; i++, elem++, tx_desc++, txp_desc++) { in c2_tx_ring_alloc()
141 if (i == tx_ring->count - 1) { in c2_tx_ring_alloc()
142 elem->next = tx_ring->start; in c2_tx_ring_alloc()
151 tx_ring->to_use = tx_ring->to_clean = tx_ring->start; in c2_tx_ring_alloc()
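
The c2_tx_ring_alloc() lines above show the ring being built as a fixed array of elements linked into a circle: kmalloc() allocates count elements, each element's next points at its successor, the last one wraps back to start, and both working pointers (to_use, to_clean) begin at start. A minimal userspace sketch of that layout, assuming a reduced struct with only the fields referenced here; malloc stands in for kmalloc, and the real driver elements also carry descriptor and DMA state:

    #include <stdlib.h>

    struct ring_elem {
            struct ring_elem *next;     /* link to the next element */
    };

    struct ring {
            struct ring_elem *start;    /* base of the element array */
            struct ring_elem *to_use;   /* next element to hand to hardware */
            struct ring_elem *to_clean; /* next element to reclaim */
            unsigned int count;         /* number of elements in the ring */
    };

    static int ring_alloc(struct ring *tx_ring)
    {
            struct ring_elem *elem;
            unsigned int i;

            tx_ring->start = malloc(sizeof(*elem) * tx_ring->count);
            if (!tx_ring->start)
                    return -1;          /* -ENOMEM in the kernel */

            /* Link each element to its successor; the last wraps to start. */
            for (i = 0, elem = tx_ring->start; i < tx_ring->count; i++, elem++) {
                    if (i == tx_ring->count - 1)
                            elem->next = tx_ring->start;
                    else
                            elem->next = elem + 1;
            }

            /* An empty ring: producer and consumer both point at start. */
            tx_ring->to_use = tx_ring->to_clean = tx_ring->start;
            return 0;
    }

Because the wrap is built into the next links, every later traversal in the listing can advance with elem = elem->next and never needs an index or a modulus.
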
326 struct c2_ring *tx_ring = &c2_port->tx_ring; in c2_tx_clean() local
334 elem = tx_ring->start; in c2_tx_clean()
363 } while ((elem = elem->next) != tx_ring->start); in c2_tx_clean()
366 c2_port->tx_avail = c2_port->tx_ring.count - 1; in c2_tx_clean()
367 c2_port->c2dev->cur_tx = tx_ring->to_use - tx_ring->start; in c2_tx_clean()
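
The c2_tx_clean() references walk the entire ring rather than just the dirty span: the loop starts at tx_ring->start and uses the circular next links as its stop condition, after which the free-slot count is reset to count - 1 and the producer position is saved back as an index (to_use - start). A hedged model of that sweep, reusing the same reduced structs; the in_use flag and the port fields are illustrative stand-ins for the per-descriptor state the driver actually clears (cur_tx lives on c2dev in the driver):

    struct elem { struct elem *next; int in_use; };
    struct ring { struct elem *start, *to_use, *to_clean; unsigned int count; };
    struct port { struct ring tx_ring; unsigned int tx_avail; unsigned int cur_tx; };

    /* Sweep every element once, using the circular links as the terminator. */
    static void tx_clean(struct port *port)
    {
            struct ring *tx_ring = &port->tx_ring;
            struct elem *elem = tx_ring->start;

            do {
                    elem->in_use = 0;       /* drop any stale buffer state */
            } while ((elem = elem->next) != tx_ring->start);

            /* One slot stays unused so to_use == to_clean means "empty". */
            port->tx_avail = tx_ring->count - 1;
            /* Record the producer position as an index for the adapter. */
            port->cur_tx = (unsigned int)(tx_ring->to_use - tx_ring->start);
    }
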
383 struct c2_ring *tx_ring = &c2_port->tx_ring; in c2_tx_interrupt() local
389 for (elem = tx_ring->to_clean; elem != tx_ring->to_use; in c2_tx_interrupt()
403 netdev->name, elem - tx_ring->start, in c2_tx_interrupt()
411 tx_ring->to_clean = elem; in c2_tx_interrupt()
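
In c2_tx_interrupt() the reclaim loop covers only the in-flight span, from the consumer pointer (to_clean) toward the producer pointer (to_use), and leaves to_clean at the element where it stopped; the debug print shown above reports an element's position as elem - tx_ring->start. A sketch of that pattern, where the done flag marking a completed descriptor is an assumption standing in for whatever completion status the driver actually checks:

    struct elem { struct elem *next; int done; };
    struct ring { struct elem *start, *to_use, *to_clean; unsigned int count; };

    /*
     * Reclaim completed descriptors: walk from the consumer pointer toward
     * the producer pointer and leave to_clean at the first unfinished element.
     */
    static unsigned int tx_reclaim(struct ring *tx_ring)
    {
            struct elem *elem;
            unsigned int reclaimed = 0;

            for (elem = tx_ring->to_clean; elem != tx_ring->to_use;
                 elem = elem->next) {
                    if (!elem->done)        /* hardware not finished yet */
                            break;
                    reclaimed++;            /* slot can be reused by the producer */
            }
            tx_ring->to_clean = elem;
            return reclaimed;
    }
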
604 tx_size = c2_port->tx_ring.count * sizeof(struct c2_tx_desc); in c2_up()
630 if ((ret = c2_tx_ring_alloc(&c2_port->tx_ring, c2_port->mem + rx_size, in c2_up()
638 c2_port->tx_avail = c2_port->tx_ring.count - 1; in c2_up()
639 c2_port->tx_ring.to_use = c2_port->tx_ring.to_clean = in c2_up()
640 c2_port->tx_ring.start + c2dev->cur_tx; in c2_up()
644 BUG_ON(c2_port->tx_ring.to_use != c2_port->tx_ring.to_clean); in c2_up()
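
The c2_up() lines size the TX descriptor area directly from the ring count, place it in the shared mapping immediately after the RX area (c2_port->mem + rx_size), and restore both ring pointers to the index the adapter last reported (c2dev->cur_tx), with a BUG_ON() asserting the ring reads as empty. A hedged arithmetic sketch; the descriptor layout, mem, and rx_size are placeholders for the driver's mapped region and struct c2_tx_desc:

    #include <assert.h>
    #include <stddef.h>

    /* Placeholder descriptor; the real struct c2_tx_desc layout is not shown here. */
    struct tx_desc { unsigned long long addr; unsigned int len; };
    struct elem { struct elem *next; };
    struct ring { struct elem *start, *to_use, *to_clean; unsigned int count; };

    /*
     * Size the TX descriptor area from the ring count, locate it right after
     * the RX area in the shared mapping, and resume the ring at the index the
     * adapter last reported.  Returns the address the driver would hand to
     * its ring-alloc routine.
     */
    static void *ring_resume(struct ring *tx_ring, void *mem, size_t rx_size,
                             unsigned int cur_tx, size_t *tx_size)
    {
            /* Descriptor area size follows directly from the element count. */
            *tx_size = tx_ring->count * sizeof(struct tx_desc);

            /* Both pointers restart at the saved index, so the ring reads as empty. */
            tx_ring->to_use = tx_ring->to_clean = tx_ring->start + cur_tx;
            assert(tx_ring->to_use == tx_ring->to_clean);   /* BUG_ON() in c2_up() */

            return (char *)mem + rx_size;   /* TX descriptors sit right after the RX area */
    }
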
723 kfree(c2_port->tx_ring.start); in c2_down()
760 struct c2_ring *tx_ring = &c2_port->tx_ring; in c2_xmit_frame() local
782 elem = tx_ring->to_use; in c2_xmit_frame()
823 tx_ring->to_use = elem->next; in c2_xmit_frame()
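
c2_xmit_frame() consumes from the producer side: the element at to_use receives the outgoing frame and to_use then advances along the circular link. A minimal sketch of that hand-off under the same reduced structs; the in_use flag and the tx_avail bookkeeping shown here are illustrative (the driver tracks tx_avail on the port and stops the queue itself when it runs low):

    #include <stddef.h>

    struct elem { struct elem *next; int in_use; };
    struct ring { struct elem *start, *to_use, *to_clean; unsigned int count; };

    /* Hand the next free element to the hardware and advance the producer pointer. */
    static struct elem *ring_post(struct ring *tx_ring, unsigned int *tx_avail)
    {
            struct elem *elem;

            if (*tx_avail == 0)
                    return NULL;            /* ring full: caller must stop the queue */

            elem = tx_ring->to_use;         /* element the next frame will occupy */
            elem->in_use = 1;               /* descriptor would be filled in here */

            tx_ring->to_use = elem->next;   /* advance along the circular link */
            (*tx_avail)--;
            return elem;
    }

With one slot always left unused, to_use == to_clean unambiguously means an empty ring, which is consistent with tx_avail being reset to count - 1 rather than count in the c2_tx_clean() and c2_up() lines above.
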
900 c2_port->tx_ring.count = C2_NUM_TX_DESC; in c2_devinit()