Searched refs:to_clean (Results 1 – 11 of 11) sorted by relevance
58    unsigned int to_clean;                        member
74    cq->ring.desc_size * cq->to_clean);           in vnic_cq_service()
84    cq->to_clean++;                               in vnic_cq_service()
85    if (cq->to_clean == cq->ring.desc_count) {    in vnic_cq_service()
86    cq->to_clean = 0;                             in vnic_cq_service()
91    cq->ring.desc_size * cq->to_clean);           in vnic_cq_service()
64    rq->to_use = rq->to_clean = rq->bufs[0];      in vnic_rq_alloc_bufs()
134   rq->to_use = rq->to_clean =                   in vnic_rq_init()
178   buf = rq->to_clean;                           in vnic_rq_clean()
184   buf = rq->to_clean = buf->next;               in vnic_rq_clean()
190   rq->to_use = rq->to_clean =                   in vnic_rq_clean()
78    struct vnic_wq_buf *to_clean;                 member
133   buf = wq->to_clean;                           in vnic_wq_service()
140   wq->to_clean = buf->next;                     in vnic_wq_service()
145   buf = wq->to_clean;                           in vnic_wq_service()
80    struct vnic_rq_buf *to_clean;                 member
165   buf = rq->to_clean;                           in vnic_rq_service()
175   rq->to_clean = buf->next;                     in vnic_rq_service()
180   buf = rq->to_clean;                           in vnic_rq_service()
64    wq->to_use = wq->to_clean = wq->bufs[0];      in vnic_wq_alloc_bufs()
167   buf = wq->to_clean;                           in vnic_wq_clean()
173   buf = wq->to_clean = buf->next;               in vnic_wq_clean()
177   wq->to_use = wq->to_clean = wq->bufs[0];      in vnic_wq_clean()
81    cq->to_clean = 0;                             in vnic_cq_clean()
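All the vnic hits above are one pattern: each queue keeps a ring of buffer descriptors with a producer pointer (to_use) and a consumer pointer (to_clean), both reset to bufs[0] at alloc/init time, with the consumer walked forward over buf->next as completions arrive. A minimal sketch of that consumer walk follows; the types, the completion-index termination, and the buf_free callback are illustrative assumptions, not the real vnic definitions.

/*
 * Sketch of the consumer walk behind vnic_wq_service()/vnic_rq_service().
 * Each slot links to the next, so the ring is a circular singly linked list.
 * ring_buf, ring, and buf_free are made-up names for illustration only.
 */
struct ring_buf {
        struct ring_buf *next;          /* circular link over the ring slots */
        unsigned int index;             /* position of this slot in the ring */
        void *data;                     /* whatever was posted (skb, page, ...) */
};

struct ring {
        struct ring_buf *to_use;        /* next free slot for the producer */
        struct ring_buf *to_clean;      /* oldest posted slot, owned by the consumer */
};

/* Release every slot up to and including the one reported as completed. */
static void ring_service(struct ring *ring, unsigned int completed_index,
                         void (*buf_free)(struct ring_buf *buf))
{
        struct ring_buf *buf = ring->to_clean;

        for (;;) {
                buf_free(buf);                  /* unmap / free what was posted */
                ring->to_clean = buf->next;     /* slot becomes reusable */
                if (buf->index == completed_index)
                        break;
                buf = ring->to_clean;
        }
}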
303    static void cleanup_one_si(struct smi_info *to_clean);
3068   static void cleanup_one_si(struct smi_info *to_clean)                in cleanup_one_si() argument
3073   if (!to_clean)                                                       in cleanup_one_si()
3076   list_del(&to_clean->link);                                           in cleanup_one_si()
3079   atomic_inc(&to_clean->stop_operation);                               in cleanup_one_si()
3085   wait_for_timer_and_thread(to_clean);                                 in cleanup_one_si()
3092   spin_lock_irqsave(&to_clean->si_lock, flags);                        in cleanup_one_si()
3093   while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {    in cleanup_one_si()
3094   spin_unlock_irqrestore(&to_clean->si_lock, flags);                   in cleanup_one_si()
3095   poll(to_clean);                                                      in cleanup_one_si()
[all …]
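The ipmi_si hit uses the name differently: to_clean is simply the interface being torn down by cleanup_one_si(). The visible sequence is unlink, raise a stop flag, wait for the timer and kthread, then poll the state machine under the lock until nothing is in flight. Below is a hedged sketch of that teardown shape; my_dev, stop_worker, poll_once, and busy are illustrative stand-ins, not the real ipmi_si symbols, and only the ordering mirrors the listing.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>

struct my_dev {
        struct list_head link;          /* membership in a global device list */
        atomic_t stop_operation;        /* checked by timer/thread paths */
        spinlock_t lock;
        bool busy;                      /* stands in for curr_msg / si_state */
};

static void stop_worker(struct my_dev *d) { /* assumed: waits for timer + kthread */ }
static void poll_once(struct my_dev *d)   { /* assumed: advances the state machine */ }

static void cleanup_one(struct my_dev *d)
{
        unsigned long flags;

        if (!d)
                return;

        list_del(&d->link);                     /* no new users can find it */
        atomic_inc(&d->stop_operation);         /* ask async paths to stop */
        stop_worker(d);

        spin_lock_irqsave(&d->lock, flags);
        while (d->busy) {                       /* drain whatever is still pending */
                spin_unlock_irqrestore(&d->lock, flags);
                poll_once(d);
                spin_lock_irqsave(&d->lock, flags);
        }
        spin_unlock_irqrestore(&d->lock, flags);
}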
153   tx_ring->to_use = tx_ring->to_clean = tx_ring->start;             in c2_tx_ring_alloc()
205   rx_ring->to_use = rx_ring->to_clean = rx_ring->start;             in c2_rx_ring_alloc()
274   rx_ring->to_clean = rx_ring->start;                               in c2_rx_fill()
391   for (elem = tx_ring->to_clean; elem != tx_ring->to_use;           in c2_tx_interrupt()
413   tx_ring->to_clean = elem;                                         in c2_tx_interrupt()
479   rx_ring->to_clean = rx_ring->start + c2dev->cur_rx;               in c2_rx_interrupt()
481   for (elem = rx_ring->to_clean; elem->next != rx_ring->to_clean;   in c2_rx_interrupt()
540   rx_ring->to_clean = elem;                                         in c2_rx_interrupt()
644   c2_port->tx_ring.to_use = c2_port->tx_ring.to_clean =             in c2_up()
649   BUG_ON(c2_port->tx_ring.to_use != c2_port->tx_ring.to_clean);     in c2_up()
281   struct c2_element *to_clean;                  member
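c2 uses the same two-pointer ring over chained elements: c2_tx_interrupt() walks from to_clean toward to_use, releasing finished elements, and then records where the walk stopped. A small illustrative sketch of that reclaim loop is below; the element layout and the hw_done test are assumptions, not the real c2 fields.

/*
 * Illustrative reclaim loop in the shape of c2_tx_interrupt()/skge_tx_done():
 * walk completed elements from to_clean toward to_use, then remember where
 * cleaning stopped. elem, tx_ring, and release are made-up names.
 */
struct elem {
        struct elem *next;              /* circular chain over the ring slots */
        int hw_done;                    /* set once the hardware finished the slot */
};

struct tx_ring {
        struct elem *to_use;            /* next slot the driver will post */
        struct elem *to_clean;          /* oldest posted, not yet reclaimed slot */
};

static void tx_reclaim(struct tx_ring *ring, void (*release)(struct elem *e))
{
        struct elem *e;

        for (e = ring->to_clean; e != ring->to_use; e = e->next) {
                if (!e->hw_done)        /* stop at the first still-pending slot */
                        break;
                release(e);             /* e.g. unmap DMA, free the skb */
        }
        ring->to_clean = e;             /* everything before e is free again */
}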
954    ring->to_use = ring->to_clean = ring->start;                                in skge_ring_alloc()
1046   ring->to_clean = ring->start;                                               in skge_rx_fill()
2627   skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean);                     in skge_up()
2629   BUG_ON(skge->tx_ring.to_use != skge->tx_ring.to_clean);                     in skge_up()
2745   return ((ring->to_clean > ring->to_use) ? 0 : ring->count)                  in skge_avail()
2746   + (ring->to_clean - ring->to_use) - 1;                                      in skge_avail()
2877   for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {  in skge_tx_clean()
2883   skge->tx_ring.to_clean = e;                                                 in skge_tx_clean()
3152   for (e = ring->to_clean; e != ring->to_use; e = e->next) {                  in skge_tx_done()
3160   skge->tx_ring.to_clean = e;                                                 in skge_tx_done()
[all …]
2401   struct skge_element *to_clean;                member
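skge keeps the same pair of pointers, and skge_avail() (lines 2745–2746 above) derives the number of free descriptors directly from them, keeping one slot unused so a full ring is never mistaken for an empty one. The same arithmetic, restated with indices instead of element pointers (the names here are mine, not skge's):

/*
 * Index-based restatement of the skge_avail() arithmetic: free slots of a
 * circular ring given the producer (to_use) and consumer (to_clean) indices.
 * One slot stays reserved so that to_use == to_clean can only mean "empty".
 */
static unsigned int ring_avail(unsigned int to_use, unsigned int to_clean,
                               unsigned int count)
{
        unsigned int used = (to_use >= to_clean)
                ? to_use - to_clean                /* producer has not wrapped */
                : count - (to_clean - to_use);     /* producer wrapped past the end */

        return count - used - 1;                   /* one slot stays reserved */
}

For example, with count = 8, to_use = 5, to_clean = 2 this returns 4: three descriptors are still in flight, one slot stays reserved, and four more can be posted.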