Lines Matching refs:rq
27 static int vnic_rq_alloc_bufs(struct vnic_rq *rq) in vnic_rq_alloc_bufs() argument
31 unsigned int i, j, count = rq->ring.desc_count; in vnic_rq_alloc_bufs()
34 vdev = rq->vdev; in vnic_rq_alloc_bufs()
37 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC); in vnic_rq_alloc_bufs()
38 if (!rq->bufs[i]) { in vnic_rq_alloc_bufs()
45 buf = rq->bufs[i]; in vnic_rq_alloc_bufs()
48 buf->desc = (u8 *)rq->ring.descs + in vnic_rq_alloc_bufs()
49 rq->ring.desc_size * buf->index; in vnic_rq_alloc_bufs()
51 buf->next = rq->bufs[0]; in vnic_rq_alloc_bufs()
54 buf->next = rq->bufs[i + 1]; in vnic_rq_alloc_bufs()
62 rq->to_use = rq->to_clean = rq->bufs[0]; in vnic_rq_alloc_bufs()
63 rq->buf_index = 0; in vnic_rq_alloc_bufs()
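
The matched lines sketch the shape of vnic_rq_alloc_bufs(): the per-descriptor bookkeeping structs are kzalloc'd in fixed-size blocks (rq->bufs[i]), each entry's desc pointer is aimed at its slot in the descriptor ring, and the next pointers chain every entry to its successor, with a block's last entry hopping to bufs[i + 1] and the final entry wrapping back to bufs[0]. A minimal userspace sketch of that block-allocated circular list, with illustrative names (ENTRIES_PER_BLK, struct ring_buf) standing in for the driver's VNIC_RQ_BUF_BLK_ENTRIES and struct vnic_rq_buf:

#include <stdlib.h>

#define ENTRIES_PER_BLK 64              /* stand-in for VNIC_RQ_BUF_BLK_ENTRIES */

struct ring_buf {                       /* simplified stand-in for struct vnic_rq_buf */
        unsigned int index;             /* position in the descriptor ring */
        void *desc;                     /* points at this entry's descriptor */
        struct ring_buf *next;          /* circular link */
};

/* Allocate 'count' entries in ENTRIES_PER_BLK-sized blocks and chain them
 * into one circular list, the way the matched lines chain rq->bufs[]. */
static struct ring_buf **alloc_bufs(void *descs, size_t desc_size,
        unsigned int count)
{
        unsigned int blks = (count + ENTRIES_PER_BLK - 1) / ENTRIES_PER_BLK;
        struct ring_buf **bufs = calloc(blks, sizeof(*bufs));
        unsigned int i, j;

        if (!bufs)
                return NULL;

        for (i = 0; i < blks; i++) {
                bufs[i] = calloc(ENTRIES_PER_BLK, sizeof(**bufs));
                if (!bufs[i])
                        return NULL;    /* unwinding is left to the free path,
                                           as the driver leaves it to vnic_rq_free() */
        }

        for (i = 0; i < blks; i++) {
                struct ring_buf *buf = bufs[i];

                for (j = 0; j < ENTRIES_PER_BLK; j++) {
                        buf->index = i * ENTRIES_PER_BLK + j;
                        buf->desc = (char *)descs + desc_size * buf->index;
                        if (buf->index + 1 == count) {
                                buf->next = bufs[0];     /* last entry wraps around */
                                break;
                        } else if (j + 1 == ENTRIES_PER_BLK) {
                                buf->next = bufs[i + 1]; /* hop to the next block */
                        } else {
                                buf->next = buf + 1;     /* next entry, same block */
                                buf++;
                        }
                }
        }

        return bufs;
}
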
68 void vnic_rq_free(struct vnic_rq *rq) in vnic_rq_free() argument
73 vdev = rq->vdev; in vnic_rq_free()
75 vnic_dev_free_desc_ring(vdev, &rq->ring); in vnic_rq_free()
78 kfree(rq->bufs[i]); in vnic_rq_free()
79 rq->bufs[i] = NULL; in vnic_rq_free()
82 rq->ctrl = NULL; in vnic_rq_free()
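
vnic_rq_free() tears things down in the reverse order: the descriptor ring goes back through vnic_dev_free_desc_ring(), each buffer block is kfree'd and its slot in rq->bufs[] cleared, and rq->ctrl is set to NULL so a stale handle cannot be reused. The matching teardown for the sketch above (the descriptor memory itself stays with the caller, just as the ring is handed back to vnic_dev separately):

/* Free the blocks produced by alloc_bufs() above; NULL tolerance is kept
 * so a partially built table can be torn down, like kfree(NULL). */
static void free_bufs(struct ring_buf **bufs, unsigned int count)
{
        unsigned int blks = (count + ENTRIES_PER_BLK - 1) / ENTRIES_PER_BLK;
        unsigned int i;

        if (!bufs)
                return;

        for (i = 0; i < blks; i++) {
                free(bufs[i]);
                bufs[i] = NULL;         /* mirrors rq->bufs[i] = NULL */
        }
        free(bufs);
}
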
85 int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index, in vnic_rq_alloc() argument
90 rq->index = index; in vnic_rq_alloc()
91 rq->vdev = vdev; in vnic_rq_alloc()
93 rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index); in vnic_rq_alloc()
94 if (!rq->ctrl) { in vnic_rq_alloc()
99 vnic_rq_disable(rq); in vnic_rq_alloc()
101 err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size); in vnic_rq_alloc()
105 err = vnic_rq_alloc_bufs(rq); in vnic_rq_alloc()
107 vnic_rq_free(rq); in vnic_rq_alloc()
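
Read in order, the matched lines of vnic_rq_alloc() give the setup sequence: record the queue index and device handle, look up the per-queue control registers with vnic_dev_get_res(), quiesce the queue with vnic_rq_disable(), allocate the descriptor ring, then the buffer bookkeeping, and unwind through vnic_rq_free() if that last step fails. A hedged reconstruction of how those calls fit together, assuming the driver's vnic_dev.h/vnic_rq.h declarations; the desc_count/desc_size parameter names are inferred from the vnic_dev_alloc_desc_ring() call, and the error codes on the unmatched lines are assumptions:

/* Sketch only: relies on the driver's vnic_dev.h / vnic_rq.h declarations. */
int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
        unsigned int desc_count, unsigned int desc_size)
{
        int err;

        rq->index = index;
        rq->vdev = vdev;

        rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
        if (!rq->ctrl)
                return -EINVAL;         /* assumed: no RQ resource at this index */

        vnic_rq_disable(rq);            /* make sure hardware is not fetching */

        err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size);
        if (err)
                return err;

        err = vnic_rq_alloc_bufs(rq);
        if (err) {
                vnic_rq_free(rq);       /* releases the ring allocated above */
                return err;
        }

        return 0;
}
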
114 void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index, in vnic_rq_init() argument
121 paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET; in vnic_rq_init()
122 writeq(paddr, &rq->ctrl->ring_base); in vnic_rq_init()
123 iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size); in vnic_rq_init()
124 iowrite32(cq_index, &rq->ctrl->cq_index); in vnic_rq_init()
125 iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable); in vnic_rq_init()
126 iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset); in vnic_rq_init()
127 iowrite32(0, &rq->ctrl->dropped_packet_count); in vnic_rq_init()
128 iowrite32(0, &rq->ctrl->error_status); in vnic_rq_init()
131 fetch_index = ioread32(&rq->ctrl->fetch_index); in vnic_rq_init()
132 rq->to_use = rq->to_clean = in vnic_rq_init()
133 &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES] in vnic_rq_init()
135 iowrite32(fetch_index, &rq->ctrl->posted_index); in vnic_rq_init()
137 rq->buf_index = 0; in vnic_rq_init()
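
vnic_rq_init() programs the control block: the ring base address tagged with VNIC_PADDR_TARGET, the ring size, the completion-queue index, the error-interrupt settings, and zeroed drop/error counters. It then reads the hardware fetch_index and points to_use/to_clean at the matching buf, using fetch_index / VNIC_RQ_BUF_BLK_ENTRIES to pick the block and (on a continuation line the search did not match) fetch_index % VNIC_RQ_BUF_BLK_ENTRIES to pick the entry within it, before writing the same value to posted_index so hardware and software agree the ring is empty. A tiny standalone check of that block/entry mapping, with an illustrative block size:

#include <assert.h>
#include <stdio.h>

#define ENTRIES_PER_BLK 64              /* stand-in for VNIC_RQ_BUF_BLK_ENTRIES */

int main(void)
{
        unsigned int desc_count = 256;  /* example ring size */
        unsigned int fetch_index;

        for (fetch_index = 0; fetch_index < desc_count; fetch_index++) {
                unsigned int blk  = fetch_index / ENTRIES_PER_BLK;  /* rq->bufs[blk] */
                unsigned int slot = fetch_index % ENTRIES_PER_BLK;  /* entry in block */

                /* block/slot round-trips to the flat ring index, so to_use
                 * and to_clean land exactly where the hardware pointer is */
                assert(blk * ENTRIES_PER_BLK + slot == fetch_index);
        }
        printf("mapping covers all %u descriptors\n", desc_count);
        return 0;
}
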
140 unsigned int vnic_rq_error_status(struct vnic_rq *rq) in vnic_rq_error_status() argument
142 return ioread32(&rq->ctrl->error_status); in vnic_rq_error_status()
145 void vnic_rq_enable(struct vnic_rq *rq) in vnic_rq_enable() argument
147 iowrite32(1, &rq->ctrl->enable); in vnic_rq_enable()
150 int vnic_rq_disable(struct vnic_rq *rq) in vnic_rq_disable() argument
154 iowrite32(0, &rq->ctrl->enable); in vnic_rq_disable()
158 if (!(ioread32(&rq->ctrl->running))) in vnic_rq_disable()
163 printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index); in vnic_rq_disable()
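
vnic_rq_error_status() and vnic_rq_enable() are single register accesses; vnic_rq_disable() writes 0 to ctrl->enable and then polls ctrl->running so the caller knows the hardware has actually stopped fetching descriptors before buffers are torn down, logging the matched error if it never stops. A hedged sketch of that poll loop, assuming the driver's vnic_rq.h declarations; the retry count, delay, and -ETIMEDOUT return are illustrative rather than taken from the listing:

/* Sketch only: relies on the driver's vnic_rq.h declarations. */
int vnic_rq_disable(struct vnic_rq *rq)
{
        unsigned int wait;

        iowrite32(0, &rq->ctrl->enable);

        /* Give the hardware a bounded amount of time to acknowledge. */
        for (wait = 0; wait < 100; wait++) {
                if (!(ioread32(&rq->ctrl->running)))
                        return 0;
                udelay(1);              /* count and delay are illustrative */
        }

        printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index);

        return -ETIMEDOUT;
}
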
168 void vnic_rq_clean(struct vnic_rq *rq, in vnic_rq_clean() argument
169 void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf)) in vnic_rq_clean() argument
174 BUG_ON(ioread32(&rq->ctrl->enable)); in vnic_rq_clean()
176 buf = rq->to_clean; in vnic_rq_clean()
178 while (vnic_rq_desc_used(rq) > 0) { in vnic_rq_clean()
180 (*buf_clean)(rq, buf); in vnic_rq_clean()
182 buf = rq->to_clean = buf->next; in vnic_rq_clean()
183 rq->ring.desc_avail++; in vnic_rq_clean()
187 fetch_index = ioread32(&rq->ctrl->fetch_index); in vnic_rq_clean()
188 rq->to_use = rq->to_clean = in vnic_rq_clean()
189 &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES] in vnic_rq_clean()
191 iowrite32(fetch_index, &rq->ctrl->posted_index); in vnic_rq_clean()
193 rq->buf_index = 0; in vnic_rq_clean()
195 vnic_dev_clear_desc_ring(&rq->ring); in vnic_rq_clean()
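
vnic_rq_clean() insists the queue is already disabled (the BUG_ON), walks every in-use entry from to_clean along the circular next links, hands each one to the caller-supplied buf_clean callback, then resynchronizes to_use/to_clean against the hardware fetch_index (the same block/entry mapping as in vnic_rq_init()) and clears the descriptor ring. A hedged sketch of the shutdown order a caller would follow; my_rq_buf_free() and my_shutdown_rq() are hypothetical names, and what the callback must release depends on what the driver attached when it posted each buffer:

/* Hypothetical per-buffer callback: release whatever the driver mapped or
 * allocated for this descriptor (e.g. unmap DMA, free the posted skb). */
static void my_rq_buf_free(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
        /* driver-specific cleanup of the resources attached to buf */
}

static void my_shutdown_rq(struct vnic_rq *rq)
{
        if (vnic_rq_disable(rq))                /* hardware must stop first */
                return;

        vnic_rq_clean(rq, my_rq_buf_free);      /* drain every used descriptor */
        vnic_rq_free(rq);                       /* release ring and buf blocks */
}
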