Lines Matching refs:cq

62 void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)  in rvt_cq_enter()  argument
69 spin_lock_irqsave(&cq->lock, flags); in rvt_cq_enter()
75 wc = cq->queue; in rvt_cq_enter()
77 if (head >= (unsigned)cq->ibcq.cqe) { in rvt_cq_enter()
78 head = cq->ibcq.cqe; in rvt_cq_enter()
85 spin_unlock_irqrestore(&cq->lock, flags); in rvt_cq_enter()
86 if (cq->ibcq.event_handler) { in rvt_cq_enter()
89 ev.device = cq->ibcq.device; in rvt_cq_enter()
90 ev.element.cq = &cq->ibcq; in rvt_cq_enter()
92 cq->ibcq.event_handler(&ev, cq->ibcq.cq_context); in rvt_cq_enter()
96 if (cq->ip) { in rvt_cq_enter()
119 if (cq->notify == IB_CQ_NEXT_COMP || in rvt_cq_enter()
120 (cq->notify == IB_CQ_SOLICITED && in rvt_cq_enter()
128 worker = cq->rdi->worker; in rvt_cq_enter()
130 cq->notify = RVT_CQ_NONE; in rvt_cq_enter()
131 cq->triggered++; in rvt_cq_enter()
132 kthread_queue_work(worker, &cq->comptask); in rvt_cq_enter()
136 spin_unlock_irqrestore(&cq->lock, flags); in rvt_cq_enter()
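The matches above cover rvt_cq_enter(), the producer side of rdmavt's completion ring: an entry is stored at head under cq->lock, head wraps once it passes ibcq.cqe, a full ring raises IB_EVENT_CQ_ERR through the event handler (lines 86-92), and an armed CQ disarms itself and hands delivery to the kthread worker (lines 119-132). Below is a minimal userspace sketch of that head-wrap and arm logic; struct model_cq, its fields, and the notify constants are simplified stand-ins, not the kernel's struct rvt_cq / struct rvt_cq_wc:

    /*
     * Stand-ins for the kernel types; the ring has cqe + 1 slots and
     * head/tail index into it.
     */
    #include <stdbool.h>

    enum notify_state { NOTIFY_NONE, NOTIFY_SOLICITED, NOTIFY_NEXT_COMP };

    struct wc_entry { unsigned long long wr_id; };  /* stand-in for ib_wc */

    struct model_cq {
        unsigned int cqe;               /* highest valid index */
        unsigned int head, tail;
        enum notify_state notify;
        bool fire_completion;           /* models kthread_queue_work() */
        struct wc_entry queue[];
    };

    /* Returns false on overrun; the kernel raises IB_EVENT_CQ_ERR there. */
    static bool model_cq_enter(struct model_cq *cq,
                               const struct wc_entry *entry, bool solicited)
    {
        unsigned int head = cq->head, next;

        /* head may be user-writable via mmap: clamp, then wrap (lines 77-78). */
        if (head >= cq->cqe) {
            head = cq->cqe;
            next = 0;
        } else {
            next = head + 1;
        }
        if (next == cq->tail)
            return false;               /* ring full */

        cq->queue[head] = *entry;
        cq->head = next;

        /* An armed CQ fires once, then disarms (lines 119-132). */
        if (cq->notify == NOTIFY_NEXT_COMP ||
            (cq->notify == NOTIFY_SOLICITED && solicited)) {
            cq->notify = NOTIFY_NONE;
            cq->fire_completion = true; /* kernel queues cq->comptask */
        }
        return true;
    }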
142 struct rvt_cq *cq = container_of(work, struct rvt_cq, comptask); in send_complete() local
152 u8 triggered = cq->triggered; in send_complete()
161 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in send_complete()
164 if (cq->triggered == triggered) in send_complete()
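send_complete() snapshots cq->triggered before invoking the user's completion handler and loops if rvt_cq_enter() bumped the counter while the handler ran, since the kthread worker will not requeue the work item while it is still executing. A sketch of that retry loop (locking and the kernel's bottom-half handling are omitted; model_cq2 is a stand-in type):

    /* Stand-in CQ with just the fields the retry loop needs. */
    struct model_cq2 {
        unsigned char triggered;        /* u8, bumped by the producer */
        void (*comp_handler)(struct model_cq2 *cq, void *ctx);
        void *cq_context;
    };

    static void model_send_complete(struct model_cq2 *cq)
    {
        for (;;) {
            unsigned char triggered = cq->triggered;  /* snapshot (line 152) */

            cq->comp_handler(cq, cq->cq_context);     /* line 161 */

            /* No new completion arrived during the callback: done. */
            if (cq->triggered == triggered)
                return;
        }
    }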
187 struct rvt_cq *cq; in rvt_create_cq() local
200 cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, rdi->dparms.node); in rvt_create_cq()
201 if (!cq) in rvt_create_cq()
231 cq->ip = rvt_create_mmap_info(rdi, sz, context, wc); in rvt_create_cq()
232 if (!cq->ip) { in rvt_create_cq()
237 err = ib_copy_to_udata(udata, &cq->ip->offset, in rvt_create_cq()
238 sizeof(cq->ip->offset)); in rvt_create_cq()
255 if (cq->ip) { in rvt_create_cq()
257 list_add(&cq->ip->pending_mmaps, &rdi->pending_mmaps); in rvt_create_cq()
266 cq->rdi = rdi; in rvt_create_cq()
267 cq->ibcq.cqe = entries; in rvt_create_cq()
268 cq->notify = RVT_CQ_NONE; in rvt_create_cq()
269 spin_lock_init(&cq->lock); in rvt_create_cq()
270 kthread_init_work(&cq->comptask, send_complete); in rvt_create_cq()
271 cq->queue = wc; in rvt_create_cq()
273 ret = &cq->ibcq; in rvt_create_cq()
278 kfree(cq->ip); in rvt_create_cq()
282 kfree(cq); in rvt_create_cq()
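rvt_create_cq() allocates the CQ on the device's NUMA node (line 200), creates mmap info for user-mapped CQs and copies its offset back through udata (lines 231-238), then fills in the ring fields (lines 266-271); failures unwind in reverse, freeing cq->ip and then cq (lines 278-282). A userspace sketch of that allocate/unwind ordering, assuming hypothetical stand-ins for rvt_create_mmap_info() and ib_copy_to_udata() and an arbitrary 64-byte entry size:

    #include <stdlib.h>

    struct model_ip { unsigned long long offset; };  /* rvt_mmap_info stand-in */

    struct model_cq3 {
        unsigned int cqe;
        struct model_ip *ip;            /* non-NULL only for user CQs */
        void *queue;
    };

    /* Hypothetical stand-in for ib_copy_to_udata(); always succeeds here. */
    static int copy_offset_to_user(struct model_ip *ip) { (void)ip; return 0; }

    static struct model_cq3 *model_create_cq(unsigned int entries, int user)
    {
        struct model_cq3 *cq;
        void *wc;

        cq = calloc(1, sizeof(*cq));            /* kzalloc_node() analogue */
        if (!cq)
            return NULL;

        wc = calloc(entries + 1, 64);           /* ring; 64 is an assumed size */
        if (!wc)
            goto bail_cq;

        if (user) {
            cq->ip = calloc(1, sizeof(*cq->ip)); /* rvt_create_mmap_info() */
            if (!cq->ip)
                goto bail_wc;
            if (copy_offset_to_user(cq->ip))    /* ib_copy_to_udata() */
                goto bail_ip;
        }

        cq->cqe = entries;                      /* cq->ibcq.cqe = entries */
        cq->queue = wc;                         /* cq->queue = wc */
        return cq;

    bail_ip:
        free(cq->ip);                           /* kernel: kfree(cq->ip) */
    bail_wc:
        free(wc);                               /* kernel: vfree(wc) */
    bail_cq:
        free(cq);                               /* kernel: kfree(cq) */
        return NULL;
    }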
297 struct rvt_cq *cq = ibcq_to_rvtcq(ibcq); in rvt_destroy_cq() local
298 struct rvt_dev_info *rdi = cq->rdi; in rvt_destroy_cq()
300 kthread_flush_work(&cq->comptask); in rvt_destroy_cq()
304 if (cq->ip) in rvt_destroy_cq()
305 kref_put(&cq->ip->ref, rvt_release_mmap_info); in rvt_destroy_cq()
307 vfree(cq->queue); in rvt_destroy_cq()
308 kfree(cq); in rvt_destroy_cq()
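Teardown reverses creation: kthread_flush_work() runs first so send_complete() cannot execute against freed memory, then the ring is released, via the mmap kref for user CQs (line 305) or vfree() for kernel CQs (line 307), and the CQ struct is freed last. A sketch of that ordering, reusing model_cq3 from the create sketch:

    static void model_destroy_cq(struct model_cq3 *cq)
    {
        /* kernel: kthread_flush_work(&cq->comptask) comes first (line 300). */
        if (cq->ip)
            free(cq->ip);       /* kernel: kref_put() drops the mmap ref;
                                 * the release callback owns the user ring */
        free(cq->queue);        /* kernel: vfree(cq->queue), !ip path only */
        free(cq);               /* the CQ struct itself goes last */
    }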
325 struct rvt_cq *cq = ibcq_to_rvtcq(ibcq); in rvt_req_notify_cq() local
329 spin_lock_irqsave(&cq->lock, flags); in rvt_req_notify_cq()
334 if (cq->notify != IB_CQ_NEXT_COMP) in rvt_req_notify_cq()
335 cq->notify = notify_flags & IB_CQ_SOLICITED_MASK; in rvt_req_notify_cq()
338 cq->queue->head != cq->queue->tail) in rvt_req_notify_cq()
341 spin_unlock_irqrestore(&cq->lock, flags); in rvt_req_notify_cq()
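rvt_req_notify_cq() re-arms the CQ but never downgrades an armed IB_CQ_NEXT_COMP to IB_CQ_SOLICITED (lines 334-335), and returns 1 when IB_CQ_REPORT_MISSED_EVENTS is requested and completions are already queued (line 338). A sketch of that latch; the MODEL_CQ_* flag values mirror, but are not, the ib_cq_notify_flags bits:

    /* Assumed flag values, patterned after ib_cq_notify_flags. */
    #define MODEL_CQ_SOLICITED            (1 << 0)
    #define MODEL_CQ_NEXT_COMP            (1 << 1)
    #define MODEL_CQ_SOLICITED_MASK       (MODEL_CQ_SOLICITED | MODEL_CQ_NEXT_COMP)
    #define MODEL_CQ_REPORT_MISSED_EVENTS (1 << 2)

    struct model_cq5 {
        int notify;
        unsigned int head, tail;        /* ring indices */
    };

    static int model_req_notify(struct model_cq5 *cq, int notify_flags)
    {
        int ret = 0;

        /* Don't downgrade NEXT_COMP to SOLICITED (lines 334-335). */
        if (cq->notify != MODEL_CQ_NEXT_COMP)
            cq->notify = notify_flags & MODEL_CQ_SOLICITED_MASK;

        /* Report already-queued completions if asked to (line 338). */
        if ((notify_flags & MODEL_CQ_REPORT_MISSED_EVENTS) &&
            cq->head != cq->tail)
            ret = 1;

        return ret;
    }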
354 struct rvt_cq *cq = ibcq_to_rvtcq(ibcq); in rvt_resize_cq() local
360 struct rvt_dev_info *rdi = cq->rdi; in rvt_resize_cq()
388 spin_lock_irq(&cq->lock); in rvt_resize_cq()
393 old_wc = cq->queue; in rvt_resize_cq()
395 if (head > (u32)cq->ibcq.cqe) in rvt_resize_cq()
396 head = (u32)cq->ibcq.cqe; in rvt_resize_cq()
398 if (tail > (u32)cq->ibcq.cqe) in rvt_resize_cq()
399 tail = (u32)cq->ibcq.cqe; in rvt_resize_cq()
401 n = cq->ibcq.cqe + 1 + head - tail; in rvt_resize_cq()
409 if (cq->ip) in rvt_resize_cq()
413 if (tail == (u32)cq->ibcq.cqe) in rvt_resize_cq()
418 cq->ibcq.cqe = cqe; in rvt_resize_cq()
421 cq->queue = wc; in rvt_resize_cq()
422 spin_unlock_irq(&cq->lock); in rvt_resize_cq()
426 if (cq->ip) { in rvt_resize_cq()
427 struct rvt_mmap_info *ip = cq->ip; in rvt_resize_cq()
451 spin_unlock_irq(&cq->lock); in rvt_resize_cq()
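rvt_resize_cq() clamps the user-writable head and tail, computes the ring occupancy, with "cqe + 1 + head - tail" covering the wrapped case (line 401), and copies the live entries to the front of the new ring before swapping cq->queue under the lock. A sketch of that math and copy loop, reusing the wc_entry stand-in from the first sketch:

    /* Returns the new occupancy (the new head), or -1 if new_cqe is too
     * small; the new tail is implicitly 0. */
    static int model_resize_copy(unsigned int old_cqe, unsigned int new_cqe,
                                 unsigned int head, unsigned int tail,
                                 const struct wc_entry *old_q,
                                 struct wc_entry *new_q)
    {
        unsigned int n, i;

        /* head/tail live in shared memory: clamp first (lines 395-399). */
        if (head > old_cqe)
            head = old_cqe;
        if (tail > old_cqe)
            tail = old_cqe;

        /* Occupancy of a ring with old_cqe + 1 slots (line 401). */
        if (head < tail)
            n = old_cqe + 1 + head - tail;
        else
            n = head - tail;
        if (n > new_cqe)
            return -1;                  /* kernel returns -EINVAL */

        /* Copy live entries to the front of the new ring. */
        for (i = 0; tail != head; i++) {
            new_q[i] = old_q[tail];
            if (tail == old_cqe)        /* wrap (line 413) */
                tail = 0;
            else
                tail++;
        }
        return (int)n;
    }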
470 struct rvt_cq *cq = ibcq_to_rvtcq(ibcq); in rvt_poll_cq() local
477 if (cq->ip) in rvt_poll_cq()
480 spin_lock_irqsave(&cq->lock, flags); in rvt_poll_cq()
482 wc = cq->queue; in rvt_poll_cq()
484 if (tail > (u32)cq->ibcq.cqe) in rvt_poll_cq()
485 tail = (u32)cq->ibcq.cqe; in rvt_poll_cq()
491 if (tail >= cq->ibcq.cqe) in rvt_poll_cq()
498 spin_unlock_irqrestore(&cq->lock, flags); in rvt_poll_cq()
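rvt_poll_cq() is the consumer: it refuses user-mapped CQs (the cq->ip check at line 477, since userspace polls those through the mmap'ed ring), clamps the user-writable tail, copies up to num_entries completions, wraps past the last slot (line 491), and writes tail back. A sketch reusing struct model_cq from the first block (which, as a simplification, has no ip field, so that check is omitted):

    static int model_poll_cq(struct model_cq *cq, int num_entries,
                             struct wc_entry *entry)
    {
        unsigned int tail = cq->tail;
        int npolled;

        if (tail > cq->cqe)             /* clamp, as at lines 484-485 */
            tail = cq->cqe;

        for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
            if (tail == cq->head)
                break;                  /* ring empty */
            *entry = cq->queue[tail];
            if (tail >= cq->cqe)        /* wrap (line 491) */
                tail = 0;
            else
                tail++;
        }
        cq->tail = tail;                /* publish the consumed entries */
        return npolled;
    }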