
Lines Matching refs:cq

46 struct c2_cq *cq; in c2_cq_get() local
50 cq = c2dev->qptr_array[cqn]; in c2_cq_get()
51 if (!cq) { in c2_cq_get()
55 atomic_inc(&cq->refcount); in c2_cq_get()
57 return cq; in c2_cq_get()
60 static void c2_cq_put(struct c2_cq *cq) in c2_cq_put() argument
62 if (atomic_dec_and_test(&cq->refcount)) in c2_cq_put()
63 wake_up(&cq->wait); in c2_cq_put()
68 struct c2_cq *cq; in c2_cq_event() local
70 cq = c2_cq_get(c2dev, mq_index); in c2_cq_event()
71 if (!cq) { in c2_cq_event()
76 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); in c2_cq_event()
77 c2_cq_put(cq); in c2_cq_event()
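
Taken together, the c2_cq_get()/c2_cq_put() and c2_cq_event() fragments above show the lookup-and-pin pattern the driver uses around completion dispatch: look the CQ up by index in c2dev->qptr_array[], take a reference, invoke the consumer's comp_handler, and drop the reference, with the final put waking anyone blocked waiting to destroy the CQ. A rough userspace sketch of that pattern follows; the names (my_cq, cq_get, cq_put, cq_event) are invented here and are not the driver's:

    #include <stdatomic.h>
    #include <stddef.h>

    struct my_cq {
        atomic_int   refcount;     /* pinned by cq_get(), released by cq_put() */
        int          destroy_ok;   /* stands in for wake_up(&cq->wait)         */
        void       (*comp_handler)(struct my_cq *cq, void *ctx);
        void        *cq_context;
    };

    /* Look up a CQ by index and pin it, as c2_cq_get() does with qptr_array[cqn]. */
    static struct my_cq *cq_get(struct my_cq **table, size_t cqn)
    {
        struct my_cq *cq = table[cqn];

        if (!cq)
            return NULL;
        atomic_fetch_add(&cq->refcount, 1);
        return cq;
    }

    /* Drop a reference; the last put lets the destroy path proceed. */
    static void cq_put(struct my_cq *cq)
    {
        if (atomic_fetch_sub(&cq->refcount, 1) == 1)
            cq->destroy_ok = 1;    /* driver: wake_up(&cq->wait) */
    }

    /* Event dispatch: hold the reference across the completion callback. */
    static void cq_event(struct my_cq **table, size_t mq_index)
    {
        struct my_cq *cq = cq_get(table, mq_index);

        if (!cq)
            return;                /* driver bails when the CQ is not found */
        cq->comp_handler(cq, cq->cq_context);
        cq_put(cq);
    }

Holding the reference across the callback matters because c2_free_cq() (further down) waits for the refcount to drain before tearing the queue down, so the handler can never run against freed memory.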
82 struct c2_cq *cq; in c2_cq_clean() local
85 cq = c2_cq_get(c2dev, mq_index); in c2_cq_clean()
86 if (!cq) in c2_cq_clean()
89 spin_lock_irq(&cq->lock); in c2_cq_clean()
90 q = &cq->mq; in c2_cq_clean()
104 spin_unlock_irq(&cq->lock); in c2_cq_clean()
105 c2_cq_put(cq); in c2_cq_clean()
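
The c2_cq_clean() fragment follows the same discipline: pin the CQ with c2_cq_get(), take cq->lock with interrupts disabled, work on cq->mq (the loop body is not among the matching lines), then unlock and drop the reference. Judging by the name and the locking, this is the path that scrubs stale completion messages out of the queue, presumably when an associated resource is torn down.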
130 struct c2_cq *cq, struct ib_wc *entry) in c2_poll_one() argument
136 ce = (struct c2wr_ce *) c2_mq_consume(&cq->mq); in c2_poll_one()
148 c2_mq_free(&cq->mq); in c2_poll_one()
149 ce = (struct c2wr_ce *) c2_mq_consume(&cq->mq); in c2_poll_one()
194 c2_mq_free(&cq->mq); in c2_poll_one()
202 struct c2_cq *cq = to_c2cq(ibcq); in c2_poll_cq() local
206 spin_lock_irqsave(&cq->lock, flags); in c2_poll_cq()
210 err = c2_poll_one(c2dev, cq, entry + npolled); in c2_poll_cq()
215 spin_unlock_irqrestore(&cq->lock, flags); in c2_poll_cq()
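
c2_poll_one() consumes one completion message from cq->mq (one branch also frees a stale entry with c2_mq_free() and consumes the next message), and c2_poll_cq() wraps it in the usual poll loop: take cq->lock, call c2_poll_one() once per requested work completion, stop on error or when the queue runs dry, and release the lock. A minimal sketch of that loop shape, with invented names (my_cq, my_wc, poll_one, poll_cq) and a plain mutex standing in for the driver's spinlock:

    #include <pthread.h>
    #include <stddef.h>

    struct my_wc { int status; };         /* stand-in for struct ib_wc */

    struct my_cq {
        pthread_mutex_t lock;             /* driver: cq->lock, taken with spin_lock_irqsave() */
        size_t          pending;          /* stand-in for the cq->mq message queue            */
    };

    /* Consume one completion; 0 on success, nonzero when the queue is empty.
     * The real c2_poll_one() pulls a c2wr_ce message off cq->mq and fills an ib_wc. */
    static int poll_one(struct my_cq *cq, struct my_wc *entry)
    {
        if (!cq->pending)
            return 1;
        cq->pending--;
        entry->status = 0;
        return 0;
    }

    /* Poll up to num_entries completions, mirroring the loop shape of c2_poll_cq(). */
    static int poll_cq(struct my_cq *cq, int num_entries, struct my_wc *entries)
    {
        int npolled, err;

        pthread_mutex_lock(&cq->lock);
        for (npolled = 0; npolled < num_entries; npolled++) {
            err = poll_one(cq, entries + npolled);
            if (err)
                break;
        }
        pthread_mutex_unlock(&cq->lock);

        return npolled;
    }

Returning npolled is a sketch-level choice; the exact return convention of c2_poll_cq() is not visible in the matching lines.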
223 struct c2_cq *cq; in c2_arm_cq() local
227 cq = to_c2cq(ibcq); in c2_arm_cq()
228 shared = cq->mq.peer; in c2_arm_cq()
247 spin_lock_irqsave(&cq->lock, flags); in c2_arm_cq()
248 ret = !c2_mq_empty(&cq->mq); in c2_arm_cq()
249 spin_unlock_irqrestore(&cq->lock, flags); in c2_arm_cq()
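
In c2_arm_cq(), the notification request itself goes through cq->mq.peer, the adapter page ioremapped in c2_init_cq(); those writes are not among the matching lines. What is visible is the re-check after arming: take cq->lock and test !c2_mq_empty(&cq->mq), so the caller can learn whether completions were already queued rather than waiting on an event for work that has already arrived. A tiny sketch of that arm-then-recheck shape, again with invented names (my_cq, arm_cq):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct my_cq {
        pthread_mutex_t lock;
        size_t          pending;          /* stand-in for the cq->mq message count     */
        volatile int   *doorbell;         /* stand-in for the mapped cq->mq.peer page  */
    };

    /* Request a notification, then report whether work is already waiting. */
    static bool arm_cq(struct my_cq *cq, int notify_type)
    {
        bool already_pending;

        *cq->doorbell = notify_type;      /* driver: writes into the shared peer page  */

        pthread_mutex_lock(&cq->lock);
        already_pending = (cq->pending != 0);  /* driver: ret = !c2_mq_empty(&cq->mq)  */
        pthread_mutex_unlock(&cq->lock);

        return already_pending;
    }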
285 struct c2_ucontext *ctx, struct c2_cq *cq) in c2_init_cq() argument
295 cq->ibcq.cqe = entries - 1; in c2_init_cq()
296 cq->is_kernel = !ctx; in c2_init_cq()
299 cq->mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool, in c2_init_cq()
300 &cq->mq.shared_dma, GFP_KERNEL); in c2_init_cq()
301 if (!cq->mq.shared) in c2_init_cq()
305 err = c2_alloc_cq_buf(c2dev, &cq->mq, entries + 1, C2_CQ_MSG_SIZE); in c2_init_cq()
319 wr.msg_size = cpu_to_be32(cq->mq.msg_size); in c2_init_cq()
320 wr.depth = cpu_to_be32(cq->mq.q_size); in c2_init_cq()
321 wr.shared_ht = cpu_to_be64(cq->mq.shared_dma); in c2_init_cq()
322 wr.msg_pool = cpu_to_be64(cq->mq.host_dma); in c2_init_cq()
323 wr.user_context = (u64) (unsigned long) (cq); in c2_init_cq()
346 cq->adapter_handle = reply->cq_handle; in c2_init_cq()
347 cq->mq.index = be32_to_cpu(reply->mq_index); in c2_init_cq()
350 cq->mq.peer = ioremap_nocache(peer_pa, PAGE_SIZE); in c2_init_cq()
351 if (!cq->mq.peer) { in c2_init_cq()
359 spin_lock_init(&cq->lock); in c2_init_cq()
360 atomic_set(&cq->refcount, 1); in c2_init_cq()
361 init_waitqueue_head(&cq->wait); in c2_init_cq()
367 cq->cqn = cq->mq.index; in c2_init_cq()
368 c2dev->qptr_array[cq->cqn] = cq; in c2_init_cq()
377 c2_free_cq_buf(c2dev, &cq->mq); in c2_init_cq()
379 c2_free_mqsp(cq->mq.shared); in c2_init_cq()
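
Read in order, the c2_init_cq() fragments lay out the setup sequence: size the CQ (ibcq.cqe = entries - 1, is_kernel set when there is no user context), allocate the shared MQ pointer from the kernel pool and the CQ buffer itself (entries + 1 messages of C2_CQ_MSG_SIZE), fill in a create-CQ work request for the adapter (message size, depth, the shared-pointer and message-pool DMA addresses, and the cq pointer stashed as user_context; the posting itself is outside the matching lines), then consume the reply: record the adapter handle and MQ index, ioremap the peer page that c2_arm_cq() later writes to, initialize the lock, refcount and wait queue, and finally publish the CQ in c2dev->qptr_array[] under its cqn. The error path at the bottom unwinds in reverse, freeing the CQ buffer and then the shared MQ pointer.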
384 void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq) in c2_free_cq() argument
395 c2dev->qptr_array[cq->mq.index] = NULL; in c2_free_cq()
396 atomic_dec(&cq->refcount); in c2_free_cq()
399 wait_event(cq->wait, !atomic_read(&cq->refcount)); in c2_free_cq()
410 wr.cq_handle = cq->adapter_handle; in c2_free_cq()
430 if (cq->is_kernel) { in c2_free_cq()
431 c2_free_cq_buf(c2dev, &cq->mq); in c2_free_cq()
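
c2_free_cq() mirrors that setup in reverse: the CQ is first unhooked from c2dev->qptr_array[] and its initial reference dropped, then the code sleeps on cq->wait until every outstanding c2_cq_get() reference has been put, so no event or poll path can still be using the structure. Only after that does it hand cq->adapter_handle back to the adapter in a destroy work request and, for kernel CQs (cq->is_kernel), free the CQ buffer.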