Searched refs:cq (Results 1 – 12 of 12) sorted by relevance

/net/9p/
trans_rdma.c
82 struct ib_cq *cq; member
293 recv_done(struct ib_cq *cq, struct ib_wc *wc) in recv_done() argument
295 struct p9_client *client = cq->cq_context; in recv_done()
344 send_done(struct ib_cq *cq, struct ib_wc *wc) in send_done() argument
346 struct p9_client *client = cq->cq_context; in send_done()
376 if (rdma->cq && !IS_ERR(rdma->cq)) in rdma_destroy_trans()
377 ib_free_cq(rdma->cq); in rdma_destroy_trans()
700 rdma->cq = ib_alloc_cq_any(rdma->cm_id->device, client, in rdma_create_trans()
703 if (IS_ERR(rdma->cq)) in rdma_create_trans()
721 qp_attr.send_cq = rdma->cq; in rdma_create_trans()
[all …]
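
The trans_rdma.c hits above cover the whole CQ lifecycle in one file: a single CQ is allocated with the p9 client as its context, both QP directions attach to it, the send/recv handlers read that context back, and teardown guards against a stored ERR_PTR. A condensed sketch of that shape; the depth arithmetic and the IB_POLL_SOFTIRQ polling context are assumptions, not shown in the hits:

    #include <rdma/ib_verbs.h>

    static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
    {
            /* cq_context is the private pointer given to ib_alloc_cq_any() */
            struct p9_client *client = cq->cq_context;
            /* ... look up the request behind wc->wr_cqe and complete it ... */
    }

    /* setup (rdma_create_trans): one CQ shared by both QP directions */
    rdma->cq = ib_alloc_cq_any(rdma->cm_id->device, client,
                               opts.sq_depth + opts.rq_depth + 1,
                               IB_POLL_SOFTIRQ);
    if (IS_ERR(rdma->cq))
            goto error;
    qp_attr.send_cq = rdma->cq;
    qp_attr.recv_cq = rdma->cq;

    /* teardown (rdma_destroy_trans): rdma->cq may still hold an ERR_PTR */
    if (rdma->cq && !IS_ERR(rdma->cq))
            ib_free_cq(rdma->cq);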
/net/sunrpc/
cache.c
955 struct cache_queue *cq; in cache_poll() local
967 for (cq= &rp->q; &cq->list != &cd->queue; in cache_poll()
968 cq = list_entry(cq->list.next, struct cache_queue, list)) in cache_poll()
969 if (!cq->reader) { in cache_poll()
983 struct cache_queue *cq; in cache_ioctl() local
993 for (cq= &rp->q; &cq->list != &cd->queue; in cache_ioctl()
994 cq = list_entry(cq->list.next, struct cache_queue, list)) in cache_ioctl()
995 if (!cq->reader) { in cache_ioctl()
997 container_of(cq, struct cache_request, q); in cache_ioctl()
1041 struct cache_queue *cq; in cache_release() local
[all …]
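
All three cache.c hits walk the same structure: readers and pending requests share one cache_detail queue, each reader holds its place with a marker entry, and a scan starts at that marker and stops when it wraps around to the list head. A condensed sketch of that walk, with locking and the surrounding poll/ioctl logic omitted:

    struct cache_queue *cq;
    struct cache_request *rq = NULL;

    /* start at this reader's own marker (rp->q), stop at the list head */
    for (cq = &rp->q; &cq->list != &cd->queue;
         cq = list_entry(cq->list.next, struct cache_queue, list)) {
            if (!cq->reader) {
                    /* first non-reader entry is the next pending request */
                    rq = container_of(cq, struct cache_request, q);
                    break;
            }
    }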
/net/sunrpc/xprtrdma/
frwr_ops.c
366 static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc) in frwr_wc_fastreg() argument
374 rpcrdma_flush_disconnect(cq->cq_context, wc); in frwr_wc_fastreg()
461 static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc) in frwr_wc_localinv() argument
470 rpcrdma_flush_disconnect(cq->cq_context, wc); in frwr_wc_localinv()
480 static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc) in frwr_wc_localinv_wake() argument
490 rpcrdma_flush_disconnect(cq->cq_context, wc); in frwr_wc_localinv_wake()
576 static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc) in frwr_wc_localinv_done() argument
592 rpcrdma_flush_disconnect(cq->cq_context, wc); in frwr_wc_localinv_done()
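
Each frwr_ops.c handler follows the modern ib_cqe convention: the work completion's wr_cqe points back into the object that posted the work request, and cq->cq_context carries the transport for flush handling. A sketch of that shape; the embedded-cqe field name is an assumption, not taken from the hits:

    static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
    {
            struct ib_cqe *cqe = wc->wr_cqe;
            /* recover the MR that embeds this cqe; field name illustrative */
            struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);

            /* ... mark the invalidation complete for @mr ... */

            /* cq_context was set to the rpcrdma_xprt when the CQ was made */
            rpcrdma_flush_disconnect(cq->cq_context, wc);
    }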
svc_rdma_recvfrom.c
109 static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc);
321 static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc) in svc_rdma_wc_receive() argument
323 struct svcxprt_rdma *rdma = cq->cq_context; in svc_rdma_wc_receive()
svc_rdma_rw.c
17 static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc);
18 static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc);
261 static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc) in svc_rdma_write_done() argument
318 static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc) in svc_rdma_wc_read_done() argument
svc_rdma_sendto.c
114 static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc);
276 static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc) in svc_rdma_wc_send() argument
278 struct svcxprt_rdma *rdma = cq->cq_context; in svc_rdma_wc_send()
verbs.c
152 static void rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc) in rpcrdma_wc_send() argument
157 struct rpcrdma_xprt *r_xprt = cq->cq_context; in rpcrdma_wc_send()
171 static void rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc) in rpcrdma_wc_receive() argument
176 struct rpcrdma_xprt *r_xprt = cq->cq_context; in rpcrdma_wc_receive()
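
The svc_rdma_* and verbs.c handlers all open the same way: cq->cq_context is the owning transport, so every completion path reaches its svcxprt_rdma or rpcrdma_xprt without any lookup. That only works because the transport was passed as the private pointer when the CQ was created. A hedged sketch; the allocation site is not among the hits and the sc_* field names are assumptions:

    /* allocation: stash the transport as the CQ context */
    rdma->sc_sq_cq = ib_alloc_cq_any(dev, rdma, rdma->sc_sq_depth,
                                     IB_POLL_WORKQUEUE);

    /* completion: the same pointer comes back on every work completion */
    static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
    {
            struct svcxprt_rdma *rdma = cq->cq_context;

            /* ... release the send context; tear down the xprt on error ... */
    }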
/net/xdp/
xsk_buff_pool.c
78 pool->cq = xs->cq_tmp; in xp_create_and_assign_umem()
201 if (!pool->fq || !pool->cq) in xp_assign_dev_shared()
236 if (pool->cq) { in xp_release_deferred()
237 xskq_destroy(pool->cq); in xp_release_deferred()
238 pool->cq = NULL; in xp_release_deferred()
xsk_diag.c
73 if (!err && pool && pool->cq) in xsk_diag_put_umem()
74 err = xsk_diag_put_ring(pool->cq, in xsk_diag_put_umem()
xsk.c
313 xskq_prod_submit_n(pool->cq, nb_entries); in xsk_tx_completed()
347 if (xskq_prod_reserve_addr(pool->cq, desc->addr)) in xsk_tx_peek_desc()
404 nb_pkts = xskq_prod_reserve_addr_batch(pool->cq, descs, nb_pkts); in xsk_tx_peek_release_desc_batch()
432 xskq_prod_submit_addr(xs->pool->cq, addr); in xsk_destruct_skb()
562 if (xskq_prod_reserve(xs->pool->cq)) { in xsk_generic_xmit()
572 xskq_prod_cancel(xs->pool->cq); in xsk_generic_xmit()
582 xskq_prod_cancel(xs->pool->cq); in xsk_generic_xmit()
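
In /net/xdp/ the cq is the AF_XDP completion ring rather than an IB object, and the xsk.c hits trace one slot through a generic transmit: reserve before the frame is committed, cancel if the send fails, submit from the skb destructor once the data has left. Condensed from the hits, using the internal producer helpers from net/xdp/xsk_queue.h (error handling simplified):

    /* xsk_generic_xmit(): claim a completion-ring slot up front */
    if (xskq_prod_reserve(xs->pool->cq)) {
            /* ring full: stop transmitting for now */
            goto out;
    }
    /* ... build and send the skb ... */
    if (send_failed)                            /* illustrative error path */
            xskq_prod_cancel(xs->pool->cq);     /* return the unused slot */

    /* xsk_destruct_skb(): publish the buffer address to user space */
    xskq_prod_submit_addr(xs->pool->cq, addr);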
/net/rds/
ib_cm.c
247 static void rds_ib_cq_comp_handler_recv(struct ib_cq *cq, void *context) in rds_ib_cq_comp_handler_recv() argument
252 rdsdebug("conn %p cq %p\n", conn, cq); in rds_ib_cq_comp_handler_recv()
259 static void poll_scq(struct rds_ib_connection *ic, struct ib_cq *cq, in poll_scq() argument
265 while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) { in poll_scq()
303 static void poll_rcq(struct rds_ib_connection *ic, struct ib_cq *cq, in poll_rcq() argument
310 while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) { in poll_rcq()
375 static void rds_ib_cq_comp_handler_send(struct ib_cq *cq, void *context) in rds_ib_cq_comp_handler_send() argument
380 rdsdebug("conn %p cq %p\n", conn, cq); in rds_ib_cq_comp_handler_send()
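
RDS uses the older event-driven verbs API: the registered comp handlers just note that a CQ fired, and the actual draining happens later in poll_scq()/poll_rcq() via ib_poll_cq() in batches of RDS_IB_WC_MAX completions. A condensed sketch of the drain loop; the per-completion handler call is illustrative:

    static void poll_scq(struct rds_ib_connection *ic, struct ib_cq *cq,
                         struct ib_wc *wcs)
    {
            int nr, i;

            /* drain in batches until the CQ is empty */
            while ((nr = ib_poll_cq(cq, RDS_IB_WC_MAX, wcs)) > 0) {
                    for (i = 0; i < nr; i++)
                            rds_ib_send_cqe_handler(ic, &wcs[i]);
            }
    }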
/net/ipv4/
ipconfig.c
1399 char *cp, *cq; in root_nfs_parse_addr() local
1401 cp = cq = name; in root_nfs_parse_addr()
1405 if (cp == cq || cp - cq > 3) in root_nfs_parse_addr()
1411 cq = cp; in root_nfs_parse_addr()
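
The ipconfig.c hit is the odd one out: here cq is just a character pointer marking the start of the current dotted-quad component while cp scans ahead, so cp - cq is the component length and the 1..3 range check rejects malformed octets. A standalone userspace sketch of that idiom (function name hypothetical; the kernel function also converts and strips the address):

    #include <ctype.h>

    static int looks_like_ipv4(const char *name)
    {
            const char *cp, *cq;
            int dots = 0;

            cp = cq = name;
            while (*cp) {
                    if (*cp == '.') {
                            if (cp == cq || cp - cq > 3)
                                    return 0;   /* empty or over-long octet */
                            cq = cp + 1;        /* next octet starts past the dot */
                            dots++;
                    } else if (!isdigit((unsigned char)*cp)) {
                            return 0;
                    }
                    cp++;
            }
            return dots == 3 && cp != cq && cp - cq <= 3;
    }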