
Searched for refs:cq (Results 1 – 25 of 79), sorted by relevance

/drivers/net/mlx4/
en_cq.c
40 static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event) in mlx4_en_cq_event() argument
47 struct mlx4_en_cq *cq, in mlx4_en_create_cq() argument
53 cq->size = entries; in mlx4_en_create_cq()
55 cq->buf_size = cq->size * sizeof(struct mlx4_cqe); in mlx4_en_create_cq()
56 cq->vector = ring % mdev->dev->caps.num_comp_vectors; in mlx4_en_create_cq()
58 cq->buf_size = sizeof(struct mlx4_cqe); in mlx4_en_create_cq()
59 cq->vector = 0; in mlx4_en_create_cq()
62 cq->ring = ring; in mlx4_en_create_cq()
63 cq->is_tx = mode; in mlx4_en_create_cq()
64 spin_lock_init(&cq->lock); in mlx4_en_create_cq()
[all …]
cq.c
78 struct mlx4_cq *cq; in mlx4_cq_completion() local
80 cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree, in mlx4_cq_completion()
82 if (!cq) { in mlx4_cq_completion()
87 ++cq->arm_sn; in mlx4_cq_completion()
89 cq->comp(cq); in mlx4_cq_completion()
95 struct mlx4_cq *cq; in mlx4_cq_event() local
99 cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1)); in mlx4_cq_event()
100 if (cq) in mlx4_cq_event()
101 atomic_inc(&cq->refcount); in mlx4_cq_event()
105 if (!cq) { in mlx4_cq_event()
[all …]
en_netdev.c
329 struct mlx4_en_cq *cq; in mlx4_en_netpoll() local
334 cq = &priv->rx_cq[i]; in mlx4_en_netpoll()
335 spin_lock_irqsave(&cq->lock, flags); in mlx4_en_netpoll()
336 napi_synchronize(&cq->napi); in mlx4_en_netpoll()
337 mlx4_en_process_rx_cq(dev, cq, 0); in mlx4_en_netpoll()
338 spin_unlock_irqrestore(&cq->lock, flags); in mlx4_en_netpoll()
372 struct mlx4_en_cq *cq; in mlx4_en_set_default_moderation() local
389 cq = &priv->rx_cq[i]; in mlx4_en_set_default_moderation()
390 cq->moder_cnt = priv->rx_frames; in mlx4_en_set_default_moderation()
391 cq->moder_time = priv->rx_usecs; in mlx4_en_set_default_moderation()
[all …]
en_tx.c
152 int cq, int srqn) in mlx4_en_activate_tx_ring() argument
157 ring->cqn = cq; in mlx4_en_activate_tx_ring()
328 static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq) in mlx4_en_process_tx_cq() argument
331 struct mlx4_cq *mcq = &cq->mcq; in mlx4_en_process_tx_cq()
332 struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring]; in mlx4_en_process_tx_cq()
333 struct mlx4_cqe *cqe = cq->buf; in mlx4_en_process_tx_cq()
407 struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq); in mlx4_en_tx_irq() local
408 struct mlx4_en_priv *priv = netdev_priv(cq->dev); in mlx4_en_tx_irq()
409 struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring]; in mlx4_en_tx_irq()
413 mlx4_en_process_tx_cq(cq->dev, cq); in mlx4_en_tx_irq()
[all …]
/drivers/infiniband/hw/mlx4/
cq.c
40 static void mlx4_ib_cq_comp(struct mlx4_cq *cq) in mlx4_ib_cq_comp() argument
42 struct ib_cq *ibcq = &to_mibcq(cq)->ibcq; in mlx4_ib_cq_comp()
46 static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type) in mlx4_ib_cq_event() argument
53 "on CQ %06x\n", type, cq->cqn); in mlx4_ib_cq_event()
57 ibcq = &to_mibcq(cq)->ibcq; in mlx4_ib_cq_event()
61 event.element.cq = ibcq; in mlx4_ib_cq_event()
71 static void *get_cqe(struct mlx4_ib_cq *cq, int n) in get_cqe() argument
73 return get_cqe_from_buf(&cq->buf, n); in get_cqe()
76 static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n) in get_sw_cqe() argument
78 struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe()
[all …]
/drivers/infiniband/hw/ipath/
ipath_cq.c
47 void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited) in ipath_cq_enter() argument
54 spin_lock_irqsave(&cq->lock, flags); in ipath_cq_enter()
60 wc = cq->queue; in ipath_cq_enter()
62 if (head >= (unsigned) cq->ibcq.cqe) { in ipath_cq_enter()
63 head = cq->ibcq.cqe; in ipath_cq_enter()
68 spin_unlock_irqrestore(&cq->lock, flags); in ipath_cq_enter()
69 if (cq->ibcq.event_handler) { in ipath_cq_enter()
72 ev.device = cq->ibcq.device; in ipath_cq_enter()
73 ev.element.cq = &cq->ibcq; in ipath_cq_enter()
75 cq->ibcq.event_handler(&ev, cq->ibcq.cq_context); in ipath_cq_enter()
[all …]
/drivers/infiniband/hw/mthca/
mthca_cq.c
168 static inline struct mthca_cqe *get_cqe(struct mthca_cq *cq, int entry) in get_cqe() argument
170 return get_cqe_from_buf(&cq->buf, entry); in get_cqe()
178 static inline struct mthca_cqe *next_cqe_sw(struct mthca_cq *cq) in next_cqe_sw() argument
180 return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe)); in next_cqe_sw()
203 static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq, in update_cons_index() argument
207 *cq->set_ci_db = cpu_to_be32(cq->cons_index); in update_cons_index()
210 mthca_write64(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn, incr - 1, in update_cons_index()
223 struct mthca_cq *cq; in mthca_cq_completion() local
225 cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1)); in mthca_cq_completion()
227 if (!cq) { in mthca_cq_completion()
[all …]
mthca_provider.c
677 struct mthca_cq *cq; in mthca_create_cq() local
701 cq = kmalloc(sizeof *cq, GFP_KERNEL); in mthca_create_cq()
702 if (!cq) { in mthca_create_cq()
708 cq->buf.mr.ibmr.lkey = ucmd.lkey; in mthca_create_cq()
709 cq->set_ci_db_index = ucmd.set_db_index; in mthca_create_cq()
710 cq->arm_db_index = ucmd.arm_db_index; in mthca_create_cq()
719 cq); in mthca_create_cq()
723 if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) { in mthca_create_cq()
724 mthca_free_cq(to_mdev(ibdev), cq); in mthca_create_cq()
728 cq->resize_buf = NULL; in mthca_create_cq()
[all …]
/drivers/infiniband/hw/amso1100/
c2_cq.c
46 struct c2_cq *cq; in c2_cq_get() local
50 cq = c2dev->qptr_array[cqn]; in c2_cq_get()
51 if (!cq) { in c2_cq_get()
55 atomic_inc(&cq->refcount); in c2_cq_get()
57 return cq; in c2_cq_get()
60 static void c2_cq_put(struct c2_cq *cq) in c2_cq_put() argument
62 if (atomic_dec_and_test(&cq->refcount)) in c2_cq_put()
63 wake_up(&cq->wait); in c2_cq_put()
68 struct c2_cq *cq; in c2_cq_event() local
70 cq = c2_cq_get(c2dev, mq_index); in c2_cq_event()
[all …]
/drivers/net/enic/
vnic_cq.c
28 void vnic_cq_free(struct vnic_cq *cq) in vnic_cq_free() argument
30 vnic_dev_free_desc_ring(cq->vdev, &cq->ring); in vnic_cq_free()
32 cq->ctrl = NULL; in vnic_cq_free()
35 int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index, in vnic_cq_alloc() argument
40 cq->index = index; in vnic_cq_alloc()
41 cq->vdev = vdev; in vnic_cq_alloc()
43 cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index); in vnic_cq_alloc()
44 if (!cq->ctrl) { in vnic_cq_alloc()
49 err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size); in vnic_cq_alloc()
56 void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable, in vnic_cq_init() argument
[all …]
vnic_cq.h
62 static inline unsigned int vnic_cq_service(struct vnic_cq *cq, in vnic_cq_service() argument
73 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + in vnic_cq_service()
74 cq->ring.desc_size * cq->to_clean); in vnic_cq_service()
78 while (color != cq->last_color) { in vnic_cq_service()
80 if ((*q_service)(cq->vdev, cq_desc, type, in vnic_cq_service()
84 cq->to_clean++; in vnic_cq_service()
85 if (cq->to_clean == cq->ring.desc_count) { in vnic_cq_service()
86 cq->to_clean = 0; in vnic_cq_service()
87 cq->last_color = cq->last_color ? 0 : 1; in vnic_cq_service()
90 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + in vnic_cq_service()
[all …]
/drivers/infiniband/hw/cxgb3/
cxio_hal.c
70 int cxio_hal_cq_op(struct cxio_rdev *rdev_p, struct t3_cq *cq, in cxio_hal_cq_op() argument
78 setup.id = cq->cqid; in cxio_hal_cq_op()
91 if (Q_PTR2IDX((cq->rptr), cq->size_log2) != ret) { in cxio_hal_cq_op()
94 rptr = cq->rptr; in cxio_hal_cq_op()
100 while (Q_PTR2IDX((rptr+1), cq->size_log2) != ret) in cxio_hal_cq_op()
108 cqe = cq->queue + Q_PTR2IDX(rptr, cq->size_log2); in cxio_hal_cq_op()
109 while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) { in cxio_hal_cq_op()
158 int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq) in cxio_create_cq() argument
161 int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe); in cxio_create_cq()
163 cq->cqid = cxio_hal_get_cqid(rdev_p->rscp); in cxio_create_cq()
[all …]
cxio_wr.h
724 static inline struct t3_cqe *cxio_next_hw_cqe(struct t3_cq *cq) in cxio_next_hw_cqe() argument
728 cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2)); in cxio_next_hw_cqe()
729 if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe)) in cxio_next_hw_cqe()
734 static inline struct t3_cqe *cxio_next_sw_cqe(struct t3_cq *cq) in cxio_next_sw_cqe() argument
738 if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) { in cxio_next_sw_cqe()
739 cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2)); in cxio_next_sw_cqe()
745 static inline struct t3_cqe *cxio_next_cqe(struct t3_cq *cq) in cxio_next_cqe() argument
749 if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) { in cxio_next_cqe()
750 cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2)); in cxio_next_cqe()
753 cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2)); in cxio_next_cqe()
[all …]
cxio_hal.h
147 int cxio_hal_cq_op(struct cxio_rdev *rdev, struct t3_cq *cq,
149 int cxio_create_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
150 int cxio_destroy_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
151 int cxio_resize_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
158 int cxio_peek_cq(struct t3_wq *wr, struct t3_cq *cq, int opcode);
179 int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count);
180 int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count);
181 void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count);
182 void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count);
183 void cxio_flush_hw_cq(struct t3_cq *cq);
[all …]
iwch_provider.c
136 remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid); in iwch_destroy_cq()
140 cxio_destroy_cq(&chp->rhp->rdev, &chp->cq); in iwch_destroy_cq()
186 chp->cq.size_log2 = ilog2(entries); in iwch_create_cq()
188 if (cxio_create_cq(&rhp->rdev, &chp->cq)) { in iwch_create_cq()
193 chp->ibcq.cqe = 1 << chp->cq.size_log2; in iwch_create_cq()
197 insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid); in iwch_create_cq()
207 uresp.cqid = chp->cq.cqid; in iwch_create_cq()
208 uresp.size_log2 = chp->cq.size_log2; in iwch_create_cq()
219 mm->addr = virt_to_phys(chp->cq.queue); in iwch_create_cq()
225 chp->cq.cqid, chp, (1 << chp->cq.size_log2), in iwch_create_cq()
[all …]
/drivers/infiniband/hw/ehca/
ehca_cq.c
53 int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp) in ehca_cq_assign_qp() argument
59 spin_lock_irqsave(&cq->spinlock, flags); in ehca_cq_assign_qp()
60 hlist_add_head(&qp->list_entries, &cq->qp_hashtab[key]); in ehca_cq_assign_qp()
61 spin_unlock_irqrestore(&cq->spinlock, flags); in ehca_cq_assign_qp()
63 ehca_dbg(cq->ib_cq.device, "cq_num=%x real_qp_num=%x", in ehca_cq_assign_qp()
64 cq->cq_number, qp_num); in ehca_cq_assign_qp()
69 int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int real_qp_num) in ehca_cq_unassign_qp() argument
77 spin_lock_irqsave(&cq->spinlock, flags); in ehca_cq_unassign_qp()
78 hlist_for_each(iter, &cq->qp_hashtab[key]) { in ehca_cq_unassign_qp()
82 ehca_dbg(cq->ib_cq.device, in ehca_cq_unassign_qp()
[all …]
ehca_irq.c
74 static inline void comp_event_callback(struct ehca_cq *cq) in comp_event_callback() argument
76 if (!cq->ib_cq.comp_handler) in comp_event_callback()
79 spin_lock(&cq->cb_lock); in comp_event_callback()
80 cq->ib_cq.comp_handler(&cq->ib_cq, cq->ib_cq.cq_context); in comp_event_callback()
81 spin_unlock(&cq->cb_lock); in comp_event_callback()
108 struct ehca_cq *cq = (struct ehca_cq *)data; in print_error_data() local
112 cq->cq_number, resource); in print_error_data()
240 struct ehca_cq *cq; in cq_event_callback() local
244 cq = idr_find(&ehca_cq_idr, token); in cq_event_callback()
245 if (cq) in cq_event_callback()
[all …]
ehca_uverbs.c
159 static int ehca_mmap_cq(struct vm_area_struct *vma, struct ehca_cq *cq, in ehca_mmap_cq() argument
166 ehca_dbg(cq->ib_cq.device, "cq_num=%x fw", cq->cq_number); in ehca_mmap_cq()
167 ret = ehca_mmap_fw(vma, &cq->galpas, &cq->mm_count_galpa); in ehca_mmap_cq()
169 ehca_err(cq->ib_cq.device, in ehca_mmap_cq()
171 ret, cq->cq_number); in ehca_mmap_cq()
177 ehca_dbg(cq->ib_cq.device, "cq_num=%x queue", cq->cq_number); in ehca_mmap_cq()
178 ret = ehca_mmap_queue(vma, &cq->ipz_queue, &cq->mm_count_queue); in ehca_mmap_cq()
180 ehca_err(cq->ib_cq.device, in ehca_mmap_cq()
182 ret, cq->cq_number); in ehca_mmap_cq()
188 ehca_err(cq->ib_cq.device, "bad resource type=%x cq_num=%x", in ehca_mmap_cq()
[all …]
hipz_fns_core.h
75 static inline void hipz_update_feca(struct ehca_cq *cq, u32 nr_cqes) in hipz_update_feca() argument
77 hipz_galpa_store_cq(cq->galpas.kernel, cqx_feca, in hipz_update_feca()
81 static inline void hipz_set_cqx_n0(struct ehca_cq *cq, u32 value) in hipz_set_cqx_n0() argument
85 hipz_galpa_store_cq(cq->galpas.kernel, cqx_n0, in hipz_set_cqx_n0()
88 cqx_n0_reg = hipz_galpa_load_cq(cq->galpas.kernel, cqx_n0); in hipz_set_cqx_n0()
91 static inline void hipz_set_cqx_n1(struct ehca_cq *cq, u32 value) in hipz_set_cqx_n1() argument
95 hipz_galpa_store_cq(cq->galpas.kernel, cqx_n1, in hipz_set_cqx_n1()
97 cqx_n1_reg = hipz_galpa_load_cq(cq->galpas.kernel, cqx_n1); in hipz_set_cqx_n1()
Dehca_reqs.c631 static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc) in ehca_poll_cq_one() argument
634 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq); in ehca_poll_cq_one()
647 ehca_dbg(cq->device, "Completion queue is empty " in ehca_poll_cq_one()
663 ehca_err(cq->device, "cq_num=%x qp_num=%x " in ehca_poll_cq_one()
676 ehca_dbg(cq->device, in ehca_poll_cq_one()
696 ehca_dbg(cq->device, in ehca_poll_cq_one()
701 ehca_dbg(cq->device, in ehca_poll_cq_one()
743 ehca_warn(cq->device, "Double cqe on qp_num=%#x", in ehca_poll_cq_one()
766 ehca_err(cq->device, "Invalid cqe->OPType=%x cqe->status=%x " in ehca_poll_cq_one()
800 static int generate_flush_cqes(struct ehca_qp *my_qp, struct ib_cq *cq, in generate_flush_cqes() argument
[all …]
/drivers/isdn/mISDN/
dsp_hwec.c
55 struct mISDN_ctrl_req cq; in dsp_hwec_enable() local
97 memset(&cq, 0, sizeof(cq)); in dsp_hwec_enable()
98 cq.op = MISDN_CTRL_HFC_ECHOCAN_ON; in dsp_hwec_enable()
99 cq.p1 = deftaps; in dsp_hwec_enable()
100 if (!dsp->ch.peer->ctrl(&dsp->ch, CONTROL_CHANNEL, &cq)) { in dsp_hwec_enable()
109 struct mISDN_ctrl_req cq; in dsp_hwec_disable() local
118 memset(&cq, 0, sizeof(cq)); in dsp_hwec_disable()
119 cq.op = MISDN_CTRL_HFC_ECHOCAN_OFF; in dsp_hwec_disable()
120 if (!dsp->ch.peer->ctrl(&dsp->ch, CONTROL_CHANNEL, &cq)) { in dsp_hwec_disable()
Ddsp_core.c191 struct mISDN_ctrl_req cq; in dsp_rx_off_member() local
194 memset(&cq, 0, sizeof(cq)); in dsp_rx_off_member()
223 cq.op = MISDN_CTRL_RX_OFF; in dsp_rx_off_member()
224 cq.p1 = rx_off; in dsp_rx_off_member()
225 if (dsp->ch.peer->ctrl(dsp->ch.peer, CONTROL_CHANNEL, &cq)) { in dsp_rx_off_member()
258 struct mISDN_ctrl_req cq; in dsp_fill_empty() local
260 memset(&cq, 0, sizeof(cq)); in dsp_fill_empty()
268 cq.op = MISDN_CTRL_FILL_EMPTY; in dsp_fill_empty()
269 cq.p1 = 1; in dsp_fill_empty()
270 if (dsp->ch.peer->ctrl(dsp->ch.peer, CONTROL_CHANNEL, &cq)) { in dsp_fill_empty()
[all …]
/drivers/net/ehea/
ehea_qmr.c
123 struct ehea_cq *cq; in ehea_create_cq() local
130 cq = kzalloc(sizeof(*cq), GFP_KERNEL); in ehea_create_cq()
131 if (!cq) { in ehea_create_cq()
136 cq->attr.max_nr_of_cqes = nr_of_cqe; in ehea_create_cq()
137 cq->attr.cq_token = cq_token; in ehea_create_cq()
138 cq->attr.eq_handle = eq_handle; in ehea_create_cq()
140 cq->adapter = adapter; in ehea_create_cq()
142 cq_handle_ref = &cq->fw_handle; in ehea_create_cq()
146 hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr, in ehea_create_cq()
147 &cq->fw_handle, &cq->epas); in ehea_create_cq()
[all …]
/drivers/infiniband/core/
verbs.c
613 struct ib_cq *cq; in ib_create_cq() local
615 cq = device->create_cq(device, cqe, comp_vector, NULL, NULL); in ib_create_cq()
617 if (!IS_ERR(cq)) { in ib_create_cq()
618 cq->device = device; in ib_create_cq()
619 cq->uobject = NULL; in ib_create_cq()
620 cq->comp_handler = comp_handler; in ib_create_cq()
621 cq->event_handler = event_handler; in ib_create_cq()
622 cq->cq_context = cq_context; in ib_create_cq()
623 atomic_set(&cq->usecnt, 0); in ib_create_cq()
626 return cq; in ib_create_cq()
[all …]
/drivers/infiniband/ulp/iser/
iser_verbs.c
45 static void iser_cq_callback(struct ib_cq *cq, void *cq_context);
70 device->cq = ib_create_cq(device->ib_device, in iser_create_device_ib_res()
75 if (IS_ERR(device->cq)) in iser_create_device_ib_res()
78 if (ib_req_notify_cq(device->cq, IB_CQ_NEXT_COMP)) in iser_create_device_ib_res()
96 ib_destroy_cq(device->cq); in iser_create_device_ib_res()
115 (void)ib_destroy_cq(device->cq); in iser_free_device_ib_res()
119 device->cq = NULL; in iser_free_device_ib_res()
172 init_attr.send_cq = device->cq; in iser_create_ib_conn_res()
173 init_attr.recv_cq = device->cq; in iser_create_ib_conn_res()
787 struct ib_cq *cq = device->cq; in iser_cq_tasklet_fn() local
[all …]
