Lines Matching refs:qp

196 static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)  in is_sqp()  argument
198 return qp->qpn >= dev->qp_table.sqp_start && in is_sqp()
199 qp->qpn <= dev->qp_table.sqp_start + 3; in is_sqp()
202 static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp) in is_qp0() argument
204 return qp->qpn >= dev->qp_table.sqp_start && in is_qp0()
205 qp->qpn <= dev->qp_table.sqp_start + 1; in is_qp0()
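
The is_sqp()/is_qp0() fragments above test whether a QP number falls in the block of four consecutive QPNs reserved at dev->qp_table.sqp_start: the first two correspond to QP0 and the remaining two to QP1 (one per port). A minimal userspace sketch of the same range checks, using plain integers as stand-ins for the driver structures:

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative only: sqp_start marks the first of four reserved QPNs. */
    static bool qpn_is_sqp(unsigned sqp_start, unsigned qpn)
    {
            return qpn >= sqp_start && qpn <= sqp_start + 3;
    }

    static bool qpn_is_qp0(unsigned sqp_start, unsigned qpn)
    {
            return qpn >= sqp_start && qpn <= sqp_start + 1;
    }

    int main(void)
    {
            unsigned sqp_start = 0;   /* assumed value, for illustration */

            for (unsigned qpn = 0; qpn < 6; ++qpn)
                    printf("qpn %u: special=%d qp0=%d\n", qpn,
                           qpn_is_sqp(sqp_start, qpn), qpn_is_qp0(sqp_start, qpn));
            return 0;
    }
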
208 static void *get_recv_wqe(struct mthca_qp *qp, int n) in get_recv_wqe() argument
210 if (qp->is_direct) in get_recv_wqe()
211 return qp->queue.direct.buf + (n << qp->rq.wqe_shift); in get_recv_wqe()
213 return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf + in get_recv_wqe()
214 ((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1)); in get_recv_wqe()
217 static void *get_send_wqe(struct mthca_qp *qp, int n) in get_send_wqe() argument
219 if (qp->is_direct) in get_send_wqe()
220 return qp->queue.direct.buf + qp->send_wqe_offset + in get_send_wqe()
221 (n << qp->sq.wqe_shift); in get_send_wqe()
223 return qp->queue.page_list[(qp->send_wqe_offset + in get_send_wqe()
224 (n << qp->sq.wqe_shift)) >> in get_send_wqe()
226 ((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) & in get_send_wqe()
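
The get_recv_wqe()/get_send_wqe() fragments compute a work-queue entry's address from its index: each WQE is 1 << wqe_shift bytes, so entry n sits at byte offset n << wqe_shift (plus send_wqe_offset for the send queue). With a single contiguous ("direct") buffer that offset is simply added to the base pointer; otherwise it is split into a page index (offset >> PAGE_SHIFT) and an offset within that page (offset & (PAGE_SIZE - 1)) and resolved through the page list. A hedged sketch of the same arithmetic with a simplified page-list type:

    #include <stddef.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Illustrative stand-in for the driver's page list. */
    struct page_buf { void *buf; };

    static void *wqe_addr(int is_direct, void *direct_buf, struct page_buf *pages,
                          size_t base_offset, int n, unsigned wqe_shift)
    {
            size_t off = base_offset + ((size_t)n << wqe_shift);

            if (is_direct)          /* one physically contiguous buffer */
                    return (char *)direct_buf + off;

            /* otherwise: page number, then offset inside that page */
            return (char *)pages[off >> PAGE_SHIFT].buf + (off & (PAGE_SIZE - 1));
    }

For the receive queue base_offset is 0; for the send queue it is send_wqe_offset.
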
241 struct mthca_qp *qp; in mthca_qp_event() local
245 qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1)); in mthca_qp_event()
246 if (qp) in mthca_qp_event()
247 ++qp->refcount; in mthca_qp_event()
250 if (!qp) { in mthca_qp_event()
257 qp->port = qp->alt_port; in mthca_qp_event()
261 event.element.qp = &qp->ibqp; in mthca_qp_event()
262 if (qp->ibqp.event_handler) in mthca_qp_event()
263 qp->ibqp.event_handler(&event, qp->ibqp.qp_context); in mthca_qp_event()
266 if (!--qp->refcount) in mthca_qp_event()
267 wake_up(&qp->wait); in mthca_qp_event()
328 static __be32 get_hw_access_flags(struct mthca_qp *qp, const struct ib_qp_attr *attr, in get_hw_access_flags() argument
338 dest_rd_atomic = qp->resp_depth; in get_hw_access_flags()
343 access_flags = qp->atomic_rd_en; in get_hw_access_flags()
434 struct mthca_qp *qp = to_mqp(ibqp); in mthca_query_qp() local
441 mutex_lock(&qp->mutex); in mthca_query_qp()
443 if (qp->state == IB_QPS_RESET) { in mthca_query_qp()
454 err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox); in mthca_query_qp()
464 qp->state = to_ib_qp_state(mthca_state); in mthca_query_qp()
465 qp_attr->qp_state = qp->state; in mthca_query_qp()
476 if (qp->transport == RC || qp->transport == UC) { in mthca_query_qp()
505 qp_attr->cap.max_send_wr = qp->sq.max; in mthca_query_qp()
506 qp_attr->cap.max_recv_wr = qp->rq.max; in mthca_query_qp()
507 qp_attr->cap.max_send_sge = qp->sq.max_gs; in mthca_query_qp()
508 qp_attr->cap.max_recv_sge = qp->rq.max_gs; in mthca_query_qp()
509 qp_attr->cap.max_inline_data = qp->max_inline_data; in mthca_query_qp()
512 qp_init_attr->sq_sig_type = qp->sq_policy; in mthca_query_qp()
518 mutex_unlock(&qp->mutex); in mthca_query_qp()
563 struct mthca_qp *qp = to_mqp(ibqp); in __mthca_modify_qp() local
582 (to_mthca_st(qp->transport) << 16)); in __mthca_modify_qp()
603 if (qp->transport == MLX || qp->transport == UD) in __mthca_modify_qp()
615 if (qp->rq.max) in __mthca_modify_qp()
616 qp_context->rq_size_stride = ilog2(qp->rq.max) << 3; in __mthca_modify_qp()
617 qp_context->rq_size_stride |= qp->rq.wqe_shift - 4; in __mthca_modify_qp()
619 if (qp->sq.max) in __mthca_modify_qp()
620 qp_context->sq_size_stride = ilog2(qp->sq.max) << 3; in __mthca_modify_qp()
621 qp_context->sq_size_stride |= qp->sq.wqe_shift - 4; in __mthca_modify_qp()
626 if (qp->ibqp.uobject) in __mthca_modify_qp()
630 qp_context->local_qpn = cpu_to_be32(qp->qpn); in __mthca_modify_qp()
635 if (qp->transport == MLX) in __mthca_modify_qp()
637 cpu_to_be32(qp->port << 24); in __mthca_modify_qp()
661 attr_mask & IB_QP_PORT ? attr->port_num : qp->port)) in __mthca_modify_qp()
711 qp_context->wqe_lkey = cpu_to_be32(qp->mr.ibmr.lkey); in __mthca_modify_qp()
715 if (qp->sq_policy == IB_SIGNAL_ALL_WR) in __mthca_modify_qp()
738 qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset); in __mthca_modify_qp()
739 qp_context->snd_db_index = cpu_to_be32(qp->sq.db_index); in __mthca_modify_qp()
751 qp_context->params2 |= get_hw_access_flags(qp, attr, attr_mask); in __mthca_modify_qp()
771 ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE << in __mthca_modify_qp()
777 qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index); in __mthca_modify_qp()
793 err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0, in __mthca_modify_qp()
801 qp->state = new_state; in __mthca_modify_qp()
803 qp->atomic_rd_en = attr->qp_access_flags; in __mthca_modify_qp()
805 qp->resp_depth = attr->max_dest_rd_atomic; in __mthca_modify_qp()
807 qp->port = attr->port_num; in __mthca_modify_qp()
809 qp->alt_port = attr->alt_port_num; in __mthca_modify_qp()
811 if (is_sqp(dev, qp)) in __mthca_modify_qp()
812 store_attrs(qp->sqp, attr, attr_mask); in __mthca_modify_qp()
818 if (is_qp0(dev, qp)) { in __mthca_modify_qp()
821 init_port(dev, qp->port); in __mthca_modify_qp()
827 mthca_CLOSE_IB(dev, qp->port); in __mthca_modify_qp()
834 if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) { in __mthca_modify_qp()
835 mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn, in __mthca_modify_qp()
836 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); in __mthca_modify_qp()
837 if (qp->ibqp.send_cq != qp->ibqp.recv_cq) in __mthca_modify_qp()
838 mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, NULL); in __mthca_modify_qp()
840 mthca_wq_reset(&qp->sq); in __mthca_modify_qp()
841 qp->sq.last = get_send_wqe(qp, qp->sq.max - 1); in __mthca_modify_qp()
843 mthca_wq_reset(&qp->rq); in __mthca_modify_qp()
844 qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1); in __mthca_modify_qp()
847 *qp->sq.db = 0; in __mthca_modify_qp()
848 *qp->rq.db = 0; in __mthca_modify_qp()
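
The rq_size_stride/sq_size_stride fragments in __mthca_modify_qp() pack each work queue's geometry into a single byte: log2 of the number of entries in the upper bits (ilog2(max) << 3) and the WQE stride, encoded as wqe_shift - 4, in the low three bits. A sketch of that packing under the same assumptions (max is a power of two, or zero for an unused queue):

    #include <stdint.h>

    /* Integer log2 of a power-of-two value, standing in for ilog2(). */
    static unsigned ilog2_u32(uint32_t v)
    {
            unsigned r = 0;

            while (v >>= 1)
                    ++r;
            return r;
    }

    /* log2(entries) in the upper bits, (wqe_shift - 4) in the low three bits;
     * a queue with max == 0 contributes only the stride bits. */
    static uint8_t size_stride(uint32_t max, unsigned wqe_shift)
    {
            uint8_t v = max ? ilog2_u32(max) << 3 : 0;

            return v | (wqe_shift - 4);
    }
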
862 struct mthca_qp *qp = to_mqp(ibqp); in mthca_modify_qp() local
866 mutex_lock(&qp->mutex); in mthca_modify_qp()
870 spin_lock_irq(&qp->sq.lock); in mthca_modify_qp()
871 spin_lock(&qp->rq.lock); in mthca_modify_qp()
872 cur_state = qp->state; in mthca_modify_qp()
873 spin_unlock(&qp->rq.lock); in mthca_modify_qp()
874 spin_unlock_irq(&qp->sq.lock); in mthca_modify_qp()
883 qp->transport, cur_state, new_state, in mthca_modify_qp()
924 mutex_unlock(&qp->mutex); in mthca_modify_qp()
928 static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz) in mthca_max_data_size() argument
936 switch (qp->transport) { in mthca_max_data_size()
964 struct mthca_qp *qp) in mthca_adjust_qp_caps() argument
966 int max_data_size = mthca_max_data_size(dev, qp, in mthca_adjust_qp_caps()
968 1 << qp->sq.wqe_shift)); in mthca_adjust_qp_caps()
970 qp->max_inline_data = mthca_max_inline_data(pd, max_data_size); in mthca_adjust_qp_caps()
972 qp->sq.max_gs = min_t(int, dev->limits.max_sg, in mthca_adjust_qp_caps()
974 qp->rq.max_gs = min_t(int, dev->limits.max_sg, in mthca_adjust_qp_caps()
975 (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) - in mthca_adjust_qp_caps()
989 struct mthca_qp *qp, in mthca_alloc_wqe_buf() argument
996 qp->rq.max_gs * sizeof (struct mthca_data_seg); in mthca_alloc_wqe_buf()
1001 for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size; in mthca_alloc_wqe_buf()
1002 qp->rq.wqe_shift++) in mthca_alloc_wqe_buf()
1005 size = qp->sq.max_gs * sizeof (struct mthca_data_seg); in mthca_alloc_wqe_buf()
1006 switch (qp->transport) { in mthca_alloc_wqe_buf()
1045 for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size; in mthca_alloc_wqe_buf()
1046 qp->sq.wqe_shift++) in mthca_alloc_wqe_buf()
1049 qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift, in mthca_alloc_wqe_buf()
1050 1 << qp->sq.wqe_shift); in mthca_alloc_wqe_buf()
1060 size = PAGE_ALIGN(qp->send_wqe_offset + in mthca_alloc_wqe_buf()
1061 (qp->sq.max << qp->sq.wqe_shift)); in mthca_alloc_wqe_buf()
1063 qp->wrid = kmalloc_array(qp->rq.max + qp->sq.max, sizeof(u64), in mthca_alloc_wqe_buf()
1065 if (!qp->wrid) in mthca_alloc_wqe_buf()
1069 &qp->queue, &qp->is_direct, pd, 0, &qp->mr); in mthca_alloc_wqe_buf()
1076 kfree(qp->wrid); in mthca_alloc_wqe_buf()
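
The mthca_alloc_wqe_buf() fragments size the queues: starting from the per-descriptor byte count (scatter/gather segments plus any transport-specific headers), the wqe_shift loops pick the smallest power-of-two stride of at least 64 bytes that fits one descriptor, and send_wqe_offset places the send queue immediately after the receive queue, rounded up to the send stride. A sketch of those two calculations (align_up is a hypothetical helper mirroring the kernel's ALIGN macro for power-of-two alignments):

    #include <stddef.h>

    /* Smallest power-of-two WQE stride (>= 64 bytes) that holds `size` bytes. */
    static unsigned wqe_shift_for(size_t size)
    {
            unsigned shift = 6;                 /* 1 << 6 == 64-byte minimum */

            while ((1UL << shift) < size)
                    ++shift;
            return shift;
    }

    /* Round x up to a multiple of the power-of-two `a`, like ALIGN(x, a). */
    static size_t align_up(size_t x, size_t a)
    {
            return (x + a - 1) & ~(a - 1);
    }

    /* e.g. send_wqe_offset = align_up(rq_max << rq_wqe_shift, 1UL << sq_wqe_shift); */
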
1081 struct mthca_qp *qp) in mthca_free_wqe_buf() argument
1083 mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset + in mthca_free_wqe_buf()
1084 (qp->sq.max << qp->sq.wqe_shift)), in mthca_free_wqe_buf()
1085 &qp->queue, qp->is_direct, &qp->mr); in mthca_free_wqe_buf()
1086 kfree(qp->wrid); in mthca_free_wqe_buf()
1090 struct mthca_qp *qp) in mthca_map_memfree() argument
1095 ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn); in mthca_map_memfree()
1099 ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn); in mthca_map_memfree()
1104 qp->qpn << dev->qp_table.rdb_shift); in mthca_map_memfree()
1113 mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn); in mthca_map_memfree()
1116 mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn); in mthca_map_memfree()
1122 struct mthca_qp *qp) in mthca_unmap_memfree() argument
1125 qp->qpn << dev->qp_table.rdb_shift); in mthca_unmap_memfree()
1126 mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn); in mthca_unmap_memfree()
1127 mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn); in mthca_unmap_memfree()
1131 struct mthca_qp *qp) in mthca_alloc_memfree() argument
1134 qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ, in mthca_alloc_memfree()
1135 qp->qpn, &qp->rq.db); in mthca_alloc_memfree()
1136 if (qp->rq.db_index < 0) in mthca_alloc_memfree()
1139 qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ, in mthca_alloc_memfree()
1140 qp->qpn, &qp->sq.db); in mthca_alloc_memfree()
1141 if (qp->sq.db_index < 0) { in mthca_alloc_memfree()
1142 mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index); in mthca_alloc_memfree()
1151 struct mthca_qp *qp) in mthca_free_memfree() argument
1154 mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index); in mthca_free_memfree()
1155 mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index); in mthca_free_memfree()
1164 struct mthca_qp *qp, in mthca_alloc_qp_common() argument
1171 qp->refcount = 1; in mthca_alloc_qp_common()
1172 init_waitqueue_head(&qp->wait); in mthca_alloc_qp_common()
1173 mutex_init(&qp->mutex); in mthca_alloc_qp_common()
1174 qp->state = IB_QPS_RESET; in mthca_alloc_qp_common()
1175 qp->atomic_rd_en = 0; in mthca_alloc_qp_common()
1176 qp->resp_depth = 0; in mthca_alloc_qp_common()
1177 qp->sq_policy = send_policy; in mthca_alloc_qp_common()
1178 mthca_wq_reset(&qp->sq); in mthca_alloc_qp_common()
1179 mthca_wq_reset(&qp->rq); in mthca_alloc_qp_common()
1181 spin_lock_init(&qp->sq.lock); in mthca_alloc_qp_common()
1182 spin_lock_init(&qp->rq.lock); in mthca_alloc_qp_common()
1184 ret = mthca_map_memfree(dev, qp); in mthca_alloc_qp_common()
1188 ret = mthca_alloc_wqe_buf(dev, pd, qp, udata); in mthca_alloc_qp_common()
1190 mthca_unmap_memfree(dev, qp); in mthca_alloc_qp_common()
1194 mthca_adjust_qp_caps(dev, pd, qp); in mthca_alloc_qp_common()
1204 ret = mthca_alloc_memfree(dev, qp); in mthca_alloc_qp_common()
1206 mthca_free_wqe_buf(dev, qp); in mthca_alloc_qp_common()
1207 mthca_unmap_memfree(dev, qp); in mthca_alloc_qp_common()
1214 qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16; in mthca_alloc_qp_common()
1216 for (i = 0; i < qp->rq.max; ++i) { in mthca_alloc_qp_common()
1217 next = get_recv_wqe(qp, i); in mthca_alloc_qp_common()
1218 next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) << in mthca_alloc_qp_common()
1219 qp->rq.wqe_shift); in mthca_alloc_qp_common()
1223 (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift); in mthca_alloc_qp_common()
1228 for (i = 0; i < qp->sq.max; ++i) { in mthca_alloc_qp_common()
1229 next = get_send_wqe(qp, i); in mthca_alloc_qp_common()
1230 next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) << in mthca_alloc_qp_common()
1231 qp->sq.wqe_shift) + in mthca_alloc_qp_common()
1232 qp->send_wqe_offset); in mthca_alloc_qp_common()
1235 for (i = 0; i < qp->rq.max; ++i) { in mthca_alloc_qp_common()
1236 next = get_recv_wqe(qp, i); in mthca_alloc_qp_common()
1237 next->nda_op = htonl((((i + 1) % qp->rq.max) << in mthca_alloc_qp_common()
1238 qp->rq.wqe_shift) | 1); in mthca_alloc_qp_common()
1243 qp->sq.last = get_send_wqe(qp, qp->sq.max - 1); in mthca_alloc_qp_common()
1244 qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1); in mthca_alloc_qp_common()
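
In mthca_alloc_qp_common() every WQE's next-descriptor field (nda_op) is pre-linked into a ring at allocation time: entry i points at the byte offset of entry i + 1, wrapping at the end of the queue, with send-queue offsets shifted by send_wqe_offset. A sketch of the next-offset arithmetic in host byte order (the real code converts with cpu_to_be32()/htonl(), and the Tavor path additionally sets the low bit):

    #include <stdint.h>

    /* Offset of the WQE that follows entry i in a ring of `max` entries of
     * 1 << wqe_shift bytes each, starting at `base` (0 for the receive queue,
     * send_wqe_offset for the send queue). Assumes max is a power of two,
     * as in the (i + 1) & (max - 1) form used above. */
    static uint32_t next_wqe_offset(uint32_t i, uint32_t max,
                                    unsigned wqe_shift, uint32_t base)
    {
            return (((i + 1) & (max - 1)) << wqe_shift) + base;
    }
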
1250 struct mthca_pd *pd, struct mthca_qp *qp) in mthca_set_qp_size() argument
1252 int max_data_size = mthca_max_data_size(dev, qp, dev->limits.max_desc_sz); in mthca_set_qp_size()
1266 if (qp->transport == MLX && cap->max_send_sge + 2 > dev->limits.max_sg) in mthca_set_qp_size()
1270 qp->rq.max = cap->max_recv_wr ? in mthca_set_qp_size()
1272 qp->sq.max = cap->max_send_wr ? in mthca_set_qp_size()
1275 qp->rq.max = cap->max_recv_wr; in mthca_set_qp_size()
1276 qp->sq.max = cap->max_send_wr; in mthca_set_qp_size()
1279 qp->rq.max_gs = cap->max_recv_sge; in mthca_set_qp_size()
1280 qp->sq.max_gs = max_t(int, cap->max_send_sge, in mthca_set_qp_size()
1295 struct mthca_qp *qp, in mthca_alloc_qp() argument
1301 case IB_QPT_RC: qp->transport = RC; break; in mthca_alloc_qp()
1302 case IB_QPT_UC: qp->transport = UC; break; in mthca_alloc_qp()
1303 case IB_QPT_UD: qp->transport = UD; break; in mthca_alloc_qp()
1307 err = mthca_set_qp_size(dev, cap, pd, qp); in mthca_alloc_qp()
1311 qp->qpn = mthca_alloc(&dev->qp_table.alloc); in mthca_alloc_qp()
1312 if (qp->qpn == -1) in mthca_alloc_qp()
1316 qp->port = 0; in mthca_alloc_qp()
1319 send_policy, qp, udata); in mthca_alloc_qp()
1321 mthca_free(&dev->qp_table.alloc, qp->qpn); in mthca_alloc_qp()
1326 mthca_array_set(&dev->qp_table.qp, in mthca_alloc_qp()
1327 qp->qpn & (dev->limits.num_qps - 1), qp); in mthca_alloc_qp()
1371 struct mthca_qp *qp, in mthca_alloc_sqp() argument
1377 qp->transport = MLX; in mthca_alloc_sqp()
1378 err = mthca_set_qp_size(dev, cap, pd, qp); in mthca_alloc_sqp()
1382 qp->sqp->header_buf_size = qp->sq.max * MTHCA_UD_HEADER_SIZE; in mthca_alloc_sqp()
1383 qp->sqp->header_buf = in mthca_alloc_sqp()
1384 dma_alloc_coherent(&dev->pdev->dev, qp->sqp->header_buf_size, in mthca_alloc_sqp()
1385 &qp->sqp->header_dma, GFP_KERNEL); in mthca_alloc_sqp()
1386 if (!qp->sqp->header_buf) in mthca_alloc_sqp()
1390 if (mthca_array_get(&dev->qp_table.qp, mqpn)) in mthca_alloc_sqp()
1393 mthca_array_set(&dev->qp_table.qp, mqpn, qp->sqp); in mthca_alloc_sqp()
1399 qp->port = port; in mthca_alloc_sqp()
1400 qp->qpn = mqpn; in mthca_alloc_sqp()
1401 qp->transport = MLX; in mthca_alloc_sqp()
1404 send_policy, qp, udata); in mthca_alloc_sqp()
1420 mthca_array_clear(&dev->qp_table.qp, mqpn); in mthca_alloc_sqp()
1426 dma_free_coherent(&dev->pdev->dev, qp->sqp->header_buf_size, in mthca_alloc_sqp()
1427 qp->sqp->header_buf, qp->sqp->header_dma); in mthca_alloc_sqp()
1431 static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp) in get_qp_refcount() argument
1436 c = qp->refcount; in get_qp_refcount()
1443 struct mthca_qp *qp) in mthca_free_qp() argument
1448 send_cq = to_mcq(qp->ibqp.send_cq); in mthca_free_qp()
1449 recv_cq = to_mcq(qp->ibqp.recv_cq); in mthca_free_qp()
1458 mthca_array_clear(&dev->qp_table.qp, in mthca_free_qp()
1459 qp->qpn & (dev->limits.num_qps - 1)); in mthca_free_qp()
1460 --qp->refcount; in mthca_free_qp()
1465 wait_event(qp->wait, !get_qp_refcount(dev, qp)); in mthca_free_qp()
1467 if (qp->state != IB_QPS_RESET) in mthca_free_qp()
1468 mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0, in mthca_free_qp()
1476 if (!qp->ibqp.uobject) { in mthca_free_qp()
1477 mthca_cq_clean(dev, recv_cq, qp->qpn, in mthca_free_qp()
1478 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); in mthca_free_qp()
1480 mthca_cq_clean(dev, send_cq, qp->qpn, NULL); in mthca_free_qp()
1482 mthca_free_memfree(dev, qp); in mthca_free_qp()
1483 mthca_free_wqe_buf(dev, qp); in mthca_free_qp()
1486 mthca_unmap_memfree(dev, qp); in mthca_free_qp()
1488 if (is_sqp(dev, qp)) { in mthca_free_qp()
1489 atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count)); in mthca_free_qp()
1490 dma_free_coherent(&dev->pdev->dev, qp->sqp->header_buf_size, in mthca_free_qp()
1491 qp->sqp->header_buf, qp->sqp->header_dma); in mthca_free_qp()
1493 mthca_free(&dev->qp_table.alloc, qp->qpn); in mthca_free_qp()
1497 static int build_mlx_header(struct mthca_dev *dev, struct mthca_qp *qp, int ind, in build_mlx_header() argument
1502 struct mthca_sqp *sqp = qp->sqp; in build_mlx_header()
1515 mlx->flags |= cpu_to_be32((!qp->ibqp.qp_num ? MTHCA_MLX_VL15 : 0) | in build_mlx_header()
1536 sqp->ud_header.lrh.virtual_lane = !qp->ibqp.qp_num ? 15 : 0; in build_mlx_header()
1540 if (!qp->ibqp.qp_num) in build_mlx_header()
1541 ib_get_cached_pkey(&dev->ib_dev, qp->port, sqp->pkey_index, in build_mlx_header()
1544 ib_get_cached_pkey(&dev->ib_dev, qp->port, wr->pkey_index, in build_mlx_header()
1551 sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->ibqp.qp_num); in build_mlx_header()
1558 data->lkey = cpu_to_be32(to_mpd(qp->ibqp.pd)->ntmr.ibmr.lkey); in build_mlx_header()
1626 struct mthca_qp *qp = to_mqp(ibqp); in mthca_tavor_post_send() local
1646 spin_lock_irqsave(&qp->sq.lock, flags); in mthca_tavor_post_send()
1650 ind = qp->sq.next_ind; in mthca_tavor_post_send()
1653 if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { in mthca_tavor_post_send()
1655 " %d max, %d nreq)\n", qp->qpn, in mthca_tavor_post_send()
1656 qp->sq.head, qp->sq.tail, in mthca_tavor_post_send()
1657 qp->sq.max, nreq); in mthca_tavor_post_send()
1663 wqe = get_send_wqe(qp, ind); in mthca_tavor_post_send()
1664 prev_wqe = qp->sq.last; in mthca_tavor_post_send()
1665 qp->sq.last = wqe; in mthca_tavor_post_send()
1682 switch (qp->transport) { in mthca_tavor_post_send()
1738 dev, qp, ind, ud_wr(wr), in mthca_tavor_post_send()
1749 if (wr->num_sge > qp->sq.max_gs) { in mthca_tavor_post_send()
1763 if (qp->transport == MLX) { in mthca_tavor_post_send()
1771 qp->wrid[ind + qp->rq.max] = wr->wr_id; in mthca_tavor_post_send()
1781 cpu_to_be32(((ind << qp->sq.wqe_shift) + in mthca_tavor_post_send()
1782 qp->send_wqe_offset) | in mthca_tavor_post_send()
1798 if (unlikely(ind >= qp->sq.max)) in mthca_tavor_post_send()
1799 ind -= qp->sq.max; in mthca_tavor_post_send()
1806 mthca_write64(((qp->sq.next_ind << qp->sq.wqe_shift) + in mthca_tavor_post_send()
1807 qp->send_wqe_offset) | f0 | op0, in mthca_tavor_post_send()
1808 (qp->qpn << 8) | size0, in mthca_tavor_post_send()
1813 qp->sq.next_ind = ind; in mthca_tavor_post_send()
1814 qp->sq.head += nreq; in mthca_tavor_post_send()
1816 spin_unlock_irqrestore(&qp->sq.lock, flags); in mthca_tavor_post_send()
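
mthca_tavor_post_send() walks the send ring by index, wrapping after the last entry, and finally rings the send doorbell with two words: the byte offset of the first newly posted WQE combined with the fence/opcode bits, and (qpn << 8) | size0. A hedged sketch of the wraparound and of how those two doorbell words are composed (the increment before the wrap is assumed; it is not visible in the matched lines):

    #include <stdint.h>

    /* Advance a work-queue index with wraparound over `max` entries. */
    static uint32_t next_index(uint32_t ind, uint32_t max)
    {
            if (++ind >= max)
                    ind -= max;
            return ind;
    }

    /* Compose the two Tavor send-doorbell words as in the fragments above. */
    static void send_doorbell_words(uint32_t first_ind, unsigned sq_wqe_shift,
                                    uint32_t send_wqe_offset,
                                    uint32_t f0, uint32_t op0,
                                    uint32_t qpn, uint32_t size0,
                                    uint32_t *hi, uint32_t *lo)
    {
            *hi = ((first_ind << sq_wqe_shift) + send_wqe_offset) | f0 | op0;
            *lo = (qpn << 8) | size0;
    }
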
1824 struct mthca_qp *qp = to_mqp(ibqp); in mthca_tavor_post_receive() local
1842 spin_lock_irqsave(&qp->rq.lock, flags); in mthca_tavor_post_receive()
1846 ind = qp->rq.next_ind; in mthca_tavor_post_receive()
1849 if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { in mthca_tavor_post_receive()
1851 " %d max, %d nreq)\n", qp->qpn, in mthca_tavor_post_receive()
1852 qp->rq.head, qp->rq.tail, in mthca_tavor_post_receive()
1853 qp->rq.max, nreq); in mthca_tavor_post_receive()
1859 wqe = get_recv_wqe(qp, ind); in mthca_tavor_post_receive()
1860 prev_wqe = qp->rq.last; in mthca_tavor_post_receive()
1861 qp->rq.last = wqe; in mthca_tavor_post_receive()
1870 if (unlikely(wr->num_sge > qp->rq.max_gs)) { in mthca_tavor_post_receive()
1882 qp->wrid[ind] = wr->wr_id; in mthca_tavor_post_receive()
1891 if (unlikely(ind >= qp->rq.max)) in mthca_tavor_post_receive()
1892 ind -= qp->rq.max; in mthca_tavor_post_receive()
1900 mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0, in mthca_tavor_post_receive()
1901 qp->qpn << 8, dev->kar + MTHCA_RECEIVE_DOORBELL, in mthca_tavor_post_receive()
1904 qp->rq.next_ind = ind; in mthca_tavor_post_receive()
1905 qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB; in mthca_tavor_post_receive()
1913 mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0, in mthca_tavor_post_receive()
1914 qp->qpn << 8 | nreq, dev->kar + MTHCA_RECEIVE_DOORBELL, in mthca_tavor_post_receive()
1918 qp->rq.next_ind = ind; in mthca_tavor_post_receive()
1919 qp->rq.head += nreq; in mthca_tavor_post_receive()
1921 spin_unlock_irqrestore(&qp->rq.lock, flags); in mthca_tavor_post_receive()
1929 struct mthca_qp *qp = to_mqp(ibqp); in mthca_arbel_post_send() local
1950 spin_lock_irqsave(&qp->sq.lock, flags); in mthca_arbel_post_send()
1954 ind = qp->sq.head & (qp->sq.max - 1); in mthca_arbel_post_send()
1961 ((qp->sq.head & 0xffff) << 8) | f0 | op0; in mthca_arbel_post_send()
1963 qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB; in mthca_arbel_post_send()
1970 *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff); in mthca_arbel_post_send()
1978 mthca_write64(dbhi, (qp->qpn << 8) | size0, in mthca_arbel_post_send()
1983 if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { in mthca_arbel_post_send()
1985 " %d max, %d nreq)\n", qp->qpn, in mthca_arbel_post_send()
1986 qp->sq.head, qp->sq.tail, in mthca_arbel_post_send()
1987 qp->sq.max, nreq); in mthca_arbel_post_send()
1993 wqe = get_send_wqe(qp, ind); in mthca_arbel_post_send()
1994 prev_wqe = qp->sq.last; in mthca_arbel_post_send()
1995 qp->sq.last = wqe; in mthca_arbel_post_send()
2012 switch (qp->transport) { in mthca_arbel_post_send()
2068 dev, qp, ind, ud_wr(wr), in mthca_arbel_post_send()
2079 if (wr->num_sge > qp->sq.max_gs) { in mthca_arbel_post_send()
2093 if (qp->transport == MLX) { in mthca_arbel_post_send()
2101 qp->wrid[ind + qp->rq.max] = wr->wr_id; in mthca_arbel_post_send()
2111 cpu_to_be32(((ind << qp->sq.wqe_shift) + in mthca_arbel_post_send()
2112 qp->send_wqe_offset) | in mthca_arbel_post_send()
2128 if (unlikely(ind >= qp->sq.max)) in mthca_arbel_post_send()
2129 ind -= qp->sq.max; in mthca_arbel_post_send()
2134 dbhi = (nreq << 24) | ((qp->sq.head & 0xffff) << 8) | f0 | op0; in mthca_arbel_post_send()
2136 qp->sq.head += nreq; in mthca_arbel_post_send()
2143 *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff); in mthca_arbel_post_send()
2151 mthca_write64(dbhi, (qp->qpn << 8) | size0, dev->kar + MTHCA_SEND_DOORBELL, in mthca_arbel_post_send()
2155 spin_unlock_irqrestore(&qp->sq.lock, flags); in mthca_arbel_post_send()
2163 struct mthca_qp *qp = to_mqp(ibqp); in mthca_arbel_post_receive() local
2171 spin_lock_irqsave(&qp->rq.lock, flags); in mthca_arbel_post_receive()
2175 ind = qp->rq.head & (qp->rq.max - 1); in mthca_arbel_post_receive()
2178 if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { in mthca_arbel_post_receive()
2180 " %d max, %d nreq)\n", qp->qpn, in mthca_arbel_post_receive()
2181 qp->rq.head, qp->rq.tail, in mthca_arbel_post_receive()
2182 qp->rq.max, nreq); in mthca_arbel_post_receive()
2188 wqe = get_recv_wqe(qp, ind); in mthca_arbel_post_receive()
2194 if (unlikely(wr->num_sge > qp->rq.max_gs)) { in mthca_arbel_post_receive()
2205 if (i < qp->rq.max_gs) in mthca_arbel_post_receive()
2208 qp->wrid[ind] = wr->wr_id; in mthca_arbel_post_receive()
2211 if (unlikely(ind >= qp->rq.max)) in mthca_arbel_post_receive()
2212 ind -= qp->rq.max; in mthca_arbel_post_receive()
2216 qp->rq.head += nreq; in mthca_arbel_post_receive()
2223 *qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff); in mthca_arbel_post_receive()
2226 spin_unlock_irqrestore(&qp->rq.lock, flags); in mthca_arbel_post_receive()
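
The Arbel (mem-free) post paths keep a doorbell record in memory: after posting, the new ring head is stored to *qp->sq.db or *qp->rq.db (low 16 bits, big-endian) so the HCA can read the current head. A minimal sketch of that update, assuming a little-endian host (the real code uses cpu_to_be32() and a write barrier so the WQE writes are visible before the record changes):

    #include <stdint.h>

    /* Publish the low 16 bits of the ring head, big-endian, where the HCA
     * reads it. __builtin_bswap32() stands in for cpu_to_be32() on a
     * little-endian host; on a big-endian host the swap would be a no-op. */
    static void update_doorbell_record(volatile uint32_t *db, uint32_t head)
    {
            *db = __builtin_bswap32(head & 0xffff);
    }
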
2230 void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send, in mthca_free_err_wqe() argument
2239 if (qp->ibqp.srq && !is_send) { in mthca_free_err_wqe()
2245 next = get_send_wqe(qp, index); in mthca_free_err_wqe()
2247 next = get_recv_wqe(qp, index); in mthca_free_err_wqe()
2277 err = mthca_array_init(&dev->qp_table.qp, in mthca_init_qp_table()
2299 mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps); in mthca_init_qp_table()
2312 mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps); in mthca_cleanup_qp_table()