/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kref.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
#include "mlx5_ib.h"
#include "srq.h"
#include "qp.h"

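/* Completion event handler: called by the low-level driver when a
 * completion event is reported on this CQ; forward it to the
 * consumer's completion handler.
 */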
static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;

	ibcq->comp_handler(ibcq, ibcq->cq_context);
}

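/* Asynchronous event handler: the only event expected on a CQ is
 * CQ_ERROR, which is translated into IB_EVENT_CQ_ERR and reported to
 * the consumer's event handler, if one is registered.
 */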
static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type)
{
	struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq);
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct ib_cq *ibcq = &cq->ibcq;
	struct ib_event event;

	if (type != MLX5_EVENT_TYPE_CQ_ERROR) {
		mlx5_ib_warn(dev, "Unexpected event type %d on CQ %06x\n",
			     type, mcq->cqn);
		return;
	}

	if (ibcq->event_handler) {
		event.device = &dev->ib_dev;
		event.event = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}

static void *get_cqe(struct mlx5_ib_cq *cq, int n)
{
	return mlx5_frag_buf_get_wqe(&cq->buf.fbc, n);
}

static u8 sw_ownership_bit(int n, int nent)
{
	return (n & nent) ? 1 : 0;
}

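/* Return the CQE at index n if it is owned by software (i.e. valid and
 * with an ownership bit matching the current pass over the ring), or
 * NULL if it still belongs to hardware.
 */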
static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)
{
	void *cqe = get_cqe(cq, n & cq->ibcq.cqe);
	struct mlx5_cqe64 *cqe64;

	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

	if (likely(get_cqe_opcode(cqe64) != MLX5_CQE_INVALID) &&
	    !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
		return cqe;
	} else {
		return NULL;
	}
}

static void *next_cqe_sw(struct mlx5_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}

static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx)
{
	switch (wq->wr_data[idx]) {
	case MLX5_IB_WR_UMR:
		return 0;

	case IB_WR_LOCAL_INV:
		return IB_WC_LOCAL_INV;

	case IB_WR_REG_MR:
		return IB_WC_REG_MR;

	default:
		pr_warn("unknown completion status\n");
		return 0;
	}
}

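/* Translate the opcode of a successful send completion into the
 * corresponding ib_wc opcode and flags, filling in byte_len where the
 * opcode implies it (RDMA read and atomics).
 */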
static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
			    struct mlx5_ib_wq *wq, int idx)
{
	wc->wc_flags = 0;
	switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
	case MLX5_OPCODE_RDMA_WRITE_IMM:
		wc->wc_flags |= IB_WC_WITH_IMM;
		fallthrough;
	case MLX5_OPCODE_RDMA_WRITE:
		wc->opcode = IB_WC_RDMA_WRITE;
		break;
	case MLX5_OPCODE_SEND_IMM:
		wc->wc_flags |= IB_WC_WITH_IMM;
		fallthrough;
	case MLX5_OPCODE_SEND:
	case MLX5_OPCODE_SEND_INVAL:
		wc->opcode = IB_WC_SEND;
		break;
	case MLX5_OPCODE_RDMA_READ:
		wc->opcode = IB_WC_RDMA_READ;
		wc->byte_len = be32_to_cpu(cqe->byte_cnt);
		break;
	case MLX5_OPCODE_ATOMIC_CS:
		wc->opcode = IB_WC_COMP_SWAP;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_ATOMIC_FA:
		wc->opcode = IB_WC_FETCH_ADD;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_ATOMIC_MASKED_CS:
		wc->opcode = IB_WC_MASKED_COMP_SWAP;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_ATOMIC_MASKED_FA:
		wc->opcode = IB_WC_MASKED_FETCH_ADD;
		wc->byte_len = 8;
		break;
	case MLX5_OPCODE_UMR:
		wc->opcode = get_umr_comp(wq, idx);
		break;
	}
}

enum {
	MLX5_GRH_IN_BUFFER = 1,
	MLX5_GRH_IN_CQE = 2,
};

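/* Fill in an ib_wc for a receive completion: consume the matching SRQ
 * or RQ WQE, translate the CQE opcode into ib_wc opcode/flags, and
 * extract addressing metadata (pkey, slid/sl on IB, or VLAN and RoCE
 * network header type on Ethernet).
 */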
static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
			     struct mlx5_ib_qp *qp)
{
	enum rdma_link_layer ll = rdma_port_get_link_layer(qp->ibqp.device, 1);
	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
	struct mlx5_ib_srq *srq = NULL;
	struct mlx5_ib_wq *wq;
	u16 wqe_ctr;
	u8 roce_packet_type;
	bool vlan_present;
	u8 g;

	if (qp->ibqp.srq || qp->ibqp.xrcd) {
		struct mlx5_core_srq *msrq = NULL;

		if (qp->ibqp.xrcd) {
			msrq = mlx5_cmd_get_srq(dev, be32_to_cpu(cqe->srqn));
			if (msrq)
				srq = to_mibsrq(msrq);
		} else {
			srq = to_msrq(qp->ibqp.srq);
		}
		if (srq) {
			wqe_ctr = be16_to_cpu(cqe->wqe_counter);
			wc->wr_id = srq->wrid[wqe_ctr];
			mlx5_ib_free_srq_wqe(srq, wqe_ctr);
			if (msrq)
				mlx5_core_res_put(&msrq->common);
		}
	} else {
		wq = &qp->rq;
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	}
	wc->byte_len = be32_to_cpu(cqe->byte_cnt);

	switch (get_cqe_opcode(cqe)) {
	case MLX5_CQE_RESP_WR_IMM:
		wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
		wc->wc_flags = IB_WC_WITH_IMM;
		wc->ex.imm_data = cqe->immediate;
		break;
	case MLX5_CQE_RESP_SEND:
		wc->opcode = IB_WC_RECV;
		wc->wc_flags = IB_WC_IP_CSUM_OK;
		if (unlikely(!((cqe->hds_ip_ext & CQE_L3_OK) &&
			       (cqe->hds_ip_ext & CQE_L4_OK))))
			wc->wc_flags = 0;
		break;
	case MLX5_CQE_RESP_SEND_IMM:
		wc->opcode = IB_WC_RECV;
		wc->wc_flags = IB_WC_WITH_IMM;
		wc->ex.imm_data = cqe->immediate;
		break;
	case MLX5_CQE_RESP_SEND_INV:
		wc->opcode = IB_WC_RECV;
		wc->wc_flags = IB_WC_WITH_INVALIDATE;
		wc->ex.invalidate_rkey = be32_to_cpu(cqe->inval_rkey);
		break;
	}
	wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
	wc->dlid_path_bits = cqe->ml_path;
	g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
	wc->wc_flags |= g ? IB_WC_GRH : 0;
	if (is_qp1(qp->type)) {
		u16 pkey = be32_to_cpu(cqe->pkey) & 0xffff;

		ib_find_cached_pkey(&dev->ib_dev, qp->port, pkey,
				    &wc->pkey_index);
	} else {
		wc->pkey_index = 0;
	}

	if (ll != IB_LINK_LAYER_ETHERNET) {
		wc->slid = be16_to_cpu(cqe->slid);
		wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
		return;
	}

	wc->slid = 0;
	vlan_present = cqe->l4_l3_hdr_type & 0x1;
	roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;
	if (vlan_present) {
		wc->vlan_id = (be16_to_cpu(cqe->vlan_info)) & 0xfff;
		wc->sl = (be16_to_cpu(cqe->vlan_info) >> 13) & 0x7;
		wc->wc_flags |= IB_WC_WITH_VLAN;
	} else {
		wc->sl = 0;
	}

	switch (roce_packet_type) {
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH:
		wc->network_hdr_type = RDMA_NETWORK_ROCE_V1;
		break;
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6:
		wc->network_hdr_type = RDMA_NETWORK_IPV6;
		break;
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4:
		wc->network_hdr_type = RDMA_NETWORK_IPV4;
		break;
	}
	wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
}

static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe)
{
	mlx5_ib_warn(dev, "dump error cqe\n");
	mlx5_dump_err_cqe(dev->mdev, cqe);
}

static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
				  struct mlx5_err_cqe *cqe,
				  struct ib_wc *wc)
{
	int dump = 1;

	switch (cqe->syndrome) {
	case MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case MLX5_CQE_SYNDROME_WR_FLUSH_ERR:
		dump = 0;
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case MLX5_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case MLX5_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		dump = 0;
		break;
	case MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		dump = 0;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err_synd;
	if (dump)
		dump_cqe(dev, cqe);
}

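/* Walk the send queue's WQE list from @tail to @head and advance
 * last_poll past the completed WQE at @head.
 */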
static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
			   u16 tail, u16 head)
{
	u16 idx;

	do {
		idx = tail & (qp->sq.wqe_cnt - 1);
		if (idx == head)
			break;

		tail = qp->sq.w_list[idx].next;
	} while (1);
	tail = qp->sq.w_list[idx].next;
	qp->sq.last_poll = tail;
}

static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
{
	mlx5_frag_buf_free(dev->mdev, &buf->frag_buf);
}

static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
			     struct ib_sig_err *item)
{
	u16 syndrome = be16_to_cpu(cqe->syndrome);

#define GUARD_ERR   (1 << 13)
#define APPTAG_ERR  (1 << 12)
#define REFTAG_ERR  (1 << 11)

	if (syndrome & GUARD_ERR) {
		item->err_type = IB_SIG_BAD_GUARD;
		item->expected = be32_to_cpu(cqe->expected_trans_sig) >> 16;
		item->actual = be32_to_cpu(cqe->actual_trans_sig) >> 16;
	} else if (syndrome & REFTAG_ERR) {
		item->err_type = IB_SIG_BAD_REFTAG;
		item->expected = be32_to_cpu(cqe->expected_reftag);
		item->actual = be32_to_cpu(cqe->actual_reftag);
	} else if (syndrome & APPTAG_ERR) {
		item->err_type = IB_SIG_BAD_APPTAG;
		item->expected = be32_to_cpu(cqe->expected_trans_sig) & 0xffff;
		item->actual = be32_to_cpu(cqe->actual_trans_sig) & 0xffff;
	} else {
		pr_err("Got signature completion error with bad syndrome %04x\n",
		       syndrome);
	}

	item->sig_err_offset = be64_to_cpu(cqe->err_offset);
	item->key = be32_to_cpu(cqe->mkey);
}

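/* Generate flush-error completions in software for all outstanding WQEs
 * on one work queue; used when the device is in internal error state
 * and will not produce CQEs itself.
 */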
static void sw_comp(struct mlx5_ib_qp *qp, int num_entries, struct ib_wc *wc,
		    int *npolled, bool is_send)
{
	struct mlx5_ib_wq *wq;
	unsigned int cur;
	int np;
	int i;

	wq = (is_send) ? &qp->sq : &qp->rq;
	cur = wq->head - wq->tail;
	np = *npolled;

	if (cur == 0)
		return;

	for (i = 0; i < cur && np < num_entries; i++) {
		unsigned int idx;

		idx = (is_send) ? wq->last_poll : wq->tail;
		idx &= (wq->wqe_cnt - 1);
		wc->wr_id = wq->wrid[idx];
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
		wq->tail++;
		if (is_send)
			wq->last_poll = wq->w_list[idx].next;
		np++;
		wc->qp = &qp->ibqp;
		wc++;
	}
	*npolled = np;
}

static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries,
				 struct ib_wc *wc, int *npolled)
{
	struct mlx5_ib_qp *qp;

	*npolled = 0;
	/* Find uncompleted WQEs belonging to that cq and return mimicked
	 * (flushed) completions for them.
	 */
	list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
		sw_comp(qp, num_entries, wc + *npolled, npolled, true);
		if (*npolled >= num_entries)
			return;
	}

	list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) {
		sw_comp(qp, num_entries, wc + *npolled, npolled, false);
		if (*npolled >= num_entries)
			return;
	}
}

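/* Poll a single CQE from the CQ and translate it into @wc. Returns 0 on
 * success or -EAGAIN if no software-owned CQE is available. Resize and
 * signature-error CQEs are consumed internally and polling continues.
 */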
static int mlx5_poll_one(struct mlx5_ib_cq *cq,
			 struct mlx5_ib_qp **cur_qp,
			 struct ib_wc *wc)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_err_cqe *err_cqe;
	struct mlx5_cqe64 *cqe64;
	struct mlx5_core_qp *mqp;
	struct mlx5_ib_wq *wq;
	uint8_t opcode;
	uint32_t qpn;
	u16 wqe_ctr;
	void *cqe;
	int idx;

repoll:
	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

	++cq->mcq.cons_index;

	/* Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	opcode = get_cqe_opcode(cqe64);
	if (unlikely(opcode == MLX5_CQE_RESIZE_CQ)) {
		if (likely(cq->resize_buf)) {
			free_cq_buf(dev, &cq->buf);
			cq->buf = *cq->resize_buf;
			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
			goto repoll;
		} else {
			mlx5_ib_warn(dev, "unexpected resize cqe\n");
		}
	}

	qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;
	if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) {
		/* We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		mqp = radix_tree_lookup(&dev->qp_table.tree, qpn);
		*cur_qp = to_mibqp(mqp);
	}

	wc->qp = &(*cur_qp)->ibqp;
	switch (opcode) {
	case MLX5_CQE_REQ:
		wq = &(*cur_qp)->sq;
		wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
		idx = wqe_ctr & (wq->wqe_cnt - 1);
		handle_good_req(wc, cqe64, wq, idx);
		handle_atomics(*cur_qp, cqe64, wq->last_poll, idx);
		wc->wr_id = wq->wrid[idx];
		wq->tail = wq->wqe_head[idx] + 1;
		wc->status = IB_WC_SUCCESS;
		break;
	case MLX5_CQE_RESP_WR_IMM:
	case MLX5_CQE_RESP_SEND:
	case MLX5_CQE_RESP_SEND_IMM:
	case MLX5_CQE_RESP_SEND_INV:
		handle_responder(wc, cqe64, *cur_qp);
		wc->status = IB_WC_SUCCESS;
		break;
	case MLX5_CQE_RESIZE_CQ:
		break;
	case MLX5_CQE_REQ_ERR:
	case MLX5_CQE_RESP_ERR:
		err_cqe = (struct mlx5_err_cqe *)cqe64;
		mlx5_handle_error_cqe(dev, err_cqe, wc);
		mlx5_ib_dbg(dev, "%s error cqe on cqn 0x%x:\n",
			    opcode == MLX5_CQE_REQ_ERR ?
			    "Requestor" : "Responder", cq->mcq.cqn);
		mlx5_ib_dbg(dev, "syndrome 0x%x, vendor syndrome 0x%x\n",
			    err_cqe->syndrome, err_cqe->vendor_err_synd);
		if (opcode == MLX5_CQE_REQ_ERR) {
			wq = &(*cur_qp)->sq;
			wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
			idx = wqe_ctr & (wq->wqe_cnt - 1);
			wc->wr_id = wq->wrid[idx];
			wq->tail = wq->wqe_head[idx] + 1;
		} else {
			struct mlx5_ib_srq *srq;

			if ((*cur_qp)->ibqp.srq) {
				srq = to_msrq((*cur_qp)->ibqp.srq);
				wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
				wc->wr_id = srq->wrid[wqe_ctr];
				mlx5_ib_free_srq_wqe(srq, wqe_ctr);
			} else {
				wq = &(*cur_qp)->rq;
				wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
				++wq->tail;
			}
		}
		break;
	case MLX5_CQE_SIG_ERR: {
		struct mlx5_sig_err_cqe *sig_err_cqe =
			(struct mlx5_sig_err_cqe *)cqe64;
		struct mlx5_core_sig_ctx *sig;

		xa_lock(&dev->sig_mrs);
		sig = xa_load(&dev->sig_mrs,
			      mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
		get_sig_err_item(sig_err_cqe, &sig->err_item);
		sig->sig_err_exists = true;
		sig->sigerr_count++;

		mlx5_ib_warn(dev, "CQN: 0x%x Got SIGERR on key: 0x%x err_type %x err_offset %llx expected %x actual %x\n",
			     cq->mcq.cqn, sig->err_item.key,
			     sig->err_item.err_type,
			     sig->err_item.sig_err_offset,
			     sig->err_item.expected,
			     sig->err_item.actual);

		xa_unlock(&dev->sig_mrs);
		goto repoll;
	}
	}

	return 0;
}

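/* Drain up to @num_entries software-generated completions from the CQ's
 * wc_list. When @is_fatal_err is set, the completions are reported as
 * flush errors instead of their original status.
 */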
static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
			struct ib_wc *wc, bool is_fatal_err)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_ib_wc *soft_wc, *next;
	int npolled = 0;

	list_for_each_entry_safe(soft_wc, next, &cq->wc_list, list) {
		if (npolled >= num_entries)
			break;

		mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n",
			    cq->mcq.cqn);

		if (unlikely(is_fatal_err)) {
			soft_wc->wc.status = IB_WC_WR_FLUSH_ERR;
			soft_wc->wc.vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
		}
		wc[npolled++] = soft_wc->wc;
		list_del(&soft_wc->list);
		kfree(soft_wc);
	}

	return npolled;
}

int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	struct mlx5_ib_qp *cur_qp = NULL;
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_core_dev *mdev = dev->mdev;
	unsigned long flags;
	int soft_polled = 0;
	int npolled;

	spin_lock_irqsave(&cq->lock, flags);
	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		/* make sure no soft WQEs are waiting */
		if (unlikely(!list_empty(&cq->wc_list)))
			soft_polled = poll_soft_wc(cq, num_entries, wc, true);

		mlx5_ib_poll_sw_comp(cq, num_entries - soft_polled,
				     wc + soft_polled, &npolled);
		goto out;
	}

	if (unlikely(!list_empty(&cq->wc_list)))
		soft_polled = poll_soft_wc(cq, num_entries, wc, false);

	for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
		if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
			break;
	}

	if (npolled)
		mlx5_cq_set_ci(&cq->mcq);
out:
	spin_unlock_irqrestore(&cq->lock, flags);

	return soft_polled + npolled;
}

int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	void __iomem *uar_page = mdev->priv.uar->map;
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->lock, irq_flags);
	if (cq->notify_flags != IB_CQ_NEXT_COMP)
		cq->notify_flags = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !list_empty(&cq->wc_list))
		ret = 1;
	spin_unlock_irqrestore(&cq->lock, irq_flags);

	mlx5_cq_arm(&cq->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
		    uar_page, to_mcq(ibcq)->mcq.cons_index);

	return ret;
}

static int alloc_cq_frag_buf(struct mlx5_ib_dev *dev,
			     struct mlx5_ib_cq_buf *buf,
			     int nent,
			     int cqe_size)
{
	struct mlx5_frag_buf *frag_buf = &buf->frag_buf;
	u8 log_wq_stride = 6 + (cqe_size == 128 ? 1 : 0);
	u8 log_wq_sz = ilog2(cqe_size);
	int err;

	err = mlx5_frag_buf_alloc_node(dev->mdev,
				       nent * cqe_size,
				       frag_buf,
				       dev->mdev->priv.numa_node);
	if (err)
		return err;

	mlx5_init_fbc(frag_buf->frags, log_wq_stride, log_wq_sz, &buf->fbc);

	buf->cqe_size = cqe_size;
	buf->nent = nent;

	return 0;
}

enum {
	MLX5_CQE_RES_FORMAT_HASH = 0,
	MLX5_CQE_RES_FORMAT_CSUM = 1,
	MLX5_CQE_RES_FORMAT_CSUM_STRIDX = 3,
};

static int mini_cqe_res_format_to_hw(struct mlx5_ib_dev *dev, u8 format)
{
	switch (format) {
	case MLX5_IB_CQE_RES_FORMAT_HASH:
		return MLX5_CQE_RES_FORMAT_HASH;
	case MLX5_IB_CQE_RES_FORMAT_CSUM:
		return MLX5_CQE_RES_FORMAT_CSUM;
	case MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX:
		if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
			return MLX5_CQE_RES_FORMAT_CSUM_STRIDX;
		return -EOPNOTSUPP;
	default:
		return -EINVAL;
	}
}

static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
			  struct mlx5_ib_cq *cq, int entries, u32 **cqb,
			  int *cqe_size, int *index, int *inlen)
{
	struct mlx5_ib_create_cq ucmd = {};
	unsigned long page_size;
	unsigned int page_offset_quantized;
	size_t ucmdlen;
	__be64 *pas;
	int ncont;
	void *cqc;
	int err;
	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);

	ucmdlen = min(udata->inlen, sizeof(ucmd));
	if (ucmdlen < offsetof(struct mlx5_ib_create_cq, flags))
		return -EINVAL;

	if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
		return -EFAULT;

	if ((ucmd.flags & ~(MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD |
			    MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX |
			    MLX5_IB_CREATE_CQ_FLAGS_REAL_TIME_TS)))
		return -EINVAL;

	if ((ucmd.cqe_size != 64 && ucmd.cqe_size != 128) ||
	    ucmd.reserved0 || ucmd.reserved1)
		return -EINVAL;

	*cqe_size = ucmd.cqe_size;

	cq->buf.umem =
		ib_umem_get(&dev->ib_dev, ucmd.buf_addr,
			    entries * ucmd.cqe_size, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(cq->buf.umem)) {
		err = PTR_ERR(cq->buf.umem);
		return err;
	}

	page_size = mlx5_umem_find_best_cq_quantized_pgoff(
		cq->buf.umem, cqc, log_page_size, MLX5_ADAPTER_PAGE_SHIFT,
		page_offset, 64, &page_offset_quantized);
	if (!page_size) {
		err = -EINVAL;
		goto err_umem;
	}

	err = mlx5_ib_db_map_user(context, ucmd.db_addr, &cq->db);
	if (err)
		goto err_umem;

	ncont = ib_umem_num_dma_blocks(cq->buf.umem, page_size);
	mlx5_ib_dbg(
		dev,
		"addr 0x%llx, size %u, npages %zu, page_size %lu, ncont %d\n",
		ucmd.buf_addr, entries * ucmd.cqe_size,
		ib_umem_num_pages(cq->buf.umem), page_size, ncont);

	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont;
	*cqb = kvzalloc(*inlen, GFP_KERNEL);
	if (!*cqb) {
		err = -ENOMEM;
		goto err_db;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
	mlx5_ib_populate_pas(cq->buf.umem, page_size, pas, 0);

	cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
	MLX5_SET(cqc, cqc, log_page_size,
		 order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(cqc, cqc, page_offset, page_offset_quantized);

	if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX) {
		*index = ucmd.uar_page_index;
	} else if (context->bfregi.lib_uar_dyn) {
		err = -EINVAL;
		goto err_cqb;
	} else {
		*index = context->bfregi.sys_pages[0];
	}

	if (ucmd.cqe_comp_en == 1) {
		int mini_cqe_format;

		if (!((*cqe_size == 128 &&
		       MLX5_CAP_GEN(dev->mdev, cqe_compression_128)) ||
		      (*cqe_size == 64 &&
		       MLX5_CAP_GEN(dev->mdev, cqe_compression)))) {
			err = -EOPNOTSUPP;
			mlx5_ib_warn(dev, "CQE compression is not supported for size %d!\n",
				     *cqe_size);
			goto err_cqb;
		}

		mini_cqe_format =
			mini_cqe_res_format_to_hw(dev,
						  ucmd.cqe_comp_res_format);
		if (mini_cqe_format < 0) {
			err = mini_cqe_format;
			mlx5_ib_dbg(dev, "CQE compression res format %d error: %d\n",
				    ucmd.cqe_comp_res_format, err);
			goto err_cqb;
		}

		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
		MLX5_SET(cqc, cqc, mini_cqe_res_format, mini_cqe_format);
	}

	if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD) {
		if (*cqe_size != 128 ||
		    !MLX5_CAP_GEN(dev->mdev, cqe_128_always)) {
			err = -EOPNOTSUPP;
			mlx5_ib_warn(dev,
				     "CQE padding is not supported for CQE size of %dB!\n",
				     *cqe_size);
			goto err_cqb;
		}

		cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD;
	}

	if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_REAL_TIME_TS)
		cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_REAL_TIME_TS;

	MLX5_SET(create_cq_in, *cqb, uid, context->devx_uid);
	return 0;

err_cqb:
	kvfree(*cqb);

err_db:
	mlx5_ib_db_unmap_user(context, &cq->db);

err_umem:
	ib_umem_release(cq->buf.umem);
	return err;
}

static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_udata *udata)
{
	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);

	mlx5_ib_db_unmap_user(context, &cq->db);
	ib_umem_release(cq->buf.umem);
}

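/* Mark every CQE in the buffer as invalid (hardware-owned) so that
 * get_sw_cqe() will not report stale entries before the hardware has
 * written them.
 */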
static void init_cq_frag_buf(struct mlx5_ib_cq_buf *buf)
{
	int i;
	void *cqe;
	struct mlx5_cqe64 *cqe64;

	for (i = 0; i < buf->nent; i++) {
		cqe = mlx5_frag_buf_get_wqe(&buf->fbc, i);
		cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
		cqe64->op_own = MLX5_CQE_INVALID << 4;
	}
}

static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
			    int entries, int cqe_size,
			    u32 **cqb, int *index, int *inlen)
{
	__be64 *pas;
	void *cqc;
	int err;

	err = mlx5_db_alloc(dev->mdev, &cq->db);
	if (err)
		return err;

	cq->mcq.set_ci_db = cq->db.db;
	cq->mcq.arm_db = cq->db.db + 1;
	cq->mcq.cqe_sz = cqe_size;

	err = alloc_cq_frag_buf(dev, &cq->buf, entries, cqe_size);
	if (err)
		goto err_db;

	init_cq_frag_buf(&cq->buf);

	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) *
		 cq->buf.frag_buf.npages;
	*cqb = kvzalloc(*inlen, GFP_KERNEL);
	if (!*cqb) {
		err = -ENOMEM;
		goto err_buf;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
	mlx5_fill_page_frag_array(&cq->buf.frag_buf, pas);

	cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
	MLX5_SET(cqc, cqc, log_page_size,
		 cq->buf.frag_buf.page_shift -
		 MLX5_ADAPTER_PAGE_SHIFT);

	*index = dev->mdev->priv.uar->index;

	return 0;

err_buf:
	free_cq_buf(dev, &cq->buf);

err_db:
	mlx5_db_free(dev->mdev, &cq->db);
	return err;
}

static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
{
	free_cq_buf(dev, &cq->buf);
	mlx5_db_free(dev->mdev, &cq->db);
}

static void notify_soft_wc_handler(struct work_struct *work)
{
	struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq,
					     notify_work);

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct ib_udata *udata)
{
	struct ib_device *ibdev = ibcq->device;
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
	int index;
	int inlen;
	u32 *cqb = NULL;
	void *cqc;
	int cqe_size;
	int eqn;
	int err;

	if (entries < 0 ||
	    (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))))
		return -EINVAL;

	if (check_cq_create_flags(attr->flags))
		return -EOPNOTSUPP;

	entries = roundup_pow_of_two(entries + 1);
	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))
		return -EINVAL;

	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;
	cq->create_flags = attr->flags;
	INIT_LIST_HEAD(&cq->list_send_qp);
	INIT_LIST_HEAD(&cq->list_recv_qp);

	if (udata) {
		err = create_cq_user(dev, udata, cq, entries, &cqb, &cqe_size,
				     &index, &inlen);
		if (err)
			return err;
	} else {
		cqe_size = cache_line_size() == 128 ? 128 : 64;
		err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
				       &index, &inlen);
		if (err)
			return err;

		INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
	}

	err = mlx5_vector2eqn(dev->mdev, vector, &eqn);
	if (err)
		goto err_cqb;

	cq->cqe_size = cqe_size;

	cqc = MLX5_ADDR_OF(create_cq_in, cqb, cq_context);
	MLX5_SET(cqc, cqc, cqe_sz,
		 cqe_sz_to_mlx_sz(cqe_size,
				  cq->private_flags &
				  MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD));
	MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
	MLX5_SET(cqc, cqc, uar_page, index);
	MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
	MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
	if (cq->create_flags & IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN)
		MLX5_SET(cqc, cqc, oi, 1);

	err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen, out, sizeof(out));
	if (err)
		goto err_cqb;

	mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
	if (udata)
		cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
	else
		cq->mcq.comp = mlx5_ib_cq_comp;
	cq->mcq.event = mlx5_ib_cq_event;

	INIT_LIST_HEAD(&cq->wc_list);

	if (udata)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
			err = -EFAULT;
			goto err_cmd;
		}

	kvfree(cqb);
	return 0;

err_cmd:
	mlx5_core_destroy_cq(dev->mdev, &cq->mcq);

err_cqb:
	kvfree(cqb);
	if (udata)
		destroy_cq_user(cq, udata);
	else
		destroy_cq_kernel(dev, cq);
	return err;
}

int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->device);
	struct mlx5_ib_cq *mcq = to_mcq(cq);
	int ret;

	ret = mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
	if (ret)
		return ret;

	if (udata)
		destroy_cq_user(mcq, udata);
	else
		destroy_cq_kernel(dev, mcq);
	return 0;
}

static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn)
{
	return rsn == (ntohl(cqe64->sop_drop_qpn) & 0xffffff);
}

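/* Remove all CQEs belonging to the QP/SRQ identified by @rsn from the
 * CQ; expected to be called with the CQ lock held. mlx5_ib_cq_clean()
 * below is the locked wrapper.
 */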
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq)
{
	struct mlx5_cqe64 *cqe64, *dest64;
	void *cqe, *dest;
	u32 prod_index;
	int nfreed = 0;
	u8 owner_bit;

	if (!cq)
		return;

	/* First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;

	/* Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
		if (is_equal_rsn(cqe64, rsn)) {
			if (srq && (ntohl(cqe64->srqn) & 0xffffff))
				mlx5_ib_free_srq_wqe(srq, be16_to_cpu(cqe64->wqe_counter));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64;
			owner_bit = dest64->op_own & MLX5_CQE_OWNER_MASK;
			memcpy(dest, cqe, cq->mcq.cqe_sz);
			dest64->op_own = owner_bit |
				(dest64->op_own & ~MLX5_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/* Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		mlx5_cq_set_ci(&cq->mcq);
	}
}

void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
{
	if (!cq)
		return;

	spin_lock_irq(&cq->lock);
	__mlx5_ib_cq_clean(cq, qpn, srq);
	spin_unlock_irq(&cq->lock);
}

int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->device);
	struct mlx5_ib_cq *mcq = to_mcq(cq);
	int err;

	if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
		return -EOPNOTSUPP;

	if (cq_period > MLX5_MAX_CQ_PERIOD)
		return -EINVAL;

	err = mlx5_core_modify_cq_moderation(dev->mdev, &mcq->mcq,
					     cq_period, cq_count);
	if (err)
		mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);

	return err;
}

static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
		       int entries, struct ib_udata *udata,
		       int *cqe_size)
{
	struct mlx5_ib_resize_cq ucmd;
	struct ib_umem *umem;
	int err;

	err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
	if (err)
		return err;

	if (ucmd.reserved0 || ucmd.reserved1)
		return -EINVAL;

	/* check multiplication overflow */
	if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
		return -EINVAL;

	umem = ib_umem_get(&dev->ib_dev, ucmd.buf_addr,
			   (size_t)ucmd.cqe_size * entries,
			   IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(umem)) {
		err = PTR_ERR(umem);
		return err;
	}

	cq->resize_umem = umem;
	*cqe_size = ucmd.cqe_size;

	return 0;
}

static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
			 int entries, int cqe_size)
{
	int err;

	cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = alloc_cq_frag_buf(dev, cq->resize_buf, entries, cqe_size);
	if (err)
		goto ex;

	init_cq_frag_buf(cq->resize_buf);

	return 0;

ex:
	kfree(cq->resize_buf);
	return err;
}

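/* Copy the software-owned CQEs that were completed while the resize was
 * in flight from the old buffer into the resize buffer, stopping at the
 * hardware's MLX5_CQE_RESIZE_CQ marker.
 */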
static int copy_resize_cqes(struct mlx5_ib_cq *cq)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_cqe64 *scqe64;
	struct mlx5_cqe64 *dcqe64;
	void *start_cqe;
	void *scqe;
	void *dcqe;
	int ssize;
	int dsize;
	int i;
	u8 sw_own;

	ssize = cq->buf.cqe_size;
	dsize = cq->resize_buf->cqe_size;
	if (ssize != dsize) {
		mlx5_ib_warn(dev, "resize from different cqe size is not supported\n");
		return -EINVAL;
	}

	i = cq->mcq.cons_index;
	scqe = get_sw_cqe(cq, i);
	scqe64 = ssize == 64 ? scqe : scqe + 64;
	start_cqe = scqe;
	if (!scqe) {
		mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
		return -EINVAL;
	}

	while (get_cqe_opcode(scqe64) != MLX5_CQE_RESIZE_CQ) {
		dcqe = mlx5_frag_buf_get_wqe(&cq->resize_buf->fbc,
					     (i + 1) & cq->resize_buf->nent);
		dcqe64 = dsize == 64 ? dcqe : dcqe + 64;
		sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);
		memcpy(dcqe, scqe, dsize);
		dcqe64->op_own = (dcqe64->op_own & ~MLX5_CQE_OWNER_MASK) | sw_own;

		++i;
		scqe = get_sw_cqe(cq, i);
		scqe64 = ssize == 64 ? scqe : scqe + 64;
		if (!scqe) {
			mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
			return -EINVAL;
		}

		if (scqe == start_cqe) {
			pr_warn("resize CQ failed to get resize CQE, CQN 0x%x\n",
				cq->mcq.cqn);
			return -ENOMEM;
		}
	}
	++cq->mcq.cons_index;
	return 0;
}

int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	void *cqc;
	u32 *in;
	int err;
	int npas;
	__be64 *pas;
	unsigned int page_offset_quantized = 0;
	unsigned int page_shift;
	int inlen;
	int cqe_size;
	unsigned long flags;

	if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) {
		pr_info("Firmware does not support resize CQ\n");
		return -ENOSYS;
	}

	if (entries < 1 ||
	    entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
		mlx5_ib_warn(dev, "wrong entries number %d, max %d\n",
			     entries,
			     1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
		return -EINVAL;
	}

	entries = roundup_pow_of_two(entries + 1);
	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
		return -EINVAL;

	if (entries == ibcq->cqe + 1)
		return 0;

	mutex_lock(&cq->resize_mutex);
	if (udata) {
		unsigned long page_size;

		err = resize_user(dev, cq, entries, udata, &cqe_size);
		if (err)
			goto ex;

		page_size = mlx5_umem_find_best_cq_quantized_pgoff(
			cq->resize_umem, cqc, log_page_size,
			MLX5_ADAPTER_PAGE_SHIFT, page_offset, 64,
			&page_offset_quantized);
		if (!page_size) {
			err = -EINVAL;
			goto ex_resize;
		}
		npas = ib_umem_num_dma_blocks(cq->resize_umem, page_size);
		page_shift = order_base_2(page_size);
	} else {
		struct mlx5_frag_buf *frag_buf;

		cqe_size = 64;
		err = resize_kernel(dev, cq, entries, cqe_size);
		if (err)
			goto ex;
		frag_buf = &cq->resize_buf->frag_buf;
		npas = frag_buf->npages;
		page_shift = frag_buf->page_shift;
	}

	inlen = MLX5_ST_SZ_BYTES(modify_cq_in) +
		MLX5_FLD_SZ_BYTES(modify_cq_in, pas[0]) * npas;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto ex_resize;
	}

	pas = (__be64 *)MLX5_ADDR_OF(modify_cq_in, in, pas);
	if (udata)
		mlx5_ib_populate_pas(cq->resize_umem, 1UL << page_shift, pas,
				     0);
	else
		mlx5_fill_page_frag_array(&cq->resize_buf->frag_buf, pas);

	MLX5_SET(modify_cq_in, in,
		 modify_field_select_resize_field_select.resize_field_select.resize_field_select,
		 MLX5_MODIFY_CQ_MASK_LOG_SIZE |
		 MLX5_MODIFY_CQ_MASK_PG_OFFSET |
		 MLX5_MODIFY_CQ_MASK_PG_SIZE);

	cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);

	MLX5_SET(cqc, cqc, log_page_size,
		 page_shift - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(cqc, cqc, page_offset, page_offset_quantized);
	MLX5_SET(cqc, cqc, cqe_sz,
		 cqe_sz_to_mlx_sz(cqe_size,
				  cq->private_flags &
				  MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD));
	MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));

	MLX5_SET(modify_cq_in, in, op_mod, MLX5_CQ_OPMOD_RESIZE);
	MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn);

	err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
	if (err)
		goto ex_alloc;

	if (udata) {
		cq->ibcq.cqe = entries - 1;
		ib_umem_release(cq->buf.umem);
		cq->buf.umem = cq->resize_umem;
		cq->resize_umem = NULL;
	} else {
		struct mlx5_ib_cq_buf tbuf;
		int resized = 0;

		spin_lock_irqsave(&cq->lock, flags);
		if (cq->resize_buf) {
			err = copy_resize_cqes(cq);
			if (!err) {
				tbuf = cq->buf;
				cq->buf = *cq->resize_buf;
				kfree(cq->resize_buf);
				cq->resize_buf = NULL;
				resized = 1;
			}
		}
		cq->ibcq.cqe = entries - 1;
		spin_unlock_irqrestore(&cq->lock, flags);
		if (resized)
			free_cq_buf(dev, &tbuf);
	}
	mutex_unlock(&cq->resize_mutex);

	kvfree(in);
	return 0;

ex_alloc:
	kvfree(in);

ex_resize:
	ib_umem_release(cq->resize_umem);
	if (!udata) {
		free_cq_buf(dev, cq->resize_buf);
		cq->resize_buf = NULL;
	}
ex:
	mutex_unlock(&cq->resize_mutex);
	return err;
}

int mlx5_ib_get_cqe_size(struct ib_cq *ibcq)
{
	struct mlx5_ib_cq *cq;

	if (!ibcq)
		return 128;

	cq = to_mcq(ibcq);
	return cq->cqe_size;
}

/* Called from atomic context */
int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc)
{
	struct mlx5_ib_wc *soft_wc;
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	unsigned long flags;

	soft_wc = kmalloc(sizeof(*soft_wc), GFP_ATOMIC);
	if (!soft_wc)
		return -ENOMEM;

	soft_wc->wc = *wc;
	spin_lock_irqsave(&cq->lock, flags);
	list_add_tail(&soft_wc->list, &cq->wc_list);
	if (cq->notify_flags == IB_CQ_NEXT_COMP ||
	    wc->status != IB_WC_SUCCESS) {
		cq->notify_flags = 0;
		schedule_work(&cq->notify_work);
	}
	spin_unlock_irqrestore(&cq->lock, flags);

	return 0;
}