/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <rdma/ib_umem.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v1.h"

static void set_data_seg(struct hns_roce_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->lkey = cpu_to_le32(sg->lkey);
	dseg->addr = cpu_to_le64(sg->addr);
	dseg->len = cpu_to_le32(sg->length);
}

static void set_raddr_seg(struct hns_roce_wqe_raddr_seg *rseg, u64 remote_addr,
			  u32 rkey)
{
	rseg->raddr = cpu_to_le64(remote_addr);
	rseg->rkey = cpu_to_le32(rkey);
	rseg->len = 0;
}

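/**
 * hns_roce_v1_post_send - post a chain of send work requests to a QP
 * @ibqp: the QP to post to; only GSI and RC QPs are supported by hip06
 * @wr: chain of work requests to post
 * @bad_wr: on error, set to the first work request that failed
 *
 * Builds one WQE per work request under the SQ lock, then rings the send
 * doorbell once for the whole chain.
 */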
static int hns_roce_v1_post_send(struct ib_qp *ibqp,
				 const struct ib_send_wr *wr,
				 const struct ib_send_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
	struct hns_roce_ud_send_wqe *ud_sq_wqe = NULL;
	struct hns_roce_wqe_ctrl_seg *ctrl = NULL;
	struct hns_roce_wqe_data_seg *dseg = NULL;
	struct hns_roce_qp *qp = to_hr_qp(ibqp);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_sq_db sq_db;
	int ps_opcode = 0, i = 0;
	unsigned long flags = 0;
	void *wqe = NULL;
	__le32 doorbell[2];
	int nreq = 0;
	u32 ind = 0;
	int ret = 0;
	u8 *smac;
	int loopback;

	if (unlikely(ibqp->qp_type != IB_QPT_GSI &&
		     ibqp->qp_type != IB_QPT_RC)) {
		dev_err(dev, "unsupported QP type\n");
		*bad_wr = NULL;
		return -EOPNOTSUPP;
	}

	spin_lock_irqsave(&qp->sq.lock, flags);
	ind = qp->sq_next_wqe;
	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
			dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
				wr->num_sge, qp->sq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
		qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
			wr->wr_id;

		/* UD (GSI) and RC WQEs have different layouts; build each separately */
		if (ibqp->qp_type == IB_QPT_GSI) {
			ud_sq_wqe = wqe;
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_0_M,
				       UD_SEND_WQE_U32_4_DMAC_0_S,
				       ah->av.mac[0]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_1_M,
				       UD_SEND_WQE_U32_4_DMAC_1_S,
				       ah->av.mac[1]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_2_M,
				       UD_SEND_WQE_U32_4_DMAC_2_S,
				       ah->av.mac[2]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_3_M,
				       UD_SEND_WQE_U32_4_DMAC_3_S,
				       ah->av.mac[3]);

			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_DMAC_4_M,
				       UD_SEND_WQE_U32_8_DMAC_4_S,
				       ah->av.mac[4]);
			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_DMAC_5_M,
				       UD_SEND_WQE_U32_8_DMAC_5_S,
				       ah->av.mac[5]);

			smac = (u8 *)hr_dev->dev_addr[qp->port];
			loopback = ether_addr_equal_unaligned(ah->av.mac,
							      smac) ? 1 : 0;
			roce_set_bit(ud_sq_wqe->u32_8,
				     UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S,
				     loopback);

			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_OPERATION_TYPE_M,
				       UD_SEND_WQE_U32_8_OPERATION_TYPE_S,
				       HNS_ROCE_WQE_OPCODE_SEND);
			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_M,
				       UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S,
				       2);
			roce_set_bit(ud_sq_wqe->u32_8,
				UD_SEND_WQE_U32_8_SEND_GL_ROUTING_HDR_FLAG_S,
				1);

			ud_sq_wqe->u32_8 |= (wr->send_flags & IB_SEND_SIGNALED ?
				cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
				(wr->send_flags & IB_SEND_SOLICITED ?
				cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
				((wr->opcode == IB_WR_SEND_WITH_IMM) ?
				cpu_to_le32(HNS_ROCE_WQE_IMM) : 0);

			roce_set_field(ud_sq_wqe->u32_16,
				       UD_SEND_WQE_U32_16_DEST_QP_M,
				       UD_SEND_WQE_U32_16_DEST_QP_S,
				       ud_wr(wr)->remote_qpn);
			roce_set_field(ud_sq_wqe->u32_16,
				       UD_SEND_WQE_U32_16_MAX_STATIC_RATE_M,
				       UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S,
				       ah->av.stat_rate);

			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_FLOW_LABEL_M,
				       UD_SEND_WQE_U32_36_FLOW_LABEL_S,
				       ah->av.flowlabel);
			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_PRIORITY_M,
				       UD_SEND_WQE_U32_36_PRIORITY_S,
				       ah->av.sl);
			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_SGID_INDEX_M,
				       UD_SEND_WQE_U32_36_SGID_INDEX_S,
				       hns_get_gid_index(hr_dev, qp->phy_port,
							 ah->av.gid_index));

			roce_set_field(ud_sq_wqe->u32_40,
				       UD_SEND_WQE_U32_40_HOP_LIMIT_M,
				       UD_SEND_WQE_U32_40_HOP_LIMIT_S,
				       ah->av.hop_limit);
			roce_set_field(ud_sq_wqe->u32_40,
				       UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M,
				       UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S,
				       ah->av.tclass);

			memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN);

			ud_sq_wqe->va0_l =
				cpu_to_le32((u32)wr->sg_list[0].addr);
			ud_sq_wqe->va0_h =
				cpu_to_le32((wr->sg_list[0].addr) >> 32);
			ud_sq_wqe->l_key0 =
				cpu_to_le32(wr->sg_list[0].lkey);

			ud_sq_wqe->va1_l =
				cpu_to_le32((u32)wr->sg_list[1].addr);
			ud_sq_wqe->va1_h =
				cpu_to_le32((wr->sg_list[1].addr) >> 32);
			ud_sq_wqe->l_key1 =
				cpu_to_le32(wr->sg_list[1].lkey);
			ind++;
		} else if (ibqp->qp_type == IB_QPT_RC) {
			u32 tmp_len = 0;

			ctrl = wqe;
			memset(ctrl, 0, sizeof(struct hns_roce_wqe_ctrl_seg));
			for (i = 0; i < wr->num_sge; i++)
				tmp_len += wr->sg_list[i].length;

			ctrl->msg_length =
			  cpu_to_le32(le32_to_cpu(ctrl->msg_length) + tmp_len);

			ctrl->sgl_pa_h = 0;
			ctrl->flag = 0;

			switch (wr->opcode) {
			case IB_WR_SEND_WITH_IMM:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				ctrl->imm_data = wr->ex.imm_data;
				break;
			case IB_WR_SEND_WITH_INV:
				ctrl->inv_key =
					cpu_to_le32(wr->ex.invalidate_rkey);
				break;
			default:
				ctrl->imm_data = 0;
				break;
			}

			/* Ctrl field: set the signal, solicited, imm and fence types */
			/* The SO bit is left unset until a conforming application scenario appears */
			ctrl->flag |= (wr->send_flags & IB_SEND_SIGNALED ?
				      cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
				      (wr->send_flags & IB_SEND_SOLICITED ?
				      cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
				      ((wr->opcode == IB_WR_SEND_WITH_IMM ||
				      wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) ?
				      cpu_to_le32(HNS_ROCE_WQE_IMM) : 0) |
				      (wr->send_flags & IB_SEND_FENCE ?
				      (cpu_to_le32(HNS_ROCE_WQE_FENCE)) : 0);

			wqe += sizeof(struct hns_roce_wqe_ctrl_seg);

			switch (wr->opcode) {
			case IB_WR_RDMA_READ:
				ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_READ;
				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				break;
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_WRITE;
				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				break;
			case IB_WR_SEND:
			case IB_WR_SEND_WITH_INV:
			case IB_WR_SEND_WITH_IMM:
				ps_opcode = HNS_ROCE_WQE_OPCODE_SEND;
				break;
			case IB_WR_LOCAL_INV:
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
			case IB_WR_LSO:
			default:
				ps_opcode = HNS_ROCE_WQE_OPCODE_MASK;
				break;
			}
			ctrl->flag |= cpu_to_le32(ps_opcode);
			wqe += sizeof(struct hns_roce_wqe_raddr_seg);

			dseg = wqe;
			if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
				if (le32_to_cpu(ctrl->msg_length) >
				    hr_dev->caps.max_sq_inline) {
					ret = -EINVAL;
					*bad_wr = wr;
					dev_err(dev, "inline data len %u exceeds max %u\n",
						le32_to_cpu(ctrl->msg_length),
						hr_dev->caps.max_sq_inline);
					goto out;
				}
				for (i = 0; i < wr->num_sge; i++) {
					memcpy(wqe, ((void *) (uintptr_t)
					       wr->sg_list[i].addr),
					       wr->sg_list[i].length);
					wqe += wr->sg_list[i].length;
				}
				ctrl->flag |= cpu_to_le32(HNS_ROCE_WQE_INLINE);
			} else {
				/* Build one data segment per SGE */
				for (i = 0; i < wr->num_sge; i++)
					set_data_seg(dseg + i, wr->sg_list + i);

				ctrl->flag |= cpu_to_le32(wr->num_sge <<
					      HNS_ROCE_WQE_SGE_NUM_BIT);
			}
			ind++;
		}
	}

out:
	/* Ring the SQ doorbell for the WQEs posted above */
	if (likely(nreq)) {
		qp->sq.head += nreq;
		/* Memory barrier */
		wmb();

		sq_db.u32_4 = 0;
		sq_db.u32_8 = 0;
		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SQ_HEAD_M,
			       SQ_DOORBELL_U32_4_SQ_HEAD_S,
			       (qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1)));
		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SL_M,
			       SQ_DOORBELL_U32_4_SL_S, qp->sl);
		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_PORT_M,
			       SQ_DOORBELL_U32_4_PORT_S, qp->phy_port);
		roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M,
			       SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn);
		roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1);

		doorbell[0] = sq_db.u32_4;
		doorbell[1] = sq_db.u32_8;

		hns_roce_write64_k(doorbell, qp->sq.db_reg_l);
		qp->sq_next_wqe = ind;
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return ret;
}

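/**
 * hns_roce_v1_post_recv - post a chain of receive work requests to a QP
 * @ibqp: the QP to post to
 * @wr: chain of receive work requests to post
 * @bad_wr: on error, set to the first work request that failed
 *
 * For the GSI QP the new RQ head is written through the QP1C config
 * register; for all other QPs the RQ doorbell is rung once per chain.
 */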
static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
				 const struct ib_recv_wr *wr,
				 const struct ib_recv_wr **bad_wr)
{
	int ret = 0;
	int nreq = 0;
	int ind = 0;
	int i = 0;
	u32 reg_val;
	unsigned long flags = 0;
	struct hns_roce_rq_wqe_ctrl *ctrl = NULL;
	struct hns_roce_wqe_data_seg *scat = NULL;
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_rq_db rq_db;
	__le32 doorbell[2] = {0};

	spin_lock_irqsave(&hr_qp->rq.lock, flags);
	ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
					 hr_qp->ibqp.recv_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
			dev_err(dev, "rq:num_sge=%d > hr_qp->rq.max_gs=%d\n",
				wr->num_sge, hr_qp->rq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		ctrl = get_recv_wqe(hr_qp, ind);

		roce_set_field(ctrl->rwqe_byte_12,
			       RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M,
			       RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S,
			       wr->num_sge);

		scat = (struct hns_roce_wqe_data_seg *)(ctrl + 1);

		for (i = 0; i < wr->num_sge; i++)
			set_data_seg(scat + i, wr->sg_list + i);

		hr_qp->rq.wrid[ind] = wr->wr_id;

		ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
	}

out:
	if (likely(nreq)) {
		hr_qp->rq.head += nreq;
		/* Memory barrier */
		wmb();

		if (ibqp->qp_type == IB_QPT_GSI) {
			__le32 tmp;

			/* SW updates the GSI rq head via the QP1C register */
			reg_val = roce_read(to_hr_dev(ibqp->device),
					    ROCEE_QP1C_CFG3_0_REG +
					    QP1C_CFGN_OFFSET * hr_qp->phy_port);
			tmp = cpu_to_le32(reg_val);
			roce_set_field(tmp,
				       ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M,
				       ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S,
				       hr_qp->rq.head);
			reg_val = le32_to_cpu(tmp);
			roce_write(to_hr_dev(ibqp->device),
				   ROCEE_QP1C_CFG3_0_REG +
				   QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val);
		} else {
			rq_db.u32_4 = 0;
			rq_db.u32_8 = 0;

			roce_set_field(rq_db.u32_4, RQ_DOORBELL_U32_4_RQ_HEAD_M,
				       RQ_DOORBELL_U32_4_RQ_HEAD_S,
				       hr_qp->rq.head);
			roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_QPN_M,
				       RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
			roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_CMD_M,
				       RQ_DOORBELL_U32_8_CMD_S, 1);
			roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S,
				     1);

			doorbell[0] = rq_db.u32_4;
			doorbell[1] = rq_db.u32_8;

			hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
		}
	}
	spin_unlock_irqrestore(&hr_qp->rq.lock, flags);

	return ret;
}

static void hns_roce_set_db_event_mode(struct hns_roce_dev *hr_dev,
				       int sdb_mode, int odb_mode)
{
	__le32 tmp;
	u32 val;

	val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
	tmp = cpu_to_le32(val);
	roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S, sdb_mode);
	roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S, odb_mode);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
}

static void hns_roce_set_db_ext_mode(struct hns_roce_dev *hr_dev, u32 sdb_mode,
				     u32 odb_mode)
{
	__le32 tmp;
	u32 val;

	/* Configure SDB/ODB extend mode */
	val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
	tmp = cpu_to_le32(val);
	roce_set_bit(tmp, ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S, sdb_mode);
	roce_set_bit(tmp, ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S, odb_mode);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
}

static void hns_roce_set_sdb(struct hns_roce_dev *hr_dev, u32 sdb_alept,
			     u32 sdb_alful)
{
	__le32 tmp;
	u32 val;

	/* Configure SDB */
	val = roce_read(hr_dev, ROCEE_DB_SQ_WL_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M,
		       ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S, sdb_alful);
	roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M,
		       ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S, sdb_alept);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_DB_SQ_WL_REG, val);
}

static void hns_roce_set_odb(struct hns_roce_dev *hr_dev, u32 odb_alept,
			     u32 odb_alful)
{
	__le32 tmp;
	u32 val;

	/* Configure ODB */
	val = roce_read(hr_dev, ROCEE_DB_OTHERS_WL_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M,
		       ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S, odb_alful);
	roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M,
		       ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S, odb_alept);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_DB_OTHERS_WL_REG, val);
}

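/**
 * hns_roce_set_sdb_ext - configure the extended send doorbell (SDB) region
 * @hr_dev: RoCE device struct pointer
 * @ext_sdb_alept: almost-empty waterline of the extended SDB
 * @ext_sdb_alful: almost-full waterline of the extended SDB
 *
 * Programs the waterlines, the 4K-aligned base address and the depth of
 * the extended SDB buffer into the ROCEE registers.
 */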
static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
				 u32 ext_sdb_alful)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;
	dma_addr_t sdb_dma_addr;
	__le32 tmp;
	u32 val;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	/* Configure extend SDB threshold */
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_EMPTY_REG, ext_sdb_alept);
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_REG, ext_sdb_alful);

	/* Configure extend SDB base addr */
	sdb_dma_addr = db->ext_db->sdb_buf_list->map;
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_REG, (u32)(sdb_dma_addr >> 12));

	/* Configure extend SDB depth */
	val = roce_read(hr_dev, ROCEE_EXT_DB_SQ_H_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M,
		       ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S,
		       db->ext_db->esdb_dep);
	/*
	 * 44 = 32 + 12: the address is shifted right by 12 because the
	 * buffer is 4K aligned, and by another 32 to extract the high
	 * 32 bits of the value written to hardware.
	 */
	roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M,
		       ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S, sdb_dma_addr >> 44);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_H_REG, val);

	dev_dbg(dev, "ext SDB depth: 0x%x\n", db->ext_db->esdb_dep);
	dev_dbg(dev, "ext SDB threshold: empty: 0x%x, full: 0x%x\n",
		ext_sdb_alept, ext_sdb_alful);
}

static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept,
				 u32 ext_odb_alful)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;
	dma_addr_t odb_dma_addr;
	__le32 tmp;
	u32 val;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	/* Configure extend ODB threshold */
	roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_EMPTY_REG, ext_odb_alept);
	roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_REG, ext_odb_alful);

	/* Configure extend ODB base addr */
	odb_dma_addr = db->ext_db->odb_buf_list->map;
	roce_write(hr_dev, ROCEE_EXT_DB_OTH_REG, (u32)(odb_dma_addr >> 12));

	/* Configure extend ODB depth */
	val = roce_read(hr_dev, ROCEE_EXT_DB_OTH_H_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M,
		       ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S,
		       db->ext_db->eodb_dep);
	/* As for SDB, program the high 32 bits of the 4K-aligned base addr */
	roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M,
		       ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S,
		       odb_dma_addr >> 44);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_EXT_DB_OTH_H_REG, val);

	dev_dbg(dev, "ext ODB depth: 0x%x\n", db->ext_db->eodb_dep);
	dev_dbg(dev, "ext ODB threshold: empty: 0x%x, full: 0x%x\n",
		ext_odb_alept, ext_odb_alful);
}

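/**
 * hns_roce_db_ext_init - set up the send and other doorbell regions
 * @hr_dev: RoCE device struct pointer
 * @sdb_ext_mod: nonzero to place the send doorbell in an extended buffer
 * @odb_ext_mod: nonzero to place the other doorbell in an extended buffer
 *
 * Allocates DMA-coherent buffers for whichever doorbells use extend mode
 * and falls back to the on-chip waterline configuration otherwise.
 */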
static int hns_roce_db_ext_init(struct hns_roce_dev *hr_dev, u32 sdb_ext_mod,
				u32 odb_ext_mod)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;
	dma_addr_t sdb_dma_addr;
	dma_addr_t odb_dma_addr;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	db->ext_db = kmalloc(sizeof(*db->ext_db), GFP_KERNEL);
	if (!db->ext_db)
		return -ENOMEM;

	if (sdb_ext_mod) {
		db->ext_db->sdb_buf_list = kmalloc(
				sizeof(*db->ext_db->sdb_buf_list), GFP_KERNEL);
		if (!db->ext_db->sdb_buf_list) {
			ret = -ENOMEM;
			goto ext_sdb_buf_fail_out;
		}

		db->ext_db->sdb_buf_list->buf = dma_alloc_coherent(dev,
						HNS_ROCE_V1_EXT_SDB_SIZE,
						&sdb_dma_addr, GFP_KERNEL);
		if (!db->ext_db->sdb_buf_list->buf) {
			ret = -ENOMEM;
			goto alloc_sq_db_buf_fail;
		}
		db->ext_db->sdb_buf_list->map = sdb_dma_addr;

		db->ext_db->esdb_dep = ilog2(HNS_ROCE_V1_EXT_SDB_DEPTH);
		hns_roce_set_sdb_ext(hr_dev, HNS_ROCE_V1_EXT_SDB_ALEPT,
				     HNS_ROCE_V1_EXT_SDB_ALFUL);
	} else {
		hns_roce_set_sdb(hr_dev, HNS_ROCE_V1_SDB_ALEPT,
				 HNS_ROCE_V1_SDB_ALFUL);
	}

	if (odb_ext_mod) {
		db->ext_db->odb_buf_list = kmalloc(
				sizeof(*db->ext_db->odb_buf_list), GFP_KERNEL);
		if (!db->ext_db->odb_buf_list) {
			ret = -ENOMEM;
			goto ext_odb_buf_fail_out;
		}

		db->ext_db->odb_buf_list->buf = dma_alloc_coherent(dev,
						HNS_ROCE_V1_EXT_ODB_SIZE,
						&odb_dma_addr, GFP_KERNEL);
		if (!db->ext_db->odb_buf_list->buf) {
			ret = -ENOMEM;
			goto alloc_otr_db_buf_fail;
		}
		db->ext_db->odb_buf_list->map = odb_dma_addr;

		db->ext_db->eodb_dep = ilog2(HNS_ROCE_V1_EXT_ODB_DEPTH);
		hns_roce_set_odb_ext(hr_dev, HNS_ROCE_V1_EXT_ODB_ALEPT,
				     HNS_ROCE_V1_EXT_ODB_ALFUL);
	} else {
		hns_roce_set_odb(hr_dev, HNS_ROCE_V1_ODB_ALEPT,
				 HNS_ROCE_V1_ODB_ALFUL);
	}

	hns_roce_set_db_ext_mode(hr_dev, sdb_ext_mod, odb_ext_mod);

	return 0;

alloc_otr_db_buf_fail:
	kfree(db->ext_db->odb_buf_list);

ext_odb_buf_fail_out:
	if (sdb_ext_mod) {
		dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
				  db->ext_db->sdb_buf_list->buf,
				  db->ext_db->sdb_buf_list->map);
	}

alloc_sq_db_buf_fail:
	if (sdb_ext_mod)
		kfree(db->ext_db->sdb_buf_list);

ext_sdb_buf_fail_out:
	kfree(db->ext_db);
	return ret;
}

static struct hns_roce_qp *hns_roce_v1_create_lp_qp(struct hns_roce_dev *hr_dev,
						    struct ib_pd *pd)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct ib_qp_init_attr init_attr;
	struct ib_qp *qp;

	memset(&init_attr, 0, sizeof(struct ib_qp_init_attr));
	init_attr.qp_type = IB_QPT_RC;
	init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr.cap.max_recv_wr = HNS_ROCE_MIN_WQE_NUM;
	init_attr.cap.max_send_wr = HNS_ROCE_MIN_WQE_NUM;

	qp = hns_roce_create_qp(pd, &init_attr, NULL);
	if (IS_ERR(qp)) {
		dev_err(dev, "Create loop qp for mr free failed!\n");
		return NULL;
	}

	return to_hr_qp(qp);
}

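/**
 * hns_roce_v1_rsv_lp_qp - reserve loopback QPs used when freeing MRs
 * @hr_dev: RoCE device struct pointer
 *
 * hip06 may still reference an MR after software frees it, so the driver
 * keeps a loopback RC QP per port (sharing one CQ and PD) and later posts
 * zero-length RDMA writes on them to drain the hardware pipeline before
 * an MR is released.
 */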
static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_caps *caps = &hr_dev->caps;
	struct device *dev = &hr_dev->pdev->dev;
	struct ib_cq_init_attr cq_init_attr;
	struct hns_roce_free_mr *free_mr;
	struct ib_qp_attr attr = { 0 };
	struct hns_roce_v1_priv *priv;
	struct hns_roce_qp *hr_qp;
	struct ib_device *ibdev;
	struct ib_cq *cq;
	struct ib_pd *pd;
	union ib_gid dgid;
	__be64 subnet_prefix;
	int attr_mask = 0;
	int ret;
	int i, j;
	u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 };
	u8 phy_port;
	u8 port = 0;
	u8 sl;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	/* Reserve a cq for the loop qps */
	cq_init_attr.cqe = HNS_ROCE_MIN_WQE_NUM * 2;
	cq_init_attr.comp_vector = 0;

	ibdev = &hr_dev->ib_dev;
	cq = rdma_zalloc_drv_obj(ibdev, ib_cq);
	if (!cq)
		return -ENOMEM;

	ret = hns_roce_ib_create_cq(cq, &cq_init_attr, NULL);
	if (ret) {
		dev_err(dev, "Create cq for reserved loop qp failed!\n");
		goto alloc_cq_failed;
	}
	free_mr->mr_free_cq = to_hr_cq(cq);
	free_mr->mr_free_cq->ib_cq.device = &hr_dev->ib_dev;
	free_mr->mr_free_cq->ib_cq.uobject = NULL;
	free_mr->mr_free_cq->ib_cq.comp_handler = NULL;
	free_mr->mr_free_cq->ib_cq.event_handler = NULL;
	free_mr->mr_free_cq->ib_cq.cq_context = NULL;
	atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0);

	pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
	if (!pd) {
		ret = -ENOMEM;
		goto alloc_mem_failed;
	}

	pd->device = ibdev;
	ret = hns_roce_alloc_pd(pd, NULL);
	if (ret)
		goto alloc_pd_failed;

	free_mr->mr_free_pd = to_hr_pd(pd);
	free_mr->mr_free_pd->ibpd.device = &hr_dev->ib_dev;
	free_mr->mr_free_pd->ibpd.uobject = NULL;
	free_mr->mr_free_pd->ibpd.__internal_mr = NULL;
	atomic_set(&free_mr->mr_free_pd->ibpd.usecnt, 0);

	attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE;
	attr.pkey_index = 0;
	attr.min_rnr_timer = 0;
	/* Disable read ability */
	attr.max_dest_rd_atomic = 0;
	attr.max_rd_atomic = 0;
	/* Use arbitrary values as rq_psn and sq_psn */
	attr.rq_psn = 0x0808;
	attr.sq_psn = 0x0808;
	attr.retry_cnt = 7;
	attr.rnr_retry = 7;
	attr.timeout = 0x12;
	attr.path_mtu = IB_MTU_256;
	attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0);
	rdma_ah_set_static_rate(&attr.ah_attr, 3);

	subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
		phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
				(i % HNS_ROCE_MAX_PORTS);
		sl = i / HNS_ROCE_MAX_PORTS;

		for (j = 0; j < caps->num_ports; j++) {
			if (hr_dev->iboe.phy_port[j] == phy_port) {
				queue_en[i] = 1;
				port = j;
				break;
			}
		}

		if (!queue_en[i])
			continue;

		free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
		if (!free_mr->mr_free_qp[i]) {
			dev_err(dev, "Create loop qp failed!\n");
			ret = -ENOMEM;
			goto create_lp_qp_failed;
		}
		hr_qp = free_mr->mr_free_qp[i];

		hr_qp->port = port;
		hr_qp->phy_port = phy_port;
		hr_qp->ibqp.qp_type = IB_QPT_RC;
		hr_qp->ibqp.device = &hr_dev->ib_dev;
		hr_qp->ibqp.uobject = NULL;
		atomic_set(&hr_qp->ibqp.usecnt, 0);
		hr_qp->ibqp.pd = pd;
		hr_qp->ibqp.recv_cq = cq;
		hr_qp->ibqp.send_cq = cq;

		rdma_ah_set_port_num(&attr.ah_attr, port + 1);
		rdma_ah_set_sl(&attr.ah_attr, sl);
		attr.port_num = port + 1;

		attr.dest_qp_num = hr_qp->qpn;
		memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr),
		       hr_dev->dev_addr[port],
		       ETH_ALEN);

		memcpy(&dgid.raw, &subnet_prefix, sizeof(u64));
		memcpy(&dgid.raw[8], hr_dev->dev_addr[port], 3);
		memcpy(&dgid.raw[13], hr_dev->dev_addr[port] + 3, 3);
		dgid.raw[11] = 0xff;
		dgid.raw[12] = 0xfe;
		dgid.raw[8] ^= 2;
		rdma_ah_set_dgid_raw(&attr.ah_attr, dgid.raw);

		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
					    IB_QPS_RESET, IB_QPS_INIT);
		if (ret) {
			dev_err(dev, "modify qp failed(%d)!\n", ret);
			goto create_lp_qp_failed;
		}

		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, IB_QP_DEST_QPN,
					    IB_QPS_INIT, IB_QPS_RTR);
		if (ret) {
			dev_err(dev, "modify qp failed(%d)!\n", ret);
			goto create_lp_qp_failed;
		}

		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
					    IB_QPS_RTR, IB_QPS_RTS);
		if (ret) {
			dev_err(dev, "modify qp failed(%d)!\n", ret);
			goto create_lp_qp_failed;
		}
	}

	return 0;

create_lp_qp_failed:
	for (i -= 1; i >= 0; i--) {
		hr_qp = free_mr->mr_free_qp[i];
		if (hns_roce_v1_destroy_qp(&hr_qp->ibqp, NULL))
			dev_err(dev, "Destroy qp %d for mr free failed!\n", i);
	}

	hns_roce_dealloc_pd(pd, NULL);

alloc_pd_failed:
	kfree(pd);

alloc_mem_failed:
	hns_roce_ib_destroy_cq(cq, NULL);
alloc_cq_failed:
	kfree(cq);
	return ret;
}

static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_qp *hr_qp;
	int ret;
	int i;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
		hr_qp = free_mr->mr_free_qp[i];
		if (!hr_qp)
			continue;

		ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp, NULL);
		if (ret)
			dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n",
				i, ret);
	}

	hns_roce_ib_destroy_cq(&free_mr->mr_free_cq->ib_cq, NULL);
	kfree(&free_mr->mr_free_cq->ib_cq);
	hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd, NULL);
	kfree(&free_mr->mr_free_pd->ibpd);
}

static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;
	u32 sdb_ext_mod;
	u32 odb_ext_mod;
	u32 sdb_evt_mod;
	u32 odb_evt_mod;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	memset(db, 0, sizeof(*db));

	/* Default DB mode */
	sdb_ext_mod = HNS_ROCE_SDB_EXTEND_MODE;
	odb_ext_mod = HNS_ROCE_ODB_EXTEND_MODE;
	sdb_evt_mod = HNS_ROCE_SDB_NORMAL_MODE;
	odb_evt_mod = HNS_ROCE_ODB_POLL_MODE;

	db->sdb_ext_mod = sdb_ext_mod;
	db->odb_ext_mod = odb_ext_mod;

	/* Init extend DB */
	ret = hns_roce_db_ext_init(hr_dev, sdb_ext_mod, odb_ext_mod);
	if (ret) {
		dev_err(dev, "Failed in extend DB configuration.\n");
		return ret;
	}

	hns_roce_set_db_event_mode(hr_dev, sdb_evt_mod, odb_evt_mod);

	return 0;
}

static void hns_roce_v1_recreate_lp_qp_work_fn(struct work_struct *work)
{
	struct hns_roce_recreate_lp_qp_work *lp_qp_work;
	struct hns_roce_dev *hr_dev;

	lp_qp_work = container_of(work, struct hns_roce_recreate_lp_qp_work,
				  work);
	hr_dev = to_hr_dev(lp_qp_work->ib_dev);

	hns_roce_v1_release_lp_qp(hr_dev);

	if (hns_roce_v1_rsv_lp_qp(hr_dev))
		dev_err(&hr_dev->pdev->dev, "create reserved qp failed\n");

	if (lp_qp_work->comp_flag)
		complete(lp_qp_work->comp);

	kfree(lp_qp_work);
}

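/**
 * hns_roce_v1_recreate_lp_qp - rebuild the reserved loopback QPs
 * @hr_dev: RoCE device struct pointer
 *
 * Queues the recreate work item and polls for its completion for up to
 * HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS; needed after a MAC change,
 * because loopback only works while smac equals dmac.
 */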
static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_recreate_lp_qp_work *lp_qp_work;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	struct completion comp;
	long end = HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	lp_qp_work = kzalloc(sizeof(struct hns_roce_recreate_lp_qp_work),
			     GFP_KERNEL);
	if (!lp_qp_work)
		return -ENOMEM;

	INIT_WORK(&(lp_qp_work->work), hns_roce_v1_recreate_lp_qp_work_fn);

	lp_qp_work->ib_dev = &(hr_dev->ib_dev);
	lp_qp_work->comp = &comp;
	lp_qp_work->comp_flag = 1;

	init_completion(lp_qp_work->comp);

	queue_work(free_mr->free_mr_wq, &(lp_qp_work->work));

	while (end > 0) {
		if (try_wait_for_completion(&comp))
			return 0;
		msleep(HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE);
		end -= HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE;
	}

	lp_qp_work->comp_flag = 0;
	if (try_wait_for_completion(&comp))
		return 0;

	dev_warn(dev, "recreate lp qp failed: 20s timeout, returning failure!\n");
	return -ETIMEDOUT;
}

static int hns_roce_v1_send_lp_wqe(struct hns_roce_qp *hr_qp)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
	struct device *dev = &hr_dev->pdev->dev;
	struct ib_send_wr send_wr;
	const struct ib_send_wr *bad_wr;
	int ret;

	memset(&send_wr, 0, sizeof(send_wr));
	send_wr.next = NULL;
	send_wr.num_sge = 0;
	send_wr.send_flags = 0;
	send_wr.sg_list = NULL;
	send_wr.wr_id = (unsigned long long)&send_wr;
	send_wr.opcode = IB_WR_RDMA_WRITE;

	ret = hns_roce_v1_post_send(&hr_qp->ibqp, &send_wr, &bad_wr);
	if (ret) {
		dev_err(dev, "Post write wqe for mr free failed(%d)!\n", ret);
		return ret;
	}

	return 0;
}

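/**
 * hns_roce_v1_mr_free_work_fn - work item that drains the loopback QPs
 * @work: the queued work struct
 *
 * Posts one zero-length loopback WQE per reserved QP, then polls their
 * shared CQ until every completion has arrived or the timeout expires;
 * after that the MR being freed can no longer be referenced by hardware.
 */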
static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
{
	struct hns_roce_mr_free_work *mr_work;
	struct ib_wc wc[HNS_ROCE_V1_RESV_QP];
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_cq *mr_free_cq;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_dev *hr_dev;
	struct hns_roce_mr *hr_mr;
	struct hns_roce_qp *hr_qp;
	struct device *dev;
	unsigned long end =
		msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
	int i;
	int ret;
	int ne = 0;

	mr_work = container_of(work, struct hns_roce_mr_free_work, work);
	hr_mr = (struct hns_roce_mr *)mr_work->mr;
	hr_dev = to_hr_dev(mr_work->ib_dev);
	dev = &hr_dev->pdev->dev;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;
	mr_free_cq = free_mr->mr_free_cq;

	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
		hr_qp = free_mr->mr_free_qp[i];
		if (!hr_qp)
			continue;
		ne++;

		ret = hns_roce_v1_send_lp_wqe(hr_qp);
		if (ret) {
			dev_err(dev,
				"Send wqe (qp:0x%lx) for mr free failed(%d)!\n",
				hr_qp->qpn, ret);
			goto free_work;
		}
	}

	if (!ne) {
		dev_err(dev, "Reserved loop qp is absent!\n");
		goto free_work;
	}

	do {
		ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
		if (ret < 0 && hr_qp) {
			dev_err(dev,
				"Poll cqe failed(%d) on qp 0x%lx for mr 0x%x free! Remain %d cqe\n",
				ret, hr_qp->qpn, hr_mr->key, ne);
			goto free_work;
		}
		ne -= ret;
		usleep_range(HNS_ROCE_V1_FREE_MR_WAIT_VALUE * 1000,
			     (1 + HNS_ROCE_V1_FREE_MR_WAIT_VALUE) * 1000);
	} while (ne && time_before_eq(jiffies, end));

	if (ne != 0)
		dev_err(dev,
			"Poll cqe for mr 0x%x free timeout! Remain %d cqe\n",
			hr_mr->key, ne);

free_work:
	if (mr_work->comp_flag)
		complete(mr_work->comp);
	kfree(mr_work);
}

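/**
 * hns_roce_v1_dereg_mr - free an MR once hardware can no longer touch it
 * @hr_dev: RoCE device struct pointer
 * @mr: the memory region to free
 * @udata: user-space data, unused here
 *
 * Issues HW2SW_MPT, queues the loopback drain work and waits for it to
 * complete before releasing the PBL buffer, key bitmap entry and umem.
 */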
static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev,
				struct hns_roce_mr *mr, struct ib_udata *udata)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_mr_free_work *mr_work;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	struct completion comp;
	long end = HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS;
	unsigned long start = jiffies;
	int npages;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	if (mr->enabled) {
		if (hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key)
				       & (hr_dev->caps.num_mtpts - 1)))
			dev_warn(dev, "HW2SW_MPT failed!\n");
	}

	mr_work = kzalloc(sizeof(*mr_work), GFP_KERNEL);
	if (!mr_work) {
		ret = -ENOMEM;
		goto free_mr;
	}

	INIT_WORK(&(mr_work->work), hns_roce_v1_mr_free_work_fn);

	mr_work->ib_dev = &(hr_dev->ib_dev);
	mr_work->comp = &comp;
	mr_work->comp_flag = 1;
	mr_work->mr = (void *)mr;
	init_completion(mr_work->comp);

	queue_work(free_mr->free_mr_wq, &(mr_work->work));

	while (end > 0) {
		if (try_wait_for_completion(&comp))
			goto free_mr;
		msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
		end -= HNS_ROCE_V1_FREE_MR_WAIT_VALUE;
	}

	mr_work->comp_flag = 0;
	if (try_wait_for_completion(&comp))
		goto free_mr;

	dev_warn(dev, "Free mr work 0x%x timed out after 50s!\n", mr->key);
	ret = -ETIMEDOUT;

free_mr:
	dev_dbg(dev, "Free mr 0x%x used 0x%x us.\n",
		mr->key, jiffies_to_usecs(jiffies) - jiffies_to_usecs(start));

	if (mr->size != ~0ULL) {
		npages = ib_umem_page_count(mr->umem);
		dma_free_coherent(dev, npages * 8, mr->pbl_buf,
				  mr->pbl_dma_addr);
	}

	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
			     key_to_hw_index(mr->key), 0);

	ib_umem_release(mr->umem);

	kfree(mr);

	return ret;
}

static void hns_roce_db_free(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	if (db->sdb_ext_mod) {
		dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
				  db->ext_db->sdb_buf_list->buf,
				  db->ext_db->sdb_buf_list->map);
		kfree(db->ext_db->sdb_buf_list);
	}

	if (db->odb_ext_mod) {
		dma_free_coherent(dev, HNS_ROCE_V1_EXT_ODB_SIZE,
				  db->ext_db->odb_buf_list->buf,
				  db->ext_db->odb_buf_list->map);
		kfree(db->ext_db->odb_buf_list);
	}

	kfree(db->ext_db);
}

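/**
 * hns_roce_raq_init - set up the receive asynchronous queue (RAQ)
 * @hr_dev: RoCE device struct pointer
 *
 * Allocates the extended RAQ buffer and programs its base address, depth
 * and waterline, then enables extended RAQ mode and RAQ drop.
 */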
static int hns_roce_raq_init(struct hns_roce_dev *hr_dev)
{
	int ret;
	u32 val;
	__le32 tmp;
	int raq_shift = 0;
	dma_addr_t addr;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_raq_table *raq;
	struct device *dev = &hr_dev->pdev->dev;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	raq = &priv->raq_table;

	raq->e_raq_buf = kzalloc(sizeof(*(raq->e_raq_buf)), GFP_KERNEL);
	if (!raq->e_raq_buf)
		return -ENOMEM;

	raq->e_raq_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_RAQ_SIZE,
						 &addr, GFP_KERNEL);
	if (!raq->e_raq_buf->buf) {
		ret = -ENOMEM;
		goto err_dma_alloc_raq;
	}
	raq->e_raq_buf->map = addr;

	/* Configure raq extended address, 48 bit, 4K aligned */
	roce_write(hr_dev, ROCEE_EXT_RAQ_REG, raq->e_raq_buf->map >> 12);

	/* Configure raq_shift */
	raq_shift = ilog2(HNS_ROCE_V1_RAQ_SIZE / HNS_ROCE_V1_RAQ_ENTRY);
	val = roce_read(hr_dev, ROCEE_EXT_RAQ_H_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M,
		       ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S, raq_shift);
	/*
	 * 44 = 32 + 12: the address is shifted right by 12 because the
	 * buffer is 4K aligned, and by another 32 to extract the high
	 * 32 bits of the value written to hardware.
	 */
	roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M,
		       ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S,
		       raq->e_raq_buf->map >> 44);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_EXT_RAQ_H_REG, val);
	dev_dbg(dev, "Configure raq_shift 0x%x.\n", val);

	/* Configure raq threshold */
	val = roce_read(hr_dev, ROCEE_RAQ_WL_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_RAQ_WL_ROCEE_RAQ_WL_M,
		       ROCEE_RAQ_WL_ROCEE_RAQ_WL_S,
		       HNS_ROCE_V1_EXT_RAQ_WF);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_RAQ_WL_REG, val);
	dev_dbg(dev, "Configure raq_wl 0x%x.\n", val);

	/* Enable extend raq */
	val = roce_read(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S,
		       POL_TIME_INTERVAL_VAL);
	roce_set_bit(tmp, ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE, 1);
	roce_set_field(tmp,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S,
		       2);
	roce_set_bit(tmp,
		     ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S, 1);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG, val);
	dev_dbg(dev, "Configure WrmsPolTimeInterval 0x%x.\n", val);

	/* Enable raq drop */
	val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
	tmp = cpu_to_le32(val);
	roce_set_bit(tmp, ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S, 1);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
	dev_dbg(dev, "Configure GlbCfg = 0x%x.\n", val);

	return 0;

err_dma_alloc_raq:
	kfree(raq->e_raq_buf);
	return ret;
}

static void hns_roce_raq_free(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_raq_table *raq;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	raq = &priv->raq_table;

	dma_free_coherent(dev, HNS_ROCE_V1_RAQ_SIZE, raq->e_raq_buf->buf,
			  raq->e_raq_buf->map);
	kfree(raq->e_raq_buf);
}

static void hns_roce_port_enable(struct hns_roce_dev *hr_dev, int enable_flag)
{
	__le32 tmp;
	u32 val;

	if (enable_flag) {
		val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
		/* Open all ports */
		tmp = cpu_to_le32(val);
		roce_set_field(tmp, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
			       ROCEE_GLB_CFG_ROCEE_PORT_ST_S,
			       ALL_PORT_VAL_OPEN);
		val = le32_to_cpu(tmp);
		roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
	} else {
		val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
		/* Close all ports */
		tmp = cpu_to_le32(val);
		roce_set_field(tmp, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
			       ROCEE_GLB_CFG_ROCEE_PORT_ST_S, 0x0);
		val = le32_to_cpu(tmp);
		roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
	}
}

static int hns_roce_bt_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	int ret;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;

	priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev,
		HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map,
		GFP_KERNEL);
	if (!priv->bt_table.qpc_buf.buf)
		return -ENOMEM;

	priv->bt_table.mtpt_buf.buf = dma_alloc_coherent(dev,
		HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.mtpt_buf.map,
		GFP_KERNEL);
	if (!priv->bt_table.mtpt_buf.buf) {
		ret = -ENOMEM;
		goto err_failed_alloc_mtpt_buf;
	}

	priv->bt_table.cqc_buf.buf = dma_alloc_coherent(dev,
		HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.cqc_buf.map,
		GFP_KERNEL);
	if (!priv->bt_table.cqc_buf.buf) {
		ret = -ENOMEM;
		goto err_failed_alloc_cqc_buf;
	}

	return 0;

err_failed_alloc_cqc_buf:
	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
		priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);

err_failed_alloc_mtpt_buf:
	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
		priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);

	return ret;
}

static void hns_roce_bt_free(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;

	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
		priv->bt_table.cqc_buf.buf, priv->bt_table.cqc_buf.map);

	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
		priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);

	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
		priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
}

static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_buf_list *tptr_buf;
	struct hns_roce_v1_priv *priv;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	tptr_buf = &priv->tptr_table.tptr_buf;

	/*
	 * This buffer will be used for the CQ's tptr (tail pointer), also
	 * called ci (consumer index). Every CQ uses 2 bytes to save its
	 * cqe ci in hip06. Hardware reads this area to get the new ci
	 * when the queue is almost full.
	 */
	tptr_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
					   &tptr_buf->map, GFP_KERNEL);
	if (!tptr_buf->buf)
		return -ENOMEM;

	hr_dev->tptr_dma_addr = tptr_buf->map;
	hr_dev->tptr_size = HNS_ROCE_V1_TPTR_BUF_SIZE;

	return 0;
}

static void hns_roce_tptr_free(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_buf_list *tptr_buf;
	struct hns_roce_v1_priv *priv;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	tptr_buf = &priv->tptr_table.tptr_buf;

	dma_free_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
			  tptr_buf->buf, tptr_buf->map);
}

static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	free_mr->free_mr_wq = create_singlethread_workqueue("hns_roce_free_mr");
	if (!free_mr->free_mr_wq) {
		dev_err(dev, "Create free mr workqueue failed!\n");
		return -ENOMEM;
	}

	ret = hns_roce_v1_rsv_lp_qp(hr_dev);
	if (ret) {
		dev_err(dev, "Reserve loop qp failed(%d)!\n", ret);
		flush_workqueue(free_mr->free_mr_wq);
		destroy_workqueue(free_mr->free_mr_wq);
	}

	return ret;
}

static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	flush_workqueue(free_mr->free_mr_wq);
	destroy_workqueue(free_mr->free_mr_wq);

	hns_roce_v1_release_lp_qp(hr_dev);
}

/**
 * hns_roce_v1_reset - reset RoCE
 * @hr_dev: RoCE device struct pointer
 * @dereset: true -- drop reset, false -- reset
 *
 * Return: 0 on success, negative on failure.
 */
static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
{
	struct device_node *dsaf_node;
	struct device *dev = &hr_dev->pdev->dev;
	struct device_node *np = dev->of_node;
	struct fwnode_handle *fwnode;
	int ret;

	/* check if this is DT/ACPI case */
	if (dev_of_node(dev)) {
		dsaf_node = of_parse_phandle(np, "dsaf-handle", 0);
		if (!dsaf_node) {
			dev_err(dev, "could not find dsaf-handle\n");
			return -EINVAL;
		}
		fwnode = &dsaf_node->fwnode;
	} else if (is_acpi_device_node(dev->fwnode)) {
		struct fwnode_reference_args args;

		ret = acpi_node_get_property_reference(dev->fwnode,
						       "dsaf-handle", 0, &args);
		if (ret) {
			dev_err(dev, "could not find dsaf-handle\n");
			return ret;
		}
		fwnode = args.fwnode;
	} else {
		dev_err(dev, "cannot read data from DT or ACPI\n");
		return -ENXIO;
	}

	ret = hns_dsaf_roce_reset(fwnode, false);
	if (ret)
		return ret;

	if (dereset) {
		msleep(SLEEP_TIME_INTERVAL);
		ret = hns_dsaf_roce_reset(fwnode, true);
	}

	return ret;
}

static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
{
	int i = 0;
	struct hns_roce_caps *caps = &hr_dev->caps;

	hr_dev->vendor_id = roce_read(hr_dev, ROCEE_VENDOR_ID_REG);
	hr_dev->vendor_part_id = roce_read(hr_dev, ROCEE_VENDOR_PART_ID_REG);
	hr_dev->sys_image_guid = roce_read(hr_dev, ROCEE_SYS_IMAGE_GUID_L_REG) |
				 ((u64)roce_read(hr_dev,
					    ROCEE_SYS_IMAGE_GUID_H_REG) << 32);
	hr_dev->hw_rev = HNS_ROCE_HW_VER1;

	caps->num_qps = HNS_ROCE_V1_MAX_QP_NUM;
	caps->max_wqes = HNS_ROCE_V1_MAX_WQE_NUM;
	caps->min_wqes = HNS_ROCE_MIN_WQE_NUM;
	caps->num_cqs = HNS_ROCE_V1_MAX_CQ_NUM;
	caps->min_cqes = HNS_ROCE_MIN_CQE_NUM;
	caps->max_cqes = HNS_ROCE_V1_MAX_CQE_NUM;
	caps->max_sq_sg = HNS_ROCE_V1_SG_NUM;
	caps->max_rq_sg = HNS_ROCE_V1_SG_NUM;
	caps->max_sq_inline = HNS_ROCE_V1_INLINE_SIZE;
	caps->num_uars = HNS_ROCE_V1_UAR_NUM;
	caps->phy_num_uars = HNS_ROCE_V1_PHY_UAR_NUM;
	caps->num_aeq_vectors = HNS_ROCE_V1_AEQE_VEC_NUM;
	caps->num_comp_vectors = HNS_ROCE_V1_COMP_VEC_NUM;
	caps->num_other_vectors = HNS_ROCE_V1_ABNORMAL_VEC_NUM;
	caps->num_mtpts = HNS_ROCE_V1_MAX_MTPT_NUM;
	caps->num_mtt_segs = HNS_ROCE_V1_MAX_MTT_SEGS;
	caps->num_pds = HNS_ROCE_V1_MAX_PD_NUM;
	caps->max_qp_init_rdma = HNS_ROCE_V1_MAX_QP_INIT_RDMA;
	caps->max_qp_dest_rdma = HNS_ROCE_V1_MAX_QP_DEST_RDMA;
	caps->max_sq_desc_sz = HNS_ROCE_V1_MAX_SQ_DESC_SZ;
	caps->max_rq_desc_sz = HNS_ROCE_V1_MAX_RQ_DESC_SZ;
	caps->qpc_entry_sz = HNS_ROCE_V1_QPC_ENTRY_SIZE;
	caps->irrl_entry_sz = HNS_ROCE_V1_IRRL_ENTRY_SIZE;
	caps->cqc_entry_sz = HNS_ROCE_V1_CQC_ENTRY_SIZE;
	caps->mtpt_entry_sz = HNS_ROCE_V1_MTPT_ENTRY_SIZE;
	caps->mtt_entry_sz = HNS_ROCE_V1_MTT_ENTRY_SIZE;
	caps->cq_entry_sz = HNS_ROCE_V1_CQE_ENTRY_SIZE;
	caps->page_size_cap = HNS_ROCE_V1_PAGE_SIZE_SUPPORT;
	caps->reserved_lkey = 0;
	caps->reserved_pds = 0;
	caps->reserved_mrws = 1;
	caps->reserved_uars = 0;
	caps->reserved_cqs = 0;
	caps->reserved_qps = 12; /* 2 SQPs per port, six ports: 12 in total */
	caps->chunk_sz = HNS_ROCE_V1_TABLE_CHUNK_SIZE;

	for (i = 0; i < caps->num_ports; i++)
		caps->pkey_table_len[i] = 1;

	for (i = 0; i < caps->num_ports; i++) {
		/* Six ports share 16 GID entries in the v1 engine */
		if (i >= (HNS_ROCE_V1_GID_NUM % caps->num_ports))
			caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
						 caps->num_ports;
		else
			caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
						 caps->num_ports + 1;
	}

	caps->ceqe_depth = HNS_ROCE_V1_COMP_EQE_NUM;
	caps->aeqe_depth = HNS_ROCE_V1_ASYNC_EQE_NUM;
	caps->local_ca_ack_delay = roce_read(hr_dev, ROCEE_ACK_DELAY_REG);
	caps->max_mtu = IB_MTU_2048;

	return 0;
}

static int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
{
	int ret;
	u32 val;
	__le32 tmp;
	struct device *dev = &hr_dev->pdev->dev;

	/* DMAE user config */
	val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG1_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M,
		       ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S, 0xf);
	roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M,
		       ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S,
		       1 << PAGES_SHIFT_16);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_DMAE_USER_CFG1_REG, val);

	val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG2_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M,
		       ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S, 0xf);
	roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M,
		       ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S,
		       1 << PAGES_SHIFT_16);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_DMAE_USER_CFG2_REG, val);

	ret = hns_roce_db_init(hr_dev);
	if (ret) {
		dev_err(dev, "doorbell init failed!\n");
		return ret;
	}

	ret = hns_roce_raq_init(hr_dev);
	if (ret) {
		dev_err(dev, "raq init failed!\n");
		goto error_failed_raq_init;
	}

	ret = hns_roce_bt_init(hr_dev);
	if (ret) {
		dev_err(dev, "bt init failed!\n");
		goto error_failed_bt_init;
	}

	ret = hns_roce_tptr_init(hr_dev);
	if (ret) {
		dev_err(dev, "tptr init failed!\n");
		goto error_failed_tptr_init;
	}

	ret = hns_roce_free_mr_init(hr_dev);
	if (ret) {
		dev_err(dev, "free mr init failed!\n");
		goto error_failed_free_mr_init;
	}

	hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP);

	return 0;

error_failed_free_mr_init:
	hns_roce_tptr_free(hr_dev);

error_failed_tptr_init:
	hns_roce_bt_free(hr_dev);

error_failed_bt_init:
	hns_roce_raq_free(hr_dev);

error_failed_raq_init:
	hns_roce_db_free(hr_dev);
	return ret;
}

static void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
{
	hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
	hns_roce_free_mr_free(hr_dev);
	hns_roce_tptr_free(hr_dev);
	hns_roce_bt_free(hr_dev);
	hns_roce_raq_free(hr_dev);
	hns_roce_db_free(hr_dev);
}

static int hns_roce_v1_cmd_pending(struct hns_roce_dev *hr_dev)
{
	u32 status = readl(hr_dev->reg_base + ROCEE_MB6_REG);

	return (!!(status & (1 << HCR_GO_BIT)));
}

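/**
 * hns_roce_v1_post_mbox - post a mailbox command to the hardware
 * @hr_dev: RoCE device struct pointer
 * @in_param: input parameter or DMA address of the input mailbox
 * @out_param: output parameter or DMA address of the output mailbox
 * @in_modifier: input modifier of the command
 * @op_modifier: modifier of the opcode
 * @op: command opcode
 * @token: token used to match the command completion
 * @event: nonzero to complete via an asynchronous event
 *
 * Busy-waits for the GO bit to clear, writes the parameters, then writes
 * the command word that starts execution.
 */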
static int hns_roce_v1_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
				 u64 out_param, u32 in_modifier, u8 op_modifier,
				 u16 op, u16 token, int event)
{
	u32 __iomem *hcr = (u32 __iomem *)(hr_dev->reg_base + ROCEE_MB1_REG);
	unsigned long end;
	u32 val = 0;
	__le32 tmp;

	end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies;
	while (hns_roce_v1_cmd_pending(hr_dev)) {
		if (time_after(jiffies, end)) {
			dev_err(hr_dev->dev, "jiffies=%d end=%d\n",
				(int)jiffies, (int)end);
			return -EAGAIN;
		}
		cond_resched();
	}

	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S,
		       op);
	roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_MDF_M,
		       ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier);
	roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_EVENT_S, event);
	roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1);
	roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_TOKEN_M,
		       ROCEE_MB6_ROCEE_MB_TOKEN_S, token);

	val = le32_to_cpu(tmp);
	writeq(in_param, hcr + 0);
	writeq(out_param, hcr + 2);
	writel(in_modifier, hcr + 4);
	/* Memory barrier */
	wmb();

	writel(val, hcr + 5);

	return 0;
}

static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev,
				unsigned long timeout)
{
	u8 __iomem *hcr = hr_dev->reg_base + ROCEE_MB1_REG;
	unsigned long end = 0;
	u32 status = 0;

	end = msecs_to_jiffies(timeout) + jiffies;
	while (hns_roce_v1_cmd_pending(hr_dev) && time_before(jiffies, end))
		cond_resched();

	if (hns_roce_v1_cmd_pending(hr_dev)) {
		dev_err(hr_dev->dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
		return -ETIMEDOUT;
	}

	status = le32_to_cpu((__force __le32)
			      __raw_readl(hcr + HCR_STATUS_OFFSET));
	if ((status & STATUS_MASK) != 0x1) {
		dev_err(hr_dev->dev, "mailbox status 0x%x!\n", status);
		return -EBUSY;
	}

	return 0;
}

static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port,
			       int gid_index, const union ib_gid *gid,
			       const struct ib_gid_attr *attr)
{
	unsigned long flags;
	u32 *p = NULL;
	u8 gid_idx = 0;

	gid_idx = hns_get_gid_index(hr_dev, port, gid_index);

	spin_lock_irqsave(&hr_dev->iboe.lock, flags);

	p = (u32 *)&gid->raw[0];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_L_0_REG +
		       (HNS_ROCE_V1_GID_NUM * gid_idx));

	p = (u32 *)&gid->raw[4];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_ML_0_REG +
		       (HNS_ROCE_V1_GID_NUM * gid_idx));

	p = (u32 *)&gid->raw[8];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_MH_0_REG +
		       (HNS_ROCE_V1_GID_NUM * gid_idx));

	p = (u32 *)&gid->raw[0xc];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_H_0_REG +
		       (HNS_ROCE_V1_GID_NUM * gid_idx));

	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);

	return 0;
}

hns_roce_v1_set_mac(struct hns_roce_dev * hr_dev,u8 phy_port,u8 * addr)1775 static int hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
1776 u8 *addr)
1777 {
1778 u32 reg_smac_l;
1779 u16 reg_smac_h;
1780 __le32 tmp;
1781 u16 *p_h;
1782 u32 *p;
1783 u32 val;
1784
1785 /*
1786 * When mac changed, loopback may fail
1787 * because of smac not equal to dmac.
1788 * We Need to release and create reserved qp again.
1789 */
1790 if (hr_dev->hw->dereg_mr) {
1791 int ret;
1792
1793 ret = hns_roce_v1_recreate_lp_qp(hr_dev);
1794 if (ret && ret != -ETIMEDOUT)
1795 return ret;
1796 }
1797
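/*
 * Write the low 32 bits of the MAC into the per-port SMAC_L
 * register, then read-modify-write its high 16 bits into the
 * SMAC_H field of the shared SMAC_H/MTU register.
 */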
1798 p = (u32 *)(&addr[0]);
1799 reg_smac_l = *p;
1800 roce_raw_write(reg_smac_l, hr_dev->reg_base + ROCEE_SMAC_L_0_REG +
1801 PHY_PORT_OFFSET * phy_port);
1802
1803 val = roce_read(hr_dev,
1804 ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
1805 tmp = cpu_to_le32(val);
1806 p_h = (u16 *)(&addr[4]);
1807 reg_smac_h = *p_h;
1808 roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_SMAC_H_M,
1809 ROCEE_SMAC_H_ROCEE_SMAC_H_S, reg_smac_h);
1810 val = le32_to_cpu(tmp);
1811 roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
1812 val);
1813
1814 return 0;
1815 }
1816
1817 static void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
1818 enum ib_mtu mtu)
1819 {
1820 __le32 tmp;
1821 u32 val;
1822
1823 val = roce_read(hr_dev,
1824 ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
1825 tmp = cpu_to_le32(val);
1826 roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_PORT_MTU_M,
1827 ROCEE_SMAC_H_ROCEE_PORT_MTU_S, mtu);
1828 val = le32_to_cpu(tmp);
1829 roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
1830 val);
1831 }
1832
1833 static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
1834 unsigned long mtpt_idx)
1835 {
1836 struct hns_roce_v1_mpt_entry *mpt_entry;
1837 struct sg_dma_page_iter sg_iter;
1838 u64 *pages;
1839 int i;
1840
1841 /* The MPT entry is filled into the mailbox buffer */
1842 mpt_entry = (struct hns_roce_v1_mpt_entry *)mb_buf;
1843 memset(mpt_entry, 0, sizeof(*mpt_entry));
1844
1845 roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_STATE_M,
1846 MPT_BYTE_4_KEY_STATE_S, KEY_VALID);
1847 roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_M,
1848 MPT_BYTE_4_KEY_S, mr->key);
1849 roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_PAGE_SIZE_M,
1850 MPT_BYTE_4_PAGE_SIZE_S, MR_SIZE_4K);
1851 roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_TYPE_S, 0);
1852 roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_BIND_ENABLE_S,
1853 (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
1854 roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_OWN_S, 0);
1855 roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_MEMORY_LOCATION_TYPE_M,
1856 MPT_BYTE_4_MEMORY_LOCATION_TYPE_S, mr->type);
1857 roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_ATOMIC_S, 0);
1858 roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_LOCAL_WRITE_S,
1859 (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
1860 roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_WRITE_S,
1861 (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
1862 roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_READ_S,
1863 (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
1864 roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_INVAL_ENABLE_S,
1865 0);
1866 roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_ADDRESS_TYPE_S, 0);
1867
1868 roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
1869 MPT_BYTE_12_PBL_ADDR_H_S, 0);
1870 roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_MW_BIND_COUNTER_M,
1871 MPT_BYTE_12_MW_BIND_COUNTER_S, 0);
1872
1873 mpt_entry->virt_addr_l = cpu_to_le32((u32)mr->iova);
1874 mpt_entry->virt_addr_h = cpu_to_le32((u32)(mr->iova >> 32));
1875 mpt_entry->length = cpu_to_le32((u32)mr->size);
1876
1877 roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_PD_M,
1878 MPT_BYTE_28_PD_S, mr->pd);
1879 roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_L_KEY_IDX_L_M,
1880 MPT_BYTE_28_L_KEY_IDX_L_S, mtpt_idx);
1881 roce_set_field(mpt_entry->mpt_byte_64, MPT_BYTE_64_L_KEY_IDX_H_M,
1882 MPT_BYTE_64_L_KEY_IDX_H_S, mtpt_idx >> MTPT_IDX_SHIFT);
1883
1884 /* DMA memory register */
1885 if (mr->type == MR_TYPE_DMA)
1886 return 0;
1887
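/*
 * For user MRs, the first HNS_ROCE_MAX_INNER_MTPT_NUM page DMA
 * addresses are embedded directly in the MTPT entry in 4K units
 * (hence the >> 12 below); the full page list is reached through
 * the PBL address programmed at the end of this function.
 */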
1888 pages = (u64 *) __get_free_page(GFP_KERNEL);
1889 if (!pages)
1890 return -ENOMEM;
1891
1892 i = 0;
1893 for_each_sg_dma_page(mr->umem->sg_head.sgl, &sg_iter, mr->umem->nmap, 0) {
1894 pages[i] = ((u64)sg_page_iter_dma_address(&sg_iter)) >> 12;
1895
1896 /* Only the first 7 entries are recorded directly in the MTPT */
1897 if (i >= HNS_ROCE_MAX_INNER_MTPT_NUM)
1898 break;
1899 i++;
1900 }
1901
1902 /* Register user mr */
1903 for (i = 0; i < HNS_ROCE_MAX_INNER_MTPT_NUM; i++) {
1904 switch (i) {
1905 case 0:
1906 mpt_entry->pa0_l = cpu_to_le32((u32)(pages[i]));
1907 roce_set_field(mpt_entry->mpt_byte_36,
1908 MPT_BYTE_36_PA0_H_M,
1909 MPT_BYTE_36_PA0_H_S,
1910 (u32)(pages[i] >> PAGES_SHIFT_32));
1911 break;
1912 case 1:
1913 roce_set_field(mpt_entry->mpt_byte_36,
1914 MPT_BYTE_36_PA1_L_M,
1915 MPT_BYTE_36_PA1_L_S, (u32)(pages[i]));
1916 roce_set_field(mpt_entry->mpt_byte_40,
1917 MPT_BYTE_40_PA1_H_M,
1918 MPT_BYTE_40_PA1_H_S,
1919 (u32)(pages[i] >> PAGES_SHIFT_24));
1920 break;
1921 case 2:
1922 roce_set_field(mpt_entry->mpt_byte_40,
1923 MPT_BYTE_40_PA2_L_M,
1924 MPT_BYTE_40_PA2_L_S, (u32)(pages[i]));
1925 roce_set_field(mpt_entry->mpt_byte_44,
1926 MPT_BYTE_44_PA2_H_M,
1927 MPT_BYTE_44_PA2_H_S,
1928 (u32)(pages[i] >> PAGES_SHIFT_16));
1929 break;
1930 case 3:
1931 roce_set_field(mpt_entry->mpt_byte_44,
1932 MPT_BYTE_44_PA3_L_M,
1933 MPT_BYTE_44_PA3_L_S, (u32)(pages[i]));
1934 roce_set_field(mpt_entry->mpt_byte_48,
1935 MPT_BYTE_48_PA3_H_M,
1936 MPT_BYTE_48_PA3_H_S,
1937 (u32)(pages[i] >> PAGES_SHIFT_8));
1938 break;
1939 case 4:
1940 mpt_entry->pa4_l = cpu_to_le32((u32)(pages[i]));
1941 roce_set_field(mpt_entry->mpt_byte_56,
1942 MPT_BYTE_56_PA4_H_M,
1943 MPT_BYTE_56_PA4_H_S,
1944 (u32)(pages[i] >> PAGES_SHIFT_32));
1945 break;
1946 case 5:
1947 roce_set_field(mpt_entry->mpt_byte_56,
1948 MPT_BYTE_56_PA5_L_M,
1949 MPT_BYTE_56_PA5_L_S, (u32)(pages[i]));
1950 roce_set_field(mpt_entry->mpt_byte_60,
1951 MPT_BYTE_60_PA5_H_M,
1952 MPT_BYTE_60_PA5_H_S,
1953 (u32)(pages[i] >> PAGES_SHIFT_24));
1954 break;
1955 case 6:
1956 roce_set_field(mpt_entry->mpt_byte_60,
1957 MPT_BYTE_60_PA6_L_M,
1958 MPT_BYTE_60_PA6_L_S, (u32)(pages[i]));
1959 roce_set_field(mpt_entry->mpt_byte_64,
1960 MPT_BYTE_64_PA6_H_M,
1961 MPT_BYTE_64_PA6_H_S,
1962 (u32)(pages[i] >> PAGES_SHIFT_16));
1963 break;
1964 default:
1965 break;
1966 }
1967 }
1968
1969 free_page((unsigned long) pages);
1970
1971 mpt_entry->pbl_addr_l = cpu_to_le32((u32)(mr->pbl_dma_addr));
1972
1973 roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
1974 MPT_BYTE_12_PBL_ADDR_H_S,
1975 ((u32)(mr->pbl_dma_addr >> 32)));
1976
1977 return 0;
1978 }
1979
1980 static void *get_cqe(struct hns_roce_cq *hr_cq, int n)
1981 {
1982 return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
1983 n * HNS_ROCE_V1_CQE_ENTRY_SIZE);
1984 }
1985
1986 static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
1987 {
1988 struct hns_roce_cqe *hr_cqe = get_cqe(hr_cq, n & hr_cq->ib_cq.cqe);
1989
1990 /* The CQE belongs to software when its owner bit is the inverse of the wrap bit of cons_index */
1991 return (roce_get_bit(hr_cqe->cqe_byte_4, CQE_BYTE_4_OWNER_S) ^
1992 !!(n & (hr_cq->ib_cq.cqe + 1))) ? hr_cqe : NULL;
1993 }
1994
1995 static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq)
1996 {
1997 return get_sw_cqe(hr_cq, hr_cq->cons_index);
1998 }
1999
2000 static void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
2001 {
2002 __le32 doorbell[2];
2003
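/*
 * The consumer index is reported modulo twice the CQ depth; the
 * extra wrap bit lets hardware distinguish a full ring from an
 * empty one.
 */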
2004 doorbell[0] = cpu_to_le32(cons_index & ((hr_cq->cq_depth << 1) - 1));
2005 doorbell[1] = 0;
2006 roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
2007 roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
2008 ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
2009 roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
2010 ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 0);
2011 roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
2012 ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S, hr_cq->cqn);
2013
2014 hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
2015 }
2016
2017 static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
2018 struct hns_roce_srq *srq)
2019 {
2020 struct hns_roce_cqe *cqe, *dest;
2021 u32 prod_index;
2022 int nfreed = 0;
2023 u8 owner_bit;
2024
2025 for (prod_index = hr_cq->cons_index; get_sw_cqe(hr_cq, prod_index);
2026 ++prod_index) {
2027 if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe)
2028 break;
2029 }
2030
2031 /*
2032 * Now backwards through the CQ, removing CQ entries
2033 * that match our QP by overwriting them with next entries.
2034 */
2035 while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
2036 cqe = get_cqe(hr_cq, prod_index & hr_cq->ib_cq.cqe);
2037 if ((roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
2038 CQE_BYTE_16_LOCAL_QPN_S) &
2039 HNS_ROCE_CQE_QPN_MASK) == qpn) {
2040 /* The v1 engine does not support SRQ */
2041 ++nfreed;
2042 } else if (nfreed) {
2043 dest = get_cqe(hr_cq, (prod_index + nfreed) &
2044 hr_cq->ib_cq.cqe);
2045 owner_bit = roce_get_bit(dest->cqe_byte_4,
2046 CQE_BYTE_4_OWNER_S);
2047 memcpy(dest, cqe, sizeof(*cqe));
2048 roce_set_bit(dest->cqe_byte_4, CQE_BYTE_4_OWNER_S,
2049 owner_bit);
2050 }
2051 }
2052
2053 if (nfreed) {
2054 hr_cq->cons_index += nfreed;
2055 /*
2056 * Make sure update of buffer contents is done before
2057 * updating consumer index.
2058 */
2059 wmb();
2060
2061 hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
2062 }
2063 }
2064
2065 static void hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
2066 struct hns_roce_srq *srq)
2067 {
2068 spin_lock_irq(&hr_cq->lock);
2069 __hns_roce_v1_cq_clean(hr_cq, qpn, srq);
2070 spin_unlock_irq(&hr_cq->lock);
2071 }
2072
2073 static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
2074 struct hns_roce_cq *hr_cq, void *mb_buf,
2075 u64 *mtts, dma_addr_t dma_handle, int nent,
2076 u32 vector)
2077 {
2078 struct hns_roce_cq_context *cq_context = NULL;
2079 struct hns_roce_buf_list *tptr_buf;
2080 struct hns_roce_v1_priv *priv;
2081 dma_addr_t tptr_dma_addr;
2082 int offset;
2083
2084 priv = (struct hns_roce_v1_priv *)hr_dev->priv;
2085 tptr_buf = &priv->tptr_table.tptr_buf;
2086
2087 cq_context = mb_buf;
2088 memset(cq_context, 0, sizeof(*cq_context));
2089
2090 /* Get the tptr for this CQ. */
2091 offset = hr_cq->cqn * HNS_ROCE_V1_TPTR_ENTRY_SIZE;
2092 tptr_dma_addr = tptr_buf->map + offset;
2093 hr_cq->tptr_addr = (u16 *)(tptr_buf->buf + offset);
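/*
 * Hardware reads this per-CQ tail-pointer slot; software refreshes
 * it from cons_index in hns_roce_v1_poll_cq().
 */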
2094
2095 /* Register cq_context members */
2096 roce_set_field(cq_context->cqc_byte_4,
2097 CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_M,
2098 CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S, CQ_STATE_VALID);
2099 roce_set_field(cq_context->cqc_byte_4, CQ_CONTEXT_CQC_BYTE_4_CQN_M,
2100 CQ_CONTEXT_CQC_BYTE_4_CQN_S, hr_cq->cqn);
2101
2102 cq_context->cq_bt_l = cpu_to_le32((u32)dma_handle);
2103
2104 roce_set_field(cq_context->cqc_byte_12,
2105 CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_M,
2106 CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S,
2107 ((u64)dma_handle >> 32));
2108 roce_set_field(cq_context->cqc_byte_12,
2109 CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M,
2110 CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S,
2111 ilog2((unsigned int)nent));
2112 roce_set_field(cq_context->cqc_byte_12, CQ_CONTEXT_CQC_BYTE_12_CEQN_M,
2113 CQ_CONTEXT_CQC_BYTE_12_CEQN_S, vector);
2114
2115 cq_context->cur_cqe_ba0_l = cpu_to_le32((u32)(mtts[0]));
2116
2117 roce_set_field(cq_context->cqc_byte_20,
2118 CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_M,
2119 CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S, (mtts[0]) >> 32);
2120 /* Dedicated hardware, directly set 0 */
2121 roce_set_field(cq_context->cqc_byte_20,
2122 CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_M,
2123 CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S, 0);
2124 /*
2125 * 44 = 32 + 12. The address handed to hardware is shifted right
2126 * by 12 because 4K pages are used, and by a further 32 to
2127 * extract the high 32 bits of that value.
2128 */
2129 roce_set_field(cq_context->cqc_byte_20,
2130 CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M,
2131 CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S,
2132 tptr_dma_addr >> 44);
2133
2134 cq_context->cqe_tptr_addr_l = cpu_to_le32((u32)(tptr_dma_addr >> 12));
2135
2136 roce_set_field(cq_context->cqc_byte_32,
2137 CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M,
2138 CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S, 0);
2139 roce_set_bit(cq_context->cqc_byte_32,
2140 CQ_CONTEXT_CQC_BYTE_32_SE_FLAG_S, 0);
2141 roce_set_bit(cq_context->cqc_byte_32,
2142 CQ_CONTEXT_CQC_BYTE_32_CE_FLAG_S, 0);
2143 roce_set_bit(cq_context->cqc_byte_32,
2144 CQ_CONTEXT_CQC_BYTE_32_NOTIFICATION_FLAG_S, 0);
2145 roce_set_bit(cq_context->cqc_byte_32,
2146 CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S,
2147 0);
2148 /* The initial value of cq's ci is 0 */
2149 roce_set_field(cq_context->cqc_byte_32,
2150 CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M,
2151 CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S, 0);
2152 }
2153
2154 static int hns_roce_v1_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
2155 {
2156 return -EOPNOTSUPP;
2157 }
2158
2159 static int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq,
2160 enum ib_cq_notify_flags flags)
2161 {
2162 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2163 u32 notification_flag;
2164 __le32 doorbell[2] = {};
2165
2166 notification_flag = (flags & IB_CQ_SOLICITED_MASK) ==
2167 IB_CQ_SOLICITED ? CQ_DB_REQ_NOT : CQ_DB_REQ_NOT_SOL;
2168 /*
2169 * flags = 0: notification flag = 1, next completion
2170 * flags = 1: notification flag = 0, solicited completion
2171 */
2172 doorbell[0] =
2173 cpu_to_le32(hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
2174 roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
2175 roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
2176 ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
2177 roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
2178 ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 1);
2179 roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
2180 ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S,
2181 hr_cq->cqn | notification_flag);
2182
2183 hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
2184
2185 return 0;
2186 }
2187
2188 static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
2189 struct hns_roce_qp **cur_qp, struct ib_wc *wc)
2190 {
2191 int qpn;
2192 int is_send;
2193 u16 wqe_ctr;
2194 u32 status;
2195 u32 opcode;
2196 struct hns_roce_cqe *cqe;
2197 struct hns_roce_qp *hr_qp;
2198 struct hns_roce_wq *wq;
2199 struct hns_roce_wqe_ctrl_seg *sq_wqe;
2200 struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
2201 struct device *dev = &hr_dev->pdev->dev;
2202
2203 /* Find the CQE at the current consumer index */
2204 cqe = next_cqe_sw(hr_cq);
2205 if (!cqe)
2206 return -EAGAIN;
2207
2208 ++hr_cq->cons_index;
2209 /* Memory barrier */
2210 rmb();
2211 /* 0->SQ, 1->RQ */
2212 is_send = !(roce_get_bit(cqe->cqe_byte_4, CQE_BYTE_4_SQ_RQ_FLAG_S));
2213
2214 /* local_qpn in a UD CQE is at most 1, so the real QPN must be computed */
2215 if (roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
2216 CQE_BYTE_16_LOCAL_QPN_S) <= 1) {
2217 qpn = roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_PORT_NUM_M,
2218 CQE_BYTE_20_PORT_NUM_S) +
2219 roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
2220 CQE_BYTE_16_LOCAL_QPN_S) *
2221 HNS_ROCE_MAX_PORTS;
2222 } else {
2223 qpn = roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
2224 CQE_BYTE_16_LOCAL_QPN_S);
2225 }
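/*
 * For example, the mapping above turns a GSI CQE with local_qpn 1
 * arriving on port 2 into qpn = 2 + 1 * HNS_ROCE_MAX_PORTS.
 */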
2226
2227 if (!*cur_qp || (qpn & HNS_ROCE_CQE_QPN_MASK) != (*cur_qp)->qpn) {
2228 hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
2229 if (unlikely(!hr_qp)) {
2230 dev_err(dev, "CQ %06lx with entry for unknown QPN %06x\n",
2231 hr_cq->cqn, (qpn & HNS_ROCE_CQE_QPN_MASK));
2232 return -EINVAL;
2233 }
2234
2235 *cur_qp = hr_qp;
2236 }
2237
2238 wc->qp = &(*cur_qp)->ibqp;
2239 wc->vendor_err = 0;
2240
2241 status = roce_get_field(cqe->cqe_byte_4,
2242 CQE_BYTE_4_STATUS_OF_THE_OPERATION_M,
2243 CQE_BYTE_4_STATUS_OF_THE_OPERATION_S) &
2244 HNS_ROCE_CQE_STATUS_MASK;
2245 switch (status) {
2246 case HNS_ROCE_CQE_SUCCESS:
2247 wc->status = IB_WC_SUCCESS;
2248 break;
2249 case HNS_ROCE_CQE_SYNDROME_LOCAL_LENGTH_ERR:
2250 wc->status = IB_WC_LOC_LEN_ERR;
2251 break;
2252 case HNS_ROCE_CQE_SYNDROME_LOCAL_QP_OP_ERR:
2253 wc->status = IB_WC_LOC_QP_OP_ERR;
2254 break;
2255 case HNS_ROCE_CQE_SYNDROME_LOCAL_PROT_ERR:
2256 wc->status = IB_WC_LOC_PROT_ERR;
2257 break;
2258 case HNS_ROCE_CQE_SYNDROME_WR_FLUSH_ERR:
2259 wc->status = IB_WC_WR_FLUSH_ERR;
2260 break;
2261 case HNS_ROCE_CQE_SYNDROME_MEM_MANAGE_OPERATE_ERR:
2262 wc->status = IB_WC_MW_BIND_ERR;
2263 break;
2264 case HNS_ROCE_CQE_SYNDROME_BAD_RESP_ERR:
2265 wc->status = IB_WC_BAD_RESP_ERR;
2266 break;
2267 case HNS_ROCE_CQE_SYNDROME_LOCAL_ACCESS_ERR:
2268 wc->status = IB_WC_LOC_ACCESS_ERR;
2269 break;
2270 case HNS_ROCE_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
2271 wc->status = IB_WC_REM_INV_REQ_ERR;
2272 break;
2273 case HNS_ROCE_CQE_SYNDROME_REMOTE_ACCESS_ERR:
2274 wc->status = IB_WC_REM_ACCESS_ERR;
2275 break;
2276 case HNS_ROCE_CQE_SYNDROME_REMOTE_OP_ERR:
2277 wc->status = IB_WC_REM_OP_ERR;
2278 break;
2279 case HNS_ROCE_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
2280 wc->status = IB_WC_RETRY_EXC_ERR;
2281 break;
2282 case HNS_ROCE_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
2283 wc->status = IB_WC_RNR_RETRY_EXC_ERR;
2284 break;
2285 default:
2286 wc->status = IB_WC_GENERAL_ERR;
2287 break;
2288 }
2289
2290 /* CQE status error, directly return */
2291 if (wc->status != IB_WC_SUCCESS)
2292 return 0;
2293
2294 if (is_send) {
2295 /* The CQE corresponds to an SQ WQE */
2296 sq_wqe = get_send_wqe(*cur_qp, roce_get_field(cqe->cqe_byte_4,
2297 CQE_BYTE_4_WQE_INDEX_M,
2298 CQE_BYTE_4_WQE_INDEX_S)&
2299 ((*cur_qp)->sq.wqe_cnt-1));
2300 switch (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_OPCODE_MASK) {
2301 case HNS_ROCE_WQE_OPCODE_SEND:
2302 wc->opcode = IB_WC_SEND;
2303 break;
2304 case HNS_ROCE_WQE_OPCODE_RDMA_READ:
2305 wc->opcode = IB_WC_RDMA_READ;
2306 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2307 break;
2308 case HNS_ROCE_WQE_OPCODE_RDMA_WRITE:
2309 wc->opcode = IB_WC_RDMA_WRITE;
2310 break;
2311 case HNS_ROCE_WQE_OPCODE_LOCAL_INV:
2312 wc->opcode = IB_WC_LOCAL_INV;
2313 break;
2314 case HNS_ROCE_WQE_OPCODE_UD_SEND:
2315 wc->opcode = IB_WC_SEND;
2316 break;
2317 default:
2318 wc->status = IB_WC_GENERAL_ERR;
2319 break;
2320 }
2321 wc->wc_flags = (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_IMM ?
2322 IB_WC_WITH_IMM : 0);
2323
2324 wq = &(*cur_qp)->sq;
2325 if ((*cur_qp)->sq_signal_bits) {
2326 /*
2327 * If sq_signal_bits is set, first move the
2328 * tail pointer to the WQE that this
2329 * CQE corresponds to
2330 */
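/*
 * The masked update below is wrap-safe: with wqe_cnt = 64,
 * tail = 62 and wqe_ctr = 2, (2 - 62) & 63 = 4, so the tail
 * advances four slots to land on index 2.
 */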
2331 wqe_ctr = (u16)roce_get_field(cqe->cqe_byte_4,
2332 CQE_BYTE_4_WQE_INDEX_M,
2333 CQE_BYTE_4_WQE_INDEX_S);
2334 wq->tail += (wqe_ctr - (u16)wq->tail) &
2335 (wq->wqe_cnt - 1);
2336 }
2337 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2338 ++wq->tail;
2339 } else {
2340 /* The CQE corresponds to an RQ WQE */
2341 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2342 opcode = roce_get_field(cqe->cqe_byte_4,
2343 CQE_BYTE_4_OPERATION_TYPE_M,
2344 CQE_BYTE_4_OPERATION_TYPE_S) &
2345 HNS_ROCE_CQE_OPCODE_MASK;
2346 switch (opcode) {
2347 case HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE:
2348 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2349 wc->wc_flags = IB_WC_WITH_IMM;
2350 wc->ex.imm_data =
2351 cpu_to_be32(le32_to_cpu(cqe->immediate_data));
2352 break;
2353 case HNS_ROCE_OPCODE_SEND_DATA_RECEIVE:
2354 if (roce_get_bit(cqe->cqe_byte_4,
2355 CQE_BYTE_4_IMM_INDICATOR_S)) {
2356 wc->opcode = IB_WC_RECV;
2357 wc->wc_flags = IB_WC_WITH_IMM;
2358 wc->ex.imm_data = cpu_to_be32(
2359 le32_to_cpu(cqe->immediate_data));
2360 } else {
2361 wc->opcode = IB_WC_RECV;
2362 wc->wc_flags = 0;
2363 }
2364 break;
2365 default:
2366 wc->status = IB_WC_GENERAL_ERR;
2367 break;
2368 }
2369
2370 /* Update tail pointer, record wr_id */
2371 wq = &(*cur_qp)->rq;
2372 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2373 ++wq->tail;
2374 wc->sl = (u8)roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_SL_M,
2375 CQE_BYTE_20_SL_S);
2376 wc->src_qp = (u8)roce_get_field(cqe->cqe_byte_20,
2377 CQE_BYTE_20_REMOTE_QPN_M,
2378 CQE_BYTE_20_REMOTE_QPN_S);
2379 wc->wc_flags |= (roce_get_bit(cqe->cqe_byte_20,
2380 CQE_BYTE_20_GRH_PRESENT_S) ?
2381 IB_WC_GRH : 0);
2382 wc->pkey_index = (u16)roce_get_field(cqe->cqe_byte_28,
2383 CQE_BYTE_28_P_KEY_IDX_M,
2384 CQE_BYTE_28_P_KEY_IDX_S);
2385 }
2386
2387 return 0;
2388 }
2389
2390 int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
2391 {
2392 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2393 struct hns_roce_qp *cur_qp = NULL;
2394 unsigned long flags;
2395 int npolled;
2396 int ret = 0;
2397
2398 spin_lock_irqsave(&hr_cq->lock, flags);
2399
2400 for (npolled = 0; npolled < num_entries; ++npolled) {
2401 ret = hns_roce_v1_poll_one(hr_cq, &cur_qp, wc + npolled);
2402 if (ret)
2403 break;
2404 }
2405
2406 if (npolled) {
2407 *hr_cq->tptr_addr = hr_cq->cons_index &
2408 ((hr_cq->cq_depth << 1) - 1);
2409
2410 /* Memory barrier */
2411 wmb();
2412 hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
2413 }
2414
2415 spin_unlock_irqrestore(&hr_cq->lock, flags);
2416
2417 if (ret == 0 || ret == -EAGAIN)
2418 return npolled;
2419 else
2420 return ret;
2421 }
2422
2423 static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
2424 struct hns_roce_hem_table *table, int obj,
2425 int step_idx)
2426 {
2427 struct device *dev = &hr_dev->pdev->dev;
2428 struct hns_roce_v1_priv *priv;
2429 unsigned long flags = 0;
2430 long end = HW_SYNC_TIMEOUT_MSECS;
2431 __le32 bt_cmd_val[2] = {0};
2432 void __iomem *bt_cmd;
2433 u64 bt_ba = 0;
2434
2435 priv = (struct hns_roce_v1_priv *)hr_dev->priv;
2436
2437 switch (table->type) {
2438 case HEM_TYPE_QPC:
2439 bt_ba = priv->bt_table.qpc_buf.map >> 12;
2440 break;
2441 case HEM_TYPE_MTPT:
2442 bt_ba = priv->bt_table.mtpt_buf.map >> 12;
2443 break;
2444 case HEM_TYPE_CQC:
2445 bt_ba = priv->bt_table.cqc_buf.map >> 12;
2446 break;
2447 case HEM_TYPE_SRQC:
2448 dev_dbg(dev, "HEM_TYPE_SRQC not supported.\n");
2449 return -EINVAL;
2450 default:
2451 return 0;
2452 }
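/*
 * bt_ba holds the base-table DMA address in 4K units: its low
 * 32 bits go out through ROCEE_BT_CMD_L and the remaining high
 * bits through the BT_CMD_BA_H field set just before the write.
 */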
2453 roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
2454 ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, table->type);
2455 roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
2456 ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
2457 roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
2458 roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
2459
2460 spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags);
2461
2462 bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
2463
2464 while (1) {
2465 if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
2466 if (!end) {
2467 dev_err(dev, "Write bt_cmd error, hw_sync is not zero.\n");
2468 spin_unlock_irqrestore(&hr_dev->bt_cmd_lock,
2469 flags);
2470 return -EBUSY;
2471 }
2472 } else {
2473 break;
2474 }
2475 mdelay(HW_SYNC_SLEEP_TIME_INTERVAL);
2476 end -= HW_SYNC_SLEEP_TIME_INTERVAL;
2477 }
2478
2479 bt_cmd_val[0] = cpu_to_le32(bt_ba);
2480 roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
2481 ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32);
2482 hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
2483
2484 spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags);
2485
2486 return 0;
2487 }
2488
2489 static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
2490 struct hns_roce_mtt *mtt,
2491 enum hns_roce_qp_state cur_state,
2492 enum hns_roce_qp_state new_state,
2493 struct hns_roce_qp_context *context,
2494 struct hns_roce_qp *hr_qp)
2495 {
2496 static const u16
2497 op[HNS_ROCE_QP_NUM_STATE][HNS_ROCE_QP_NUM_STATE] = {
2498 [HNS_ROCE_QP_STATE_RST] = {
2499 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2500 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2501 [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
2502 },
2503 [HNS_ROCE_QP_STATE_INIT] = {
2504 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2505 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2506 /* Note: In v1 engine, HW doesn't support RST2INIT.
2507 * We use RST2INIT cmd instead of INIT2INIT.
2508 */
2509 [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
2510 [HNS_ROCE_QP_STATE_RTR] = HNS_ROCE_CMD_INIT2RTR_QP,
2511 },
2512 [HNS_ROCE_QP_STATE_RTR] = {
2513 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2514 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2515 [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTR2RTS_QP,
2516 },
2517 [HNS_ROCE_QP_STATE_RTS] = {
2518 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2519 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2520 [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTS2RTS_QP,
2521 [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_RTS2SQD_QP,
2522 },
2523 [HNS_ROCE_QP_STATE_SQD] = {
2524 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2525 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2526 [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_SQD2RTS_QP,
2527 [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_SQD2SQD_QP,
2528 },
2529 [HNS_ROCE_QP_STATE_ERR] = {
2530 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2531 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2532 }
2533 };
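/*
 * op[cur_state][new_state] selects the mailbox command for each
 * legal transition; a zero entry marks the transition as
 * unsupported and is rejected below.
 */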
2534
2535 struct hns_roce_cmd_mailbox *mailbox;
2536 struct device *dev = &hr_dev->pdev->dev;
2537 int ret = 0;
2538
2539 if (cur_state >= HNS_ROCE_QP_NUM_STATE ||
2540 new_state >= HNS_ROCE_QP_NUM_STATE ||
2541 !op[cur_state][new_state]) {
2542 dev_err(dev, "[modify_qp] unsupported transition from state %d to %d\n",
2543 cur_state, new_state);
2544 return -EINVAL;
2545 }
2546
2547 if (op[cur_state][new_state] == HNS_ROCE_CMD_2RST_QP)
2548 return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
2549 HNS_ROCE_CMD_2RST_QP,
2550 HNS_ROCE_CMD_TIMEOUT_MSECS);
2551
2552 if (op[cur_state][new_state] == HNS_ROCE_CMD_2ERR_QP)
2553 return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
2554 HNS_ROCE_CMD_2ERR_QP,
2555 HNS_ROCE_CMD_TIMEOUT_MSECS);
2556
2557 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
2558 if (IS_ERR(mailbox))
2559 return PTR_ERR(mailbox);
2560
2561 memcpy(mailbox->buf, context, sizeof(*context));
2562
2563 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
2564 op[cur_state][new_state],
2565 HNS_ROCE_CMD_TIMEOUT_MSECS);
2566
2567 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
2568 return ret;
2569 }
2570
2571 static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
2572 int attr_mask, enum ib_qp_state cur_state,
2573 enum ib_qp_state new_state)
2574 {
2575 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
2576 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
2577 struct hns_roce_sqp_context *context;
2578 struct device *dev = &hr_dev->pdev->dev;
2579 dma_addr_t dma_handle = 0;
2580 u32 __iomem *addr;
2581 int rq_pa_start;
2582 __le32 tmp;
2583 u32 reg_val;
2584 u64 *mtts;
2585
2586 context = kzalloc(sizeof(*context), GFP_KERNEL);
2587 if (!context)
2588 return -ENOMEM;
2589
2590 /* Search QP buf's MTTs */
2591 mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
2592 hr_qp->mtt.first_seg, &dma_handle);
2593 if (!mtts) {
2594 dev_err(dev, "failed to find the qp buffer's pa\n");
2595 goto out;
2596 }
2597
2598 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
2599 roce_set_field(context->qp1c_bytes_4,
2600 QP1C_BYTES_4_SQ_WQE_SHIFT_M,
2601 QP1C_BYTES_4_SQ_WQE_SHIFT_S,
2602 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2603 roce_set_field(context->qp1c_bytes_4,
2604 QP1C_BYTES_4_RQ_WQE_SHIFT_M,
2605 QP1C_BYTES_4_RQ_WQE_SHIFT_S,
2606 ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2607 roce_set_field(context->qp1c_bytes_4, QP1C_BYTES_4_PD_M,
2608 QP1C_BYTES_4_PD_S, to_hr_pd(ibqp->pd)->pdn);
2609
2610 context->sq_rq_bt_l = cpu_to_le32((u32)(dma_handle));
2611 roce_set_field(context->qp1c_bytes_12,
2612 QP1C_BYTES_12_SQ_RQ_BT_H_M,
2613 QP1C_BYTES_12_SQ_RQ_BT_H_S,
2614 ((u32)(dma_handle >> 32)));
2615
2616 roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_HEAD_M,
2617 QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head);
2618 roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_PORT_NUM_M,
2619 QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port);
2620 roce_set_bit(context->qp1c_bytes_16,
2621 QP1C_BYTES_16_SIGNALING_TYPE_S,
2622 hr_qp->sq_signal_bits);
2623 roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S,
2624 1);
2625 roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S,
2626 1);
2627 roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_QP1_ERR_S,
2628 0);
2629
2630 roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_SQ_HEAD_M,
2631 QP1C_BYTES_20_SQ_HEAD_S, hr_qp->sq.head);
2632 roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_PKEY_IDX_M,
2633 QP1C_BYTES_20_PKEY_IDX_S, attr->pkey_index);
2634
2635 rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
2636 context->cur_rq_wqe_ba_l =
2637 cpu_to_le32((u32)(mtts[rq_pa_start]));
2638
2639 roce_set_field(context->qp1c_bytes_28,
2640 QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M,
2641 QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S,
2642 (mtts[rq_pa_start]) >> 32);
2643 roce_set_field(context->qp1c_bytes_28,
2644 QP1C_BYTES_28_RQ_CUR_IDX_M,
2645 QP1C_BYTES_28_RQ_CUR_IDX_S, 0);
2646
2647 roce_set_field(context->qp1c_bytes_32,
2648 QP1C_BYTES_32_RX_CQ_NUM_M,
2649 QP1C_BYTES_32_RX_CQ_NUM_S,
2650 to_hr_cq(ibqp->recv_cq)->cqn);
2651 roce_set_field(context->qp1c_bytes_32,
2652 QP1C_BYTES_32_TX_CQ_NUM_M,
2653 QP1C_BYTES_32_TX_CQ_NUM_S,
2654 to_hr_cq(ibqp->send_cq)->cqn);
2655
2656 context->cur_sq_wqe_ba_l = cpu_to_le32((u32)mtts[0]);
2657
2658 roce_set_field(context->qp1c_bytes_40,
2659 QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M,
2660 QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S,
2661 (mtts[0]) >> 32);
2662 roce_set_field(context->qp1c_bytes_40,
2663 QP1C_BYTES_40_SQ_CUR_IDX_M,
2664 QP1C_BYTES_40_SQ_CUR_IDX_S, 0);
2665
2666 /* Copy context to QP1C register */
2667 addr = (u32 __iomem *)(hr_dev->reg_base +
2668 ROCEE_QP1C_CFG0_0_REG +
2669 hr_qp->phy_port * sizeof(*context));
2670
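/*
 * The QP1 context is not pushed through the mailbox; it is copied
 * as ten consecutive 32-bit registers starting at the per-port
 * ROCEE_QP1C_CFG0_0_REG offset computed above.
 */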
2671 writel(le32_to_cpu(context->qp1c_bytes_4), addr);
2672 writel(le32_to_cpu(context->sq_rq_bt_l), addr + 1);
2673 writel(le32_to_cpu(context->qp1c_bytes_12), addr + 2);
2674 writel(le32_to_cpu(context->qp1c_bytes_16), addr + 3);
2675 writel(le32_to_cpu(context->qp1c_bytes_20), addr + 4);
2676 writel(le32_to_cpu(context->cur_rq_wqe_ba_l), addr + 5);
2677 writel(le32_to_cpu(context->qp1c_bytes_28), addr + 6);
2678 writel(le32_to_cpu(context->qp1c_bytes_32), addr + 7);
2679 writel(le32_to_cpu(context->cur_sq_wqe_ba_l), addr + 8);
2680 writel(le32_to_cpu(context->qp1c_bytes_40), addr + 9);
2681 }
2682
2683 /* Modify QP1C status */
2684 reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG0_0_REG +
2685 hr_qp->phy_port * sizeof(*context));
2686 tmp = cpu_to_le32(reg_val);
2687 roce_set_field(tmp, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M,
2688 ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S, new_state);
2689 reg_val = le32_to_cpu(tmp);
2690 roce_write(hr_dev, ROCEE_QP1C_CFG0_0_REG +
2691 hr_qp->phy_port * sizeof(*context), reg_val);
2692
2693 hr_qp->state = new_state;
2694 if (new_state == IB_QPS_RESET) {
2695 hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
2696 ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
2697 if (ibqp->send_cq != ibqp->recv_cq)
2698 hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
2699 hr_qp->qpn, NULL);
2700
2701 hr_qp->rq.head = 0;
2702 hr_qp->rq.tail = 0;
2703 hr_qp->sq.head = 0;
2704 hr_qp->sq.tail = 0;
2705 hr_qp->sq_next_wqe = 0;
2706 }
2707
2708 kfree(context);
2709 return 0;
2710
2711 out:
2712 kfree(context);
2713 return -EINVAL;
2714 }
2715
2716 static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
2717 int attr_mask, enum ib_qp_state cur_state,
2718 enum ib_qp_state new_state)
2719 {
2720 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
2721 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
2722 struct device *dev = &hr_dev->pdev->dev;
2723 struct hns_roce_qp_context *context;
2724 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
2725 dma_addr_t dma_handle_2 = 0;
2726 dma_addr_t dma_handle = 0;
2727 __le32 doorbell[2] = {0};
2728 int rq_pa_start = 0;
2729 u64 *mtts_2 = NULL;
2730 int ret = -EINVAL;
2731 u64 *mtts = NULL;
2732 int port;
2733 u8 port_num;
2734 u8 *dmac;
2735 u8 *smac;
2736
2737 context = kzalloc(sizeof(*context), GFP_KERNEL);
2738 if (!context)
2739 return -ENOMEM;
2740
2741 /* Search qp buf's mtts */
2742 mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
2743 hr_qp->mtt.first_seg, &dma_handle);
2744 if (mtts == NULL) {
2745 dev_err(dev, "failed to find the qp buffer's pa\n");
2746 goto out;
2747 }
2748
2749 /* Search IRRL's mtts */
2750 mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
2751 hr_qp->qpn, &dma_handle_2);
2752 if (mtts_2 == NULL) {
2753 dev_err(dev, "failed to find the qp's irrl_table entry\n");
2754 goto out;
2755 }
2756
2757 /*
2758 * Reset to init
2759 * Mandatory param:
2760 * IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS
2761 * Optional param: NA
2762 */
2763 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
2764 roce_set_field(context->qpc_bytes_4,
2765 QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
2766 QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S,
2767 to_hr_qp_type(hr_qp->ibqp.qp_type));
2768
2769 roce_set_bit(context->qpc_bytes_4,
2770 QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0);
2771 roce_set_bit(context->qpc_bytes_4,
2772 QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
2773 !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
2774 roce_set_bit(context->qpc_bytes_4,
2775 QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
2776 !!(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
2777 );
2778 roce_set_bit(context->qpc_bytes_4,
2779 QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S,
2780 !!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)
2781 );
2782 roce_set_bit(context->qpc_bytes_4,
2783 QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1);
2784 roce_set_field(context->qpc_bytes_4,
2785 QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M,
2786 QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S,
2787 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2788 roce_set_field(context->qpc_bytes_4,
2789 QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M,
2790 QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S,
2791 ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2792 roce_set_field(context->qpc_bytes_4,
2793 QP_CONTEXT_QPC_BYTES_4_PD_M,
2794 QP_CONTEXT_QPC_BYTES_4_PD_S,
2795 to_hr_pd(ibqp->pd)->pdn);
2796 hr_qp->access_flags = attr->qp_access_flags;
2797 roce_set_field(context->qpc_bytes_8,
2798 QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M,
2799 QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S,
2800 to_hr_cq(ibqp->send_cq)->cqn);
2801 roce_set_field(context->qpc_bytes_8,
2802 QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M,
2803 QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S,
2804 to_hr_cq(ibqp->recv_cq)->cqn);
2805
2806 if (ibqp->srq)
2807 roce_set_field(context->qpc_bytes_12,
2808 QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M,
2809 QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S,
2810 to_hr_srq(ibqp->srq)->srqn);
2811
2812 roce_set_field(context->qpc_bytes_12,
2813 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
2814 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
2815 attr->pkey_index);
2816 hr_qp->pkey_index = attr->pkey_index;
2817 roce_set_field(context->qpc_bytes_16,
2818 QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
2819 QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
2820
2821 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
2822 roce_set_field(context->qpc_bytes_4,
2823 QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
2824 QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S,
2825 to_hr_qp_type(hr_qp->ibqp.qp_type));
2826 roce_set_bit(context->qpc_bytes_4,
2827 QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0);
2828 if (attr_mask & IB_QP_ACCESS_FLAGS) {
2829 roce_set_bit(context->qpc_bytes_4,
2830 QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
2831 !!(attr->qp_access_flags &
2832 IB_ACCESS_REMOTE_READ));
2833 roce_set_bit(context->qpc_bytes_4,
2834 QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
2835 !!(attr->qp_access_flags &
2836 IB_ACCESS_REMOTE_WRITE));
2837 } else {
2838 roce_set_bit(context->qpc_bytes_4,
2839 QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
2840 !!(hr_qp->access_flags &
2841 IB_ACCESS_REMOTE_READ));
2842 roce_set_bit(context->qpc_bytes_4,
2843 QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
2844 !!(hr_qp->access_flags &
2845 IB_ACCESS_REMOTE_WRITE));
2846 }
2847
2848 roce_set_bit(context->qpc_bytes_4,
2849 QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1);
2850 roce_set_field(context->qpc_bytes_4,
2851 QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M,
2852 QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S,
2853 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2854 roce_set_field(context->qpc_bytes_4,
2855 QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M,
2856 QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S,
2857 ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2858 roce_set_field(context->qpc_bytes_4,
2859 QP_CONTEXT_QPC_BYTES_4_PD_M,
2860 QP_CONTEXT_QPC_BYTES_4_PD_S,
2861 to_hr_pd(ibqp->pd)->pdn);
2862
2863 roce_set_field(context->qpc_bytes_8,
2864 QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M,
2865 QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S,
2866 to_hr_cq(ibqp->send_cq)->cqn);
2867 roce_set_field(context->qpc_bytes_8,
2868 QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M,
2869 QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S,
2870 to_hr_cq(ibqp->recv_cq)->cqn);
2871
2872 if (ibqp->srq)
2873 roce_set_field(context->qpc_bytes_12,
2874 QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M,
2875 QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S,
2876 to_hr_srq(ibqp->srq)->srqn);
2877 if (attr_mask & IB_QP_PKEY_INDEX)
2878 roce_set_field(context->qpc_bytes_12,
2879 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
2880 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
2881 attr->pkey_index);
2882 else
2883 roce_set_field(context->qpc_bytes_12,
2884 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
2885 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
2886 hr_qp->pkey_index);
2887
2888 roce_set_field(context->qpc_bytes_16,
2889 QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
2890 QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
2891 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
2892 if ((attr_mask & IB_QP_ALT_PATH) ||
2893 (attr_mask & IB_QP_ACCESS_FLAGS) ||
2894 (attr_mask & IB_QP_PKEY_INDEX) ||
2895 (attr_mask & IB_QP_QKEY)) {
2896 dev_err(dev, "INIT2RTR attr_mask error\n");
2897 goto out;
2898 }
2899
2900 dmac = (u8 *)attr->ah_attr.roce.dmac;
2901
2902 context->sq_rq_bt_l = cpu_to_le32((u32)(dma_handle));
2903 roce_set_field(context->qpc_bytes_24,
2904 QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M,
2905 QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S,
2906 ((u32)(dma_handle >> 32)));
2907 roce_set_bit(context->qpc_bytes_24,
2908 QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S,
2909 1);
2910 roce_set_field(context->qpc_bytes_24,
2911 QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
2912 QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S,
2913 attr->min_rnr_timer);
2914 context->irrl_ba_l = cpu_to_le32((u32)(dma_handle_2));
2915 roce_set_field(context->qpc_bytes_32,
2916 QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M,
2917 QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S,
2918 ((u32)(dma_handle_2 >> 32)) &
2919 QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M);
2920 roce_set_field(context->qpc_bytes_32,
2921 QP_CONTEXT_QPC_BYTES_32_MIG_STATE_M,
2922 QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S, 0);
2923 roce_set_bit(context->qpc_bytes_32,
2924 QP_CONTEXT_QPC_BYTE_32_LOCAL_ENABLE_E2E_CREDITS_S,
2925 1);
2926 roce_set_bit(context->qpc_bytes_32,
2927 QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S,
2928 hr_qp->sq_signal_bits);
2929
2930 port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) :
2931 hr_qp->port;
2932 smac = (u8 *)hr_dev->dev_addr[port];
2933 /* When the dmac equals the smac or loop_idc is 1, loopback should be used */
2934 if (ether_addr_equal_unaligned(dmac, smac) ||
2935 hr_dev->loop_idc == 0x1)
2936 roce_set_bit(context->qpc_bytes_32,
2937 QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S, 1);
2938
2939 roce_set_bit(context->qpc_bytes_32,
2940 QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S,
2941 rdma_ah_get_ah_flags(&attr->ah_attr));
2942 roce_set_field(context->qpc_bytes_32,
2943 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
2944 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S,
2945 ilog2((unsigned int)attr->max_dest_rd_atomic));
2946
2947 if (attr_mask & IB_QP_DEST_QPN)
2948 roce_set_field(context->qpc_bytes_36,
2949 QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
2950 QP_CONTEXT_QPC_BYTES_36_DEST_QP_S,
2951 attr->dest_qp_num);
2952
2953 /* Configure GID index */
2954 port_num = rdma_ah_get_port_num(&attr->ah_attr);
2955 roce_set_field(context->qpc_bytes_36,
2956 QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
2957 QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S,
2958 hns_get_gid_index(hr_dev,
2959 port_num - 1,
2960 grh->sgid_index));
2961
2962 memcpy(&(context->dmac_l), dmac, 4);
2963
2964 roce_set_field(context->qpc_bytes_44,
2965 QP_CONTEXT_QPC_BYTES_44_DMAC_H_M,
2966 QP_CONTEXT_QPC_BYTES_44_DMAC_H_S,
2967 *((u16 *)(&dmac[4])));
2968 roce_set_field(context->qpc_bytes_44,
2969 QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_M,
2970 QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S,
2971 rdma_ah_get_static_rate(&attr->ah_attr));
2972 roce_set_field(context->qpc_bytes_44,
2973 QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
2974 QP_CONTEXT_QPC_BYTES_44_HOPLMT_S,
2975 grh->hop_limit);
2976
2977 roce_set_field(context->qpc_bytes_48,
2978 QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
2979 QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S,
2980 grh->flow_label);
2981 roce_set_field(context->qpc_bytes_48,
2982 QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
2983 QP_CONTEXT_QPC_BYTES_48_TCLASS_S,
2984 grh->traffic_class);
2985 roce_set_field(context->qpc_bytes_48,
2986 QP_CONTEXT_QPC_BYTES_48_MTU_M,
2987 QP_CONTEXT_QPC_BYTES_48_MTU_S, attr->path_mtu);
2988
2989 memcpy(context->dgid, grh->dgid.raw,
2990 sizeof(grh->dgid.raw));
2991
2992 dev_dbg(dev, "dmac:%x :%lx\n", context->dmac_l,
2993 roce_get_field(context->qpc_bytes_44,
2994 QP_CONTEXT_QPC_BYTES_44_DMAC_H_M,
2995 QP_CONTEXT_QPC_BYTES_44_DMAC_H_S));
2996
2997 roce_set_field(context->qpc_bytes_68,
2998 QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M,
2999 QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S,
3000 hr_qp->rq.head);
3001 roce_set_field(context->qpc_bytes_68,
3002 QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M,
3003 QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0);
3004
3005 rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
3006 context->cur_rq_wqe_ba_l =
3007 cpu_to_le32((u32)(mtts[rq_pa_start]));
3008
3009 roce_set_field(context->qpc_bytes_76,
3010 QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M,
3011 QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S,
3012 mtts[rq_pa_start] >> 32);
3013 roce_set_field(context->qpc_bytes_76,
3014 QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M,
3015 QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S, 0);
3016
3017 context->rx_rnr_time = 0;
3018
3019 roce_set_field(context->qpc_bytes_84,
3020 QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_M,
3021 QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S,
3022 attr->rq_psn - 1);
3023 roce_set_field(context->qpc_bytes_84,
3024 QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_M,
3025 QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S, 0);
3026
3027 roce_set_field(context->qpc_bytes_88,
3028 QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M,
3029 QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S,
3030 attr->rq_psn);
3031 roce_set_bit(context->qpc_bytes_88,
3032 QP_CONTEXT_QPC_BYTES_88_RX_REQ_PSN_ERR_FLAG_S, 0);
3033 roce_set_bit(context->qpc_bytes_88,
3034 QP_CONTEXT_QPC_BYTES_88_RX_LAST_OPCODE_FLG_S, 0);
3035 roce_set_field(context->qpc_bytes_88,
3036 QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_M,
3037 QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S,
3038 0);
3039 roce_set_field(context->qpc_bytes_88,
3040 QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_M,
3041 QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S,
3042 0);
3043
3044 context->dma_length = 0;
3045 context->r_key = 0;
3046 context->va_l = 0;
3047 context->va_h = 0;
3048
3049 roce_set_field(context->qpc_bytes_108,
3050 QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_M,
3051 QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S, 0);
3052 roce_set_bit(context->qpc_bytes_108,
3053 QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_FLG_S, 0);
3054 roce_set_bit(context->qpc_bytes_108,
3055 QP_CONTEXT_QPC_BYTES_108_TRRL_TDB_PSN_FLG_S, 0);
3056
3057 roce_set_field(context->qpc_bytes_112,
3058 QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_M,
3059 QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S, 0);
3060 roce_set_field(context->qpc_bytes_112,
3061 QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_M,
3062 QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S, 0);
3063
3064 /* For chip resp ack */
3065 roce_set_field(context->qpc_bytes_156,
3066 QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
3067 QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
3068 hr_qp->phy_port);
3069 roce_set_field(context->qpc_bytes_156,
3070 QP_CONTEXT_QPC_BYTES_156_SL_M,
3071 QP_CONTEXT_QPC_BYTES_156_SL_S,
3072 rdma_ah_get_sl(&attr->ah_attr));
3073 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
3074 } else if (cur_state == IB_QPS_RTR &&
3075 new_state == IB_QPS_RTS) {
3076 /* If any optional parameter is present, return an error */
3077 if ((attr_mask & IB_QP_ALT_PATH) ||
3078 (attr_mask & IB_QP_ACCESS_FLAGS) ||
3079 (attr_mask & IB_QP_QKEY) ||
3080 (attr_mask & IB_QP_PATH_MIG_STATE) ||
3081 (attr_mask & IB_QP_CUR_STATE) ||
3082 (attr_mask & IB_QP_MIN_RNR_TIMER)) {
3083 dev_err(dev, "RTR2RTS attr_mask error\n");
3084 goto out;
3085 }
3086
3087 context->rx_cur_sq_wqe_ba_l = cpu_to_le32((u32)(mtts[0]));
3088
3089 roce_set_field(context->qpc_bytes_120,
3090 QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M,
3091 QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S,
3092 (mtts[0]) >> 32);
3093
3094 roce_set_field(context->qpc_bytes_124,
3095 QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M,
3096 QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S, 0);
3097 roce_set_field(context->qpc_bytes_124,
3098 QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_M,
3099 QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S, 0);
3100
3101 roce_set_field(context->qpc_bytes_128,
3102 QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_M,
3103 QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S,
3104 attr->sq_psn);
3105 roce_set_bit(context->qpc_bytes_128,
3106 QP_CONTEXT_QPC_BYTES_128_RX_ACK_PSN_ERR_FLG_S, 0);
3107 roce_set_field(context->qpc_bytes_128,
3108 QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_M,
3109 QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S,
3110 0);
3111 roce_set_bit(context->qpc_bytes_128,
3112 QP_CONTEXT_QPC_BYTES_128_IRRL_PSN_VLD_FLG_S, 0);
3113
3114 roce_set_field(context->qpc_bytes_132,
3115 QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_M,
3116 QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S, 0);
3117 roce_set_field(context->qpc_bytes_132,
3118 QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_M,
3119 QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S, 0);
3120
3121 roce_set_field(context->qpc_bytes_136,
3122 QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_M,
3123 QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S,
3124 attr->sq_psn);
3125 roce_set_field(context->qpc_bytes_136,
3126 QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_M,
3127 QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S,
3128 attr->sq_psn);
3129
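/*
 * The retry first-packet PSN is split across two fields: the low
 * SQ_PSN_SHIFT bits above, the remaining high bits below.
 */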
3130 roce_set_field(context->qpc_bytes_140,
3131 QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_M,
3132 QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S,
3133 (attr->sq_psn >> SQ_PSN_SHIFT));
3134 roce_set_field(context->qpc_bytes_140,
3135 QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_M,
3136 QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S, 0);
3137 roce_set_bit(context->qpc_bytes_140,
3138 QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S, 0);
3139
3140 roce_set_field(context->qpc_bytes_148,
3141 QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M,
3142 QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S, 0);
3143 roce_set_field(context->qpc_bytes_148,
3144 QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
3145 QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S,
3146 attr->retry_cnt);
3147 roce_set_field(context->qpc_bytes_148,
3148 QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M,
3149 QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S,
3150 attr->rnr_retry);
3151 roce_set_field(context->qpc_bytes_148,
3152 QP_CONTEXT_QPC_BYTES_148_LSN_M,
3153 QP_CONTEXT_QPC_BYTES_148_LSN_S, 0x100);
3154
3155 context->rnr_retry = 0;
3156
3157 roce_set_field(context->qpc_bytes_156,
3158 QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M,
3159 QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S,
3160 attr->retry_cnt);
3161 if (attr->timeout < 0x12) {
3162 dev_info(dev, "ack timeout value(0x%x) must be at least 0x12\n",
3163 attr->timeout);
3164 roce_set_field(context->qpc_bytes_156,
3165 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
3166 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
3167 0x12);
3168 } else {
3169 roce_set_field(context->qpc_bytes_156,
3170 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
3171 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
3172 attr->timeout);
3173 }
3174 roce_set_field(context->qpc_bytes_156,
3175 QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M,
3176 QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S,
3177 attr->rnr_retry);
3178 roce_set_field(context->qpc_bytes_156,
3179 QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
3180 QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
3181 hr_qp->phy_port);
3182 roce_set_field(context->qpc_bytes_156,
3183 QP_CONTEXT_QPC_BYTES_156_SL_M,
3184 QP_CONTEXT_QPC_BYTES_156_SL_S,
3185 rdma_ah_get_sl(&attr->ah_attr));
3186 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
3187 roce_set_field(context->qpc_bytes_156,
3188 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
3189 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S,
3190 ilog2((unsigned int)attr->max_rd_atomic));
3191 roce_set_field(context->qpc_bytes_156,
3192 QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_M,
3193 QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S, 0);
3194 context->pkt_use_len = 0;
3195
3196 roce_set_field(context->qpc_bytes_164,
3197 QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M,
3198 QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S, attr->sq_psn);
3199 roce_set_field(context->qpc_bytes_164,
3200 QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_M,
3201 QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S, 0);
3202
3203 roce_set_field(context->qpc_bytes_168,
3204 QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_M,
3205 QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S,
3206 attr->sq_psn);
3207 roce_set_field(context->qpc_bytes_168,
3208 QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_M,
3209 QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S, 0);
3210 roce_set_field(context->qpc_bytes_168,
3211 QP_CONTEXT_QPC_BYTES_168_DB_TYPE_M,
3212 QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S, 0);
3213 roce_set_bit(context->qpc_bytes_168,
3214 QP_CONTEXT_QPC_BYTES_168_MSG_LP_IND_S, 0);
3215 roce_set_bit(context->qpc_bytes_168,
3216 QP_CONTEXT_QPC_BYTES_168_CSDB_LP_IND_S, 0);
3217 roce_set_bit(context->qpc_bytes_168,
3218 QP_CONTEXT_QPC_BYTES_168_QP_ERR_FLG_S, 0);
3219 context->sge_use_len = 0;
3220
3221 roce_set_field(context->qpc_bytes_176,
3222 QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_M,
3223 QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S, 0);
3224 roce_set_field(context->qpc_bytes_176,
3225 QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_M,
3226 QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S,
3227 0);
3228 roce_set_field(context->qpc_bytes_180,
3229 QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_M,
3230 QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S, 0);
3231 roce_set_field(context->qpc_bytes_180,
3232 QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M,
3233 QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S, 0);
3234
3235 context->tx_cur_sq_wqe_ba_l = cpu_to_le32((u32)(mtts[0]));
3236
3237 roce_set_field(context->qpc_bytes_188,
3238 QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M,
3239 QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S,
3240 (mtts[0]) >> 32);
3241 roce_set_bit(context->qpc_bytes_188,
3242 QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S, 0);
3243 roce_set_field(context->qpc_bytes_188,
3244 QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M,
3245 QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S,
3246 0);
3247 } else if (!((cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
3248 (cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) ||
3249 (cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) ||
3250 (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
3251 (cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) ||
3252 (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
3253 (cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) ||
3254 (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR))) {
3255 dev_err(dev, "unsupported QP state transition\n");
3256 goto out;
3257 }
3258
3259 /* Every state transition must update the QP state field */
3260 roce_set_field(context->qpc_bytes_144,
3261 QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
3262 QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, new_state);
3263
3264 /* SW pass context to HW */
3265 ret = hns_roce_v1_qp_modify(hr_dev, &hr_qp->mtt,
3266 to_hns_roce_state(cur_state),
3267 to_hns_roce_state(new_state), context,
3268 hr_qp);
3269 if (ret) {
3270 dev_err(dev, "hns_roce_qp_modify failed\n");
3271 goto out;
3272 }
3273
3274 /*
3275 * The driver uses RST2INIT in place of INIT2INIT, so the
3276 * hardware must refresh the RQ head via the doorbell again
3277 */
3278 if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
3279 /* Memory barrier */
3280 wmb();
3281
3282 roce_set_field(doorbell[0], RQ_DOORBELL_U32_4_RQ_HEAD_M,
3283 RQ_DOORBELL_U32_4_RQ_HEAD_S, hr_qp->rq.head);
3284 roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_QPN_M,
3285 RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
3286 roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_CMD_M,
3287 RQ_DOORBELL_U32_8_CMD_S, 1);
3288 roce_set_bit(doorbell[1], RQ_DOORBELL_U32_8_HW_SYNC_S, 1);
3289
3290 if (ibqp->uobject) {
3291 hr_qp->rq.db_reg_l = hr_dev->reg_base +
3292 hr_dev->odb_offset +
3293 DB_REG_OFFSET * hr_dev->priv_uar.index;
3294 }
3295
3296 hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
3297 }
3298
3299 hr_qp->state = new_state;
3300
3301 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
3302 hr_qp->resp_depth = attr->max_dest_rd_atomic;
3303 if (attr_mask & IB_QP_PORT) {
3304 hr_qp->port = attr->port_num - 1;
3305 hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
3306 }
3307
3308 if (new_state == IB_QPS_RESET && !ibqp->uobject) {
3309 hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
3310 ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
3311 if (ibqp->send_cq != ibqp->recv_cq)
3312 hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
3313 hr_qp->qpn, NULL);
3314
3315 hr_qp->rq.head = 0;
3316 hr_qp->rq.tail = 0;
3317 hr_qp->sq.head = 0;
3318 hr_qp->sq.tail = 0;
3319 hr_qp->sq_next_wqe = 0;
3320 }
3321 out:
3322 kfree(context);
3323 return ret;
3324 }
3325
3326 static int hns_roce_v1_modify_qp(struct ib_qp *ibqp,
3327 const struct ib_qp_attr *attr, int attr_mask,
3328 enum ib_qp_state cur_state,
3329 enum ib_qp_state new_state)
3330 {
3331
3332 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
3333 return hns_roce_v1_m_sqp(ibqp, attr, attr_mask, cur_state,
3334 new_state);
3335 else
3336 return hns_roce_v1_m_qp(ibqp, attr, attr_mask, cur_state,
3337 new_state);
3338 }
3339
3340 static enum ib_qp_state to_ib_qp_state(enum hns_roce_qp_state state)
3341 {
3342 switch (state) {
3343 case HNS_ROCE_QP_STATE_RST:
3344 return IB_QPS_RESET;
3345 case HNS_ROCE_QP_STATE_INIT:
3346 return IB_QPS_INIT;
3347 case HNS_ROCE_QP_STATE_RTR:
3348 return IB_QPS_RTR;
3349 case HNS_ROCE_QP_STATE_RTS:
3350 return IB_QPS_RTS;
3351 case HNS_ROCE_QP_STATE_SQD:
3352 return IB_QPS_SQD;
3353 case HNS_ROCE_QP_STATE_ERR:
3354 return IB_QPS_ERR;
3355 default:
3356 return IB_QPS_ERR;
3357 }
3358 }
3359
3360 static int hns_roce_v1_query_qpc(struct hns_roce_dev *hr_dev,
3361 struct hns_roce_qp *hr_qp,
3362 struct hns_roce_qp_context *hr_context)
3363 {
3364 struct hns_roce_cmd_mailbox *mailbox;
3365 int ret;
3366
3367 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3368 if (IS_ERR(mailbox))
3369 return PTR_ERR(mailbox);
3370
3371 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
3372 HNS_ROCE_CMD_QUERY_QP,
3373 HNS_ROCE_CMD_TIMEOUT_MSECS);
3374 if (!ret)
3375 memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
3376 else
3377 dev_err(&hr_dev->pdev->dev, "QUERY QP cmd process error\n");
3378
3379 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3380
3381 return ret;
3382 }
3383
3384 static int hns_roce_v1_q_sqp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3385 int qp_attr_mask,
3386 struct ib_qp_init_attr *qp_init_attr)
3387 {
3388 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3389 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3390 struct hns_roce_sqp_context context;
3391 u32 addr;
3392
3393 mutex_lock(&hr_qp->mutex);
3394
3395 if (hr_qp->state == IB_QPS_RESET) {
3396 qp_attr->qp_state = IB_QPS_RESET;
3397 goto done;
3398 }
3399
3400 addr = ROCEE_QP1C_CFG0_0_REG +
3401 hr_qp->port * sizeof(struct hns_roce_sqp_context);
3402 context.qp1c_bytes_4 = cpu_to_le32(roce_read(hr_dev, addr));
3403 context.sq_rq_bt_l = cpu_to_le32(roce_read(hr_dev, addr + 1));
3404 context.qp1c_bytes_12 = cpu_to_le32(roce_read(hr_dev, addr + 2));
3405 context.qp1c_bytes_16 = cpu_to_le32(roce_read(hr_dev, addr + 3));
3406 context.qp1c_bytes_20 = cpu_to_le32(roce_read(hr_dev, addr + 4));
3407 context.cur_rq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 5));
3408 context.qp1c_bytes_28 = cpu_to_le32(roce_read(hr_dev, addr + 6));
3409 context.qp1c_bytes_32 = cpu_to_le32(roce_read(hr_dev, addr + 7));
3410 context.cur_sq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 8));
3411 context.qp1c_bytes_40 = cpu_to_le32(roce_read(hr_dev, addr + 9));
3412
3413 hr_qp->state = roce_get_field(context.qp1c_bytes_4,
3414 QP1C_BYTES_4_QP_STATE_M,
3415 QP1C_BYTES_4_QP_STATE_S);
3416 qp_attr->qp_state = hr_qp->state;
3417 qp_attr->path_mtu = IB_MTU_256;
3418 qp_attr->path_mig_state = IB_MIG_ARMED;
3419 qp_attr->qkey = QKEY_VAL;
3420 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
3421 qp_attr->rq_psn = 0;
3422 qp_attr->sq_psn = 0;
3423 qp_attr->dest_qp_num = 1;
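	/* 6 = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ */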
3424 qp_attr->qp_access_flags = 6;
3425
3426 qp_attr->pkey_index = roce_get_field(context.qp1c_bytes_20,
3427 QP1C_BYTES_20_PKEY_IDX_M,
3428 QP1C_BYTES_20_PKEY_IDX_S);
3429 qp_attr->port_num = hr_qp->port + 1;
3430 qp_attr->sq_draining = 0;
3431 qp_attr->max_rd_atomic = 0;
3432 qp_attr->max_dest_rd_atomic = 0;
3433 qp_attr->min_rnr_timer = 0;
3434 qp_attr->timeout = 0;
3435 qp_attr->retry_cnt = 0;
3436 qp_attr->rnr_retry = 0;
3437 qp_attr->alt_timeout = 0;
3438
3439 done:
3440 qp_attr->cur_qp_state = qp_attr->qp_state;
3441 qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
3442 qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
3443 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
3444 qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
3445 qp_attr->cap.max_inline_data = 0;
3446 qp_init_attr->cap = qp_attr->cap;
3447 qp_init_attr->create_flags = 0;
3448
3449 mutex_unlock(&hr_qp->mutex);
3450
3451 return 0;
3452 }
3453
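/*
 * Query helper for regular QPs: fetch the QPC through the mailbox
 * command and translate its fields into ib_qp_attr/ib_qp_init_attr.
 */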
3454 static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3455 int qp_attr_mask,
3456 struct ib_qp_init_attr *qp_init_attr)
3457 {
3458 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3459 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3460 struct device *dev = &hr_dev->pdev->dev;
3461 struct hns_roce_qp_context *context;
3462 int tmp_qp_state = 0;
3463 int ret = 0;
3464 int state;
3465
3466 context = kzalloc(sizeof(*context), GFP_KERNEL);
3467 if (!context)
3468 return -ENOMEM;
3469
3470 memset(qp_attr, 0, sizeof(*qp_attr));
3471 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
3472
3473 mutex_lock(&hr_qp->mutex);
3474
3475 if (hr_qp->state == IB_QPS_RESET) {
3476 qp_attr->qp_state = IB_QPS_RESET;
3477 goto done;
3478 }
3479
3480 ret = hns_roce_v1_query_qpc(hr_dev, hr_qp, context);
3481 if (ret) {
3482 dev_err(dev, "query qpc error\n");
3483 ret = -EINVAL;
3484 goto out;
3485 }
3486
3487 state = roce_get_field(context->qpc_bytes_144,
3488 QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
3489 QP_CONTEXT_QPC_BYTES_144_QP_STATE_S);
3490 tmp_qp_state = (int)to_ib_qp_state((enum hns_roce_qp_state)state);
3491 if (tmp_qp_state == -1) {
3492 dev_err(dev, "to_ib_qp_state error\n");
3493 ret = -EINVAL;
3494 goto out;
3495 }
3496 hr_qp->state = (u8)tmp_qp_state;
3497 qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
3498 qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->qpc_bytes_48,
3499 QP_CONTEXT_QPC_BYTES_48_MTU_M,
3500 QP_CONTEXT_QPC_BYTES_48_MTU_S);
3501 qp_attr->path_mig_state = IB_MIG_ARMED;
3502 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
3503 if (hr_qp->ibqp.qp_type == IB_QPT_UD)
3504 qp_attr->qkey = QKEY_VAL;
3505
3506 qp_attr->rq_psn = roce_get_field(context->qpc_bytes_88,
3507 QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M,
3508 QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S);
3509 qp_attr->sq_psn = (u32)roce_get_field(context->qpc_bytes_164,
3510 QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M,
3511 QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S);
3512 qp_attr->dest_qp_num = (u8)roce_get_field(context->qpc_bytes_36,
3513 QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
3514 QP_CONTEXT_QPC_BYTES_36_DEST_QP_S);
3515 qp_attr->qp_access_flags = ((roce_get_bit(context->qpc_bytes_4,
3516 QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S)) << 2) |
3517 ((roce_get_bit(context->qpc_bytes_4,
3518 QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S)) << 1) |
3519 ((roce_get_bit(context->qpc_bytes_4,
3520 QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S)) << 3);
3521
3522 if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
3523 hr_qp->ibqp.qp_type == IB_QPT_UC) {
3524 struct ib_global_route *grh =
3525 rdma_ah_retrieve_grh(&qp_attr->ah_attr);
3526
3527 rdma_ah_set_sl(&qp_attr->ah_attr,
3528 roce_get_field(context->qpc_bytes_156,
3529 QP_CONTEXT_QPC_BYTES_156_SL_M,
3530 QP_CONTEXT_QPC_BYTES_156_SL_S));
3531 rdma_ah_set_ah_flags(&qp_attr->ah_attr, IB_AH_GRH);
3532 grh->flow_label =
3533 roce_get_field(context->qpc_bytes_48,
3534 QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
3535 QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S);
3536 grh->sgid_index =
3537 roce_get_field(context->qpc_bytes_36,
3538 QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
3539 QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S);
3540 grh->hop_limit =
3541 roce_get_field(context->qpc_bytes_44,
3542 QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
3543 QP_CONTEXT_QPC_BYTES_44_HOPLMT_S);
3544 grh->traffic_class =
3545 roce_get_field(context->qpc_bytes_48,
3546 QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
3547 QP_CONTEXT_QPC_BYTES_48_TCLASS_S);
3548
3549 memcpy(grh->dgid.raw, context->dgid,
3550 sizeof(grh->dgid.raw));
3551 }
3552
3553 qp_attr->pkey_index = roce_get_field(context->qpc_bytes_12,
3554 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
3555 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S);
3556 qp_attr->port_num = hr_qp->port + 1;
3557 qp_attr->sq_draining = 0;
3558 qp_attr->max_rd_atomic = 1 << roce_get_field(context->qpc_bytes_156,
3559 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
3560 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S);
3561 qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->qpc_bytes_32,
3562 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
3563 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S);
3564 qp_attr->min_rnr_timer = (u8)(roce_get_field(context->qpc_bytes_24,
3565 QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
3566 QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S));
3567 qp_attr->timeout = (u8)(roce_get_field(context->qpc_bytes_156,
3568 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
3569 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S));
3570 qp_attr->retry_cnt = roce_get_field(context->qpc_bytes_148,
3571 QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
3572 QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S);
3573 qp_attr->rnr_retry = (u8)le32_to_cpu(context->rnr_retry);
3574
3575 done:
3576 qp_attr->cur_qp_state = qp_attr->qp_state;
3577 qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
3578 qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
3579
3580 if (!ibqp->uobject) {
3581 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
3582 qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
3583 } else {
3584 qp_attr->cap.max_send_wr = 0;
3585 qp_attr->cap.max_send_sge = 0;
3586 }
3587
3588 qp_init_attr->cap = qp_attr->cap;
3589
3590 out:
3591 mutex_unlock(&hr_qp->mutex);
3592 kfree(context);
3593 return ret;
3594 }
3595
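/*
 * doorbell_qpn <= 1 identifies the special QP0/QP1 pair, which is
 * queried from registers; everything else uses the QPC path.
 */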
3596 static int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3597 int qp_attr_mask,
3598 struct ib_qp_init_attr *qp_init_attr)
3599 {
3600 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3601
3602 return hr_qp->doorbell_qpn <= 1 ?
3603 hns_roce_v1_q_sqp(ibqp, qp_attr, qp_attr_mask, qp_init_attr) :
3604 hns_roce_v1_q_qp(ibqp, qp_attr, qp_attr_mask, qp_init_attr);
3605 }
3606
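/*
 * Tear down a QP: move it to RESET, purge its CQEs from both CQs
 * (kernel QPs only), unregister it, return its QPN to the allocator
 * for RC QPs, and release the MTT and WQE buffers.
 */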
3607 int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
3608 {
3609 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3610 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3611 struct hns_roce_cq *send_cq, *recv_cq;
3612 int ret;
3613
3614 ret = hns_roce_v1_modify_qp(ibqp, NULL, 0, hr_qp->state, IB_QPS_RESET);
3615 if (ret)
3616 return ret;
3617
3618 send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
3619 recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);
3620
3621 hns_roce_lock_cqs(send_cq, recv_cq);
3622 if (!udata) {
3623 __hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
3624 to_hr_srq(hr_qp->ibqp.srq) : NULL);
3625 if (send_cq != recv_cq)
3626 __hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL);
3627 }
3628 hns_roce_unlock_cqs(send_cq, recv_cq);
3629
3630 hns_roce_qp_remove(hr_dev, hr_qp);
3631 hns_roce_qp_free(hr_dev, hr_qp);
3632
3633 /* RC QP, release QPN */
3634 if (hr_qp->ibqp.qp_type == IB_QPT_RC)
3635 hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
3636
3637 hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);
3638
3639 ib_umem_release(hr_qp->umem);
3640 if (!udata) {
3641 kfree(hr_qp->sq.wrid);
3642 kfree(hr_qp->rq.wrid);
3643
3644 hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
3645 }
3646
3647 if (hr_qp->ibqp.qp_type == IB_QPT_RC)
3648 kfree(hr_qp);
3649 else
3650 kfree(hr_to_hr_sqp(hr_qp));
3651 return 0;
3652 }
3653
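/*
 * Destroy a CQ. Before the buffer is freed, poll the hardware CQE
 * counter so that outstanding CQEs are known to have been written
 * back; give up with a warning after a bounded number of waits.
 */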
3654 static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
3655 {
3656 struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
3657 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
3658 struct device *dev = &hr_dev->pdev->dev;
3659 u32 cqe_cnt_ori;
3660 u32 cqe_cnt_cur;
3661 u32 cq_buf_size;
3662 int wait_time = 0;
3663
3664 hns_roce_free_cq(hr_dev, hr_cq);
3665
3666 /*
3667 * Before freeing the CQ buffer, ensure that all outstanding CQEs
3668 * have been written back by checking the CQE counter.
3669 */
3670 cqe_cnt_ori = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
3671 while (1) {
3672 if (roce_read(hr_dev, ROCEE_CAEP_CQE_WCMD_EMPTY) &
3673 HNS_ROCE_CQE_WCMD_EMPTY_BIT)
3674 break;
3675
3676 cqe_cnt_cur = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
3677 if ((cqe_cnt_cur - cqe_cnt_ori) >= HNS_ROCE_MIN_CQE_CNT)
3678 break;
3679
3680 msleep(HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS);
3681 if (wait_time > HNS_ROCE_MAX_FREE_CQ_WAIT_CNT) {
3682 dev_warn(dev, "Destroy cq 0x%lx timeout!\n",
3683 hr_cq->cqn);
3684 break;
3685 }
3686 wait_time++;
3687 }
3688
3689 hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
3690
3691 ib_umem_release(hr_cq->umem);
3692 if (!udata) {
3693 /* Free the buff of stored cq */
3694 cq_buf_size = (ibcq->cqe + 1) * hr_dev->caps.cq_entry_sz;
3695 hns_roce_buf_free(hr_dev, cq_buf_size, &hr_cq->hr_buf.hr_buf);
3696 }
3697 }
3698
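/*
 * Ring the EQ doorbell: the low bits carry the masked consumer index,
 * and @req_not additionally sets the bit at position @log_entries
 * (presumably a notification re-arm request).
 */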
3699 static void set_eq_cons_index_v1(struct hns_roce_eq *eq, int req_not)
3700 {
3701 roce_raw_write((eq->cons_index & HNS_ROCE_V1_CONS_IDX_M) |
3702 (req_not << eq->log_entries), eq->doorbell);
3703 }
3704
3705 static void hns_roce_v1_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
3706 struct hns_roce_aeqe *aeqe, int qpn)
3707 {
3708 struct device *dev = &hr_dev->pdev->dev;
3709
3710 dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
3711 switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
3712 HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
3713 case HNS_ROCE_LWQCE_QPC_ERROR:
3714 dev_warn(dev, "QP %d, QPC error.\n", qpn);
3715 break;
3716 case HNS_ROCE_LWQCE_MTU_ERROR:
3717 dev_warn(dev, "QP %d, MTU error.\n", qpn);
3718 break;
3719 case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
3720 dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
3721 break;
3722 case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
3723 dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
3724 break;
3725 case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
3726 dev_warn(dev, "QP %d, WQE shift error\n", qpn);
3727 break;
3728 case HNS_ROCE_LWQCE_SL_ERROR:
3729 dev_warn(dev, "QP %d, SL error.\n", qpn);
3730 break;
3731 case HNS_ROCE_LWQCE_PORT_ERROR:
3732 dev_warn(dev, "QP %d, port error.\n", qpn);
3733 break;
3734 default:
3735 break;
3736 }
3737 }
3738
3739 static void hns_roce_v1_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
3740 struct hns_roce_aeqe *aeqe,
3741 int qpn)
3742 {
3743 struct device *dev = &hr_dev->pdev->dev;
3744
3745 dev_warn(dev, "Local Access Violation Work Queue Error.\n");
3746 switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
3747 HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
3748 case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
3749 dev_warn(dev, "QP %d, R_key violation.\n", qpn);
3750 break;
3751 case HNS_ROCE_LAVWQE_LENGTH_ERROR:
3752 dev_warn(dev, "QP %d, length error.\n", qpn);
3753 break;
3754 case HNS_ROCE_LAVWQE_VA_ERROR:
3755 dev_warn(dev, "QP %d, VA error.\n", qpn);
3756 break;
3757 case HNS_ROCE_LAVWQE_PD_ERROR:
3758 dev_err(dev, "QP %d, PD error.\n", qpn);
3759 break;
3760 case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
3761 dev_warn(dev, "QP %d, rw acc error.\n", qpn);
3762 break;
3763 case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
3764 dev_warn(dev, "QP %d, key state error.\n", qpn);
3765 break;
3766 case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
3767 dev_warn(dev, "QP %d, MR operation error.\n", qpn);
3768 break;
3769 default:
3770 break;
3771 }
3772 }
3773
3774 static void hns_roce_v1_qp_err_handle(struct hns_roce_dev *hr_dev,
3775 struct hns_roce_aeqe *aeqe,
3776 int event_type)
3777 {
3778 struct device *dev = &hr_dev->pdev->dev;
3779 int phy_port;
3780 int qpn;
3781
3782 qpn = roce_get_field(aeqe->event.qp_event.qp,
3783 HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
3784 HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
3785 phy_port = roce_get_field(aeqe->event.qp_event.qp,
3786 HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M,
3787 HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S);
3788 if (qpn <= 1)
3789 qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port;
3790
3791 switch (event_type) {
3792 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
3793 dev_warn(dev, "Invalid Req Local Work Queue Error.\n"
3794 "QP %d, phy_port %d.\n", qpn, phy_port);
3795 break;
3796 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
3797 hns_roce_v1_wq_catas_err_handle(hr_dev, aeqe, qpn);
3798 break;
3799 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
3800 hns_roce_v1_local_wq_access_err_handle(hr_dev, aeqe, qpn);
3801 break;
3802 default:
3803 break;
3804 }
3805
3806 hns_roce_qp_event(hr_dev, qpn, event_type);
3807 }
3808
3809 static void hns_roce_v1_cq_err_handle(struct hns_roce_dev *hr_dev,
3810 struct hns_roce_aeqe *aeqe,
3811 int event_type)
3812 {
3813 struct device *dev = &hr_dev->pdev->dev;
3814 u32 cqn;
3815
3816 cqn = roce_get_field(aeqe->event.cq_event.cq,
3817 HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
3818 HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S);
3819
3820 switch (event_type) {
3821 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
3822 dev_warn(dev, "CQ 0x%x access err.\n", cqn);
3823 break;
3824 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
3825 dev_warn(dev, "CQ 0x%x overflow\n", cqn);
3826 break;
3827 case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
3828 dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn);
3829 break;
3830 default:
3831 break;
3832 }
3833
3834 hns_roce_cq_event(hr_dev, cqn, event_type);
3835 }
3836
3837 static void hns_roce_v1_db_overflow_handle(struct hns_roce_dev *hr_dev,
3838 struct hns_roce_aeqe *aeqe)
3839 {
3840 struct device *dev = &hr_dev->pdev->dev;
3841
3842 switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
3843 HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
3844 case HNS_ROCE_DB_SUBTYPE_SDB_OVF:
3845 dev_warn(dev, "SDB overflow.\n");
3846 break;
3847 case HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF:
3848 dev_warn(dev, "SDB almost overflow.\n");
3849 break;
3850 case HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP:
3851 dev_warn(dev, "SDB almost empty.\n");
3852 break;
3853 case HNS_ROCE_DB_SUBTYPE_ODB_OVF:
3854 dev_warn(dev, "ODB overflow.\n");
3855 break;
3856 case HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF:
3857 dev_warn(dev, "ODB almost overflow.\n");
3858 break;
3859 case HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP:
3860 dev_warn(dev, "SDB almost empty.\n");
3861 break;
3862 default:
3863 break;
3864 }
3865 }
3866
3867 static struct hns_roce_aeqe *get_aeqe_v1(struct hns_roce_eq *eq, u32 entry)
3868 {
3869 unsigned long off = (entry & (eq->entries - 1)) *
3870 HNS_ROCE_AEQ_ENTRY_SIZE;
3871
3872 return (struct hns_roce_aeqe *)((u8 *)
3873 (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
3874 off % HNS_ROCE_BA_SIZE);
3875 }
3876
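/*
 * An AEQE belongs to software when its owner bit differs from the
 * "phase" implied by cons_index. For example, with 16 entries the
 * expression (cons_index & entries) flips every time the ring wraps,
 * so stale entries left over from the previous pass are ignored.
 */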
3877 static struct hns_roce_aeqe *next_aeqe_sw_v1(struct hns_roce_eq *eq)
3878 {
3879 struct hns_roce_aeqe *aeqe = get_aeqe_v1(eq, eq->cons_index);
3880
3881 return (roce_get_bit(aeqe->asyn, HNS_ROCE_AEQE_U32_4_OWNER_S) ^
3882 !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
3883 }
3884
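/*
 * Poll the AEQ: consume entries while the ownership check passes,
 * dispatch each asynchronous event to its handler, then write the
 * consumer index back once at the end.
 */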
3885 static int hns_roce_v1_aeq_int(struct hns_roce_dev *hr_dev,
3886 struct hns_roce_eq *eq)
3887 {
3888 struct device *dev = &hr_dev->pdev->dev;
3889 struct hns_roce_aeqe *aeqe;
3890 int aeqes_found = 0;
3891 int event_type;
3892
3893 while ((aeqe = next_aeqe_sw_v1(eq))) {
3895 /* Make sure we read the AEQ entry after we have checked the
3896 * ownership bit
3897 */
3898 dma_rmb();
3899
3900 dev_dbg(dev, "aeqe = %pK, aeqe->asyn.event_type = 0x%lx\n",
3901 aeqe,
3902 roce_get_field(aeqe->asyn,
3903 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
3904 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
3905 event_type = roce_get_field(aeqe->asyn,
3906 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
3907 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S);
3908 switch (event_type) {
3909 case HNS_ROCE_EVENT_TYPE_PATH_MIG:
3910 dev_warn(dev, "PATH MIG not supported\n");
3911 break;
3912 case HNS_ROCE_EVENT_TYPE_COMM_EST:
3913 dev_warn(dev, "COMMUNICATION established\n");
3914 break;
3915 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
3916 dev_warn(dev, "SQ DRAINED not supported\n");
3917 break;
3918 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
3919 dev_warn(dev, "PATH MIG failed\n");
3920 break;
3921 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
3922 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
3923 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
3924 hns_roce_v1_qp_err_handle(hr_dev, aeqe, event_type);
3925 break;
3926 case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
3927 case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
3928 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
3929 dev_warn(dev, "SRQ not support!\n");
3930 break;
3931 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
3932 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
3933 case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
3934 hns_roce_v1_cq_err_handle(hr_dev, aeqe, event_type);
3935 break;
3936 case HNS_ROCE_EVENT_TYPE_PORT_CHANGE:
3937 dev_warn(dev, "port change.\n");
3938 break;
3939 case HNS_ROCE_EVENT_TYPE_MB:
3940 hns_roce_cmd_event(hr_dev,
3941 le16_to_cpu(aeqe->event.cmd.token),
3942 aeqe->event.cmd.status,
3943 le64_to_cpu(aeqe->event.cmd.out_param));
3945 break;
3946 case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
3947 hns_roce_v1_db_overflow_handle(hr_dev, aeqe);
3948 break;
3949 case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
3950 dev_warn(dev, "CEQ 0x%lx overflow.\n",
3951 roce_get_field(aeqe->event.ce_event.ceqe,
3952 HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M,
3953 HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S));
3954 break;
3955 default:
3956 dev_warn(dev, "Unhandled event %d on EQ %d at idx %u.\n",
3957 event_type, eq->eqn, eq->cons_index);
3958 break;
3959 }
3960
3961 eq->cons_index++;
3962 aeqes_found = 1;
3963
3964 if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1) {
3965 dev_warn(dev, "cons_index overflow, set back to 0.\n");
3966 eq->cons_index = 0;
3967 }
3968 }
3969
3970 set_eq_cons_index_v1(eq, 0);
3971
3972 return aeqes_found;
3973 }
3974
3975 static struct hns_roce_ceqe *get_ceqe_v1(struct hns_roce_eq *eq, u32 entry)
3976 {
3977 unsigned long off = (entry & (eq->entries - 1)) *
3978 HNS_ROCE_CEQ_ENTRY_SIZE;
3979
3980 return (struct hns_roce_ceqe *)((u8 *)
3981 (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
3982 off % HNS_ROCE_BA_SIZE);
3983 }
3984
3985 static struct hns_roce_ceqe *next_ceqe_sw_v1(struct hns_roce_eq *eq)
3986 {
3987 struct hns_roce_ceqe *ceqe = get_ceqe_v1(eq, eq->cons_index);
3988
3989 return (!!(roce_get_bit(ceqe->comp,
3990 HNS_ROCE_CEQE_CEQE_COMP_OWNER_S))) ^
3991 (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
3992 }
3993
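/*
 * Poll the CEQ: each valid CEQE carries the CQN of a CQ with new
 * completions, which is forwarded to hns_roce_cq_completion().
 */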
3994 static int hns_roce_v1_ceq_int(struct hns_roce_dev *hr_dev,
3995 struct hns_roce_eq *eq)
3996 {
3997 struct hns_roce_ceqe *ceqe;
3998 int ceqes_found = 0;
3999 u32 cqn;
4000
4001 while ((ceqe = next_ceqe_sw_v1(eq))) {
4003 /* Make sure we read CEQ entry after we have checked the
4004 * ownership bit
4005 */
4006 dma_rmb();
4007
4008 cqn = roce_get_field(ceqe->comp,
4009 HNS_ROCE_CEQE_CEQE_COMP_CQN_M,
4010 HNS_ROCE_CEQE_CEQE_COMP_CQN_S);
4011 hns_roce_cq_completion(hr_dev, cqn);
4012
4013 ++eq->cons_index;
4014 ceqes_found = 1;
4015
4016 if (eq->cons_index >
4017 EQ_DEPTH_COEFF * hr_dev->caps.ceqe_depth - 1) {
4018 dev_warn(&eq->hr_dev->pdev->dev,
4019 "cons_index overflow, set back to 0.\n");
4020 eq->cons_index = 0;
4021 }
4022 }
4023
4024 set_eq_cons_index_v1(eq, 0);
4025
4026 return ceqes_found;
4027 }
4028
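/*
 * Shared MSI-X handler for both EQ types; eq->type_flag selects the
 * CEQ or AEQ polling routine.
 */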
4029 static irqreturn_t hns_roce_v1_msix_interrupt_eq(int irq, void *eq_ptr)
4030 {
4031 struct hns_roce_eq *eq = eq_ptr;
4032 struct hns_roce_dev *hr_dev = eq->hr_dev;
4033 int int_work = 0;
4034
4035 if (eq->type_flag == HNS_ROCE_CEQ)
4036 /* CEQ irq routine; the CEQ is a pulse irq and needs no clearing */
4037 int_work = hns_roce_v1_ceq_int(hr_dev, eq);
4038 else
4039 /* AEQ irq routine; the AEQ is a pulse irq and needs no clearing */
4040 int_work = hns_roce_v1_aeq_int(hr_dev, eq);
4041
4042 return IRQ_RETVAL(int_work);
4043 }
4044
4045 static irqreturn_t hns_roce_v1_msix_interrupt_abn(int irq, void *dev_id)
4046 {
4047 struct hns_roce_dev *hr_dev = dev_id;
4048 struct device *dev = &hr_dev->pdev->dev;
4049 int int_work = 0;
4050 u32 caepaemask_val;
4051 u32 cealmovf_val;
4052 u32 caepaest_val;
4053 u32 aeshift_val;
4054 u32 ceshift_val;
4055 u32 cemask_val;
4056 __le32 tmp;
4057 int i;
4058
4059 /*
4060 * Abnormal interrupts (AEQ overflow, ECC multi-bit error, CEQ
4061 * overflow) must be cleared explicitly: mask the irq, clear the
4062 * interrupt state, then cancel the mask again.
4063 */
4064 aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);
4065 tmp = cpu_to_le32(aeshift_val);
4066
4067 /* AEQE overflow */
4068 if (roce_get_bit(tmp,
4069 ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) {
4070 dev_warn(dev, "AEQ overflow!\n");
4071
4072 /* Set mask */
4073 caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
4074 tmp = cpu_to_le32(caepaemask_val);
4075 roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
4076 HNS_ROCE_INT_MASK_ENABLE);
4077 caepaemask_val = le32_to_cpu(tmp);
4078 roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
4079
4080 /* Clear int state(INT_WC : write 1 clear) */
4081 caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG);
4082 tmp = cpu_to_le32(caepaest_val);
4083 roce_set_bit(tmp, ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1);
4084 caepaest_val = le32_to_cpu(tmp);
4085 roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val);
4086
4087 /* Clear mask */
4088 caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
4089 tmp = cpu_to_le32(caepaemask_val);
4090 roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
4091 HNS_ROCE_INT_MASK_DISABLE);
4092 caepaemask_val = le32_to_cpu(tmp);
4093 roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
4094 }
4095
4096 /* CEQ almost overflow */
4097 for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
4098 ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG +
4099 i * CEQ_REG_OFFSET);
4100 tmp = cpu_to_le32(ceshift_val);
4101
4102 if (roce_get_bit(tmp,
4103 ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) {
4104 dev_warn(dev, "CEQ[%d] almost overflow!\n", i);
4105 int_work++;
4106
4107 /* Set mask */
4108 cemask_val = roce_read(hr_dev,
4109 ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4110 i * CEQ_REG_OFFSET);
4111 tmp = cpu_to_le32(cemask_val);
4112 roce_set_bit(tmp,
4113 ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
4114 HNS_ROCE_INT_MASK_ENABLE);
4115 cemask_val = le32_to_cpu(tmp);
4116 roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4117 i * CEQ_REG_OFFSET, cemask_val);
4118
4119 /* Clear int state(INT_WC : write 1 clear) */
4120 cealmovf_val = roce_read(hr_dev,
4121 ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
4122 i * CEQ_REG_OFFSET);
4123 tmp = cpu_to_le32(cealmovf_val);
4124 roce_set_bit(tmp,
4125 ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S,
4126 1);
4127 cealmovf_val = le32_to_cpu(tmp);
4128 roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
4129 i * CEQ_REG_OFFSET, cealmovf_val);
4130
4131 /* Clear mask */
4132 cemask_val = roce_read(hr_dev,
4133 ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4134 i * CEQ_REG_OFFSET);
4135 tmp = cpu_to_le32(cemask_val);
4136 roce_set_bit(tmp,
4137 ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
4138 HNS_ROCE_INT_MASK_DISABLE);
4139 cemask_val = le32_to_cpu(tmp);
4140 roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4141 i * CEQ_REG_OFFSET, cemask_val);
4142 }
4143 }
4144
4145 /* ECC multi-bit error alarm */
4146 dev_warn(dev, "ECC UCERR ALARM: 0x%x, 0x%x, 0x%x\n",
4147 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM0_REG),
4148 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM1_REG),
4149 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM2_REG));
4150
4151 dev_warn(dev, "ECC CERR ALARM: 0x%x, 0x%x, 0x%x\n",
4152 roce_read(hr_dev, ROCEE_ECC_CERR_ALM0_REG),
4153 roce_read(hr_dev, ROCEE_ECC_CERR_ALM1_REG),
4154 roce_read(hr_dev, ROCEE_ECC_CERR_ALM2_REG));
4155
4156 return IRQ_RETVAL(int_work);
4157 }
4158
4159 static void hns_roce_v1_int_mask_enable(struct hns_roce_dev *hr_dev)
4160 {
4161 u32 aemask_val;
4162 int masken = 0;
4163 __le32 tmp;
4164 int i;
4165
4166 /* AEQ INT */
4167 aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
4168 tmp = cpu_to_le32(aemask_val);
4169 roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
4170 masken);
4171 roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken);
4172 aemask_val = le32_to_cpu(tmp);
4173 roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val);
4174
4175 /* CEQ INT */
4176 for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
4177 /* IRQ mask */
4178 roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4179 i * CEQ_REG_OFFSET, masken);
4180 }
4181 }
4182
4183 static void hns_roce_v1_free_eq(struct hns_roce_dev *hr_dev,
4184 struct hns_roce_eq *eq)
4185 {
4186 int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) +
4187 HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
4188 int i;
4189
4190 if (!eq->buf_list)
4191 return;
4192
4193 for (i = 0; i < npages; ++i)
4194 dma_free_coherent(&hr_dev->pdev->dev, HNS_ROCE_BA_SIZE,
4195 eq->buf_list[i].buf, eq->buf_list[i].map);
4196
4197 kfree(eq->buf_list);
4198 }
4199
4200 static void hns_roce_v1_enable_eq(struct hns_roce_dev *hr_dev, int eq_num,
4201 int enable_flag)
4202 {
4203 void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num];
4204 __le32 tmp;
4205 u32 val;
4206
4207 val = readl(eqc);
4208 tmp = cpu_to_le32(val);
4209
4210 if (enable_flag)
4211 roce_set_field(tmp,
4212 ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
4213 ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
4214 HNS_ROCE_EQ_STAT_VALID);
4215 else
4216 roce_set_field(tmp,
4217 ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
4218 ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
4219 HNS_ROCE_EQ_STAT_INVALID);
4220
4221 val = le32_to_cpu(tmp);
4222 writel(val, eqc);
4223 }
4224
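/*
 * Program one EQ: the queue buffer is allocated as HNS_ROCE_BA_SIZE
 * coherent chunks, then the EQC registers are written in order: the
 * state/shift word, buffer address bits 12~44 at offset 4, address
 * bits 45~49 plus the producer index at offset 8, and the consumer
 * index at offset 0xc.
 */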
4225 static int hns_roce_v1_create_eq(struct hns_roce_dev *hr_dev,
4226 struct hns_roce_eq *eq)
4227 {
4228 void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn];
4229 struct device *dev = &hr_dev->pdev->dev;
4230 dma_addr_t tmp_dma_addr;
4231 u32 eqconsindx_val = 0;
4232 u32 eqcuridx_val = 0;
4233 u32 eqshift_val = 0;
4234 __le32 tmp2 = 0;
4235 __le32 tmp1 = 0;
4236 __le32 tmp = 0;
4237 int num_bas;
4238 int ret;
4239 int i;
4240
4241 num_bas = (PAGE_ALIGN(eq->entries * eq->eqe_size) +
4242 HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
4243
4244 if ((eq->entries * eq->eqe_size) > HNS_ROCE_BA_SIZE) {
4245 dev_err(dev, "[error]eq buf %d gt ba size(%d) need bas=%d\n",
4246 (eq->entries * eq->eqe_size), HNS_ROCE_BA_SIZE,
4247 num_bas);
4248 return -EINVAL;
4249 }
4250
4251 eq->buf_list = kcalloc(num_bas, sizeof(*eq->buf_list), GFP_KERNEL);
4252 if (!eq->buf_list)
4253 return -ENOMEM;
4254
4255 for (i = 0; i < num_bas; ++i) {
4256 eq->buf_list[i].buf = dma_alloc_coherent(dev, HNS_ROCE_BA_SIZE,
4257 &tmp_dma_addr,
4258 GFP_KERNEL);
4259 if (!eq->buf_list[i].buf) {
4260 ret = -ENOMEM;
4261 goto err_out_free_pages;
4262 }
4263
4264 eq->buf_list[i].map = tmp_dma_addr;
4265 }
4266 eq->cons_index = 0;
4267 roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
4268 ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
4269 HNS_ROCE_EQ_STAT_INVALID);
4270 roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M,
4271 ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S,
4272 eq->log_entries);
4273 eqshift_val = le32_to_cpu(tmp);
4274 writel(eqshift_val, eqc);
4275
4276 /* Configure eq extended address 12~44bit */
4277 writel((u32)(eq->buf_list[0].map >> 12), eqc + 4);
4278
4279 /*
4280 * Configure eq extended address 45~49 bit.
4281 * 44 = 32 + 12: the address is shifted right by 12 because the
4282 * hardware uses 4K pages, and by another 32 to obtain the high
4283 * 32-bit half of the value written to the hardware.
4284 */
4285 roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
4286 ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S,
4287 eq->buf_list[0].map >> 44);
4288 roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
4289 ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0);
4290 eqcuridx_val = le32_to_cpu(tmp1);
4291 writel(eqcuridx_val, eqc + 8);
4292
4293 /* Configure eq consumer index */
4294 roce_set_field(tmp2, ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
4295 ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0);
4296 eqconsindx_val = le32_to_cpu(tmp2);
4297 writel(eqconsindx_val, eqc + 0xc);
4298
4299 return 0;
4300
4301 err_out_free_pages:
4302 for (i -= 1; i >= 0; i--)
4303 dma_free_coherent(dev, HNS_ROCE_BA_SIZE, eq->buf_list[i].buf,
4304 eq->buf_list[i].map);
4305
4306 kfree(eq->buf_list);
4307 return ret;
4308 }
4309
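/*
 * Build the EQ table: entries 0..num_comp_vectors-1 are CEQs and the
 * next one is the AEQ; any remaining vectors are wired to the
 * abnormal-interrupt handler. The irqs are first disabled, the CE
 * interrupt interval and burst parameters configured, each EQ created
 * and its irq requested, and finally every EQ is enabled.
 */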
4310 static int hns_roce_v1_init_eq_table(struct hns_roce_dev *hr_dev)
4311 {
4312 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
4313 struct device *dev = &hr_dev->pdev->dev;
4314 struct hns_roce_eq *eq;
4315 int irq_num;
4316 int eq_num;
4317 int ret;
4318 int i, j;
4319
4320 eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
4321 irq_num = eq_num + hr_dev->caps.num_other_vectors;
4322
4323 eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
4324 if (!eq_table->eq)
4325 return -ENOMEM;
4326
4327 eq_table->eqc_base = kcalloc(eq_num, sizeof(*eq_table->eqc_base),
4328 GFP_KERNEL);
4329 if (!eq_table->eqc_base) {
4330 ret = -ENOMEM;
4331 goto err_eqc_base_alloc_fail;
4332 }
4333
4334 for (i = 0; i < eq_num; i++) {
4335 eq = &eq_table->eq[i];
4336 eq->hr_dev = hr_dev;
4337 eq->eqn = i;
4338 eq->irq = hr_dev->irq[i];
4339 eq->log_page_size = PAGE_SHIFT;
4340
4341 if (i < hr_dev->caps.num_comp_vectors) {
4342 /* CEQ */
4343 eq_table->eqc_base[i] = hr_dev->reg_base +
4344 ROCEE_CAEP_CEQC_SHIFT_0_REG +
4345 CEQ_REG_OFFSET * i;
4346 eq->type_flag = HNS_ROCE_CEQ;
4347 eq->doorbell = hr_dev->reg_base +
4348 ROCEE_CAEP_CEQC_CONS_IDX_0_REG +
4349 CEQ_REG_OFFSET * i;
4350 eq->entries = hr_dev->caps.ceqe_depth;
4351 eq->log_entries = ilog2(eq->entries);
4352 eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
4353 } else {
4354 /* AEQ */
4355 eq_table->eqc_base[i] = hr_dev->reg_base +
4356 ROCEE_CAEP_AEQC_AEQE_SHIFT_REG;
4357 eq->type_flag = HNS_ROCE_AEQ;
4358 eq->doorbell = hr_dev->reg_base +
4359 ROCEE_CAEP_AEQE_CONS_IDX_REG;
4360 eq->entries = hr_dev->caps.aeqe_depth;
4361 eq->log_entries = ilog2(eq->entries);
4362 eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
4363 }
4364 }
4365
4366 /* Disable irq */
4367 hns_roce_v1_int_mask_enable(hr_dev);
4368
4369 /* Configure ce int interval */
4370 roce_write(hr_dev, ROCEE_CAEP_CE_INTERVAL_CFG_REG,
4371 HNS_ROCE_CEQ_DEFAULT_INTERVAL);
4372
4373 /* Configure ce int burst num */
4374 roce_write(hr_dev, ROCEE_CAEP_CE_BURST_NUM_CFG_REG,
4375 HNS_ROCE_CEQ_DEFAULT_BURST_NUM);
4376
4377 for (i = 0; i < eq_num; i++) {
4378 ret = hns_roce_v1_create_eq(hr_dev, &eq_table->eq[i]);
4379 if (ret) {
4380 dev_err(dev, "eq create failed\n");
4381 goto err_create_eq_fail;
4382 }
4383 }
4384
4385 for (j = 0; j < irq_num; j++) {
4386 if (j < eq_num)
4387 ret = request_irq(hr_dev->irq[j],
4388 hns_roce_v1_msix_interrupt_eq, 0,
4389 hr_dev->irq_names[j],
4390 &eq_table->eq[j]);
4391 else
4392 ret = request_irq(hr_dev->irq[j],
4393 hns_roce_v1_msix_interrupt_abn, 0,
4394 hr_dev->irq_names[j], hr_dev);
4395
4396 if (ret) {
4397 dev_err(dev, "request irq error!\n");
4398 goto err_request_irq_fail;
4399 }
4400 }
4401
4402 for (i = 0; i < eq_num; i++)
4403 hns_roce_v1_enable_eq(hr_dev, i, EQ_ENABLE);
4404
4405 return 0;
4406
4407 err_request_irq_fail:
4408 for (j -= 1; j >= 0; j--)
4409 free_irq(hr_dev->irq[j], &eq_table->eq[j]);
4410
4411 err_create_eq_fail:
4412 for (i -= 1; i >= 0; i--)
4413 hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);
4414
4415 kfree(eq_table->eqc_base);
4416
4417 err_eqc_base_alloc_fail:
4418 kfree(eq_table->eq);
4419
4420 return ret;
4421 }
4422
4423 static void hns_roce_v1_cleanup_eq_table(struct hns_roce_dev *hr_dev)
4424 {
4425 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
4426 int irq_num;
4427 int eq_num;
4428 int i;
4429
4430 eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
4431 irq_num = eq_num + hr_dev->caps.num_other_vectors;
4432 for (i = 0; i < eq_num; i++) {
4433 /* Disable EQ */
4434 hns_roce_v1_enable_eq(hr_dev, i, EQ_DISABLE);
4435
4436 free_irq(hr_dev->irq[i], &eq_table->eq[i]);
4437
4438 hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);
4439 }
4440 for (i = eq_num; i < irq_num; i++)
4441 free_irq(hr_dev->irq[i], hr_dev);
4442
4443 kfree(eq_table->eqc_base);
4444 kfree(eq_table->eq);
4445 }
4446
4447 static const struct ib_device_ops hns_roce_v1_dev_ops = {
4448 .destroy_qp = hns_roce_v1_destroy_qp,
4449 .modify_cq = hns_roce_v1_modify_cq,
4450 .poll_cq = hns_roce_v1_poll_cq,
4451 .post_recv = hns_roce_v1_post_recv,
4452 .post_send = hns_roce_v1_post_send,
4453 .query_qp = hns_roce_v1_query_qp,
4454 .req_notify_cq = hns_roce_v1_req_notify_cq,
4455 };
4456
4457 static const struct hns_roce_hw hns_roce_hw_v1 = {
4458 .reset = hns_roce_v1_reset,
4459 .hw_profile = hns_roce_v1_profile,
4460 .hw_init = hns_roce_v1_init,
4461 .hw_exit = hns_roce_v1_exit,
4462 .post_mbox = hns_roce_v1_post_mbox,
4463 .chk_mbox = hns_roce_v1_chk_mbox,
4464 .set_gid = hns_roce_v1_set_gid,
4465 .set_mac = hns_roce_v1_set_mac,
4466 .set_mtu = hns_roce_v1_set_mtu,
4467 .write_mtpt = hns_roce_v1_write_mtpt,
4468 .write_cqc = hns_roce_v1_write_cqc,
4469 .modify_cq = hns_roce_v1_modify_cq,
4470 .clear_hem = hns_roce_v1_clear_hem,
4471 .modify_qp = hns_roce_v1_modify_qp,
4472 .query_qp = hns_roce_v1_query_qp,
4473 .destroy_qp = hns_roce_v1_destroy_qp,
4474 .post_send = hns_roce_v1_post_send,
4475 .post_recv = hns_roce_v1_post_recv,
4476 .req_notify_cq = hns_roce_v1_req_notify_cq,
4477 .poll_cq = hns_roce_v1_poll_cq,
4478 .dereg_mr = hns_roce_v1_dereg_mr,
4479 .destroy_cq = hns_roce_v1_destroy_cq,
4480 .init_eq = hns_roce_v1_init_eq_table,
4481 .cleanup_eq = hns_roce_v1_cleanup_eq_table,
4482 .hns_roce_dev_ops = &hns_roce_v1_dev_ops,
4483 };
4484
4485 static const struct of_device_id hns_roce_of_match[] = {
4486 { .compatible = "hisilicon,hns-roce-v1", .data = &hns_roce_hw_v1, },
4487 {},
4488 };
4489 MODULE_DEVICE_TABLE(of, hns_roce_of_match);
4490
4491 static const struct acpi_device_id hns_roce_acpi_match[] = {
4492 { "HISI00D1", (kernel_ulong_t)&hns_roce_hw_v1 },
4493 {},
4494 };
4495 MODULE_DEVICE_TABLE(acpi, hns_roce_acpi_match);
4496
4497 static struct
4498 platform_device *hns_roce_find_pdev(struct fwnode_handle *fwnode)
4499 {
4500 struct device *dev;
4501
4502 /* get the 'device' corresponding to the matching 'fwnode' */
4503 dev = bus_find_device_by_fwnode(&platform_bus_type, fwnode);
4504 /* get the platform device */
4505 return dev ? to_platform_device(dev) : NULL;
4506 }
4507
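/*
 * Collect platform configuration from DT or ACPI: the matching
 * hns_roce_hw ops, the mapped register base, the node GUID, the
 * netdevs behind each "eth-handle" port, and the interrupt names
 * and numbers.
 */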
4508 static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
4509 {
4510 struct device *dev = &hr_dev->pdev->dev;
4511 struct platform_device *pdev = NULL;
4512 struct net_device *netdev = NULL;
4513 struct device_node *net_node;
4514 int port_cnt = 0;
4515 u8 phy_port;
4516 int ret;
4517 int i;
4518
4519 /* check if we are compatible with the underlying SoC */
4520 if (dev_of_node(dev)) {
4521 const struct of_device_id *of_id;
4522
4523 of_id = of_match_node(hns_roce_of_match, dev->of_node);
4524 if (!of_id) {
4525 dev_err(dev, "device is not compatible!\n");
4526 return -ENXIO;
4527 }
4528 hr_dev->hw = (const struct hns_roce_hw *)of_id->data;
4529 if (!hr_dev->hw) {
4530 dev_err(dev, "couldn't get H/W specific DT data!\n");
4531 return -ENXIO;
4532 }
4533 } else if (is_acpi_device_node(dev->fwnode)) {
4534 const struct acpi_device_id *acpi_id;
4535
4536 acpi_id = acpi_match_device(hns_roce_acpi_match, dev);
4537 if (!acpi_id) {
4538 dev_err(dev, "device is not compatible!\n");
4539 return -ENXIO;
4540 }
4541 hr_dev->hw = (const struct hns_roce_hw *) acpi_id->driver_data;
4542 if (!hr_dev->hw) {
4543 dev_err(dev, "couldn't get H/W specific ACPI data!\n");
4544 return -ENXIO;
4545 }
4546 } else {
4547 dev_err(dev, "can't read compatibility data from DT or ACPI\n");
4548 return -ENXIO;
4549 }
4550
4551 /* get the mapped register base address */
4552 hr_dev->reg_base = devm_platform_ioremap_resource(hr_dev->pdev, 0);
4553 if (IS_ERR(hr_dev->reg_base))
4554 return PTR_ERR(hr_dev->reg_base);
4555
4556 /* read the node_guid of IB device from the DT or ACPI */
4557 ret = device_property_read_u8_array(dev, "node-guid",
4558 (u8 *)&hr_dev->ib_dev.node_guid,
4559 GUID_LEN);
4560 if (ret) {
4561 dev_err(dev, "couldn't get node_guid from DT or ACPI!\n");
4562 return ret;
4563 }
4564
4565 /* get the RoCE associated ethernet ports or netdevices */
4566 for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) {
4567 if (dev_of_node(dev)) {
4568 net_node = of_parse_phandle(dev->of_node, "eth-handle",
4569 i);
4570 if (!net_node)
4571 continue;
4572 pdev = of_find_device_by_node(net_node);
4573 } else if (is_acpi_device_node(dev->fwnode)) {
4574 struct fwnode_reference_args args;
4575
4576 ret = acpi_node_get_property_reference(dev->fwnode,
4577 "eth-handle",
4578 i, &args);
4579 if (ret)
4580 continue;
4581 pdev = hns_roce_find_pdev(args.fwnode);
4582 } else {
4583 dev_err(dev, "cannot read data from DT or ACPI\n");
4584 return -ENXIO;
4585 }
4586
4587 if (pdev) {
4588 netdev = platform_get_drvdata(pdev);
4589 phy_port = (u8)i;
4590 if (netdev) {
4591 hr_dev->iboe.netdevs[port_cnt] = netdev;
4592 hr_dev->iboe.phy_port[port_cnt] = phy_port;
4593 } else {
4594 dev_err(dev, "no netdev found with pdev %s\n",
4595 pdev->name);
4596 return -ENODEV;
4597 }
4598 port_cnt++;
4599 }
4600 }
4601
4602 if (port_cnt == 0) {
4603 dev_err(dev, "unable to get eth-handle for available ports!\n");
4604 return -EINVAL;
4605 }
4606
4607 hr_dev->caps.num_ports = port_cnt;
4608
4609 /* cmd issue mode: 0 is poll, 1 is event */
4610 hr_dev->cmd_mod = 1;
4611 hr_dev->loop_idc = 0;
4612 hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
4613 hr_dev->odb_offset = ROCEE_DB_OTHERS_L_0_REG;
4614
4615 /* read the interrupt names from the DT or ACPI */
4616 ret = device_property_read_string_array(dev, "interrupt-names",
4617 hr_dev->irq_names,
4618 HNS_ROCE_V1_MAX_IRQ_NUM);
4619 if (ret < 0) {
4620 dev_err(dev, "couldn't get interrupt names from DT or ACPI!\n");
4621 return ret;
4622 }
4623
4624 /* fetch the interrupt numbers */
4625 for (i = 0; i < HNS_ROCE_V1_MAX_IRQ_NUM; i++) {
4626 hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i);
4627 if (hr_dev->irq[i] <= 0)
4628 return -EINVAL;
4629 }
4630
4631 return 0;
4632 }
4633
4634 /**
4635 * hns_roce_probe - RoCE platform driver probe entry point
4636 * @pdev: pointer to platform device
4637 *
4638 * Return: 0 on success, a negative errno otherwise
4639 */
4640 static int hns_roce_probe(struct platform_device *pdev)
4641 {
4642 int ret;
4643 struct hns_roce_dev *hr_dev;
4644 struct device *dev = &pdev->dev;
4645
4646 hr_dev = ib_alloc_device(hns_roce_dev, ib_dev);
4647 if (!hr_dev)
4648 return -ENOMEM;
4649
4650 hr_dev->priv = kzalloc(sizeof(struct hns_roce_v1_priv), GFP_KERNEL);
4651 if (!hr_dev->priv) {
4652 ret = -ENOMEM;
4653 goto error_failed_kzalloc;
4654 }
4655
4656 hr_dev->pdev = pdev;
4657 hr_dev->dev = dev;
4658 platform_set_drvdata(pdev, hr_dev);
4659
4660 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64ULL)) &&
4661 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32ULL))) {
4662 dev_err(dev, "Not usable DMA addressing mode\n");
4663 ret = -EIO;
4664 goto error_failed_get_cfg;
4665 }
4666
4667 ret = hns_roce_get_cfg(hr_dev);
4668 if (ret) {
4669 dev_err(dev, "Get Configuration failed!\n");
4670 goto error_failed_get_cfg;
4671 }
4672
4673 ret = hns_roce_init(hr_dev);
4674 if (ret) {
4675 dev_err(dev, "RoCE engine init failed!\n");
4676 goto error_failed_get_cfg;
4677 }
4678
4679 return 0;
4680
4681 error_failed_get_cfg:
4682 kfree(hr_dev->priv);
4683
4684 error_failed_kzalloc:
4685 ib_dealloc_device(&hr_dev->ib_dev);
4686
4687 return ret;
4688 }
4689
4690 /**
4691 * hns_roce_remove - remove RoCE device
4692 * @pdev: pointer to platform device
4693 */
4694 static int hns_roce_remove(struct platform_device *pdev)
4695 {
4696 struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev);
4697
4698 hns_roce_exit(hr_dev);
4699 kfree(hr_dev->priv);
4700 ib_dealloc_device(&hr_dev->ib_dev);
4701
4702 return 0;
4703 }
4704
4705 static struct platform_driver hns_roce_driver = {
4706 .probe = hns_roce_probe,
4707 .remove = hns_roce_remove,
4708 .driver = {
4709 .name = DRV_NAME,
4710 .of_match_table = hns_roce_of_match,
4711 .acpi_match_table = ACPI_PTR(hns_roce_acpi_match),
4712 },
4713 };
4714
4715 module_platform_driver(hns_roce_driver);
4716
4717 MODULE_LICENSE("Dual BSD/GPL");
4718 MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
4719 MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
4720 MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
4721 MODULE_DESCRIPTION("Hisilicon Hip06 Family RoCE Driver");
4722