/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/of.h>
#include <rdma/ib_umem.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v1.h"

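/*
 * The helpers below fill individual segments of a send WQE. For reference,
 * a hypothetical single-SGE RDMA write WQE in the v1 engine occupies
 * consecutive segments of the work queue buffer:
 *
 *	struct hns_roce_wqe_ctrl_seg  ctrl;	- flags, opcode, msg_length
 *	struct hns_roce_wqe_raddr_seg rseg;	- remote address and rkey
 *	struct hns_roce_wqe_data_seg  dseg[];	- local addr/lkey/len per SGE
 *
 * All fields are stored little-endian, hence the cpu_to_le32/64 conversions.
 */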
static void set_data_seg(struct hns_roce_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->lkey = cpu_to_le32(sg->lkey);
	dseg->addr = cpu_to_le64(sg->addr);
	dseg->len = cpu_to_le32(sg->length);
}

static void set_raddr_seg(struct hns_roce_wqe_raddr_seg *rseg, u64 remote_addr,
			  u32 rkey)
{
	rseg->raddr = cpu_to_le64(remote_addr);
	rseg->rkey = cpu_to_le32(rkey);
	rseg->len = 0;
}

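/*
 * Usage sketch (illustrative only): posting a zero-length RDMA write on an
 * RC QP, as hns_roce_v1_send_lp_wqe() below does when flushing a memory
 * region:
 *
 *	struct ib_send_wr wr = {}, *bad_wr;
 *
 *	wr.opcode = IB_WR_RDMA_WRITE;
 *	wr.wr_id = (uintptr_t)&wr;
 *	ret = hns_roce_v1_post_send(ibqp, &wr, &bad_wr);
 *
 * On failure *bad_wr points at the first work request that could not be
 * queued; the requests before it were posted successfully.
 */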
int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
	struct hns_roce_ud_send_wqe *ud_sq_wqe = NULL;
	struct hns_roce_wqe_ctrl_seg *ctrl = NULL;
	struct hns_roce_wqe_data_seg *dseg = NULL;
	struct hns_roce_qp *qp = to_hr_qp(ibqp);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_sq_db sq_db;
	int ps_opcode = 0, i = 0;
	unsigned long flags = 0;
	void *wqe = NULL;
	u32 doorbell[2];
	int nreq = 0;
	u32 ind = 0;
	int ret = 0;
	u8 *smac;
	int loopback;

	if (unlikely(ibqp->qp_type != IB_QPT_GSI &&
		     ibqp->qp_type != IB_QPT_RC)) {
		dev_err(dev, "unsupported QP type\n");
		*bad_wr = NULL;
		return -EOPNOTSUPP;
	}

	spin_lock_irqsave(&qp->sq.lock, flags);
	ind = qp->sq_next_wqe;
	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
			dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
				wr->num_sge, qp->sq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
		qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
			wr->wr_id;

		/* Process UD (GSI) and RC WQEs separately */
		if (ibqp->qp_type == IB_QPT_GSI) {
			ud_sq_wqe = wqe;
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_0_M,
				       UD_SEND_WQE_U32_4_DMAC_0_S,
				       ah->av.mac[0]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_1_M,
				       UD_SEND_WQE_U32_4_DMAC_1_S,
				       ah->av.mac[1]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_2_M,
				       UD_SEND_WQE_U32_4_DMAC_2_S,
				       ah->av.mac[2]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_3_M,
				       UD_SEND_WQE_U32_4_DMAC_3_S,
				       ah->av.mac[3]);

			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_DMAC_4_M,
				       UD_SEND_WQE_U32_8_DMAC_4_S,
				       ah->av.mac[4]);
			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_DMAC_5_M,
				       UD_SEND_WQE_U32_8_DMAC_5_S,
				       ah->av.mac[5]);

			smac = (u8 *)hr_dev->dev_addr[qp->port];
			loopback = ether_addr_equal_unaligned(ah->av.mac,
							      smac) ? 1 : 0;
			roce_set_bit(ud_sq_wqe->u32_8,
				     UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S,
				     loopback);

			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_OPERATION_TYPE_M,
				       UD_SEND_WQE_U32_8_OPERATION_TYPE_S,
				       HNS_ROCE_WQE_OPCODE_SEND);
			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_M,
				       UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S,
				       2);
			roce_set_bit(ud_sq_wqe->u32_8,
				     UD_SEND_WQE_U32_8_SEND_GL_ROUTING_HDR_FLAG_S,
				     1);

			ud_sq_wqe->u32_8 |= (wr->send_flags & IB_SEND_SIGNALED ?
				cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
				(wr->send_flags & IB_SEND_SOLICITED ?
				cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
				((wr->opcode == IB_WR_SEND_WITH_IMM) ?
				cpu_to_le32(HNS_ROCE_WQE_IMM) : 0);

			roce_set_field(ud_sq_wqe->u32_16,
				       UD_SEND_WQE_U32_16_DEST_QP_M,
				       UD_SEND_WQE_U32_16_DEST_QP_S,
				       ud_wr(wr)->remote_qpn);
			roce_set_field(ud_sq_wqe->u32_16,
				       UD_SEND_WQE_U32_16_MAX_STATIC_RATE_M,
				       UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S,
				       ah->av.stat_rate);

			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_FLOW_LABEL_M,
				       UD_SEND_WQE_U32_36_FLOW_LABEL_S, 0);
			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_PRIORITY_M,
				       UD_SEND_WQE_U32_36_PRIORITY_S,
				       ah->av.sl_tclass_flowlabel >>
				       HNS_ROCE_SL_SHIFT);
			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_SGID_INDEX_M,
				       UD_SEND_WQE_U32_36_SGID_INDEX_S,
				       hns_get_gid_index(hr_dev, qp->phy_port,
							 ah->av.gid_index));

			roce_set_field(ud_sq_wqe->u32_40,
				       UD_SEND_WQE_U32_40_HOP_LIMIT_M,
				       UD_SEND_WQE_U32_40_HOP_LIMIT_S,
				       ah->av.hop_limit);
			roce_set_field(ud_sq_wqe->u32_40,
				       UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M,
				       UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S, 0);

			memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN);

			ud_sq_wqe->va0_l = (u32)wr->sg_list[0].addr;
			ud_sq_wqe->va0_h = (wr->sg_list[0].addr) >> 32;
			ud_sq_wqe->l_key0 = wr->sg_list[0].lkey;

			ud_sq_wqe->va1_l = (u32)wr->sg_list[1].addr;
			ud_sq_wqe->va1_h = (wr->sg_list[1].addr) >> 32;
			ud_sq_wqe->l_key1 = wr->sg_list[1].lkey;
			ind++;
		} else if (ibqp->qp_type == IB_QPT_RC) {
			ctrl = wqe;
			memset(ctrl, 0, sizeof(struct hns_roce_wqe_ctrl_seg));
			for (i = 0; i < wr->num_sge; i++)
				ctrl->msg_length += wr->sg_list[i].length;

			ctrl->sgl_pa_h = 0;
			ctrl->flag = 0;
			ctrl->imm_data = send_ieth(wr);

			/* Ctrl field: signaled, solicited, immediate and fence bits */
			/* The SO (strong order) bit is left unset until a use case needs it */
			ctrl->flag |= (wr->send_flags & IB_SEND_SIGNALED ?
				      cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
				      (wr->send_flags & IB_SEND_SOLICITED ?
				      cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
				      ((wr->opcode == IB_WR_SEND_WITH_IMM ||
				      wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) ?
				      cpu_to_le32(HNS_ROCE_WQE_IMM) : 0) |
				      (wr->send_flags & IB_SEND_FENCE ?
				      (cpu_to_le32(HNS_ROCE_WQE_FENCE)) : 0);

			wqe += sizeof(struct hns_roce_wqe_ctrl_seg);

			switch (wr->opcode) {
			case IB_WR_RDMA_READ:
				ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_READ;
				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				break;
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_WRITE;
				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				break;
			case IB_WR_SEND:
			case IB_WR_SEND_WITH_INV:
			case IB_WR_SEND_WITH_IMM:
				ps_opcode = HNS_ROCE_WQE_OPCODE_SEND;
				break;
			case IB_WR_LOCAL_INV:
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
			case IB_WR_LSO:
			default:
				ps_opcode = HNS_ROCE_WQE_OPCODE_MASK;
				break;
			}
			ctrl->flag |= cpu_to_le32(ps_opcode);
			wqe += sizeof(struct hns_roce_wqe_raddr_seg);

			dseg = wqe;
			if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
				if (ctrl->msg_length >
				    hr_dev->caps.max_sq_inline) {
					ret = -EINVAL;
					*bad_wr = wr;
					dev_err(dev, "inline len(1-%d)=%d, illegal\n",
						hr_dev->caps.max_sq_inline,
						ctrl->msg_length);
					goto out;
				}
				for (i = 0; i < wr->num_sge; i++) {
					memcpy(wqe, ((void *) (uintptr_t)
					       wr->sg_list[i].addr),
					       wr->sg_list[i].length);
					wqe += wr->sg_list[i].length;
				}
				ctrl->flag |= HNS_ROCE_WQE_INLINE;
			} else {
				/* Fill one data segment per SGE */
				for (i = 0; i < wr->num_sge; i++)
					set_data_seg(dseg + i, wr->sg_list + i);

				ctrl->flag |= cpu_to_le32(wr->num_sge <<
					      HNS_ROCE_WQE_SGE_NUM_BIT);
			}
			ind++;
		}
	}

out:
	/* Ring the send queue doorbell */
	if (likely(nreq)) {
		qp->sq.head += nreq;
		/* Make the WQE writes visible before ringing the doorbell */
		wmb();

		sq_db.u32_4 = 0;
		sq_db.u32_8 = 0;
		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SQ_HEAD_M,
			       SQ_DOORBELL_U32_4_SQ_HEAD_S,
			       (qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1)));
		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SL_M,
			       SQ_DOORBELL_U32_4_SL_S, qp->sl);
		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_PORT_M,
			       SQ_DOORBELL_U32_4_PORT_S, qp->phy_port);
		roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M,
			       SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn);
		roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1);

		doorbell[0] = sq_db.u32_4;
		doorbell[1] = sq_db.u32_8;

		hns_roce_write64_k(doorbell, qp->sq.db_reg_l);
		qp->sq_next_wqe = ind;
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return ret;
}

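/*
 * A receive WQE in the v1 engine is a hns_roce_rq_wqe_ctrl header, whose
 * rwqe_byte_12 field records the SGE count, immediately followed by the
 * data segments; this is why the code below takes the scatter list as
 * (ctrl + 1). Sketch of the layout:
 *
 *	struct hns_roce_rq_wqe_ctrl  ctrl;	- SGE count
 *	struct hns_roce_wqe_data_seg scat[];	- one entry per receive SGE
 */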
int hns_roce_v1_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr)
{
	int ret = 0;
	int nreq = 0;
	int ind = 0;
	int i = 0;
	u32 reg_val = 0;
	unsigned long flags = 0;
	struct hns_roce_rq_wqe_ctrl *ctrl = NULL;
	struct hns_roce_wqe_data_seg *scat = NULL;
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_rq_db rq_db;
	uint32_t doorbell[2] = {0};

	spin_lock_irqsave(&hr_qp->rq.lock, flags);
	ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
					 hr_qp->ibqp.recv_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
			dev_err(dev, "rq: num_sge=%d > hr_qp->rq.max_gs=%d\n",
				wr->num_sge, hr_qp->rq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		ctrl = get_recv_wqe(hr_qp, ind);

		roce_set_field(ctrl->rwqe_byte_12,
			       RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M,
			       RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S,
			       wr->num_sge);

		scat = (struct hns_roce_wqe_data_seg *)(ctrl + 1);

		for (i = 0; i < wr->num_sge; i++)
			set_data_seg(scat + i, wr->sg_list + i);

		hr_qp->rq.wrid[ind] = wr->wr_id;

		ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
	}

out:
	if (likely(nreq)) {
		hr_qp->rq.head += nreq;
		/* Make the WQE writes visible before updating the head */
		wmb();

		if (ibqp->qp_type == IB_QPT_GSI) {
			/* Software updates the GSI RQ head via register write */
			reg_val = roce_read(to_hr_dev(ibqp->device),
					    ROCEE_QP1C_CFG3_0_REG +
					    QP1C_CFGN_OFFSET * hr_qp->phy_port);
			roce_set_field(reg_val,
				       ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M,
				       ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S,
				       hr_qp->rq.head);
			roce_write(to_hr_dev(ibqp->device),
				   ROCEE_QP1C_CFG3_0_REG +
				   QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val);
		} else {
			rq_db.u32_4 = 0;
			rq_db.u32_8 = 0;

			roce_set_field(rq_db.u32_4, RQ_DOORBELL_U32_4_RQ_HEAD_M,
				       RQ_DOORBELL_U32_4_RQ_HEAD_S,
				       hr_qp->rq.head);
			roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_QPN_M,
				       RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
			roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_CMD_M,
				       RQ_DOORBELL_U32_8_CMD_S, 1);
			roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S,
				     1);

			doorbell[0] = rq_db.u32_4;
			doorbell[1] = rq_db.u32_8;

			hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
		}
	}
	spin_unlock_irqrestore(&hr_qp->rq.lock, flags);

	return ret;
}

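/*
 * The helpers below program the doorbell (DB) behaviour of the v1 engine.
 * Both the send doorbell (SDB) and the "others" doorbell (ODB) have two
 * independent settings: event vs. poll mode, and normal vs. extended mode.
 * In extended mode the doorbells are staged in a driver-allocated DMA
 * buffer (see hns_roce_db_ext_init() below); hns_roce_db_init() selects
 * the default combination.
 */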
static void hns_roce_set_db_event_mode(struct hns_roce_dev *hr_dev,
				       int sdb_mode, int odb_mode)
{
	u32 val;

	val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
	roce_set_bit(val, ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S, sdb_mode);
	roce_set_bit(val, ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S, odb_mode);
	roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
}

static void hns_roce_set_db_ext_mode(struct hns_roce_dev *hr_dev, u32 sdb_mode,
				     u32 odb_mode)
{
	u32 val;

	/* Configure SDB/ODB extend mode */
	val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
	roce_set_bit(val, ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S, sdb_mode);
	roce_set_bit(val, ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S, odb_mode);
	roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
}

static void hns_roce_set_sdb(struct hns_roce_dev *hr_dev, u32 sdb_alept,
			     u32 sdb_alful)
{
	u32 val;

	/* Configure SDB */
	val = roce_read(hr_dev, ROCEE_DB_SQ_WL_REG);
	roce_set_field(val, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M,
		       ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S, sdb_alful);
	roce_set_field(val, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M,
		       ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S, sdb_alept);
	roce_write(hr_dev, ROCEE_DB_SQ_WL_REG, val);
}

static void hns_roce_set_odb(struct hns_roce_dev *hr_dev, u32 odb_alept,
			     u32 odb_alful)
{
	u32 val;

	/* Configure ODB */
	val = roce_read(hr_dev, ROCEE_DB_OTHERS_WL_REG);
	roce_set_field(val, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M,
		       ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S, odb_alful);
	roce_set_field(val, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M,
		       ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S, odb_alept);
	roce_write(hr_dev, ROCEE_DB_OTHERS_WL_REG, val);
}

static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
				 u32 ext_sdb_alful)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;
	dma_addr_t sdb_dma_addr;
	u32 val;

	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
	db = &priv->db_table;

	/* Configure extend SDB threshold */
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_EMPTY_REG, ext_sdb_alept);
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_REG, ext_sdb_alful);

	/* Configure extend SDB base addr */
	sdb_dma_addr = db->ext_db->sdb_buf_list->map;
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_REG, (u32)(sdb_dma_addr >> 12));

	/* Configure extend SDB depth */
	val = roce_read(hr_dev, ROCEE_EXT_DB_SQ_H_REG);
	roce_set_field(val, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M,
		       ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S,
		       db->ext_db->esdb_dep);
	/*
	 * 44 = 32 + 12. When handing the address to hardware, shift by 12
	 * because of the 4K page size, and by another 32 to obtain the high
	 * 32 bits of the value the hardware expects.
	 */
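	/*
	 * Worked example with an illustrative address: for
	 * sdb_dma_addr = 0x23456789000, bits [43:12] (0x23456789) were
	 * written to ROCEE_EXT_DB_SQ_REG above, and bits [63:44] (zero
	 * here) land in the EXT_DB_SQ_BA_H field below.
	 */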
	roce_set_field(val, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M,
		       ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S, sdb_dma_addr >> 44);
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_H_REG, val);

	dev_dbg(dev, "ext SDB depth: 0x%x\n", db->ext_db->esdb_dep);
	dev_dbg(dev, "ext SDB threshold: empty: 0x%x, full: 0x%x\n",
		ext_sdb_alept, ext_sdb_alful);
}

static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept,
				 u32 ext_odb_alful)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;
	dma_addr_t odb_dma_addr;
	u32 val;

	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
	db = &priv->db_table;

	/* Configure extend ODB threshold */
	roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_EMPTY_REG, ext_odb_alept);
	roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_REG, ext_odb_alful);

	/* Configure extend ODB base addr */
	odb_dma_addr = db->ext_db->odb_buf_list->map;
	roce_write(hr_dev, ROCEE_EXT_DB_OTH_REG, (u32)(odb_dma_addr >> 12));

	/* Configure extend ODB depth */
	val = roce_read(hr_dev, ROCEE_EXT_DB_OTH_H_REG);
	roce_set_field(val, ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M,
		       ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S,
		       db->ext_db->eodb_dep);
	/* The BA_H field takes the high bits of the base address, as for SDB */
	roce_set_field(val, ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M,
		       ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S,
		       odb_dma_addr >> 44);
	roce_write(hr_dev, ROCEE_EXT_DB_OTH_H_REG, val);

	dev_dbg(dev, "ext ODB depth: 0x%x\n", db->ext_db->eodb_dep);
	dev_dbg(dev, "ext ODB threshold: empty: 0x%x, full: 0x%x\n",
		ext_odb_alept, ext_odb_alful);
}

static int hns_roce_db_ext_init(struct hns_roce_dev *hr_dev, u32 sdb_ext_mod,
				u32 odb_ext_mod)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;
	dma_addr_t sdb_dma_addr;
	dma_addr_t odb_dma_addr;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
	db = &priv->db_table;

	db->ext_db = kmalloc(sizeof(*db->ext_db), GFP_KERNEL);
	if (!db->ext_db)
		return -ENOMEM;

	if (sdb_ext_mod) {
		db->ext_db->sdb_buf_list = kmalloc(
				sizeof(*db->ext_db->sdb_buf_list), GFP_KERNEL);
		if (!db->ext_db->sdb_buf_list) {
			ret = -ENOMEM;
			goto ext_sdb_buf_fail_out;
		}

		db->ext_db->sdb_buf_list->buf = dma_alloc_coherent(dev,
				HNS_ROCE_V1_EXT_SDB_SIZE,
				&sdb_dma_addr, GFP_KERNEL);
		if (!db->ext_db->sdb_buf_list->buf) {
			ret = -ENOMEM;
			goto alloc_sq_db_buf_fail;
		}
		db->ext_db->sdb_buf_list->map = sdb_dma_addr;

		db->ext_db->esdb_dep = ilog2(HNS_ROCE_V1_EXT_SDB_DEPTH);
		hns_roce_set_sdb_ext(hr_dev, HNS_ROCE_V1_EXT_SDB_ALEPT,
				     HNS_ROCE_V1_EXT_SDB_ALFUL);
	} else {
		hns_roce_set_sdb(hr_dev, HNS_ROCE_V1_SDB_ALEPT,
				 HNS_ROCE_V1_SDB_ALFUL);
	}

	if (odb_ext_mod) {
		db->ext_db->odb_buf_list = kmalloc(
				sizeof(*db->ext_db->odb_buf_list), GFP_KERNEL);
		if (!db->ext_db->odb_buf_list) {
			ret = -ENOMEM;
			goto ext_odb_buf_fail_out;
		}

		db->ext_db->odb_buf_list->buf = dma_alloc_coherent(dev,
				HNS_ROCE_V1_EXT_ODB_SIZE,
				&odb_dma_addr, GFP_KERNEL);
		if (!db->ext_db->odb_buf_list->buf) {
			ret = -ENOMEM;
			goto alloc_otr_db_buf_fail;
		}
		db->ext_db->odb_buf_list->map = odb_dma_addr;

		db->ext_db->eodb_dep = ilog2(HNS_ROCE_V1_EXT_ODB_DEPTH);
		hns_roce_set_odb_ext(hr_dev, HNS_ROCE_V1_EXT_ODB_ALEPT,
				     HNS_ROCE_V1_EXT_ODB_ALFUL);
	} else {
		hns_roce_set_odb(hr_dev, HNS_ROCE_V1_ODB_ALEPT,
				 HNS_ROCE_V1_ODB_ALFUL);
	}

	hns_roce_set_db_ext_mode(hr_dev, sdb_ext_mod, odb_ext_mod);

	return 0;

alloc_otr_db_buf_fail:
	kfree(db->ext_db->odb_buf_list);

ext_odb_buf_fail_out:
	if (sdb_ext_mod) {
		dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
				  db->ext_db->sdb_buf_list->buf,
				  db->ext_db->sdb_buf_list->map);
	}

alloc_sq_db_buf_fail:
	if (sdb_ext_mod)
		kfree(db->ext_db->sdb_buf_list);

ext_sdb_buf_fail_out:
	kfree(db->ext_db);
	return ret;
}

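/*
 * MR free on this hardware relies on a set of reserved loopback RC QPs
 * spread across the physical ports and service levels: when an MR is
 * destroyed, a zero-length RDMA write is posted on each reserved QP and
 * the shared CQ is polled (see hns_roce_v1_mr_free_work_fn() below), which
 * flushes outstanding access to the region before its pages are released.
 * The functions that follow create, drive and tear down these reserved
 * resources.
 */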
static struct hns_roce_qp *hns_roce_v1_create_lp_qp(struct hns_roce_dev *hr_dev,
						    struct ib_pd *pd)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct ib_qp_init_attr init_attr;
	struct ib_qp *qp;

	memset(&init_attr, 0, sizeof(struct ib_qp_init_attr));
	init_attr.qp_type = IB_QPT_RC;
	init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr.cap.max_recv_wr = HNS_ROCE_MIN_WQE_NUM;
	init_attr.cap.max_send_wr = HNS_ROCE_MIN_WQE_NUM;

	qp = hns_roce_create_qp(pd, &init_attr, NULL);
	if (IS_ERR(qp)) {
		dev_err(dev, "Create loop qp for mr free failed!\n");
		return NULL;
	}

	return to_hr_qp(qp);
}

static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_caps *caps = &hr_dev->caps;
	struct device *dev = &hr_dev->pdev->dev;
	struct ib_cq_init_attr cq_init_attr;
	struct hns_roce_free_mr *free_mr;
	struct ib_qp_attr attr = { 0 };
	struct hns_roce_v1_priv *priv;
	struct hns_roce_qp *hr_qp;
	struct ib_cq *cq;
	struct ib_pd *pd;
	union ib_gid dgid;
	u64 subnet_prefix;
	int attr_mask = 0;
	int i, j;
	int ret;
	u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 };
	u8 phy_port;
	u8 port = 0;
	u8 sl;

	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
	free_mr = &priv->free_mr;

	/* Reserve a CQ for the loopback QPs */
	cq_init_attr.cqe = HNS_ROCE_MIN_WQE_NUM * 2;
	cq_init_attr.comp_vector = 0;
	cq = hns_roce_ib_create_cq(&hr_dev->ib_dev, &cq_init_attr, NULL, NULL);
	if (IS_ERR(cq)) {
		dev_err(dev, "Create cq for reserved loop qp failed!\n");
		return -ENOMEM;
	}
	free_mr->mr_free_cq = to_hr_cq(cq);
	free_mr->mr_free_cq->ib_cq.device = &hr_dev->ib_dev;
	free_mr->mr_free_cq->ib_cq.uobject = NULL;
	free_mr->mr_free_cq->ib_cq.comp_handler = NULL;
	free_mr->mr_free_cq->ib_cq.event_handler = NULL;
	free_mr->mr_free_cq->ib_cq.cq_context = NULL;
	atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0);

	pd = hns_roce_alloc_pd(&hr_dev->ib_dev, NULL, NULL);
	if (IS_ERR(pd)) {
		dev_err(dev, "Create pd for reserved loop qp failed!\n");
		ret = -ENOMEM;
		goto alloc_pd_failed;
	}
	free_mr->mr_free_pd = to_hr_pd(pd);
	free_mr->mr_free_pd->ibpd.device = &hr_dev->ib_dev;
	free_mr->mr_free_pd->ibpd.uobject = NULL;
	atomic_set(&free_mr->mr_free_pd->ibpd.usecnt, 0);

	attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE;
	attr.pkey_index = 0;
	attr.min_rnr_timer = 0;
	/* Disable read ability */
	attr.max_dest_rd_atomic = 0;
	attr.max_rd_atomic = 0;
	/* Use arbitrary values as rq_psn and sq_psn */
	attr.rq_psn = 0x0808;
	attr.sq_psn = 0x0808;
	attr.retry_cnt = 7;
	attr.rnr_retry = 7;
	attr.timeout = 0x12;
	attr.path_mtu = IB_MTU_256;
	attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0);
	rdma_ah_set_static_rate(&attr.ah_attr, 3);

	subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
		phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
				(i % HNS_ROCE_MAX_PORTS);
		sl = i / HNS_ROCE_MAX_PORTS;

		for (j = 0; j < caps->num_ports; j++) {
			if (hr_dev->iboe.phy_port[j] == phy_port) {
				queue_en[i] = 1;
				port = j;
				break;
			}
		}

		if (!queue_en[i])
			continue;

		free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
		if (!free_mr->mr_free_qp[i]) {
			dev_err(dev, "Create loop qp failed!\n");
			goto create_lp_qp_failed;
		}
		hr_qp = free_mr->mr_free_qp[i];

		hr_qp->port = port;
		hr_qp->phy_port = phy_port;
		hr_qp->ibqp.qp_type = IB_QPT_RC;
		hr_qp->ibqp.device = &hr_dev->ib_dev;
		hr_qp->ibqp.uobject = NULL;
		atomic_set(&hr_qp->ibqp.usecnt, 0);
		hr_qp->ibqp.pd = pd;
		hr_qp->ibqp.recv_cq = cq;
		hr_qp->ibqp.send_cq = cq;

		rdma_ah_set_port_num(&attr.ah_attr, port + 1);
		rdma_ah_set_sl(&attr.ah_attr, sl);
		attr.port_num = port + 1;

		attr.dest_qp_num = hr_qp->qpn;
		memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr),
		       hr_dev->dev_addr[port],
		       MAC_ADDR_OCTET_NUM);

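		/*
		 * Build a link-local destination GID from the port MAC with
		 * the modified EUI-64 rule; e.g. a hypothetical MAC
		 * 00:11:22:33:44:55 yields fe80::0211:22ff:fe33:4455.
		 */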
		memcpy(&dgid.raw, &subnet_prefix, sizeof(u64));
		memcpy(&dgid.raw[8], hr_dev->dev_addr[port], 3);
		memcpy(&dgid.raw[13], hr_dev->dev_addr[port] + 3, 3);
		dgid.raw[11] = 0xff;
		dgid.raw[12] = 0xfe;
		dgid.raw[8] ^= 2;
		rdma_ah_set_dgid_raw(&attr.ah_attr, dgid.raw);

		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
					    IB_QPS_RESET, IB_QPS_INIT);
		if (ret) {
			dev_err(dev, "modify qp failed(%d)!\n", ret);
			goto create_lp_qp_failed;
		}

		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
					    IB_QPS_INIT, IB_QPS_RTR);
		if (ret) {
			dev_err(dev, "modify qp failed(%d)!\n", ret);
			goto create_lp_qp_failed;
		}

		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
					    IB_QPS_RTR, IB_QPS_RTS);
		if (ret) {
			dev_err(dev, "modify qp failed(%d)!\n", ret);
			goto create_lp_qp_failed;
		}
	}

	return 0;

create_lp_qp_failed:
	for (i -= 1; i >= 0; i--) {
		hr_qp = free_mr->mr_free_qp[i];
		if (hns_roce_v1_destroy_qp(&hr_qp->ibqp))
			dev_err(dev, "Destroy qp %d for mr free failed!\n", i);
	}

	if (hns_roce_dealloc_pd(pd))
		dev_err(dev, "Destroy pd for create_lp_qp failed!\n");

alloc_pd_failed:
	if (hns_roce_ib_destroy_cq(cq))
		dev_err(dev, "Destroy cq for create_lp_qp failed!\n");

	return -EINVAL;
}

static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_qp *hr_qp;
	int ret;
	int i;

	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
	free_mr = &priv->free_mr;

	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
		hr_qp = free_mr->mr_free_qp[i];
		if (!hr_qp)
			continue;

		ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp);
		if (ret)
			dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n",
				i, ret);
	}

	ret = hns_roce_ib_destroy_cq(&free_mr->mr_free_cq->ib_cq);
	if (ret)
		dev_err(dev, "Destroy cq for mr_free failed(%d)!\n", ret);

	ret = hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd);
	if (ret)
		dev_err(dev, "Destroy pd for mr_free failed(%d)!\n", ret);
}

static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;
	u32 sdb_ext_mod;
	u32 odb_ext_mod;
	u32 sdb_evt_mod;
	u32 odb_evt_mod;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
	db = &priv->db_table;

	memset(db, 0, sizeof(*db));

	/* Default DB mode */
	sdb_ext_mod = HNS_ROCE_SDB_EXTEND_MODE;
	odb_ext_mod = HNS_ROCE_ODB_EXTEND_MODE;
	sdb_evt_mod = HNS_ROCE_SDB_NORMAL_MODE;
	odb_evt_mod = HNS_ROCE_ODB_POLL_MODE;

	db->sdb_ext_mod = sdb_ext_mod;
	db->odb_ext_mod = odb_ext_mod;

	/* Init extended DB */
	ret = hns_roce_db_ext_init(hr_dev, sdb_ext_mod, odb_ext_mod);
	if (ret) {
		dev_err(dev, "Failed in extended DB configuration.\n");
		return ret;
	}

	hns_roce_set_db_event_mode(hr_dev, sdb_evt_mod, odb_evt_mod);

	return 0;
}

void hns_roce_v1_recreate_lp_qp_work_fn(struct work_struct *work)
{
	struct hns_roce_recreate_lp_qp_work *lp_qp_work;
	struct hns_roce_dev *hr_dev;

	lp_qp_work = container_of(work, struct hns_roce_recreate_lp_qp_work,
				  work);
	hr_dev = to_hr_dev(lp_qp_work->ib_dev);

	hns_roce_v1_release_lp_qp(hr_dev);

	if (hns_roce_v1_rsv_lp_qp(hr_dev))
		dev_err(&hr_dev->pdev->dev, "create reserved qp failed\n");

	if (lp_qp_work->comp_flag)
		complete(lp_qp_work->comp);

	kfree(lp_qp_work);
}

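/*
 * The waiter below blocks on a stack-allocated completion with a timeout.
 * comp_flag tells the work function whether the waiter still exists: a
 * timed-out waiter clears it before returning, so the worker skips
 * complete() on a completion that may already be out of scope.
 */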
static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_recreate_lp_qp_work *lp_qp_work;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	struct completion comp;
	unsigned long end =
		msecs_to_jiffies(HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS) + jiffies;

	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
	free_mr = &priv->free_mr;

	lp_qp_work = kzalloc(sizeof(struct hns_roce_recreate_lp_qp_work),
			     GFP_KERNEL);
	if (!lp_qp_work)
		return -ENOMEM;

	INIT_WORK(&(lp_qp_work->work), hns_roce_v1_recreate_lp_qp_work_fn);

	lp_qp_work->ib_dev = &(hr_dev->ib_dev);
	lp_qp_work->comp = &comp;
	lp_qp_work->comp_flag = 1;

	init_completion(lp_qp_work->comp);

	queue_work(free_mr->free_mr_wq, &(lp_qp_work->work));

	while (time_before_eq(jiffies, end)) {
		if (try_wait_for_completion(&comp))
			return 0;
		msleep(HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE);
	}

	lp_qp_work->comp_flag = 0;
	if (try_wait_for_completion(&comp))
		return 0;

	dev_warn(dev, "recreate lp qp timed out after 20s!\n");
	return -ETIMEDOUT;
}

static int hns_roce_v1_send_lp_wqe(struct hns_roce_qp *hr_qp)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
	struct device *dev = &hr_dev->pdev->dev;
	struct ib_send_wr send_wr, *bad_wr;
	int ret;

	memset(&send_wr, 0, sizeof(send_wr));
	send_wr.next = NULL;
	send_wr.num_sge = 0;
	send_wr.send_flags = 0;
	send_wr.sg_list = NULL;
	send_wr.wr_id = (unsigned long long)&send_wr;
	send_wr.opcode = IB_WR_RDMA_WRITE;

	ret = hns_roce_v1_post_send(&hr_qp->ibqp, &send_wr, &bad_wr);
	if (ret) {
		dev_err(dev, "Post write wqe for mr free failed(%d)!\n", ret);
		return ret;
	}

	return 0;
}

static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
{
	struct hns_roce_mr_free_work *mr_work;
	struct ib_wc wc[HNS_ROCE_V1_RESV_QP];
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_cq *mr_free_cq;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_dev *hr_dev;
	struct hns_roce_mr *hr_mr;
	struct hns_roce_qp *hr_qp;
	struct device *dev;
	unsigned long end =
		msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
	int i;
	int ret;
	int ne = 0;

	mr_work = container_of(work, struct hns_roce_mr_free_work, work);
	hr_mr = (struct hns_roce_mr *)mr_work->mr;
	hr_dev = to_hr_dev(mr_work->ib_dev);
	dev = &hr_dev->pdev->dev;

	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
	free_mr = &priv->free_mr;
	mr_free_cq = free_mr->mr_free_cq;

	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
		hr_qp = free_mr->mr_free_qp[i];
		if (!hr_qp)
			continue;
		ne++;

		ret = hns_roce_v1_send_lp_wqe(hr_qp);
		if (ret) {
			dev_err(dev,
				"Send wqe (qp:0x%lx) for mr free failed(%d)!\n",
				hr_qp->qpn, ret);
			goto free_work;
		}
	}

	if (!ne) {
		dev_err(dev, "Reserved loop qp is absent!\n");
		goto free_work;
	}

	do {
		ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
		if (ret < 0) {
			dev_err(dev,
				"(qp:0x%lx) starts, Poll cqe failed(%d) for mr 0x%x free! Remain %d cqe\n",
				hr_qp->qpn, ret, hr_mr->key, ne);
			goto free_work;
		}
		ne -= ret;
		usleep_range(HNS_ROCE_V1_FREE_MR_WAIT_VALUE * 1000,
			     (1 + HNS_ROCE_V1_FREE_MR_WAIT_VALUE) * 1000);
	} while (ne && time_before_eq(jiffies, end));

	if (ne != 0)
		dev_err(dev,
			"Poll cqe for mr 0x%x free timeout! Remain %d cqe\n",
			hr_mr->key, ne);

free_work:
	if (mr_work->comp_flag)
		complete(mr_work->comp);
	kfree(mr_work);
}

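/*
 * MR deregistration flow: the MPT entry is first handed back from hardware
 * to software ownership (HW2SW_MPT), then a work item posts the loopback
 * writes (hns_roce_v1_mr_free_work_fn() above) to flush outstanding
 * access. Only after that work completes, or the 50s timeout expires, are
 * the PBL pages, the key bitmap slot and the umem released.
 */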
int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_mr_free_work *mr_work;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	struct completion comp;
	unsigned long end =
		msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
	unsigned long start = jiffies;
	int npages;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
	free_mr = &priv->free_mr;

	if (mr->enabled) {
		if (hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key)
				       & (hr_dev->caps.num_mtpts - 1)))
			dev_warn(dev, "HW2SW_MPT failed!\n");
	}

	mr_work = kzalloc(sizeof(*mr_work), GFP_KERNEL);
	if (!mr_work) {
		ret = -ENOMEM;
		goto free_mr;
	}

	INIT_WORK(&(mr_work->work), hns_roce_v1_mr_free_work_fn);

	mr_work->ib_dev = &(hr_dev->ib_dev);
	mr_work->comp = &comp;
	mr_work->comp_flag = 1;
	mr_work->mr = (void *)mr;
	init_completion(mr_work->comp);

	queue_work(free_mr->free_mr_wq, &(mr_work->work));

	while (time_before_eq(jiffies, end)) {
		if (try_wait_for_completion(&comp))
			goto free_mr;
		msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
	}

	mr_work->comp_flag = 0;
	if (try_wait_for_completion(&comp))
		goto free_mr;

	dev_warn(dev, "Free mr work 0x%x timed out after 50s!\n", mr->key);
	ret = -ETIMEDOUT;

free_mr:
	dev_dbg(dev, "Free mr 0x%x took 0x%x us.\n",
		mr->key, jiffies_to_usecs(jiffies) - jiffies_to_usecs(start));

	if (mr->size != ~0ULL) {
		npages = ib_umem_page_count(mr->umem);
		dma_free_coherent(dev, npages * 8, mr->pbl_buf,
				  mr->pbl_dma_addr);
	}

	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
			     key_to_hw_index(mr->key), 0);

	if (mr->umem)
		ib_umem_release(mr->umem);

	kfree(mr);

	return ret;
}

static void hns_roce_db_free(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;

	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
	db = &priv->db_table;

	if (db->sdb_ext_mod) {
		dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
				  db->ext_db->sdb_buf_list->buf,
				  db->ext_db->sdb_buf_list->map);
		kfree(db->ext_db->sdb_buf_list);
	}

	if (db->odb_ext_mod) {
		dma_free_coherent(dev, HNS_ROCE_V1_EXT_ODB_SIZE,
				  db->ext_db->odb_buf_list->buf,
				  db->ext_db->odb_buf_list->map);
		kfree(db->ext_db->odb_buf_list);
	}

	kfree(db->ext_db);
}

static int hns_roce_raq_init(struct hns_roce_dev *hr_dev)
{
	int ret;
	int raq_shift = 0;
	dma_addr_t addr;
	u32 val;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_raq_table *raq;
	struct device *dev = &hr_dev->pdev->dev;

	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
	raq = &priv->raq_table;

	raq->e_raq_buf = kzalloc(sizeof(*(raq->e_raq_buf)), GFP_KERNEL);
	if (!raq->e_raq_buf)
		return -ENOMEM;

	raq->e_raq_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_RAQ_SIZE,
						 &addr, GFP_KERNEL);
	if (!raq->e_raq_buf->buf) {
		ret = -ENOMEM;
		goto err_dma_alloc_raq;
	}
	raq->e_raq_buf->map = addr;

	/* Configure RAQ extended address, 48-bit, 4K-aligned */
	roce_write(hr_dev, ROCEE_EXT_RAQ_REG, raq->e_raq_buf->map >> 12);

	/* Configure raq_shift */
	raq_shift = ilog2(HNS_ROCE_V1_RAQ_SIZE / HNS_ROCE_V1_RAQ_ENTRY);
	val = roce_read(hr_dev, ROCEE_EXT_RAQ_H_REG);
	roce_set_field(val, ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M,
		       ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S, raq_shift);
	/*
	 * 44 = 32 + 12. When handing the address to hardware, shift by 12
	 * because of the 4K page size, and by another 32 to obtain the high
	 * 32 bits of the value the hardware expects.
	 */
	roce_set_field(val, ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M,
		       ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S,
		       raq->e_raq_buf->map >> 44);
	roce_write(hr_dev, ROCEE_EXT_RAQ_H_REG, val);
	dev_dbg(dev, "Configure raq_shift 0x%x.\n", val);

	/* Configure raq threshold */
	val = roce_read(hr_dev, ROCEE_RAQ_WL_REG);
	roce_set_field(val, ROCEE_RAQ_WL_ROCEE_RAQ_WL_M,
		       ROCEE_RAQ_WL_ROCEE_RAQ_WL_S,
		       HNS_ROCE_V1_EXT_RAQ_WF);
	roce_write(hr_dev, ROCEE_RAQ_WL_REG, val);
	dev_dbg(dev, "Configure raq_wl 0x%x.\n", val);

	/* Enable extended RAQ */
	val = roce_read(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG);
	roce_set_field(val,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S,
		       POL_TIME_INTERVAL_VAL);
	roce_set_bit(val, ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE, 1);
	roce_set_field(val,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S,
		       2);
	roce_set_bit(val,
		     ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S, 1);
	roce_write(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG, val);
	dev_dbg(dev, "Configure WrmsPolTimeInterval 0x%x.\n", val);

	/* Enable RAQ drop */
	val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
	roce_set_bit(val, ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S, 1);
	roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
	dev_dbg(dev, "Configure GlbCfg = 0x%x.\n", val);

	return 0;

err_dma_alloc_raq:
	kfree(raq->e_raq_buf);
	return ret;
}

static void hns_roce_raq_free(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_raq_table *raq;

	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
	raq = &priv->raq_table;

	dma_free_coherent(dev, HNS_ROCE_V1_RAQ_SIZE, raq->e_raq_buf->buf,
			  raq->e_raq_buf->map);
	kfree(raq->e_raq_buf);
}

static void hns_roce_port_enable(struct hns_roce_dev *hr_dev, int enable_flag)
{
	u32 val;

	if (enable_flag) {
		val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
		/* Open all ports */
		roce_set_field(val, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
			       ROCEE_GLB_CFG_ROCEE_PORT_ST_S,
			       ALL_PORT_VAL_OPEN);
		roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
	} else {
		val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
		/* Close all ports */
		roce_set_field(val, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
			       ROCEE_GLB_CFG_ROCEE_PORT_ST_S, 0x0);
		roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
	}
}

static int hns_roce_bt_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	int ret;

	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;

	priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev,
		HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map,
		GFP_KERNEL);
	if (!priv->bt_table.qpc_buf.buf)
		return -ENOMEM;

	priv->bt_table.mtpt_buf.buf = dma_alloc_coherent(dev,
		HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.mtpt_buf.map,
		GFP_KERNEL);
	if (!priv->bt_table.mtpt_buf.buf) {
		ret = -ENOMEM;
		goto err_failed_alloc_mtpt_buf;
	}

	priv->bt_table.cqc_buf.buf = dma_alloc_coherent(dev,
		HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.cqc_buf.map,
		GFP_KERNEL);
	if (!priv->bt_table.cqc_buf.buf) {
		ret = -ENOMEM;
		goto err_failed_alloc_cqc_buf;
	}

	return 0;

err_failed_alloc_cqc_buf:
	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
			  priv->bt_table.mtpt_buf.buf,
			  priv->bt_table.mtpt_buf.map);

err_failed_alloc_mtpt_buf:
	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
			  priv->bt_table.qpc_buf.buf,
			  priv->bt_table.qpc_buf.map);

	return ret;
}

static void hns_roce_bt_free(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;

	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;

	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
			  priv->bt_table.cqc_buf.buf,
			  priv->bt_table.cqc_buf.map);

	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
			  priv->bt_table.mtpt_buf.buf,
			  priv->bt_table.mtpt_buf.map);

	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
			  priv->bt_table.qpc_buf.buf,
			  priv->bt_table.qpc_buf.map);
}

static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_buf_list *tptr_buf;
	struct hns_roce_v1_priv *priv;

	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
	tptr_buf = &priv->tptr_table.tptr_buf;

	/*
	 * This buffer holds the tail pointer (tptr) of each CQ, also known
	 * as the consumer index (ci). On hip06 every CQ stores its CQE ci
	 * in 2 bytes of this area; hardware reads it to learn the new ci
	 * when the queue is almost full.
	 */
	tptr_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
					   &tptr_buf->map, GFP_KERNEL);
	if (!tptr_buf->buf)
		return -ENOMEM;

	hr_dev->tptr_dma_addr = tptr_buf->map;
	hr_dev->tptr_size = HNS_ROCE_V1_TPTR_BUF_SIZE;

	return 0;
}

static void hns_roce_tptr_free(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_buf_list *tptr_buf;
	struct hns_roce_v1_priv *priv;

	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
	tptr_buf = &priv->tptr_table.tptr_buf;

	dma_free_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
			  tptr_buf->buf, tptr_buf->map);
}

static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
	free_mr = &priv->free_mr;

	free_mr->free_mr_wq = create_singlethread_workqueue("hns_roce_free_mr");
	if (!free_mr->free_mr_wq) {
		dev_err(dev, "Create free mr workqueue failed!\n");
		return -ENOMEM;
	}

	ret = hns_roce_v1_rsv_lp_qp(hr_dev);
	if (ret) {
		dev_err(dev, "Reserve loop qp failed(%d)!\n", ret);
		flush_workqueue(free_mr->free_mr_wq);
		destroy_workqueue(free_mr->free_mr_wq);
	}

	return ret;
}

static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;

	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
	free_mr = &priv->free_mr;

	flush_workqueue(free_mr->free_mr_wq);
	destroy_workqueue(free_mr->free_mr_wq);

	hns_roce_v1_release_lp_qp(hr_dev);
}

/**
 * hns_roce_v1_reset - reset RoCE
 * @hr_dev: RoCE device struct pointer
 * @dereset: true -- drop reset, false -- reset
 *
 * Return: 0 on success, negative errno on failure.
 */
int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
{
	struct device_node *dsaf_node;
	struct device *dev = &hr_dev->pdev->dev;
	struct device_node *np = dev->of_node;
	struct fwnode_handle *fwnode;
	int ret;

	/* check if this is the DT or ACPI case */
	if (dev_of_node(dev)) {
		dsaf_node = of_parse_phandle(np, "dsaf-handle", 0);
		if (!dsaf_node) {
			dev_err(dev, "could not find dsaf-handle\n");
			return -EINVAL;
		}
		fwnode = &dsaf_node->fwnode;
	} else if (is_acpi_device_node(dev->fwnode)) {
		struct acpi_reference_args args;

		ret = acpi_node_get_property_reference(dev->fwnode,
						       "dsaf-handle", 0, &args);
		if (ret) {
			dev_err(dev, "could not find dsaf-handle\n");
			return ret;
		}
		fwnode = acpi_fwnode_handle(args.adev);
	} else {
		dev_err(dev, "cannot read data from DT or ACPI\n");
		return -ENXIO;
	}

	ret = hns_dsaf_roce_reset(fwnode, false);
	if (ret)
		return ret;

	if (dereset) {
		msleep(SLEEP_TIME_INTERVAL);
		ret = hns_dsaf_roce_reset(fwnode, true);
	}

	return ret;
}

static int hns_roce_des_qp_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_des_qp *des_qp;

	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
	des_qp = &priv->des_qp;

	des_qp->requeue_flag = 1;
	des_qp->qp_wq = create_singlethread_workqueue("hns_roce_destroy_qp");
	if (!des_qp->qp_wq) {
		dev_err(dev, "Create destroy qp workqueue failed!\n");
		return -ENOMEM;
	}

	return 0;
}

static void hns_roce_des_qp_free(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v1_priv *priv;
	struct hns_roce_des_qp *des_qp;

	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
	des_qp = &priv->des_qp;

	des_qp->requeue_flag = 0;
	flush_workqueue(des_qp->qp_wq);
	destroy_workqueue(des_qp->qp_wq);
}

void hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
{
	int i = 0;
	struct hns_roce_caps *caps = &hr_dev->caps;

	hr_dev->vendor_id = le32_to_cpu(roce_read(hr_dev, ROCEE_VENDOR_ID_REG));
	hr_dev->vendor_part_id = le32_to_cpu(roce_read(hr_dev,
					     ROCEE_VENDOR_PART_ID_REG));
	hr_dev->sys_image_guid = le32_to_cpu(roce_read(hr_dev,
					     ROCEE_SYS_IMAGE_GUID_L_REG)) |
				 ((u64)le32_to_cpu(roce_read(hr_dev,
					     ROCEE_SYS_IMAGE_GUID_H_REG)) << 32);
	hr_dev->hw_rev = HNS_ROCE_HW_VER1;

	caps->num_qps = HNS_ROCE_V1_MAX_QP_NUM;
	caps->max_wqes = HNS_ROCE_V1_MAX_WQE_NUM;
	caps->num_cqs = HNS_ROCE_V1_MAX_CQ_NUM;
	caps->max_cqes = HNS_ROCE_V1_MAX_CQE_NUM;
	caps->max_sq_sg = HNS_ROCE_V1_SG_NUM;
	caps->max_rq_sg = HNS_ROCE_V1_SG_NUM;
	caps->max_sq_inline = HNS_ROCE_V1_INLINE_SIZE;
	caps->num_uars = HNS_ROCE_V1_UAR_NUM;
	caps->phy_num_uars = HNS_ROCE_V1_PHY_UAR_NUM;
	caps->num_aeq_vectors = HNS_ROCE_AEQE_VEC_NUM;
	caps->num_comp_vectors = HNS_ROCE_COMP_VEC_NUM;
	caps->num_other_vectors = HNS_ROCE_AEQE_OF_VEC_NUM;
	caps->num_mtpts = HNS_ROCE_V1_MAX_MTPT_NUM;
	caps->num_mtt_segs = HNS_ROCE_V1_MAX_MTT_SEGS;
	caps->num_pds = HNS_ROCE_V1_MAX_PD_NUM;
	caps->max_qp_init_rdma = HNS_ROCE_V1_MAX_QP_INIT_RDMA;
	caps->max_qp_dest_rdma = HNS_ROCE_V1_MAX_QP_DEST_RDMA;
	caps->max_sq_desc_sz = HNS_ROCE_V1_MAX_SQ_DESC_SZ;
	caps->max_rq_desc_sz = HNS_ROCE_V1_MAX_RQ_DESC_SZ;
	caps->qpc_entry_sz = HNS_ROCE_V1_QPC_ENTRY_SIZE;
	caps->irrl_entry_sz = HNS_ROCE_V1_IRRL_ENTRY_SIZE;
	caps->cqc_entry_sz = HNS_ROCE_V1_CQC_ENTRY_SIZE;
	caps->mtpt_entry_sz = HNS_ROCE_V1_MTPT_ENTRY_SIZE;
	caps->mtt_entry_sz = HNS_ROCE_V1_MTT_ENTRY_SIZE;
	caps->cq_entry_sz = HNS_ROCE_V1_CQE_ENTRY_SIZE;
	caps->page_size_cap = HNS_ROCE_V1_PAGE_SIZE_SUPPORT;
	caps->reserved_lkey = 0;
	caps->reserved_pds = 0;
	caps->reserved_mrws = 1;
	caps->reserved_uars = 0;
	caps->reserved_cqs = 0;

	for (i = 0; i < caps->num_ports; i++)
		caps->pkey_table_len[i] = 1;

	for (i = 0; i < caps->num_ports; i++) {
		/* Six ports share 16 GID entries in the v1 engine */
		if (i >= (HNS_ROCE_V1_GID_NUM % caps->num_ports))
			caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
						 caps->num_ports;
		else
			caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
						 caps->num_ports + 1;
	}
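	/*
	 * Worked example for the loop above: with num_ports = 6,
	 * 16 % 6 = 4, so ports 0-3 each get 16 / 6 + 1 = 3 GID entries and
	 * ports 4-5 each get 16 / 6 = 2, covering all 16 GIDs.
	 */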

	for (i = 0; i < caps->num_comp_vectors; i++)
		caps->ceqe_depth[i] = HNS_ROCE_V1_NUM_COMP_EQE;

	caps->aeqe_depth = HNS_ROCE_V1_NUM_ASYNC_EQE;
	caps->local_ca_ack_delay = le32_to_cpu(roce_read(hr_dev,
					       ROCEE_ACK_DELAY_REG));
	caps->max_mtu = IB_MTU_2048;
}

int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
{
	int ret;
	u32 val;
	struct device *dev = &hr_dev->pdev->dev;

	/* DMAE user config */
	val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG1_REG);
	roce_set_field(val, ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M,
		       ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S, 0xf);
	roce_set_field(val, ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M,
		       ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S,
		       1 << PAGES_SHIFT_16);
	roce_write(hr_dev, ROCEE_DMAE_USER_CFG1_REG, val);

	val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG2_REG);
	roce_set_field(val, ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M,
		       ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S, 0xf);
	roce_set_field(val, ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M,
		       ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S,
		       1 << PAGES_SHIFT_16);
	roce_write(hr_dev, ROCEE_DMAE_USER_CFG2_REG, val);

	ret = hns_roce_db_init(hr_dev);
	if (ret) {
		dev_err(dev, "doorbell init failed!\n");
		return ret;
	}

	ret = hns_roce_raq_init(hr_dev);
	if (ret) {
		dev_err(dev, "raq init failed!\n");
		goto error_failed_raq_init;
	}

	ret = hns_roce_bt_init(hr_dev);
	if (ret) {
		dev_err(dev, "bt init failed!\n");
		goto error_failed_bt_init;
	}

	ret = hns_roce_tptr_init(hr_dev);
	if (ret) {
		dev_err(dev, "tptr init failed!\n");
		goto error_failed_tptr_init;
	}

	ret = hns_roce_des_qp_init(hr_dev);
	if (ret) {
		dev_err(dev, "des qp init failed!\n");
		goto error_failed_des_qp_init;
	}

	ret = hns_roce_free_mr_init(hr_dev);
	if (ret) {
		dev_err(dev, "free mr init failed!\n");
		goto error_failed_free_mr_init;
	}

	hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP);

	return 0;

error_failed_free_mr_init:
	hns_roce_des_qp_free(hr_dev);

error_failed_des_qp_init:
	hns_roce_tptr_free(hr_dev);

error_failed_tptr_init:
	hns_roce_bt_free(hr_dev);

error_failed_bt_init:
	hns_roce_raq_free(hr_dev);

error_failed_raq_init:
	hns_roce_db_free(hr_dev);
	return ret;
}

void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
{
	hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
	hns_roce_free_mr_free(hr_dev);
	hns_roce_des_qp_free(hr_dev);
	hns_roce_tptr_free(hr_dev);
	hns_roce_bt_free(hr_dev);
	hns_roce_raq_free(hr_dev);
	hns_roce_db_free(hr_dev);
}

void hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
			 union ib_gid *gid)
{
	u32 *p = NULL;
	u8 gid_idx = 0;

	gid_idx = hns_get_gid_index(hr_dev, port, gid_index);

	p = (u32 *)&gid->raw[0];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_L_0_REG +
		       (HNS_ROCE_V1_GID_NUM * gid_idx));

	p = (u32 *)&gid->raw[4];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_ML_0_REG +
		       (HNS_ROCE_V1_GID_NUM * gid_idx));

	p = (u32 *)&gid->raw[8];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_MH_0_REG +
		       (HNS_ROCE_V1_GID_NUM * gid_idx));

	p = (u32 *)&gid->raw[0xc];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_H_0_REG +
		       (HNS_ROCE_V1_GID_NUM * gid_idx));
}

void hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr)
{
	u32 reg_smac_l;
	u16 reg_smac_h;
	u16 *p_h;
	u32 *p;
	u32 val;

	/*
	 * When the MAC changes, loopback may fail because smac no longer
	 * equals dmac, so the reserved QPs must be released and recreated.
	 */
	if (hr_dev->hw->dereg_mr && hns_roce_v1_recreate_lp_qp(hr_dev))
		dev_warn(&hr_dev->pdev->dev, "recreate lp qp timeout!\n");

	p = (u32 *)(&addr[0]);
	reg_smac_l = *p;
	roce_raw_write(reg_smac_l, hr_dev->reg_base + ROCEE_SMAC_L_0_REG +
		       PHY_PORT_OFFSET * phy_port);

	val = roce_read(hr_dev,
			ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
	p_h = (u16 *)(&addr[4]);
	reg_smac_h = *p_h;
	roce_set_field(val, ROCEE_SMAC_H_ROCEE_SMAC_H_M,
		       ROCEE_SMAC_H_ROCEE_SMAC_H_S, reg_smac_h);
	roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
		   val);
}

void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
			 enum ib_mtu mtu)
{
	u32 val;

	val = roce_read(hr_dev,
			ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
	roce_set_field(val, ROCEE_SMAC_H_ROCEE_PORT_MTU_M,
		       ROCEE_SMAC_H_ROCEE_PORT_MTU_S, mtu);
	roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
		   val);
}

int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
			   unsigned long mtpt_idx)
{
	struct hns_roce_v1_mpt_entry *mpt_entry;
	struct scatterlist *sg;
	u64 *pages;
	int entry;
	int i;

	/* The MPT is filled into the mailbox buffer */
	mpt_entry = (struct hns_roce_v1_mpt_entry *)mb_buf;
	memset(mpt_entry, 0, sizeof(*mpt_entry));

	roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_STATE_M,
		       MPT_BYTE_4_KEY_STATE_S, KEY_VALID);
	roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_M,
		       MPT_BYTE_4_KEY_S, mr->key);
	roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_PAGE_SIZE_M,
		       MPT_BYTE_4_PAGE_SIZE_S, MR_SIZE_4K);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_TYPE_S, 0);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_BIND_ENABLE_S,
		     (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_OWN_S, 0);
	roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_MEMORY_LOCATION_TYPE_M,
		       MPT_BYTE_4_MEMORY_LOCATION_TYPE_S, mr->type);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_ATOMIC_S, 0);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_LOCAL_WRITE_S,
		     (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_WRITE_S,
		     (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_READ_S,
		     (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_INVAL_ENABLE_S,
		     0);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_ADDRESS_TYPE_S, 0);

	roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
		       MPT_BYTE_12_PBL_ADDR_H_S, 0);
	roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_MW_BIND_COUNTER_M,
		       MPT_BYTE_12_MW_BIND_COUNTER_S, 0);

	mpt_entry->virt_addr_l = (u32)mr->iova;
	mpt_entry->virt_addr_h = (u32)(mr->iova >> 32);
	mpt_entry->length = (u32)mr->size;

	roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_PD_M,
		       MPT_BYTE_28_PD_S, mr->pd);
	roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_L_KEY_IDX_L_M,
		       MPT_BYTE_28_L_KEY_IDX_L_S, mtpt_idx);
	roce_set_field(mpt_entry->mpt_byte_64, MPT_BYTE_64_L_KEY_IDX_H_M,
		       MPT_BYTE_64_L_KEY_IDX_H_S, mtpt_idx >> MTPT_IDX_SHIFT);

	/* DMA memory registration needs no page list */
	if (mr->type == MR_TYPE_DMA)
		return 0;

1749 pages = (u64 *) __get_free_page(GFP_KERNEL);
1750 if (!pages)
1751 return -ENOMEM;
1752
1753 i = 0;
1754 for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
1755 pages[i] = ((u64)sg_dma_address(sg)) >> 12;
1756
1757 		/* Only the first 7 page addresses are recorded directly in the MTPT */
1758 if (i >= HNS_ROCE_MAX_INNER_MTPT_NUM)
1759 break;
1760 i++;
1761 }
1762
1763 	/* Fill the inline PA fields for a user MR */
1764 for (i = 0; i < HNS_ROCE_MAX_INNER_MTPT_NUM; i++) {
1765 switch (i) {
1766 case 0:
1767 mpt_entry->pa0_l = cpu_to_le32((u32)(pages[i]));
1768 roce_set_field(mpt_entry->mpt_byte_36,
1769 MPT_BYTE_36_PA0_H_M,
1770 MPT_BYTE_36_PA0_H_S,
1771 cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_32)));
1772 break;
1773 case 1:
1774 roce_set_field(mpt_entry->mpt_byte_36,
1775 MPT_BYTE_36_PA1_L_M,
1776 MPT_BYTE_36_PA1_L_S,
1777 cpu_to_le32((u32)(pages[i])));
1778 roce_set_field(mpt_entry->mpt_byte_40,
1779 MPT_BYTE_40_PA1_H_M,
1780 MPT_BYTE_40_PA1_H_S,
1781 cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_24)));
1782 break;
1783 case 2:
1784 roce_set_field(mpt_entry->mpt_byte_40,
1785 MPT_BYTE_40_PA2_L_M,
1786 MPT_BYTE_40_PA2_L_S,
1787 cpu_to_le32((u32)(pages[i])));
1788 roce_set_field(mpt_entry->mpt_byte_44,
1789 MPT_BYTE_44_PA2_H_M,
1790 MPT_BYTE_44_PA2_H_S,
1791 cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_16)));
1792 break;
1793 case 3:
1794 roce_set_field(mpt_entry->mpt_byte_44,
1795 MPT_BYTE_44_PA3_L_M,
1796 MPT_BYTE_44_PA3_L_S,
1797 cpu_to_le32((u32)(pages[i])));
1798 roce_set_field(mpt_entry->mpt_byte_48,
1799 MPT_BYTE_48_PA3_H_M,
1800 MPT_BYTE_48_PA3_H_S,
1801 cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_8)));
1802 break;
1803 case 4:
1804 mpt_entry->pa4_l = cpu_to_le32((u32)(pages[i]));
1805 roce_set_field(mpt_entry->mpt_byte_56,
1806 MPT_BYTE_56_PA4_H_M,
1807 MPT_BYTE_56_PA4_H_S,
1808 cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_32)));
1809 break;
1810 case 5:
1811 roce_set_field(mpt_entry->mpt_byte_56,
1812 MPT_BYTE_56_PA5_L_M,
1813 MPT_BYTE_56_PA5_L_S,
1814 cpu_to_le32((u32)(pages[i])));
1815 roce_set_field(mpt_entry->mpt_byte_60,
1816 MPT_BYTE_60_PA5_H_M,
1817 MPT_BYTE_60_PA5_H_S,
1818 cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_24)));
1819 break;
1820 case 6:
1821 roce_set_field(mpt_entry->mpt_byte_60,
1822 MPT_BYTE_60_PA6_L_M,
1823 MPT_BYTE_60_PA6_L_S,
1824 cpu_to_le32((u32)(pages[i])));
1825 roce_set_field(mpt_entry->mpt_byte_64,
1826 MPT_BYTE_64_PA6_H_M,
1827 MPT_BYTE_64_PA6_H_S,
1828 cpu_to_le32((u32)(pages[i] >> PAGES_SHIFT_16)));
1829 break;
1830 default:
1831 break;
1832 }
1833 }
1834
1835 free_page((unsigned long) pages);
1836
1837 mpt_entry->pbl_addr_l = (u32)(mr->pbl_dma_addr);
1838
1839 roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
1840 MPT_BYTE_12_PBL_ADDR_H_S,
1841 ((u32)(mr->pbl_dma_addr >> 32)));
1842
1843 return 0;
1844 }
1845
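/* Return a pointer to the n-th CQE in the CQ buffer */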
1846 static void *get_cqe(struct hns_roce_cq *hr_cq, int n)
1847 {
1848 return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
1849 n * HNS_ROCE_V1_CQE_ENTRY_SIZE);
1850 }
1851
1852 static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
1853 {
1854 struct hns_roce_cqe *hr_cqe = get_cqe(hr_cq, n & hr_cq->ib_cq.cqe);
1855
1856 	/* A CQE is valid only when its owner bit is the inverse of the MSB of cons_index */
1857 return (roce_get_bit(hr_cqe->cqe_byte_4, CQE_BYTE_4_OWNER_S) ^
1858 !!(n & (hr_cq->ib_cq.cqe + 1))) ? hr_cqe : NULL;
1859 }
1860
1861 static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq)
1862 {
1863 return get_sw_cqe(hr_cq, hr_cq->cons_index);
1864 }
1865
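/*
 * Update the CQ consumer index in hardware by ringing the "others"
 * doorbell with command 3 and the CQN in the high word.
 */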
1866 void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
1867 {
1868 u32 doorbell[2];
1869
1870 doorbell[0] = cons_index & ((hr_cq->cq_depth << 1) - 1);
1871 doorbell[1] = 0;
1872 roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
1873 roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
1874 ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
1875 roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
1876 ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 0);
1877 roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
1878 ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S, hr_cq->cqn);
1879
1880 hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
1881 }
1882
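/*
 * Remove all CQEs belonging to the given QPN by compacting the
 * remaining entries, then advance the consumer index past the freed
 * entries. The caller must hold the CQ lock.
 */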
1883 static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
1884 struct hns_roce_srq *srq)
1885 {
1886 struct hns_roce_cqe *cqe, *dest;
1887 u32 prod_index;
1888 int nfreed = 0;
1889 u8 owner_bit;
1890
1891 for (prod_index = hr_cq->cons_index; get_sw_cqe(hr_cq, prod_index);
1892 ++prod_index) {
1893 if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe)
1894 break;
1895 }
1896
1897 /*
1898 	 * Now sweep backwards through the CQ, removing entries that
1899 	 * match our QP by overwriting them with the entries that follow.
1900 */
1901 while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
1902 cqe = get_cqe(hr_cq, prod_index & hr_cq->ib_cq.cqe);
1903 if ((roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
1904 CQE_BYTE_16_LOCAL_QPN_S) &
1905 HNS_ROCE_CQE_QPN_MASK) == qpn) {
1906 			/* The v1 engine does not support SRQ */
1907 ++nfreed;
1908 } else if (nfreed) {
1909 dest = get_cqe(hr_cq, (prod_index + nfreed) &
1910 hr_cq->ib_cq.cqe);
1911 owner_bit = roce_get_bit(dest->cqe_byte_4,
1912 CQE_BYTE_4_OWNER_S);
1913 memcpy(dest, cqe, sizeof(*cqe));
1914 roce_set_bit(dest->cqe_byte_4, CQE_BYTE_4_OWNER_S,
1915 owner_bit);
1916 }
1917 }
1918
1919 if (nfreed) {
1920 hr_cq->cons_index += nfreed;
1921 /*
1922 * Make sure update of buffer contents is done before
1923 * updating consumer index.
1924 */
1925 wmb();
1926
1927 hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
1928 }
1929 }
1930
1931 static void hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
1932 struct hns_roce_srq *srq)
1933 {
1934 spin_lock_irq(&hr_cq->lock);
1935 __hns_roce_v1_cq_clean(hr_cq, qpn, srq);
1936 spin_unlock_irq(&hr_cq->lock);
1937 }
1938
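/*
 * Build the CQ context in the mailbox buffer: state, CQN, base-table
 * and first-CQE addresses, CQE count, completion vector, and the
 * per-CQ tptr slot that apparently shadows the consumer index for
 * hardware (it is updated by software in hns_roce_v1_poll_cq).
 */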
1939 void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
1940 struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
1941 dma_addr_t dma_handle, int nent, u32 vector)
1942 {
1943 struct hns_roce_cq_context *cq_context = NULL;
1944 struct hns_roce_buf_list *tptr_buf;
1945 struct hns_roce_v1_priv *priv;
1946 dma_addr_t tptr_dma_addr;
1947 int offset;
1948
1949 priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
1950 tptr_buf = &priv->tptr_table.tptr_buf;
1951
1952 cq_context = mb_buf;
1953 memset(cq_context, 0, sizeof(*cq_context));
1954
1955 /* Get the tptr for this CQ. */
1956 offset = hr_cq->cqn * HNS_ROCE_V1_TPTR_ENTRY_SIZE;
1957 tptr_dma_addr = tptr_buf->map + offset;
1958 hr_cq->tptr_addr = (u16 *)(tptr_buf->buf + offset);
1959
1960 /* Register cq_context members */
1961 roce_set_field(cq_context->cqc_byte_4,
1962 CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_M,
1963 CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S, CQ_STATE_VALID);
1964 roce_set_field(cq_context->cqc_byte_4, CQ_CONTEXT_CQC_BYTE_4_CQN_M,
1965 CQ_CONTEXT_CQC_BYTE_4_CQN_S, hr_cq->cqn);
1966 cq_context->cqc_byte_4 = cpu_to_le32(cq_context->cqc_byte_4);
1967
1968 cq_context->cq_bt_l = (u32)dma_handle;
1969 cq_context->cq_bt_l = cpu_to_le32(cq_context->cq_bt_l);
1970
1971 roce_set_field(cq_context->cqc_byte_12,
1972 CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_M,
1973 CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S,
1974 ((u64)dma_handle >> 32));
1975 roce_set_field(cq_context->cqc_byte_12,
1976 CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M,
1977 CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S,
1978 ilog2((unsigned int)nent));
1979 roce_set_field(cq_context->cqc_byte_12, CQ_CONTEXT_CQC_BYTE_12_CEQN_M,
1980 CQ_CONTEXT_CQC_BYTE_12_CEQN_S, vector);
1981 cq_context->cqc_byte_12 = cpu_to_le32(cq_context->cqc_byte_12);
1982
1983 cq_context->cur_cqe_ba0_l = (u32)(mtts[0]);
1984 cq_context->cur_cqe_ba0_l = cpu_to_le32(cq_context->cur_cqe_ba0_l);
1985
1986 roce_set_field(cq_context->cqc_byte_20,
1987 CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_M,
1988 CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S,
1989 cpu_to_le32((mtts[0]) >> 32));
1990 	/* The hardware maintains this index itself; initialize it to 0 */
1991 roce_set_field(cq_context->cqc_byte_20,
1992 CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_M,
1993 CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S, 0);
1994 	/*
1995 	 * 44 = 32 + 12. When passing the address to hardware, shift right
1996 	 * by 12 because 4 KB pages are used, and by another 32 to obtain
1997 	 * the high 32-bit half written to this field.
1998 	 */
1999 roce_set_field(cq_context->cqc_byte_20,
2000 CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M,
2001 CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S,
2002 tptr_dma_addr >> 44);
2003 cq_context->cqc_byte_20 = cpu_to_le32(cq_context->cqc_byte_20);
2004
2005 cq_context->cqe_tptr_addr_l = (u32)(tptr_dma_addr >> 12);
2006
2007 roce_set_field(cq_context->cqc_byte_32,
2008 CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M,
2009 CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S, 0);
2010 roce_set_bit(cq_context->cqc_byte_32,
2011 CQ_CONTEXT_CQC_BYTE_32_SE_FLAG_S, 0);
2012 roce_set_bit(cq_context->cqc_byte_32,
2013 CQ_CONTEXT_CQC_BYTE_32_CE_FLAG_S, 0);
2014 roce_set_bit(cq_context->cqc_byte_32,
2015 CQ_CONTEXT_CQC_BYTE_32_NOTIFICATION_FLAG_S, 0);
2016 roce_set_bit(cq_context->cqc_byte_32,
2017 CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S,
2018 0);
2019 /* The initial value of cq's ci is 0 */
2020 roce_set_field(cq_context->cqc_byte_32,
2021 CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M,
2022 CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S, 0);
2023 cq_context->cqc_byte_32 = cpu_to_le32(cq_context->cqc_byte_32);
2024 }
2025
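/*
 * Arm the CQ for the next (or next solicited) completion event by
 * ringing the "others" doorbell with the notification type encoded
 * alongside the CQN.
 */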
2026 int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
2027 {
2028 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2029 u32 notification_flag;
2030 u32 doorbell[2];
2031
2032 notification_flag = (flags & IB_CQ_SOLICITED_MASK) ==
2033 IB_CQ_SOLICITED ? CQ_DB_REQ_NOT : CQ_DB_REQ_NOT_SOL;
2034 	/*
2035 	 * flags = 0: Notification Flag = 1, next completion
2036 	 * flags = 1: Notification Flag = 0, solicited completion only
2037 	 */
2038 	doorbell[0] = hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1);
	doorbell[1] = 0;	/* clear before setting bits; it is otherwise used uninitialized */
2039 	roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
2040 roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
2041 ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
2042 roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
2043 ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 1);
2044 roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
2045 ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S,
2046 hr_cq->cqn | notification_flag);
2047
2048 hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
2049
2050 return 0;
2051 }
2052
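/*
 * Consume one CQE: resolve the owning QP (caching it in *cur_qp),
 * translate the hardware status and opcode into the ib_wc, and
 * advance the SQ or RQ tail to reclaim the completed WQE.
 */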
2053 static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
2054 struct hns_roce_qp **cur_qp, struct ib_wc *wc)
2055 {
2056 int qpn;
2057 int is_send;
2058 u16 wqe_ctr;
2059 u32 status;
2060 u32 opcode;
2061 struct hns_roce_cqe *cqe;
2062 struct hns_roce_qp *hr_qp;
2063 struct hns_roce_wq *wq;
2064 struct hns_roce_wqe_ctrl_seg *sq_wqe;
2065 struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
2066 struct device *dev = &hr_dev->pdev->dev;
2067
2068 	/* Find the CQE at the current consumer index */
2069 cqe = next_cqe_sw(hr_cq);
2070 if (!cqe)
2071 return -EAGAIN;
2072
2073 ++hr_cq->cons_index;
2074 /* Memory barrier */
2075 rmb();
2076 /* 0->SQ, 1->RQ */
2077 is_send = !(roce_get_bit(cqe->cqe_byte_4, CQE_BYTE_4_SQ_RQ_FLAG_S));
2078
2079 	/* local_qpn in a UD CQE is 0 or 1, so the real QPN must be derived from the port number */
2080 if (roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
2081 CQE_BYTE_16_LOCAL_QPN_S) <= 1) {
2082 qpn = roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_PORT_NUM_M,
2083 CQE_BYTE_20_PORT_NUM_S) +
2084 roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
2085 CQE_BYTE_16_LOCAL_QPN_S) *
2086 HNS_ROCE_MAX_PORTS;
2087 } else {
2088 qpn = roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
2089 CQE_BYTE_16_LOCAL_QPN_S);
2090 }
2091
2092 if (!*cur_qp || (qpn & HNS_ROCE_CQE_QPN_MASK) != (*cur_qp)->qpn) {
2093 hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
2094 if (unlikely(!hr_qp)) {
2095 dev_err(dev, "CQ %06lx with entry for unknown QPN %06x\n",
2096 hr_cq->cqn, (qpn & HNS_ROCE_CQE_QPN_MASK));
2097 return -EINVAL;
2098 }
2099
2100 *cur_qp = hr_qp;
2101 }
2102
2103 wc->qp = &(*cur_qp)->ibqp;
2104 wc->vendor_err = 0;
2105
2106 status = roce_get_field(cqe->cqe_byte_4,
2107 CQE_BYTE_4_STATUS_OF_THE_OPERATION_M,
2108 CQE_BYTE_4_STATUS_OF_THE_OPERATION_S) &
2109 HNS_ROCE_CQE_STATUS_MASK;
2110 switch (status) {
2111 case HNS_ROCE_CQE_SUCCESS:
2112 wc->status = IB_WC_SUCCESS;
2113 break;
2114 case HNS_ROCE_CQE_SYNDROME_LOCAL_LENGTH_ERR:
2115 wc->status = IB_WC_LOC_LEN_ERR;
2116 break;
2117 case HNS_ROCE_CQE_SYNDROME_LOCAL_QP_OP_ERR:
2118 wc->status = IB_WC_LOC_QP_OP_ERR;
2119 break;
2120 case HNS_ROCE_CQE_SYNDROME_LOCAL_PROT_ERR:
2121 wc->status = IB_WC_LOC_PROT_ERR;
2122 break;
2123 case HNS_ROCE_CQE_SYNDROME_WR_FLUSH_ERR:
2124 wc->status = IB_WC_WR_FLUSH_ERR;
2125 break;
2126 case HNS_ROCE_CQE_SYNDROME_MEM_MANAGE_OPERATE_ERR:
2127 wc->status = IB_WC_MW_BIND_ERR;
2128 break;
2129 case HNS_ROCE_CQE_SYNDROME_BAD_RESP_ERR:
2130 wc->status = IB_WC_BAD_RESP_ERR;
2131 break;
2132 case HNS_ROCE_CQE_SYNDROME_LOCAL_ACCESS_ERR:
2133 wc->status = IB_WC_LOC_ACCESS_ERR;
2134 break;
2135 case HNS_ROCE_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
2136 wc->status = IB_WC_REM_INV_REQ_ERR;
2137 break;
2138 case HNS_ROCE_CQE_SYNDROME_REMOTE_ACCESS_ERR:
2139 wc->status = IB_WC_REM_ACCESS_ERR;
2140 break;
2141 case HNS_ROCE_CQE_SYNDROME_REMOTE_OP_ERR:
2142 wc->status = IB_WC_REM_OP_ERR;
2143 break;
2144 case HNS_ROCE_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
2145 wc->status = IB_WC_RETRY_EXC_ERR;
2146 break;
2147 case HNS_ROCE_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
2148 wc->status = IB_WC_RNR_RETRY_EXC_ERR;
2149 break;
2150 default:
2151 wc->status = IB_WC_GENERAL_ERR;
2152 break;
2153 }
2154
2155 	/* On a CQE error status, return immediately */
2156 if (wc->status != IB_WC_SUCCESS)
2157 return 0;
2158
2159 if (is_send) {
2160 		/* The CQE corresponds to an SQ WQE */
2161 sq_wqe = get_send_wqe(*cur_qp, roce_get_field(cqe->cqe_byte_4,
2162 CQE_BYTE_4_WQE_INDEX_M,
2163 CQE_BYTE_4_WQE_INDEX_S)&
2164 ((*cur_qp)->sq.wqe_cnt-1));
2165 switch (sq_wqe->flag & HNS_ROCE_WQE_OPCODE_MASK) {
2166 case HNS_ROCE_WQE_OPCODE_SEND:
2167 wc->opcode = IB_WC_SEND;
2168 break;
2169 case HNS_ROCE_WQE_OPCODE_RDMA_READ:
2170 wc->opcode = IB_WC_RDMA_READ;
2171 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2172 break;
2173 case HNS_ROCE_WQE_OPCODE_RDMA_WRITE:
2174 wc->opcode = IB_WC_RDMA_WRITE;
2175 break;
2176 case HNS_ROCE_WQE_OPCODE_LOCAL_INV:
2177 wc->opcode = IB_WC_LOCAL_INV;
2178 break;
2179 case HNS_ROCE_WQE_OPCODE_UD_SEND:
2180 wc->opcode = IB_WC_SEND;
2181 break;
2182 default:
2183 wc->status = IB_WC_GENERAL_ERR;
2184 break;
2185 }
2186 wc->wc_flags = (sq_wqe->flag & HNS_ROCE_WQE_IMM ?
2187 IB_WC_WITH_IMM : 0);
2188
2189 wq = &(*cur_qp)->sq;
2190 if ((*cur_qp)->sq_signal_bits) {
2191 			/*
2192 			 * If sq_signal_bits is set, first advance the
2193 			 * tail pointer to the WQE that the current CQE
2194 			 * corresponds to
2195 			 */
2196 wqe_ctr = (u16)roce_get_field(cqe->cqe_byte_4,
2197 CQE_BYTE_4_WQE_INDEX_M,
2198 CQE_BYTE_4_WQE_INDEX_S);
2199 wq->tail += (wqe_ctr - (u16)wq->tail) &
2200 (wq->wqe_cnt - 1);
2201 }
2202 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2203 ++wq->tail;
2204 } else {
2205 		/* The CQE corresponds to an RQ WQE */
2206 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2207 opcode = roce_get_field(cqe->cqe_byte_4,
2208 CQE_BYTE_4_OPERATION_TYPE_M,
2209 CQE_BYTE_4_OPERATION_TYPE_S) &
2210 HNS_ROCE_CQE_OPCODE_MASK;
2211 switch (opcode) {
2212 case HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE:
2213 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2214 wc->wc_flags = IB_WC_WITH_IMM;
2215 wc->ex.imm_data = le32_to_cpu(cqe->immediate_data);
2216 break;
2217 case HNS_ROCE_OPCODE_SEND_DATA_RECEIVE:
2218 if (roce_get_bit(cqe->cqe_byte_4,
2219 CQE_BYTE_4_IMM_INDICATOR_S)) {
2220 wc->opcode = IB_WC_RECV;
2221 wc->wc_flags = IB_WC_WITH_IMM;
2222 wc->ex.imm_data = le32_to_cpu(
2223 cqe->immediate_data);
2224 } else {
2225 wc->opcode = IB_WC_RECV;
2226 wc->wc_flags = 0;
2227 }
2228 break;
2229 default:
2230 wc->status = IB_WC_GENERAL_ERR;
2231 break;
2232 }
2233
2234 /* Update tail pointer, record wr_id */
2235 wq = &(*cur_qp)->rq;
2236 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2237 ++wq->tail;
2238 wc->sl = (u8)roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_SL_M,
2239 CQE_BYTE_20_SL_S);
2240 wc->src_qp = (u8)roce_get_field(cqe->cqe_byte_20,
2241 CQE_BYTE_20_REMOTE_QPN_M,
2242 CQE_BYTE_20_REMOTE_QPN_S);
2243 wc->wc_flags |= (roce_get_bit(cqe->cqe_byte_20,
2244 CQE_BYTE_20_GRH_PRESENT_S) ?
2245 IB_WC_GRH : 0);
2246 wc->pkey_index = (u16)roce_get_field(cqe->cqe_byte_28,
2247 CQE_BYTE_28_P_KEY_IDX_M,
2248 CQE_BYTE_28_P_KEY_IDX_S);
2249 }
2250
2251 return 0;
2252 }
2253
2254 int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
2255 {
2256 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2257 struct hns_roce_qp *cur_qp = NULL;
2258 unsigned long flags;
2259 int npolled;
2260 int ret = 0;
2261
2262 spin_lock_irqsave(&hr_cq->lock, flags);
2263
2264 for (npolled = 0; npolled < num_entries; ++npolled) {
2265 ret = hns_roce_v1_poll_one(hr_cq, &cur_qp, wc + npolled);
2266 if (ret)
2267 break;
2268 }
2269
2270 if (npolled) {
2271 *hr_cq->tptr_addr = hr_cq->cons_index &
2272 ((hr_cq->cq_depth << 1) - 1);
2273
2274 		/* Memory barrier */
2275 wmb();
2276 hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
2277 }
2278
2279 spin_unlock_irqrestore(&hr_cq->lock, flags);
2280
2281 if (ret == 0 || ret == -EAGAIN)
2282 return npolled;
2283 else
2284 return ret;
2285 }
2286
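/*
 * Invalidate one HEM entry by posting a BT (base address table)
 * command: wait for the hardware sync bit to clear, then write the
 * table base address and object index through the BT_CMD registers.
 */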
2287 int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
2288 struct hns_roce_hem_table *table, int obj)
2289 {
2290 struct device *dev = &hr_dev->pdev->dev;
2291 struct hns_roce_v1_priv *priv;
2292 unsigned long end = 0, flags = 0;
2293 uint32_t bt_cmd_val[2] = {0};
2294 void __iomem *bt_cmd;
2295 u64 bt_ba = 0;
2296
2297 priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
2298
2299 switch (table->type) {
2300 case HEM_TYPE_QPC:
2301 roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
2302 ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC);
2303 bt_ba = priv->bt_table.qpc_buf.map >> 12;
2304 break;
2305 case HEM_TYPE_MTPT:
2306 roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
2307 ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_MTPT);
2308 bt_ba = priv->bt_table.mtpt_buf.map >> 12;
2309 break;
2310 case HEM_TYPE_CQC:
2311 roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
2312 ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC);
2313 bt_ba = priv->bt_table.cqc_buf.map >> 12;
2314 break;
2315 case HEM_TYPE_SRQC:
2316 		dev_dbg(dev, "HEM_TYPE_SRQC not supported.\n");
2317 return -EINVAL;
2318 default:
2319 return 0;
2320 }
2321 roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
2322 ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
2323 roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
2324 roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
2325
2326 spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags);
2327
2328 bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
2329
2330 end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies;
2331 while (1) {
2332 if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
2333 if (!(time_before(jiffies, end))) {
2334 				dev_err(dev, "Write bt_cmd err, hw_sync is not zero.\n");
2335 spin_unlock_irqrestore(&hr_dev->bt_cmd_lock,
2336 flags);
2337 return -EBUSY;
2338 }
2339 } else {
2340 break;
2341 }
2342 msleep(HW_SYNC_SLEEP_TIME_INTERVAL);
2343 }
2344
2345 bt_cmd_val[0] = (uint32_t)bt_ba;
2346 roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
2347 ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32);
2348 hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
2349
2350 spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags);
2351
2352 return 0;
2353 }
2354
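/*
 * Post a QP state transition to hardware. The op[][] table maps each
 * (current, new) state pair to a mailbox command; 2RST and 2ERR need
 * no context, all other transitions send the QP context via mailbox.
 */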
2355 static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
2356 struct hns_roce_mtt *mtt,
2357 enum hns_roce_qp_state cur_state,
2358 enum hns_roce_qp_state new_state,
2359 struct hns_roce_qp_context *context,
2360 struct hns_roce_qp *hr_qp)
2361 {
2362 static const u16
2363 op[HNS_ROCE_QP_NUM_STATE][HNS_ROCE_QP_NUM_STATE] = {
2364 [HNS_ROCE_QP_STATE_RST] = {
2365 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2366 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2367 [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
2368 },
2369 [HNS_ROCE_QP_STATE_INIT] = {
2370 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2371 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2372 /* Note: In v1 engine, HW doesn't support RST2INIT.
2373 * We use RST2INIT cmd instead of INIT2INIT.
2374 */
2375 [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
2376 [HNS_ROCE_QP_STATE_RTR] = HNS_ROCE_CMD_INIT2RTR_QP,
2377 },
2378 [HNS_ROCE_QP_STATE_RTR] = {
2379 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2380 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2381 [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTR2RTS_QP,
2382 },
2383 [HNS_ROCE_QP_STATE_RTS] = {
2384 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2385 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2386 [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTS2RTS_QP,
2387 [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_RTS2SQD_QP,
2388 },
2389 [HNS_ROCE_QP_STATE_SQD] = {
2390 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2391 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2392 [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_SQD2RTS_QP,
2393 [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_SQD2SQD_QP,
2394 },
2395 [HNS_ROCE_QP_STATE_ERR] = {
2396 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2397 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2398 }
2399 };
2400
2401 struct hns_roce_cmd_mailbox *mailbox;
2402 struct device *dev = &hr_dev->pdev->dev;
2403 int ret = 0;
2404
2405 if (cur_state >= HNS_ROCE_QP_NUM_STATE ||
2406 new_state >= HNS_ROCE_QP_NUM_STATE ||
2407 !op[cur_state][new_state]) {
2408 		dev_err(dev, "[modify_qp] unsupported transition from state %d to %d\n",
2409 cur_state, new_state);
2410 return -EINVAL;
2411 }
2412
2413 if (op[cur_state][new_state] == HNS_ROCE_CMD_2RST_QP)
2414 return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
2415 HNS_ROCE_CMD_2RST_QP,
2416 HNS_ROCE_CMD_TIMEOUT_MSECS);
2417
2418 if (op[cur_state][new_state] == HNS_ROCE_CMD_2ERR_QP)
2419 return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
2420 HNS_ROCE_CMD_2ERR_QP,
2421 HNS_ROCE_CMD_TIMEOUT_MSECS);
2422
2423 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
2424 if (IS_ERR(mailbox))
2425 return PTR_ERR(mailbox);
2426
2427 memcpy(mailbox->buf, context, sizeof(*context));
2428
2429 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
2430 op[cur_state][new_state],
2431 HNS_ROCE_CMD_TIMEOUT_MSECS);
2432
2433 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
2434 return ret;
2435 }
2436
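/*
 * Modify QP1 (the GSI QP). Its QP1C context is written directly into
 * the ROCEE_QP1C_CFG registers rather than through the mailbox, and
 * the state field is then updated in place.
 */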
2437 static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
2438 int attr_mask, enum ib_qp_state cur_state,
2439 enum ib_qp_state new_state)
2440 {
2441 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
2442 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
2443 struct hns_roce_sqp_context *context;
2444 struct device *dev = &hr_dev->pdev->dev;
2445 dma_addr_t dma_handle = 0;
2446 int rq_pa_start;
2447 u32 reg_val;
2448 u64 *mtts;
2449 u32 *addr;
2450
2451 context = kzalloc(sizeof(*context), GFP_KERNEL);
2452 if (!context)
2453 return -ENOMEM;
2454
2455 /* Search QP buf's MTTs */
2456 mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
2457 hr_qp->mtt.first_seg, &dma_handle);
2458 if (!mtts) {
2459 dev_err(dev, "qp buf pa find failed\n");
2460 goto out;
2461 }
2462
2463 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
2464 roce_set_field(context->qp1c_bytes_4,
2465 QP1C_BYTES_4_SQ_WQE_SHIFT_M,
2466 QP1C_BYTES_4_SQ_WQE_SHIFT_S,
2467 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2468 roce_set_field(context->qp1c_bytes_4,
2469 QP1C_BYTES_4_RQ_WQE_SHIFT_M,
2470 QP1C_BYTES_4_RQ_WQE_SHIFT_S,
2471 ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2472 roce_set_field(context->qp1c_bytes_4, QP1C_BYTES_4_PD_M,
2473 QP1C_BYTES_4_PD_S, to_hr_pd(ibqp->pd)->pdn);
2474
2475 context->sq_rq_bt_l = (u32)(dma_handle);
2476 roce_set_field(context->qp1c_bytes_12,
2477 QP1C_BYTES_12_SQ_RQ_BT_H_M,
2478 QP1C_BYTES_12_SQ_RQ_BT_H_S,
2479 ((u32)(dma_handle >> 32)));
2480
2481 roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_HEAD_M,
2482 QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head);
2483 roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_PORT_NUM_M,
2484 QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port);
2485 roce_set_bit(context->qp1c_bytes_16,
2486 QP1C_BYTES_16_SIGNALING_TYPE_S,
2487 hr_qp->sq_signal_bits);
2488 roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S,
2489 1);
2490 roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S,
2491 1);
2492 roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_QP1_ERR_S,
2493 0);
2494
2495 roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_SQ_HEAD_M,
2496 QP1C_BYTES_20_SQ_HEAD_S, hr_qp->sq.head);
2497 roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_PKEY_IDX_M,
2498 QP1C_BYTES_20_PKEY_IDX_S, attr->pkey_index);
2499
2500 rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
2501 context->cur_rq_wqe_ba_l = (u32)(mtts[rq_pa_start]);
2502
2503 roce_set_field(context->qp1c_bytes_28,
2504 QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M,
2505 QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S,
2506 (mtts[rq_pa_start]) >> 32);
2507 roce_set_field(context->qp1c_bytes_28,
2508 QP1C_BYTES_28_RQ_CUR_IDX_M,
2509 QP1C_BYTES_28_RQ_CUR_IDX_S, 0);
2510
2511 roce_set_field(context->qp1c_bytes_32,
2512 QP1C_BYTES_32_RX_CQ_NUM_M,
2513 QP1C_BYTES_32_RX_CQ_NUM_S,
2514 to_hr_cq(ibqp->recv_cq)->cqn);
2515 roce_set_field(context->qp1c_bytes_32,
2516 QP1C_BYTES_32_TX_CQ_NUM_M,
2517 QP1C_BYTES_32_TX_CQ_NUM_S,
2518 to_hr_cq(ibqp->send_cq)->cqn);
2519
2520 context->cur_sq_wqe_ba_l = (u32)mtts[0];
2521
2522 roce_set_field(context->qp1c_bytes_40,
2523 QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M,
2524 QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S,
2525 (mtts[0]) >> 32);
2526 roce_set_field(context->qp1c_bytes_40,
2527 QP1C_BYTES_40_SQ_CUR_IDX_M,
2528 QP1C_BYTES_40_SQ_CUR_IDX_S, 0);
2529
2530 /* Copy context to QP1C register */
2531 addr = (u32 *)(hr_dev->reg_base + ROCEE_QP1C_CFG0_0_REG +
2532 hr_qp->phy_port * sizeof(*context));
2533
2534 writel(context->qp1c_bytes_4, addr);
2535 writel(context->sq_rq_bt_l, addr + 1);
2536 writel(context->qp1c_bytes_12, addr + 2);
2537 writel(context->qp1c_bytes_16, addr + 3);
2538 writel(context->qp1c_bytes_20, addr + 4);
2539 writel(context->cur_rq_wqe_ba_l, addr + 5);
2540 writel(context->qp1c_bytes_28, addr + 6);
2541 writel(context->qp1c_bytes_32, addr + 7);
2542 writel(context->cur_sq_wqe_ba_l, addr + 8);
2543 writel(context->qp1c_bytes_40, addr + 9);
2544 }
2545
2546 /* Modify QP1C status */
2547 reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG0_0_REG +
2548 hr_qp->phy_port * sizeof(*context));
2549 roce_set_field(reg_val, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M,
2550 ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S, new_state);
2551 roce_write(hr_dev, ROCEE_QP1C_CFG0_0_REG +
2552 hr_qp->phy_port * sizeof(*context), reg_val);
2553
2554 hr_qp->state = new_state;
2555 if (new_state == IB_QPS_RESET) {
2556 hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
2557 ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
2558 if (ibqp->send_cq != ibqp->recv_cq)
2559 hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
2560 hr_qp->qpn, NULL);
2561
2562 hr_qp->rq.head = 0;
2563 hr_qp->rq.tail = 0;
2564 hr_qp->sq.head = 0;
2565 hr_qp->sq.tail = 0;
2566 hr_qp->sq_next_wqe = 0;
2567 }
2568
2569 kfree(context);
2570 return 0;
2571
2572 out:
2573 kfree(context);
2574 return -EINVAL;
2575 }
2576
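/*
 * Modify a regular QP: build the full QP context for the requested
 * transition, pass it to hardware through the mailbox, and for the
 * INIT->INIT (RST2INIT) case ring the RQ doorbell again afterwards.
 */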
2577 static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
2578 int attr_mask, enum ib_qp_state cur_state,
2579 enum ib_qp_state new_state)
2580 {
2581 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
2582 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
2583 struct device *dev = &hr_dev->pdev->dev;
2584 struct hns_roce_qp_context *context;
2585 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
2586 dma_addr_t dma_handle_2 = 0;
2587 dma_addr_t dma_handle = 0;
2588 uint32_t doorbell[2] = {0};
2589 int rq_pa_start = 0;
2590 u64 *mtts_2 = NULL;
2591 int ret = -EINVAL;
2592 u64 *mtts = NULL;
2593 int port;
2594 u8 port_num;
2595 u8 *dmac;
2596 u8 *smac;
2597
2598 context = kzalloc(sizeof(*context), GFP_KERNEL);
2599 if (!context)
2600 return -ENOMEM;
2601
2602 /* Search qp buf's mtts */
2603 mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
2604 hr_qp->mtt.first_seg, &dma_handle);
2605 if (mtts == NULL) {
2606 dev_err(dev, "qp buf pa find failed\n");
2607 goto out;
2608 }
2609
2610 /* Search IRRL's mtts */
2611 mtts_2 = hns_roce_table_find(&hr_dev->qp_table.irrl_table, hr_qp->qpn,
2612 &dma_handle_2);
2613 if (mtts_2 == NULL) {
2614 dev_err(dev, "qp irrl_table find failed\n");
2615 goto out;
2616 }
2617
2618 /*
2619 * Reset to init
2620 * Mandatory param:
2621 * IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS
2622 * Optional param: NA
2623 */
2624 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
2625 roce_set_field(context->qpc_bytes_4,
2626 QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
2627 QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S,
2628 to_hr_qp_type(hr_qp->ibqp.qp_type));
2629
2630 roce_set_bit(context->qpc_bytes_4,
2631 QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0);
2632 roce_set_bit(context->qpc_bytes_4,
2633 QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
2634 !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
2635 roce_set_bit(context->qpc_bytes_4,
2636 QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
2637 !!(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
2638 );
2639 roce_set_bit(context->qpc_bytes_4,
2640 QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S,
2641 !!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)
2642 );
2643 roce_set_bit(context->qpc_bytes_4,
2644 QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1);
2645 roce_set_field(context->qpc_bytes_4,
2646 QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M,
2647 QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S,
2648 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2649 roce_set_field(context->qpc_bytes_4,
2650 QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M,
2651 QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S,
2652 ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2653 roce_set_field(context->qpc_bytes_4,
2654 QP_CONTEXT_QPC_BYTES_4_PD_M,
2655 QP_CONTEXT_QPC_BYTES_4_PD_S,
2656 to_hr_pd(ibqp->pd)->pdn);
2657 hr_qp->access_flags = attr->qp_access_flags;
2658 roce_set_field(context->qpc_bytes_8,
2659 QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M,
2660 QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S,
2661 to_hr_cq(ibqp->send_cq)->cqn);
2662 roce_set_field(context->qpc_bytes_8,
2663 QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M,
2664 QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S,
2665 to_hr_cq(ibqp->recv_cq)->cqn);
2666
2667 if (ibqp->srq)
2668 roce_set_field(context->qpc_bytes_12,
2669 QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M,
2670 QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S,
2671 to_hr_srq(ibqp->srq)->srqn);
2672
2673 roce_set_field(context->qpc_bytes_12,
2674 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
2675 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
2676 attr->pkey_index);
2677 hr_qp->pkey_index = attr->pkey_index;
2678 roce_set_field(context->qpc_bytes_16,
2679 QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
2680 QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
2681
2682 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
2683 roce_set_field(context->qpc_bytes_4,
2684 QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
2685 QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S,
2686 to_hr_qp_type(hr_qp->ibqp.qp_type));
2687 roce_set_bit(context->qpc_bytes_4,
2688 QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0);
2689 if (attr_mask & IB_QP_ACCESS_FLAGS) {
2690 roce_set_bit(context->qpc_bytes_4,
2691 QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
2692 !!(attr->qp_access_flags &
2693 IB_ACCESS_REMOTE_READ));
2694 roce_set_bit(context->qpc_bytes_4,
2695 QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
2696 !!(attr->qp_access_flags &
2697 IB_ACCESS_REMOTE_WRITE));
2698 } else {
2699 roce_set_bit(context->qpc_bytes_4,
2700 QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
2701 !!(hr_qp->access_flags &
2702 IB_ACCESS_REMOTE_READ));
2703 roce_set_bit(context->qpc_bytes_4,
2704 QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
2705 !!(hr_qp->access_flags &
2706 IB_ACCESS_REMOTE_WRITE));
2707 }
2708
2709 roce_set_bit(context->qpc_bytes_4,
2710 QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1);
2711 roce_set_field(context->qpc_bytes_4,
2712 QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M,
2713 QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S,
2714 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2715 roce_set_field(context->qpc_bytes_4,
2716 QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M,
2717 QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S,
2718 ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2719 roce_set_field(context->qpc_bytes_4,
2720 QP_CONTEXT_QPC_BYTES_4_PD_M,
2721 QP_CONTEXT_QPC_BYTES_4_PD_S,
2722 to_hr_pd(ibqp->pd)->pdn);
2723
2724 roce_set_field(context->qpc_bytes_8,
2725 QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M,
2726 QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S,
2727 to_hr_cq(ibqp->send_cq)->cqn);
2728 roce_set_field(context->qpc_bytes_8,
2729 QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M,
2730 QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S,
2731 to_hr_cq(ibqp->recv_cq)->cqn);
2732
2733 if (ibqp->srq)
2734 roce_set_field(context->qpc_bytes_12,
2735 QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M,
2736 QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S,
2737 to_hr_srq(ibqp->srq)->srqn);
2738 if (attr_mask & IB_QP_PKEY_INDEX)
2739 roce_set_field(context->qpc_bytes_12,
2740 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
2741 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
2742 attr->pkey_index);
2743 else
2744 roce_set_field(context->qpc_bytes_12,
2745 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
2746 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
2747 hr_qp->pkey_index);
2748
2749 roce_set_field(context->qpc_bytes_16,
2750 QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
2751 QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
2752 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
2753 if ((attr_mask & IB_QP_ALT_PATH) ||
2754 (attr_mask & IB_QP_ACCESS_FLAGS) ||
2755 (attr_mask & IB_QP_PKEY_INDEX) ||
2756 (attr_mask & IB_QP_QKEY)) {
2757 dev_err(dev, "INIT2RTR attr_mask error\n");
2758 goto out;
2759 }
2760
2761 dmac = (u8 *)attr->ah_attr.roce.dmac;
2762
2763 context->sq_rq_bt_l = (u32)(dma_handle);
2764 roce_set_field(context->qpc_bytes_24,
2765 QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M,
2766 QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S,
2767 ((u32)(dma_handle >> 32)));
2768 roce_set_bit(context->qpc_bytes_24,
2769 QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S,
2770 1);
2771 roce_set_field(context->qpc_bytes_24,
2772 QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
2773 QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S,
2774 attr->min_rnr_timer);
2775 context->irrl_ba_l = (u32)(dma_handle_2);
2776 roce_set_field(context->qpc_bytes_32,
2777 QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M,
2778 QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S,
2779 ((u32)(dma_handle_2 >> 32)) &
2780 QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M);
2781 roce_set_field(context->qpc_bytes_32,
2782 QP_CONTEXT_QPC_BYTES_32_MIG_STATE_M,
2783 QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S, 0);
2784 roce_set_bit(context->qpc_bytes_32,
2785 QP_CONTEXT_QPC_BYTE_32_LOCAL_ENABLE_E2E_CREDITS_S,
2786 1);
2787 roce_set_bit(context->qpc_bytes_32,
2788 QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S,
2789 hr_qp->sq_signal_bits);
2790
2791 port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) :
2792 hr_qp->port;
2793 smac = (u8 *)hr_dev->dev_addr[port];
2794 		/* loop back when dmac equals smac or when loop_idc is 1 */
2795 if (ether_addr_equal_unaligned(dmac, smac) ||
2796 hr_dev->loop_idc == 0x1)
2797 roce_set_bit(context->qpc_bytes_32,
2798 QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S, 1);
2799
2800 roce_set_bit(context->qpc_bytes_32,
2801 QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S,
2802 rdma_ah_get_ah_flags(&attr->ah_attr));
2803 roce_set_field(context->qpc_bytes_32,
2804 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
2805 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S,
2806 ilog2((unsigned int)attr->max_dest_rd_atomic));
2807
2808 roce_set_field(context->qpc_bytes_36,
2809 QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
2810 QP_CONTEXT_QPC_BYTES_36_DEST_QP_S,
2811 attr->dest_qp_num);
2812
2813 /* Configure GID index */
2814 port_num = rdma_ah_get_port_num(&attr->ah_attr);
2815 roce_set_field(context->qpc_bytes_36,
2816 QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
2817 QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S,
2818 hns_get_gid_index(hr_dev,
2819 port_num - 1,
2820 grh->sgid_index));
2821
2822 memcpy(&(context->dmac_l), dmac, 4);
2823
2824 roce_set_field(context->qpc_bytes_44,
2825 QP_CONTEXT_QPC_BYTES_44_DMAC_H_M,
2826 QP_CONTEXT_QPC_BYTES_44_DMAC_H_S,
2827 *((u16 *)(&dmac[4])));
2828 roce_set_field(context->qpc_bytes_44,
2829 QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_M,
2830 QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S,
2831 rdma_ah_get_static_rate(&attr->ah_attr));
2832 roce_set_field(context->qpc_bytes_44,
2833 QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
2834 QP_CONTEXT_QPC_BYTES_44_HOPLMT_S,
2835 grh->hop_limit);
2836
2837 roce_set_field(context->qpc_bytes_48,
2838 QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
2839 QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S,
2840 grh->flow_label);
2841 roce_set_field(context->qpc_bytes_48,
2842 QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
2843 QP_CONTEXT_QPC_BYTES_48_TCLASS_S,
2844 grh->traffic_class);
2845 roce_set_field(context->qpc_bytes_48,
2846 QP_CONTEXT_QPC_BYTES_48_MTU_M,
2847 QP_CONTEXT_QPC_BYTES_48_MTU_S, attr->path_mtu);
2848
2849 memcpy(context->dgid, grh->dgid.raw,
2850 sizeof(grh->dgid.raw));
2851
2852 dev_dbg(dev, "dmac:%x :%lx\n", context->dmac_l,
2853 roce_get_field(context->qpc_bytes_44,
2854 QP_CONTEXT_QPC_BYTES_44_DMAC_H_M,
2855 QP_CONTEXT_QPC_BYTES_44_DMAC_H_S));
2856
2857 roce_set_field(context->qpc_bytes_68,
2858 QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M,
2859 QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S,
2860 hr_qp->rq.head);
2861 roce_set_field(context->qpc_bytes_68,
2862 QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M,
2863 QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0);
2864
2865 rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
2866 context->cur_rq_wqe_ba_l = (u32)(mtts[rq_pa_start]);
2867
2868 roce_set_field(context->qpc_bytes_76,
2869 QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M,
2870 QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S,
2871 mtts[rq_pa_start] >> 32);
2872 roce_set_field(context->qpc_bytes_76,
2873 QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M,
2874 QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S, 0);
2875
2876 context->rx_rnr_time = 0;
2877
2878 roce_set_field(context->qpc_bytes_84,
2879 QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_M,
2880 QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S,
2881 attr->rq_psn - 1);
2882 roce_set_field(context->qpc_bytes_84,
2883 QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_M,
2884 QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S, 0);
2885
2886 roce_set_field(context->qpc_bytes_88,
2887 QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M,
2888 QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S,
2889 attr->rq_psn);
2890 roce_set_bit(context->qpc_bytes_88,
2891 QP_CONTEXT_QPC_BYTES_88_RX_REQ_PSN_ERR_FLAG_S, 0);
2892 roce_set_bit(context->qpc_bytes_88,
2893 QP_CONTEXT_QPC_BYTES_88_RX_LAST_OPCODE_FLG_S, 0);
2894 roce_set_field(context->qpc_bytes_88,
2895 QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_M,
2896 QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S,
2897 0);
2898 roce_set_field(context->qpc_bytes_88,
2899 QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_M,
2900 QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S,
2901 0);
2902
2903 context->dma_length = 0;
2904 context->r_key = 0;
2905 context->va_l = 0;
2906 context->va_h = 0;
2907
2908 roce_set_field(context->qpc_bytes_108,
2909 QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_M,
2910 QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S, 0);
2911 roce_set_bit(context->qpc_bytes_108,
2912 QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_FLG_S, 0);
2913 roce_set_bit(context->qpc_bytes_108,
2914 QP_CONTEXT_QPC_BYTES_108_TRRL_TDB_PSN_FLG_S, 0);
2915
2916 roce_set_field(context->qpc_bytes_112,
2917 QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_M,
2918 QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S, 0);
2919 roce_set_field(context->qpc_bytes_112,
2920 QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_M,
2921 QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S, 0);
2922
2923 		/* Port and SL used by the chip for response/ACK packets */
2924 roce_set_field(context->qpc_bytes_156,
2925 QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
2926 QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
2927 hr_qp->phy_port);
2928 roce_set_field(context->qpc_bytes_156,
2929 QP_CONTEXT_QPC_BYTES_156_SL_M,
2930 QP_CONTEXT_QPC_BYTES_156_SL_S,
2931 rdma_ah_get_sl(&attr->ah_attr));
2932 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
2933 } else if (cur_state == IB_QPS_RTR &&
2934 new_state == IB_QPS_RTS) {
2935 		/* Reject optional attributes that are not allowed for RTR2RTS */
2936 if ((attr_mask & IB_QP_ALT_PATH) ||
2937 (attr_mask & IB_QP_ACCESS_FLAGS) ||
2938 (attr_mask & IB_QP_QKEY) ||
2939 (attr_mask & IB_QP_PATH_MIG_STATE) ||
2940 (attr_mask & IB_QP_CUR_STATE) ||
2941 (attr_mask & IB_QP_MIN_RNR_TIMER)) {
2942 dev_err(dev, "RTR2RTS attr_mask error\n");
2943 goto out;
2944 }
2945
2946 context->rx_cur_sq_wqe_ba_l = (u32)(mtts[0]);
2947
2948 roce_set_field(context->qpc_bytes_120,
2949 QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M,
2950 QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S,
2951 (mtts[0]) >> 32);
2952
2953 roce_set_field(context->qpc_bytes_124,
2954 QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M,
2955 QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S, 0);
2956 roce_set_field(context->qpc_bytes_124,
2957 QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_M,
2958 QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S, 0);
2959
2960 roce_set_field(context->qpc_bytes_128,
2961 QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_M,
2962 QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S,
2963 attr->sq_psn);
2964 roce_set_bit(context->qpc_bytes_128,
2965 QP_CONTEXT_QPC_BYTES_128_RX_ACK_PSN_ERR_FLG_S, 0);
2966 roce_set_field(context->qpc_bytes_128,
2967 QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_M,
2968 QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S,
2969 0);
2970 roce_set_bit(context->qpc_bytes_128,
2971 QP_CONTEXT_QPC_BYTES_128_IRRL_PSN_VLD_FLG_S, 0);
2972
2973 roce_set_field(context->qpc_bytes_132,
2974 QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_M,
2975 QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S, 0);
2976 roce_set_field(context->qpc_bytes_132,
2977 QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_M,
2978 QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S, 0);
2979
2980 roce_set_field(context->qpc_bytes_136,
2981 QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_M,
2982 QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S,
2983 attr->sq_psn);
2984 roce_set_field(context->qpc_bytes_136,
2985 QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_M,
2986 QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S,
2987 attr->sq_psn);
2988
2989 roce_set_field(context->qpc_bytes_140,
2990 QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_M,
2991 QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S,
2992 (attr->sq_psn >> SQ_PSN_SHIFT));
2993 roce_set_field(context->qpc_bytes_140,
2994 QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_M,
2995 QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S, 0);
2996 roce_set_bit(context->qpc_bytes_140,
2997 QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S, 0);
2998
2999 roce_set_field(context->qpc_bytes_148,
3000 QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M,
3001 QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S, 0);
3002 roce_set_field(context->qpc_bytes_148,
3003 QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
3004 QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S,
3005 attr->retry_cnt);
3006 roce_set_field(context->qpc_bytes_148,
3007 QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M,
3008 QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S,
3009 attr->rnr_retry);
3010 roce_set_field(context->qpc_bytes_148,
3011 QP_CONTEXT_QPC_BYTES_148_LSN_M,
3012 QP_CONTEXT_QPC_BYTES_148_LSN_S, 0x100);
3013
3014 context->rnr_retry = 0;
3015
3016 roce_set_field(context->qpc_bytes_156,
3017 QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M,
3018 QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S,
3019 attr->retry_cnt);
3020 if (attr->timeout < 0x12) {
3021 			dev_info(dev, "ack timeout value (0x%x) must be at least 0x12, using 0x12.\n",
3022 attr->timeout);
3023 roce_set_field(context->qpc_bytes_156,
3024 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
3025 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
3026 0x12);
3027 } else {
3028 roce_set_field(context->qpc_bytes_156,
3029 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
3030 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
3031 attr->timeout);
3032 }
3033 roce_set_field(context->qpc_bytes_156,
3034 QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M,
3035 QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S,
3036 attr->rnr_retry);
3037 roce_set_field(context->qpc_bytes_156,
3038 QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
3039 QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
3040 hr_qp->phy_port);
3041 roce_set_field(context->qpc_bytes_156,
3042 QP_CONTEXT_QPC_BYTES_156_SL_M,
3043 QP_CONTEXT_QPC_BYTES_156_SL_S,
3044 rdma_ah_get_sl(&attr->ah_attr));
3045 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
3046 roce_set_field(context->qpc_bytes_156,
3047 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
3048 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S,
3049 ilog2((unsigned int)attr->max_rd_atomic));
3050 roce_set_field(context->qpc_bytes_156,
3051 QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_M,
3052 QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S, 0);
3053 context->pkt_use_len = 0;
3054
3055 roce_set_field(context->qpc_bytes_164,
3056 QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M,
3057 QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S, attr->sq_psn);
3058 roce_set_field(context->qpc_bytes_164,
3059 QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_M,
3060 QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S, 0);
3061
3062 roce_set_field(context->qpc_bytes_168,
3063 QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_M,
3064 QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S,
3065 attr->sq_psn);
3066 roce_set_field(context->qpc_bytes_168,
3067 QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_M,
3068 QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S, 0);
3069 roce_set_field(context->qpc_bytes_168,
3070 QP_CONTEXT_QPC_BYTES_168_DB_TYPE_M,
3071 QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S, 0);
3072 roce_set_bit(context->qpc_bytes_168,
3073 QP_CONTEXT_QPC_BYTES_168_MSG_LP_IND_S, 0);
3074 roce_set_bit(context->qpc_bytes_168,
3075 QP_CONTEXT_QPC_BYTES_168_CSDB_LP_IND_S, 0);
3076 roce_set_bit(context->qpc_bytes_168,
3077 QP_CONTEXT_QPC_BYTES_168_QP_ERR_FLG_S, 0);
3078 context->sge_use_len = 0;
3079
3080 roce_set_field(context->qpc_bytes_176,
3081 QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_M,
3082 QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S, 0);
3083 roce_set_field(context->qpc_bytes_176,
3084 QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_M,
3085 QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S,
3086 0);
3087 roce_set_field(context->qpc_bytes_180,
3088 QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_M,
3089 QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S, 0);
3090 roce_set_field(context->qpc_bytes_180,
3091 QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M,
3092 QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S, 0);
3093
3094 context->tx_cur_sq_wqe_ba_l = (u32)(mtts[0]);
3095
3096 roce_set_field(context->qpc_bytes_188,
3097 QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M,
3098 QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S,
3099 (mtts[0]) >> 32);
3100 roce_set_bit(context->qpc_bytes_188,
3101 QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S, 0);
3102 roce_set_field(context->qpc_bytes_188,
3103 QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M,
3104 QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S,
3105 0);
3106 } else if (!((cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
3107 (cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) ||
3108 (cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) ||
3109 (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
3110 (cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) ||
3111 (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
3112 (cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) ||
3113 (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR))) {
3114 		dev_err(dev, "unsupported QP state transition\n");
3115 goto out;
3116 }
3117
3118 	/* Every state migration must update the QP state field */
3119 roce_set_field(context->qpc_bytes_144,
3120 QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
3121 QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, new_state);
3122
3123 /* SW pass context to HW */
3124 ret = hns_roce_v1_qp_modify(hr_dev, &hr_qp->mtt,
3125 to_hns_roce_state(cur_state),
3126 to_hns_roce_state(new_state), context,
3127 hr_qp);
3128 if (ret) {
3129 dev_err(dev, "hns_roce_qp_modify failed\n");
3130 goto out;
3131 }
3132
3133 	/*
3134 	 * RST2INIT is used instead of INIT2INIT, so the RQ head must be
3135 	 * pushed to hardware again via the doorbell
3136 	 */
3137 if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
3138 /* Memory barrier */
3139 wmb();
3140
3141 roce_set_field(doorbell[0], RQ_DOORBELL_U32_4_RQ_HEAD_M,
3142 RQ_DOORBELL_U32_4_RQ_HEAD_S, hr_qp->rq.head);
3143 roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_QPN_M,
3144 RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
3145 roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_CMD_M,
3146 RQ_DOORBELL_U32_8_CMD_S, 1);
3147 roce_set_bit(doorbell[1], RQ_DOORBELL_U32_8_HW_SYNC_S, 1);
3148
3149 if (ibqp->uobject) {
3150 hr_qp->rq.db_reg_l = hr_dev->reg_base +
3151 ROCEE_DB_OTHERS_L_0_REG +
3152 DB_REG_OFFSET * hr_dev->priv_uar.index;
3153 }
3154
3155 hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
3156 }
3157
3158 hr_qp->state = new_state;
3159
3160 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
3161 hr_qp->resp_depth = attr->max_dest_rd_atomic;
3162 if (attr_mask & IB_QP_PORT) {
3163 hr_qp->port = attr->port_num - 1;
3164 hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
3165 }
3166
3167 if (new_state == IB_QPS_RESET && !ibqp->uobject) {
3168 hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
3169 ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
3170 if (ibqp->send_cq != ibqp->recv_cq)
3171 hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
3172 hr_qp->qpn, NULL);
3173
3174 hr_qp->rq.head = 0;
3175 hr_qp->rq.tail = 0;
3176 hr_qp->sq.head = 0;
3177 hr_qp->sq.tail = 0;
3178 hr_qp->sq_next_wqe = 0;
3179 }
3180 out:
3181 kfree(context);
3182 return ret;
3183 }
3184
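/* Dispatch: GSI/SMI QPs take the register-based QP1C path, all others the mailbox path */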
3185 int hns_roce_v1_modify_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
3186 int attr_mask, enum ib_qp_state cur_state,
3187 enum ib_qp_state new_state)
3188 {
3189
3190 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
3191 return hns_roce_v1_m_sqp(ibqp, attr, attr_mask, cur_state,
3192 new_state);
3193 else
3194 return hns_roce_v1_m_qp(ibqp, attr, attr_mask, cur_state,
3195 new_state);
3196 }
3197
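/* Map the hardware QP state encoding onto the IB verbs state enum */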
3198 static enum ib_qp_state to_ib_qp_state(enum hns_roce_qp_state state)
3199 {
3200 switch (state) {
3201 case HNS_ROCE_QP_STATE_RST:
3202 return IB_QPS_RESET;
3203 case HNS_ROCE_QP_STATE_INIT:
3204 return IB_QPS_INIT;
3205 case HNS_ROCE_QP_STATE_RTR:
3206 return IB_QPS_RTR;
3207 case HNS_ROCE_QP_STATE_RTS:
3208 return IB_QPS_RTS;
3209 case HNS_ROCE_QP_STATE_SQD:
3210 return IB_QPS_SQD;
3211 case HNS_ROCE_QP_STATE_ERR:
3212 return IB_QPS_ERR;
3213 default:
3214 return IB_QPS_ERR;
3215 }
3216 }
3217
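/*
 * Read the QP context back from hardware into hr_context with the
 * QUERY_QP mailbox command.
 */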
3218 static int hns_roce_v1_query_qpc(struct hns_roce_dev *hr_dev,
3219 struct hns_roce_qp *hr_qp,
3220 struct hns_roce_qp_context *hr_context)
3221 {
3222 struct hns_roce_cmd_mailbox *mailbox;
3223 int ret;
3224
3225 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3226 if (IS_ERR(mailbox))
3227 return PTR_ERR(mailbox);
3228
3229 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
3230 HNS_ROCE_CMD_QUERY_QP,
3231 HNS_ROCE_CMD_TIMEOUT_MSECS);
3232 if (!ret)
3233 memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
3234 else
3235 dev_err(&hr_dev->pdev->dev, "QUERY QP cmd process error\n");
3236
3237 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3238
3239 return ret;
3240 }
3241
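/*
 * Query QP1 attributes by reading the QP1C context registers back;
 * most path attributes are fixed for the GSI QP.
 */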
3242 static int hns_roce_v1_q_sqp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3243 int qp_attr_mask,
3244 struct ib_qp_init_attr *qp_init_attr)
3245 {
3246 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3247 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3248 struct hns_roce_sqp_context context;
3249 u32 addr;
3250
3251 mutex_lock(&hr_qp->mutex);
3252
3253 if (hr_qp->state == IB_QPS_RESET) {
3254 qp_attr->qp_state = IB_QPS_RESET;
3255 goto done;
3256 }
3257
3258 addr = ROCEE_QP1C_CFG0_0_REG +
3259 hr_qp->port * sizeof(struct hns_roce_sqp_context);
3260 context.qp1c_bytes_4 = roce_read(hr_dev, addr);
3261 context.sq_rq_bt_l = roce_read(hr_dev, addr + 1);
3262 context.qp1c_bytes_12 = roce_read(hr_dev, addr + 2);
3263 context.qp1c_bytes_16 = roce_read(hr_dev, addr + 3);
3264 context.qp1c_bytes_20 = roce_read(hr_dev, addr + 4);
3265 context.cur_rq_wqe_ba_l = roce_read(hr_dev, addr + 5);
3266 context.qp1c_bytes_28 = roce_read(hr_dev, addr + 6);
3267 context.qp1c_bytes_32 = roce_read(hr_dev, addr + 7);
3268 context.cur_sq_wqe_ba_l = roce_read(hr_dev, addr + 8);
3269 context.qp1c_bytes_40 = roce_read(hr_dev, addr + 9);
3270
3271 hr_qp->state = roce_get_field(context.qp1c_bytes_4,
3272 QP1C_BYTES_4_QP_STATE_M,
3273 QP1C_BYTES_4_QP_STATE_S);
3274 qp_attr->qp_state = hr_qp->state;
3275 qp_attr->path_mtu = IB_MTU_256;
3276 qp_attr->path_mig_state = IB_MIG_ARMED;
3277 qp_attr->qkey = QKEY_VAL;
3278 qp_attr->rq_psn = 0;
3279 qp_attr->sq_psn = 0;
3280 qp_attr->dest_qp_num = 1;
3281 qp_attr->qp_access_flags = 6;
3282
3283 qp_attr->pkey_index = roce_get_field(context.qp1c_bytes_20,
3284 QP1C_BYTES_20_PKEY_IDX_M,
3285 QP1C_BYTES_20_PKEY_IDX_S);
3286 qp_attr->port_num = hr_qp->port + 1;
3287 qp_attr->sq_draining = 0;
3288 qp_attr->max_rd_atomic = 0;
3289 qp_attr->max_dest_rd_atomic = 0;
3290 qp_attr->min_rnr_timer = 0;
3291 qp_attr->timeout = 0;
3292 qp_attr->retry_cnt = 0;
3293 qp_attr->rnr_retry = 0;
3294 qp_attr->alt_timeout = 0;
3295
3296 done:
3297 qp_attr->cur_qp_state = qp_attr->qp_state;
3298 qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
3299 qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
3300 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
3301 qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
3302 qp_attr->cap.max_inline_data = 0;
3303 qp_init_attr->cap = qp_attr->cap;
3304 qp_init_attr->create_flags = 0;
3305
3306 mutex_unlock(&hr_qp->mutex);
3307
3308 return 0;
3309 }
3310
3311 static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3312 int qp_attr_mask,
3313 struct ib_qp_init_attr *qp_init_attr)
3314 {
3315 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3316 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3317 struct device *dev = &hr_dev->pdev->dev;
3318 struct hns_roce_qp_context *context;
3319 int tmp_qp_state = 0;
3320 int ret = 0;
3321 int state;
3322
3323 context = kzalloc(sizeof(*context), GFP_KERNEL);
3324 if (!context)
3325 return -ENOMEM;
3326
3327 memset(qp_attr, 0, sizeof(*qp_attr));
3328 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
3329
3330 mutex_lock(&hr_qp->mutex);
3331
3332 if (hr_qp->state == IB_QPS_RESET) {
3333 qp_attr->qp_state = IB_QPS_RESET;
3334 goto done;
3335 }
3336
3337 ret = hns_roce_v1_query_qpc(hr_dev, hr_qp, context);
3338 if (ret) {
3339 dev_err(dev, "query qpc error\n");
3340 ret = -EINVAL;
3341 goto out;
3342 }

	state = roce_get_field(context->qpc_bytes_144,
			       QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
			       QP_CONTEXT_QPC_BYTES_144_QP_STATE_S);
	tmp_qp_state = (int)to_ib_qp_state((enum hns_roce_qp_state)state);
	if (tmp_qp_state == -1) {
		dev_err(dev, "to_ib_qp_state error\n");
		ret = -EINVAL;
		goto out;
	}
	hr_qp->state = (u8)tmp_qp_state;
	qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
	qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->qpc_bytes_48,
						QP_CONTEXT_QPC_BYTES_48_MTU_M,
						QP_CONTEXT_QPC_BYTES_48_MTU_S);
	qp_attr->path_mig_state = IB_MIG_ARMED;
	if (hr_qp->ibqp.qp_type == IB_QPT_UD)
		qp_attr->qkey = QKEY_VAL;

	qp_attr->rq_psn = roce_get_field(context->qpc_bytes_88,
					 QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M,
					 QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S);
	qp_attr->sq_psn = (u32)roce_get_field(context->qpc_bytes_164,
					      QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M,
					      QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S);
	qp_attr->dest_qp_num = roce_get_field(context->qpc_bytes_36,
					      QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
					      QP_CONTEXT_QPC_BYTES_36_DEST_QP_S);
	qp_attr->qp_access_flags = ((roce_get_bit(context->qpc_bytes_4,
			QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S)) << 2) |
				   ((roce_get_bit(context->qpc_bytes_4,
			QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S)) << 1) |
				   ((roce_get_bit(context->qpc_bytes_4,
			QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S)) << 3);

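	/* Connected QPs carry their address vector in the QPC; rebuild
	 * the ah_attr (service level plus GRH fields) from it.
	 */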
	if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
	    hr_qp->ibqp.qp_type == IB_QPT_UC) {
		struct ib_global_route *grh =
			rdma_ah_retrieve_grh(&qp_attr->ah_attr);

		rdma_ah_set_sl(&qp_attr->ah_attr,
			       roce_get_field(context->qpc_bytes_156,
					      QP_CONTEXT_QPC_BYTES_156_SL_M,
					      QP_CONTEXT_QPC_BYTES_156_SL_S));
		rdma_ah_set_ah_flags(&qp_attr->ah_attr, IB_AH_GRH);
		grh->flow_label =
			roce_get_field(context->qpc_bytes_48,
				       QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
				       QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S);
		grh->sgid_index =
			roce_get_field(context->qpc_bytes_36,
				       QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
				       QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S);
		grh->hop_limit =
			roce_get_field(context->qpc_bytes_44,
				       QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
				       QP_CONTEXT_QPC_BYTES_44_HOPLMT_S);
		grh->traffic_class =
			roce_get_field(context->qpc_bytes_48,
				       QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
				       QP_CONTEXT_QPC_BYTES_48_TCLASS_S);

		memcpy(grh->dgid.raw, context->dgid, sizeof(grh->dgid.raw));
	}

	qp_attr->pkey_index = roce_get_field(context->qpc_bytes_12,
				     QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
				     QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S);
	qp_attr->port_num = hr_qp->port + 1;
	qp_attr->sq_draining = 0;
	qp_attr->max_rd_atomic = roce_get_field(context->qpc_bytes_156,
			QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
			QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S);
	qp_attr->max_dest_rd_atomic = roce_get_field(context->qpc_bytes_32,
			QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
			QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S);
	qp_attr->min_rnr_timer = (u8)roce_get_field(context->qpc_bytes_24,
			QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
			QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S);
	qp_attr->timeout = (u8)roce_get_field(context->qpc_bytes_156,
			QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
			QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S);
	qp_attr->retry_cnt = roce_get_field(context->qpc_bytes_148,
			QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
			QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S);
	qp_attr->rnr_retry = context->rnr_retry;

done:
	qp_attr->cur_qp_state = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;

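	/* The SQ of a userspace QP is managed entirely in userspace, so
	 * report zero send capabilities for it.
	 */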
	if (!ibqp->uobject) {
		qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
		qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
	} else {
		qp_attr->cap.max_send_wr = 0;
		qp_attr->cap.max_send_sge = 0;
	}

	qp_init_attr->cap = qp_attr->cap;

out:
	mutex_unlock(&hr_qp->mutex);
	kfree(context);
	return ret;
}

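/*
 * QP0/QP1 are kept in the SQP context registers while all other QPs live
 * in the QPC table, so dispatch on the doorbell QPN to the matching query
 * helper.
 */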
int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);

	return hr_qp->doorbell_qpn <= 1 ?
		hns_roce_v1_q_sqp(ibqp, qp_attr, qp_attr_mask, qp_init_attr) :
		hns_roce_v1_q_qp(ibqp, qp_attr, qp_attr_mask, qp_init_attr);
}

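/*
 * Wait for the hardware to finish processing all send doorbells issued
 * for a QP before it is torn down. Stage 1 waits for the doorbell send
 * pointer to catch up with the issue pointer; stage 2 waits for the
 * doorbell invalidate count to advance past the value recorded in stage 1.
 * A timeout is not treated as an error: 0 is returned with *wait_stage
 * left short of HNS_ROCE_V1_DB_WAIT_OK so the caller can retry later.
 */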
static int check_qp_db_process_status(struct hns_roce_dev *hr_dev,
				      struct hns_roce_qp *hr_qp,
				      u32 sdb_issue_ptr,
				      u32 *sdb_inv_cnt,
				      u32 *wait_stage)
{
	struct device *dev = &hr_dev->pdev->dev;
	u32 sdb_retry_cnt, old_retry;
	u32 sdb_send_ptr, old_send;
	u32 success_flags = 0;
	u32 cur_cnt, old_cnt;
	unsigned long end;
	u32 send_ptr;
	u32 inv_cnt;
	u32 tsp_st;

	if (*wait_stage > HNS_ROCE_V1_DB_STAGE2 ||
	    *wait_stage < HNS_ROCE_V1_DB_STAGE1) {
		dev_err(dev, "QP(0x%lx) db status wait stage(%d) error!\n",
			hr_qp->qpn, *wait_stage);
		return -EINVAL;
	}

	/* Calculate the total timeout for the entire verification process */
	end = msecs_to_jiffies(HNS_ROCE_V1_CHECK_DB_TIMEOUT_MSECS) + jiffies;

	if (*wait_stage == HNS_ROCE_V1_DB_STAGE1) {
		/* Poll the doorbell send pointer until the hardware has
		 * processed all issued doorbells.
		 */
		sdb_send_ptr = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
		while (roce_hw_index_cmp_lt(sdb_send_ptr, sdb_issue_ptr,
					    ROCEE_SDB_PTR_CMP_BITS)) {
			if (!time_before(jiffies, end)) {
				dev_dbg(dev, "QP(0x%lx) db process stage1 timeout. issue 0x%x send 0x%x.\n",
					hr_qp->qpn, sdb_issue_ptr,
					sdb_send_ptr);
				return 0;
			}

			msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);
			sdb_send_ptr = roce_read(hr_dev,
						 ROCEE_SDB_SEND_PTR_REG);
		}

		if (roce_get_field(sdb_issue_ptr,
				   ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_M,
				   ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S) ==
		    roce_get_field(sdb_send_ptr,
				   ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
				   ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S)) {
			old_send = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
			old_retry = roce_read(hr_dev, ROCEE_SDB_RETRY_CNT_REG);

			do {
				tsp_st = roce_read(hr_dev, ROCEE_TSP_BP_ST_REG);
				if (roce_get_bit(tsp_st,
					ROCEE_TSP_BP_ST_QH_FIFO_ENTRY_S) == 1) {
					*wait_stage = HNS_ROCE_V1_DB_WAIT_OK;
					return 0;
				}

				if (!time_before(jiffies, end)) {
					dev_dbg(dev, "QP(0x%lx) db process stage1 timeout when send ptr equals issue ptr.\n"
						     "issue 0x%x send 0x%x.\n",
						hr_qp->qpn, sdb_issue_ptr,
						sdb_send_ptr);
					return 0;
				}

				msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);

				sdb_send_ptr = roce_read(hr_dev,
						ROCEE_SDB_SEND_PTR_REG);
				sdb_retry_cnt = roce_read(hr_dev,
						ROCEE_SDB_RETRY_CNT_REG);
				cur_cnt = roce_get_field(sdb_send_ptr,
					ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
					ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
					roce_get_field(sdb_retry_cnt,
					ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
					ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
				if (!roce_get_bit(tsp_st,
					ROCEE_CNT_CLR_CE_CNT_CLR_CE_S)) {
					old_cnt = roce_get_field(old_send,
					ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
					ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
					roce_get_field(old_retry,
					ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
					ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
					if (cur_cnt - old_cnt > SDB_ST_CMP_VAL)
						success_flags = 1;
				} else {
					old_cnt = roce_get_field(old_send,
					ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
					ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S);
					if (cur_cnt - old_cnt >
					    SDB_ST_CMP_VAL) {
						success_flags = 1;
					} else {
						send_ptr =
							roce_get_field(old_send,
						ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
						ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
							roce_get_field(sdb_retry_cnt,
						ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
						ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
						roce_set_field(old_send,
						ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
						ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S,
							send_ptr);
					}
				}
			} while (!success_flags);
		}

		*wait_stage = HNS_ROCE_V1_DB_STAGE2;

		/* Record the current doorbell invalidate count */
		*sdb_inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
		dev_dbg(dev, "QP(0x%lx) db process stage2. inv cnt = 0x%x.\n",
			hr_qp->qpn, *sdb_inv_cnt);
	}

	if (*wait_stage == HNS_ROCE_V1_DB_STAGE2) {
		/* Poll the doorbell invalidate count until the hardware
		 * has advanced it past the value recorded in stage 1.
		 */
		inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
		while (roce_hw_index_cmp_lt(inv_cnt,
					    *sdb_inv_cnt + SDB_INV_CNT_OFFSET,
					    ROCEE_SDB_CNT_CMP_BITS)) {
			if (!time_before(jiffies, end)) {
				dev_dbg(dev, "QP(0x%lx) db process stage2 timeout. inv cnt 0x%x.\n",
					hr_qp->qpn, inv_cnt);
				return 0;
			}

			msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);
			inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
		}

		*wait_stage = HNS_ROCE_V1_DB_WAIT_OK;
	}

	return 0;
}

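/*
 * Drive a QP that is not already in RESET through the ERR state, wait for
 * its doorbells to drain, then move it to RESET. If the drain times out,
 * *is_timeout is set so the caller can finish the destroy from a
 * workqueue instead.
 */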
static int check_qp_reset_state(struct hns_roce_dev *hr_dev,
				struct hns_roce_qp *hr_qp,
				struct hns_roce_qp_work *qp_work_entry,
				int *is_timeout)
{
	struct device *dev = &hr_dev->pdev->dev;
	u32 sdb_issue_ptr;
	int ret;

	if (hr_qp->state != IB_QPS_RESET) {
		/* Set the QP to ERR and wait for the hardware to finish
		 * processing all of its doorbells.
		 */
		ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
					    IB_QPS_ERR);
		if (ret) {
			dev_err(dev, "Modify QP(0x%lx) to ERR failed!\n",
				hr_qp->qpn);
			return ret;
		}

		/* Record the issued doorbell pointer */
		sdb_issue_ptr = roce_read(hr_dev, ROCEE_SDB_ISSUE_PTR_REG);
		qp_work_entry->sdb_issue_ptr = sdb_issue_ptr;
		qp_work_entry->db_wait_stage = HNS_ROCE_V1_DB_STAGE1;

		/* Poll the doorbell status until the hardware has processed
		 * all issued doorbells.
		 */
		ret = check_qp_db_process_status(hr_dev, hr_qp, sdb_issue_ptr,
						 &qp_work_entry->sdb_inv_cnt,
						 &qp_work_entry->db_wait_stage);
		if (ret) {
			dev_err(dev, "Check QP(0x%lx) db process status failed!\n",
				hr_qp->qpn);
			return ret;
		}

		if (qp_work_entry->db_wait_stage != HNS_ROCE_V1_DB_WAIT_OK) {
			qp_work_entry->sche_cnt = 0;
			*is_timeout = 1;
			return 0;
		}

		/* Modify the QP to RESET before destroying it */
		ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
					    IB_QPS_RESET);
		if (ret) {
			dev_err(dev, "Modify QP(0x%lx) to RST failed!\n",
				hr_qp->qpn);
			return ret;
		}
	}

	return 0;
}

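/*
 * Deferred QP destroy: re-check the doorbell drain (requeueing the work
 * while it is still pending), then reset the QP and free its resources.
 */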
static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
{
	struct hns_roce_qp_work *qp_work_entry;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_dev *hr_dev;
	struct hns_roce_qp *hr_qp;
	struct device *dev;
	unsigned long qpn;
	int ret;

	qp_work_entry = container_of(work, struct hns_roce_qp_work, work);
	hr_dev = to_hr_dev(qp_work_entry->ib_dev);
	dev = &hr_dev->pdev->dev;
	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
	hr_qp = qp_work_entry->qp;
	qpn = hr_qp->qpn;

	dev_dbg(dev, "Schedule destroy QP(0x%lx) work.\n", qpn);

	qp_work_entry->sche_cnt++;

	/* Poll the doorbell status until the hardware has processed
	 * all issued doorbells.
	 */
	ret = check_qp_db_process_status(hr_dev, hr_qp,
					 qp_work_entry->sdb_issue_ptr,
					 &qp_work_entry->sdb_inv_cnt,
					 &qp_work_entry->db_wait_stage);
	if (ret) {
		dev_err(dev, "Check QP(0x%lx) db process status failed!\n",
			qpn);
		return;
	}

	if (qp_work_entry->db_wait_stage != HNS_ROCE_V1_DB_WAIT_OK &&
	    priv->des_qp.requeue_flag) {
		queue_work(priv->des_qp.qp_wq, work);
		return;
	}

	/* Modify the QP to RESET before destroying it */
	ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
				    IB_QPS_RESET);
	if (ret) {
		dev_err(dev, "Modify QP(0x%lx) to RST failed!\n", qpn);
		return;
	}

	hns_roce_qp_remove(hr_dev, hr_qp);
	hns_roce_qp_free(hr_dev, hr_qp);

	if (hr_qp->ibqp.qp_type == IB_QPT_RC) {
		/* RC QP, release QPN */
		hns_roce_release_range_qp(hr_dev, qpn, 1);
		kfree(hr_qp);
	} else {
		kfree(hr_to_hr_sqp(hr_qp));
	}

	kfree(qp_work_entry);

	dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", qpn);
}

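/*
 * Destroy a QP. If the doorbell drain in check_qp_reset_state() timed
 * out, freeing of the QP structure is deferred to a workqueue item
 * instead of being done synchronously here.
 */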
int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_qp_work qp_work_entry;
	struct hns_roce_qp_work *qp_work;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_cq *send_cq, *recv_cq;
	int is_user = !!ibqp->pd->uobject;
	int is_timeout = 0;
	int ret;

	ret = check_qp_reset_state(hr_dev, hr_qp, &qp_work_entry, &is_timeout);
	if (ret) {
		dev_err(dev, "QP reset state check failed(%d)!\n", ret);
		return ret;
	}

	send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
	recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);

	hns_roce_lock_cqs(send_cq, recv_cq);
	if (!is_user) {
		__hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
				       to_hr_srq(hr_qp->ibqp.srq) : NULL);
		if (send_cq != recv_cq)
			__hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL);
	}
	hns_roce_unlock_cqs(send_cq, recv_cq);

	if (!is_timeout) {
		hns_roce_qp_remove(hr_dev, hr_qp);
		hns_roce_qp_free(hr_dev, hr_qp);

		/* RC QP, release QPN */
		if (hr_qp->ibqp.qp_type == IB_QPT_RC)
			hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
	}

	hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);

	if (is_user) {
		ib_umem_release(hr_qp->umem);
	} else {
		kfree(hr_qp->sq.wrid);
		kfree(hr_qp->rq.wrid);

		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
	}

	if (!is_timeout) {
		if (hr_qp->ibqp.qp_type == IB_QPT_RC)
			kfree(hr_qp);
		else
			kfree(hr_to_hr_sqp(hr_qp));
	} else {
		qp_work = kzalloc(sizeof(*qp_work), GFP_KERNEL);
		if (!qp_work)
			return -ENOMEM;

		INIT_WORK(&qp_work->work, hns_roce_v1_destroy_qp_work_fn);
		qp_work->ib_dev = &hr_dev->ib_dev;
		qp_work->qp = hr_qp;
		qp_work->db_wait_stage = qp_work_entry.db_wait_stage;
		qp_work->sdb_issue_ptr = qp_work_entry.sdb_issue_ptr;
		qp_work->sdb_inv_cnt = qp_work_entry.sdb_inv_cnt;
		qp_work->sche_cnt = qp_work_entry.sche_cnt;

		priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
		queue_work(priv->des_qp.qp_wq, &qp_work->work);
		dev_dbg(dev, "Begin destroy QP(0x%lx) work.\n", hr_qp->qpn);
	}

	return 0;
}

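/*
 * Destroy a CQ. The hardware may still be writing back CQEs for it, so
 * wait for the write-back to settle before releasing the CQ buffer.
 */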
int hns_roce_v1_destroy_cq(struct ib_cq *ibcq)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
	struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
	struct device *dev = &hr_dev->pdev->dev;
	u32 cqe_cnt_ori;
	u32 cqe_cnt_cur;
	u32 cq_buf_size;
	int wait_time = 0;
	int ret = 0;

	hns_roce_free_cq(hr_dev, hr_cq);

	/*
	 * Before freeing the CQ buffer, ensure that all outstanding CQEs
	 * have been written back by watching the CQE counter.
	 */
	cqe_cnt_ori = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
	while (1) {
		if (roce_read(hr_dev, ROCEE_CAEP_CQE_WCMD_EMPTY) &
		    HNS_ROCE_CQE_WCMD_EMPTY_BIT)
			break;

		cqe_cnt_cur = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
		if ((cqe_cnt_cur - cqe_cnt_ori) >= HNS_ROCE_MIN_CQE_CNT)
			break;

		msleep(HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS);
		if (wait_time > HNS_ROCE_MAX_FREE_CQ_WAIT_CNT) {
			dev_warn(dev, "Destroy cq 0x%lx timeout!\n",
				 hr_cq->cqn);
			ret = -ETIMEDOUT;
			break;
		}
		wait_time++;
	}

	hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);

	if (ibcq->uobject) {
		ib_umem_release(hr_cq->umem);
	} else {
		/* Free the buffer that backs the CQ */
		cq_buf_size = (ibcq->cqe + 1) * hr_dev->caps.cq_entry_sz;
		hns_roce_buf_free(hr_dev, cq_buf_size, &hr_cq->hr_buf.hr_buf);
	}

	kfree(hr_cq);

	return ret;
}

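/* Hardware-specific verb implementations hooked up to the hns_roce core */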
struct hns_roce_v1_priv hr_v1_priv;

struct hns_roce_hw hns_roce_hw_v1 = {
	.reset = hns_roce_v1_reset,
	.hw_profile = hns_roce_v1_profile,
	.hw_init = hns_roce_v1_init,
	.hw_exit = hns_roce_v1_exit,
	.set_gid = hns_roce_v1_set_gid,
	.set_mac = hns_roce_v1_set_mac,
	.set_mtu = hns_roce_v1_set_mtu,
	.write_mtpt = hns_roce_v1_write_mtpt,
	.write_cqc = hns_roce_v1_write_cqc,
	.clear_hem = hns_roce_v1_clear_hem,
	.modify_qp = hns_roce_v1_modify_qp,
	.query_qp = hns_roce_v1_query_qp,
	.destroy_qp = hns_roce_v1_destroy_qp,
	.post_send = hns_roce_v1_post_send,
	.post_recv = hns_roce_v1_post_recv,
	.req_notify_cq = hns_roce_v1_req_notify_cq,
	.poll_cq = hns_roce_v1_poll_cq,
	.dereg_mr = hns_roce_v1_dereg_mr,
	.destroy_cq = hns_roce_v1_destroy_cq,
	.priv = &hr_v1_priv,
};
