/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include <linux/qed/qed_rdma_if.h>
#include "qed_rdma.h"
#include "qed_roce.h"
#include "qed_sp.h"

static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid);

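/* Dispatch a RoCE async completion from the FW event ring. A
 * DESTROY_QP_DONE event releases the real icid that had been handed to
 * the FW; every other event is forwarded to the upper-layer driver via
 * the affiliated_event callback registered in events.context.
 */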
static int
qed_roce_async_event(struct qed_hwfn *p_hwfn,
		     u8 fw_event_code,
		     u16 echo, union event_ring_data *data, u8 fw_return_code)
{
	if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
		u16 icid =
		    (u16)le32_to_cpu(data->rdma_data.rdma_destroy_qp_data.cid);

		/* icid release in this async event can occur only if the icid
		 * was offloaded to the FW. In case it wasn't offloaded this is
		 * handled in qed_roce_sp_destroy_qp.
		 */
		qed_roce_free_real_icid(p_hwfn, icid);
	} else {
		struct qed_rdma_events *events = &p_hwfn->p_rdma_info->events;

		events->affiliated_event(p_hwfn->p_rdma_info->events.context,
					 fw_event_code,
					 (void *)&data->rdma_data.async_handle);
	}

	return 0;
}

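/* Wait for any outstanding async destroy-QP completions (at most about
 * 20 * 100 ms = 2 s) before unregistering the RoCE async callback; past
 * that point we give up waiting and proceed anyway.
 */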
void qed_roce_stop(struct qed_hwfn *p_hwfn)
{
	struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map;
	int wait_count = 0;

	/* when destroying a RoCE QP the control is returned to the user after
	 * the synchronous part. The asynchronous part may take a little longer.
	 * We delay for a short while if an async destroy QP is still expected.
	 * Beyond the added delay we clear the bitmap anyway.
	 */
	while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
		msleep(100);
		if (wait_count++ > 20) {
			DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
			break;
		}
	}
	qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ROCE);
}

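/* Copy the QP's source/destination GIDs into a ramrod in little-endian
 * dword order. For RoCE v2 over IPv4 the address occupies the highest
 * dword and the rest are zeroed, e.g. src_gid = {0, 0, 0, ipv4_addr};
 * for RoCE v1 and v2-IPv6 the 128-bit GID maps dword-for-dword.
 */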
static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
			       __le32 *dst_gid)
{
	u32 i;

	if (qp->roce_mode == ROCE_V2_IPV4) {
		/* The IPv4 addresses shall be aligned to the highest word.
		 * The lower words must be zero.
		 */
		memset(src_gid, 0, sizeof(union qed_gid));
		memset(dst_gid, 0, sizeof(union qed_gid));
		src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr);
		dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr);
	} else {
		/* GIDs and IPv6 addresses coincide in location and size */
		for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) {
			src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]);
			dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]);
		}
	}
}

static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
{
	switch (roce_mode) {
	case ROCE_V1:
		return PLAIN_ROCE;
	case ROCE_V2_IPV4:
		return RROCE_IPV4;
	case ROCE_V2_IPV6:
		return RROCE_IPV6;
	default:
		return MAX_ROCE_FLAVOR;
	}
}

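/* A RoCE QP consumes two adjacent cids: the even one for the responder
 * and the odd one (cid + 1) for the requester. Release both halves of
 * the pair under the rdma_info lock.
 */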
void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
{
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
	qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid + 1);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

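/* Allocate the responder/requester cid pair for a new QP. Two ids are
 * taken from cid_map and must come out adjacent (requester = responder
 * + 1); ILT lines backing both contexts are allocated on demand, and
 * only the responder's icid is returned through *cid.
 */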
int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
{
	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
	u32 responder_icid;
	u32 requester_icid;
	int rc;

	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
				    &responder_icid);
	if (rc) {
		spin_unlock_bh(&p_rdma_info->lock);
		return rc;
	}

	rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
				    &requester_icid);

	spin_unlock_bh(&p_rdma_info->lock);
	if (rc)
		goto err;

	/* the two icids should be adjacent */
	if ((requester_icid - responder_icid) != 1) {
		DP_NOTICE(p_hwfn, "Failed to allocate two adjacent qps\n");
		rc = -EINVAL;
		goto err;
	}

	responder_icid += qed_cxt_get_proto_cid_start(p_hwfn,
						      p_rdma_info->proto);
	requester_icid += qed_cxt_get_proto_cid_start(p_hwfn,
						      p_rdma_info->proto);

	/* If these icids require a new ILT line allocate DMA-able context for
	 * an ILT page
	 */
	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, responder_icid);
	if (rc)
		goto err;

	rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, requester_icid);
	if (rc)
		goto err;

	*cid = (u16)responder_icid;
	return rc;

err:
	spin_lock_bh(&p_rdma_info->lock);
	qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, responder_icid);
	qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, requester_icid);

	spin_unlock_bh(&p_rdma_info->lock);
	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "Allocate CID - failed, rc = %d\n", rc);
	return rc;
}

static void qed_roce_set_real_cid(struct qed_hwfn *p_hwfn, u32 cid)
{
	spin_lock_bh(&p_hwfn->p_rdma_info->lock);
	qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, cid);
	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

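/* Offload the responder side of a QP to the FW: allocate one page for
 * the qp->irq ring (judging by the irq_pbl_addr wiring this is a DMA
 * ring for inbound RDMA reads, not an interrupt line), then post a
 * ROCE_RAMROD_CREATE_QP ramrod on the responder icid carrying the RQ
 * PBL, GIDs, MACs and the rest of the responder context.
 */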
static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp)
{
	struct roce_create_qp_resp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	enum roce_flavor roce_flavor;
	struct qed_spq_entry *p_ent;
	u16 regular_latency_queue;
	enum protocol_type proto;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	/* Allocate DMA-able memory for IRQ */
	qp->irq_num_pages = 1;
	qp->irq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				     RDMA_RING_PAGE_SIZE,
				     &qp->irq_phys_addr, GFP_KERNEL);
	if (!qp->irq) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn,
			  "qed create responder failed: cannot allocate memory (irq). rc = %d\n",
			  rc);
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.roce_create_qp_resp;

	p_ramrod->flags = 0;

	roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
		  qp->incoming_rdma_read_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
		  qp->incoming_rdma_write_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN,
		  qp->incoming_atomic_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
		  qp->e2e_flow_control_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN,
		  qp->fmr_and_reserved_lkey);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
		  qp->min_rnr_nak_timer);

	p_ramrod->max_ird = qp->max_rd_atomic_resp;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->irq_num_pages = qp->irq_num_pages;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	p_ramrod->initial_psn = cpu_to_le32(qp->rq_psn);
	p_ramrod->pd = cpu_to_le16(qp->pd);
	p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages);
	DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr);
	DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
	p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
	p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
	p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
	p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
				       qp->rq_cq_id);

	regular_latency_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);

	p_ramrod->regular_latency_phy_queue =
	    cpu_to_le16(regular_latency_queue);
	p_ramrod->low_latency_phy_queue =
	    cpu_to_le16(regular_latency_queue);

	p_ramrod->dpi = cpu_to_le16(qp->dpi);

	qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
	qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);

	p_ramrod->udp_src_port = qp->udp_src_port;
	p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
	p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
	p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);

	p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
				     qp->stats_queue;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
		   "rc = %d regular physical queue = 0x%x\n", rc,
		   regular_latency_queue);

	if (rc)
		goto err;

	qp->resp_offloaded = true;
	qp->cq_prod = 0;

	proto = p_hwfn->p_rdma_info->proto;
	qed_roce_set_real_cid(p_hwfn, qp->icid -
			      qed_cxt_get_proto_cid_start(p_hwfn, proto));

	return rc;

err:
	DP_NOTICE(p_hwfn, "create responder - failed, rc = %d\n", rc);
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->irq, qp->irq_phys_addr);

	return rc;
}

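/* Requester counterpart: one page for the ORQ ring and a
 * ROCE_RAMROD_CREATE_QP ramrod on the requester icid (icid + 1)
 * carrying the SQ PBL and the requester-side retry/timeout parameters.
 */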
static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp)
{
	struct roce_create_qp_req_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	enum roce_flavor roce_flavor;
	struct qed_spq_entry *p_ent;
	u16 regular_latency_queue;
	enum protocol_type proto;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	/* Allocate DMA-able memory for ORQ */
	qp->orq_num_pages = 1;
	qp->orq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				     RDMA_RING_PAGE_SIZE,
				     &qp->orq_phys_addr, GFP_KERNEL);
	if (!qp->orq) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn,
			  "qed create requester failed: cannot allocate memory (orq). rc = %d\n",
			  rc);
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid + 1;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_RAMROD_CREATE_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.roce_create_qp_req;

	p_ramrod->flags = 0;

	roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN,
		  qp->fmr_and_reserved_lkey);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
		  qp->rnr_retry_cnt);

	p_ramrod->max_ord = qp->max_rd_atomic_req;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->orq_num_pages = qp->orq_num_pages;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
	p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	p_ramrod->initial_psn = cpu_to_le32(qp->sq_psn);
	p_ramrod->pd = cpu_to_le16(qp->pd);
	p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages);
	DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr);
	DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, qp->orq_phys_addr);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
	p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
	p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
	p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
	p_ramrod->cq_cid =
	    cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);

	regular_latency_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);

	p_ramrod->regular_latency_phy_queue =
	    cpu_to_le16(regular_latency_queue);
	p_ramrod->low_latency_phy_queue =
	    cpu_to_le16(regular_latency_queue);

	p_ramrod->dpi = cpu_to_le16(qp->dpi);

	qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
	qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);

	p_ramrod->udp_src_port = qp->udp_src_port;
	p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
	p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
				     qp->stats_queue;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);

	if (rc)
		goto err;

	qp->req_offloaded = true;
	proto = p_hwfn->p_rdma_info->proto;
	qed_roce_set_real_cid(p_hwfn,
			      qp->icid + 1 -
			      qed_cxt_get_proto_cid_start(p_hwfn, proto));

	return rc;

err:
	DP_NOTICE(p_hwfn, "Create requester - failed, rc = %d\n", rc);
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->orq, qp->orq_phys_addr);
	return rc;
}

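/* Modify the responder side via a ROCE_EVENT_MODIFY_QP ramrod. Bits in
 * modify_flags select which fields the FW should apply; moving to ERR
 * is a no-op if the responder was never offloaded.
 */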
static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp,
					bool move_to_err, u32 modify_flags)
{
	struct roce_modify_qp_resp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (move_to_err && !qp->resp_offloaded)
		return 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_EVENT_MODIFY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc) {
		DP_NOTICE(p_hwfn, "rc = %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.roce_modify_qp_resp;

	p_ramrod->flags = 0;

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
		  qp->incoming_rdma_read_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
		  qp->incoming_rdma_write_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN,
		  qp->incoming_atomic_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
		  qp->e2e_flow_control_en);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG,
		  GET_FIELD(modify_flags,
			    QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG,
		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG,
		  GET_FIELD(modify_flags,
			    QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER));

	p_ramrod->fields = 0;
	SET_FIELD(p_ramrod->fields,
		  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
		  qp->min_rnr_nak_timer);

	p_ramrod->max_ird = qp->max_rd_atomic_resp;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify responder, rc = %d\n", rc);
	return rc;
}

static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
					struct qed_rdma_qp *qp,
					bool move_to_sqd,
					bool move_to_err, u32 modify_flags)
{
	struct roce_modify_qp_req_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (move_to_err && !(qp->req_offloaded))
		return 0;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid + 1;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_EVENT_MODIFY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc) {
		DP_NOTICE(p_hwfn, "rc = %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.roce_modify_qp_req;

	p_ramrod->flags = 0;

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG, move_to_sqd);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY,
		  qp->sqd_async);

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG,
		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG,
		  GET_FIELD(modify_flags,
			    QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG,
		  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT));

	SET_FIELD(p_ramrod->flags,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG,
		  GET_FIELD(modify_flags,
			    QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT));

	p_ramrod->fields = 0;
	SET_FIELD(p_ramrod->fields,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);

	SET_FIELD(p_ramrod->fields,
		  ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
		  qp->rnr_retry_cnt);

	p_ramrod->max_ord = qp->max_rd_atomic_req;
	p_ramrod->traffic_class = qp->traffic_class_tos;
	p_ramrod->hop_limit = qp->hop_limit_ttl;
	p_ramrod->p_key = cpu_to_le16(qp->pkey);
	p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
	p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
	p_ramrod->mtu = cpu_to_le16(qp->mtu);
	qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
	rc = qed_spq_post(p_hwfn, p_ent, NULL);

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify requester, rc = %d\n", rc);
	return rc;
}

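/* Tear down the responder half. If it was never offloaded only the cid
 * pair is returned to the bitmap; otherwise a ROCE_RAMROD_DESTROY_QP
 * ramrod is posted and num_invalidated_mw plus the final cq_prod are
 * read back from the DMA'd output params.
 */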
static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
					    struct qed_rdma_qp *qp,
					    u32 *num_invalidated_mw,
					    u32 *cq_prod)
{
	struct roce_destroy_qp_resp_output_params *p_ramrod_res;
	struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	dma_addr_t ramrod_res_phys;
	int rc;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	*num_invalidated_mw = 0;
	*cq_prod = qp->cq_prod;

	if (!qp->resp_offloaded) {
		/* If a responder was never offloaded, we need to free the cids
		 * allocated in create_qp as a FW async event will never arrive
		 */
		u32 cid;

		cid = qp->icid -
		      qed_cxt_get_proto_cid_start(p_hwfn,
						  p_hwfn->p_rdma_info->proto);
		qed_roce_free_cid_pair(p_hwfn, (u16)cid);

		return 0;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ROCE_RAMROD_DESTROY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp;

	p_ramrod_res = (struct roce_destroy_qp_resp_output_params *)
	    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
			       &ramrod_res_phys, GFP_KERNEL);

	if (!p_ramrod_res) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn,
			  "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
			  rc);
		return rc;
	}

	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	*num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw);
	*cq_prod = le32_to_cpu(p_ramrod_res->cq_prod);
	qp->cq_prod = *cq_prod;

	/* Free IRQ - only if ramrod succeeded, in case FW is still using it */
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->irq, qp->irq_phys_addr);

	qp->resp_offloaded = false;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy responder, rc = %d\n", rc);

err:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  sizeof(struct roce_destroy_qp_resp_output_params),
			  p_ramrod_res, ramrod_res_phys);

	return rc;
}

static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
					    struct qed_rdma_qp *qp,
					    u32 *num_bound_mw)
{
	struct roce_destroy_qp_req_output_params *p_ramrod_res;
	struct roce_destroy_qp_req_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	dma_addr_t ramrod_res_phys;
	int rc = -ENOMEM;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);

	if (!qp->req_offloaded)
		return 0;

	p_ramrod_res = (struct roce_destroy_qp_req_output_params *)
		       dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
					  sizeof(*p_ramrod_res),
					  &ramrod_res_phys, GFP_KERNEL);
	if (!p_ramrod_res) {
		DP_NOTICE(p_hwfn,
			  "qed destroy requester failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid + 1;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err;

	p_ramrod = &p_ent->ramrod.roce_destroy_qp_req;
	DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err;

	*num_bound_mw = le32_to_cpu(p_ramrod_res->num_bound_mw);

	/* Free ORQ - only if ramrod succeeded, in case FW is still using it */
	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
			  qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
			  qp->orq, qp->orq_phys_addr);

	qp->req_offloaded = false;

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy requester, rc = %d\n", rc);

err:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
			  p_ramrod_res, ramrod_res_phys);

	return rc;
}

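/* Query a QP by sending up to two ROCE_RAMROD_QUERY_QP ramrods: one on
 * the responder cid for the RQ PSN and error state, then (if the
 * requester was offloaded) one on the requester cid for the SQ PSN,
 * error and SQ-draining state. A QP that was never offloaded is
 * answered from the driver's cached state.
 */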
int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
		      struct qed_rdma_qp *qp,
		      struct qed_rdma_query_qp_out_params *out_params)
{
	struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
	struct roce_query_qp_req_output_params *p_req_ramrod_res;
	struct roce_query_qp_resp_ramrod_data *p_resp_ramrod;
	struct roce_query_qp_req_ramrod_data *p_req_ramrod;
	struct qed_sp_init_data init_data;
	dma_addr_t resp_ramrod_res_phys;
	dma_addr_t req_ramrod_res_phys;
	struct qed_spq_entry *p_ent;
	bool rq_err_state;
	bool sq_err_state;
	bool sq_draining;
	int rc = -ENOMEM;

	if ((!(qp->resp_offloaded)) && (!(qp->req_offloaded))) {
		/* We can't send a ramrod to the FW since this QP wasn't
		 * offloaded to the FW yet
		 */
		out_params->draining = false;
		out_params->rq_psn = qp->rq_psn;
		out_params->sq_psn = qp->sq_psn;
		out_params->state = qp->cur_state;

		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "No QPs as no offload\n");
		return 0;
	}

	if (!(qp->resp_offloaded)) {
		DP_NOTICE(p_hwfn,
			  "The responder's qp should be offloaded before requester's\n");
		return -EINVAL;
	}

	/* Send a query responder ramrod to FW to get RQ-PSN and state */
	p_resp_ramrod_res = (struct roce_query_qp_resp_output_params *)
	    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
			       sizeof(*p_resp_ramrod_res),
			       &resp_ramrod_res_phys, GFP_KERNEL);
	if (!p_resp_ramrod_res) {
		DP_NOTICE(p_hwfn,
			  "qed query qp failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qp->icid;
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err_resp;

	p_resp_ramrod = &p_ent->ramrod.roce_query_qp_resp;
	DMA_REGPAIR_LE(p_resp_ramrod->output_params_addr, resp_ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err_resp;

	out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
	rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->err_flag),
				 ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);

	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
			  p_resp_ramrod_res, resp_ramrod_res_phys);

	if (!(qp->req_offloaded)) {
		/* Don't send query qp for the requester */
		out_params->sq_psn = qp->sq_psn;
		out_params->draining = false;

		if (rq_err_state)
			qp->cur_state = QED_ROCE_QP_STATE_ERR;

		out_params->state = qp->cur_state;

		return 0;
	}

	/* Send a query requester ramrod to FW to get SQ-PSN and state */
	p_req_ramrod_res = (struct roce_query_qp_req_output_params *)
	    dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
			       sizeof(*p_req_ramrod_res),
			       &req_ramrod_res_phys,
			       GFP_KERNEL);
	if (!p_req_ramrod_res) {
		rc = -ENOMEM;
		DP_NOTICE(p_hwfn,
			  "qed query qp failed: cannot allocate memory (ramrod)\n");
		return rc;
	}

	/* Get SPQ entry */
	init_data.cid = qp->icid + 1;
	rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
				 PROTOCOLID_ROCE, &init_data);
	if (rc)
		goto err_req;

	p_req_ramrod = &p_ent->ramrod.roce_query_qp_req;
	DMA_REGPAIR_LE(p_req_ramrod->output_params_addr, req_ramrod_res_phys);

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc)
		goto err_req;

	out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn);
	sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
				 ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG);
	sq_draining =
	    GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
		      ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);

	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
			  p_req_ramrod_res, req_ramrod_res_phys);

	out_params->draining = false;

	if (rq_err_state || sq_err_state)
		qp->cur_state = QED_ROCE_QP_STATE_ERR;
	else if (sq_draining)
		out_params->draining = true;
	out_params->state = qp->cur_state;

	return 0;

err_req:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
			  p_req_ramrod_res, req_ramrod_res_phys);
	return rc;
err_resp:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
			  p_resp_ramrod_res, resp_ramrod_res_phys);
	return rc;
}

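/* Destroy a QP. Valid only from RESET, ERR or INIT; for a QP that made
 * it past RESET both destroy ramrods are sent, and a mismatch between
 * invalidated and bound MWs is reported as -EINVAL.
 */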
int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
{
	u32 num_invalidated_mw = 0;
	u32 num_bound_mw = 0;
	u32 cq_prod;
	int rc;

	/* Destroys the specified QP */
	if ((qp->cur_state != QED_ROCE_QP_STATE_RESET) &&
	    (qp->cur_state != QED_ROCE_QP_STATE_ERR) &&
	    (qp->cur_state != QED_ROCE_QP_STATE_INIT)) {
		DP_NOTICE(p_hwfn,
			  "QP must be in error, reset or init state before destroying it\n");
		return -EINVAL;
	}

	if (qp->cur_state != QED_ROCE_QP_STATE_RESET) {
		rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
						      &num_invalidated_mw,
						      &cq_prod);
		if (rc)
			return rc;

		/* Send destroy requester ramrod */
		rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
						      &num_bound_mw);
		if (rc)
			return rc;

		if (num_invalidated_mw != num_bound_mw) {
			DP_NOTICE(p_hwfn,
				  "number of invalidated memory windows differs from the number of bound ones\n");
			return -EINVAL;
		}
	}

	return 0;
}

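/* Drive the FW side of an IB QP state transition. The transitions map
 * to ramrods as follows:
 *
 *   INIT/RESET -> RTR : create responder
 *   RTR -> RTS        : create requester, modify responder
 *   RTS -> RTS        : modify responder, modify requester
 *   RTS -> SQD        : modify requester (move_to_sqd)
 *   SQD -> SQD/RTS    : modify responder, modify requester
 *   any -> ERR        : modify responder/requester (move_to_err)
 *   any -> RESET      : destroy responder, destroy requester
 */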
int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
		       struct qed_rdma_qp *qp,
		       enum qed_roce_qp_state prev_state,
		       struct qed_rdma_modify_qp_in_params *params)
{
	u32 num_invalidated_mw = 0, num_bound_mw = 0;
	int rc = 0;

	/* Perform additional operations according to the current state and the
	 * next state
	 */
	if (((prev_state == QED_ROCE_QP_STATE_INIT) ||
	     (prev_state == QED_ROCE_QP_STATE_RESET)) &&
	    (qp->cur_state == QED_ROCE_QP_STATE_RTR)) {
		/* Init->RTR or Reset->RTR */
		rc = qed_roce_sp_create_responder(p_hwfn, qp);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_RTR) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
		/* RTR->RTS */
		rc = qed_roce_sp_create_requester(p_hwfn, qp);
		if (rc)
			return rc;

		/* Send modify responder ramrod */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
		/* RTS->RTS */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
		/* RTS->SQD */
		rc = qed_roce_sp_modify_requester(p_hwfn, qp, true, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
		/* SQD->SQD */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
						  params->modify_flags);
		return rc;
	} else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
		   (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
		/* SQD->RTS */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
						  params->modify_flags);

		return rc;
	} else if (qp->cur_state == QED_ROCE_QP_STATE_ERR) {
		/* ->ERR */
		rc = qed_roce_sp_modify_responder(p_hwfn, qp, true,
						  params->modify_flags);
		if (rc)
			return rc;

		rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, true,
						  params->modify_flags);
		return rc;
	} else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) {
		/* Any state -> RESET */
		u32 cq_prod;

		/* Send destroy responder ramrod */
		rc = qed_roce_sp_destroy_qp_responder(p_hwfn,
						      qp,
						      &num_invalidated_mw,
						      &cq_prod);

		if (rc)
			return rc;

		qp->cq_prod = cq_prod;

		rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
						      &num_bound_mw);

		if (num_invalidated_mw != num_bound_mw) {
			DP_NOTICE(p_hwfn,
				  "number of invalidated memory windows differs from the number of bound ones\n");
			return -EINVAL;
		}
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
	}

	return rc;
}

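/* Example: for proto cid start S, icid S + 4 (responder) pairs with
 * icid S + 5 (requester); cid = 4, xcid = cid ^ 1 = 5. The underlying
 * cid_map pair is released only once both real cids have been freed.
 */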
static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid)
{
	struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
	u32 start_cid, cid, xcid;

	/* an even icid belongs to a responder while an odd icid belongs to a
	 * requester. The 'cid' received as an input can be either. We calculate
	 * the "partner" icid and call it xcid. Only if both are free then the
	 * "cid" map can be cleared.
	 */
	start_cid = qed_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto);
	cid = icid - start_cid;
	xcid = cid ^ 1;

	spin_lock_bh(&p_rdma_info->lock);

	qed_bmap_release_id(p_hwfn, &p_rdma_info->real_cid_map, cid);
	if (qed_bmap_test_id(p_hwfn, &p_rdma_info->real_cid_map, xcid) == 0) {
		qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, cid);
		qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, xcid);
	}

	spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
}

void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u8 val;

	/* if any QPs are already active, we want to disable DPM, since their
	 * context information contains information from before the latest DCBx
	 * update. Otherwise enable it.
	 */
	val = qed_rdma_allocated_qps(p_hwfn) ? true : false;
	p_hwfn->dcbx_no_edpm = (u8)val;

	qed_rdma_dpm_conf(p_hwfn, p_ptt);
}

int qed_roce_setup(struct qed_hwfn *p_hwfn)
{
	return qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ROCE,
					 qed_roce_async_event);
}

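/* One-time RoCE HW init: clear PRS_REG_ROCE_DEST_QP_MAX_PF, point the
 * RDMA parser-search register at the RoCE one, and set bit 0 of
 * PRS_REG_LIGHT_L2_ETHERTYPE_EN, which appears to admit the RoCE
 * ethertype through the light-L2 parser. The even-cid check guards the
 * responder-even/requester-odd pairing assumed throughout this file.
 */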
int qed_roce_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 ll2_ethertype_en;

	qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);

	p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;

	ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
	qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
	       (ll2_ethertype_en | 0x01));

	if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
		DP_NOTICE(p_hwfn, "The first RoCE cid should be even\n");
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n");
	return 0;
}