// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_iscsi.h"
#include "qed_mcp.h"
#include "qed_ooo.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_rdma.h"

/***************************************************************************
 * Structures & Definitions
 ***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT	(1)

#define SPQ_BLOCK_DELAY_MAX_ITER	(10)
#define SPQ_BLOCK_DELAY_US		(10)
#define SPQ_BLOCK_SLEEP_MAX_ITER	(1000)
#define SPQ_BLOCK_SLEEP_MS		(5)
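/* Worst-case wait budget implied by the constants above: the quick poll
 * spins for up to 10 iterations x 10 us (~100 us) without sleeping, while
 * the sleeping poll waits up to 1000 iterations x 5 ms (~5 s) for the
 * firmware to complete a ramrod before an MCP drain is attempted.
 */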

/***************************************************************************
 * Blocking Imp. (BLOCK/EBLOCK mode)
 ***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
				void *cookie,
				union event_ring_data *data, u8 fw_return_code)
{
	struct qed_spq_comp_done *comp_done;

	comp_done = (struct qed_spq_comp_done *)cookie;

	comp_done->fw_return_code = fw_return_code;

	/* Make sure completion done is visible on waiting thread */
	smp_store_release(&comp_done->done, 0x1);
}

static int __qed_spq_block(struct qed_hwfn *p_hwfn,
			   struct qed_spq_entry *p_ent,
			   u8 *p_fw_ret, bool sleep_between_iter)
{
	struct qed_spq_comp_done *comp_done;
	u32 iter_cnt;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
				      : SPQ_BLOCK_DELAY_MAX_ITER;

	while (iter_cnt--) {
		/* Validate we receive completion update */
		if (smp_load_acquire(&comp_done->done) == 1) { /* ^^^ */
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}

		if (sleep_between_iter)
			msleep(SPQ_BLOCK_SLEEP_MS);
		else
			udelay(SPQ_BLOCK_DELAY_US);
	}

	return -EBUSY;
}

static int qed_spq_block(struct qed_hwfn *p_hwfn,
			 struct qed_spq_entry *p_ent,
			 u8 *p_fw_ret, bool skip_quick_poll)
{
	struct qed_spq_comp_done *comp_done;
	struct qed_ptt *p_ptt;
	int rc;

	/* A relatively short polling period w/o sleeping, to allow the FW to
	 * complete the ramrod and thus possibly to avoid the following sleeps.
	 */
	if (!skip_quick_poll) {
		rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false);
		if (!rc)
			return 0;
	}

	/* Move to polling with a sleeping period between iterations */
	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (!rc)
		return 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_NOTICE(p_hwfn, "Failed to acquire PTT\n");
		return -EAGAIN;
	}

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = qed_mcp_drain(p_hwfn, p_ptt);
	qed_ptt_release(p_hwfn, p_ptt);
	if (rc) {
		DP_NOTICE(p_hwfn, "MCP drain failed\n");
		goto err;
	}

	/* Retry after drain */
	rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
	if (!rc)
		return 0;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	if (comp_done->done == 1) {
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
		return 0;
	}
err:
	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;
	qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_RAMROD_FAIL,
			  "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
			  le32_to_cpu(p_ent->elem.hdr.cid),
			  p_ent->elem.hdr.cmd_id,
			  p_ent->elem.hdr.protocol_id,
			  le16_to_cpu(p_ent->elem.hdr.echo));
	qed_ptt_release(p_hwfn, p_ptt);

	return -EBUSY;
}
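
/* Escalation order implemented above: a quick busy-wait poll (unless
 * skipped), then a sleeping poll, then an MCP drain request followed by one
 * more sleeping poll and a final check of the completion flag. Only if all
 * of these fail is the ramrod reported as stuck via qed_hw_err_notify().
 */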

/***************************************************************************
 * SPQ entries inner API
 ***************************************************************************/
static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
			      struct qed_spq_entry *p_ent)
{
	p_ent->flags = 0;

	switch (p_ent->comp_mode) {
	case QED_SPQ_MODE_EBLOCK:
	case QED_SPQ_MODE_BLOCK:
		p_ent->comp_cb.function = qed_spq_blocking_cb;
		break;
	case QED_SPQ_MODE_CB:
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
		   p_ent->elem.hdr.cid,
		   p_ent->elem.hdr.cmd_id,
		   p_ent->elem.hdr.protocol_id,
		   p_ent->elem.data_ptr.hi,
		   p_ent->elem.data_ptr.lo,
		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	return 0;
}

/***************************************************************************
 * HSI access
 ***************************************************************************/
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
				  struct qed_spq *p_spq)
{
	struct e4_core_conn_context *p_cxt;
	struct qed_cxt_info cxt_info;
	u16 physical_q;
	int rc;

	cxt_info.iid = p_spq->cid;

	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);

	if (rc < 0) {
		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
			  p_spq->cid);
		return;
	}

	p_cxt = cxt_info.p_cxt;

	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
		  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
		  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
		  E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

	/* QM physical queue */
	physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
	p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q);

	p_cxt->xstorm_st_context.spq_base_lo =
		DMA_LO_LE(p_spq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.spq_base_hi =
		DMA_HI_LE(p_spq->chain.p_phys_addr);

	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
		       p_hwfn->p_consq->chain.p_phys_addr);
}
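
/* The CORE connection context programmed above tells the firmware where the
 * SPQ ring and the consolidation (ConsQ) ring live in host memory, and which
 * loopback physical queue (PQ_FLAGS_LB) services the slow-path work for this
 * connection.
 */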

static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
			   struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
{
	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
	struct core_db_data *p_db_data = &p_spq->db_data;
	u16 echo = qed_chain_get_prod_idx(p_chain);
	struct slow_path_element *elem;

	p_ent->elem.hdr.echo = cpu_to_le16(echo);
	elem = qed_chain_produce(p_chain);
	if (!elem) {
		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
		return -EINVAL;
	}

	*elem = p_ent->elem; /* struct assignment */

	/* send a doorbell on the slow hwfn session */
	p_db_data->spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

	/* make sure the SPQE is updated before the doorbell */
	wmb();

	DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data);

	/* make sure the doorbell has been rung */
	wmb();

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
		   p_spq->db_addr_offset,
		   p_spq->cid,
		   p_db_data->params,
		   p_db_data->agg_flags, qed_chain_get_prod_idx(p_chain));

	return 0;
}
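
/* The 'echo' value stamped into the ramrod header above is simply the SPQ
 * producer index at post time. Firmware reflects it back in the event ring
 * entry, and qed_spq_completion() uses it to locate the matching entry on
 * the completion_pending list.
 */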

/***************************************************************************
 * Asynchronous events
 ***************************************************************************/
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
			   struct event_ring_entry *p_eqe)
{
	qed_spq_async_comp_cb cb;

	if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE))
		return -EINVAL;

	cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
	if (cb) {
		return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
			  &p_eqe->data, p_eqe->fw_return_code);
	} else {
		DP_NOTICE(p_hwfn,
			  "Unknown Async completion for protocol: %d\n",
			  p_eqe->protocol_id);
		return -EINVAL;
	}
}

int
qed_spq_register_async_cb(struct qed_hwfn *p_hwfn,
			  enum protocol_type protocol_id,
			  qed_spq_async_comp_cb cb)
{
	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
		return -EINVAL;

	p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
	return 0;
}
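
/* Illustrative registration sketch (hypothetical handler name; parameter
 * list inferred from the call site in qed_async_event_completion() above).
 * A protocol module hooks its async EQ handler per protocol ID, e.g.:
 *
 *	static int my_proto_async_event(struct qed_hwfn *p_hwfn, u8 opcode,
 *					__le16 echo,
 *					union event_ring_data *data,
 *					u8 fw_return_code)
 *	{
 *		return 0;
 *	}
 *
 *	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ISCSI,
 *				  my_proto_async_event);
 *
 * and removes it again with qed_spq_unregister_async_cb() on teardown.
 */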

void
qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
			    enum protocol_type protocol_id)
{
	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
		return;

	p_hwfn->p_spq->async_comp_cb[protocol_id] = NULL;
}

/***************************************************************************
 * EQ API
 ***************************************************************************/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
{
	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, prod);
}

int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_eq *p_eq = cookie;
	struct qed_chain *p_chain = &p_eq->chain;
	int rc = 0;

	/* take a snapshot of the FW consumer */
	u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

	/* Need to guarantee the fw_cons index we use points to a usable
	 * element (to comply with our chain), so our macros would comply
	 */
	if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
	    qed_chain_get_usable_per_page(p_chain))
		fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);
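	/* The adjustment above matters for chain modes that reserve per-page
	 * elements (e.g. a next-page pointer slot): a FW consumer snapshot
	 * that lands on a reserved index is pushed past it so the comparison
	 * with qed_chain_get_cons_idx() below stays consistent. For chains
	 * with no reserved elements the added value is zero.
	 */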

	/* Complete current segment of eq entries */
	while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
		struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

		if (!p_eqe) {
			rc = -EINVAL;
			break;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
			   p_eqe->opcode,
			   p_eqe->protocol_id,
			   p_eqe->reserved0,
			   le16_to_cpu(p_eqe->echo),
			   p_eqe->fw_return_code,
			   p_eqe->flags);

		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
			if (qed_async_event_completion(p_hwfn, p_eqe))
				rc = -EINVAL;
		} else if (qed_spq_completion(p_hwfn,
					      p_eqe->echo,
					      p_eqe->fw_return_code,
					      &p_eqe->data)) {
			rc = -EINVAL;
		}

		qed_chain_recycle_consumed(p_chain);
	}

	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

	/* Attempt to post pending requests */
	spin_lock_bh(&p_hwfn->p_spq->lock);
	rc = qed_spq_pend_post(p_hwfn);
	spin_unlock_bh(&p_hwfn->p_spq->lock);

	return rc;
}

int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
{
	struct qed_chain_init_params params = {
		.mode		= QED_CHAIN_MODE_PBL,
		.intended_use	= QED_CHAIN_USE_TO_PRODUCE,
		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
		.num_elems	= num_elem,
		.elem_size	= sizeof(union event_ring_element),
	};
	struct qed_eq *p_eq;
	int ret;

	/* Allocate EQ struct */
	p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
	if (!p_eq)
		return -ENOMEM;

	ret = qed_chain_alloc(p_hwfn->cdev, &p_eq->chain, &params);
	if (ret) {
		DP_NOTICE(p_hwfn, "Failed to allocate EQ chain\n");
		goto eq_allocate_fail;
	}

	/* register EQ completion on the SP SB */
	qed_int_register_cb(p_hwfn, qed_eq_completion,
			    p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

	p_hwfn->p_eq = p_eq;
	return 0;

eq_allocate_fail:
	kfree(p_eq);

	return ret;
}

void qed_eq_setup(struct qed_hwfn *p_hwfn)
{
	qed_chain_reset(&p_hwfn->p_eq->chain);
}

void qed_eq_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_eq)
		return;

	qed_chain_free(p_hwfn->cdev, &p_hwfn->p_eq->chain);

	kfree(p_hwfn->p_eq);
	p_hwfn->p_eq = NULL;
}

/***************************************************************************
 * CQE API - manipulate EQ functionality
 ***************************************************************************/
static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
			      struct eth_slow_path_rx_cqe *cqe,
			      enum protocol_type protocol)
{
	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe
	 */
	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}

int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
			   struct eth_slow_path_rx_cqe *cqe)
{
	int rc;

	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);

	return rc;
}

/***************************************************************************
 * Slow hwfn Queue (spq)
 ***************************************************************************/
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_virt = NULL;
	struct core_db_data *p_db_data;
	void __iomem *db_addr;
	dma_addr_t p_phys = 0;
	u32 i, capacity;
	int rc;

	INIT_LIST_HEAD(&p_spq->pending);
	INIT_LIST_HEAD(&p_spq->completion_pending);
	INIT_LIST_HEAD(&p_spq->free_pool);
	INIT_LIST_HEAD(&p_spq->unlimited_pending);
	spin_lock_init(&p_spq->lock);

	/* SPQ empty pool */
	p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
	p_virt = p_spq->p_virt;

	capacity = qed_chain_get_capacity(&p_spq->chain);
	for (i = 0; i < capacity; i++) {
		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

		list_add_tail(&p_virt->list, &p_spq->free_pool);

		p_virt++;
		p_phys += sizeof(struct qed_spq_entry);
	}
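
	/* Each free-pool entry's data_ptr now points at the 'ramrod' member
	 * of its own qed_spq_entry within the coherent buffer, so firmware
	 * can fetch the ramrod payload directly from the entry carrying it.
	 */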

	/* Statistics */
	p_spq->normal_count = 0;
	p_spq->comp_count = 0;
	p_spq->comp_sent_count = 0;
	p_spq->unlimited_pending_count = 0;

	bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
	p_spq->comp_bitmap_idx = 0;

	/* SPQ cid, cannot fail */
	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
	qed_spq_hw_initialize(p_hwfn, p_spq);

	/* reset the chain itself */
	qed_chain_reset(&p_spq->chain);

	/* Initialize the address/data of the SPQ doorbell */
	p_spq->db_addr_offset = qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY);
	p_db_data = &p_spq->db_data;
	memset(p_db_data, 0, sizeof(*p_db_data));
	SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_SPQ_PROD_CMD);
	p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
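
	/* The doorbell payload prepared above is reused for every post:
	 * qed_spq_hw_post() only refreshes spq_prod before writing the same
	 * core_db_data to the doorbell BAR, directing the XCM to pick up the
	 * new SPQ producer value.
	 */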

	/* Register the SPQ doorbell with the doorbell recovery mechanism */
	db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells +
				   p_spq->db_addr_offset);
	rc = qed_db_recovery_add(p_hwfn->cdev, db_addr, &p_spq->db_data,
				 DB_REC_WIDTH_32B, DB_REC_KERNEL);
	if (rc)
		DP_INFO(p_hwfn,
			"Failed to register the SPQ doorbell with the doorbell recovery mechanism\n");
}

int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_chain_init_params params = {
		.mode		= QED_CHAIN_MODE_SINGLE,
		.intended_use	= QED_CHAIN_USE_TO_PRODUCE,
		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
		.elem_size	= sizeof(struct slow_path_element),
	};
	struct qed_dev *cdev = p_hwfn->cdev;
	struct qed_spq_entry *p_virt = NULL;
	struct qed_spq *p_spq = NULL;
	dma_addr_t p_phys = 0;
	u32 capacity;
	int ret;

	/* SPQ struct */
	p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
	if (!p_spq)
		return -ENOMEM;

	/* SPQ ring */
	ret = qed_chain_alloc(cdev, &p_spq->chain, &params);
	if (ret) {
		DP_NOTICE(p_hwfn, "Failed to allocate SPQ chain\n");
		goto spq_chain_alloc_fail;
	}

	/* allocate and fill the SPQ elements (incl. ramrod data list) */
	capacity = qed_chain_get_capacity(&p_spq->chain);
	ret = -ENOMEM;

	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
				    capacity * sizeof(struct qed_spq_entry),
				    &p_phys, GFP_KERNEL);
	if (!p_virt)
		goto spq_alloc_fail;

	p_spq->p_virt = p_virt;
	p_spq->p_phys = p_phys;
	p_hwfn->p_spq = p_spq;

	return 0;

spq_alloc_fail:
	qed_chain_free(cdev, &p_spq->chain);
spq_chain_alloc_fail:
	kfree(p_spq);

	return ret;
}

void qed_spq_free(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	void __iomem *db_addr;
	u32 capacity;

	if (!p_spq)
		return;

	/* Delete the SPQ doorbell from the doorbell recovery mechanism */
	db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells +
				   p_spq->db_addr_offset);
	qed_db_recovery_del(p_hwfn->cdev, db_addr, &p_spq->db_data);

	if (p_spq->p_virt) {
		capacity = qed_chain_get_capacity(&p_spq->chain);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  capacity *
				  sizeof(struct qed_spq_entry),
				  p_spq->p_virt, p_spq->p_phys);
	}

	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
	p_hwfn->p_spq = NULL;
}

int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;
	int rc = 0;

	spin_lock_bh(&p_spq->lock);

	if (list_empty(&p_spq->free_pool)) {
		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
		if (!p_ent) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate an SPQ entry for a pending ramrod\n");
			rc = -ENOMEM;
			goto out_unlock;
		}
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		p_ent = list_first_entry(&p_spq->free_pool,
					 struct qed_spq_entry, list);
		list_del(&p_ent->list);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;

out_unlock:
	spin_unlock_bh(&p_spq->lock);
	return rc;
}

/* Locked variant; Should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
				   struct qed_spq_entry *p_ent)
{
	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
{
	spin_lock_bh(&p_hwfn->p_spq->lock);
	__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_hwfn->p_spq->lock);
}
/**
 * qed_spq_add_entry() - Add a new entry to the pending list.
 * Should be used while lock is being held.
 *
 * @p_hwfn: HW device data.
 * @p_ent: An entry to add.
 * @priority: Desired priority.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * Return: zero on success, -EINVAL on invalid @priority.
 */
static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
			     struct qed_spq_entry *p_ent,
			     enum spq_priority priority)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;

	if (p_ent->queue == &p_spq->unlimited_pending) {

		if (list_empty(&p_spq->free_pool)) {
			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
			p_spq->unlimited_pending_count++;

			return 0;
		} else {
			struct qed_spq_entry *p_en2;

			p_en2 = list_first_entry(&p_spq->free_pool,
						 struct qed_spq_entry, list);
			list_del(&p_en2->list);

			/* Copy the ring element physical pointer to the new
			 * entry, since we are about to overwrite the entire
			 * ring entry and don't want to lose the pointer.
			 */
			p_ent->elem.data_ptr = p_en2->elem.data_ptr;

			*p_en2 = *p_ent;

			/* EBLOCK responsible to free the allocated p_ent */
			if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
				kfree(p_ent);
			else
				p_ent->post_ent = p_en2;

			p_ent = p_en2;
		}
	}

	/* entry is to be placed in 'pending' queue */
	switch (priority) {
	case QED_SPQ_PRIORITY_NORMAL:
		list_add_tail(&p_ent->list, &p_spq->pending);
		p_spq->normal_count++;
		break;
	case QED_SPQ_PRIORITY_HIGH:
		list_add(&p_ent->list, &p_spq->pending);
		p_spq->high_count++;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/***************************************************************************
 * Accessor
 ***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_spq)
		return 0xffffffff;	/* illegal */
	return p_hwfn->p_spq->cid;
}

/***************************************************************************
 * Posting new Ramrods
 ***************************************************************************/
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
			     struct list_head *head, u32 keep_reserve)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	int rc;

	while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
	       !list_empty(head)) {
		struct qed_spq_entry *p_ent =
			list_first_entry(head, struct qed_spq_entry, list);
		list_move_tail(&p_ent->list, &p_spq->completion_pending);
		p_spq->comp_sent_count++;

		rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
		if (rc) {
			list_del(&p_ent->list);
			__qed_spq_return_entry(p_hwfn, p_ent);
			return rc;
		}
	}

	return 0;
}

int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;

	while (!list_empty(&p_spq->free_pool)) {
		if (list_empty(&p_spq->unlimited_pending))
			break;

		p_ent = list_first_entry(&p_spq->unlimited_pending,
					 struct qed_spq_entry, list);
		if (!p_ent)
			return -EINVAL;

		list_del(&p_ent->list);

		qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	}

	return qed_spq_post_list(p_hwfn, &p_spq->pending,
				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

static void qed_spq_recov_set_ret_code(struct qed_spq_entry *p_ent,
				       u8 *fw_return_code)
{
	if (!fw_return_code)
		return;

	if (p_ent->elem.hdr.protocol_id == PROTOCOLID_ROCE ||
	    p_ent->elem.hdr.protocol_id == PROTOCOLID_IWARP)
		*fw_return_code = RDMA_RETURN_OK;
}

/* Avoid overwriting of SPQ entries when getting out-of-order completions, by
 * marking the completions in a bitmap and increasing the chain consumer only
 * for the first successive completed entries.
 */
static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo)
{
	u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
	struct qed_spq *p_spq = p_hwfn->p_spq;

	__set_bit(pos, p_spq->p_comp_bitmap);
	while (test_bit(p_spq->comp_bitmap_idx,
			p_spq->p_comp_bitmap)) {
		__clear_bit(p_spq->comp_bitmap_idx,
			    p_spq->p_comp_bitmap);
		p_spq->comp_bitmap_idx++;
		qed_chain_return_produced(&p_spq->chain);
	}
}
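
/* Worked example for the bitmap above (illustrative values): with
 * comp_bitmap_idx at 5, completions arriving for echo 7, 5 and 6 in that
 * order first set bit 7 without releasing anything. When 5 completes, one
 * ring element is returned and comp_bitmap_idx moves to 6; when 6 completes,
 * the remaining two elements (for 6 and 7) are returned and the index
 * advances to 8.
 */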

int qed_spq_post(struct qed_hwfn *p_hwfn,
		 struct qed_spq_entry *p_ent, u8 *fw_return_code)
{
	int rc = 0;
	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
	bool b_ret_ent = true;
	bool eblock;

	if (!p_hwfn)
		return -EINVAL;

	if (!p_ent) {
		DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
		return -EINVAL;
	}

	if (p_hwfn->cdev->recov_in_prog) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SPQ,
			   "Recovery is in progress. Skip spq post [cmd %02x protocol %02x]\n",
			   p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);

		/* Let the flow complete w/o any error handling */
		qed_spq_recov_set_ret_code(p_ent, fw_return_code);
		return 0;
	}

	/* Complete the entry */
	rc = qed_spq_fill_entry(p_hwfn, p_ent);

	spin_lock_bh(&p_spq->lock);

	/* Check return value after LOCK is taken for cleaner error flow */
	if (rc)
		goto spq_post_fail;

	/* Check if entry is in block mode before qed_spq_add_entry,
	 * which might kfree p_ent.
	 */
	eblock = (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK);

	/* Add the request to the pending queue */
	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	if (rc)
		goto spq_post_fail;

	rc = qed_spq_pend_post(p_hwfn);
	if (rc) {
		/* Since it's possible that pending failed for a different
		 * entry [although unlikely], the failed entry was already
		 * dealt with; No need to return it here.
		 */
		b_ret_ent = false;
		goto spq_post_fail;
	}

	spin_unlock_bh(&p_spq->lock);

	if (eblock) {
		/* For entries in QED EBLOCK mode, the completion code cannot
		 * perform the necessary cleanup - if it did, we couldn't
		 * access p_ent here to see whether it's successful or not.
		 * Thus, after gaining the answer perform the cleanup here.
		 */
		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code,
				   p_ent->queue == &p_spq->unlimited_pending);

		if (p_ent->queue == &p_spq->unlimited_pending) {
			struct qed_spq_entry *p_post_ent = p_ent->post_ent;

			kfree(p_ent);

			/* Return the entry which was actually posted */
			p_ent = p_post_ent;
		}

		if (rc)
			goto spq_post_fail2;

		/* return to pool */
		qed_spq_return_entry(p_hwfn, p_ent);
	}
	return rc;

spq_post_fail2:
	spin_lock_bh(&p_spq->lock);
	list_del(&p_ent->list);
	qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo);

spq_post_fail:
	/* return to the free pool */
	if (b_ret_ent)
		__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}

int qed_spq_completion(struct qed_hwfn *p_hwfn,
		       __le16 echo,
		       u8 fw_return_code,
		       union event_ring_data *p_data)
{
	struct qed_spq *p_spq;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_spq_entry *tmp;
	struct qed_spq_entry *found = NULL;

	if (!p_hwfn)
		return -EINVAL;

	p_spq = p_hwfn->p_spq;
	if (!p_spq)
		return -EINVAL;

	spin_lock_bh(&p_spq->lock);
	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
		if (p_ent->elem.hdr.echo == echo) {
			list_del(&p_ent->list);
			qed_spq_comp_bmap_update(p_hwfn, echo);
			p_spq->comp_count++;
			found = p_ent;
			break;
		}

		/* This is relatively uncommon - depends on scenarios
		 * which have multiple per-PF sent ramrods.
		 */
		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
			   le16_to_cpu(echo),
			   le16_to_cpu(p_ent->elem.hdr.echo));
	}

	/* Release lock before callback, as callback may post
	 * an additional ramrod.
	 */
	spin_unlock_bh(&p_spq->lock);

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Failed to find an entry this EQE [echo %04x] completes\n",
			  le16_to_cpu(echo));
		return -EEXIST;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
964 "Complete EQE [echo %04x]: func %p cookie %p)\n",
		   le16_to_cpu(echo),
		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
	if (found->comp_cb.function)
		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
					fw_return_code);
	else
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SPQ,
			   "Got a completion without a callback function\n");

	if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
		/* EBLOCK is responsible for returning its own entry into the
		 * free list.
		 */
		qed_spq_return_entry(p_hwfn, found);

	return 0;
}

#define QED_SPQ_CONSQ_ELEM_SIZE		0x80
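/* With the 0x80-byte element size above, a single chain page holds
 * QED_CHAIN_PAGE_SIZE / 0x80 ConsQ elements (32 for the common 4 KiB chain
 * page); num_elems below is therefore sized to exactly one page.
 */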

int qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_chain_init_params params = {
		.mode		= QED_CHAIN_MODE_PBL,
		.intended_use	= QED_CHAIN_USE_TO_PRODUCE,
		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
		.num_elems	= QED_CHAIN_PAGE_SIZE / QED_SPQ_CONSQ_ELEM_SIZE,
		.elem_size	= QED_SPQ_CONSQ_ELEM_SIZE,
	};
	struct qed_consq *p_consq;
	int ret;

	/* Allocate ConsQ struct */
	p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
	if (!p_consq)
		return -ENOMEM;

	/* Allocate and initialize ConsQ chain */
	ret = qed_chain_alloc(p_hwfn->cdev, &p_consq->chain, &params);
	if (ret) {
		DP_NOTICE(p_hwfn, "Failed to allocate ConsQ chain");
		goto consq_alloc_fail;
	}

	p_hwfn->p_consq = p_consq;

	return 0;

consq_alloc_fail:
	kfree(p_consq);

	return ret;
}

void qed_consq_setup(struct qed_hwfn *p_hwfn)
{
	qed_chain_reset(&p_hwfn->p_consq->chain);
}

void qed_consq_free(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_consq)
		return;

	qed_chain_free(p_hwfn->cdev, &p_hwfn->p_consq->chain);

	kfree(p_hwfn->p_consq);
	p_hwfn->p_consq = NULL;
}