1 /* QLogic qedr NIC Driver
2 * Copyright (c) 2015-2016 QLogic Corporation
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and /or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32 #ifndef __QEDR_H__
33 #define __QEDR_H__
34
35 #include <linux/pci.h>
36 #include <linux/xarray.h>
37 #include <rdma/ib_addr.h>
38 #include <linux/qed/qed_if.h>
39 #include <linux/qed/qed_chain.h>
40 #include <linux/qed/qed_rdma_if.h>
41 #include <linux/qed/qede_rdma.h>
42 #include <linux/qed/roce_common.h>
43 #include "qedr_hsi_rdma.h"
44
/* Node description string reported through the IB device attributes. */
#define QEDR_NODE_DESC "QLogic 579xx RoCE HCA"
#define DP_NAME(_dev) dev_name(&(_dev)->ibdev.dev)
/* Transport checks: a qedr device runs either iWARP or RoCE, never both. */
#define IS_IWARP(_dev) ((_dev)->rdma_type == QED_RDMA_TYPE_IWARP)
#define IS_ROCE(_dev) ((_dev)->rdma_type == QED_RDMA_TYPE_ROCE)

/* Debug print tagged with the device name and a subsystem label (below). */
#define DP_DEBUG(dev, module, fmt, ...) \
	pr_debug("(%s) " module ": " fmt, \
		 DP_NAME(dev) ? DP_NAME(dev) : "", ## __VA_ARGS__)

/* Subsystem labels for DP_DEBUG(); leading spaces keep the log columns
 * aligned — do not strip them.
 */
#define QEDR_MSG_INIT "INIT"
#define QEDR_MSG_MISC "MISC"
#define QEDR_MSG_CQ "  CQ"
#define QEDR_MSG_MR "  MR"
#define QEDR_MSG_RQ "  RQ"
#define QEDR_MSG_SQ "  SQ"
#define QEDR_MSG_QP "  QP"
#define QEDR_MSG_SRQ " SRQ"
#define QEDR_MSG_GSI " GSI"
#define QEDR_MSG_IWARP "  IW"

/* Signature stored in qedr_cq.sig to catch use of stale/corrupt CQs. */
#define QEDR_CQ_MAGIC_NUMBER (0x11223344)

#define FW_PAGE_SIZE (RDMA_RING_PAGE_SIZE)
#define FW_PAGE_SHIFT (12)
69
70 struct qedr_dev;
71
/* Per-CNQ (completion notification queue) state: the queue through which
 * the firmware signals CQ completion events to the driver.
 */
struct qedr_cnq {
	struct qedr_dev *dev;
	struct qed_chain pbl;		/* CNQ ring */
	struct qed_sb_info *sb;		/* status block backing this CNQ */
	char name[32];			/* IRQ name */
	u64 n_comp;			/* number of completions processed */
	__le16 *hw_cons_ptr;		/* HW consumer index (little-endian) */
	u8 index;			/* CNQ index within the device */
};
81
#define QEDR_MAX_SGID 128

/* Device capabilities and limits obtained from the qed core at probe time
 * and reported through the IB verbs query interfaces.
 */
struct qedr_device_attr {
	u32 vendor_id;
	u32 vendor_part_id;
	u32 hw_ver;
	u64 fw_ver;
	u64 node_guid;		/* in network order */
	u64 sys_image_guid;	/* in network order */
	u8 max_cnq;
	u8 max_sge;		/* max scatter/gather entries per WQE */
	u16 max_inline;
	u32 max_sqe;
	u32 max_rqe;
	u8 max_qp_resp_rd_atomic_resc;	/* responder RD-atomic resources per QP */
	u8 max_qp_req_rd_atomic_resc;	/* requester RD-atomic resources per QP */
	u64 max_dev_resp_rd_atomic_resc;
	u32 max_cq;
	u32 max_qp;
	u32 max_mr;
	u64 max_mr_size;
	u32 max_cqe;
	u32 max_mw;
	u32 max_fmr;
	u32 max_mr_mw_fmr_pbl;
	u64 max_mr_mw_fmr_size;
	u32 max_pd;
	u32 max_ah;
	u8 max_pkey;
	u32 max_srq;
	u32 max_srq_wr;
	u8 max_srq_sge;
	u8 max_stats_queues;
	u32 dev_caps;

	u64 page_size_caps;	/* bitmap of supported page sizes */
	u8 dev_ack_delay;
	u32 reserved_lkey;
	u32 bad_pkey_counter;
	struct qed_rdma_events events;	/* async-event callbacks for qed */
};
123
/* Bit in qedr_dev.enet_state tracking whether the ethernet link is up. */
#define QEDR_ENET_STATE_BIT (0)

/* Per-adapter driver state; embeds the ib_device registered with the RDMA
 * core (must be first for the core's container_of conversions).
 */
struct qedr_dev {
	struct ib_device ibdev;
	struct qed_dev *cdev;		/* qed core device handle */
	struct pci_dev *pdev;
	struct net_device *ndev;	/* associated ethernet netdev */

	enum ib_atomic_cap atomic_cap;

	void *rdma_ctx;			/* opaque context for qed rdma ops */
	struct qedr_device_attr attr;

	const struct qed_rdma_ops *ops;	/* qed RDMA API vtable */
	struct qed_int_info int_info;

	struct qed_sb_info *sb_array;	/* one status block per CNQ */
	struct qedr_cnq *cnq_array;
	int num_cnq;
	int sb_start;			/* first status-block id used by RDMA */

	/* Doorbell BAR mapping */
	void __iomem *db_addr;
	u64 db_phys_addr;
	u32 db_size;
	u16 dpi;

	union ib_gid *sgid_tbl;

	/* Lock for sgid table */
	spinlock_t sgid_lock;

	u64 guid;

	u32 dp_module;			/* debug-print module mask */
	u8 dp_level;			/* debug-print verbosity */
	u8 num_hwfns;
/* True when the device is in CMT (multi-hwfn / 100g) mode. */
#define QEDR_IS_CMT(dev) ((dev)->num_hwfns > 1)
	u8 affin_hwfn_idx;		/* hwfn affinitized to RDMA traffic */
	u8 gsi_ll2_handle;		/* light-L2 connection used for GSI */

	uint wq_multiplier;
	u8 gsi_ll2_mac_address[ETH_ALEN];
	int gsi_qp_created;
	struct qedr_cq *gsi_sqcq;
	struct qedr_cq *gsi_rqcq;
	struct qedr_qp *gsi_qp;
	enum qed_rdma_type rdma_type;	/* QED_RDMA_TYPE_ROCE or _IWARP */
	struct xarray qps;		/* qp_id -> qedr_qp lookup */
	struct xarray srqs;		/* srq_id -> qedr_srq lookup */
	struct workqueue_struct *iwarp_wq;
	u16 iwarp_max_mtu;

	unsigned long enet_state;	/* QEDR_ENET_STATE_BIT link state */

	u8 user_dpm_enabled;		/* direct push mode for user queues */
};
180
/* SQ sizing: derived from the PBL (page buffer list) limits and the
 * firmware WQE element geometry.
 */
#define QEDR_MAX_SQ_PBL			(0x8000)
#define QEDR_MAX_SQ_PBL_ENTRIES		(0x10000 / sizeof(void *))
#define QEDR_SQE_ELEMENT_SIZE		(sizeof(struct rdma_sq_sge))
#define QEDR_MAX_SQE_ELEMENTS_PER_SQE	(ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE / \
					 QEDR_SQE_ELEMENT_SIZE)
#define QEDR_MAX_SQE_ELEMENTS_PER_PAGE	((RDMA_RING_PAGE_SIZE) / \
					 QEDR_SQE_ELEMENT_SIZE)
#define QEDR_MAX_SQE			((QEDR_MAX_SQ_PBL_ENTRIES) *\
					 (RDMA_RING_PAGE_SIZE) / \
					 (QEDR_SQE_ELEMENT_SIZE) /\
					 (QEDR_MAX_SQE_ELEMENTS_PER_SQE))
/* RQ sizing, analogous to the SQ limits above. */
#define QEDR_MAX_RQ_PBL			(0x2000)
#define QEDR_MAX_RQ_PBL_ENTRIES		(0x10000 / sizeof(void *))
#define QEDR_RQE_ELEMENT_SIZE		(sizeof(struct rdma_rq_sge))
#define QEDR_MAX_RQE_ELEMENTS_PER_RQE	(RDMA_MAX_SGE_PER_RQ_WQE)
#define QEDR_MAX_RQE_ELEMENTS_PER_PAGE	((RDMA_RING_PAGE_SIZE) / \
					 QEDR_RQE_ELEMENT_SIZE)
#define QEDR_MAX_RQE			((QEDR_MAX_RQ_PBL_ENTRIES) *\
					 (RDMA_RING_PAGE_SIZE) / \
					 (QEDR_RQE_ELEMENT_SIZE) /\
					 (QEDR_MAX_RQE_ELEMENTS_PER_RQE))

/* CQ sizing: one u64 PBL entry is reserved, hence the "- 1". */
#define QEDR_CQE_SIZE	(sizeof(union rdma_cqe))
#define QEDR_MAX_CQE_PBL_SIZE	(512 * 1024)
#define QEDR_MAX_CQE_PBL_ENTRIES	(((QEDR_MAX_CQE_PBL_SIZE) / \
					  sizeof(u64)) - 1)
#define QEDR_MAX_CQES ((u32)((QEDR_MAX_CQE_PBL_ENTRIES) * \
			     (QED_CHAIN_PAGE_SIZE) / QEDR_CQE_SIZE))

#define QEDR_ROCE_MAX_CNQ_SIZE		(0x4000)

/* The adapter exposes a single IB port. */
#define QEDR_MAX_PORT			(1)
#define QEDR_PORT			(1)

/* Build a uverbs command-mask bit from a command name. */
#define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)

/* RoCE uses a single, fixed default pkey. */
#define QEDR_ROCE_PKEY_MAX 1
#define QEDR_ROCE_PKEY_TABLE_LEN 1
#define QEDR_ROCE_PKEY_DEFAULT 0xffff
221
/* One page of a page buffer list: its kernel mapping and DMA address. */
struct qedr_pbl {
	struct list_head list_entry;
	void *va;	/* CPU virtual address of the page */
	dma_addr_t pa;	/* DMA address handed to the device */
};
227
/* Per-user-process context; wraps the core ib_ucontext and carries the
 * doorbell page (DPI) mapped into the process.
 */
struct qedr_ucontext {
	struct ib_ucontext ibucontext;
	struct qedr_dev *dev;
	struct qedr_pd *pd;
	void __iomem *dpi_addr;
	u64 dpi_phys_addr;	/* physical address userspace mmaps */
	u32 dpi_size;
	u16 dpi;

	struct list_head mm_head;	/* mappable regions for this context */

	/* Lock to protect mm list */
	struct mutex mm_list_lock;
};
242
/* 64-bit doorbell value: structured view for building it, raw view for
 * writing it to the doorbell register in one access.
 */
union db_prod64 {
	struct rdma_pwm_val32_data data;
	u64 raw;
};
247
/* How a CQ was created, which determines how it is polled and armed. */
enum qedr_cq_type {
	QEDR_CQ_TYPE_GSI,	/* special CQ for the GSI (QP1) queue pair */
	QEDR_CQ_TYPE_KERNEL,
	QEDR_CQ_TYPE_USER,
};
253
/* Geometry of a page buffer list describing a user/kernel queue. */
struct qedr_pbl_info {
	u32 num_pbls;	/* number of PBL pages */
	u32 num_pbes;	/* number of page buffer entries */
	u32 pbl_size;	/* bytes per PBL page */
	u32 pbe_size;	/* bytes per entry */
	bool two_layered;	/* true when a first-layer PBL points at PBLs */
};
261
/* A queue allocated in userspace: its pinned memory and the PBL that
 * describes it to the device.
 */
struct qedr_userq {
	struct ib_umem *umem;
	struct qedr_pbl_info pbl_info;
	struct qedr_pbl *pbl_tbl;
	u64 buf_addr;	/* userspace virtual address of the queue buffer */
	size_t buf_len;
};
269
/* Completion queue; wraps the core ib_cq (must be first). */
struct qedr_cq {
	struct ib_cq ibcq;

	enum qedr_cq_type cq_type;
	u32 sig;	/* QEDR_CQ_MAGIC_NUMBER while the CQ is valid */

	u16 icid;	/* firmware connection id of this CQ */

	/* Lock to protect multiple CQ accessors (poll vs. arm) */
	spinlock_t cq_lock;
	u8 arm_flags;
	struct qed_chain pbl;	/* CQE ring */

	void __iomem *db_addr;	/* doorbell register for this CQ */
	union db_prod64 db;	/* last doorbell value written */

	u8 pbl_toggle;		/* expected toggle bit of valid CQEs */
	union rdma_cqe *latest_cqe;
	union rdma_cqe *toggle_cqe;

	u32 cq_cons;		/* software consumer index */

	struct qedr_userq q;	/* backing memory for user-created CQs */
	u8 destroyed;
	u16 cnq_notif;		/* count of CNQ notifications received */
};
296
/* Protection domain; wraps the core ib_pd (must be first). */
struct qedr_pd {
	struct ib_pd ibpd;
	u32 pd_id;	/* firmware PD id */
	struct qedr_ucontext *uctx;	/* NULL for kernel PDs */
};
302
/* A region userspace is allowed to mmap (e.g. a doorbell page), keyed by
 * physical address and length; linked on qedr_ucontext.mm_head.
 */
struct qedr_mm {
	struct {
		u64 phy_addr;
		unsigned long len;
	} key;
	struct list_head entry;
};
310
/* 32-bit doorbell value: structured view for building, raw for writing. */
union db_prod32 {
	struct rdma_pwm_val16_data data;
	u32 raw;
};
315
/* Hardware queue (SQ or RQ) bookkeeping for a QP. */
struct qedr_qp_hwq_info {
	/* WQE Elements */
	struct qed_chain pbl;		/* WQE ring */
	u64 p_phys_addr_tbl;
	u32 max_sges;

	/* WQE software indices */
	u16 prod;	/* producer index */
	u16 cons;	/* consumer index */
	u16 wqe_cons;
	u16 gsi_cons;	/* consumer index for the GSI path */
	u16 max_wr;	/* ring size; prod/cons wrap at this value */

	/* DB */
	void __iomem *db;		/* doorbell register */
	union db_prod32 db_data;	/* last doorbell value written */

	/* second doorbell used in iWARP mode */
	void __iomem *iwarp_db2;
	union db_prod32 iwarp_db2_data;
};
336
/* Advance a software index field of a qedr_qp_hwq_info, wrapping on the
 * chain capacity.  do/while(0) so it expands as a single statement.
 *
 * Fixed: the original expansion was missing the terminating semicolon
 * (any use site failed to compile) and left the macro arguments
 * unparenthesized.
 *
 * NOTE(review): masking with the raw capacity only wraps correctly if
 * qed_chain_get_capacity() returns a 2^n - 1 style mask, and pbl is
 * passed by value where the qed API may expect a pointer — confirm
 * against the qed_chain API before first use; this macro currently has
 * no callers in this file.
 */
#define QEDR_INC_SW_IDX(p_info, index)					\
	do {								\
		(p_info)->index = ((p_info)->index + 1) &		\
				  qed_chain_get_capacity((p_info)->pbl);\
	} while (0)
342
/* Hardware queue bookkeeping for a shared receive queue. */
struct qedr_srq_hwq_info {
	u32 max_sges;
	u32 max_wr;
	struct qed_chain pbl;		/* SRQ WQE ring */
	u64 p_phys_addr_tbl;
	u32 wqe_prod;			/* WQE producer index */
	u32 sge_prod;			/* SGE producer index */
	u32 wr_prod_cnt;		/* posted work requests */
	u32 wr_cons_cnt;		/* consumed work requests */
	u32 num_elems;

	/* producer pair (wqe_prod/sge_prod) shared with the device */
	u32 *virt_prod_pair_addr;
	dma_addr_t phy_prod_pair_addr;
};
357
/* Shared receive queue; wraps the core ib_srq (must be first). */
struct qedr_srq {
	struct ib_srq ibsrq;
	struct qedr_dev *dev;

	struct qedr_userq usrq;		/* backing memory for user SRQs */
	struct qedr_srq_hwq_info hw_srq;
	struct ib_umem *prod_umem;	/* pinned producer-pair page (user SRQs) */
	u16 srq_id;
	u32 srq_limit;			/* limit for the SRQ-limit async event */
	/* lock to protect srq recv post */
	spinlock_t lock;
};
370
/* Bit flags accumulated in qedr_qp.err_bitmap describing QP errors. */
enum qedr_qp_err_bitmap {
	QEDR_QP_ERR_SQ_FULL = 1,
	QEDR_QP_ERR_RQ_FULL = 2,
	QEDR_QP_ERR_BAD_SR = 4,		/* bad send request */
	QEDR_QP_ERR_BAD_RR = 8,		/* bad receive request */
	QEDR_QP_ERR_SQ_PBL_FULL = 16,
	QEDR_QP_ERR_RQ_PBL_FULL = 32,
};
379
/* Queue pair state; embeds the core ib_qp. */
struct qedr_qp {
	struct ib_qp ibqp;	/* must be first */
	struct qedr_dev *dev;
	struct qedr_iw_ep *ep;	/* iWARP connection endpoint, if any */
	struct qedr_qp_hwq_info sq;
	struct qedr_qp_hwq_info rq;

	u32 max_inline_data;

	/* Lock for QP's */
	spinlock_t q_lock;
	struct qedr_cq *sq_cq;
	struct qedr_cq *rq_cq;
	struct qedr_srq *srq;	/* NULL unless attached to an SRQ */
	enum qed_roce_qp_state state;
	u32 id;
	struct qedr_pd *pd;
	enum ib_qp_type qp_type;
	struct qed_rdma_qp *qed_qp;	/* qed core QP handle */
	u32 qp_id;
	u16 icid;		/* firmware connection id */
	u16 mtu;
	int sgid_idx;
	u32 rq_psn;
	u32 sq_psn;
	u32 qkey;
	u32 dest_qp_num;

	/* Relevant to qps created from kernel space only (ULPs) */
	u8 prev_wqe_size;
	u16 wqe_cons;
	u32 err_bitmap;		/* qedr_qp_err_bitmap flags */
	bool signaled;		/* true when all SQ WRs generate CQEs */

	/* SQ shadow: per-WQE info needed when the completion arrives */
	struct {
		u64 wr_id;
		enum ib_wc_opcode opcode;
		u32 bytes_len;
		u8 wqe_size;
		bool signaled;
		dma_addr_t icrc_mapping;
		u32 *icrc;
		struct qedr_mr *mr;
	} *wqe_wr_id;

	/* RQ shadow: per-RQE info needed when the completion arrives */
	struct {
		u64 wr_id;
		struct ib_sge sg_list[RDMA_MAX_SGE_PER_RQ_WQE];
		u8 wqe_size;

		u8 smac[ETH_ALEN];
		u16 vlan;
		int rc;
	} *rqe_wr_id;

	/* Relevant to qps created from user space only (applications) */
	struct qedr_userq usq;
	struct qedr_userq urq;
	atomic_t refcnt;	/* iWARP QP reference count */
	bool destroyed;
};
443
/* Address handle; wraps the core ib_ah (must be first) and caches the
 * attributes it was created with.
 */
struct qedr_ah {
	struct ib_ah ibah;
	struct rdma_ah_attr attr;
};
448
/* How a memory region was registered. */
enum qedr_mr_type {
	QEDR_MR_USER,	/* registered from userspace memory */
	QEDR_MR_KERNEL,
	QEDR_MR_DMA,	/* DMA MR covering all of memory */
	QEDR_MR_FRMR,	/* fast-registration MR */
};
455
/* PBL bookkeeping shared by all MR types; free/inuse lists allow PBL
 * reuse across fast-register cycles.
 */
struct mr_info {
	struct qedr_pbl *pbl_table;
	struct qedr_pbl_info pbl_info;
	struct list_head free_pbl_list;
	struct list_head inuse_pbl_list;
	u32 completed;		/* invalidation completions seen */
	u32 completed_handled;	/* invalidation completions processed */
};
464
/* Memory region; wraps the core ib_mr (must be first). */
struct qedr_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;	/* pinned pages for user MRs, else NULL */

	struct qed_rdma_register_tid_in_params hw_mr;	/* fw registration params */
	enum qedr_mr_type type;

	struct qedr_dev *dev;
	struct mr_info info;

	/* page list filled by ib_map_mr_sg() for FRMRs */
	u64 *pages;
	u32 npages;
};
478
/* Like SET_FIELD() but for fields that declare only a _SHIFT (no _MASK);
 * note @flag is not masked, so callers must pass an in-range value.
 */
#define SET_FIELD2(value, name, flag) ((value) |= ((flag) << (name ## _SHIFT)))

/* Responder CQE flag masks, pre-shifted into place. */
#define QEDR_RESP_IMM	(RDMA_CQE_RESPONDER_IMM_FLG_MASK << \
			 RDMA_CQE_RESPONDER_IMM_FLG_SHIFT)
#define QEDR_RESP_RDMA	(RDMA_CQE_RESPONDER_RDMA_FLG_MASK << \
			 RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT)
#define QEDR_RESP_INV	(RDMA_CQE_RESPONDER_INV_FLG_MASK << \
			 RDMA_CQE_RESPONDER_INV_FLG_SHIFT)
487
qedr_inc_sw_cons(struct qedr_qp_hwq_info * info)488 static inline void qedr_inc_sw_cons(struct qedr_qp_hwq_info *info)
489 {
490 info->cons = (info->cons + 1) % info->max_wr;
491 info->wqe_cons++;
492 }
493
qedr_inc_sw_prod(struct qedr_qp_hwq_info * info)494 static inline void qedr_inc_sw_prod(struct qedr_qp_hwq_info *info)
495 {
496 info->prod = (info->prod + 1) % info->max_wr;
497 }
498
/* Resolve the destination MAC address for @ah_attr into @mac_addr.
 *
 * Returns 0 on success.  Returns -EINVAL (with @mac_addr zeroed) when the
 * destination GID is all-zero — local port GIDs are not supported — and
 * -EINVAL (with @mac_addr untouched) when no DMAC has been resolved for
 * the AH attributes.
 *
 * Fixed: removed the dead local 'struct in6_addr in6' and the memcpy()
 * that filled it; the copy was never read.
 */
static inline int qedr_get_dmac(struct qedr_dev *dev,
				struct rdma_ah_attr *ah_attr, u8 *mac_addr)
{
	union ib_gid zero_sgid = { { 0 } };
	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
	u8 *dmac;

	if (!memcmp(&grh->dgid, &zero_sgid, sizeof(union ib_gid))) {
		DP_ERR(dev, "Local port GID not supported\n");
		eth_zero_addr(mac_addr);
		return -EINVAL;
	}

	dmac = rdma_ah_retrieve_dmac(ah_attr);
	if (!dmac)
		return -EINVAL;
	ether_addr_copy(mac_addr, dmac);

	return 0;
}
521
/* iWARP listening endpoint created for an iw_cm listen request. */
struct qedr_iw_listener {
	struct qedr_dev *dev;
	struct iw_cm_id *cm_id;
	int backlog;		/* pending-connection backlog */
	void *qed_handle;	/* opaque qed listener handle */
};
528
/* iWARP connection endpoint tying an iw_cm id to a QP. */
struct qedr_iw_ep {
	struct qedr_dev *dev;
	struct iw_cm_id *cm_id;
	struct qedr_qp *qp;
	void *qed_context;	/* opaque qed connection context */
	u8 during_connect;	/* nonzero while connection setup is in flight */
};
536
537 static inline
get_qedr_ucontext(struct ib_ucontext * ibucontext)538 struct qedr_ucontext *get_qedr_ucontext(struct ib_ucontext *ibucontext)
539 {
540 return container_of(ibucontext, struct qedr_ucontext, ibucontext);
541 }
542
get_qedr_dev(struct ib_device * ibdev)543 static inline struct qedr_dev *get_qedr_dev(struct ib_device *ibdev)
544 {
545 return container_of(ibdev, struct qedr_dev, ibdev);
546 }
547
get_qedr_pd(struct ib_pd * ibpd)548 static inline struct qedr_pd *get_qedr_pd(struct ib_pd *ibpd)
549 {
550 return container_of(ibpd, struct qedr_pd, ibpd);
551 }
552
get_qedr_cq(struct ib_cq * ibcq)553 static inline struct qedr_cq *get_qedr_cq(struct ib_cq *ibcq)
554 {
555 return container_of(ibcq, struct qedr_cq, ibcq);
556 }
557
get_qedr_qp(struct ib_qp * ibqp)558 static inline struct qedr_qp *get_qedr_qp(struct ib_qp *ibqp)
559 {
560 return container_of(ibqp, struct qedr_qp, ibqp);
561 }
562
get_qedr_ah(struct ib_ah * ibah)563 static inline struct qedr_ah *get_qedr_ah(struct ib_ah *ibah)
564 {
565 return container_of(ibah, struct qedr_ah, ibah);
566 }
567
get_qedr_mr(struct ib_mr * ibmr)568 static inline struct qedr_mr *get_qedr_mr(struct ib_mr *ibmr)
569 {
570 return container_of(ibmr, struct qedr_mr, ibmr);
571 }
572
get_qedr_srq(struct ib_srq * ibsrq)573 static inline struct qedr_srq *get_qedr_srq(struct ib_srq *ibsrq)
574 {
575 return container_of(ibsrq, struct qedr_srq, ibsrq);
576 }
577 #endif
578