/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#ifndef _QED_RDMA_H
#define _QED_RDMA_H
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_rdma_if.h>
#include "qed.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_iwarp.h"
#include "qed_roce.h"

#define QED_RDMA_MAX_P_KEY (1)
#define QED_RDMA_MAX_WQE (0x7FFF)
#define QED_RDMA_MAX_SRQ_WQE_ELEM (0x7FFF)
#define QED_RDMA_PAGE_SIZE_CAPS (0xFFFFF000)
#define QED_RDMA_ACK_DELAY (15)
#define QED_RDMA_MAX_MR_SIZE (0x10000000000ULL)
#define QED_RDMA_MAX_CQS (RDMA_MAX_CQS)
#define QED_RDMA_MAX_MRS (RDMA_MAX_TIDS)
/* Add 1 for header element */
#define QED_RDMA_MAX_SRQ_ELEM_PER_WQE (RDMA_MAX_SGE_PER_RQ_WQE + 1)
#define QED_RDMA_MAX_SGE_PER_SRQ_WQE (RDMA_MAX_SGE_PER_RQ_WQE)
#define QED_RDMA_SRQ_WQE_ELEM_SIZE (16)
#define QED_RDMA_MAX_SRQS (32 * 1024)

#define QED_RDMA_MAX_CQE_32_BIT (0x7FFFFFFF - 1)
#define QED_RDMA_MAX_CQE_16_BIT (0x7FFF - 1)

/* Up to 2^16 XRC Domains are supported, but the actual number of supported XRC
 * SRQs is much smaller so there's no need to have that many domains.
 */
#define QED_RDMA_MAX_XRCDS (roundup_pow_of_two(RDMA_MAX_XRC_SRQS))

enum qed_rdma_toggle_bit {
	QED_RDMA_TOGGLE_BIT_CLEAR = 0,
	QED_RDMA_TOGGLE_BIT_SET = 1
};

#define QED_RDMA_MAX_BMAP_NAME (10)
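/* Simple ID allocator: a bitmap of up to max_count IDs plus a short name
 * used in debug and error messages.
 */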
struct qed_bmap {
	unsigned long *bitmap;
	u32 max_count;
	char name[QED_RDMA_MAX_BMAP_NAME];
};

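/* Per-hwfn RDMA state: the ID bitmaps for CQs, PDs, XRCDs, TIDs, QPs, SRQs,
 * CIDs and DPIs, the reported device/port capabilities, and (for iWARP) the
 * connection-management context.
 */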
struct qed_rdma_info {
	/* spin lock to protect bitmaps */
	spinlock_t lock;

	struct qed_bmap cq_map;
	struct qed_bmap pd_map;
	struct qed_bmap xrcd_map;
	struct qed_bmap tid_map;
	struct qed_bmap qp_map;
	struct qed_bmap srq_map;
	struct qed_bmap xrc_srq_map;
	struct qed_bmap cid_map;
	struct qed_bmap tcp_cid_map;
	struct qed_bmap real_cid_map;
	struct qed_bmap dpi_map;
	struct qed_bmap toggle_bits;
	struct qed_rdma_events events;
	struct qed_rdma_device *dev;
	struct qed_rdma_port *port;
	u32 last_tid;
	u8 num_cnqs;
	u32 num_qps;
	u32 num_mrs;
	u32 num_srqs;
	u16 srq_id_offset;
	u16 queue_zone_base;
	u16 max_queue_zones;
	enum protocol_type proto;
	struct qed_iwarp_info iwarp;
	u8 active:1;
};

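/* Software state tracked for a single RoCE/iWARP/XRC QP: addressing and path
 * attributes, requester (SQ/ORQ) and responder (RQ/IRQ) resources, and the
 * DMA addresses of the SQ/RQ page tables.
 */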
struct qed_rdma_qp {
	struct regpair qp_handle;
	struct regpair qp_handle_async;
	u32 qpid;
	u16 icid;
	enum qed_roce_qp_state cur_state;
	enum qed_rdma_qp_type qp_type;
	enum qed_iwarp_qp_state iwarp_state;
	bool use_srq;
	bool signal_all;
	bool fmr_and_reserved_lkey;

	bool incoming_rdma_read_en;
	bool incoming_rdma_write_en;
	bool incoming_atomic_en;
	bool e2e_flow_control_en;

	u16 pd;
	u16 pkey;
	u32 dest_qp;
	u16 mtu;
	u16 srq_id;
	u8 traffic_class_tos;
	u8 hop_limit_ttl;
	u16 dpi;
	u32 flow_label;
	bool lb_indication;
	u16 vlan_id;
	u32 ack_timeout;
	u8 retry_cnt;
	u8 rnr_retry_cnt;
	u8 min_rnr_nak_timer;
	bool sqd_async;
	union qed_gid sgid;
	union qed_gid dgid;
	enum roce_mode roce_mode;
	u16 udp_src_port;
	u8 stats_queue;

	/* requester */
	u8 max_rd_atomic_req;
	u32 sq_psn;
	u16 sq_cq_id;
	u16 sq_num_pages;
	dma_addr_t sq_pbl_ptr;
	void *orq;
	dma_addr_t orq_phys_addr;
	u8 orq_num_pages;
	bool req_offloaded;
	bool has_req;

	/* responder */
	u8 max_rd_atomic_resp;
	u32 rq_psn;
	u16 rq_cq_id;
	u16 rq_num_pages;
	u16 xrcd_id;
	dma_addr_t rq_pbl_ptr;
	void *irq;
	dma_addr_t irq_phys_addr;
	u8 irq_num_pages;
	bool resp_offloaded;
	u32 cq_prod;
	bool has_resp;

	u8 remote_mac_addr[6];
	u8 local_mac_addr[6];

	void *shared_queue;
	dma_addr_t shared_queue_phys_addr;
	struct qed_iwarp_ep *ep;
	u8 edpm_mode;
};

static inline bool qed_rdma_is_xrc_qp(struct qed_rdma_qp *qp)
{
	if (qp->qp_type == QED_RDMA_QP_TYPE_XRC_TGT ||
	    qp->qp_type == QED_RDMA_QP_TYPE_XRC_INI)
		return true;

	return false;
}
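/* Illustrative use only (the call site and bind_qp_to_xrcd() below are
 * hypothetical, not taken from this driver): create/modify paths can branch
 * on the helper when XRC QPs need extra handling, e.g. binding the QP to its
 * XRC domain:
 *
 *	if (qed_rdma_is_xrc_qp(qp))
 *		rc = bind_qp_to_xrcd(p_hwfn, qp, qp->xrcd_id);
 */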
#if IS_ENABLED(CONFIG_QED_RDMA)
void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn);
void qed_rdma_info_free(struct qed_hwfn *p_hwfn);
#else
static inline void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt) {}
static inline int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn) { return -EINVAL; }
static inline void qed_rdma_info_free(struct qed_hwfn *p_hwfn) {}
#endif
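/* The inline stubs above keep RDMA-aware callers building when
 * CONFIG_QED_RDMA is disabled; qed_rdma_info_alloc() then reports -EINVAL.
 * A minimal caller sketch (hypothetical, error label not shown):
 *
 *	rc = qed_rdma_info_alloc(p_hwfn);
 *	if (rc)
 *		goto err;
 */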

int
qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
		    struct qed_bmap *bmap, u32 max_count, char *name);

void
qed_rdma_bmap_free(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, bool check);

int
qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
		       struct qed_bmap *bmap, u32 *id_num);

void
qed_bmap_set_id(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, u32 id_num);

void
qed_bmap_release_id(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, u32 id_num);

int
qed_bmap_test_id(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, u32 id_num);
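/* The helpers above manage the ID bitmaps; a typical lifecycle, as a sketch
 * only ("example_map", "id" and the sizes are hypothetical; assumes the usual
 * 0-on-success return convention and that callers serialize ID allocation
 * with the p_rdma_info->lock that protects the bitmaps):
 *
 *	struct qed_bmap example_map;
 *	u32 id;
 *
 *	if (qed_rdma_bmap_alloc(p_hwfn, &example_map, 256, "example"))
 *		return -ENOMEM;
 *	if (!qed_rdma_bmap_alloc_id(p_hwfn, &example_map, &id)) {
 *		... use id; qed_bmap_test_id() reports whether it is set ...
 *		qed_bmap_release_id(p_hwfn, &example_map, id);
 *	}
 *	qed_rdma_bmap_free(p_hwfn, &example_map, true);
 */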

void qed_rdma_set_fw_mac(__le16 *p_fw_mac, const u8 *p_qed_mac);

bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn);
#endif