/* (HTML code-browser navigation chrome removed from scraped source) */
1 /* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
2 /* Copyright (c) 2015 - 2021 Intel Corporation */
3 #ifndef IRDMA_TYPE_H
4 #define IRDMA_TYPE_H
5 #include "status.h"
6 #include "osdep.h"
7 #include "irdma.h"
8 #include "user.h"
9 #include "hmc.h"
10 #include "uda.h"
11 #include "ws.h"
/*
 * Debug trace tags: each subsystem of the driver logs under one of these
 * string labels so output can be filtered per component.
 * Fix: IRDMA_DEBUG_CQE used a tab between "#define" and the name; normalized
 * to a space for consistency with the other definitions.
 */
#define IRDMA_DEBUG_ERR		"ERR"
#define IRDMA_DEBUG_INIT	"INIT"
#define IRDMA_DEBUG_DEV		"DEV"
#define IRDMA_DEBUG_CM		"CM"
#define IRDMA_DEBUG_VERBS	"VERBS"
#define IRDMA_DEBUG_PUDA	"PUDA"
#define IRDMA_DEBUG_ILQ		"ILQ"
#define IRDMA_DEBUG_IEQ		"IEQ"
#define IRDMA_DEBUG_QP		"QP"
#define IRDMA_DEBUG_CQ		"CQ"
#define IRDMA_DEBUG_MR		"MR"
#define IRDMA_DEBUG_PBLE	"PBLE"
#define IRDMA_DEBUG_WQE		"WQE"
#define IRDMA_DEBUG_AEQ		"AEQ"
#define IRDMA_DEBUG_CQP		"CQP"
#define IRDMA_DEBUG_HMC		"HMC"
#define IRDMA_DEBUG_USER	"USER"
#define IRDMA_DEBUG_VIRT	"VIRT"
#define IRDMA_DEBUG_DCB		"DCB"
#define IRDMA_DEBUG_CQE		"CQE"
#define IRDMA_DEBUG_CLNT	"CLNT"
#define IRDMA_DEBUG_WS		"WS"
#define IRDMA_DEBUG_STATS	"STATS"
35 
/* Page sizes selectable for memory registrations; values are explicit so
 * the encoding used elsewhere in the driver is obvious at a glance.
 */
enum irdma_page_size {
	IRDMA_PAGE_SIZE_4K = 0,
	IRDMA_PAGE_SIZE_2M = 1,
	IRDMA_PAGE_SIZE_1G = 2,
};
41 
/* Header-control bits indicating which protocol headers accompany a
 * terminate message (used in irdma_terminate_hdr.hdrct).
 */
enum irdma_hdrct_flags {
	DDP_LEN_FLAG  = 0x80,
	DDP_HDR_FLAG  = 0x40,
	RDMA_HDR_FLAG = 0x20,
};
47 
/* Protocol layer that originated a terminate condition (RDMAP/DDP/MPA). */
enum irdma_term_layers {
	LAYER_RDMA = 0,
	LAYER_DDP  = 1,
	LAYER_MPA  = 2,
};
53 
/* Error-type field of a terminate header. Values deliberately overlap
 * across layers (e.g. RDMAP_REMOTE_PROT == DDP_TAGGED_BUF == 1); they are
 * only meaningful together with the originating layer above.
 */
enum irdma_term_error_types {
	RDMAP_REMOTE_PROT = 1,
	RDMAP_REMOTE_OP   = 2,
	DDP_CATASTROPHIC  = 0,
	DDP_TAGGED_BUF    = 1,
	DDP_UNTAGGED_BUF  = 2,
	DDP_LLP		  = 3,
};
62 
/* RDMAP-layer error codes carried in terminate messages (RFC 5040-style). */
enum irdma_term_rdma_errors {
	RDMAP_INV_STAG		  = 0x00,
	RDMAP_INV_BOUNDS	  = 0x01,
	RDMAP_ACCESS		  = 0x02,
	RDMAP_UNASSOC_STAG	  = 0x03,
	RDMAP_TO_WRAP		  = 0x04,
	RDMAP_INV_RDMAP_VER       = 0x05,
	RDMAP_UNEXPECTED_OP       = 0x06,
	RDMAP_CATASTROPHIC_LOCAL  = 0x07,
	RDMAP_CATASTROPHIC_GLOBAL = 0x08,
	RDMAP_CANT_INV_STAG       = 0x09,
	RDMAP_UNSPECIFIED	  = 0xff,
};
76 
/* DDP-layer error codes. Tagged and untagged sub-spaces reuse numeric
 * values (e.g. 0x00 is both CATASTROPHIC_LOCAL and TAGGED_INV_STAG); the
 * error-type field selects which sub-space applies.
 */
enum irdma_term_ddp_errors {
	DDP_CATASTROPHIC_LOCAL      = 0x00,
	DDP_TAGGED_INV_STAG	    = 0x00,
	DDP_TAGGED_BOUNDS	    = 0x01,
	DDP_TAGGED_UNASSOC_STAG     = 0x02,
	DDP_TAGGED_TO_WRAP	    = 0x03,
	DDP_TAGGED_INV_DDP_VER      = 0x04,
	DDP_UNTAGGED_INV_QN	    = 0x01,
	DDP_UNTAGGED_INV_MSN_NO_BUF = 0x02,
	DDP_UNTAGGED_INV_MSN_RANGE  = 0x03,
	DDP_UNTAGGED_INV_MO	    = 0x04,
	DDP_UNTAGGED_INV_TOO_LONG   = 0x05,
	DDP_UNTAGGED_INV_DDP_VER    = 0x06,
};
91 
/* MPA-layer error codes for terminate messages. */
enum irdma_term_mpa_errors {
	MPA_CLOSED  = 0x01,
	MPA_CRC     = 0x02,
	MPA_MARKER  = 0x03,
	MPA_REQ_RSP = 0x04,
};
98 
/* Classification of asynchronous QP error events reported to the core. */
enum irdma_qp_event_type {
	IRDMA_QP_EVENT_CATASTROPHIC = 0,
	IRDMA_QP_EVENT_ACCESS_ERR = 1,
	IRDMA_QP_EVENT_REQ_ERR = 2,
};
104 
/* Indexes into the 32-bit hardware statistics arrays. The *_MAX_32_GEN_1
 * sentinel marks how many counters gen-1 hardware provides and must stay
 * equal to the first gen-2-only entry; *_MAX_32 sizes the full arrays.
 */
enum irdma_hw_stats_index_32b {
	IRDMA_HW_STAT_INDEX_IP4RXDISCARD	= 0,
	IRDMA_HW_STAT_INDEX_IP4RXTRUNC		= 1,
	IRDMA_HW_STAT_INDEX_IP4TXNOROUTE	= 2,
	IRDMA_HW_STAT_INDEX_IP6RXDISCARD	= 3,
	IRDMA_HW_STAT_INDEX_IP6RXTRUNC		= 4,
	IRDMA_HW_STAT_INDEX_IP6TXNOROUTE	= 5,
	IRDMA_HW_STAT_INDEX_TCPRTXSEG		= 6,
	IRDMA_HW_STAT_INDEX_TCPRXOPTERR		= 7,
	IRDMA_HW_STAT_INDEX_TCPRXPROTOERR	= 8,
	IRDMA_HW_STAT_INDEX_MAX_32_GEN_1	= 9, /* Must be same value as next entry */
	IRDMA_HW_STAT_INDEX_RXVLANERR		= 9,
	IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED	= 10,
	IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED	= 11,
	IRDMA_HW_STAT_INDEX_TXNPCNPSENT		= 12,
	IRDMA_HW_STAT_INDEX_MAX_32, /* Must be last entry */
};
122 
/* Indexes into the 64-bit hardware statistics arrays. As with the 32-bit
 * set, *_MAX_64_GEN_1 marks the gen-1 counter count and must equal the
 * first gen-2-only entry; *_MAX_64 sizes the full arrays.
 */
enum irdma_hw_stats_index_64b {
	IRDMA_HW_STAT_INDEX_IP4RXOCTS	= 0,
	IRDMA_HW_STAT_INDEX_IP4RXPKTS	= 1,
	IRDMA_HW_STAT_INDEX_IP4RXFRAGS	= 2,
	IRDMA_HW_STAT_INDEX_IP4RXMCPKTS	= 3,
	IRDMA_HW_STAT_INDEX_IP4TXOCTS	= 4,
	IRDMA_HW_STAT_INDEX_IP4TXPKTS	= 5,
	IRDMA_HW_STAT_INDEX_IP4TXFRAGS	= 6,
	IRDMA_HW_STAT_INDEX_IP4TXMCPKTS	= 7,
	IRDMA_HW_STAT_INDEX_IP6RXOCTS	= 8,
	IRDMA_HW_STAT_INDEX_IP6RXPKTS	= 9,
	IRDMA_HW_STAT_INDEX_IP6RXFRAGS	= 10,
	IRDMA_HW_STAT_INDEX_IP6RXMCPKTS	= 11,
	IRDMA_HW_STAT_INDEX_IP6TXOCTS	= 12,
	IRDMA_HW_STAT_INDEX_IP6TXPKTS	= 13,
	IRDMA_HW_STAT_INDEX_IP6TXFRAGS	= 14,
	IRDMA_HW_STAT_INDEX_IP6TXMCPKTS	= 15,
	IRDMA_HW_STAT_INDEX_TCPRXSEGS	= 16,
	IRDMA_HW_STAT_INDEX_TCPTXSEG	= 17,
	IRDMA_HW_STAT_INDEX_RDMARXRDS	= 18,
	IRDMA_HW_STAT_INDEX_RDMARXSNDS	= 19,
	IRDMA_HW_STAT_INDEX_RDMARXWRS	= 20,
	IRDMA_HW_STAT_INDEX_RDMATXRDS	= 21,
	IRDMA_HW_STAT_INDEX_RDMATXSNDS	= 22,
	IRDMA_HW_STAT_INDEX_RDMATXWRS	= 23,
	IRDMA_HW_STAT_INDEX_RDMAVBND	= 24,
	IRDMA_HW_STAT_INDEX_RDMAVINV	= 25,
	IRDMA_HW_STAT_INDEX_MAX_64_GEN_1 = 26, /* Must be same value as next entry */
	IRDMA_HW_STAT_INDEX_IP4RXMCOCTS	= 26,
	IRDMA_HW_STAT_INDEX_IP4TXMCOCTS	= 27,
	IRDMA_HW_STAT_INDEX_IP6RXMCOCTS	= 28,
	IRDMA_HW_STAT_INDEX_IP6TXMCOCTS	= 29,
	IRDMA_HW_STAT_INDEX_UDPRXPKTS	= 30,
	IRDMA_HW_STAT_INDEX_UDPTXPKTS	= 31,
	IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS = 32,
	IRDMA_HW_STAT_INDEX_MAX_64, /* Must be last entry */
};
160 
/* Indexes into the firmware feature-info array (irdma_sc_dev.feature_info);
 * IRDMA_MAX_FEATURES sizes that array, so it must remain last.
 */
enum irdma_feature_type {
	IRDMA_FEATURE_FW_INFO = 0,
	IRDMA_HW_VERSION_INFO = 1,
	IRDMA_QSETS_MAX       = 26,
	IRDMA_MAX_FEATURES, /* Must be last entry */
};
167 
/* Work-scheduler node priority/arbitration modes. */
enum irdma_sched_prio_type {
	IRDMA_PRIO_WEIGHTED_RR     = 1,
	IRDMA_PRIO_STRICT	   = 2,
	IRDMA_PRIO_WEIGHTED_STRICT = 3,
};
173 
/* Kind of function/owner a VSI belongs to: virtual function, virtual
 * machine, or physical function. Values made explicit for clarity.
 */
enum irdma_vm_vf_type {
	IRDMA_VF_TYPE = 0,
	IRDMA_VM_TYPE = 1,
	IRDMA_PF_TYPE = 2,
};
179 
/* HMC resource-partitioning profiles selectable at CQP init. */
enum irdma_cqp_hmc_profile {
	IRDMA_HMC_PROFILE_DEFAULT  = 1,
	IRDMA_HMC_PROFILE_FAVOR_VF = 2,
	IRDMA_HMC_PROFILE_EQUAL    = 3,
};
185 
/* Connection types that can be installed in the quad-hash table. */
enum irdma_quad_entry_type {
	IRDMA_QHASH_TYPE_TCP_ESTABLISHED = 1,
	IRDMA_QHASH_TYPE_TCP_SYN,
	IRDMA_QHASH_TYPE_UDP_UNICAST,
	IRDMA_QHASH_TYPE_UDP_MCAST,
	IRDMA_QHASH_TYPE_ROCE_MCAST,
	IRDMA_QHASH_TYPE_ROCEV2_HW,
};
194 
/* Operations on quad-hash table entries. */
enum irdma_quad_hash_manage_type {
	IRDMA_QHASH_MANAGE_TYPE_DELETE = 0,
	IRDMA_QHASH_MANAGE_TYPE_ADD,
	IRDMA_QHASH_MANAGE_TYPE_MODIFY,
};
200 
/* Who handles TCP SYN/RST segments (hardware vs firmware), with or
 * without the "secure" variant.
 */
enum irdma_syn_rst_handling {
	IRDMA_SYN_RST_HANDLING_HW_TCP_SECURE = 0,
	IRDMA_SYN_RST_HANDLING_HW_TCP,
	IRDMA_SYN_RST_HANDLING_FW_TCP_SECURE,
	IRDMA_SYN_RST_HANDLING_FW_TCP,
};
207 
/* Distinguishes ordinary SQ/RQ work queues from the CQP queue. */
enum irdma_queue_type {
	IRDMA_QUEUE_TYPE_SQ_RQ = 0,
	IRDMA_QUEUE_TYPE_CQP = 1,
};
212 
213 struct irdma_sc_dev;
214 struct irdma_vsi_pestat;
215 
/* DCQCN congestion-control tunables passed to firmware at CQP init.
 * Field meanings follow the DCQCN algorithm parameters (rate increase /
 * decrease factors, timers); exact units are firmware-defined.
 */
struct irdma_dcqcn_cc_params {
	u8 cc_cfg_valid;
	u8 min_dec_factor;
	u8 min_rate;
	u8 dcqcn_f;
	u16 rai_factor;
	u16 hai_factor;
	u16 dcqcn_t;
	u32 dcqcn_b;
	u32 rreduce_mperiod;
};
227 
/* Caller-supplied parameters for irdma_sc_cqp_init(): physical/virtual
 * addresses of the CQP SQ and host context, sizing, firmware version, and
 * per-device policy flags. Mirrors the fields copied into irdma_sc_cqp.
 */
struct irdma_cqp_init_info {
	u64 cqp_compl_ctx;
	u64 host_ctx_pa;
	u64 sq_pa;
	struct irdma_sc_dev *dev;
	struct irdma_cqp_quanta *sq;
	struct irdma_dcqcn_cc_params dcqcn_params;
	__le64 *host_ctx;
	u64 *scratch_array;
	u32 sq_size;
	u16 hw_maj_ver;
	u16 hw_min_ver;
	u8 struct_ver;
	u8 hmc_profile;
	u8 ena_vf_count;
	u8 ceqs_per_vf;
	bool en_datacenter_tcp:1;
	bool disable_packed:1;
	bool rocev2_rto_policy:1;
	enum irdma_protocol_used protocol_used;
};
249 
/* Terminate message header: originating layer/error-type pair, the error
 * code, and header-control flags (enum irdma_hdrct_flags). Field order
 * presumably matches the on-wire/TERM buffer layout — do not reorder.
 */
struct irdma_terminate_hdr {
	u8 layer_etype;
	u8 error_code;
	u8 hdrct;
	u8 rsvd;
};
256 
/* Fixed-size little-endian hardware queue elements. Each wrapper struct
 * exists to give a sized type to one class of ring entry; the *_SIZE
 * constants (declared elsewhere) are in units of 64-bit words.
 */
struct irdma_cqp_sq_wqe {
	__le64 buf[IRDMA_CQP_WQE_SIZE];
};

struct irdma_sc_aeqe {
	__le64 buf[IRDMA_AEQE_SIZE];
};

struct irdma_ceqe {
	__le64 buf[IRDMA_CEQE_SIZE];
};

struct irdma_cqp_ctx {
	__le64 buf[IRDMA_CQP_CTX_SIZE];
};

struct irdma_cq_shadow_area {
	__le64 buf[IRDMA_SHADOW_AREA_SIZE];
};
276 
/* Register offsets and gathered values for the hardware statistics,
 * indexed by the irdma_hw_stats_index_32b/_64b enums above.
 */
struct irdma_dev_hw_stats_offsets {
	u32 stats_offset_32[IRDMA_HW_STAT_INDEX_MAX_32];
	u32 stats_offset_64[IRDMA_HW_STAT_INDEX_MAX_64];
};

struct irdma_dev_hw_stats {
	u64 stats_val_32[IRDMA_HW_STAT_INDEX_MAX_32];
	u64 stats_val_64[IRDMA_HW_STAT_INDEX_MAX_64];
};
286 
/* In-memory layout of the statistics buffer written by the hardware
 * stats-gather operation. Field order and the rsvd* padding mirror the
 * device buffer format — NOTE(review): do not reorder or resize fields.
 */
struct irdma_gather_stats {
	u32 rsvd1;
	u32 rxvlanerr;
	u64 ip4rxocts;
	u64 ip4rxpkts;
	u32 ip4rxtrunc;
	u32 ip4rxdiscard;
	u64 ip4rxfrags;
	u64 ip4rxmcocts;
	u64 ip4rxmcpkts;
	u64 ip6rxocts;
	u64 ip6rxpkts;
	u32 ip6rxtrunc;
	u32 ip6rxdiscard;
	u64 ip6rxfrags;
	u64 ip6rxmcocts;
	u64 ip6rxmcpkts;
	u64 ip4txocts;
	u64 ip4txpkts;
	u64 ip4txfrag;
	u64 ip4txmcocts;
	u64 ip4txmcpkts;
	u64 ip6txocts;
	u64 ip6txpkts;
	u64 ip6txfrags;
	u64 ip6txmcocts;
	u64 ip6txmcpkts;
	u32 ip6txnoroute;
	u32 ip4txnoroute;
	u64 tcprxsegs;
	u32 tcprxprotoerr;
	u32 tcprxopterr;
	u64 tcptxsegs;
	u32 rsvd2;
	u32 tcprtxseg;
	u64 udprxpkts;
	u64 udptxpkts;
	u64 rdmarxwrs;
	u64 rdmarxrds;
	u64 rdmarxsnds;
	u64 rdmatxwrs;
	u64 rdmatxrds;
	u64 rdmatxsnds;
	u64 rdmavbn;
	u64 rdmavinv;
	u64 rxnpecnmrkpkts;
	u32 rxrpcnphandled;
	u32 rxrpcnpignored;
	u32 txnpcnpsent;
	u32 rsvd3[88];	/* pads the structure to the hardware buffer size */
};
338 
/* Bookkeeping for a stats-gather request: DMA buffer plus pointers to the
 * current and previous gather results (used to compute deltas).
 */
struct irdma_stats_gather_info {
	bool use_hmc_fcn_index:1;
	bool use_stats_inst:1;
	u8 hmc_fcn_index;
	u8 stats_inst_index;
	struct irdma_dma_mem stats_buff_mem;
	void *gather_stats_va;
	void *last_gather_stats_va;
};
348 
/* Per-VSI protocol-engine statistics state: current and last-sampled
 * counters, the periodic gather timer, and a lock protecting updates.
 */
struct irdma_vsi_pestat {
	struct irdma_hw *hw;
	struct irdma_dev_hw_stats hw_stats;
	struct irdma_stats_gather_info gather_info;
	struct timer_list stats_timer;
	struct irdma_sc_vsi *vsi;
	struct irdma_dev_hw_stats last_hw_stats;
	spinlock_t lock; /* rdma stats lock */
};
358 
/* Low-level hardware handle: MMIO base addresses, owning device, and the
 * HMC (host memory cache) bookkeeping.
 */
struct irdma_hw {
	u8 __iomem *hw_addr;
	u8 __iomem *priv_hw_addr;
	struct device *device;
	struct irdma_hmc_info hmc;
};
365 
/* Partial-FPDU reassembly state used by the IEQ path: sequence tracking,
 * the list of buffered receives, and counters for error accounting.
 */
struct irdma_pfpdu {
	struct list_head rxlist;
	u32 rcv_nxt;
	u32 fps;
	u32 max_fpdu_data;
	u32 nextseqnum;
	u32 rcv_start_seq;
	bool mode:1;
	bool mpa_crc_err:1;
	u8  marker_len;
	u64 total_ieq_bufs;
	u64 fpdu_processed;
	u64 bad_seq_num;
	u64 crc_err;
	u64 no_tx_bufs;
	u64 tx_err;
	u64 out_of_order;
	u64 pmode_count;
	struct irdma_sc_ah *ah;
	struct irdma_puda_buf *ah_buf;
	spinlock_t lock; /* fpdu processing lock */
	struct irdma_puda_buf *lastrcv_buf;
};
389 
/* Protection domain handle; abi_ver records the user-ABI version the PD
 * was created under.
 */
struct irdma_sc_pd {
	struct irdma_sc_dev *dev;
	u32 pd_id;
	int abi_ver;
};

/* One CQP SQ entry expressed as raw little-endian quanta. */
struct irdma_cqp_quanta {
	__le64 elem[IRDMA_CQP_WQE_SIZE];
};
399 
/* Control QP state: the SQ ring used to submit admin commands to firmware,
 * completion accounting (requested_ops vs completed_ops), and the policy
 * parameters captured from irdma_cqp_init_info.
 */
struct irdma_sc_cqp {
	u32 size;
	u64 sq_pa;
	u64 host_ctx_pa;
	void *back_cqp;
	struct irdma_sc_dev *dev;
	enum irdma_status_code (*process_cqp_sds)(struct irdma_sc_dev *dev,
						  struct irdma_update_sds_info *info);
	struct irdma_dma_mem sdbuf;
	struct irdma_ring sq_ring;
	struct irdma_cqp_quanta *sq_base;
	struct irdma_dcqcn_cc_params dcqcn_params;
	__le64 *host_ctx;
	u64 *scratch_array;
	u64 requested_ops;
	atomic64_t completed_ops;
	u32 cqp_id;
	u32 sq_size;
	u32 hw_sq_size;
	u16 hw_maj_ver;
	u16 hw_min_ver;
	u8 struct_ver;
	u8 polarity;	/* valid-bit phase for ring wrap detection */
	u8 hmc_profile;
	u8 ena_vf_count;
	u8 timeout_count;
	u8 ceqs_per_vf;
	bool en_datacenter_tcp:1;
	bool disable_packed:1;
	bool rocev2_rto_policy:1;
	enum irdma_protocol_used protocol_used;
};
432 
/* Asynchronous event queue state: ring of irdma_sc_aeqe entries plus
 * optional PBL mapping when the queue is virtually mapped.
 */
struct irdma_sc_aeq {
	u32 size;
	u64 aeq_elem_pa;
	struct irdma_sc_dev *dev;
	struct irdma_sc_aeqe *aeqe_base;
	void *pbl_list;
	u32 elem_cnt;
	struct irdma_ring aeq_ring;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	u32 msix_idx;
	u8 polarity;	/* valid-bit phase for ring wrap detection */
	bool virtual_map:1;
};
447 
/* Completion event queue state; reg_cq is the array of CQs registered to
 * this CEQ, guarded by req_cq_lock.
 */
struct irdma_sc_ceq {
	u32 size;
	u64 ceq_elem_pa;
	struct irdma_sc_dev *dev;
	struct irdma_ceqe *ceqe_base;
	void *pbl_list;
	u32 ceq_id;
	u32 elem_cnt;
	struct irdma_ring ceq_ring;
	u8 pbl_chunk_size;
	u8 tph_val;
	u32 first_pm_pbl_idx;
	u8 polarity;	/* valid-bit phase for ring wrap detection */
	struct irdma_sc_vsi *vsi;
	struct irdma_sc_cq **reg_cq;
	u32 reg_cq_size;
	spinlock_t req_cq_lock; /* protect access to reg_cq array */
	bool virtual_map:1;
	bool tph_en:1;
	bool itr_no_expire:1;
};
469 
470 struct irdma_sc_cq {
471 	struct irdma_cq_uk cq_uk;
472 	u64 cq_pa;
473 	u64 shadow_area_pa;
474 	struct irdma_sc_dev *dev;
475 	struct irdma_sc_vsi *vsi;
476 	void *pbl_list;
477 	void *back_cq;
478 	u32 ceq_id;
479 	u32 shadow_read_threshold;
480 	u8 pbl_chunk_size;
481 	u8 cq_type;
482 	u8 tph_val;
483 	u32 first_pm_pbl_idx;
484 	bool ceqe_mask:1;
485 	bool virtual_map:1;
486 	bool check_overflow:1;
487 	bool ceq_id_valid:1;
488 	bool tph_en;
489 };
490 
/* Privileged (kernel) QP state wrapping the user-level QP (qp_uk):
 * DMA addresses of the SQ/RQ/context/shadow/q2 areas, flush and terminate
 * bookkeeping, and QoS placement (qs_handle, user_pri, list).
 */
struct irdma_sc_qp {
	struct irdma_qp_uk qp_uk;
	u64 sq_pa;
	u64 rq_pa;
	u64 hw_host_ctx_pa;
	u64 shadow_area_pa;
	u64 q2_pa;
	struct irdma_sc_dev *dev;
	struct irdma_sc_vsi *vsi;
	struct irdma_sc_pd *pd;
	__le64 *hw_host_ctx;
	void *llp_stream_handle;
	struct irdma_pfpdu pfpdu;
	u32 ieq_qp;
	u8 *q2_buf;
	u64 qp_compl_ctx;
	u32 push_idx;
	u16 qs_handle;
	u16 push_offset;
	u8 flush_wqes_count;
	u8 sq_tph_val;
	u8 rq_tph_val;
	u8 qp_state;
	u8 hw_sq_size;
	u8 hw_rq_size;
	u8 src_mac_addr_idx;
	bool on_qoslist:1;
	bool ieq_pass_thru:1;
	bool sq_tph_en:1;
	bool rq_tph_en:1;
	bool rcv_tph_en:1;
	bool xmit_tph_en:1;
	bool virtual_map:1;
	bool flush_sq:1;
	bool flush_rq:1;
	bool sq_flush_code:1;
	bool rq_flush_code:1;
	enum irdma_flush_opcode flush_code;
	enum irdma_qp_event_type event_type;
	u8 term_flags;
	u8 user_pri;
	struct list_head list;	/* entry on the VSI qos[user_pri].qplist */
};
534 
/* CQP parameters for allocating/freeing a statistics instance. */
struct irdma_stats_inst_info {
	bool use_hmc_fcn_index;
	u8 hmc_fn_id;
	u8 stats_idx;
};

/* CQP parameters for programming the user-priority (UP) to TC map. */
struct irdma_up_info {
	u8 map[8];
	u8 cnp_up_override;
	u8 hmc_fcn_idx;
	bool use_vlan:1;
	bool use_cnp_up_override:1;
};
548 
/* Work-scheduler tree limits and CQP node-management parameters. */
#define IRDMA_MAX_WS_NODES	0x3FF
#define IRDMA_WS_NODE_INVALID	0xFFFF

struct irdma_ws_node_info {
	u16 id;
	u16 vsi;
	u16 parent_id;
	u16 qs_handle;
	bool type_leaf:1;
	bool enable:1;
	u8 prio_type;	/* enum irdma_sched_prio_type */
	u8 tc;
	u8 weight;
};
563 
/* Miscellaneous HMC/FPM sizing values reported by firmware at query time. */
struct irdma_hmc_fpm_misc {
	u32 max_ceqs;
	u32 max_sds;
	u32 xf_block_size;
	u32 q1_block_size;
	u32 ht_multiplier;
	u32 timer_bucket;
	u32 rrf_block_size;
	u32 ooiscf_block_size;
};
574 
/* Default relative-bandwidth shares for WS leaf and parent nodes. */
#define IRDMA_LEAF_DEFAULT_REL_BW		64
#define IRDMA_PARENT_DEFAULT_REL_BW		1

/* Per-priority QoS level: the QPs assigned to it, its LAN scheduler
 * handles, and its traffic-class attributes.
 */
struct irdma_qos {
	struct list_head qplist;
	struct mutex qos_mutex; /* protect QoS attributes per QoS level */
	u64 lan_qos_handle;
	u32 l2_sched_node_id;
	u16 qs_handle;
	u8 traffic_class;
	u8 rel_bw;
	u8 prio_type;
	bool valid;
};
589 
#define IRDMA_INVALID_FCN_ID 0xff
/* Per-VSI state: the ILQ/IEQ PUDA resources, per-priority QoS levels,
 * statistics, and callbacks used to (un)register qsets with the LAN driver.
 */
struct irdma_sc_vsi {
	u16 vsi_idx;
	struct irdma_sc_dev *dev;
	void *back_vsi;
	u32 ilq_count;
	struct irdma_virt_mem ilq_mem;
	struct irdma_puda_rsrc *ilq;
	u32 ieq_count;
	struct irdma_virt_mem ieq_mem;
	struct irdma_puda_rsrc *ieq;
	u32 exception_lan_q;
	u16 mtu;
	u16 vm_id;
	u8 fcn_id;
	enum irdma_vm_vf_type vm_vf_type;
	bool stats_fcn_id_alloc:1;
	bool tc_change_pending:1;
	struct irdma_qos qos[IRDMA_MAX_USER_PRIORITY];
	struct irdma_vsi_pestat *pestat;
	atomic_t qp_suspend_reqs;
	enum irdma_status_code (*register_qset)(struct irdma_sc_vsi *vsi,
						struct irdma_ws_node *tc_node);
	void (*unregister_qset)(struct irdma_sc_vsi *vsi,
				struct irdma_ws_node *tc_node);
	u8 qos_rel_bw;
	u8 qos_prio_type;
};
618 
/* Top-level device object: doorbell/register mappings, HMC and FPM
 * buffers, the control queues (CQP/AEQ/CEQs/CCQ), and work-scheduler
 * state. One instance per PCI function.
 */
struct irdma_sc_dev {
	struct list_head cqp_cmd_head; /* head of the CQP command list */
	spinlock_t cqp_lock; /* protect CQP list access */
	bool fcn_id_array[IRDMA_MAX_STATS_COUNT];
	struct irdma_dma_mem vf_fpm_query_buf[IRDMA_MAX_PE_ENA_VF_COUNT];
	u64 fpm_query_buf_pa;
	u64 fpm_commit_buf_pa;
	__le64 *fpm_query_buf;
	__le64 *fpm_commit_buf;
	struct irdma_hw *hw;
	u8 __iomem *db_addr;
	u32 __iomem *wqe_alloc_db;
	u32 __iomem *cq_arm_db;
	u32 __iomem *aeq_alloc_db;
	u32 __iomem *cqp_db;
	u32 __iomem *cq_ack_db;
	u32 __iomem *ceq_itr_mask_db;
	u32 __iomem *aeq_itr_mask_db;
	u32 __iomem *hw_regs[IRDMA_MAX_REGS];
	u32 ceq_itr;   /* Interrupt throttle, usecs between interrupts: 0 disabled. 2 - 8160 */
	u64 hw_masks[IRDMA_MAX_MASKS];
	u64 hw_shifts[IRDMA_MAX_SHIFTS];
	u64 hw_stats_regs_32[IRDMA_HW_STAT_INDEX_MAX_32];
	u64 hw_stats_regs_64[IRDMA_HW_STAT_INDEX_MAX_64];
	u64 feature_info[IRDMA_MAX_FEATURES];
	u64 cqp_cmd_stats[IRDMA_MAX_CQP_OPS];
	struct irdma_hw_attrs hw_attrs;
	struct irdma_hmc_info *hmc_info;
	struct irdma_sc_cqp *cqp;
	struct irdma_sc_aeq *aeq;
	struct irdma_sc_ceq *ceq[IRDMA_CEQ_MAX_COUNT];
	struct irdma_sc_cq *ccq;
	const struct irdma_irq_ops *irq_ops;
	struct irdma_hmc_fpm_misc hmc_fpm_misc;
	struct irdma_ws_node *ws_tree_root;
	struct mutex ws_mutex; /* ws tree mutex */
	u16 num_vfs;
	u8 hmc_fn_id;
	u8 vf_id;
	bool vchnl_up:1;
	bool ceq_valid:1;
	u8 pci_rev;
	enum irdma_status_code (*ws_add)(struct irdma_sc_vsi *vsi, u8 user_pri);
	void (*ws_remove)(struct irdma_sc_vsi *vsi, u8 user_pri);
	void (*ws_reset)(struct irdma_sc_vsi *vsi);
};
665 
666 struct irdma_modify_cq_info {
667 	u64 cq_pa;
668 	struct irdma_cqe *cq_base;
669 	u32 cq_size;
670 	u32 shadow_read_threshold;
671 	u8 pbl_chunk_size;
672 	u32 first_pm_pbl_idx;
673 	bool virtual_map:1;
674 	bool check_overflow;
675 	bool cq_resize:1;
676 };
677 
678 struct irdma_create_qp_info {
679 	bool ord_valid:1;
680 	bool tcp_ctx_valid:1;
681 	bool cq_num_valid:1;
682 	bool arp_cache_idx_valid:1;
683 	bool mac_valid:1;
684 	bool force_lpb;
685 	u8 next_iwarp_state;
686 };
687 
/* Parameters and validity flags for the CQP modify-QP command, including
 * iWARP state transition and terminate behavior.
 */
struct irdma_modify_qp_info {
	u64 rx_win0;
	u64 rx_win1;
	u16 new_mss;
	u8 next_iwarp_state;
	u8 curr_iwarp_state;
	u8 termlen;
	bool ord_valid:1;
	bool tcp_ctx_valid:1;
	bool udp_ctx_valid:1;
	bool cq_num_valid:1;
	bool arp_cache_idx_valid:1;
	bool reset_tcp_conn:1;
	bool remove_hash_idx:1;
	bool dont_send_term:1;
	bool dont_send_fin:1;
	bool cached_var_valid:1;
	bool mss_change:1;
	bool force_lpb:1;
	bool mac_valid:1;
};
709 
/* Decoded control-CQ completion: which CQP it belongs to, the opaque
 * scratch the caller supplied, and the firmware result/error codes.
 */
struct irdma_ccq_cqe_info {
	struct irdma_sc_cqp *cqp;
	u64 scratch;
	u32 op_ret_val;
	u16 maj_err_code;
	u16 min_err_code;
	u8 op_code;
	bool error;
};
719 
/* One DCB application-priority table entry. */
struct irdma_dcb_app_info {
	u8 priority;
	u8 selector;
	u16 prot_id;
};

/* Per-traffic-class QoS attributes passed down from the LAN driver. */
struct irdma_qos_tc_info {
	u64 tc_ctx;
	u8 rel_bw;
	u8 prio_type;
	u8 egress_virt_up;
	u8 ingress_virt_up;
};
733 
/* Layer-2 parameters (MTU, traffic classes, UP-to-TC map, qset handles)
 * received from the LAN driver; the *_changed flags indicate what a
 * runtime update actually modified.
 */
struct irdma_l2params {
	struct irdma_qos_tc_info tc_info[IRDMA_MAX_USER_PRIORITY];
	struct irdma_dcb_app_info apps[IRDMA_MAX_APPS];
	u32 num_apps;
	u16 qs_handle_list[IRDMA_MAX_USER_PRIORITY];
	u16 mtu;
	u8 up2tc[IRDMA_MAX_USER_PRIORITY];
	u8 num_tc;
	u8 vsi_rel_bw;
	u8 vsi_prio_type;
	bool mtu_changed:1;
	bool tc_changed:1;
};
747 
/* Caller-supplied parameters for initializing an irdma_sc_vsi, including
 * the qset registration callbacks copied into the VSI.
 */
struct irdma_vsi_init_info {
	struct irdma_sc_dev *dev;
	void *back_vsi;
	struct irdma_l2params *params;
	u16 exception_lan_q;
	u16 pf_data_vsi_num;
	enum irdma_vm_vf_type vm_vf_type;
	u16 vm_id;
	enum irdma_status_code (*register_qset)(struct irdma_sc_vsi *vsi,
						struct irdma_ws_node *tc_node);
	void (*unregister_qset)(struct irdma_sc_vsi *vsi,
				struct irdma_ws_node *tc_node);
};
761 
/* Parameters for attaching statistics collection to a VSI. */
struct irdma_vsi_stats_info {
	struct irdma_vsi_pestat *pestat;
	u8 fcn_id;
	bool alloc_fcn_id;	/* allocate a stats function id instead of using fcn_id */
};
767 
/* Caller-supplied parameters for irdma_sc_dev initialization: FPM
 * query/commit buffers, the hardware handle, and the BAR0 mapping.
 */
struct irdma_device_init_info {
	u64 fpm_query_buf_pa;
	u64 fpm_commit_buf_pa;
	__le64 *fpm_query_buf;
	__le64 *fpm_commit_buf;
	struct irdma_hw *hw;
	void __iomem *bar0;
	u8 hmc_fn_id;
};
777 
/* Caller-supplied parameters for irdma_sc_ceq_init(). NOTE(review):
 * ceqe_base is declared u64 * here but irdma_sc_ceq stores it as
 * struct irdma_ceqe * — the init code presumably casts; confirm before
 * changing either type.
 */
struct irdma_ceq_init_info {
	u64 ceqe_pa;
	struct irdma_sc_dev *dev;
	u64 *ceqe_base;
	void *pbl_list;
	u32 elem_cnt;
	u32 ceq_id;
	bool virtual_map:1;
	bool tph_en:1;
	bool itr_no_expire:1;
	u8 pbl_chunk_size;
	u8 tph_val;
	u32 first_pm_pbl_idx;
	struct irdma_sc_vsi *vsi;
	struct irdma_sc_cq **reg_cq;
	u32 reg_cq_idx;
};
795 
796 struct irdma_aeq_init_info {
797 	u64 aeq_elem_pa;
798 	struct irdma_sc_dev *dev;
799 	u32 *aeqe_base;
800 	void *pbl_list;
801 	u32 elem_cnt;
802 	bool virtual_map;
803 	u8 pbl_chunk_size;
804 	u32 first_pm_pbl_idx;
805 	u32 msix_idx;
806 };
807 
/* Caller-supplied parameters for irdma_sc_ccq_init() (the control CQ). */
struct irdma_ccq_init_info {
	u64 cq_pa;
	u64 shadow_area_pa;
	struct irdma_sc_dev *dev;
	struct irdma_cqe *cq_base;
	__le64 *shadow_area;
	void *pbl_list;
	u32 num_elem;
	u32 ceq_id;
	u32 shadow_read_threshold;
	bool ceqe_mask:1;
	bool ceq_id_valid:1;
	bool avoid_mem_cflct:1;
	bool virtual_map:1;
	bool tph_en:1;
	u8 tph_val;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	struct irdma_sc_vsi *vsi;
};
828 
/* UDP transport context programmed into the QP host context for RoCEv2
 * connections (addresses, ports, PSN tracking, retry thresholds).
 */
struct irdma_udp_offload_info {
	bool ipv4:1;
	bool insert_vlan_tag:1;
	u8 ttl;
	u8 tos;
	u16 src_port;
	u16 dst_port;
	u32 dest_ip_addr[4];
	u32 snd_mss;
	u16 vlan_tag;
	u16 arp_idx;
	u32 flow_label;
	u8 udp_state;
	u32 psn_nxt;
	u32 lsn;
	u32 epsn;
	u32 psn_max;
	u32 psn_una;
	u32 local_ipaddr[4];
	u32 cwnd;
	u8 rexmit_thresh;
	u8 rnr_nak_thresh;
};
852 
/* RoCE protocol context programmed into the QP host context: QP numbers,
 * keys, ORD/IRD limits, and feature-enable flags.
 */
struct irdma_roce_offload_info {
	u16 p_key;
	u16 err_rq_idx;
	u32 qkey;
	u32 dest_qp;
	u32 local_qp;
	u8 roce_tver;
	u8 ack_credits;
	u8 err_rq_idx_valid;
	u32 pd_id;
	u16 ord_size;
	u16 ird_size;
	bool is_qp1:1;
	bool udprivcq_en:1;
	bool dcqcn_en:1;
	bool rcv_no_icrc:1;
	bool wr_rdresp_en:1;
	bool bind_en:1;
	bool fast_reg_en:1;
	bool priv_mode_en:1;
	bool rd_en:1;
	bool timely_en:1;
	bool dctcp_en:1;
	bool fw_cc_enable:1;
	bool use_stats_inst:1;
	u16 t_high;
	u16 t_low;
	u8 last_byte_sent;
	u8 mac_addr[ETH_ALEN];
	u8 rtomin;
};
884 
/* iWARP protocol context programmed into the QP host context: DDP/RDMAP
 * versions, MPA marker offsets, ORD/IRD limits, and feature-enable flags.
 */
struct irdma_iwarp_offload_info {
	u16 rcv_mark_offset;
	u16 snd_mark_offset;
	u8 ddp_ver;
	u8 rdmap_ver;
	u8 iwarp_mode;
	u16 err_rq_idx;
	u32 pd_id;
	u16 ord_size;
	u16 ird_size;
	bool ib_rd_en:1;
	bool align_hdrs:1;
	bool rcv_no_mpa_crc:1;
	bool err_rq_idx_valid:1;
	bool snd_mark_en:1;
	bool rcv_mark_en:1;
	bool wr_rdresp_en:1;
	bool bind_en:1;
	bool fast_reg_en:1;
	bool priv_mode_en:1;
	bool rd_en:1;
	bool timely_en:1;
	bool use_stats_inst:1;
	bool ecn_en:1;
	bool dctcp_en:1;
	u16 t_high;
	u16 t_low;
	u8 last_byte_sent;
	u8 mac_addr[ETH_ALEN];
	u8 rtomin;
};
916 
917 struct irdma_tcp_offload_info {
918 	bool ipv4:1;
919 	bool no_nagle:1;
920 	bool insert_vlan_tag:1;
921 	bool time_stamp:1;
922 	bool drop_ooo_seg:1;
923 	bool avoid_stretch_ack:1;
924 	bool wscale:1;
925 	bool ignore_tcp_opt:1;
926 	bool ignore_tcp_uns_opt:1;
927 	u8 cwnd_inc_limit;
928 	u8 dup_ack_thresh;
929 	u8 ttl;
930 	u8 src_mac_addr_idx;
931 	u8 tos;
932 	u16 src_port;
933 	u16 dst_port;
934 	u32 dest_ip_addr[4];
935 	//u32 dest_ip_addr0;
936 	//u32 dest_ip_addr1;
937 	//u32 dest_ip_addr2;
938 	//u32 dest_ip_addr3;
939 	u32 snd_mss;
940 	u16 syn_rst_handling;
941 	u16 vlan_tag;
942 	u16 arp_idx;
943 	u32 flow_label;
944 	u8 tcp_state;
945 	u8 snd_wscale;
946 	u8 rcv_wscale;
947 	u32 time_stamp_recent;
948 	u32 time_stamp_age;
949 	u32 snd_nxt;
950 	u32 snd_wnd;
951 	u32 rcv_nxt;
952 	u32 rcv_wnd;
953 	u32 snd_max;
954 	u32 snd_una;
955 	u32 srtt;
956 	u32 rtt_var;
957 	u32 ss_thresh;
958 	u32 cwnd;
959 	u32 snd_wl1;
960 	u32 snd_wl2;
961 	u32 max_snd_window;
962 	u8 rexmit_thresh;
963 	u32 local_ipaddr[4];
964 };
965 
/* Aggregate passed to irdma_sc_qp_setctx*(): the transport context
 * (TCP for iWARP, UDP for RoCEv2) and protocol context (iWARP or RoCE)
 * are unions — the *_valid flags say which member is populated.
 */
struct irdma_qp_host_ctx_info {
	u64 qp_compl_ctx;
	union {
		struct irdma_tcp_offload_info *tcp_info;
		struct irdma_udp_offload_info *udp_info;
	};
	union {
		struct irdma_iwarp_offload_info *iwarp_info;
		struct irdma_roce_offload_info *roce_info;
	};
	u32 send_cq_num;
	u32 rcv_cq_num;
	u32 rem_endpoint_idx;
	u8 stats_idx;
	bool srq_valid:1;
	bool tcp_info_valid:1;
	bool iwarp_info_valid:1;
	bool stats_idx_valid:1;
	u8 user_pri;
};
986 
/* Decoded asynchronous event queue entry, produced by
 * irdma_sc_get_next_aeqe(): which object (QP/CQ, SQ/RQ side) the event
 * refers to and the reported protocol states.
 */
struct irdma_aeqe_info {
	u64 compl_ctx;
	u32 qp_cq_id;
	u16 ae_id;
	u16 wqe_idx;
	u8 tcp_state;
	u8 iwarp_state;
	bool qp:1;
	bool cq:1;
	bool sq:1;
	bool rq:1;
	bool in_rdrsp_wr:1;
	bool out_rdrsp:1;
	bool aeqe_overflow:1;
	u8 q2_data_written;
	u8 ae_src;
};
1004 
/* CQP parameters for allocating an STag (memory key). */
struct irdma_allocate_stag_info {
	u64 total_len;
	u64 first_pm_pbl_idx;
	u32 chunk_size;
	u32 stag_idx;
	u32 page_size;
	u32 pd_id;
	u16 access_rights;
	bool remote_access:1;
	bool use_hmc_fcn_index:1;
	bool use_pf_rid:1;
	bool all_memory:1;
	u8 hmc_fcn_index;
};
1019 
/* CQP parameters for allocating a memory window. */
struct irdma_mw_alloc_info {
	u32 mw_stag_index;
	u32 page_size;
	u32 pd_id;
	bool remote_access:1;
	bool mw_wide:1;	/* type-2 wide window vs type-1 */
	bool mw1_bind_dont_vldt_key:1;
};
1028 
/* CQP parameters for registering a non-shared memory region. */
struct irdma_reg_ns_stag_info {
	u64 reg_addr_pa;
	u64 va;
	u64 total_len;
	u32 page_size;
	u32 chunk_size;
	u32 first_pm_pbl_index;
	enum irdma_addressing_type addr_type;
	irdma_stag_index stag_idx;
	u16 access_rights;
	u32 pd_id;
	irdma_stag_key stag_key;
	bool use_hmc_fcn_index:1;
	u8 hmc_fcn_index;
	bool use_pf_rid:1;
	bool all_memory:1;
};
1046 
/* Parameters for a fast-register WQE posted on a QP's send queue
 * (see irdma_sc_fast_register()).
 */
struct irdma_fast_reg_stag_info {
	u64 wr_id;
	u64 reg_addr_pa;
	u64 fbo;	/* first byte offset into the registered region */
	void *va;
	u64 total_len;
	u32 page_size;
	u32 chunk_size;
	u32 first_pm_pbl_index;
	enum irdma_addressing_type addr_type;
	irdma_stag_index stag_idx;
	u16 access_rights;
	u32 pd_id;
	irdma_stag_key stag_key;
	bool local_fence:1;
	bool read_fence:1;
	bool signaled:1;
	bool push_wqe:1;
	bool use_hmc_fcn_index:1;
	u8 hmc_fcn_index;
	bool use_pf_rid:1;
	bool defer_flag:1;
};
1070 
/* CQP parameters for deallocating an STag. */
struct irdma_dealloc_stag_info {
	u32 stag_idx;
	u32 pd_id;
	bool mr:1;	/* STag backs a memory region (vs window) */
	bool dealloc_pbl:1;
};

/* CQP parameters for registering a shared memory region derived from an
 * existing (parent) STag.
 */
struct irdma_register_shared_stag {
	u64 va;
	enum irdma_addressing_type addr_type;
	irdma_stag_index new_stag_idx;
	irdma_stag_index parent_stag_idx;
	u32 access_rights;
	u32 pd_id;
	u32 page_size;
	irdma_stag_key new_stag_key;
};
1088 
/* Caller-supplied parameters for irdma_sc_qp_init(): user-level QP init
 * info plus the DMA addresses of all per-QP memory areas.
 */
struct irdma_qp_init_info {
	struct irdma_qp_uk_init_info qp_uk_init_info;
	struct irdma_sc_pd *pd;
	struct irdma_sc_vsi *vsi;
	__le64 *host_ctx;
	u8 *q2;
	u64 sq_pa;
	u64 rq_pa;
	u64 host_ctx_pa;
	u64 q2_pa;
	u64 shadow_area_pa;
	u8 sq_tph_val;
	u8 rq_tph_val;
	bool sq_tph_en:1;
	bool rq_tph_en:1;
	bool rcv_tph_en:1;
	bool xmit_tph_en:1;
	bool virtual_map:1;
};
1108 
/* Caller-supplied parameters for irdma_sc_cq_init(). */
struct irdma_cq_init_info {
	struct irdma_sc_dev *dev;
	u64 cq_base_pa;
	u64 shadow_area_pa;
	u32 ceq_id;
	u32 shadow_read_threshold;
	u8 pbl_chunk_size;
	u32 first_pm_pbl_idx;
	bool virtual_map:1;
	bool ceqe_mask:1;
	bool ceq_id_valid:1;
	bool tph_en:1;
	u8 tph_val;
	u8 type;	/* CQ type, copied to irdma_sc_cq.cq_type */
	struct irdma_cq_uk_init_info cq_uk_init_info;
	struct irdma_sc_vsi *vsi;
};
1126 
1127 struct irdma_upload_context_info {
1128 	u64 buf_pa;
1129 	u32 qp_id;
1130 	u8 qp_type;
1131 	bool freeze_qp:1;
1132 	bool raw_format:1;
1133 };
1134 
1135 struct irdma_local_mac_entry_info {
1136 	u8 mac_addr[6];
1137 	u16 entry_idx;
1138 };
1139 
1140 struct irdma_add_arp_cache_entry_info {
1141 	u8 mac_addr[ETH_ALEN];
1142 	u32 reach_max;
1143 	u16 arp_index;
1144 	bool permanent;
1145 };
1146 
1147 struct irdma_apbvt_info {
1148 	u16 port;
1149 	bool add;
1150 };
1151 
/* CQP parameters for managing a quad-hash table entry (connection
 * steering by address/port tuple).
 */
struct irdma_qhash_table_info {
	struct irdma_sc_vsi *vsi;
	enum irdma_quad_hash_manage_type manage;
	enum irdma_quad_entry_type entry_type;
	bool vlan_valid:1;
	bool ipv4_valid:1;
	u8 mac_addr[ETH_ALEN];
	u16 vlan_id;
	u8 user_pri;
	u32 qp_num;
	u32 dest_ip[4];
	u32 src_ip[4];
	u16 dest_port;
	u16 src_port;
};
1167 
/* CQP parameters for allocating/freeing a doorbell push page. */
struct irdma_cqp_manage_push_page_info {
	u32 push_idx;
	u16 qs_handle;
	u8 free_page;
	u8 push_page_type;
};

/* Parameters for flushing a QP's SQ and/or RQ, optionally overriding the
 * completion error codes and generating an async event.
 */
struct irdma_qp_flush_info {
	u16 sq_minor_code;
	u16 sq_major_code;
	u16 rq_minor_code;
	u16 rq_major_code;
	u16 ae_code;
	u8 ae_src;
	bool sq:1;
	bool rq:1;
	bool userflushcode:1;
	bool generate_ae:1;
};

/* Parameters for generating an async event on a QP. */
struct irdma_gen_ae_info {
	u16 ae_code;
	u8 ae_src;
};

/* Progress snapshot used to detect a stalled CQP (see
 * irdma_check_cqp_progress()).
 */
struct irdma_cqp_timeout {
	u64 compl_cqp_cmds;
	u32 count;
};
1197 
/* HW-generation-specific interrupt configuration callbacks. */
struct irdma_irq_ops {
	void (*irdma_cfg_aeq)(struct irdma_sc_dev *dev, u32 idx, bool enable);
	void (*irdma_cfg_ceq)(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx,
			      bool enable);
	void (*irdma_dis_irq)(struct irdma_sc_dev *dev, u32 idx);
	void (*irdma_en_irq)(struct irdma_sc_dev *dev, u32 idx);
};
1205 
/* ---- Control-CQ (CCQ) operations ---- */
void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq);
enum irdma_status_code irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
					   bool check_overflow, bool post_sq);
enum irdma_status_code irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch,
					    bool post_sq);
enum irdma_status_code irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
						 struct irdma_ccq_cqe_info *info);
enum irdma_status_code irdma_sc_ccq_init(struct irdma_sc_cq *ccq,
					 struct irdma_ccq_init_info *info);

/* ---- Completion event queue (CEQ) operations ---- */
enum irdma_status_code irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch);
enum irdma_status_code irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq);

enum irdma_status_code irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch,
					    bool post_sq);
enum irdma_status_code irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
					 struct irdma_ceq_init_info *info);
void irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq);
void *irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq);

/* ---- Asynchronous event queue (AEQ) operations ---- */
enum irdma_status_code irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
					 struct irdma_aeq_init_info *info);
enum irdma_status_code irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
					      struct irdma_aeqe_info *info);
void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count);

/* ---- PD, CQP, QP and CQ operations ---- */
void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
		      int abi_ver);
void irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable);
void irdma_check_cqp_progress(struct irdma_cqp_timeout *cqp_timeout,
			      struct irdma_sc_dev *dev);
enum irdma_status_code irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err,
					   u16 *min_err);
enum irdma_status_code irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp);
enum irdma_status_code irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
					 struct irdma_cqp_init_info *info);
void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp);
enum irdma_status_code irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 opcode,
						     struct irdma_ccq_cqe_info *cmpl_info);
enum irdma_status_code irdma_sc_fast_register(struct irdma_sc_qp *qp,
					      struct irdma_fast_reg_stag_info *info,
					      bool post_sq);
enum irdma_status_code irdma_sc_qp_create(struct irdma_sc_qp *qp,
					  struct irdma_create_qp_info *info,
					  u64 scratch, bool post_sq);
enum irdma_status_code irdma_sc_qp_destroy(struct irdma_sc_qp *qp,
					   u64 scratch, bool remove_hash_idx,
					   bool ignore_mw_bnd, bool post_sq);
enum irdma_status_code irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
					      struct irdma_qp_flush_info *info,
					      u64 scratch, bool post_sq);
enum irdma_status_code irdma_sc_qp_init(struct irdma_sc_qp *qp,
					struct irdma_qp_init_info *info);
enum irdma_status_code irdma_sc_qp_modify(struct irdma_sc_qp *qp,
					  struct irdma_modify_qp_info *info,
					  u64 scratch, bool post_sq);
void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
			irdma_stag stag);
void irdma_sc_send_lsmm_nostag(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size);
void irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read);
void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx,
			struct irdma_qp_host_ctx_info *info);
void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
			     struct irdma_qp_host_ctx_info *info);
enum irdma_status_code irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch,
					   bool post_sq);
enum irdma_status_code irdma_sc_cq_init(struct irdma_sc_cq *cq,
					struct irdma_cq_init_info *info);
void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info);
1275 enum irdma_status_code irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp,
1276 							   u64 scratch, u8 hmc_fn_id,
1277 							   bool post_sq, bool poll_registers);
1278 
1279 void sc_vsi_update_stats(struct irdma_sc_vsi *vsi);
/*
 * struct cqp_info - per-command input arguments for a CQP operation
 *
 * Exactly one member of the anonymous union @u is valid for a given
 * command; the opcode that selects the member is carried next to this
 * struct in struct cqp_cmds_info (its cqp_cmd field).  Every member
 * includes a @scratch cookie that the caller supplies with the WQE.
 */
struct cqp_info {
	union {
		/* QP create/modify/destroy */
		struct {
			struct irdma_sc_qp *qp;
			struct irdma_create_qp_info info;
			u64 scratch;
		} qp_create;

		struct {
			struct irdma_sc_qp *qp;
			struct irdma_modify_qp_info info;
			u64 scratch;
		} qp_modify;

		struct {
			struct irdma_sc_qp *qp;
			u64 scratch;
			bool remove_hash_idx;
			bool ignore_mw_bnd;
		} qp_destroy;

		/* CQ create/modify/destroy */
		struct {
			struct irdma_sc_cq *cq;
			u64 scratch;
			bool check_overflow;
		} cq_create;

		struct {
			struct irdma_sc_cq *cq;
			struct irdma_modify_cq_info info;
			u64 scratch;
		} cq_modify;

		struct {
			struct irdma_sc_cq *cq;
			u64 scratch;
		} cq_destroy;

		/* Memory region / STag / memory window management */
		struct {
			struct irdma_sc_dev *dev;
			struct irdma_allocate_stag_info info;
			u64 scratch;
		} alloc_stag;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_mw_alloc_info info;
			u64 scratch;
		} mw_alloc;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_reg_ns_stag_info info;
			u64 scratch;
		} mr_reg_non_shared;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_dealloc_stag_info info;
			u64 scratch;
		} dealloc_stag;

		/* ARP cache entries */
		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_add_arp_cache_entry_info info;
			u64 scratch;
		} add_arp_cache_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			u64 scratch;
			u16 arp_index;
		} del_arp_cache_entry;

		/* Local MAC table entries */
		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_local_mac_entry_info info;
			u64 scratch;
		} add_local_mac_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			u64 scratch;
			u8 entry_idx;
			u8 ignore_ref_count;
		} del_local_mac_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			u64 scratch;
		} alloc_local_mac_entry;

		/* Push pages, QP context upload, HMC PM function management */
		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_cqp_manage_push_page_info info;
			u64 scratch;
		} manage_push_page;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_upload_context_info info;
			u64 scratch;
		} qp_upload_context;

		struct {
			struct irdma_sc_dev *dev;
			struct irdma_hmc_fcn_info info;
			u64 scratch;
		} manage_hmc_pm;

		/* CEQ / AEQ lifecycle */
		struct {
			struct irdma_sc_ceq *ceq;
			u64 scratch;
		} ceq_create;

		struct {
			struct irdma_sc_ceq *ceq;
			u64 scratch;
		} ceq_destroy;

		struct {
			struct irdma_sc_aeq *aeq;
			u64 scratch;
		} aeq_create;

		struct {
			struct irdma_sc_aeq *aeq;
			u64 scratch;
		} aeq_destroy;

		/* WQE flush and asynchronous event generation */
		struct {
			struct irdma_sc_qp *qp;
			struct irdma_qp_flush_info info;
			u64 scratch;
		} qp_flush_wqes;

		struct {
			struct irdma_sc_qp *qp;
			struct irdma_gen_ae_info info;
			u64 scratch;
		} gen_ae;

		/* FPM (function private memory) values query/commit */
		struct {
			struct irdma_sc_cqp *cqp;
			void *fpm_val_va;
			u64 fpm_val_pa;
			u8 hmc_fn_id;
			u64 scratch;
		} query_fpm_val;

		struct {
			struct irdma_sc_cqp *cqp;
			void *fpm_val_va;
			u64 fpm_val_pa;
			u8 hmc_fn_id;
			u64 scratch;
		} commit_fpm_val;

		/* APBVT and quad-hash table entries */
		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_apbvt_info info;
			u64 scratch;
		} manage_apbvt_entry;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_qhash_table_info info;
			u64 scratch;
		} manage_qhash_table_entry;

		/* PE segment descriptor update */
		struct {
			struct irdma_sc_dev *dev;
			struct irdma_update_sds_info info;
			u64 scratch;
		} update_pe_sds;

		/* QP suspend/resume */
		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_sc_qp *qp;
			u64 scratch;
		} suspend_resume;

		/* Address handles */
		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_ah_info info;
			u64 scratch;
		} ah_create;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_ah_info info;
			u64 scratch;
		} ah_destroy;

		/* Multicast groups */
		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_mcast_grp_info info;
			u64 scratch;
		} mc_create;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_mcast_grp_info info;
			u64 scratch;
		} mc_destroy;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_mcast_grp_info info;
			u64 scratch;
		} mc_modify;

		/* Statistics instances and gathering */
		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_stats_inst_info info;
			u64 scratch;
		} stats_manage;

		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_stats_gather_info info;
			u64 scratch;
		} stats_gather;

		/* Work scheduler node */
		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_ws_node_info info;
			u64 scratch;
		} ws_node;

		/* User-priority map */
		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_up_info info;
			u64 scratch;
		} up_map;

		/* Query RDMA features/capabilities into a DMA buffer */
		struct {
			struct irdma_sc_cqp *cqp;
			struct irdma_dma_mem query_buff_mem;
			u64 scratch;
		} query_rdma;
	} u;
};
1523 
/*
 * struct cqp_cmds_info - one queued CQP command
 *
 * Pairs a command opcode with its input payload; only the member of
 * in.u matching @cqp_cmd is meaningful.
 */
struct cqp_cmds_info {
	struct list_head cqp_cmd_entry;	/* linkage on a pending-command list */
	u8 cqp_cmd;			/* opcode selecting the valid in.u member */
	u8 post_sq;			/* NOTE(review): presumably whether to post to the
					 * CQP SQ immediately — confirm with callers
					 */
	struct cqp_info in;		/* command input arguments */
};
1530 
/* Get the next CQP SQ WQE; also returns its index through @wqe_idx */
__le64 *irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch,
					   u32 *wqe_idx);
1533 
1534 /**
1535  * irdma_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
1536  * @cqp: struct for cqp hw
1537  * @scratch: private data for CQP WQE
1538  */
irdma_sc_cqp_get_next_send_wqe(struct irdma_sc_cqp * cqp,u64 scratch)1539 static inline __le64 *irdma_sc_cqp_get_next_send_wqe(struct irdma_sc_cqp *cqp, u64 scratch)
1540 {
1541 	u32 wqe_idx;
1542 
1543 	return irdma_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
1544 }
1545 #endif /* IRDMA_TYPE_H */
1546