1 /* Broadcom NetXtreme-C/E network driver.
2  *
3  * Copyright (c) 2014-2016 Broadcom Corporation
4  * Copyright (c) 2016-2018 Broadcom Limited
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  */
10 
11 #ifndef BNXT_H
12 #define BNXT_H
13 
14 #define DRV_MODULE_NAME		"bnxt_en"
15 
16 /* DO NOT CHANGE DRV_VER_* defines
17  * FIXME: Delete them
18  */
19 #define DRV_VER_MAJ	1
20 #define DRV_VER_MIN	10
21 #define DRV_VER_UPD	2
22 
23 #include <linux/ethtool.h>
24 #include <linux/interrupt.h>
25 #include <linux/rhashtable.h>
26 #include <linux/crash_dump.h>
27 #include <net/devlink.h>
28 #include <net/dst_metadata.h>
29 #include <net/xdp.h>
30 #include <linux/dim.h>
31 #include <linux/io-64-nonatomic-lo-hi.h>
32 #ifdef CONFIG_TEE_BNXT_FW
33 #include <linux/firmware/broadcom/tee_bnxt_fw.h>
34 #endif
35 
36 extern struct list_head bnxt_block_cb_list;
37 
38 struct page_pool;
39 
40 struct tx_bd {
41 	__le32 tx_bd_len_flags_type;
42 	#define TX_BD_TYPE					(0x3f << 0)
43 	 #define TX_BD_TYPE_SHORT_TX_BD				 (0x00 << 0)
44 	 #define TX_BD_TYPE_LONG_TX_BD				 (0x10 << 0)
45 	#define TX_BD_FLAGS_PACKET_END				(1 << 6)
46 	#define TX_BD_FLAGS_NO_CMPL				(1 << 7)
47 	#define TX_BD_FLAGS_BD_CNT				(0x1f << 8)
48 	 #define TX_BD_FLAGS_BD_CNT_SHIFT			 8
49 	#define TX_BD_FLAGS_LHINT				(3 << 13)
50 	 #define TX_BD_FLAGS_LHINT_SHIFT			 13
51 	 #define TX_BD_FLAGS_LHINT_512_AND_SMALLER		 (0 << 13)
52 	 #define TX_BD_FLAGS_LHINT_512_TO_1023			 (1 << 13)
53 	 #define TX_BD_FLAGS_LHINT_1024_TO_2047			 (2 << 13)
54 	 #define TX_BD_FLAGS_LHINT_2048_AND_LARGER		 (3 << 13)
55 	#define TX_BD_FLAGS_COAL_NOW				(1 << 15)
56 	#define TX_BD_LEN					(0xffff << 16)
57 	 #define TX_BD_LEN_SHIFT				 16
58 
59 	u32 tx_bd_opaque;
60 	__le64 tx_bd_haddr;
61 } __packed;
62 
63 struct tx_bd_ext {
64 	__le32 tx_bd_hsize_lflags;
65 	#define TX_BD_FLAGS_TCP_UDP_CHKSUM			(1 << 0)
66 	#define TX_BD_FLAGS_IP_CKSUM				(1 << 1)
67 	#define TX_BD_FLAGS_NO_CRC				(1 << 2)
68 	#define TX_BD_FLAGS_STAMP				(1 << 3)
69 	#define TX_BD_FLAGS_T_IP_CHKSUM				(1 << 4)
70 	#define TX_BD_FLAGS_LSO					(1 << 5)
71 	#define TX_BD_FLAGS_IPID_FMT				(1 << 6)
72 	#define TX_BD_FLAGS_T_IPID				(1 << 7)
73 	#define TX_BD_HSIZE					(0xff << 16)
74 	 #define TX_BD_HSIZE_SHIFT				 16
75 
76 	__le32 tx_bd_mss;
77 	__le32 tx_bd_cfa_action;
78 	#define TX_BD_CFA_ACTION				(0xffff << 16)
79 	 #define TX_BD_CFA_ACTION_SHIFT				 16
80 
81 	__le32 tx_bd_cfa_meta;
82 	#define TX_BD_CFA_META_MASK                             0xfffffff
83 	#define TX_BD_CFA_META_VID_MASK                         0xfff
84 	#define TX_BD_CFA_META_PRI_MASK                         (0xf << 12)
85 	 #define TX_BD_CFA_META_PRI_SHIFT                        12
86 	#define TX_BD_CFA_META_TPID_MASK                        (3 << 16)
87 	 #define TX_BD_CFA_META_TPID_SHIFT                       16
88 	#define TX_BD_CFA_META_KEY                              (0xf << 28)
89 	 #define TX_BD_CFA_META_KEY_SHIFT			 28
90 	#define TX_BD_CFA_META_KEY_VLAN                         (1 << 28)
91 };
92 
93 #define BNXT_TX_PTP_IS_SET(lflags) ((lflags) & cpu_to_le32(TX_BD_FLAGS_STAMP))
94 
95 struct rx_bd {
96 	__le32 rx_bd_len_flags_type;
97 	#define RX_BD_TYPE					(0x3f << 0)
98 	 #define RX_BD_TYPE_RX_PACKET_BD			 0x4
99 	 #define RX_BD_TYPE_RX_BUFFER_BD			 0x5
100 	 #define RX_BD_TYPE_RX_AGG_BD				 0x6
101 	 #define RX_BD_TYPE_16B_BD_SIZE				 (0 << 4)
102 	 #define RX_BD_TYPE_32B_BD_SIZE				 (1 << 4)
103 	 #define RX_BD_TYPE_48B_BD_SIZE				 (2 << 4)
104 	 #define RX_BD_TYPE_64B_BD_SIZE				 (3 << 4)
105 	#define RX_BD_FLAGS_SOP					(1 << 6)
106 	#define RX_BD_FLAGS_EOP					(1 << 7)
107 	#define RX_BD_FLAGS_BUFFERS				(3 << 8)
108 	 #define RX_BD_FLAGS_1_BUFFER_PACKET			 (0 << 8)
109 	 #define RX_BD_FLAGS_2_BUFFER_PACKET			 (1 << 8)
110 	 #define RX_BD_FLAGS_3_BUFFER_PACKET			 (2 << 8)
111 	 #define RX_BD_FLAGS_4_BUFFER_PACKET			 (3 << 8)
112 	#define RX_BD_LEN					(0xffff << 16)
113 	 #define RX_BD_LEN_SHIFT				 16
114 
115 	u32 rx_bd_opaque;
116 	__le64 rx_bd_haddr;
117 };
118 
119 struct tx_cmp {
120 	__le32 tx_cmp_flags_type;
121 	#define CMP_TYPE					(0x3f << 0)
122 	 #define CMP_TYPE_TX_L2_CMP				 0
123 	 #define CMP_TYPE_RX_L2_CMP				 17
124 	 #define CMP_TYPE_RX_AGG_CMP				 18
125 	 #define CMP_TYPE_RX_L2_TPA_START_CMP			 19
126 	 #define CMP_TYPE_RX_L2_TPA_END_CMP			 21
127 	 #define CMP_TYPE_RX_TPA_AGG_CMP			 22
128 	 #define CMP_TYPE_STATUS_CMP				 32
129 	 #define CMP_TYPE_REMOTE_DRIVER_REQ			 34
130 	 #define CMP_TYPE_REMOTE_DRIVER_RESP			 36
131 	 #define CMP_TYPE_ERROR_STATUS				 48
132 	 #define CMPL_BASE_TYPE_STAT_EJECT			 0x1aUL
133 	 #define CMPL_BASE_TYPE_HWRM_DONE			 0x20UL
134 	 #define CMPL_BASE_TYPE_HWRM_FWD_REQ			 0x22UL
135 	 #define CMPL_BASE_TYPE_HWRM_FWD_RESP			 0x24UL
136 	 #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT		 0x2eUL
137 
138 	#define TX_CMP_FLAGS_ERROR				(1 << 6)
139 	#define TX_CMP_FLAGS_PUSH				(1 << 7)
140 
141 	u32 tx_cmp_opaque;
142 	__le32 tx_cmp_errors_v;
143 	#define TX_CMP_V					(1 << 0)
144 	#define TX_CMP_ERRORS_BUFFER_ERROR			(7 << 1)
145 	 #define TX_CMP_ERRORS_BUFFER_ERROR_NO_ERROR		 0
146 	 #define TX_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT		 2
147 	 #define TX_CMP_ERRORS_BUFFER_ERROR_INVALID_STAG	 4
148 	 #define TX_CMP_ERRORS_BUFFER_ERROR_STAG_BOUNDS		 5
149 	 #define TX_CMP_ERRORS_ZERO_LENGTH_PKT			 (1 << 4)
150 	 #define TX_CMP_ERRORS_EXCESSIVE_BD_LEN			 (1 << 5)
151 	 #define TX_CMP_ERRORS_DMA_ERROR			 (1 << 6)
152 	 #define TX_CMP_ERRORS_HINT_TOO_SHORT			 (1 << 7)
153 
154 	__le32 tx_cmp_unsed_3;
155 };
156 
157 struct rx_cmp {
158 	__le32 rx_cmp_len_flags_type;
159 	#define RX_CMP_CMP_TYPE					(0x3f << 0)
160 	#define RX_CMP_FLAGS_ERROR				(1 << 6)
161 	#define RX_CMP_FLAGS_PLACEMENT				(7 << 7)
162 	#define RX_CMP_FLAGS_RSS_VALID				(1 << 10)
163 	#define RX_CMP_FLAGS_UNUSED				(1 << 11)
164 	 #define RX_CMP_FLAGS_ITYPES_SHIFT			 12
165 	 #define RX_CMP_FLAGS_ITYPES_MASK			 0xf000
166 	 #define RX_CMP_FLAGS_ITYPE_UNKNOWN			 (0 << 12)
167 	 #define RX_CMP_FLAGS_ITYPE_IP				 (1 << 12)
168 	 #define RX_CMP_FLAGS_ITYPE_TCP				 (2 << 12)
169 	 #define RX_CMP_FLAGS_ITYPE_UDP				 (3 << 12)
170 	 #define RX_CMP_FLAGS_ITYPE_FCOE			 (4 << 12)
171 	 #define RX_CMP_FLAGS_ITYPE_ROCE			 (5 << 12)
172 	 #define RX_CMP_FLAGS_ITYPE_PTP_WO_TS			 (8 << 12)
173 	 #define RX_CMP_FLAGS_ITYPE_PTP_W_TS			 (9 << 12)
174 	#define RX_CMP_LEN					(0xffff << 16)
175 	 #define RX_CMP_LEN_SHIFT				 16
176 
177 	u32 rx_cmp_opaque;
178 	__le32 rx_cmp_misc_v1;
179 	#define RX_CMP_V1					(1 << 0)
180 	#define RX_CMP_AGG_BUFS					(0x1f << 1)
181 	 #define RX_CMP_AGG_BUFS_SHIFT				 1
182 	#define RX_CMP_RSS_HASH_TYPE				(0x7f << 9)
183 	 #define RX_CMP_RSS_HASH_TYPE_SHIFT			 9
184 	#define RX_CMP_PAYLOAD_OFFSET				(0xff << 16)
185 	 #define RX_CMP_PAYLOAD_OFFSET_SHIFT			 16
186 
187 	__le32 rx_cmp_rss_hash;
188 };
189 
190 #define RX_CMP_HASH_VALID(rxcmp)				\
191 	((rxcmp)->rx_cmp_len_flags_type & cpu_to_le32(RX_CMP_FLAGS_RSS_VALID))
192 
193 #define RSS_PROFILE_ID_MASK	0x1f
194 
195 #define RX_CMP_HASH_TYPE(rxcmp)					\
196 	(((le32_to_cpu((rxcmp)->rx_cmp_misc_v1) & RX_CMP_RSS_HASH_TYPE) >>\
197 	  RX_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
198 
199 struct rx_cmp_ext {
200 	__le32 rx_cmp_flags2;
201 	#define RX_CMP_FLAGS2_IP_CS_CALC			0x1
202 	#define RX_CMP_FLAGS2_L4_CS_CALC			(0x1 << 1)
203 	#define RX_CMP_FLAGS2_T_IP_CS_CALC			(0x1 << 2)
204 	#define RX_CMP_FLAGS2_T_L4_CS_CALC			(0x1 << 3)
205 	#define RX_CMP_FLAGS2_META_FORMAT_VLAN			(0x1 << 4)
206 	__le32 rx_cmp_meta_data;
207 	#define RX_CMP_FLAGS2_METADATA_TCI_MASK			0xffff
208 	#define RX_CMP_FLAGS2_METADATA_VID_MASK			0xfff
209 	#define RX_CMP_FLAGS2_METADATA_TPID_MASK		0xffff0000
210 	 #define RX_CMP_FLAGS2_METADATA_TPID_SFT		 16
211 	__le32 rx_cmp_cfa_code_errors_v2;
212 	#define RX_CMP_V					(1 << 0)
213 	#define RX_CMPL_ERRORS_MASK				(0x7fff << 1)
214 	 #define RX_CMPL_ERRORS_SFT				 1
215 	#define RX_CMPL_ERRORS_BUFFER_ERROR_MASK		(0x7 << 1)
216 	 #define RX_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER		 (0x0 << 1)
217 	 #define RX_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT	 (0x1 << 1)
218 	 #define RX_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP	 (0x2 << 1)
219 	 #define RX_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT		 (0x3 << 1)
220 	#define RX_CMPL_ERRORS_IP_CS_ERROR			(0x1 << 4)
221 	#define RX_CMPL_ERRORS_L4_CS_ERROR			(0x1 << 5)
222 	#define RX_CMPL_ERRORS_T_IP_CS_ERROR			(0x1 << 6)
223 	#define RX_CMPL_ERRORS_T_L4_CS_ERROR			(0x1 << 7)
224 	#define RX_CMPL_ERRORS_CRC_ERROR			(0x1 << 8)
225 	#define RX_CMPL_ERRORS_T_PKT_ERROR_MASK			(0x7 << 9)
226 	 #define RX_CMPL_ERRORS_T_PKT_ERROR_NO_ERROR		 (0x0 << 9)
227 	 #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION	 (0x1 << 9)
228 	 #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN	 (0x2 << 9)
229 	 #define RX_CMPL_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR	 (0x3 << 9)
230 	 #define RX_CMPL_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR	 (0x4 << 9)
231 	 #define RX_CMPL_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR	 (0x5 << 9)
232 	 #define RX_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL	 (0x6 << 9)
233 	#define RX_CMPL_ERRORS_PKT_ERROR_MASK			(0xf << 12)
234 	 #define RX_CMPL_ERRORS_PKT_ERROR_NO_ERROR		 (0x0 << 12)
235 	 #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION	 (0x1 << 12)
236 	 #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN	 (0x2 << 12)
237 	 #define RX_CMPL_ERRORS_PKT_ERROR_L3_BAD_TTL		 (0x3 << 12)
238 	 #define RX_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR	 (0x4 << 12)
239 	 #define RX_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR	 (0x5 << 12)
240 	 #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN	 (0x6 << 12)
241 	 #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL (0x7 << 12)
242 	 #define RX_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN	 (0x8 << 12)
243 
244 	#define RX_CMPL_CFA_CODE_MASK				(0xffff << 16)
245 	 #define RX_CMPL_CFA_CODE_SFT				 16
246 
247 	__le32 rx_cmp_timestamp;
248 };
249 
250 #define RX_CMP_L2_ERRORS						\
251 	cpu_to_le32(RX_CMPL_ERRORS_BUFFER_ERROR_MASK | RX_CMPL_ERRORS_CRC_ERROR)
252 
253 #define RX_CMP_L4_CS_BITS						\
254 	(cpu_to_le32(RX_CMP_FLAGS2_L4_CS_CALC | RX_CMP_FLAGS2_T_L4_CS_CALC))
255 
256 #define RX_CMP_L4_CS_ERR_BITS						\
257 	(cpu_to_le32(RX_CMPL_ERRORS_L4_CS_ERROR | RX_CMPL_ERRORS_T_L4_CS_ERROR))
258 
259 #define RX_CMP_L4_CS_OK(rxcmp1)						\
260 	    (((rxcmp1)->rx_cmp_flags2 &	RX_CMP_L4_CS_BITS) &&		\
261 	     !((rxcmp1)->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS))
262 
263 #define RX_CMP_ENCAP(rxcmp1)						\
264 	    ((le32_to_cpu((rxcmp1)->rx_cmp_flags2) &			\
265 	     RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3)
266 
267 #define RX_CMP_CFA_CODE(rxcmpl1)					\
268 	((le32_to_cpu((rxcmpl1)->rx_cmp_cfa_code_errors_v2) &		\
269 	  RX_CMPL_CFA_CODE_MASK) >> RX_CMPL_CFA_CODE_SFT)
270 
271 struct rx_agg_cmp {
272 	__le32 rx_agg_cmp_len_flags_type;
273 	#define RX_AGG_CMP_TYPE					(0x3f << 0)
274 	#define RX_AGG_CMP_LEN					(0xffff << 16)
275 	 #define RX_AGG_CMP_LEN_SHIFT				 16
276 	u32 rx_agg_cmp_opaque;
277 	__le32 rx_agg_cmp_v;
278 	#define RX_AGG_CMP_V					(1 << 0)
279 	#define RX_AGG_CMP_AGG_ID				(0xffff << 16)
280 	 #define RX_AGG_CMP_AGG_ID_SHIFT			 16
281 	__le32 rx_agg_cmp_unused;
282 };
283 
284 #define TPA_AGG_AGG_ID(rx_agg)				\
285 	((le32_to_cpu((rx_agg)->rx_agg_cmp_v) &		\
286 	 RX_AGG_CMP_AGG_ID) >> RX_AGG_CMP_AGG_ID_SHIFT)
287 
288 struct rx_tpa_start_cmp {
289 	__le32 rx_tpa_start_cmp_len_flags_type;
290 	#define RX_TPA_START_CMP_TYPE				(0x3f << 0)
291 	#define RX_TPA_START_CMP_FLAGS				(0x3ff << 6)
292 	 #define RX_TPA_START_CMP_FLAGS_SHIFT			 6
293 	#define RX_TPA_START_CMP_FLAGS_ERROR			(0x1 << 6)
294 	#define RX_TPA_START_CMP_FLAGS_PLACEMENT		(0x7 << 7)
295 	 #define RX_TPA_START_CMP_FLAGS_PLACEMENT_SHIFT		 7
296 	 #define RX_TPA_START_CMP_FLAGS_PLACEMENT_JUMBO		 (0x1 << 7)
297 	 #define RX_TPA_START_CMP_FLAGS_PLACEMENT_HDS		 (0x2 << 7)
298 	 #define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_JUMBO	 (0x5 << 7)
299 	 #define RX_TPA_START_CMP_FLAGS_PLACEMENT_GRO_HDS	 (0x6 << 7)
300 	#define RX_TPA_START_CMP_FLAGS_RSS_VALID		(0x1 << 10)
301 	#define RX_TPA_START_CMP_FLAGS_TIMESTAMP		(0x1 << 11)
302 	#define RX_TPA_START_CMP_FLAGS_ITYPES			(0xf << 12)
303 	 #define RX_TPA_START_CMP_FLAGS_ITYPES_SHIFT		 12
304 	 #define RX_TPA_START_CMP_FLAGS_ITYPE_TCP		 (0x2 << 12)
305 	#define RX_TPA_START_CMP_LEN				(0xffff << 16)
306 	 #define RX_TPA_START_CMP_LEN_SHIFT			 16
307 
308 	u32 rx_tpa_start_cmp_opaque;
309 	__le32 rx_tpa_start_cmp_misc_v1;
310 	#define RX_TPA_START_CMP_V1				(0x1 << 0)
311 	#define RX_TPA_START_CMP_RSS_HASH_TYPE			(0x7f << 9)
312 	 #define RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT		 9
313 	#define RX_TPA_START_CMP_AGG_ID				(0x7f << 25)
314 	 #define RX_TPA_START_CMP_AGG_ID_SHIFT			 25
315 	#define RX_TPA_START_CMP_AGG_ID_P5			(0xffff << 16)
316 	 #define RX_TPA_START_CMP_AGG_ID_SHIFT_P5		 16
317 
318 	__le32 rx_tpa_start_cmp_rss_hash;
319 };
320 
321 #define TPA_START_HASH_VALID(rx_tpa_start)				\
322 	((rx_tpa_start)->rx_tpa_start_cmp_len_flags_type &		\
323 	 cpu_to_le32(RX_TPA_START_CMP_FLAGS_RSS_VALID))
324 
325 #define TPA_START_HASH_TYPE(rx_tpa_start)				\
326 	(((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) &	\
327 	   RX_TPA_START_CMP_RSS_HASH_TYPE) >>				\
328 	  RX_TPA_START_CMP_RSS_HASH_TYPE_SHIFT) & RSS_PROFILE_ID_MASK)
329 
330 #define TPA_START_AGG_ID(rx_tpa_start)					\
331 	((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) &	\
332 	 RX_TPA_START_CMP_AGG_ID) >> RX_TPA_START_CMP_AGG_ID_SHIFT)
333 
334 #define TPA_START_AGG_ID_P5(rx_tpa_start)				\
335 	((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_misc_v1) &	\
336 	 RX_TPA_START_CMP_AGG_ID_P5) >> RX_TPA_START_CMP_AGG_ID_SHIFT_P5)
337 
338 #define TPA_START_ERROR(rx_tpa_start)					\
339 	((rx_tpa_start)->rx_tpa_start_cmp_len_flags_type &		\
340 	 cpu_to_le32(RX_TPA_START_CMP_FLAGS_ERROR))
341 
342 struct rx_tpa_start_cmp_ext {
343 	__le32 rx_tpa_start_cmp_flags2;
344 	#define RX_TPA_START_CMP_FLAGS2_IP_CS_CALC		(0x1 << 0)
345 	#define RX_TPA_START_CMP_FLAGS2_L4_CS_CALC		(0x1 << 1)
346 	#define RX_TPA_START_CMP_FLAGS2_T_IP_CS_CALC		(0x1 << 2)
347 	#define RX_TPA_START_CMP_FLAGS2_T_L4_CS_CALC		(0x1 << 3)
348 	#define RX_TPA_START_CMP_FLAGS2_IP_TYPE			(0x1 << 8)
349 	#define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_VALID		(0x1 << 9)
350 	#define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT		(0x3 << 10)
351 	 #define RX_TPA_START_CMP_FLAGS2_EXT_META_FORMAT_SHIFT	 10
352 	#define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL		(0xffff << 16)
353 	 #define RX_TPA_START_CMP_FLAGS2_CSUM_CMPL_SHIFT	 16
354 
355 	__le32 rx_tpa_start_cmp_metadata;
356 	__le32 rx_tpa_start_cmp_cfa_code_v2;
357 	#define RX_TPA_START_CMP_V2				(0x1 << 0)
358 	#define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_MASK	(0x7 << 1)
359 	 #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_SHIFT	 1
360 	 #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_NO_BUFFER	 (0x0 << 1)
361 	 #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3 << 1)
362 	 #define RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_FLUSH	 (0x5 << 1)
363 	#define RX_TPA_START_CMP_CFA_CODE			(0xffff << 16)
364 	 #define RX_TPA_START_CMPL_CFA_CODE_SHIFT		 16
365 	__le32 rx_tpa_start_cmp_hdr_info;
366 };
367 
368 #define TPA_START_CFA_CODE(rx_tpa_start)				\
369 	((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) &	\
370 	 RX_TPA_START_CMP_CFA_CODE) >> RX_TPA_START_CMPL_CFA_CODE_SHIFT)
371 
372 #define TPA_START_IS_IPV6(rx_tpa_start)				\
373 	(!!((rx_tpa_start)->rx_tpa_start_cmp_flags2 &		\
374 	    cpu_to_le32(RX_TPA_START_CMP_FLAGS2_IP_TYPE)))
375 
376 #define TPA_START_ERROR_CODE(rx_tpa_start)				\
377 	((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) &	\
378 	  RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_MASK) >>			\
379 	 RX_TPA_START_CMP_ERRORS_BUFFER_ERROR_SHIFT)
380 
381 struct rx_tpa_end_cmp {
382 	__le32 rx_tpa_end_cmp_len_flags_type;
383 	#define RX_TPA_END_CMP_TYPE				(0x3f << 0)
384 	#define RX_TPA_END_CMP_FLAGS				(0x3ff << 6)
385 	 #define RX_TPA_END_CMP_FLAGS_SHIFT			 6
386 	#define RX_TPA_END_CMP_FLAGS_PLACEMENT			(0x7 << 7)
387 	 #define RX_TPA_END_CMP_FLAGS_PLACEMENT_SHIFT		 7
388 	 #define RX_TPA_END_CMP_FLAGS_PLACEMENT_JUMBO		 (0x1 << 7)
389 	 #define RX_TPA_END_CMP_FLAGS_PLACEMENT_HDS		 (0x2 << 7)
390 	 #define RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO	 (0x5 << 7)
391 	 #define RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS		 (0x6 << 7)
392 	#define RX_TPA_END_CMP_FLAGS_RSS_VALID			(0x1 << 10)
393 	#define RX_TPA_END_CMP_FLAGS_ITYPES			(0xf << 12)
394 	 #define RX_TPA_END_CMP_FLAGS_ITYPES_SHIFT		 12
395 	 #define RX_TPA_END_CMP_FLAGS_ITYPE_TCP			 (0x2 << 12)
396 	#define RX_TPA_END_CMP_LEN				(0xffff << 16)
397 	 #define RX_TPA_END_CMP_LEN_SHIFT			 16
398 
399 	u32 rx_tpa_end_cmp_opaque;
400 	__le32 rx_tpa_end_cmp_misc_v1;
401 	#define RX_TPA_END_CMP_V1				(0x1 << 0)
402 	#define RX_TPA_END_CMP_AGG_BUFS				(0x3f << 1)
403 	 #define RX_TPA_END_CMP_AGG_BUFS_SHIFT			 1
404 	#define RX_TPA_END_CMP_TPA_SEGS				(0xff << 8)
405 	 #define RX_TPA_END_CMP_TPA_SEGS_SHIFT			 8
406 	#define RX_TPA_END_CMP_PAYLOAD_OFFSET			(0xff << 16)
407 	 #define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT		 16
408 	#define RX_TPA_END_CMP_AGG_ID				(0x7f << 25)
409 	 #define RX_TPA_END_CMP_AGG_ID_SHIFT			 25
410 	#define RX_TPA_END_CMP_AGG_ID_P5			(0xffff << 16)
411 	 #define RX_TPA_END_CMP_AGG_ID_SHIFT_P5			 16
412 
413 	__le32 rx_tpa_end_cmp_tsdelta;
414 	#define RX_TPA_END_GRO_TS				(0x1 << 31)
415 };
416 
417 #define TPA_END_AGG_ID(rx_tpa_end)					\
418 	((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) &		\
419 	 RX_TPA_END_CMP_AGG_ID) >> RX_TPA_END_CMP_AGG_ID_SHIFT)
420 
421 #define TPA_END_AGG_ID_P5(rx_tpa_end)					\
422 	((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) &		\
423 	 RX_TPA_END_CMP_AGG_ID_P5) >> RX_TPA_END_CMP_AGG_ID_SHIFT_P5)
424 
425 #define TPA_END_PAYLOAD_OFF(rx_tpa_end)					\
426 	((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) &		\
427 	 RX_TPA_END_CMP_PAYLOAD_OFFSET) >> RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT)
428 
429 #define TPA_END_AGG_BUFS(rx_tpa_end)					\
430 	((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) &		\
431 	 RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT)
432 
433 #define TPA_END_TPA_SEGS(rx_tpa_end)					\
434 	((le32_to_cpu((rx_tpa_end)->rx_tpa_end_cmp_misc_v1) &		\
435 	 RX_TPA_END_CMP_TPA_SEGS) >> RX_TPA_END_CMP_TPA_SEGS_SHIFT)
436 
437 #define RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO				\
438 	cpu_to_le32(RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_JUMBO &		\
439 		    RX_TPA_END_CMP_FLAGS_PLACEMENT_GRO_HDS)
440 
441 #define TPA_END_GRO(rx_tpa_end)						\
442 	((rx_tpa_end)->rx_tpa_end_cmp_len_flags_type &			\
443 	 RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO)
444 
445 #define TPA_END_GRO_TS(rx_tpa_end)					\
446 	(!!((rx_tpa_end)->rx_tpa_end_cmp_tsdelta &			\
447 	    cpu_to_le32(RX_TPA_END_GRO_TS)))
448 
449 struct rx_tpa_end_cmp_ext {
450 	__le32 rx_tpa_end_cmp_dup_acks;
451 	#define RX_TPA_END_CMP_TPA_DUP_ACKS			(0xf << 0)
452 	#define RX_TPA_END_CMP_PAYLOAD_OFFSET_P5		(0xff << 16)
453 	 #define RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT_P5		 16
454 	#define RX_TPA_END_CMP_AGG_BUFS_P5			(0xff << 24)
455 	 #define RX_TPA_END_CMP_AGG_BUFS_SHIFT_P5		 24
456 
457 	__le32 rx_tpa_end_cmp_seg_len;
458 	#define RX_TPA_END_CMP_TPA_SEG_LEN			(0xffff << 0)
459 
460 	__le32 rx_tpa_end_cmp_errors_v2;
461 	#define RX_TPA_END_CMP_V2				(0x1 << 0)
462 	#define RX_TPA_END_CMP_ERRORS				(0x3 << 1)
463 	#define RX_TPA_END_CMP_ERRORS_P5			(0x7 << 1)
464 	#define RX_TPA_END_CMPL_ERRORS_SHIFT			 1
465 	 #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_NO_BUFFER	 (0x0 << 1)
466 	 #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_NOT_ON_CHIP	 (0x2 << 1)
467 	 #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_BAD_FORMAT	 (0x3 << 1)
468 	 #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_RSV_ERROR	 (0x4 << 1)
469 	 #define RX_TPA_END_CMP_ERRORS_BUFFER_ERROR_FLUSH	 (0x5 << 1)
470 
471 	u32 rx_tpa_end_cmp_start_opaque;
472 };
473 
474 #define TPA_END_ERRORS(rx_tpa_end_ext)					\
475 	((rx_tpa_end_ext)->rx_tpa_end_cmp_errors_v2 &			\
476 	 cpu_to_le32(RX_TPA_END_CMP_ERRORS))
477 
478 #define TPA_END_PAYLOAD_OFF_P5(rx_tpa_end_ext)				\
479 	((le32_to_cpu((rx_tpa_end_ext)->rx_tpa_end_cmp_dup_acks) &	\
480 	 RX_TPA_END_CMP_PAYLOAD_OFFSET_P5) >>				\
481 	RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT_P5)
482 
483 #define TPA_END_AGG_BUFS_P5(rx_tpa_end_ext)				\
484 	((le32_to_cpu((rx_tpa_end_ext)->rx_tpa_end_cmp_dup_acks) &	\
485 	 RX_TPA_END_CMP_AGG_BUFS_P5) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT_P5)
486 
487 #define EVENT_DATA1_RESET_NOTIFY_FATAL(data1)				\
488 	(((data1) &							\
489 	  ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK) ==\
490 	 ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL)
491 
492 #define EVENT_DATA1_RECOVERY_MASTER_FUNC(data1)				\
493 	!!((data1) &							\
494 	   ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC)
495 
496 #define EVENT_DATA1_RECOVERY_ENABLED(data1)				\
497 	!!((data1) &							\
498 	   ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED)
499 
500 #define BNXT_EVENT_ERROR_REPORT_TYPE(data1)				\
501 	(((data1) &							\
502 	  ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK) >>\
503 	 ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT)
504 
505 #define BNXT_EVENT_INVALID_SIGNAL_DATA(data2)				\
506 	(((data2) &							\
507 	  ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_MASK) >>\
508 	 ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_SFT)
509 
510 struct nqe_cn {
511 	__le16	type;
512 	#define NQ_CN_TYPE_MASK           0x3fUL
513 	#define NQ_CN_TYPE_SFT            0
514 	#define NQ_CN_TYPE_CQ_NOTIFICATION  0x30UL
515 	#define NQ_CN_TYPE_LAST            NQ_CN_TYPE_CQ_NOTIFICATION
516 	__le16	reserved16;
517 	__le32	cq_handle_low;
518 	__le32	v;
519 	#define NQ_CN_V     0x1UL
520 	__le32	cq_handle_high;
521 };
522 
523 #define DB_IDX_MASK						0xffffff
524 #define DB_IDX_VALID						(0x1 << 26)
525 #define DB_IRQ_DIS						(0x1 << 27)
526 #define DB_KEY_TX						(0x0 << 28)
527 #define DB_KEY_RX						(0x1 << 28)
528 #define DB_KEY_CP						(0x2 << 28)
529 #define DB_KEY_ST						(0x3 << 28)
530 #define DB_KEY_TX_PUSH						(0x4 << 28)
531 #define DB_LONG_TX_PUSH						(0x2 << 24)
532 
533 #define BNXT_MIN_ROCE_CP_RINGS	2
534 #define BNXT_MIN_ROCE_STAT_CTXS	1
535 
536 /* 64-bit doorbell */
537 #define DBR_INDEX_MASK					0x0000000000ffffffULL
538 #define DBR_XID_MASK					0x000fffff00000000ULL
539 #define DBR_XID_SFT					32
540 #define DBR_PATH_L2					(0x1ULL << 56)
541 #define DBR_TYPE_SQ					(0x0ULL << 60)
542 #define DBR_TYPE_RQ					(0x1ULL << 60)
543 #define DBR_TYPE_SRQ					(0x2ULL << 60)
544 #define DBR_TYPE_SRQ_ARM				(0x3ULL << 60)
545 #define DBR_TYPE_CQ					(0x4ULL << 60)
546 #define DBR_TYPE_CQ_ARMSE				(0x5ULL << 60)
547 #define DBR_TYPE_CQ_ARMALL				(0x6ULL << 60)
548 #define DBR_TYPE_CQ_ARMENA				(0x7ULL << 60)
549 #define DBR_TYPE_SRQ_ARMENA				(0x8ULL << 60)
550 #define DBR_TYPE_CQ_CUTOFF_ACK				(0x9ULL << 60)
551 #define DBR_TYPE_NQ					(0xaULL << 60)
552 #define DBR_TYPE_NQ_ARM					(0xbULL << 60)
553 #define DBR_TYPE_NULL					(0xfULL << 60)
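/* Per the masks above, a 64-bit doorbell value carries the ring index in
 * bits 0-23, the ring XID in bits 32-51, the L2 path flag at bit 56 and
 * the doorbell type in the top nibble (bits 60-63).
 */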
554 
555 #define DB_PF_OFFSET_P5					0x10000
556 #define DB_VF_OFFSET_P5					0x4000
557 
558 #define INVALID_HW_RING_ID	((u16)-1)
559 
560 /* The hardware supports certain page sizes.  Use the supported page sizes
561  * to allocate the rings.
562  */
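/* The ladder below resolves BNXT_PAGE_SHIFT to 12, 13 or 16, so ring
 * pages are always allocated as 4K, 8K or 64K chunks regardless of the
 * host PAGE_SIZE.
 */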
563 #if (PAGE_SHIFT < 12)
564 #define BNXT_PAGE_SHIFT	12
565 #elif (PAGE_SHIFT <= 13)
566 #define BNXT_PAGE_SHIFT	PAGE_SHIFT
567 #elif (PAGE_SHIFT < 16)
568 #define BNXT_PAGE_SHIFT	13
569 #else
570 #define BNXT_PAGE_SHIFT	16
571 #endif
572 
573 #define BNXT_PAGE_SIZE	(1 << BNXT_PAGE_SHIFT)
574 
575 /* The RXBD length is 16-bit so we can only support page sizes < 64K */
576 #if (PAGE_SHIFT > 15)
577 #define BNXT_RX_PAGE_SHIFT 15
578 #else
579 #define BNXT_RX_PAGE_SHIFT PAGE_SHIFT
580 #endif
581 
582 #define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)
583 
584 #define BNXT_MAX_MTU		9500
585 #define BNXT_MAX_PAGE_MODE_MTU	\
586 	((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN -	\
587 	 XDP_PACKET_HEADROOM - \
588 	 SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info)))
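/* BNXT_MAX_PAGE_MODE_MTU is the largest MTU that still fits in a single
 * page after the Ethernet/VLAN header, NET_IP_ALIGN padding, the XDP
 * headroom and the trailing skb_shared_info, for page-per-packet rx mode
 * (e.g. when an XDP program is attached).
 */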
589 
590 #define BNXT_MIN_PKT_SIZE	52
591 
592 #define BNXT_DEFAULT_RX_RING_SIZE	511
593 #define BNXT_DEFAULT_TX_RING_SIZE	511
594 
595 #define MAX_TPA		64
596 #define MAX_TPA_P5	256
597 #define MAX_TPA_P5_MASK	(MAX_TPA_P5 - 1)
598 #define MAX_TPA_SEGS_P5	0x3f
599 
600 #if (BNXT_PAGE_SHIFT == 16)
601 #define MAX_RX_PAGES_AGG_ENA	1
602 #define MAX_RX_PAGES	4
603 #define MAX_RX_AGG_PAGES	4
604 #define MAX_TX_PAGES	1
605 #define MAX_CP_PAGES	16
606 #else
607 #define MAX_RX_PAGES_AGG_ENA	8
608 #define MAX_RX_PAGES	32
609 #define MAX_RX_AGG_PAGES	32
610 #define MAX_TX_PAGES	8
611 #define MAX_CP_PAGES	128
612 #endif
613 
614 #define RX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct rx_bd))
615 #define TX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct tx_bd))
616 #define CP_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct tx_cmp))
617 
618 #define SW_RXBD_RING_SIZE (sizeof(struct bnxt_sw_rx_bd) * RX_DESC_CNT)
619 #define HW_RXBD_RING_SIZE (sizeof(struct rx_bd) * RX_DESC_CNT)
620 
621 #define SW_RXBD_AGG_RING_SIZE (sizeof(struct bnxt_sw_rx_agg_bd) * RX_DESC_CNT)
622 
623 #define SW_TXBD_RING_SIZE (sizeof(struct bnxt_sw_tx_bd) * TX_DESC_CNT)
624 #define HW_TXBD_RING_SIZE (sizeof(struct tx_bd) * TX_DESC_CNT)
625 
626 #define HW_CMPD_RING_SIZE (sizeof(struct tx_cmp) * CP_DESC_CNT)
627 
628 #define BNXT_MAX_RX_DESC_CNT		(RX_DESC_CNT * MAX_RX_PAGES - 1)
629 #define BNXT_MAX_RX_DESC_CNT_JUM_ENA	(RX_DESC_CNT * MAX_RX_PAGES_AGG_ENA - 1)
630 #define BNXT_MAX_RX_JUM_DESC_CNT	(RX_DESC_CNT * MAX_RX_AGG_PAGES - 1)
631 #define BNXT_MAX_TX_DESC_CNT		(TX_DESC_CNT * MAX_TX_PAGES - 1)
632 
633 /* Minimum TX BDs for a TX packet with MAX_SKB_FRAGS + 1.  We need one extra
634  * BD because the first TX BD is always a long BD.
635  */
636 #define BNXT_MIN_TX_DESC_CNT		(MAX_SKB_FRAGS + 2)
637 
638 #define RX_RING(x)	(((x) & ~(RX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
639 #define RX_IDX(x)	((x) & (RX_DESC_CNT - 1))
640 
641 #define TX_RING(x)	(((x) & ~(TX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
642 #define TX_IDX(x)	((x) & (TX_DESC_CNT - 1))
643 
644 #define CP_RING(x)	(((x) & ~(CP_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
645 #define CP_IDX(x)	((x) & (CP_DESC_CNT - 1))
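/* Each descriptor ring is an array of BNXT_PAGE_SIZE pages and every BD
 * or completion entry is 16 bytes, so one page holds 2^(BNXT_PAGE_SHIFT - 4)
 * entries.  The *_RING() macros return the page index for a ring position
 * and the *_IDX() macros return the offset within that page.
 */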
646 
647 #define TX_CMP_VALID(txcmp, raw_cons)					\
648 	(!!((txcmp)->tx_cmp_errors_v & cpu_to_le32(TX_CMP_V)) ==	\
649 	 !((raw_cons) & bp->cp_bit))
650 
651 #define RX_CMP_VALID(rxcmp1, raw_cons)					\
652 	(!!((rxcmp1)->rx_cmp_cfa_code_errors_v2 & cpu_to_le32(RX_CMP_V)) ==\
653 	 !((raw_cons) & bp->cp_bit))
654 
655 #define RX_AGG_CMP_VALID(agg, raw_cons)				\
656 	(!!((agg)->rx_agg_cmp_v & cpu_to_le32(RX_AGG_CMP_V)) ==	\
657 	 !((raw_cons) & bp->cp_bit))
658 
659 #define NQ_CMP_VALID(nqcmp, raw_cons)				\
660 	(!!((nqcmp)->v & cpu_to_le32(NQ_CN_V)) == !((raw_cons) & bp->cp_bit))
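/* Completion entries carry a valid (V) bit that alternates on each pass
 * through the ring.  The raw consumer index counts up without masking, so
 * (raw_cons & bp->cp_bit) tracks the expected phase for the current pass
 * and the *_CMP_VALID() tests accept an entry only when its V bit matches.
 */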
661 
662 #define TX_CMP_TYPE(txcmp)					\
663 	(le32_to_cpu((txcmp)->tx_cmp_flags_type) & CMP_TYPE)
664 
665 #define RX_CMP_TYPE(rxcmp)					\
666 	(le32_to_cpu((rxcmp)->rx_cmp_len_flags_type) & RX_CMP_CMP_TYPE)
667 
668 #define NEXT_RX(idx)		(((idx) + 1) & bp->rx_ring_mask)
669 
670 #define NEXT_RX_AGG(idx)	(((idx) + 1) & bp->rx_agg_ring_mask)
671 
672 #define NEXT_TX(idx)		(((idx) + 1) & bp->tx_ring_mask)
673 
674 #define ADV_RAW_CMP(idx, n)	((idx) + (n))
675 #define NEXT_RAW_CMP(idx)	ADV_RAW_CMP(idx, 1)
676 #define RING_CMP(idx)		((idx) & bp->cp_ring_mask)
677 #define NEXT_CMP(idx)		RING_CMP(ADV_RAW_CMP(idx, 1))
678 
679 #define DFLT_HWRM_CMD_TIMEOUT		500
680 
681 #define BNXT_RX_EVENT		1
682 #define BNXT_AGG_EVENT		2
683 #define BNXT_TX_EVENT		4
684 #define BNXT_REDIRECT_EVENT	8
685 
686 struct bnxt_sw_tx_bd {
687 	union {
688 		struct sk_buff		*skb;
689 		struct xdp_frame	*xdpf;
690 	};
691 	DEFINE_DMA_UNMAP_ADDR(mapping);
692 	DEFINE_DMA_UNMAP_LEN(len);
693 	u8			is_gso;
694 	u8			is_push;
695 	u8			action;
696 	union {
697 		unsigned short		nr_frags;
698 		u16			rx_prod;
699 	};
700 };
701 
702 struct bnxt_sw_rx_bd {
703 	void			*data;
704 	u8			*data_ptr;
705 	dma_addr_t		mapping;
706 };
707 
708 struct bnxt_sw_rx_agg_bd {
709 	struct page		*page;
710 	unsigned int		offset;
711 	dma_addr_t		mapping;
712 };
713 
714 struct bnxt_mem_init {
715 	u8	init_val;
716 	u16	offset;
717 #define	BNXT_MEM_INVALID_OFFSET	0xffff
718 	u16	size;
719 };
720 
721 struct bnxt_ring_mem_info {
722 	int			nr_pages;
723 	int			page_size;
724 	u16			flags;
725 #define BNXT_RMEM_VALID_PTE_FLAG	1
726 #define BNXT_RMEM_RING_PTE_FLAG		2
727 #define BNXT_RMEM_USE_FULL_PAGE_FLAG	4
728 
729 	u16			depth;
730 	struct bnxt_mem_init	*mem_init;
731 
732 	void			**pg_arr;
733 	dma_addr_t		*dma_arr;
734 
735 	__le64			*pg_tbl;
736 	dma_addr_t		pg_tbl_map;
737 
738 	int			vmem_size;
739 	void			**vmem;
740 };
741 
742 struct bnxt_ring_struct {
743 	struct bnxt_ring_mem_info	ring_mem;
744 
745 	u16			fw_ring_id; /* Ring id filled by Chimp FW */
746 	union {
747 		u16		grp_idx;
748 		u16		map_idx; /* Used by cmpl rings */
749 	};
750 	u32			handle;
751 	u8			queue_id;
752 };
753 
754 struct tx_push_bd {
755 	__le32			doorbell;
756 	__le32			tx_bd_len_flags_type;
757 	u32			tx_bd_opaque;
758 	struct tx_bd_ext	txbd2;
759 };
760 
761 struct tx_push_buffer {
762 	struct tx_push_bd	push_bd;
763 	u32			data[25];
764 };
765 
766 struct bnxt_db_info {
767 	void __iomem		*doorbell;
768 	union {
769 		u64		db_key64;
770 		u32		db_key32;
771 	};
772 };
773 
774 struct bnxt_tx_ring_info {
775 	struct bnxt_napi	*bnapi;
776 	u16			tx_prod;
777 	u16			tx_cons;
778 	u16			txq_index;
779 	u8			kick_pending;
780 	struct bnxt_db_info	tx_db;
781 
782 	struct tx_bd		*tx_desc_ring[MAX_TX_PAGES];
783 	struct bnxt_sw_tx_bd	*tx_buf_ring;
784 
785 	dma_addr_t		tx_desc_mapping[MAX_TX_PAGES];
786 
787 	struct tx_push_buffer	*tx_push;
788 	dma_addr_t		tx_push_mapping;
789 	__le64			data_mapping;
790 
791 #define BNXT_DEV_STATE_CLOSING	0x1
792 	u32			dev_state;
793 
794 	struct bnxt_ring_struct	tx_ring_struct;
795 	/* Synchronize simultaneous xdp_xmit on same ring */
796 	spinlock_t		xdp_tx_lock;
797 };
798 
799 #define BNXT_LEGACY_COAL_CMPL_PARAMS					\
800 	(RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN |		\
801 	 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MAX |		\
802 	 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET |		\
803 	 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE |			\
804 	 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR |		\
805 	 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT | \
806 	 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR |		\
807 	 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_CMPL_AGGR_DMA_TMR_DURING_INT | \
808 	 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_AGGR_INT)
809 
810 #define BNXT_COAL_CMPL_ENABLES						\
811 	(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR | \
812 	 RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_CMPL_AGGR_DMA_TMR | \
813 	 RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MAX | \
814 	 RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_AGGR_INT)
815 
816 #define BNXT_COAL_CMPL_MIN_TMR_ENABLE					\
817 	RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_INT_LAT_TMR_MIN
818 
819 #define BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE			\
820 	RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_ENABLES_NUM_CMPL_DMA_AGGR_DURING_INT
821 
822 struct bnxt_coal_cap {
823 	u32			cmpl_params;
824 	u32			nq_params;
825 	u16			num_cmpl_dma_aggr_max;
826 	u16			num_cmpl_dma_aggr_during_int_max;
827 	u16			cmpl_aggr_dma_tmr_max;
828 	u16			cmpl_aggr_dma_tmr_during_int_max;
829 	u16			int_lat_tmr_min_max;
830 	u16			int_lat_tmr_max_max;
831 	u16			num_cmpl_aggr_int_max;
832 	u16			timer_units;
833 };
834 
835 struct bnxt_coal {
836 	u16			coal_ticks;
837 	u16			coal_ticks_irq;
838 	u16			coal_bufs;
839 	u16			coal_bufs_irq;
840 			/* RING_IDLE enabled when coal ticks < idle_thresh  */
841 	u16			idle_thresh;
842 	u8			bufs_per_record;
843 	u8			budget;
844 };
845 
846 struct bnxt_tpa_info {
847 	void			*data;
848 	u8			*data_ptr;
849 	dma_addr_t		mapping;
850 	u16			len;
851 	unsigned short		gso_type;
852 	u32			flags2;
853 	u32			metadata;
854 	enum pkt_hash_types	hash_type;
855 	u32			rss_hash;
856 	u32			hdr_info;
857 
858 #define BNXT_TPA_L4_SIZE(hdr_info)	\
859 	(((hdr_info) & 0xf8000000) ? ((hdr_info) >> 27) : 32)
860 
861 #define BNXT_TPA_INNER_L3_OFF(hdr_info)	\
862 	(((hdr_info) >> 18) & 0x1ff)
863 
864 #define BNXT_TPA_INNER_L2_OFF(hdr_info)	\
865 	(((hdr_info) >> 9) & 0x1ff)
866 
867 #define BNXT_TPA_OUTER_L3_OFF(hdr_info)	\
868 	((hdr_info) & 0x1ff)
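/* hdr_info from the TPA start completion packs, from bit 0 upward: the
 * outer L3 offset (9 bits), inner L2 offset (9 bits), inner L3 offset
 * (9 bits) and the L4 header size (5 bits, where 0 means 32 bytes), as
 * decoded by the macros above.
 */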
869 
870 	u16			cfa_code; /* cfa_code in TPA start compl */
871 	u8			agg_count;
872 	struct rx_agg_cmp	*agg_arr;
873 };
874 
875 #define BNXT_AGG_IDX_BMAP_SIZE	(MAX_TPA_P5 / BITS_PER_LONG)
876 
877 struct bnxt_tpa_idx_map {
878 	u16		agg_id_tbl[1024];
879 	unsigned long	agg_idx_bmap[BNXT_AGG_IDX_BMAP_SIZE];
880 };
881 
882 struct bnxt_rx_ring_info {
883 	struct bnxt_napi	*bnapi;
884 	u16			rx_prod;
885 	u16			rx_agg_prod;
886 	u16			rx_sw_agg_prod;
887 	u16			rx_next_cons;
888 	struct bnxt_db_info	rx_db;
889 	struct bnxt_db_info	rx_agg_db;
890 
891 	struct bpf_prog		*xdp_prog;
892 
893 	struct rx_bd		*rx_desc_ring[MAX_RX_PAGES];
894 	struct bnxt_sw_rx_bd	*rx_buf_ring;
895 
896 	struct rx_bd		*rx_agg_desc_ring[MAX_RX_AGG_PAGES];
897 	struct bnxt_sw_rx_agg_bd	*rx_agg_ring;
898 
899 	unsigned long		*rx_agg_bmap;
900 	u16			rx_agg_bmap_size;
901 
902 	struct page		*rx_page;
903 	unsigned int		rx_page_offset;
904 
905 	dma_addr_t		rx_desc_mapping[MAX_RX_PAGES];
906 	dma_addr_t		rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
907 
908 	struct bnxt_tpa_info	*rx_tpa;
909 	struct bnxt_tpa_idx_map *rx_tpa_idx_map;
910 
911 	struct bnxt_ring_struct	rx_ring_struct;
912 	struct bnxt_ring_struct	rx_agg_ring_struct;
913 	struct xdp_rxq_info	xdp_rxq;
914 	struct page_pool	*page_pool;
915 };
916 
917 struct bnxt_rx_sw_stats {
918 	u64			rx_l4_csum_errors;
919 	u64			rx_resets;
920 	u64			rx_buf_errors;
921 	u64			rx_oom_discards;
922 	u64			rx_netpoll_discards;
923 };
924 
925 struct bnxt_cmn_sw_stats {
926 	u64			missed_irqs;
927 };
928 
929 struct bnxt_sw_stats {
930 	struct bnxt_rx_sw_stats rx;
931 	struct bnxt_cmn_sw_stats cmn;
932 };
933 
934 struct bnxt_stats_mem {
935 	u64		*sw_stats;
936 	u64		*hw_masks;
937 	void		*hw_stats;
938 	dma_addr_t	hw_stats_map;
939 	int		len;
940 };
941 
942 struct bnxt_cp_ring_info {
943 	struct bnxt_napi	*bnapi;
944 	u32			cp_raw_cons;
945 	struct bnxt_db_info	cp_db;
946 
947 	u8			had_work_done:1;
948 	u8			has_more_work:1;
949 
950 	u32			last_cp_raw_cons;
951 
952 	struct bnxt_coal	rx_ring_coal;
953 	u64			rx_packets;
954 	u64			rx_bytes;
955 	u64			event_ctr;
956 
957 	struct dim		dim;
958 
959 	union {
960 		struct tx_cmp	**cp_desc_ring;
961 		struct nqe_cn	**nq_desc_ring;
962 	};
963 
964 	dma_addr_t		*cp_desc_mapping;
965 
966 	struct bnxt_stats_mem	stats;
967 	u32			hw_stats_ctx_id;
968 
969 	struct bnxt_sw_stats	sw_stats;
970 
971 	struct bnxt_ring_struct	cp_ring_struct;
972 
973 	struct bnxt_cp_ring_info *cp_ring_arr[2];
974 #define BNXT_RX_HDL	0
975 #define BNXT_TX_HDL	1
976 };
977 
978 struct bnxt_napi {
979 	struct napi_struct	napi;
980 	struct bnxt		*bp;
981 
982 	int			index;
983 	struct bnxt_cp_ring_info	cp_ring;
984 	struct bnxt_rx_ring_info	*rx_ring;
985 	struct bnxt_tx_ring_info	*tx_ring;
986 
987 	void			(*tx_int)(struct bnxt *, struct bnxt_napi *,
988 					  int);
989 	int			tx_pkts;
990 	u8			events;
991 
992 	u32			flags;
993 #define BNXT_NAPI_FLAG_XDP	0x1
994 
995 	bool			in_reset;
996 };
997 
998 struct bnxt_irq {
999 	irq_handler_t	handler;
1000 	unsigned int	vector;
1001 	u8		requested:1;
1002 	u8		have_cpumask:1;
1003 	char		name[IFNAMSIZ + 2];
1004 	cpumask_var_t	cpu_mask;
1005 };
1006 
1007 #define HWRM_RING_ALLOC_TX	0x1
1008 #define HWRM_RING_ALLOC_RX	0x2
1009 #define HWRM_RING_ALLOC_AGG	0x4
1010 #define HWRM_RING_ALLOC_CMPL	0x8
1011 #define HWRM_RING_ALLOC_NQ	0x10
1012 
1013 #define INVALID_STATS_CTX_ID	-1
1014 
1015 struct bnxt_ring_grp_info {
1016 	u16	fw_stats_ctx;
1017 	u16	fw_grp_id;
1018 	u16	rx_fw_ring_id;
1019 	u16	agg_fw_ring_id;
1020 	u16	cp_fw_ring_id;
1021 };
1022 
1023 struct bnxt_vnic_info {
1024 	u16		fw_vnic_id; /* returned by Chimp during alloc */
1025 #define BNXT_MAX_CTX_PER_VNIC	8
1026 	u16		fw_rss_cos_lb_ctx[BNXT_MAX_CTX_PER_VNIC];
1027 	u16		fw_l2_ctx_id;
1028 #define BNXT_MAX_UC_ADDRS	4
1029 	__le64		fw_l2_filter_id[BNXT_MAX_UC_ADDRS];
1030 				/* index 0 always dev_addr */
1031 	u16		uc_filter_count;
1032 	u8		*uc_list;
1033 
1034 	u16		*fw_grp_ids;
1035 	dma_addr_t	rss_table_dma_addr;
1036 	__le16		*rss_table;
1037 	dma_addr_t	rss_hash_key_dma_addr;
1038 	u64		*rss_hash_key;
1039 	int		rss_table_size;
1040 #define BNXT_RSS_TABLE_ENTRIES_P5	64
1041 #define BNXT_RSS_TABLE_SIZE_P5		(BNXT_RSS_TABLE_ENTRIES_P5 * 4)
1042 #define BNXT_RSS_TABLE_MAX_TBL_P5	8
1043 #define BNXT_MAX_RSS_TABLE_SIZE_P5				\
1044 	(BNXT_RSS_TABLE_SIZE_P5 * BNXT_RSS_TABLE_MAX_TBL_P5)
1045 #define BNXT_MAX_RSS_TABLE_ENTRIES_P5				\
1046 	(BNXT_RSS_TABLE_ENTRIES_P5 * BNXT_RSS_TABLE_MAX_TBL_P5)
1047 
1048 	u32		rx_mask;
1049 
1050 	u8		*mc_list;
1051 	int		mc_list_size;
1052 	int		mc_list_count;
1053 	dma_addr_t	mc_list_mapping;
1054 #define BNXT_MAX_MC_ADDRS	16
1055 
1056 	u32		flags;
1057 #define BNXT_VNIC_RSS_FLAG	1
1058 #define BNXT_VNIC_RFS_FLAG	2
1059 #define BNXT_VNIC_MCAST_FLAG	4
1060 #define BNXT_VNIC_UCAST_FLAG	8
1061 #define BNXT_VNIC_RFS_NEW_RSS_FLAG	0x10
1062 };
1063 
1064 struct bnxt_hw_resc {
1065 	u16	min_rsscos_ctxs;
1066 	u16	max_rsscos_ctxs;
1067 	u16	min_cp_rings;
1068 	u16	max_cp_rings;
1069 	u16	resv_cp_rings;
1070 	u16	min_tx_rings;
1071 	u16	max_tx_rings;
1072 	u16	resv_tx_rings;
1073 	u16	max_tx_sch_inputs;
1074 	u16	min_rx_rings;
1075 	u16	max_rx_rings;
1076 	u16	resv_rx_rings;
1077 	u16	min_hw_ring_grps;
1078 	u16	max_hw_ring_grps;
1079 	u16	resv_hw_ring_grps;
1080 	u16	min_l2_ctxs;
1081 	u16	max_l2_ctxs;
1082 	u16	min_vnics;
1083 	u16	max_vnics;
1084 	u16	resv_vnics;
1085 	u16	min_stat_ctxs;
1086 	u16	max_stat_ctxs;
1087 	u16	resv_stat_ctxs;
1088 	u16	max_nqs;
1089 	u16	max_irqs;
1090 	u16	resv_irqs;
1091 };
1092 
1093 #if defined(CONFIG_BNXT_SRIOV)
1094 struct bnxt_vf_info {
1095 	u16	fw_fid;
1096 	u8	mac_addr[ETH_ALEN];	/* PF assigned MAC Address */
1097 	u8	vf_mac_addr[ETH_ALEN];	/* VF assigned MAC address, only
1098 					 * stored by PF.
1099 					 */
1100 	u16	vlan;
1101 	u16	func_qcfg_flags;
1102 	u32	flags;
1103 #define BNXT_VF_QOS		0x1
1104 #define BNXT_VF_SPOOFCHK	0x2
1105 #define BNXT_VF_LINK_FORCED	0x4
1106 #define BNXT_VF_LINK_UP		0x8
1107 #define BNXT_VF_TRUST		0x10
1108 	u32	min_tx_rate;
1109 	u32	max_tx_rate;
1110 	void	*hwrm_cmd_req_addr;
1111 	dma_addr_t	hwrm_cmd_req_dma_addr;
1112 };
1113 #endif
1114 
1115 struct bnxt_pf_info {
1116 #define BNXT_FIRST_PF_FID	1
1117 #define BNXT_FIRST_VF_FID	128
1118 	u16	fw_fid;
1119 	u16	port_id;
1120 	u8	mac_addr[ETH_ALEN];
1121 	u32	first_vf_id;
1122 	u16	active_vfs;
1123 	u16	registered_vfs;
1124 	u16	max_vfs;
1125 	u32	max_encap_records;
1126 	u32	max_decap_records;
1127 	u32	max_tx_em_flows;
1128 	u32	max_tx_wm_flows;
1129 	u32	max_rx_em_flows;
1130 	u32	max_rx_wm_flows;
1131 	unsigned long	*vf_event_bmap;
1132 	u16	hwrm_cmd_req_pages;
1133 	u8	vf_resv_strategy;
1134 #define BNXT_VF_RESV_STRATEGY_MAXIMAL	0
1135 #define BNXT_VF_RESV_STRATEGY_MINIMAL	1
1136 #define BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC	2
1137 	void			*hwrm_cmd_req_addr[4];
1138 	dma_addr_t		hwrm_cmd_req_dma_addr[4];
1139 	struct bnxt_vf_info	*vf;
1140 };
1141 
1142 struct bnxt_ntuple_filter {
1143 	struct hlist_node	hash;
1144 	u8			dst_mac_addr[ETH_ALEN];
1145 	u8			src_mac_addr[ETH_ALEN];
1146 	struct flow_keys	fkeys;
1147 	__le64			filter_id;
1148 	u16			sw_id;
1149 	u8			l2_fltr_idx;
1150 	u16			rxq;
1151 	u32			flow_id;
1152 	unsigned long		state;
1153 #define BNXT_FLTR_VALID		0
1154 #define BNXT_FLTR_UPDATE	1
1155 };
1156 
1157 struct bnxt_link_info {
1158 	u8			phy_type;
1159 	u8			media_type;
1160 	u8			transceiver;
1161 	u8			phy_addr;
1162 	u8			phy_link_status;
1163 #define BNXT_LINK_NO_LINK	PORT_PHY_QCFG_RESP_LINK_NO_LINK
1164 #define BNXT_LINK_SIGNAL	PORT_PHY_QCFG_RESP_LINK_SIGNAL
1165 #define BNXT_LINK_LINK		PORT_PHY_QCFG_RESP_LINK_LINK
1166 	u8			wire_speed;
1167 	u8			phy_state;
1168 #define BNXT_PHY_STATE_ENABLED		0
1169 #define BNXT_PHY_STATE_DISABLED		1
1170 
1171 	u8			link_up;
1172 	u8			duplex;
1173 #define BNXT_LINK_DUPLEX_HALF	PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF
1174 #define BNXT_LINK_DUPLEX_FULL	PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL
1175 	u8			pause;
1176 #define BNXT_LINK_PAUSE_TX	PORT_PHY_QCFG_RESP_PAUSE_TX
1177 #define BNXT_LINK_PAUSE_RX	PORT_PHY_QCFG_RESP_PAUSE_RX
1178 #define BNXT_LINK_PAUSE_BOTH	(PORT_PHY_QCFG_RESP_PAUSE_RX | \
1179 				 PORT_PHY_QCFG_RESP_PAUSE_TX)
1180 	u8			lp_pause;
1181 	u8			auto_pause_setting;
1182 	u8			force_pause_setting;
1183 	u8			duplex_setting;
1184 	u8			auto_mode;
1185 #define BNXT_AUTO_MODE(mode)	((mode) > BNXT_LINK_AUTO_NONE && \
1186 				 (mode) <= BNXT_LINK_AUTO_MSK)
1187 #define BNXT_LINK_AUTO_NONE     PORT_PHY_QCFG_RESP_AUTO_MODE_NONE
1188 #define BNXT_LINK_AUTO_ALLSPDS	PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS
1189 #define BNXT_LINK_AUTO_ONESPD	PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED
1190 #define BNXT_LINK_AUTO_ONEORBELOW PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW
1191 #define BNXT_LINK_AUTO_MSK	PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK
1192 #define PHY_VER_LEN		3
1193 	u8			phy_ver[PHY_VER_LEN];
1194 	u16			link_speed;
1195 #define BNXT_LINK_SPEED_100MB	PORT_PHY_QCFG_RESP_LINK_SPEED_100MB
1196 #define BNXT_LINK_SPEED_1GB	PORT_PHY_QCFG_RESP_LINK_SPEED_1GB
1197 #define BNXT_LINK_SPEED_2GB	PORT_PHY_QCFG_RESP_LINK_SPEED_2GB
1198 #define BNXT_LINK_SPEED_2_5GB	PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB
1199 #define BNXT_LINK_SPEED_10GB	PORT_PHY_QCFG_RESP_LINK_SPEED_10GB
1200 #define BNXT_LINK_SPEED_20GB	PORT_PHY_QCFG_RESP_LINK_SPEED_20GB
1201 #define BNXT_LINK_SPEED_25GB	PORT_PHY_QCFG_RESP_LINK_SPEED_25GB
1202 #define BNXT_LINK_SPEED_40GB	PORT_PHY_QCFG_RESP_LINK_SPEED_40GB
1203 #define BNXT_LINK_SPEED_50GB	PORT_PHY_QCFG_RESP_LINK_SPEED_50GB
1204 #define BNXT_LINK_SPEED_100GB	PORT_PHY_QCFG_RESP_LINK_SPEED_100GB
1205 #define BNXT_LINK_SPEED_200GB	PORT_PHY_QCFG_RESP_LINK_SPEED_200GB
1206 	u16			support_speeds;
1207 	u16			support_pam4_speeds;
1208 	u16			auto_link_speeds;	/* fw adv setting */
1209 #define BNXT_LINK_SPEED_MSK_100MB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB
1210 #define BNXT_LINK_SPEED_MSK_1GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB
1211 #define BNXT_LINK_SPEED_MSK_2GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB
1212 #define BNXT_LINK_SPEED_MSK_10GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB
1213 #define BNXT_LINK_SPEED_MSK_2_5GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB
1214 #define BNXT_LINK_SPEED_MSK_20GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB
1215 #define BNXT_LINK_SPEED_MSK_25GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB
1216 #define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB
1217 #define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB
1218 #define BNXT_LINK_SPEED_MSK_100GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB
1219 	u16			auto_pam4_link_speeds;
1220 #define BNXT_LINK_PAM4_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_50G
1221 #define BNXT_LINK_PAM4_SPEED_MSK_100GB PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_100G
1222 #define BNXT_LINK_PAM4_SPEED_MSK_200GB PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_200G
1223 	u16			support_auto_speeds;
1224 	u16			support_pam4_auto_speeds;
1225 	u16			lp_auto_link_speeds;
1226 	u16			lp_auto_pam4_link_speeds;
1227 	u16			force_link_speed;
1228 	u16			force_pam4_link_speed;
1229 	u32			preemphasis;
1230 	u8			module_status;
1231 	u8			active_fec_sig_mode;
1232 	u16			fec_cfg;
1233 #define BNXT_FEC_NONE		PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED
1234 #define BNXT_FEC_AUTONEG_CAP	PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED
1235 #define BNXT_FEC_AUTONEG	PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED
1236 #define BNXT_FEC_ENC_BASE_R_CAP	\
1237 	PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED
1238 #define BNXT_FEC_ENC_BASE_R	PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED
1239 #define BNXT_FEC_ENC_RS_CAP	\
1240 	PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED
1241 #define BNXT_FEC_ENC_LLRS_CAP	\
1242 	(PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_SUPPORTED |	\
1243 	 PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_SUPPORTED)
1244 #define BNXT_FEC_ENC_RS		\
1245 	(PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED |	\
1246 	 PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ENABLED |	\
1247 	 PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_IEEE_ENABLED)
1248 #define BNXT_FEC_ENC_LLRS	\
1249 	(PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_ENABLED |	\
1250 	 PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_ENABLED)
1251 
1252 	/* copy of requested setting from ethtool cmd */
1253 	u8			autoneg;
1254 #define BNXT_AUTONEG_SPEED		1
1255 #define BNXT_AUTONEG_FLOW_CTRL		2
1256 	u8			req_signal_mode;
1257 #define BNXT_SIG_MODE_NRZ	PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ
1258 #define BNXT_SIG_MODE_PAM4	PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4
1259 	u8			req_duplex;
1260 	u8			req_flow_ctrl;
1261 	u16			req_link_speed;
1262 	u16			advertising;	/* user adv setting */
1263 	u16			advertising_pam4;
1264 	bool			force_link_chng;
1265 
1266 	bool			phy_retry;
1267 	unsigned long		phy_retry_expires;
1268 
1269 	/* a copy of phy_qcfg output used to report link
1270 	 * info to VF
1271 	 */
1272 	struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
1273 };
1274 
1275 #define BNXT_FEC_RS544_ON					\
1276 	 (PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_ENABLE |		\
1277 	  PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_ENABLE)
1278 
1279 #define BNXT_FEC_RS544_OFF					\
1280 	 (PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_DISABLE |	\
1281 	  PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_DISABLE)
1282 
1283 #define BNXT_FEC_RS272_ON					\
1284 	 (PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_ENABLE |		\
1285 	  PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_ENABLE)
1286 
1287 #define BNXT_FEC_RS272_OFF					\
1288 	 (PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_DISABLE |	\
1289 	  PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_DISABLE)
1290 
1291 #define BNXT_PAM4_SUPPORTED(link_info)				\
1292 	((link_info)->support_pam4_speeds)
1293 
1294 #define BNXT_FEC_RS_ON(link_info)				\
1295 	(PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE |		\
1296 	 PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE |		\
1297 	 (BNXT_PAM4_SUPPORTED(link_info) ?			\
1298 	  (BNXT_FEC_RS544_ON | BNXT_FEC_RS272_OFF) : 0))
1299 
1300 #define BNXT_FEC_LLRS_ON					\
1301 	(PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE |		\
1302 	 PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE |		\
1303 	 BNXT_FEC_RS272_ON | BNXT_FEC_RS544_OFF)
1304 
1305 #define BNXT_FEC_RS_OFF(link_info)				\
1306 	(PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE |		\
1307 	 (BNXT_PAM4_SUPPORTED(link_info) ?			\
1308 	  (BNXT_FEC_RS544_OFF | BNXT_FEC_RS272_OFF) : 0))
1309 
1310 #define BNXT_FEC_BASE_R_ON(link_info)				\
1311 	(PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE |		\
1312 	 BNXT_FEC_RS_OFF(link_info))
1313 
1314 #define BNXT_FEC_ALL_OFF(link_info)				\
1315 	(PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE |		\
1316 	 BNXT_FEC_RS_OFF(link_info))
1317 
1318 #define BNXT_MAX_QUEUE	8
1319 
1320 struct bnxt_queue_info {
1321 	u8	queue_id;
1322 	u8	queue_profile;
1323 };
1324 
1325 #define BNXT_MAX_LED			4
1326 
1327 struct bnxt_led_info {
1328 	u8	led_id;
1329 	u8	led_type;
1330 	u8	led_group_id;
1331 	u8	unused;
1332 	__le16	led_state_caps;
1333 #define BNXT_LED_ALT_BLINK_CAP(x)	((x) &	\
1334 	cpu_to_le16(PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED))
1335 
1336 	__le16	led_color_caps;
1337 };
1338 
1339 #define BNXT_MAX_TEST	8
1340 
1341 struct bnxt_test_info {
1342 	u8 offline_mask;
1343 	u16 timeout;
1344 	char string[BNXT_MAX_TEST][ETH_GSTRING_LEN];
1345 };
1346 
1347 #define CHIMP_REG_VIEW_ADDR				\
1348 	((bp->flags & BNXT_FLAG_CHIP_P5) ? 0x80000000 : 0xb1000000)
1349 
1350 #define BNXT_GRCPF_REG_CHIMP_COMM		0x0
1351 #define BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER	0x100
1352 #define BNXT_GRCPF_REG_WINDOW_BASE_OUT		0x400
1353 #define BNXT_CAG_REG_LEGACY_INT_STATUS		0x4014
1354 #define BNXT_CAG_REG_BASE			0x300000
1355 
1356 #define BNXT_GRC_REG_STATUS_P5			0x520
1357 
1358 #define BNXT_GRCPF_REG_KONG_COMM		0xA00
1359 #define BNXT_GRCPF_REG_KONG_COMM_TRIGGER	0xB00
1360 
1361 #define BNXT_GRC_REG_CHIP_NUM			0x48
1362 #define BNXT_GRC_REG_BASE			0x260000
1363 
1364 #define BNXT_TS_REG_TIMESYNC_TS0_LOWER		0x640180c
1365 #define BNXT_TS_REG_TIMESYNC_TS0_UPPER		0x6401810
1366 
1367 #define BNXT_GRC_BASE_MASK			0xfffff000
1368 #define BNXT_GRC_OFFSET_MASK			0x00000ffc
1369 
1370 struct bnxt_tc_flow_stats {
1371 	u64		packets;
1372 	u64		bytes;
1373 };
1374 
1375 #ifdef CONFIG_BNXT_FLOWER_OFFLOAD
1376 struct bnxt_flower_indr_block_cb_priv {
1377 	struct net_device *tunnel_netdev;
1378 	struct bnxt *bp;
1379 	struct list_head list;
1380 };
1381 #endif
1382 
1383 struct bnxt_tc_info {
1384 	bool				enabled;
1385 
1386 	/* hash table to store TC offloaded flows */
1387 	struct rhashtable		flow_table;
1388 	struct rhashtable_params	flow_ht_params;
1389 
1390 	/* hash table to store L2 keys of TC flows */
1391 	struct rhashtable		l2_table;
1392 	struct rhashtable_params	l2_ht_params;
1393 	/* hash table to store L2 keys for TC tunnel decap */
1394 	struct rhashtable		decap_l2_table;
1395 	struct rhashtable_params	decap_l2_ht_params;
1396 	/* hash table to store tunnel decap entries */
1397 	struct rhashtable		decap_table;
1398 	struct rhashtable_params	decap_ht_params;
1399 	/* hash table to store tunnel encap entries */
1400 	struct rhashtable		encap_table;
1401 	struct rhashtable_params	encap_ht_params;
1402 
1403 	/* lock to atomically add/del an l2 node when a flow is
1404 	 * added or deleted.
1405 	 */
1406 	struct mutex			lock;
1407 
1408 	/* Fields used for batching stats query */
1409 	struct rhashtable_iter		iter;
1410 #define BNXT_FLOW_STATS_BATCH_MAX	10
1411 	struct bnxt_tc_stats_batch {
1412 		void			  *flow_node;
1413 		struct bnxt_tc_flow_stats hw_stats;
1414 	} stats_batch[BNXT_FLOW_STATS_BATCH_MAX];
1415 
1416 	/* Stat counter mask (width) */
1417 	u64				bytes_mask;
1418 	u64				packets_mask;
1419 };
1420 
1421 struct bnxt_vf_rep_stats {
1422 	u64			packets;
1423 	u64			bytes;
1424 	u64			dropped;
1425 };
1426 
1427 struct bnxt_vf_rep {
1428 	struct bnxt			*bp;
1429 	struct net_device		*dev;
1430 	struct metadata_dst		*dst;
1431 	u16				vf_idx;
1432 	u16				tx_cfa_action;
1433 	u16				rx_cfa_code;
1434 
1435 	struct bnxt_vf_rep_stats	rx_stats;
1436 	struct bnxt_vf_rep_stats	tx_stats;
1437 };
1438 
1439 #define PTU_PTE_VALID             0x1UL
1440 #define PTU_PTE_LAST              0x2UL
1441 #define PTU_PTE_NEXT_TO_LAST      0x4UL
1442 
1443 #define MAX_CTX_PAGES	(BNXT_PAGE_SIZE / 8)
1444 #define MAX_CTX_TOTAL_PAGES	(MAX_CTX_PAGES * MAX_CTX_PAGES)
1445 
1446 struct bnxt_ctx_pg_info {
1447 	u32		entries;
1448 	u32		nr_pages;
1449 	void		*ctx_pg_arr[MAX_CTX_PAGES];
1450 	dma_addr_t	ctx_dma_arr[MAX_CTX_PAGES];
1451 	struct bnxt_ring_mem_info ring_mem;
1452 	struct bnxt_ctx_pg_info **ctx_pg_tbl;
1453 };
1454 
1455 #define BNXT_MAX_TQM_SP_RINGS		1
1456 #define BNXT_MAX_TQM_FP_RINGS		8
1457 #define BNXT_MAX_TQM_RINGS		\
1458 	(BNXT_MAX_TQM_SP_RINGS + BNXT_MAX_TQM_FP_RINGS)
1459 
1460 #define BNXT_BACKING_STORE_CFG_LEGACY_LEN	256
1461 
1462 #define BNXT_SET_CTX_PAGE_ATTR(attr)					\
1463 do {									\
1464 	if (BNXT_PAGE_SIZE == 0x2000)					\
1465 		attr = FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8K;	\
1466 	else if (BNXT_PAGE_SIZE == 0x10000)				\
1467 		attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_64K;	\
1468 	else								\
1469 		attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K;	\
1470 } while (0)
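/* BNXT_SET_CTX_PAGE_ATTR() translates the compile-time BNXT_PAGE_SIZE
 * (4K, 8K or 64K) into the matching page-size encoding used in the
 * FUNC_BACKING_STORE_CFG request when configuring context memory.
 */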
1471 
1472 struct bnxt_ctx_mem_info {
1473 	u32	qp_max_entries;
1474 	u16	qp_min_qp1_entries;
1475 	u16	qp_max_l2_entries;
1476 	u16	qp_entry_size;
1477 	u16	srq_max_l2_entries;
1478 	u32	srq_max_entries;
1479 	u16	srq_entry_size;
1480 	u16	cq_max_l2_entries;
1481 	u32	cq_max_entries;
1482 	u16	cq_entry_size;
1483 	u16	vnic_max_vnic_entries;
1484 	u16	vnic_max_ring_table_entries;
1485 	u16	vnic_entry_size;
1486 	u32	stat_max_entries;
1487 	u16	stat_entry_size;
1488 	u16	tqm_entry_size;
1489 	u32	tqm_min_entries_per_ring;
1490 	u32	tqm_max_entries_per_ring;
1491 	u32	mrav_max_entries;
1492 	u16	mrav_entry_size;
1493 	u16	tim_entry_size;
1494 	u32	tim_max_entries;
1495 	u16	mrav_num_entries_units;
1496 	u8	tqm_entries_multiple;
1497 	u8	tqm_fp_rings_count;
1498 
1499 	u32	flags;
1500 	#define BNXT_CTX_FLAG_INITED	0x01
1501 
1502 	struct bnxt_ctx_pg_info qp_mem;
1503 	struct bnxt_ctx_pg_info srq_mem;
1504 	struct bnxt_ctx_pg_info cq_mem;
1505 	struct bnxt_ctx_pg_info vnic_mem;
1506 	struct bnxt_ctx_pg_info stat_mem;
1507 	struct bnxt_ctx_pg_info mrav_mem;
1508 	struct bnxt_ctx_pg_info tim_mem;
1509 	struct bnxt_ctx_pg_info *tqm_mem[BNXT_MAX_TQM_RINGS];
1510 
1511 #define BNXT_CTX_MEM_INIT_QP	0
1512 #define BNXT_CTX_MEM_INIT_SRQ	1
1513 #define BNXT_CTX_MEM_INIT_CQ	2
1514 #define BNXT_CTX_MEM_INIT_VNIC	3
1515 #define BNXT_CTX_MEM_INIT_STAT	4
1516 #define BNXT_CTX_MEM_INIT_MRAV	5
1517 #define BNXT_CTX_MEM_INIT_MAX	6
1518 	struct bnxt_mem_init	mem_init[BNXT_CTX_MEM_INIT_MAX];
1519 };
1520 
1521 struct bnxt_fw_health {
1522 	u32 flags;
1523 	u32 polling_dsecs;
1524 	u32 master_func_wait_dsecs;
1525 	u32 normal_func_wait_dsecs;
1526 	u32 post_reset_wait_dsecs;
1527 	u32 post_reset_max_wait_dsecs;
1528 	u32 regs[4];
1529 	u32 mapped_regs[4];
1530 #define BNXT_FW_HEALTH_REG		0
1531 #define BNXT_FW_HEARTBEAT_REG		1
1532 #define BNXT_FW_RESET_CNT_REG		2
1533 #define BNXT_FW_RESET_INPROG_REG	3
1534 	u32 fw_reset_inprog_reg_mask;
1535 	u32 last_fw_heartbeat;
1536 	u32 last_fw_reset_cnt;
1537 	u8 enabled:1;
1538 	u8 master:1;
1539 	u8 fatal:1;
1540 	u8 status_reliable:1;
1541 	u8 tmr_multiplier;
1542 	u8 tmr_counter;
1543 	u8 fw_reset_seq_cnt;
1544 	u32 fw_reset_seq_regs[16];
1545 	u32 fw_reset_seq_vals[16];
1546 	u32 fw_reset_seq_delay_msec[16];
1547 	u32 echo_req_data1;
1548 	u32 echo_req_data2;
1549 	struct devlink_health_reporter	*fw_reporter;
1550 	struct devlink_health_reporter *fw_reset_reporter;
1551 	struct devlink_health_reporter *fw_fatal_reporter;
1552 };
1553 
1554 struct bnxt_fw_reporter_ctx {
1555 	unsigned long sp_event;
1556 };
1557 
1558 #define BNXT_FW_HEALTH_REG_TYPE_MASK	3
1559 #define BNXT_FW_HEALTH_REG_TYPE_CFG	0
1560 #define BNXT_FW_HEALTH_REG_TYPE_GRC	1
1561 #define BNXT_FW_HEALTH_REG_TYPE_BAR0	2
1562 #define BNXT_FW_HEALTH_REG_TYPE_BAR1	3
1563 
1564 #define BNXT_FW_HEALTH_REG_TYPE(reg)	((reg) & BNXT_FW_HEALTH_REG_TYPE_MASK)
1565 #define BNXT_FW_HEALTH_REG_OFF(reg)	((reg) & ~BNXT_FW_HEALTH_REG_TYPE_MASK)
1566 
1567 #define BNXT_FW_HEALTH_WIN_BASE		0x3000
1568 #define BNXT_FW_HEALTH_WIN_MAP_OFF	8
1569 
1570 #define BNXT_FW_HEALTH_WIN_OFF(reg)	(BNXT_FW_HEALTH_WIN_BASE +	\
1571 					 ((reg) & BNXT_GRC_OFFSET_MASK))
1572 
1573 #define BNXT_FW_STATUS_HEALTH_MSK	0xffff
1574 #define BNXT_FW_STATUS_HEALTHY		0x8000
1575 #define BNXT_FW_STATUS_SHUTDOWN		0x100000
1576 #define BNXT_FW_STATUS_RECOVERING	0x400000
1577 
1578 #define BNXT_FW_IS_HEALTHY(sts)		(((sts) & BNXT_FW_STATUS_HEALTH_MSK) ==\
1579 					 BNXT_FW_STATUS_HEALTHY)
1580 
1581 #define BNXT_FW_IS_BOOTING(sts)		(((sts) & BNXT_FW_STATUS_HEALTH_MSK) < \
1582 					 BNXT_FW_STATUS_HEALTHY)
1583 
1584 #define BNXT_FW_IS_ERR(sts)		(((sts) & BNXT_FW_STATUS_HEALTH_MSK) > \
1585 					 BNXT_FW_STATUS_HEALTHY)
1586 
1587 #define BNXT_FW_IS_RECOVERING(sts)	(BNXT_FW_IS_ERR(sts) &&		       \
1588 					 ((sts) & BNXT_FW_STATUS_RECOVERING))
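/* The low 16 bits of the firmware status register hold a health code:
 * exactly BNXT_FW_STATUS_HEALTHY (0x8000) means the firmware is up,
 * smaller values mean it is still booting, and larger values indicate an
 * error state, optionally qualified by the RECOVERING flag in the upper
 * bits.
 */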
1589 
1590 #define BNXT_FW_RETRY			5
1591 #define BNXT_FW_IF_RETRY		10
1592 
1593 struct bnxt {
1594 	void __iomem		*bar0;
1595 	void __iomem		*bar1;
1596 	void __iomem		*bar2;
1597 
1598 	u32			reg_base;
1599 	u16			chip_num;
1600 #define CHIP_NUM_57301		0x16c8
1601 #define CHIP_NUM_57302		0x16c9
1602 #define CHIP_NUM_57304		0x16ca
1603 #define CHIP_NUM_58700		0x16cd
1604 #define CHIP_NUM_57402		0x16d0
1605 #define CHIP_NUM_57404		0x16d1
1606 #define CHIP_NUM_57406		0x16d2
1607 #define CHIP_NUM_57407		0x16d5
1608 
1609 #define CHIP_NUM_57311		0x16ce
1610 #define CHIP_NUM_57312		0x16cf
1611 #define CHIP_NUM_57314		0x16df
1612 #define CHIP_NUM_57317		0x16e0
1613 #define CHIP_NUM_57412		0x16d6
1614 #define CHIP_NUM_57414		0x16d7
1615 #define CHIP_NUM_57416		0x16d8
1616 #define CHIP_NUM_57417		0x16d9
1617 #define CHIP_NUM_57412L		0x16da
1618 #define CHIP_NUM_57414L		0x16db
1619 
1620 #define CHIP_NUM_5745X		0xd730
1621 #define CHIP_NUM_57452		0xc452
1622 #define CHIP_NUM_57454		0xc454
1623 
1624 #define CHIP_NUM_57508		0x1750
1625 #define CHIP_NUM_57504		0x1751
1626 #define CHIP_NUM_57502		0x1752
1627 
1628 #define CHIP_NUM_58802		0xd802
1629 #define CHIP_NUM_58804		0xd804
1630 #define CHIP_NUM_58808		0xd808
1631 
1632 	u8			chip_rev;
1633 
1634 #define CHIP_NUM_58818		0xd818
1635 
1636 #define BNXT_CHIP_NUM_5730X(chip_num)		\
1637 	((chip_num) >= CHIP_NUM_57301 &&	\
1638 	 (chip_num) <= CHIP_NUM_57304)
1639 
1640 #define BNXT_CHIP_NUM_5740X(chip_num)		\
1641 	(((chip_num) >= CHIP_NUM_57402 &&	\
1642 	  (chip_num) <= CHIP_NUM_57406) ||	\
1643 	 (chip_num) == CHIP_NUM_57407)
1644 
1645 #define BNXT_CHIP_NUM_5731X(chip_num)		\
1646 	((chip_num) == CHIP_NUM_57311 ||	\
1647 	 (chip_num) == CHIP_NUM_57312 ||	\
1648 	 (chip_num) == CHIP_NUM_57314 ||	\
1649 	 (chip_num) == CHIP_NUM_57317)
1650 
1651 #define BNXT_CHIP_NUM_5741X(chip_num)		\
1652 	((chip_num) >= CHIP_NUM_57412 &&	\
1653 	 (chip_num) <= CHIP_NUM_57414L)
1654 
1655 #define BNXT_CHIP_NUM_58700(chip_num)		\
1656 	 ((chip_num) == CHIP_NUM_58700)
1657 
1658 #define BNXT_CHIP_NUM_5745X(chip_num)		\
1659 	((chip_num) == CHIP_NUM_5745X ||	\
1660 	 (chip_num) == CHIP_NUM_57452 ||	\
1661 	 (chip_num) == CHIP_NUM_57454)
1662 
1663 
1664 #define BNXT_CHIP_NUM_57X0X(chip_num)		\
1665 	(BNXT_CHIP_NUM_5730X(chip_num) || BNXT_CHIP_NUM_5740X(chip_num))
1666 
1667 #define BNXT_CHIP_NUM_57X1X(chip_num)		\
1668 	(BNXT_CHIP_NUM_5731X(chip_num) || BNXT_CHIP_NUM_5741X(chip_num))
1669 
1670 #define BNXT_CHIP_NUM_588XX(chip_num)		\
1671 	((chip_num) == CHIP_NUM_58802 ||	\
1672 	 (chip_num) == CHIP_NUM_58804 ||        \
1673 	 (chip_num) == CHIP_NUM_58808)
1674 
1675 #define BNXT_VPD_FLD_LEN	32
1676 	char			board_partno[BNXT_VPD_FLD_LEN];
1677 	char			board_serialno[BNXT_VPD_FLD_LEN];
1678 
1679 	struct net_device	*dev;
1680 	struct pci_dev		*pdev;
1681 
1682 	atomic_t		intr_sem;
1683 
1684 	u32			flags;
1685 	#define BNXT_FLAG_CHIP_P5	0x1
1686 	#define BNXT_FLAG_VF		0x2
1687 	#define BNXT_FLAG_LRO		0x4
1688 #ifdef CONFIG_INET
1689 	#define BNXT_FLAG_GRO		0x8
1690 #else
1691 	/* Cannot support hardware GRO if CONFIG_INET is not set */
1692 	#define BNXT_FLAG_GRO		0x0
1693 #endif
1694 	#define BNXT_FLAG_TPA		(BNXT_FLAG_LRO | BNXT_FLAG_GRO)
1695 	#define BNXT_FLAG_JUMBO		0x10
1696 	#define BNXT_FLAG_STRIP_VLAN	0x20
1697 	#define BNXT_FLAG_AGG_RINGS	(BNXT_FLAG_JUMBO | BNXT_FLAG_GRO | \
1698 					 BNXT_FLAG_LRO)
1699 	#define BNXT_FLAG_USING_MSIX	0x40
1700 	#define BNXT_FLAG_MSIX_CAP	0x80
1701 	#define BNXT_FLAG_RFS		0x100
1702 	#define BNXT_FLAG_SHARED_RINGS	0x200
1703 	#define BNXT_FLAG_PORT_STATS	0x400
1704 	#define BNXT_FLAG_UDP_RSS_CAP	0x800
1705 	#define BNXT_FLAG_NEW_RSS_CAP	0x2000
1706 	#define BNXT_FLAG_WOL_CAP	0x4000
1707 	#define BNXT_FLAG_ROCEV1_CAP	0x8000
1708 	#define BNXT_FLAG_ROCEV2_CAP	0x10000
1709 	#define BNXT_FLAG_ROCE_CAP	(BNXT_FLAG_ROCEV1_CAP |	\
1710 					 BNXT_FLAG_ROCEV2_CAP)
1711 	#define BNXT_FLAG_NO_AGG_RINGS	0x20000
1712 	#define BNXT_FLAG_RX_PAGE_MODE	0x40000
1713 	#define BNXT_FLAG_CHIP_SR2	0x80000
1714 	#define BNXT_FLAG_MULTI_HOST	0x100000
1715 	#define BNXT_FLAG_DSN_VALID	0x200000
1716 	#define BNXT_FLAG_DOUBLE_DB	0x400000
1717 	#define BNXT_FLAG_CHIP_NITRO_A0	0x1000000
1718 	#define BNXT_FLAG_DIM		0x2000000
1719 	#define BNXT_FLAG_ROCE_MIRROR_CAP	0x4000000
1720 	#define BNXT_FLAG_PORT_STATS_EXT	0x10000000
1721 
1722 	#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA |		\
1723 					    BNXT_FLAG_RFS |		\
1724 					    BNXT_FLAG_STRIP_VLAN)
1725 
1726 #define BNXT_PF(bp)		(!((bp)->flags & BNXT_FLAG_VF))
1727 #define BNXT_VF(bp)		((bp)->flags & BNXT_FLAG_VF)
1728 #define BNXT_NPAR(bp)		((bp)->port_partition_type)
1729 #define BNXT_MH(bp)		((bp)->flags & BNXT_FLAG_MULTI_HOST)
1730 #define BNXT_SINGLE_PF(bp)	(BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp))
1731 #define BNXT_SH_PORT_CFG_OK(bp)	(BNXT_PF(bp) &&				\
1732 				 ((bp)->phy_flags & BNXT_PHY_FL_SHARED_PORT_CFG))
1733 #define BNXT_PHY_CFG_ABLE(bp)	((BNXT_SINGLE_PF(bp) ||			\
1734 				  BNXT_SH_PORT_CFG_OK(bp)) &&		\
1735 				 (bp)->link_info.phy_state == BNXT_PHY_STATE_ENABLED)
1736 #define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
1737 #define BNXT_RX_PAGE_MODE(bp)	((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
1738 #define BNXT_SUPPORTS_TPA(bp)	(!BNXT_CHIP_TYPE_NITRO_A0(bp) &&	\
1739 				 (!((bp)->flags & BNXT_FLAG_CHIP_P5) ||	\
1740 				  (bp)->max_tpa_v2) && !is_kdump_kernel())
1741 
1742 #define BNXT_CHIP_SR2(bp)			\
1743 	((bp)->chip_num == CHIP_NUM_58818)
1744 
1745 #define BNXT_CHIP_P5_THOR(bp)			\
1746 	((bp)->chip_num == CHIP_NUM_57508 ||	\
1747 	 (bp)->chip_num == CHIP_NUM_57504 ||	\
1748 	 (bp)->chip_num == CHIP_NUM_57502)
1749 
1750 /* Chip class phase 5 */
1751 #define BNXT_CHIP_P5(bp)			\
1752 	(BNXT_CHIP_P5_THOR(bp) || BNXT_CHIP_SR2(bp))
1753 
1754 /* Chip class phase 4.x */
1755 #define BNXT_CHIP_P4(bp)			\
1756 	(BNXT_CHIP_NUM_57X1X((bp)->chip_num) ||	\
1757 	 BNXT_CHIP_NUM_5745X((bp)->chip_num) ||	\
1758 	 BNXT_CHIP_NUM_588XX((bp)->chip_num) ||	\
1759 	 (BNXT_CHIP_NUM_58700((bp)->chip_num) &&	\
1760 	  !BNXT_CHIP_TYPE_NITRO_A0(bp)))
1761 
1762 #define BNXT_CHIP_P4_PLUS(bp)			\
1763 	(BNXT_CHIP_P4(bp) || BNXT_CHIP_P5(bp))
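	/* Illustrative classification using the helpers above, with chip IDs taken
	 * from the CHIP_NUM_* list in this struct:
	 *
	 *	a device with chip_num == 0x1750 (CHIP_NUM_57508) satisfies
	 *	  BNXT_CHIP_P5_THOR(bp), hence BNXT_CHIP_P5(bp) and BNXT_CHIP_P4_PLUS(bp)
	 *	a device with chip_num == 0xd818 (CHIP_NUM_58818) satisfies
	 *	  BNXT_CHIP_SR2(bp), hence BNXT_CHIP_P5(bp) and BNXT_CHIP_P4_PLUS(bp)
	 *	a device with chip_num == 0x16d7 (CHIP_NUM_57414) falls in the 5741X range,
	 *	  hence BNXT_CHIP_P4(bp) and BNXT_CHIP_P4_PLUS(bp)
	 */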
1764 
1765 	struct bnxt_en_dev	*edev;
1766 
1767 	struct bnxt_napi	**bnapi;
1768 
1769 	struct bnxt_rx_ring_info	*rx_ring;
1770 	struct bnxt_tx_ring_info	*tx_ring;
1771 	u16			*tx_ring_map;
1772 
1773 	struct sk_buff *	(*gro_func)(struct bnxt_tpa_info *, int, int,
1774 					    struct sk_buff *);
1775 
1776 	struct sk_buff *	(*rx_skb_func)(struct bnxt *,
1777 					       struct bnxt_rx_ring_info *,
1778 					       u16, void *, u8 *, dma_addr_t,
1779 					       unsigned int);
1780 
1781 	u16			max_tpa_v2;
1782 	u16			max_tpa;
1783 	u32			rx_buf_size;
1784 	u32			rx_buf_use_size;	/* usable size */
1785 	u16			rx_offset;
1786 	u16			rx_dma_offset;
1787 	enum dma_data_direction	rx_dir;
1788 	u32			rx_ring_size;
1789 	u32			rx_agg_ring_size;
1790 	u32			rx_copy_thresh;
1791 	u32			rx_ring_mask;
1792 	u32			rx_agg_ring_mask;
1793 	int			rx_nr_pages;
1794 	int			rx_agg_nr_pages;
1795 	int			rx_nr_rings;
1796 	int			rsscos_nr_ctxs;
1797 
1798 	u32			tx_ring_size;
1799 	u32			tx_ring_mask;
1800 	int			tx_nr_pages;
1801 	int			tx_nr_rings;
1802 	int			tx_nr_rings_per_tc;
1803 	int			tx_nr_rings_xdp;
1804 
1805 	int			tx_wake_thresh;
1806 	int			tx_push_thresh;
1807 	int			tx_push_size;
1808 
1809 	u32			cp_ring_size;
1810 	u32			cp_ring_mask;
1811 	u32			cp_bit;
1812 	int			cp_nr_pages;
1813 	int			cp_nr_rings;
1814 
1815 	/* grp_info indexed by completion ring index */
1816 	struct bnxt_ring_grp_info	*grp_info;
1817 	struct bnxt_vnic_info	*vnic_info;
1818 	int			nr_vnics;
1819 	u16			*rss_indir_tbl;
1820 	u16			rss_indir_tbl_entries;
1821 	u32			rss_hash_cfg;
1822 
1823 	u16			max_mtu;
1824 	u8			max_tc;
1825 	u8			max_lltc;	/* lossless TCs */
1826 	struct bnxt_queue_info	q_info[BNXT_MAX_QUEUE];
1827 	u8			tc_to_qidx[BNXT_MAX_QUEUE];
1828 	u8			q_ids[BNXT_MAX_QUEUE];
1829 	u8			max_q;
1830 
1831 	unsigned int		current_interval;
1832 #define BNXT_TIMER_INTERVAL	HZ
1833 
1834 	struct timer_list	timer;
1835 
1836 	unsigned long		state;
1837 #define BNXT_STATE_OPEN		0
1838 #define BNXT_STATE_IN_SP_TASK	1
1839 #define BNXT_STATE_READ_STATS	2
1840 #define BNXT_STATE_FW_RESET_DET 3
1841 #define BNXT_STATE_IN_FW_RESET	4
1842 #define BNXT_STATE_ABORT_ERR	5
1843 #define BNXT_STATE_FW_FATAL_COND	6
1844 #define BNXT_STATE_DRV_REGISTERED	7
1845 #define BNXT_STATE_PCI_CHANNEL_IO_FROZEN	8
1846 #define BNXT_STATE_NAPI_DISABLED	9
1847 #define BNXT_STATE_HALF_OPEN		15	/* For offline ethtool tests */
1848 
1849 #define BNXT_NO_FW_ACCESS(bp)					\
1850 	(test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state) ||	\
1851 	 pci_channel_offline((bp)->pdev))
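	/* The bits above are used with the standard kernel bitops on bp->state; a
	 * minimal sketch of a typical check (hypothetical call site):
	 *
	 *	if (!test_bit(BNXT_STATE_OPEN, &bp->state) || BNXT_NO_FW_ACCESS(bp))
	 *		return;		// device closed or firmware unreachable, skip the operation
	 */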
1852 
1853 	struct bnxt_irq	*irq_tbl;
1854 	int			total_irqs;
1855 	u8			mac_addr[ETH_ALEN];
1856 
1857 #ifdef CONFIG_BNXT_DCB
1858 	struct ieee_pfc		*ieee_pfc;
1859 	struct ieee_ets		*ieee_ets;
1860 	u8			dcbx_cap;
1861 	u8			default_pri;
1862 	u8			max_dscp_value;
1863 #endif /* CONFIG_BNXT_DCB */
1864 
1865 	u32			msg_enable;
1866 
1867 	u32			fw_cap;
1868 	#define BNXT_FW_CAP_SHORT_CMD			0x00000001
1869 	#define BNXT_FW_CAP_LLDP_AGENT			0x00000002
1870 	#define BNXT_FW_CAP_DCBX_AGENT			0x00000004
1871 	#define BNXT_FW_CAP_NEW_RM			0x00000008
1872 	#define BNXT_FW_CAP_IF_CHANGE			0x00000010
1873 	#define BNXT_FW_CAP_KONG_MB_CHNL		0x00000080
1874 	#define BNXT_FW_CAP_OVS_64BIT_HANDLE		0x00000400
1875 	#define BNXT_FW_CAP_TRUSTED_VF			0x00000800
1876 	#define BNXT_FW_CAP_ERROR_RECOVERY		0x00002000
1877 	#define BNXT_FW_CAP_PKG_VER			0x00004000
1878 	#define BNXT_FW_CAP_CFA_ADV_FLOW		0x00008000
1879 	#define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2	0x00010000
1880 	#define BNXT_FW_CAP_PCIE_STATS_SUPPORTED	0x00020000
1881 	#define BNXT_FW_CAP_EXT_STATS_SUPPORTED		0x00040000
1882 	#define BNXT_FW_CAP_ERR_RECOVER_RELOAD		0x00100000
1883 	#define BNXT_FW_CAP_HOT_RESET			0x00200000
1884 	#define BNXT_FW_CAP_VLAN_RX_STRIP		0x01000000
1885 	#define BNXT_FW_CAP_VLAN_TX_INSERT		0x02000000
1886 	#define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED	0x04000000
1887 	#define BNXT_FW_CAP_PTP_PPS			0x10000000
1888 	#define BNXT_FW_CAP_RING_MONITOR		0x40000000
1889 
1890 #define BNXT_NEW_RM(bp)		((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
1891 	u32			hwrm_spec_code;
1892 	u16			hwrm_cmd_seq;
1893 	u16                     hwrm_cmd_kong_seq;
1894 	struct dma_pool		*hwrm_dma_pool;
1895 	struct hlist_head	hwrm_pending_list;
1896 
1897 	struct rtnl_link_stats64	net_stats_prev;
1898 	struct bnxt_stats_mem	port_stats;
1899 	struct bnxt_stats_mem	rx_port_stats_ext;
1900 	struct bnxt_stats_mem	tx_port_stats_ext;
1901 	u16			fw_rx_stats_ext_size;
1902 	u16			fw_tx_stats_ext_size;
1903 	u16			hw_ring_stats_size;
1904 	u8			pri2cos_idx[8];
1905 	u8			pri2cos_valid;
1906 
1907 	u16			hwrm_max_req_len;
1908 	u16			hwrm_max_ext_req_len;
1909 	unsigned int		hwrm_cmd_timeout;
1910 	unsigned int		hwrm_cmd_max_timeout;
1911 	struct mutex		hwrm_cmd_lock;	/* serialize hwrm messages */
1912 	struct hwrm_ver_get_output	ver_resp;
1913 #define FW_VER_STR_LEN		32
1914 #define BC_HWRM_STR_LEN		21
1915 #define PHY_VER_STR_LEN         (FW_VER_STR_LEN - BC_HWRM_STR_LEN)
1916 	char			fw_ver_str[FW_VER_STR_LEN];
1917 	char			hwrm_ver_supp[FW_VER_STR_LEN];
1918 	char			nvm_cfg_ver[FW_VER_STR_LEN];
1919 	u64			fw_ver_code;
1920 #define BNXT_FW_VER_CODE(maj, min, bld, rsv)			\
1921 	((u64)(maj) << 48 | (u64)(min) << 32 | (u64)(bld) << 16 | (rsv))
1922 #define BNXT_FW_MAJ(bp)		((bp)->fw_ver_code >> 48)
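	/* Packing example for the version code (the numbers are made up):
	 * BNXT_FW_VER_CODE(218, 1, 2, 0) == (218ULL << 48) | (1ULL << 32) | (2ULL << 16),
	 * and BNXT_FW_MAJ(bp) recovers 218 from that value.
	 */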
1923 
1924 	u16			vxlan_fw_dst_port_id;
1925 	u16			nge_fw_dst_port_id;
1926 	__be16			vxlan_port;
1927 	__be16			nge_port;
1928 	u8			port_partition_type;
1929 	u8			port_count;
1930 	u16			br_mode;
1931 
1932 	struct bnxt_coal_cap	coal_cap;
1933 	struct bnxt_coal	rx_coal;
1934 	struct bnxt_coal	tx_coal;
1935 
1936 	u32			stats_coal_ticks;
1937 #define BNXT_DEF_STATS_COAL_TICKS	 1000000
1938 #define BNXT_MIN_STATS_COAL_TICKS	  250000
1939 #define BNXT_MAX_STATS_COAL_TICKS	 1000000
1940 
1941 	struct work_struct	sp_task;
1942 	unsigned long		sp_event;
1943 #define BNXT_RX_MASK_SP_EVENT		0
1944 #define BNXT_RX_NTP_FLTR_SP_EVENT	1
1945 #define BNXT_LINK_CHNG_SP_EVENT		2
1946 #define BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT	3
1947 #define BNXT_RESET_TASK_SP_EVENT	6
1948 #define BNXT_RST_RING_SP_EVENT		7
1949 #define BNXT_HWRM_PF_UNLOAD_SP_EVENT	8
1950 #define BNXT_PERIODIC_STATS_SP_EVENT	9
1951 #define BNXT_HWRM_PORT_MODULE_SP_EVENT	10
1952 #define BNXT_RESET_TASK_SILENT_SP_EVENT	11
1953 #define BNXT_LINK_SPEED_CHNG_SP_EVENT	14
1954 #define BNXT_FLOW_STATS_SP_EVENT	15
1955 #define BNXT_UPDATE_PHY_SP_EVENT	16
1956 #define BNXT_RING_COAL_NOW_SP_EVENT	17
1957 #define BNXT_FW_RESET_NOTIFY_SP_EVENT	18
1958 #define BNXT_FW_EXCEPTION_SP_EVENT	19
1959 #define BNXT_LINK_CFG_CHANGE_SP_EVENT	21
1960 #define BNXT_FW_ECHO_REQUEST_SP_EVENT	23
1961 
1962 	struct delayed_work	fw_reset_task;
1963 	int			fw_reset_state;
1964 #define BNXT_FW_RESET_STATE_POLL_VF	1
1965 #define BNXT_FW_RESET_STATE_RESET_FW	2
1966 #define BNXT_FW_RESET_STATE_ENABLE_DEV	3
1967 #define BNXT_FW_RESET_STATE_POLL_FW	4
1968 #define BNXT_FW_RESET_STATE_OPENING	5
1969 #define BNXT_FW_RESET_STATE_POLL_FW_DOWN	6
1970 
1971 	u16			fw_reset_min_dsecs;
1972 #define BNXT_DFLT_FW_RST_MIN_DSECS	20
1973 	u16			fw_reset_max_dsecs;
1974 #define BNXT_DFLT_FW_RST_MAX_DSECS	60
1975 	unsigned long		fw_reset_timestamp;
1976 
1977 	struct bnxt_fw_health	*fw_health;
1978 
1979 	struct bnxt_hw_resc	hw_resc;
1980 	struct bnxt_pf_info	pf;
1981 	struct bnxt_ctx_mem_info	*ctx;
1982 #ifdef CONFIG_BNXT_SRIOV
1983 	int			nr_vfs;
1984 	struct bnxt_vf_info	vf;
1985 	wait_queue_head_t	sriov_cfg_wait;
1986 	bool			sriov_cfg;
1987 #define BNXT_SRIOV_CFG_WAIT_TMO	msecs_to_jiffies(10000)
1988 
1989 	/* lock to protect VF-rep creation/cleanup via
1990 	 * multiple paths such as ->sriov_configure() and
1991 	 * devlink ->eswitch_mode_set()
1992 	 */
1993 	struct mutex		sriov_lock;
1994 #endif
1995 
1996 #if BITS_PER_LONG == 32
1997 	/* ensure atomic 64-bit doorbell writes on 32-bit systems. */
1998 	spinlock_t		db_lock;
1999 #endif
2000 	int			db_size;
2001 
2002 #define BNXT_NTP_FLTR_MAX_FLTR	4096
2003 #define BNXT_NTP_FLTR_HASH_SIZE	512
2004 #define BNXT_NTP_FLTR_HASH_MASK	(BNXT_NTP_FLTR_HASH_SIZE - 1)
2005 	struct hlist_head	ntp_fltr_hash_tbl[BNXT_NTP_FLTR_HASH_SIZE];
2006 	spinlock_t		ntp_fltr_lock;	/* for hash table add, del */
2007 
2008 	unsigned long		*ntp_fltr_bmap;
2009 	int			ntp_fltr_count;
2010 
2011 	/* To protect link related settings during link changes and
2012 	 * ethtool settings changes.
2013 	 */
2014 	struct mutex		link_lock;
2015 	struct bnxt_link_info	link_info;
2016 	struct ethtool_eee	eee;
2017 	u32			lpi_tmr_lo;
2018 	u32			lpi_tmr_hi;
2019 
2020 	/* copied from flags in hwrm_port_phy_qcaps_output */
2021 	u8			phy_flags;
2022 #define BNXT_PHY_FL_EEE_CAP		PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED
2023 #define BNXT_PHY_FL_EXT_LPBK		PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED
2024 #define BNXT_PHY_FL_AN_PHY_LPBK		PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED
2025 #define BNXT_PHY_FL_SHARED_PORT_CFG	PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED
2026 #define BNXT_PHY_FL_PORT_STATS_NO_RESET	PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET
2027 #define BNXT_PHY_FL_NO_PHY_LPBK		PORT_PHY_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED
2028 #define BNXT_PHY_FL_FW_MANAGED_LKDN	PORT_PHY_QCAPS_RESP_FLAGS_FW_MANAGED_LINK_DOWN
2029 #define BNXT_PHY_FL_NO_FCS		PORT_PHY_QCAPS_RESP_FLAGS_NO_FCS
2030 
2031 	u8			num_tests;
2032 	struct bnxt_test_info	*test_info;
2033 
2034 	u8			wol_filter_id;
2035 	u8			wol;
2036 
2037 	u8			num_leds;
2038 	struct bnxt_led_info	leds[BNXT_MAX_LED];
2039 	u16			dump_flag;
2040 #define BNXT_DUMP_LIVE		0
2041 #define BNXT_DUMP_CRASH		1
2042 
2043 	struct bpf_prog		*xdp_prog;
2044 
2045 	struct bnxt_ptp_cfg	*ptp_cfg;
2046 
2047 	/* devlink interface and vf-rep structs */
2048 	struct devlink		*dl;
2049 	struct devlink_port	dl_port;
2050 	enum devlink_eswitch_mode eswitch_mode;
2051 	struct bnxt_vf_rep	**vf_reps; /* array of vf-rep ptrs */
2052 	u16			*cfa_code_map; /* cfa_code -> vf_idx map */
2053 	u8			dsn[8];
2054 	struct bnxt_tc_info	*tc_info;
2055 	struct list_head	tc_indr_block_list;
2056 	struct dentry		*debugfs_pdev;
2057 	struct device		*hwmon_dev;
2058 };
2059 
2060 #define BNXT_NUM_RX_RING_STATS			8
2061 #define BNXT_NUM_TX_RING_STATS			8
2062 #define BNXT_NUM_TPA_RING_STATS			4
2063 #define BNXT_NUM_TPA_RING_STATS_P5		5
2064 #define BNXT_NUM_TPA_RING_STATS_P5_SR2		6
2065 
2066 #define BNXT_RING_STATS_SIZE_P5					\
2067 	((BNXT_NUM_RX_RING_STATS + BNXT_NUM_TX_RING_STATS +	\
2068 	  BNXT_NUM_TPA_RING_STATS_P5) * 8)
2069 
2070 #define BNXT_RING_STATS_SIZE_P5_SR2				\
2071 	((BNXT_NUM_RX_RING_STATS + BNXT_NUM_TX_RING_STATS +	\
2072 	  BNXT_NUM_TPA_RING_STATS_P5_SR2) * 8)
2073 
2074 #define BNXT_GET_RING_STATS64(sw, counter)		\
2075 	(*((sw) + offsetof(struct ctx_hw_stats, counter) / 8))
2076 
2077 #define BNXT_GET_RX_PORT_STATS64(sw, counter)		\
2078 	(*((sw) + offsetof(struct rx_port_stats, counter) / 8))
2079 
2080 #define BNXT_GET_TX_PORT_STATS64(sw, counter)		\
2081 	(*((sw) + offsetof(struct tx_port_stats, counter) / 8))
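/* Sketch of typical usage of the accessors above: "sw" points at a u64 array
 * holding the software copy of a hardware stats block, and the counter is a
 * field name in the matching stats struct. The buffer source and the
 * rx_ucast_pkts field are assumptions here, not guaranteed by this header:
 *
 *	u64 *sw = cpr->stats.sw_stats;				// assumed bnxt_stats_mem layout
 *	u64 rx_ucast = BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
 *
 * The offsetof()/8 arithmetic turns the field's byte offset into an index
 * into that u64 array.
 */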
2082 
2083 #define BNXT_PORT_STATS_SIZE				\
2084 	(sizeof(struct rx_port_stats) + sizeof(struct tx_port_stats) + 1024)
2085 
2086 #define BNXT_TX_PORT_STATS_BYTE_OFFSET			\
2087 	(sizeof(struct rx_port_stats) + 512)
2088 
2089 #define BNXT_RX_STATS_OFFSET(counter)			\
2090 	(offsetof(struct rx_port_stats, counter) / 8)
2091 
2092 #define BNXT_TX_STATS_OFFSET(counter)			\
2093 	((offsetof(struct tx_port_stats, counter) +	\
2094 	  BNXT_TX_PORT_STATS_BYTE_OFFSET) / 8)
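/* The TX offsets account for the TX block being laid out after the RX block
 * plus a 512-byte pad, per BNXT_TX_PORT_STATS_BYTE_OFFSET above. For example,
 * a counter at byte offset 16 within struct tx_port_stats lands at u64 index
 * (16 + sizeof(struct rx_port_stats) + 512) / 8 in the combined buffer
 * (illustrative arithmetic only).
 */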
2095 
2096 #define BNXT_RX_STATS_EXT_OFFSET(counter)		\
2097 	(offsetof(struct rx_port_stats_ext, counter) / 8)
2098 
2099 #define BNXT_TX_STATS_EXT_OFFSET(counter)		\
2100 	(offsetof(struct tx_port_stats_ext, counter) / 8)
2101 
2102 #define BNXT_HW_FEATURE_VLAN_ALL_RX				\
2103 	(NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)
2104 #define BNXT_HW_FEATURE_VLAN_ALL_TX				\
2105 	(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX)
2106 
2107 #define I2C_DEV_ADDR_A0				0xa0
2108 #define I2C_DEV_ADDR_A2				0xa2
2109 #define SFF_DIAG_SUPPORT_OFFSET			0x5c
2110 #define SFF_MODULE_ID_SFP			0x3
2111 #define SFF_MODULE_ID_QSFP			0xc
2112 #define SFF_MODULE_ID_QSFP_PLUS			0xd
2113 #define SFF_MODULE_ID_QSFP28			0x11
2114 #define BNXT_MAX_PHY_I2C_RESP_SIZE		64
2115 
2116 static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
2117 {
2118 	/* Tell compiler to fetch tx indices from memory. */
2119 	barrier();
2120 
2121 	return bp->tx_ring_size -
2122 		((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
2123 }
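/* Worked example of the ring arithmetic above, assuming bp->tx_ring_size = 512
 * and bp->tx_ring_mask = 511 (hypothetical values):
 *
 *	tx_prod = 5, tx_cons = 2   -> (5 - 2) & 511 = 3 in flight, 509 available
 *	tx_prod = 1, tx_cons = 510 -> (1 - 510) & 511 = 3 in flight (wrapped), 509 available
 */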
2124 
2125 static inline void bnxt_writeq(struct bnxt *bp, u64 val,
2126 			       volatile void __iomem *addr)
2127 {
2128 #if BITS_PER_LONG == 32
2129 	spin_lock(&bp->db_lock);
2130 	lo_hi_writeq(val, addr);
2131 	spin_unlock(&bp->db_lock);
2132 #else
2133 	writeq(val, addr);
2134 #endif
2135 }
2136 
2137 static inline void bnxt_writeq_relaxed(struct bnxt *bp, u64 val,
2138 				       volatile void __iomem *addr)
2139 {
2140 #if BITS_PER_LONG == 32
2141 	spin_lock(&bp->db_lock);
2142 	lo_hi_writeq_relaxed(val, addr);
2143 	spin_unlock(&bp->db_lock);
2144 #else
2145 	writeq_relaxed(val, addr);
2146 #endif
2147 }
2148 
2149 /* For TX and RX ring doorbells with no ordering guarantee */
2150 static inline void bnxt_db_write_relaxed(struct bnxt *bp,
2151 					 struct bnxt_db_info *db, u32 idx)
2152 {
2153 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
2154 		bnxt_writeq_relaxed(bp, db->db_key64 | idx, db->doorbell);
2155 	} else {
2156 		u32 db_val = db->db_key32 | idx;
2157 
2158 		writel_relaxed(db_val, db->doorbell);
2159 		if (bp->flags & BNXT_FLAG_DOUBLE_DB)
2160 			writel_relaxed(db_val, db->doorbell);
2161 	}
2162 }
2163 
2164 /* For TX and RX ring doorbells */
2165 static inline void bnxt_db_write(struct bnxt *bp, struct bnxt_db_info *db,
2166 				 u32 idx)
2167 {
2168 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
2169 		bnxt_writeq(bp, db->db_key64 | idx, db->doorbell);
2170 	} else {
2171 		u32 db_val = db->db_key32 | idx;
2172 
2173 		writel(db_val, db->doorbell);
2174 		if (bp->flags & BNXT_FLAG_DOUBLE_DB)
2175 			writel(db_val, db->doorbell);
2176 	}
2177 }
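/* Minimal usage sketch (assuming a TX ring whose bnxt_db_info has already been
 * set up elsewhere in the driver):
 *
 *	// after publishing new TX descriptors at index "prod"
 *	bnxt_db_write(bp, &txr->tx_db, prod);
 *
 * The relaxed variant above may be used when the caller provides its own
 * ordering, e.g. a following barrier or a non-relaxed write.
 */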
2178 
2179 extern const u16 bnxt_lhint_arr[];
2180 
2181 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
2182 		       u16 prod, gfp_t gfp);
2183 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data);
2184 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx);
2185 void bnxt_set_tpa_flags(struct bnxt *bp);
2186 void bnxt_set_ring_params(struct bnxt *);
2187 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
2188 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap,
2189 			    int bmap_size, bool async_only);
2190 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings);
2191 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id);
2192 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings);
2193 int bnxt_nq_rings_in_use(struct bnxt *bp);
2194 int bnxt_hwrm_set_coal(struct bnxt *);
2195 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
2196 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp);
2197 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
2198 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp);
2199 int bnxt_get_avail_msix(struct bnxt *bp, int num);
2200 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init);
2201 void bnxt_tx_disable(struct bnxt *bp);
2202 void bnxt_tx_enable(struct bnxt *bp);
2203 int bnxt_update_link(struct bnxt *bp, bool chng_link_state);
2204 int bnxt_hwrm_set_pause(struct bnxt *);
2205 int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
2206 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp);
2207 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);
2208 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all);
2209 bool bnxt_is_fw_healthy(struct bnxt *bp);
2210 int bnxt_hwrm_fw_set_time(struct bnxt *);
2211 int bnxt_open_nic(struct bnxt *, bool, bool);
2212 int bnxt_half_open_nic(struct bnxt *bp);
2213 void bnxt_half_close_nic(struct bnxt *bp);
2214 int bnxt_close_nic(struct bnxt *, bool, bool);
2215 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
2216 			 u32 *reg_buf);
2217 void bnxt_fw_exception(struct bnxt *bp);
2218 void bnxt_fw_reset(struct bnxt *bp);
2219 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
2220 		     int tx_xdp);
2221 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
2222 int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
2223 int bnxt_restore_pf_fw_resources(struct bnxt *bp);
2224 int bnxt_get_port_parent_id(struct net_device *dev,
2225 			    struct netdev_phys_item_id *ppid);
2226 void bnxt_dim_work(struct work_struct *work);
2227 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi);
2228 
2229 #endif
2230