/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2019 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <net/page_pool.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)
#define BNXT_DEF_MSG_ENABLE	(NETIF_MSG_DRV | NETIF_MSG_HW | \
				 NETIF_MSG_TX_ERR)

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

#define BNXT_TX_PUSH_THRESH 164

enum board_idx {
	BCM57301,
	BCM57302,
	BCM57304,
	BCM57417_NPAR,
	BCM58700,
	BCM57311,
	BCM57312,
	BCM57402,
	BCM57404,
	BCM57406,
	BCM57402_NPAR,
	BCM57407,
	BCM57412,
	BCM57414,
	BCM57416,
	BCM57417,
	BCM57412_NPAR,
	BCM57314,
	BCM57417_SFP,
	BCM57416_SFP,
	BCM57404_NPAR,
	BCM57406_NPAR,
	BCM57407_SFP,
	BCM57407_NPAR,
	BCM57414_NPAR,
	BCM57416_NPAR,
	BCM57452,
	BCM57454,
	BCM5745x_NPAR,
	BCM57508,
	BCM57504,
	BCM57502,
	BCM57508_NPAR,
	BCM57504_NPAR,
	BCM57502_NPAR,
	BCM58802,
	BCM58804,
	BCM58808,
	NETXTREME_E_VF,
	NETXTREME_C_VF,
	NETXTREME_S_VF,
	NETXTREME_C_VF_HV,
	NETXTREME_E_VF_HV,
	NETXTREME_E_P5_VF,
	NETXTREME_E_P5_VF_HV,
};

/* indexed by enum above */
static const struct {
	char *name;
} board_info[] = {
	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
	[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
	[BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
	[BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
	[BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
	[NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
	[NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
	[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
	{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
	{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
	{ PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
	{ PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
	HWRM_FUNC_CFG,
	HWRM_FUNC_VF_CFG,
	HWRM_PORT_PHY_QCFG,
	HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
	ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
	ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
};

static struct workqueue_struct *bnxt_pf_wq;

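/* Return true if the board index refers to a virtual function (VF)
 * variant, including the Hyper-V and P5 VF entries.
 */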
static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
		idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
		idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
		idx == NETXTREME_E_P5_VF_HV);
}

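/* Doorbell helpers.  P5 chips take 64-bit doorbells (writeq) keyed by
 * db_key64; older chips use 32-bit completion ring doorbells with the
 * DB_KEY_CP/DB_IDX_VALID/DB_IRQ_DIS encoding below.
 */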
#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_IRQ_DIS(db)						\
		writel(DB_CP_IRQ_DIS_FLAGS, db)

#define BNXT_DB_CQ(db, idx)						\
	writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx)						\
	writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx)						\
	writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx)					\
	writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)

static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_P5(db, idx);
	else
		BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_ARM_P5(db, idx);
	else
		BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
		       db->doorbell);
	else
		BNXT_DB_CQ(db, idx);
}

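/* TX length hint lookup table, indexed by packet length in 512-byte
 * units (see bnxt_start_xmit(), which shifts the length right by 9
 * before indexing).  The hint gives the hardware the approximate
 * packet size.
 */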
const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

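/* For packets transmitted through a VF representor, the metadata dst
 * carries the hardware port ID used as the CFA action; return 0 for
 * all other packets.
 */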
static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
		return 0;

	return md_dst->u.port_info.port_id;
}

static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			     u16 prod)
{
	bnxt_db_write(bp, &txr->tx_db, prod);
	txr->kick_pending = 0;
}

static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
					  struct bnxt_tx_ring_info *txr,
					  struct netdev_queue *txq)
{
	netif_tx_stop_queue(txq);

	/* netif_tx_stop_queue() must be done before checking
	 * tx index in bnxt_tx_avail() below, because in
	 * bnxt_tx_int(), we update tx index before checking for
	 * netif_tx_queue_stopped().
	 */
	smp_mb();
	if (bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh) {
		netif_tx_wake_queue(txq);
		return false;
	}

	return true;
}

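/* Main transmit entry point.  Small packets on an otherwise empty ring
 * may be written directly through the doorbell ("push" mode);
 * everything else is DMA-mapped and described with long TX BDs, with
 * LSO and checksum offload flags filled in as needed.
 */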
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	u16 prod, last_frag;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		atomic_long_inc(&dev->tx_dropped);
		return NETDEV_TX_OK;
	}

	txq = netdev_get_tx_queue(dev, i);
	txr = &bp->tx_ring[bp->tx_ring_map[i]];
	prod = txr->tx_prod;

	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		/* We must have raced with NAPI cleanup */
		if (net_ratelimit() && txr->kick_pending)
			netif_warn(bp, tx_err, dev,
				   "bnxt: ring busy w/ flush pending!\n");
		if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
			return NETDEV_TX_BUSY;
	}

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd->tx_bd_opaque = prod;

	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = bnxt_xmit_get_cfa_action(skb);
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 8021Q, 8021AD vlan offloads
		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}

	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
		struct tx_push_buffer *tx_push_buf = txr->tx_push;
		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
		void __iomem *db = txr->tx_db.doorbell;
		void *pdata = tx_push_buf->data;
		u64 *end;
		int j, push_len;

		/* Set COAL_NOW to be ready quickly for the next push */
		tx_push->tx_bd_len_flags_type =
			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
					TX_BD_TYPE_LONG_TX_BD |
					TX_BD_FLAGS_LHINT_512_AND_SMALLER |
					TX_BD_FLAGS_COAL_NOW |
					TX_BD_FLAGS_PACKET_END |
					(2 << TX_BD_FLAGS_BD_CNT_SHIFT));

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_push1->tx_bd_hsize_lflags =
					cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		else
			tx_push1->tx_bd_hsize_lflags = 0;

		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
		tx_push1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

		end = pdata + length;
		end = PTR_ALIGN(end, 8) - 1;
		*end = 0;

		skb_copy_from_linear_data(skb, pdata, len);
		pdata += len;
		for (j = 0; j < last_frag; j++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			void *fptr;

			fptr = skb_frag_address_safe(frag);
			if (!fptr)
				goto normal_tx;

			memcpy(pdata, fptr, skb_frag_size(frag));
			pdata += skb_frag_size(frag);
		}

		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
		txbd->tx_bd_haddr = txr->data_mapping;
		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
		memcpy(txbd, tx_push1, sizeof(*txbd));
		prod = NEXT_TX(prod);
		tx_push->doorbell =
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
		txr->tx_prod = prod;

		tx_buf->is_push = 1;
		netdev_tx_sent_queue(txq, skb->len);
		wmb();	/* Sync is_push and byte queue before pushing data */

		push_len = (length + sizeof(*tx_push) + 7) / 8;
		if (push_len > 16) {
			__iowrite64_copy(db, tx_push_buf, 16);
			__iowrite32_copy(db + 4, tx_push_buf + 1,
					 (push_len - 16) << 1);
		} else {
			__iowrite64_copy(db, tx_push_buf, push_len);
		}

		goto tx_done;
	}

normal_tx:
	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad))
			/* SKB already freed. */
			goto tx_kick_pending;
		length = BNXT_MIN_PKT_SIZE;
	}

	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
		goto tx_free;

	dma_unmap_addr_set(tx_buf, mapping, mapping);
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = 0;
	if (skb_is_gso(skb)) {
		u32 hdr_len;

		if (skb->encapsulation)
			hdr_len = skb_inner_network_offset(skb) +
				skb_inner_network_header_len(skb) +
				inner_tcp_hdrlen(skb);
		else
			hdr_len = skb_transport_offset(skb) +
				tcp_hdrlen(skb);

		txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
					TX_BD_FLAGS_T_IPID |
					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
		length = skb_shinfo(skb)->gso_size;
		txbd1->tx_bd_mss = cpu_to_le32(length);
		length += hdr_len;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txbd1->tx_bd_hsize_lflags =
			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		txbd1->tx_bd_mss = 0;
	}

	length >>= 9;
	if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
		dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
				     skb->len);
		i = 0;
		goto tx_dma_error;
	}
	flags |= bnxt_lhint_arr[length];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
	txbd1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
			goto tx_dma_error;

		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_addr_set(tx_buf, mapping, mapping);

		txbd->tx_bd_haddr = cpu_to_le64(mapping);

		flags = len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type =
		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
			    TX_BD_FLAGS_PACKET_END);

	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating doorbell */
	wmb();

	prod = NEXT_TX(prod);
	txr->tx_prod = prod;

	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
		bnxt_txr_db_kick(bp, txr, prod);
	else
		txr->kick_pending = 1;

tx_done:

	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
		if (netdev_xmit_more() && !tx_buf->is_push)
			bnxt_txr_db_kick(bp, txr, prod);

		bnxt_txr_netif_try_stop_queue(bp, txr, txq);
	}
	return NETDEV_TX_OK;

tx_dma_error:
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);
	prod = NEXT_TX(prod);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX(prod);
		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       PCI_DMA_TODEVICE);
	}

tx_free:
	dev_kfree_skb_any(skb);
tx_kick_pending:
	if (txr->kick_pending)
		bnxt_txr_db_kick(bp, txr, txr->tx_prod);
	txr->tx_buf_ring[txr->tx_prod].skb = NULL;
	atomic_long_inc(&dev->tx_dropped);
	return NETDEV_TX_OK;
}

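/* TX completion: unmap and free up to nr_pkts transmitted packets,
 * update the consumer index and BQL counters, and wake the queue if it
 * was stopped and enough descriptors are now free.
 */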
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
	u16 cons = txr->tx_cons;
	struct pci_dev *pdev = bp->pdev;
	int i;
	unsigned int tx_bytes = 0;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int j, last;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = NEXT_TX(cons);
		skb = tx_buf->skb;
		tx_buf->skb = NULL;

		if (tx_buf->is_push) {
			tx_buf->is_push = 0;
			goto next_tx_int;
		}

		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), PCI_DMA_TODEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[cons];
			dma_unmap_page(
				&pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[j]),
				PCI_DMA_TODEVICE);
		}

next_tx_int:
		cons = NEXT_TX(cons);

		tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
	}

	netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
	txr->tx_cons = cons;

	/* Need to make the tx_cons update visible to bnxt_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnxt_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh &&
	    READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
		netif_tx_wake_queue(txq);
}

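/* Allocate and DMA-map a full page from the ring's page_pool for
 * page-mode RX; on mapping failure the page is recycled back to the
 * pool.
 */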
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
					 struct bnxt_rx_ring_info *rxr,
					 gfp_t gfp)
{
	struct device *dev = &bp->pdev->dev;
	struct page *page;

	page = page_pool_dev_alloc_pages(rxr->page_pool);
	if (!page)
		return NULL;

	*mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
				      DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(dev, *mapping)) {
		page_pool_recycle_direct(rxr->page_pool, page);
		return NULL;
	}
	*mapping += bp->rx_dma_offset;
	return page;
}

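/* Allocate and DMA-map a kmalloc'ed RX buffer for normal (non
 * page-mode) operation.
 */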
static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
				       gfp_t gfp)
{
	u8 *data;
	struct pci_dev *pdev = bp->pdev;

	data = kmalloc(bp->rx_buf_size, gfp);
	if (!data)
		return NULL;

	*mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
					bp->rx_buf_use_size, bp->rx_dir,
					DMA_ATTR_WEAK_ORDERING);

	if (dma_mapping_error(&pdev->dev, *mapping)) {
		kfree(data);
		data = NULL;
	}
	return data;
}

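/* Fill one RX ring slot at @prod with a fresh page or data buffer,
 * depending on whether the ring is in page mode.
 */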
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
		       u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
	dma_addr_t mapping;

	if (BNXT_RX_PAGE_MODE(bp)) {
		struct page *page =
			__bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);

		if (!page)
			return -ENOMEM;

		rx_buf->data = page;
		rx_buf->data_ptr = page_address(page) + bp->rx_offset;
	} else {
		u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);

		if (!data)
			return -ENOMEM;

		rx_buf->data = data;
		rx_buf->data_ptr = data + bp->rx_offset;
	}
	rx_buf->mapping = mapping;

	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	return 0;
}

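/* Recycle an RX buffer from the consumer slot back into the current
 * producer slot instead of allocating a new one.
 */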
void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
	u16 prod = rxr->rx_prod;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[prod];
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	prod_rx_buf->data = data;
	prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

	prod_rx_buf->mapping = cons_rx_buf->mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	u16 next, max = rxr->rx_agg_bmap_size;

	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
	if (next >= max)
		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
	return next;
}

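/* Allocate a page (or a BNXT_RX_PAGE_SIZE chunk of a larger page when
 * PAGE_SIZE > BNXT_RX_PAGE_SIZE), DMA-map it, and publish it on the
 * aggregation ring at @prod.
 */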
static inline int bnxt_alloc_rx_page(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd =
		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
	struct pci_dev *pdev = bp->pdev;
	struct page *page;
	dma_addr_t mapping;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	unsigned int offset = 0;

	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
		page = rxr->rx_page;
		if (!page) {
			page = alloc_page(gfp);
			if (!page)
				return -ENOMEM;
			rxr->rx_page = page;
			rxr->rx_page_offset = 0;
		}
		offset = rxr->rx_page_offset;
		rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
		if (rxr->rx_page_offset == PAGE_SIZE)
			rxr->rx_page = NULL;
		else
			get_page(page);
	} else {
		page = alloc_page(gfp);
		if (!page)
			return -ENOMEM;
	}

	mapping = dma_map_page_attrs(&pdev->dev, page, offset,
				     BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
				     DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(&pdev->dev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

	rx_agg_buf->page = page;
	rx_agg_buf->offset = offset;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;
	return 0;
}

static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
				       struct bnxt_cp_ring_info *cpr,
				       u16 cp_cons, u16 curr)
{
	struct rx_agg_cmp *agg;

	cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
	return agg;
}

static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
					      struct bnxt_rx_ring_info *rxr,
					      u16 agg_id, u16 curr)
{
	struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];

	return &tpa_info->agg_arr[curr];
}

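/* Return a run of aggregation buffers, taken from the completion ring
 * or from the TPA agg array, back to the aggregation ring for reuse.
 */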
static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
				   u16 start, u32 agg_bufs, bool tpa)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	bool p5_tpa = false;
	u32 i;

	if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
		p5_tpa = true;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
		struct rx_bd *prod_bd;
		struct page *page;

		if (p5_tpa)
			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
		else
			agg = bnxt_get_agg(bp, cpr, idx, start + i);
		cons = agg->rx_agg_cmp_opaque;
		__clear_bit(cons, rxr->rx_agg_bmap);

		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

		__set_bit(sw_prod, rxr->rx_agg_bmap);
		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
		cons_rx_buf = &rxr->rx_agg_ring[cons];

		/* It is possible for sw_prod to be equal to cons, so
		 * set cons_rx_buf->page to NULL first.
		 */
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;
		prod_rx_buf->page = page;
		prod_rx_buf->offset = cons_rx_buf->offset;

		prod_rx_buf->mapping = cons_rx_buf->mapping;

		prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
		prod_bd->rx_bd_opaque = sw_prod;

		prod = NEXT_RX_AGG(prod);
		sw_prod = NEXT_RX_AGG(sw_prod);
	}
	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;
}

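/* Build an skb for a page-mode RX buffer: copy the header portion into
 * the skb's linear area and attach the rest of the page as a fragment.
 */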
static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
					struct bnxt_rx_ring_info *rxr,
					u16 cons, void *data, u8 *data_ptr,
					dma_addr_t dma_addr,
					unsigned int offset_and_len)
{
	unsigned int payload = offset_and_len >> 16;
	unsigned int len = offset_and_len & 0xffff;
	skb_frag_t *frag;
	struct page *page = data;
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int off, err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}
	dma_addr -= bp->rx_dma_offset;
	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
			     DMA_ATTR_WEAK_ORDERING);
	page_pool_release_page(rxr->page_pool, page);

	if (unlikely(!payload))
		payload = eth_get_headlen(bp->dev, data_ptr, len);

	skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
	if (!skb) {
		__free_page(page);
		return NULL;
	}

	off = (void *)data_ptr - page_address(page);
	skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
	       payload + NET_IP_ALIGN);

	frag = &skb_shinfo(skb)->frags[0];
	skb_frag_size_sub(frag, payload);
	skb_frag_off_add(frag, payload);
	skb->data_len -= payload;
	skb->tail += payload;

	return skb;
}

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr, u16 cons,
				   void *data, u8 *data_ptr,
				   dma_addr_t dma_addr,
				   unsigned int offset_and_len)
{
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}

	skb = build_skb(data, 0);
	dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			       bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
	if (!skb) {
		kfree(data);
		return NULL;
	}

	skb_reserve(skb, bp->rx_offset);
	skb_put(skb, offset_and_len & 0xffff);
	return skb;
}

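/* Attach @agg_bufs aggregation pages to @skb as fragments,
 * replenishing the aggregation ring as we go.  On allocation failure
 * the skb is dropped and the remaining buffers are recycled.
 */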
static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
				     struct bnxt_cp_ring_info *cpr,
				     struct sk_buff *skb, u16 idx,
				     u32 agg_bufs, bool tpa)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	bool p5_tpa = false;
	u32 i;

	if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
		p5_tpa = true;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons, frag_len;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
		struct page *page;
		dma_addr_t mapping;

		if (p5_tpa)
			agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
		else
			agg = bnxt_get_agg(bp, cpr, idx, i);
		cons = agg->rx_agg_cmp_opaque;
		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_ring[cons];
		skb_fill_page_desc(skb, i, cons_rx_buf->page,
				   cons_rx_buf->offset, frag_len);
		__clear_bit(cons, rxr->rx_agg_bmap);

		/* It is possible for bnxt_alloc_rx_page() to allocate
		 * a sw_prod index that equals the cons index, so we
		 * need to clear the cons entry now.
		 */
		mapping = cons_rx_buf->mapping;
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;

		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
			struct skb_shared_info *shinfo;
			unsigned int nr_frags;

			shinfo = skb_shinfo(skb);
			nr_frags = --shinfo->nr_frags;
			__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

			dev_kfree_skb(skb);

			cons_rx_buf->page = page;

			/* Update prod since possibly some pages have been
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
			bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
			return NULL;
		}

		dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
				     PCI_DMA_FROMDEVICE,
				     DMA_ATTR_WEAK_ORDERING);

		skb->data_len += frag_len;
		skb->len += frag_len;
		skb->truesize += PAGE_SIZE;

		prod = NEXT_RX_AGG(prod);
	}
	rxr->rx_agg_prod = prod;
	return skb;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
{
	u16 last;
	struct rx_agg_cmp *agg;

	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
	last = RING_CMP(*raw_cons);
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
	return RX_AGG_CMP_VALID(agg, *raw_cons);
}

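/* Copy a small packet into a freshly allocated skb so the original RX
 * buffer can stay on the ring; used for packets up to rx_copy_thresh.
 */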
static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
					    unsigned int len,
					    dma_addr_t mapping)
{
	struct bnxt *bp = bnapi->bp;
	struct pci_dev *pdev = bp->pdev;
	struct sk_buff *skb;

	skb = napi_alloc_skb(&bnapi->napi, len);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
				bp->rx_dir);

	memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
	       len + NET_IP_ALIGN);

	dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
				   bp->rx_dir);

	skb_put(skb, len);
	return skb;
}

static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			   u32 *raw_cons, void *cmp)
{
	struct rx_cmp *rxcmp = cmp;
	u32 tmp_raw_cons = *raw_cons;
	u8 cmp_type, agg_bufs = 0;

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
			    RX_CMP_AGG_BUFS) >>
			   RX_CMP_AGG_BUFS_SHIFT;
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp *tpa_end = cmp;

		if (bp->flags & BNXT_FLAG_CHIP_P5)
			return 0;

		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
	}

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;
	}
	*raw_cons = tmp_raw_cons;
	return 0;
}

static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
{
	if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
		return;

	if (BNXT_PF(bp))
		queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
	else
		schedule_delayed_work(&bp->fw_reset_task, delay);
}

static void bnxt_queue_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		queue_work(bnxt_pf_wq, &bp->sp_task);
	else
		schedule_work(&bp->sp_task);
}

static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;
		if (bp->flags & BNXT_FLAG_CHIP_P5)
			set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
		else
			set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
		bnxt_queue_sp_work(bp);
	}
	rxr->rx_next_cons = 0xffff;
}

static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
	u16 idx = agg_id & MAX_TPA_P5_MASK;

	if (test_bit(idx, map->agg_idx_bmap))
		idx = find_first_zero_bit(map->agg_idx_bmap,
					  BNXT_AGG_IDX_BMAP_SIZE);
	__set_bit(idx, map->agg_idx_bmap);
	map->agg_id_tbl[agg_id] = idx;
	return idx;
}

static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	__clear_bit(idx, map->agg_idx_bmap);
}

static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
{
	struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	return map->agg_id_tbl[agg_id];
}

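/* Handle a TPA start completion: record the hash, metadata, header
 * info, and length of the aggregation in the per-aggregation tpa_info,
 * take the current RX buffer as the TPA buffer, and put the previous
 * TPA buffer back on the ring in its place.
 */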
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			   struct rx_tpa_start_cmp *tpa_start,
			   struct rx_tpa_start_cmp_ext *tpa_start1)
{
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct bnxt_tpa_info *tpa_info;
	u16 cons, prod, agg_id;
	struct rx_bd *prod_bd;
	dma_addr_t mapping;

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		agg_id = TPA_START_AGG_ID_P5(tpa_start);
		agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
	} else {
		agg_id = TPA_START_AGG_ID(tpa_start);
	}
	cons = tpa_start->rx_tpa_start_cmp_opaque;
	prod = rxr->rx_prod;
	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];
	tpa_info = &rxr->rx_tpa[agg_id];

	if (unlikely(cons != rxr->rx_next_cons ||
		     TPA_START_ERROR(tpa_start))) {
		netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
			    cons, rxr->rx_next_cons,
			    TPA_START_ERROR_CODE(tpa_start1));
		bnxt_sched_reset(bp, rxr);
		return;
	}
	/* Store cfa_code in tpa_info to use in tpa_end
	 * completion processing.
	 */
	tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
	prod_rx_buf->data = tpa_info->data;
	prod_rx_buf->data_ptr = tpa_info->data_ptr;

	mapping = tpa_info->mapping;
	prod_rx_buf->mapping = mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

	tpa_info->data = cons_rx_buf->data;
	tpa_info->data_ptr = cons_rx_buf->data_ptr;
	cons_rx_buf->data = NULL;
	tpa_info->mapping = cons_rx_buf->mapping;

	tpa_info->len =
		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
				RX_TPA_START_CMP_LEN_SHIFT;
	if (likely(TPA_START_HASH_VALID(tpa_start))) {
		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

		tpa_info->hash_type = PKT_HASH_TYPE_L4;
		tpa_info->gso_type = SKB_GSO_TCPV4;
		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
			tpa_info->gso_type = SKB_GSO_TCPV6;
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
	} else {
		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
		tpa_info->gso_type = 0;
		netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
	}
	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
	tpa_info->agg_count = 0;

	rxr->rx_prod = NEXT_RX(prod);
	cons = NEXT_RX(cons);
	rxr->rx_next_cons = NEXT_RX(cons);
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
	cons_rx_buf->data = NULL;
}

static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
{
	if (agg_bufs)
		bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
}

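/* For UDP-encapsulated GRO packets, mark the skb with the appropriate
 * UDP tunnel GSO type depending on whether the outer UDP checksum is
 * present.
 */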
#ifdef CONFIG_INET
static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
{
	struct udphdr *uh = NULL;

	if (ip_proto == htons(ETH_P_IP)) {
		struct iphdr *iph = (struct iphdr *)skb->data;

		if (iph->protocol == IPPROTO_UDP)
			uh = (struct udphdr *)(iph + 1);
	} else {
		struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

		if (iph->nexthdr == IPPROTO_UDP)
			uh = (struct udphdr *)(iph + 1);
	}
	if (uh) {
		if (uh->check)
			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
		else
			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
	}
}
#endif

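/* The bnxt_gro_func_*() helpers below are chip-specific: they locate
 * the inner network/transport headers of an aggregated TPA packet and
 * prime the TCP pseudo checksum so the stack can complete GRO.
 */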
static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off;
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	bool loopback = false;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	/* If the packet is an internal loopback packet, the offsets will
	 * have an extra 4 bytes.
	 */
	if (inner_mac_off == 4) {
		loopback = true;
	} else if (inner_mac_off > 4) {
		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
					    ETH_HLEN - 2));

		/* We only support inner IPv4/IPv6.  If we don't see the
		 * correct protocol ID, it must be a loopback packet where
		 * the offsets are off by 4.
		 */
		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
			loopback = true;
	}
	if (loopback) {
		/* internal loopback packet, subtract all offsets by 4 */
		inner_ip_off -= 4;
		inner_mac_off -= 4;
		outer_ip_off -= 4;
	}

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		struct iphdr *iph = ip_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	}

	if (inner_mac_off) { /* tunnel */
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		bnxt_gro_tunnel(skb, proto);
	}
#endif
	return skb;
}

static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	int iphdr_len, nw_off;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
		     sizeof(struct ipv6hdr) : sizeof(struct iphdr);
	skb_set_transport_header(skb, nw_off + iphdr_len);

	if (inner_mac_off) { /* tunnel */
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		bnxt_gro_tunnel(skb, proto);
	}
#endif
	return skb;
}

#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))

static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off, tcp_opt_len = 0;

	if (tcp_ts)
		tcp_opt_len = 12;

	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
		struct iphdr *iph;

		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ip_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
		struct ipv6hdr *iph;

		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ipv6_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	if (nw_off) /* tunnel */
		bnxt_gro_tunnel(skb, skb->protocol);
#endif
	return skb;
}

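/* Finish GRO processing for a TPA packet: set gso_size/gso_type from
 * the TPA end completion and call the chip-specific header fixup
 * before handing the skb to tcp_gro_complete().
 */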
static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
					   struct bnxt_tpa_info *tpa_info,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	int payload_off;
	u16 segs;

	segs = TPA_END_TPA_SEGS(tpa_end);
	if (segs == 1)
		return skb;

	NAPI_GRO_CB(skb)->count = segs;
	skb_shinfo(skb)->gso_size =
		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
	else
		payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
	if (likely(skb))
		tcp_gro_complete(skb);
#endif
	return skb;
}

/* Given the cfa_code of a received packet, determine which
 * netdev (vf-rep or PF) the packet is destined to.
 */
static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
{
	struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);

	/* if vf-rep dev is NULL, the packet must belong to the PF */
	return dev ? dev : bp->dev;
}

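/* Handle a TPA end completion: assemble the full aggregated packet,
 * either by copying small packets or by swapping in a new buffer and
 * attaching the aggregation pages, then apply VLAN/checksum/GRO
 * post-processing.
 */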
static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
					   struct bnxt_cp_ring_info *cpr,
					   u32 *raw_cons,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   u8 *event)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u8 *data_ptr, agg_bufs;
	unsigned int len;
	struct bnxt_tpa_info *tpa_info;
	dma_addr_t mapping;
	struct sk_buff *skb;
	u16 idx = 0, agg_id;
	void *data;
	bool gro;

	if (unlikely(bnapi->in_reset)) {
		int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);

		if (rc < 0)
			return ERR_PTR(-EBUSY);
		return NULL;
	}

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		agg_id = TPA_END_AGG_ID_P5(tpa_end);
		agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
		agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
		tpa_info = &rxr->rx_tpa[agg_id];
		if (unlikely(agg_bufs != tpa_info->agg_count)) {
			netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
				    agg_bufs, tpa_info->agg_count);
			agg_bufs = tpa_info->agg_count;
		}
		tpa_info->agg_count = 0;
		*event |= BNXT_AGG_EVENT;
		bnxt_free_agg_idx(rxr, agg_id);
		idx = agg_id;
		gro = !!(bp->flags & BNXT_FLAG_GRO);
	} else {
		agg_id = TPA_END_AGG_ID(tpa_end);
		agg_bufs = TPA_END_AGG_BUFS(tpa_end);
		tpa_info = &rxr->rx_tpa[agg_id];
		idx = RING_CMP(*raw_cons);
		if (agg_bufs) {
			if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
				return ERR_PTR(-EBUSY);

			*event |= BNXT_AGG_EVENT;
			idx = NEXT_CMP(idx);
		}
		gro = !!TPA_END_GRO(tpa_end);
	}
	data = tpa_info->data;
	data_ptr = tpa_info->data_ptr;
	prefetch(data_ptr);
	len = tpa_info->len;
	mapping = tpa_info->mapping;

	if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
		bnxt_abort_tpa(cpr, idx, agg_bufs);
		if (agg_bufs > MAX_SKB_FRAGS)
			netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
				    agg_bufs, (int)MAX_SKB_FRAGS);
		return NULL;
	}

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
		if (!skb) {
			bnxt_abort_tpa(cpr, idx, agg_bufs);
			return NULL;
		}
	} else {
		u8 *new_data;
		dma_addr_t new_mapping;

		new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
		if (!new_data) {
			bnxt_abort_tpa(cpr, idx, agg_bufs);
			return NULL;
		}

		tpa_info->data = new_data;
		tpa_info->data_ptr = new_data + bp->rx_offset;
		tpa_info->mapping = new_mapping;

		skb = build_skb(data, 0);
		dma_unmap_single_attrs(&bp->pdev->dev, mapping,
				       bp->rx_buf_use_size, bp->rx_dir,
				       DMA_ATTR_WEAK_ORDERING);

		if (!skb) {
			kfree(data);
			bnxt_abort_tpa(cpr, idx, agg_bufs);
			return NULL;
		}
		skb_reserve(skb, bp->rx_offset);
		skb_put(skb, len);
	}

	if (agg_bufs) {
		skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
		if (!skb) {
			/* Page reuse already handled by bnxt_rx_pages(). */
			return NULL;
		}
	}

	skb->protocol =
		eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));

	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);

	if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
	    (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
		__be16 vlan_proto = htons(tpa_info->metadata >>
					  RX_CMP_FLAGS2_METADATA_TPID_SFT);
		u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;

		if (eth_type_vlan(vlan_proto)) {
			__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
		} else {
			dev_kfree_skb(skb);
			return NULL;
		}
	}

	skb_checksum_none_assert(skb);
	if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level =
			(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
	}

	if (gro)
		skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);

	return skb;
}

1682 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1683 			 struct rx_agg_cmp *rx_agg)
1684 {
1685 	u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1686 	struct bnxt_tpa_info *tpa_info;
1687 
1688 	agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1689 	tpa_info = &rxr->rx_tpa[agg_id];
1690 	BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1691 	tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1692 }
1693 
1694 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1695 			     struct sk_buff *skb)
1696 {
1697 	if (skb->dev != bp->dev) {
1698 		/* this packet belongs to a vf-rep */
1699 		bnxt_vf_rep_rx(bp, skb);
1700 		return;
1701 	}
1702 	skb_record_rx_queue(skb, bnapi->index);
1703 	napi_gro_receive(&bnapi->napi, skb);
1704 }
1705 
1706 /* returns the following:
1707  * 1       - 1 packet successfully received
1708  * 0       - successful TPA_START, packet not completed yet
1709  * -EBUSY  - completion ring does not have all the agg buffers yet
1710  * -ENOMEM - packet aborted due to out of memory
1711  * -EIO    - packet aborted due to hw error indicated in BD
1712  */
1713 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1714 		       u32 *raw_cons, u8 *event)
1715 {
1716 	struct bnxt_napi *bnapi = cpr->bnapi;
1717 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1718 	struct net_device *dev = bp->dev;
1719 	struct rx_cmp *rxcmp;
1720 	struct rx_cmp_ext *rxcmp1;
1721 	u32 tmp_raw_cons = *raw_cons;
1722 	u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1723 	struct bnxt_sw_rx_bd *rx_buf;
1724 	unsigned int len;
1725 	u8 *data_ptr, agg_bufs, cmp_type;
1726 	dma_addr_t dma_addr;
1727 	struct sk_buff *skb;
1728 	void *data;
1729 	int rc = 0;
1730 	u32 misc;
1731 
1732 	rxcmp = (struct rx_cmp *)
1733 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1734 
1735 	cmp_type = RX_CMP_TYPE(rxcmp);
1736 
1737 	if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
1738 		bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1739 		goto next_rx_no_prod_no_len;
1740 	}
1741 
1742 	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1743 	cp_cons = RING_CMP(tmp_raw_cons);
1744 	rxcmp1 = (struct rx_cmp_ext *)
1745 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1746 
1747 	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1748 		return -EBUSY;
1749 
1750 	/* The valid test of the entry must be done first before
1751 	 * reading any further.
1752 	 */
1753 	dma_rmb();
1754 	prod = rxr->rx_prod;
1755 
1756 	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1757 		bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1758 			       (struct rx_tpa_start_cmp_ext *)rxcmp1);
1759 
1760 		*event |= BNXT_RX_EVENT;
1761 		goto next_rx_no_prod_no_len;
1762 
1763 	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1764 		skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
1765 				   (struct rx_tpa_end_cmp *)rxcmp,
1766 				   (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
1767 
1768 		if (IS_ERR(skb))
1769 			return -EBUSY;
1770 
1771 		rc = -ENOMEM;
1772 		if (likely(skb)) {
1773 			bnxt_deliver_skb(bp, bnapi, skb);
1774 			rc = 1;
1775 		}
1776 		*event |= BNXT_RX_EVENT;
1777 		goto next_rx_no_prod_no_len;
1778 	}
1779 
1780 	cons = rxcmp->rx_cmp_opaque;
1781 	if (unlikely(cons != rxr->rx_next_cons)) {
1782 		int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
1783 
1784 		/* 0xffff is forced error, don't print it */
1785 		if (rxr->rx_next_cons != 0xffff)
1786 			netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1787 				    cons, rxr->rx_next_cons);
1788 		bnxt_sched_reset(bp, rxr);
1789 		if (rc1)
1790 			return rc1;
1791 		goto next_rx_no_prod_no_len;
1792 	}
1793 	rx_buf = &rxr->rx_buf_ring[cons];
1794 	data = rx_buf->data;
1795 	data_ptr = rx_buf->data_ptr;
1796 	prefetch(data_ptr);
1797 
1798 	misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1799 	agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
1800 
1801 	if (agg_bufs) {
1802 		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1803 			return -EBUSY;
1804 
1805 		cp_cons = NEXT_CMP(cp_cons);
1806 		*event |= BNXT_AGG_EVENT;
1807 	}
1808 	*event |= BNXT_RX_EVENT;
1809 
1810 	rx_buf->data = NULL;
1811 	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1812 		u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1813 
1814 		bnxt_reuse_rx_data(rxr, cons, data);
1815 		if (agg_bufs)
1816 			bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
1817 					       false);
1818 
1819 		rc = -EIO;
1820 		if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
1821 			bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
1822 			if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
1823 			    !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
1824 				netdev_warn_once(bp->dev, "RX buffer error %x\n",
1825 						 rx_err);
1826 				bnxt_sched_reset(bp, rxr);
1827 			}
1828 		}
1829 		goto next_rx_no_len;
1830 	}
1831 
1832 	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
1833 	dma_addr = rx_buf->mapping;
1834 
1835 	if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1836 		rc = 1;
1837 		goto next_rx;
1838 	}
1839 
1840 	if (len <= bp->rx_copy_thresh) {
1841 		skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
1842 		bnxt_reuse_rx_data(rxr, cons, data);
1843 		if (!skb) {
1844 			if (agg_bufs)
1845 				bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
1846 						       agg_bufs, false);
1847 			rc = -ENOMEM;
1848 			goto next_rx;
1849 		}
1850 	} else {
1851 		u32 payload;
1852 
1853 		if (rx_buf->data_ptr == data_ptr)
1854 			payload = misc & RX_CMP_PAYLOAD_OFFSET;
1855 		else
1856 			payload = 0;
1857 		skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
1858 				      payload | len);
1859 		if (!skb) {
1860 			rc = -ENOMEM;
1861 			goto next_rx;
1862 		}
1863 	}
1864 
1865 	if (agg_bufs) {
1866 		skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
1867 		if (!skb) {
1868 			rc = -ENOMEM;
1869 			goto next_rx;
1870 		}
1871 	}
1872 
1873 	if (RX_CMP_HASH_VALID(rxcmp)) {
1874 		u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1875 		enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1876 
1877 		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1878 		if (hash_type != 1 && hash_type != 3)
1879 			type = PKT_HASH_TYPE_L3;
1880 		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1881 	}
1882 
1883 	cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1884 	skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
1885 
1886 	if ((rxcmp1->rx_cmp_flags2 &
1887 	     cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1888 	    (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1889 		u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1890 		u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1891 		__be16 vlan_proto = htons(meta_data >>
1892 					  RX_CMP_FLAGS2_METADATA_TPID_SFT);
1893 
1894 		if (eth_type_vlan(vlan_proto)) {
1895 			__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1896 		} else {
1897 			dev_kfree_skb(skb);
1898 			goto next_rx;
1899 		}
1900 	}
1901 
1902 	skb_checksum_none_assert(skb);
1903 	if (RX_CMP_L4_CS_OK(rxcmp1)) {
1904 		if (dev->features & NETIF_F_RXCSUM) {
1905 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1906 			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1907 		}
1908 	} else {
1909 		if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1910 			if (dev->features & NETIF_F_RXCSUM)
1911 				bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
1912 		}
1913 	}
1914 
1915 	bnxt_deliver_skb(bp, bnapi, skb);
1916 	rc = 1;
1917 
1918 next_rx:
1919 	cpr->rx_packets += 1;
1920 	cpr->rx_bytes += len;
1921 
1922 next_rx_no_len:
1923 	rxr->rx_prod = NEXT_RX(prod);
1924 	rxr->rx_next_cons = NEXT_RX(cons);
1925 
1926 next_rx_no_prod_no_len:
1927 	*raw_cons = tmp_raw_cons;
1928 
1929 	return rc;
1930 }
1931 
1932 /* In netpoll mode, if we are using a combined completion ring, we need to
1933  * discard the rx packets and recycle the buffers.
1934  */
1935 static int bnxt_force_rx_discard(struct bnxt *bp,
1936 				 struct bnxt_cp_ring_info *cpr,
1937 				 u32 *raw_cons, u8 *event)
1938 {
1939 	u32 tmp_raw_cons = *raw_cons;
1940 	struct rx_cmp_ext *rxcmp1;
1941 	struct rx_cmp *rxcmp;
1942 	u16 cp_cons;
1943 	u8 cmp_type;
1944 
1945 	cp_cons = RING_CMP(tmp_raw_cons);
1946 	rxcmp = (struct rx_cmp *)
1947 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1948 
1949 	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1950 	cp_cons = RING_CMP(tmp_raw_cons);
1951 	rxcmp1 = (struct rx_cmp_ext *)
1952 			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1953 
1954 	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1955 		return -EBUSY;
1956 
1957 	/* The valid test of the entry must be done first before
1958 	 * reading any further.
1959 	 */
1960 	dma_rmb();
1961 	cmp_type = RX_CMP_TYPE(rxcmp);
1962 	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1963 		rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1964 			cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1965 	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1966 		struct rx_tpa_end_cmp_ext *tpa_end1;
1967 
1968 		tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
1969 		tpa_end1->rx_tpa_end_cmp_errors_v2 |=
1970 			cpu_to_le32(RX_TPA_END_CMP_ERRORS);
1971 	}
1972 	return bnxt_rx_pkt(bp, cpr, raw_cons, event);
1973 }
1974 
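/* Read one of the firmware health registers described by
 * fw_health->regs[].  The register encoding selects the access method:
 * PCI config space, a GRC window remapped into BAR0, or a direct
 * BAR0/BAR1 offset.  Callers pass an index such as BNXT_FW_HEARTBEAT_REG
 * or BNXT_FW_RESET_CNT_REG, e.g.:
 *
 *	u32 hb = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
 */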
1975 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
1976 {
1977 	struct bnxt_fw_health *fw_health = bp->fw_health;
1978 	u32 reg = fw_health->regs[reg_idx];
1979 	u32 reg_type, reg_off, val = 0;
1980 
1981 	reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
1982 	reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
1983 	switch (reg_type) {
1984 	case BNXT_FW_HEALTH_REG_TYPE_CFG:
1985 		pci_read_config_dword(bp->pdev, reg_off, &val);
1986 		break;
1987 	case BNXT_FW_HEALTH_REG_TYPE_GRC:
1988 		reg_off = fw_health->mapped_regs[reg_idx];
1989 		fallthrough;
1990 	case BNXT_FW_HEALTH_REG_TYPE_BAR0:
1991 		val = readl(bp->bar0 + reg_off);
1992 		break;
1993 	case BNXT_FW_HEALTH_REG_TYPE_BAR1:
1994 		val = readl(bp->bar1 + reg_off);
1995 		break;
1996 	}
1997 	if (reg_idx == BNXT_FW_RESET_INPROG_REG)
1998 		val &= fw_health->fw_reset_inprog_reg_mask;
1999 	return val;
2000 }
2001 
2002 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2003 {
2004 	int i;
2005 
2006 	for (i = 0; i < bp->rx_nr_rings; i++) {
2007 		u16 grp_idx = bp->rx_ring[i].bnapi->index;
2008 		struct bnxt_ring_grp_info *grp_info;
2009 
2010 		grp_info = &bp->grp_info[grp_idx];
2011 		if (grp_info->agg_fw_ring_id == ring_id)
2012 			return grp_idx;
2013 	}
2014 	return INVALID_HW_RING_ID;
2015 }
2016 
2017 #define BNXT_GET_EVENT_PORT(data)	\
2018 	((data) &			\
2019 	 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
2020 
2021 #define BNXT_EVENT_RING_TYPE(data2)	\
2022 	((data2) &			\
2023 	 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2024 
2025 #define BNXT_EVENT_RING_TYPE_RX(data2)	\
2026 	(BNXT_EVENT_RING_TYPE(data2) ==	\
2027 	 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
2028 
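/* Decode a firmware async event completion and translate it into
 * sp_event bits for the slow path workqueue.  Events needing no
 * deferred work jump straight to the exit label, which in all cases
 * also forwards the completion to any registered ULPs.
 */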
2029 static int bnxt_async_event_process(struct bnxt *bp,
2030 				    struct hwrm_async_event_cmpl *cmpl)
2031 {
2032 	u16 event_id = le16_to_cpu(cmpl->event_id);
2033 	u32 data1 = le32_to_cpu(cmpl->event_data1);
2034 	u32 data2 = le32_to_cpu(cmpl->event_data2);
2035 
2036 	/* TODO CHIMP_FW: Define event id's for link change, error etc */
2037 	switch (event_id) {
2038 	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
2039 		struct bnxt_link_info *link_info = &bp->link_info;
2040 
2041 		if (BNXT_VF(bp))
2042 			goto async_event_process_exit;
2043 
2044 		/* print unsupported speed warning in forced speed mode only */
2045 		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2046 		    (data1 & 0x20000)) {
2047 			u16 fw_speed = link_info->force_link_speed;
2048 			u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2049 
2050 			if (speed != SPEED_UNKNOWN)
2051 				netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2052 					    speed);
2053 		}
2054 		set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2055 	}
2056 		fallthrough;
2057 	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2058 	case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2059 		set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2060 		fallthrough;
2061 	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
2062 		set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2063 		break;
2064 	case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
2065 		set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2066 		break;
2067 	case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
2068 		u16 port_id = BNXT_GET_EVENT_PORT(data1);
2069 
2070 		if (BNXT_VF(bp))
2071 			break;
2072 
2073 		if (bp->pf.port_id != port_id)
2074 			break;
2075 
2076 		set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2077 		break;
2078 	}
2079 	case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
2080 		if (BNXT_PF(bp))
2081 			goto async_event_process_exit;
2082 		set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2083 		break;
2084 	case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2085 		char *fatal_str = "non-fatal";
2086 
2087 		if (!bp->fw_health)
2088 			goto async_event_process_exit;
2089 
2090 		bp->fw_reset_timestamp = jiffies;
2091 		bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2092 		if (!bp->fw_reset_min_dsecs)
2093 			bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2094 		bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2095 		if (!bp->fw_reset_max_dsecs)
2096 			bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2097 		if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2098 			fatal_str = "fatal";
2099 			set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2100 		}
2101 		netif_warn(bp, hw, bp->dev,
2102 			   "Firmware %s reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2103 			   fatal_str, data1, data2,
2104 			   bp->fw_reset_min_dsecs * 100,
2105 			   bp->fw_reset_max_dsecs * 100);
2106 		set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2107 		break;
2108 	}
2109 	case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2110 		struct bnxt_fw_health *fw_health = bp->fw_health;
2111 
2112 		if (!fw_health)
2113 			goto async_event_process_exit;
2114 
2115 		if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
2116 			fw_health->enabled = false;
2117 			netif_info(bp, drv, bp->dev,
2118 				   "Error recovery info: error recovery[0]\n");
2119 			break;
2120 		}
2121 		fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2122 		fw_health->tmr_multiplier =
2123 			DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2124 				     bp->current_interval * 10);
2125 		fw_health->tmr_counter = fw_health->tmr_multiplier;
2126 		if (!fw_health->enabled)
2127 			fw_health->last_fw_heartbeat =
2128 				bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2129 		fw_health->last_fw_reset_cnt =
2130 			bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2131 		netif_info(bp, drv, bp->dev,
2132 			   "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
2133 			   fw_health->master, fw_health->last_fw_reset_cnt,
2134 			   bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG));
2135 		if (!fw_health->enabled) {
2136 			/* Make sure tmr_counter is set and visible to
2137 			 * bnxt_health_check() before setting enabled to true.
2138 			 */
2139 			smp_wmb();
2140 			fw_health->enabled = true;
2141 		}
2142 		goto async_event_process_exit;
2143 	}
2144 	case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
2145 		netif_notice(bp, hw, bp->dev,
2146 			     "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2147 			     data1, data2);
2148 		goto async_event_process_exit;
2149 	case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
2150 		struct bnxt_rx_ring_info *rxr;
2151 		u16 grp_idx;
2152 
2153 		if (bp->flags & BNXT_FLAG_CHIP_P5)
2154 			goto async_event_process_exit;
2155 
2156 		netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2157 			    BNXT_EVENT_RING_TYPE(data2), data1);
2158 		if (!BNXT_EVENT_RING_TYPE_RX(data2))
2159 			goto async_event_process_exit;
2160 
2161 		grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2162 		if (grp_idx == INVALID_HW_RING_ID) {
2163 			netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2164 				    data1);
2165 			goto async_event_process_exit;
2166 		}
2167 		rxr = bp->bnapi[grp_idx]->rx_ring;
2168 		bnxt_sched_reset(bp, rxr);
2169 		goto async_event_process_exit;
2170 	}
2171 	default:
2172 		goto async_event_process_exit;
2173 	}
2174 	bnxt_queue_sp_work(bp);
2175 async_event_process_exit:
2176 	bnxt_ulp_async_events(bp, cmpl);
2177 	return 0;
2178 }
2179 
2180 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2181 {
2182 	u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2183 	struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2184 	struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2185 				(struct hwrm_fwd_req_cmpl *)txcmp;
2186 
2187 	switch (cmpl_type) {
2188 	case CMPL_BASE_TYPE_HWRM_DONE:
2189 		seq_id = le16_to_cpu(h_cmpl->sequence_id);
2190 		if (seq_id == bp->hwrm_intr_seq_id)
2191 			bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id;
2192 		else
2193 			netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
2194 		break;
2195 
2196 	case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2197 		vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2198 
2199 		if ((vf_id < bp->pf.first_vf_id) ||
2200 		    (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2201 			netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2202 				   vf_id);
2203 			return -EINVAL;
2204 		}
2205 
2206 		set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2207 		set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
2208 		bnxt_queue_sp_work(bp);
2209 		break;
2210 
2211 	case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2212 		bnxt_async_event_process(bp,
2213 					 (struct hwrm_async_event_cmpl *)txcmp);
2214 
2215 	default:
2216 		break;
2217 	}
2218 
2219 	return 0;
2220 }
2221 
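/* MSI-X handler: one vector per NAPI instance.  All real work is
 * deferred to NAPI; the handler only counts the event, prefetches the
 * next completion entry, and schedules the poll.
 */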
2222 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2223 {
2224 	struct bnxt_napi *bnapi = dev_instance;
2225 	struct bnxt *bp = bnapi->bp;
2226 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2227 	u32 cons = RING_CMP(cpr->cp_raw_cons);
2228 
2229 	cpr->event_ctr++;
2230 	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2231 	napi_schedule(&bnapi->napi);
2232 	return IRQ_HANDLED;
2233 }
2234 
2235 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2236 {
2237 	u32 raw_cons = cpr->cp_raw_cons;
2238 	u16 cons = RING_CMP(raw_cons);
2239 	struct tx_cmp *txcmp;
2240 
2241 	txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2242 
2243 	return TX_CMP_VALID(txcmp, raw_cons);
2244 }
2245 
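/* Legacy INTx handler.  The line may be shared, so when no work is
 * pending, the CAG status register is read to verify the interrupt is
 * ours.  The ring IRQ is then disabled and NAPI is scheduled unless
 * interrupts are globally disabled via intr_sem.
 */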
2246 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2247 {
2248 	struct bnxt_napi *bnapi = dev_instance;
2249 	struct bnxt *bp = bnapi->bp;
2250 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2251 	u32 cons = RING_CMP(cpr->cp_raw_cons);
2252 	u32 int_status;
2253 
2254 	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2255 
2256 	if (!bnxt_has_work(bp, cpr)) {
2257 		int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
2258 		/* return if erroneous interrupt */
2259 		if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2260 			return IRQ_NONE;
2261 	}
2262 
2263 	/* disable ring IRQ */
2264 	BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
2265 
2266 	/* Return here if interrupt is shared and is disabled. */
2267 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
2268 		return IRQ_HANDLED;
2269 
2270 	napi_schedule(&bnapi->napi);
2271 	return IRQ_HANDLED;
2272 }
2273 
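/* Core poll loop for one completion ring: consume TX, RX, and HWRM
 * completions until the ring is empty or the RX budget is exhausted.
 * TX completions are only tallied here; the skbs are freed later by
 * bnapi->tx_int() from __bnxt_poll_work_done().  Pending XDP redirects
 * are flushed and any XDP TX doorbell is written before returning.
 */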
2274 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2275 			    int budget)
2276 {
2277 	struct bnxt_napi *bnapi = cpr->bnapi;
2278 	u32 raw_cons = cpr->cp_raw_cons;
2279 	u32 cons;
2280 	int tx_pkts = 0;
2281 	int rx_pkts = 0;
2282 	u8 event = 0;
2283 	struct tx_cmp *txcmp;
2284 
2285 	cpr->has_more_work = 0;
2286 	cpr->had_work_done = 1;
2287 	while (1) {
2288 		int rc;
2289 
2290 		cons = RING_CMP(raw_cons);
2291 		txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2292 
2293 		if (!TX_CMP_VALID(txcmp, raw_cons))
2294 			break;
2295 
2296 		/* The valid test of the entry must be done first before
2297 		 * reading any further.
2298 		 */
2299 		dma_rmb();
2300 		if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
2301 			tx_pkts++;
2302 			/* return full budget so NAPI will complete. */
2303 			if (unlikely(tx_pkts >= bp->tx_wake_thresh)) {
2304 				rx_pkts = budget;
2305 				raw_cons = NEXT_RAW_CMP(raw_cons);
2306 				if (budget)
2307 					cpr->has_more_work = 1;
2308 				break;
2309 			}
2310 		} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2311 			if (likely(budget))
2312 				rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2313 			else
2314 				rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2315 							   &event);
2316 			if (likely(rc >= 0))
2317 				rx_pkts += rc;
2318 			/* Increment rx_pkts when rc is -ENOMEM to count towards
2319 			 * the NAPI budget.  Otherwise, we may potentially loop
2320 			 * here forever if we consistently cannot allocate
2321 			 * buffers.
2322 			 */
2323 			else if (rc == -ENOMEM && budget)
2324 				rx_pkts++;
2325 			else if (rc == -EBUSY)	/* partial completion */
2326 				break;
2327 		} else if (unlikely((TX_CMP_TYPE(txcmp) ==
2328 				     CMPL_BASE_TYPE_HWRM_DONE) ||
2329 				    (TX_CMP_TYPE(txcmp) ==
2330 				     CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
2331 				    (TX_CMP_TYPE(txcmp) ==
2332 				     CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
2333 			bnxt_hwrm_handler(bp, txcmp);
2334 		}
2335 		raw_cons = NEXT_RAW_CMP(raw_cons);
2336 
2337 		if (rx_pkts && rx_pkts == budget) {
2338 			cpr->has_more_work = 1;
2339 			break;
2340 		}
2341 	}
2342 
2343 	if (event & BNXT_REDIRECT_EVENT)
2344 		xdp_do_flush_map();
2345 
2346 	if (event & BNXT_TX_EVENT) {
2347 		struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
2348 		u16 prod = txr->tx_prod;
2349 
2350 		/* Sync BD data before updating doorbell */
2351 		wmb();
2352 
2353 		bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
2354 	}
2355 
2356 	cpr->cp_raw_cons = raw_cons;
2357 	bnapi->tx_pkts += tx_pkts;
2358 	bnapi->events |= event;
2359 	return rx_pkts;
2360 }
2361 
2362 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
2363 {
2364 	if (bnapi->tx_pkts) {
2365 		bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
2366 		bnapi->tx_pkts = 0;
2367 	}
2368 
2369 	if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
2370 		struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2371 
2372 		if (bnapi->events & BNXT_AGG_EVENT)
2373 			bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2374 		bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2375 	}
2376 	bnapi->events = 0;
2377 }
2378 
2379 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2380 			  int budget)
2381 {
2382 	struct bnxt_napi *bnapi = cpr->bnapi;
2383 	int rx_pkts;
2384 
2385 	rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2386 
2387 	/* ACK completion ring before freeing tx ring and producing new
2388 	 * buffers in rx/agg rings to prevent overflowing the completion
2389 	 * ring.
2390 	 */
2391 	bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2392 
2393 	__bnxt_poll_work_done(bp, bnapi);
2394 	return rx_pkts;
2395 }
2396 
2397 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2398 {
2399 	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2400 	struct bnxt *bp = bnapi->bp;
2401 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2402 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2403 	struct tx_cmp *txcmp;
2404 	struct rx_cmp_ext *rxcmp1;
2405 	u32 cp_cons, tmp_raw_cons;
2406 	u32 raw_cons = cpr->cp_raw_cons;
2407 	u32 rx_pkts = 0;
2408 	u8 event = 0;
2409 
2410 	while (1) {
2411 		int rc;
2412 
2413 		cp_cons = RING_CMP(raw_cons);
2414 		txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2415 
2416 		if (!TX_CMP_VALID(txcmp, raw_cons))
2417 			break;
2418 
2419 		/* The valid test of the entry must be done first before
2420 		 * reading any further.
2421 		 */
2422 		dma_rmb();
2423 		if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2424 			tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2425 			cp_cons = RING_CMP(tmp_raw_cons);
2426 			rxcmp1 = (struct rx_cmp_ext *)
2427 			  &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2428 
2429 			if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2430 				break;
2431 
2432 			/* force an error to recycle the buffer */
2433 			rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2434 				cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2435 
2436 			rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2437 			if (likely(rc == -EIO) && budget)
2438 				rx_pkts++;
2439 			else if (rc == -EBUSY)	/* partial completion */
2440 				break;
2441 		} else if (unlikely(TX_CMP_TYPE(txcmp) ==
2442 				    CMPL_BASE_TYPE_HWRM_DONE)) {
2443 			bnxt_hwrm_handler(bp, txcmp);
2444 		} else {
2445 			netdev_err(bp->dev,
2446 				   "Invalid completion received on special ring\n");
2447 		}
2448 		raw_cons = NEXT_RAW_CMP(raw_cons);
2449 
2450 		if (rx_pkts == budget)
2451 			break;
2452 	}
2453 
2454 	cpr->cp_raw_cons = raw_cons;
2455 	BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2456 	bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2457 
2458 	if (event & BNXT_AGG_EVENT)
2459 		bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2460 
2461 	if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
2462 		napi_complete_done(napi, rx_pkts);
2463 		BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2464 	}
2465 	return rx_pkts;
2466 }
2467 
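/* Main NAPI poll for chips with one completion ring per vector.  Loops
 * until the budget is consumed or no work remains, then completes NAPI
 * and re-arms the completion ring.  When dynamic interrupt moderation
 * (DIM) is enabled, the accumulated event/packet/byte counts are fed to
 * net_dim() to adjust the coalescing parameters.
 */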
2468 static int bnxt_poll(struct napi_struct *napi, int budget)
2469 {
2470 	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2471 	struct bnxt *bp = bnapi->bp;
2472 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2473 	int work_done = 0;
2474 
2475 	while (1) {
2476 		work_done += bnxt_poll_work(bp, cpr, budget - work_done);
2477 
2478 		if (work_done >= budget) {
2479 			if (!budget)
2480 				BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2481 			break;
2482 		}
2483 
2484 		if (!bnxt_has_work(bp, cpr)) {
2485 			if (napi_complete_done(napi, work_done))
2486 				BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2487 			break;
2488 		}
2489 	}
2490 	if (bp->flags & BNXT_FLAG_DIM) {
2491 		struct dim_sample dim_sample = {};
2492 
2493 		dim_update_sample(cpr->event_ctr,
2494 				  cpr->rx_packets,
2495 				  cpr->rx_bytes,
2496 				  &dim_sample);
2497 		net_dim(&cpr->dim, dim_sample);
2498 	}
2499 	return work_done;
2500 }
2501 
2502 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2503 {
2504 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2505 	int i, work_done = 0;
2506 
2507 	for (i = 0; i < 2; i++) {
2508 		struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2509 
2510 		if (cpr2) {
2511 			work_done += __bnxt_poll_work(bp, cpr2,
2512 						      budget - work_done);
2513 			cpr->has_more_work |= cpr2->has_more_work;
2514 		}
2515 	}
2516 	return work_done;
2517 }
2518 
2519 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2520 				 u64 dbr_type)
2521 {
2522 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2523 	int i;
2524 
2525 	for (i = 0; i < 2; i++) {
2526 		struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2527 		struct bnxt_db_info *db;
2528 
2529 		if (cpr2 && cpr2->had_work_done) {
2530 			db = &cpr2->cp_db;
2531 			writeq(db->db_key64 | dbr_type |
2532 			       RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2533 			cpr2->had_work_done = 0;
2534 		}
2535 	}
2536 	__bnxt_poll_work_done(bp, bnapi);
2537 }
2538 
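/* NAPI poll for P5 chips, where each vector services a notification
 * queue (NQ).  Each NQ entry points at a TX or RX completion ring,
 * which is then polled via __bnxt_poll_work().  The CQs are re-armed
 * with DBR_TYPE_CQ_ARMALL only when all work is done and NAPI
 * completes.
 */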
2539 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2540 {
2541 	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2542 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2543 	u32 raw_cons = cpr->cp_raw_cons;
2544 	struct bnxt *bp = bnapi->bp;
2545 	struct nqe_cn *nqcmp;
2546 	int work_done = 0;
2547 	u32 cons;
2548 
2549 	if (cpr->has_more_work) {
2550 		cpr->has_more_work = 0;
2551 		work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2552 	}
2553 	while (1) {
2554 		cons = RING_CMP(raw_cons);
2555 		nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2556 
2557 		if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
2558 			if (cpr->has_more_work)
2559 				break;
2560 
2561 			__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL);
2562 			cpr->cp_raw_cons = raw_cons;
2563 			if (napi_complete_done(napi, work_done))
2564 				BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2565 						  cpr->cp_raw_cons);
2566 			return work_done;
2567 		}
2568 
2569 		/* The valid test of the entry must be done first before
2570 		 * reading any further.
2571 		 */
2572 		dma_rmb();
2573 
2574 		if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2575 			u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2576 			struct bnxt_cp_ring_info *cpr2;
2577 
2578 			cpr2 = cpr->cp_ring_arr[idx];
2579 			work_done += __bnxt_poll_work(bp, cpr2,
2580 						      budget - work_done);
2581 			cpr->has_more_work |= cpr2->has_more_work;
2582 		} else {
2583 			bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2584 		}
2585 		raw_cons = NEXT_RAW_CMP(raw_cons);
2586 	}
2587 	__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ);
2588 	if (raw_cons != cpr->cp_raw_cons) {
2589 		cpr->cp_raw_cons = raw_cons;
2590 		BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
2591 	}
2592 	return work_done;
2593 }
2594 
2595 static void bnxt_free_tx_skbs(struct bnxt *bp)
2596 {
2597 	int i, max_idx;
2598 	struct pci_dev *pdev = bp->pdev;
2599 
2600 	if (!bp->tx_ring)
2601 		return;
2602 
2603 	max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2604 	for (i = 0; i < bp->tx_nr_rings; i++) {
2605 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2606 		int j;
2607 
2608 		if (!txr->tx_buf_ring)
2609 			continue;
2610 
2611 		for (j = 0; j < max_idx;) {
2612 			struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2613 			struct sk_buff *skb;
2614 			int k, last;
2615 
2616 			if (i < bp->tx_nr_rings_xdp &&
2617 			    tx_buf->action == XDP_REDIRECT) {
2618 				dma_unmap_single(&pdev->dev,
2619 					dma_unmap_addr(tx_buf, mapping),
2620 					dma_unmap_len(tx_buf, len),
2621 					PCI_DMA_TODEVICE);
2622 				xdp_return_frame(tx_buf->xdpf);
2623 				tx_buf->action = 0;
2624 				tx_buf->xdpf = NULL;
2625 				j++;
2626 				continue;
2627 			}
2628 
2629 			skb = tx_buf->skb;
2630 			if (!skb) {
2631 				j++;
2632 				continue;
2633 			}
2634 
2635 			tx_buf->skb = NULL;
2636 
2637 			if (tx_buf->is_push) {
2638 				dev_kfree_skb(skb);
2639 				j += 2;
2640 				continue;
2641 			}
2642 
2643 			dma_unmap_single(&pdev->dev,
2644 					 dma_unmap_addr(tx_buf, mapping),
2645 					 skb_headlen(skb),
2646 					 PCI_DMA_TODEVICE);
2647 
2648 			last = tx_buf->nr_frags;
2649 			j += 2;
2650 			for (k = 0; k < last; k++, j++) {
2651 				int ring_idx = j & bp->tx_ring_mask;
2652 				skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2653 
2654 				tx_buf = &txr->tx_buf_ring[ring_idx];
2655 				dma_unmap_page(
2656 					&pdev->dev,
2657 					dma_unmap_addr(tx_buf, mapping),
2658 					skb_frag_size(frag), PCI_DMA_TODEVICE);
2659 			}
2660 			dev_kfree_skb(skb);
2661 		}
2662 		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2663 	}
2664 }
2665 
2666 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
2667 {
2668 	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
2669 	struct pci_dev *pdev = bp->pdev;
2670 	struct bnxt_tpa_idx_map *map;
2671 	int i, max_idx, max_agg_idx;
2672 
2673 	max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2674 	max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2675 	if (!rxr->rx_tpa)
2676 		goto skip_rx_tpa_free;
2677 
2678 	for (i = 0; i < bp->max_tpa; i++) {
2679 		struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
2680 		u8 *data = tpa_info->data;
2681 
2682 		if (!data)
2683 			continue;
2684 
2685 		dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
2686 				       bp->rx_buf_use_size, bp->rx_dir,
2687 				       DMA_ATTR_WEAK_ORDERING);
2688 
2689 		tpa_info->data = NULL;
2690 
2691 		kfree(data);
2692 	}
2693 
2694 skip_rx_tpa_free:
2695 	if (!rxr->rx_buf_ring)
2696 		goto skip_rx_buf_free;
2697 
2698 	for (i = 0; i < max_idx; i++) {
2699 		struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
2700 		dma_addr_t mapping = rx_buf->mapping;
2701 		void *data = rx_buf->data;
2702 
2703 		if (!data)
2704 			continue;
2705 
2706 		rx_buf->data = NULL;
2707 		if (BNXT_RX_PAGE_MODE(bp)) {
2708 			mapping -= bp->rx_dma_offset;
2709 			dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
2710 					     bp->rx_dir,
2711 					     DMA_ATTR_WEAK_ORDERING);
2712 			page_pool_recycle_direct(rxr->page_pool, data);
2713 		} else {
2714 			dma_unmap_single_attrs(&pdev->dev, mapping,
2715 					       bp->rx_buf_use_size, bp->rx_dir,
2716 					       DMA_ATTR_WEAK_ORDERING);
2717 			kfree(data);
2718 		}
2719 	}
2720 
2721 skip_rx_buf_free:
2722 	if (!rxr->rx_agg_ring)
2723 		goto skip_rx_agg_free;
2724 
2725 	for (i = 0; i < max_agg_idx; i++) {
2726 		struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
2727 		struct page *page = rx_agg_buf->page;
2728 
2729 		if (!page)
2730 			continue;
2731 
2732 		dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2733 				     BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
2734 				     DMA_ATTR_WEAK_ORDERING);
2735 
2736 		rx_agg_buf->page = NULL;
2737 		__clear_bit(i, rxr->rx_agg_bmap);
2738 
2739 		__free_page(page);
2740 	}
2741 
2742 skip_rx_agg_free:
2743 	if (rxr->rx_page) {
2744 		__free_page(rxr->rx_page);
2745 		rxr->rx_page = NULL;
2746 	}
2747 	map = rxr->rx_tpa_idx_map;
2748 	if (map)
2749 		memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
2750 }
2751 
2752 static void bnxt_free_rx_skbs(struct bnxt *bp)
2753 {
2754 	int i;
2755 
2756 	if (!bp->rx_ring)
2757 		return;
2758 
2759 	for (i = 0; i < bp->rx_nr_rings; i++)
2760 		bnxt_free_one_rx_ring_skbs(bp, i);
2761 }
2762 
2763 static void bnxt_free_skbs(struct bnxt *bp)
2764 {
2765 	bnxt_free_tx_skbs(bp);
2766 	bnxt_free_rx_skbs(bp);
2767 }
2768 
2769 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2770 {
2771 	struct pci_dev *pdev = bp->pdev;
2772 	int i;
2773 
2774 	for (i = 0; i < rmem->nr_pages; i++) {
2775 		if (!rmem->pg_arr[i])
2776 			continue;
2777 
2778 		dma_free_coherent(&pdev->dev, rmem->page_size,
2779 				  rmem->pg_arr[i], rmem->dma_arr[i]);
2780 
2781 		rmem->pg_arr[i] = NULL;
2782 	}
2783 	if (rmem->pg_tbl) {
2784 		size_t pg_tbl_size = rmem->nr_pages * 8;
2785 
2786 		if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2787 			pg_tbl_size = rmem->page_size;
2788 		dma_free_coherent(&pdev->dev, pg_tbl_size,
2789 				  rmem->pg_tbl, rmem->pg_tbl_map);
2790 		rmem->pg_tbl = NULL;
2791 	}
2792 	if (rmem->vmem_size && *rmem->vmem) {
2793 		vfree(*rmem->vmem);
2794 		*rmem->vmem = NULL;
2795 	}
2796 }
2797 
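/* Allocate the coherent DMA pages backing a hardware ring described by
 * rmem.  Multi-page rings (or paged contexts with depth > 0) also get a
 * page table whose PTEs are tagged with the VALID/NEXT_TO_LAST/LAST
 * bits the chip expects.  When vmem_size is set, a zeroed vmalloc area
 * is also allocated for the driver's software ring state.
 */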
2798 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2799 {
2800 	struct pci_dev *pdev = bp->pdev;
2801 	u64 valid_bit = 0;
2802 	int i;
2803 
2804 	if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2805 		valid_bit = PTU_PTE_VALID;
2806 	if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
2807 		size_t pg_tbl_size = rmem->nr_pages * 8;
2808 
2809 		if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2810 			pg_tbl_size = rmem->page_size;
2811 		rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
2812 						  &rmem->pg_tbl_map,
2813 						  GFP_KERNEL);
2814 		if (!rmem->pg_tbl)
2815 			return -ENOMEM;
2816 	}
2817 
2818 	for (i = 0; i < rmem->nr_pages; i++) {
2819 		u64 extra_bits = valid_bit;
2820 
2821 		rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2822 						     rmem->page_size,
2823 						     &rmem->dma_arr[i],
2824 						     GFP_KERNEL);
2825 		if (!rmem->pg_arr[i])
2826 			return -ENOMEM;
2827 
2828 		if (rmem->init_val)
2829 			memset(rmem->pg_arr[i], rmem->init_val,
2830 			       rmem->page_size);
2831 		if (rmem->nr_pages > 1 || rmem->depth > 0) {
2832 			if (i == rmem->nr_pages - 2 &&
2833 			    (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2834 				extra_bits |= PTU_PTE_NEXT_TO_LAST;
2835 			else if (i == rmem->nr_pages - 1 &&
2836 				 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2837 				extra_bits |= PTU_PTE_LAST;
2838 			rmem->pg_tbl[i] =
2839 				cpu_to_le64(rmem->dma_arr[i] | extra_bits);
2840 		}
2841 	}
2842 
2843 	if (rmem->vmem_size) {
2844 		*rmem->vmem = vzalloc(rmem->vmem_size);
2845 		if (!(*rmem->vmem))
2846 			return -ENOMEM;
2847 	}
2848 	return 0;
2849 }
2850 
2851 static void bnxt_free_tpa_info(struct bnxt *bp)
2852 {
2853 	int i;
2854 
2855 	for (i = 0; i < bp->rx_nr_rings; i++) {
2856 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2857 
2858 		kfree(rxr->rx_tpa_idx_map);
2859 		rxr->rx_tpa_idx_map = NULL;
2860 		if (rxr->rx_tpa) {
2861 			kfree(rxr->rx_tpa[0].agg_arr);
2862 			rxr->rx_tpa[0].agg_arr = NULL;
2863 		}
2864 		kfree(rxr->rx_tpa);
2865 		rxr->rx_tpa = NULL;
2866 	}
2867 }
2868 
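/* Allocate per-ring TPA state.  On P5 chips the firmware advertises the
 * number of concurrent aggregations (max_tpa_v2); agg completions are
 * buffered in one shared array per ring, carved into MAX_SKB_FRAGS-sized
 * slices, one per aggregation ID.
 */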
2869 static int bnxt_alloc_tpa_info(struct bnxt *bp)
2870 {
2871 	int i, j, total_aggs = 0;
2872 
2873 	bp->max_tpa = MAX_TPA;
2874 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
2875 		if (!bp->max_tpa_v2)
2876 			return 0;
2877 		bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
2878 		total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
2879 	}
2880 
2881 	for (i = 0; i < bp->rx_nr_rings; i++) {
2882 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2883 		struct rx_agg_cmp *agg;
2884 
2885 		rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
2886 				      GFP_KERNEL);
2887 		if (!rxr->rx_tpa)
2888 			return -ENOMEM;
2889 
2890 		if (!(bp->flags & BNXT_FLAG_CHIP_P5))
2891 			continue;
2892 		agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
2893 		rxr->rx_tpa[0].agg_arr = agg;
2894 		if (!agg)
2895 			return -ENOMEM;
2896 		for (j = 1; j < bp->max_tpa; j++)
2897 			rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
2898 		rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
2899 					      GFP_KERNEL);
2900 		if (!rxr->rx_tpa_idx_map)
2901 			return -ENOMEM;
2902 	}
2903 	return 0;
2904 }
2905 
2906 static void bnxt_free_rx_rings(struct bnxt *bp)
2907 {
2908 	int i;
2909 
2910 	if (!bp->rx_ring)
2911 		return;
2912 
2913 	bnxt_free_tpa_info(bp);
2914 	for (i = 0; i < bp->rx_nr_rings; i++) {
2915 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2916 		struct bnxt_ring_struct *ring;
2917 
2918 		if (rxr->xdp_prog)
2919 			bpf_prog_put(rxr->xdp_prog);
2920 
2921 		if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
2922 			xdp_rxq_info_unreg(&rxr->xdp_rxq);
2923 
2924 		page_pool_destroy(rxr->page_pool);
2925 		rxr->page_pool = NULL;
2926 
2927 		kfree(rxr->rx_agg_bmap);
2928 		rxr->rx_agg_bmap = NULL;
2929 
2930 		ring = &rxr->rx_ring_struct;
2931 		bnxt_free_ring(bp, &ring->ring_mem);
2932 
2933 		ring = &rxr->rx_agg_ring_struct;
2934 		bnxt_free_ring(bp, &ring->ring_mem);
2935 	}
2936 }
2937 
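/* Create a page_pool for one RX ring, sized to the ring and allocated
 * NUMA-local to the device, so that RX pages can be recycled cheaply
 * instead of bouncing through the page allocator.
 */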
2938 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
2939 				   struct bnxt_rx_ring_info *rxr)
2940 {
2941 	struct page_pool_params pp = { 0 };
2942 
2943 	pp.pool_size = bp->rx_ring_size;
2944 	pp.nid = dev_to_node(&bp->pdev->dev);
2945 	pp.dev = &bp->pdev->dev;
2946 	pp.dma_dir = DMA_BIDIRECTIONAL;
2947 
2948 	rxr->page_pool = page_pool_create(&pp);
2949 	if (IS_ERR(rxr->page_pool)) {
2950 		int err = PTR_ERR(rxr->page_pool);
2951 
2952 		rxr->page_pool = NULL;
2953 		return err;
2954 	}
2955 	return 0;
2956 }
2957 
2958 static int bnxt_alloc_rx_rings(struct bnxt *bp)
2959 {
2960 	int i, rc = 0, agg_rings = 0;
2961 
2962 	if (!bp->rx_ring)
2963 		return -ENOMEM;
2964 
2965 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
2966 		agg_rings = 1;
2967 
2968 	for (i = 0; i < bp->rx_nr_rings; i++) {
2969 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2970 		struct bnxt_ring_struct *ring;
2971 
2972 		ring = &rxr->rx_ring_struct;
2973 
2974 		rc = bnxt_alloc_rx_page_pool(bp, rxr);
2975 		if (rc)
2976 			return rc;
2977 
2978 		rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
2979 		if (rc < 0)
2980 			return rc;
2981 
2982 		rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
2983 						MEM_TYPE_PAGE_POOL,
2984 						rxr->page_pool);
2985 		if (rc) {
2986 			xdp_rxq_info_unreg(&rxr->xdp_rxq);
2987 			return rc;
2988 		}
2989 
2990 		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2991 		if (rc)
2992 			return rc;
2993 
2994 		ring->grp_idx = i;
2995 		if (agg_rings) {
2996 			u16 mem_size;
2997 
2998 			ring = &rxr->rx_agg_ring_struct;
2999 			rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3000 			if (rc)
3001 				return rc;
3002 
3003 			ring->grp_idx = i;
3004 			rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3005 			mem_size = rxr->rx_agg_bmap_size / 8;
3006 			rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3007 			if (!rxr->rx_agg_bmap)
3008 				return -ENOMEM;
3009 		}
3010 	}
3011 	if (bp->flags & BNXT_FLAG_TPA)
3012 		rc = bnxt_alloc_tpa_info(bp);
3013 	return rc;
3014 }
3015 
3016 static void bnxt_free_tx_rings(struct bnxt *bp)
3017 {
3018 	int i;
3019 	struct pci_dev *pdev = bp->pdev;
3020 
3021 	if (!bp->tx_ring)
3022 		return;
3023 
3024 	for (i = 0; i < bp->tx_nr_rings; i++) {
3025 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3026 		struct bnxt_ring_struct *ring;
3027 
3028 		if (txr->tx_push) {
3029 			dma_free_coherent(&pdev->dev, bp->tx_push_size,
3030 					  txr->tx_push, txr->tx_push_mapping);
3031 			txr->tx_push = NULL;
3032 		}
3033 
3034 		ring = &txr->tx_ring_struct;
3035 
3036 		bnxt_free_ring(bp, &ring->ring_mem);
3037 	}
3038 }
3039 
3040 static int bnxt_alloc_tx_rings(struct bnxt *bp)
3041 {
3042 	int i, j, rc;
3043 	struct pci_dev *pdev = bp->pdev;
3044 
3045 	bp->tx_push_size = 0;
3046 	if (bp->tx_push_thresh) {
3047 		int push_size;
3048 
3049 		push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
3050 					bp->tx_push_thresh);
3051 
3052 		if (push_size > 256) {
3053 			push_size = 0;
3054 			bp->tx_push_thresh = 0;
3055 		}
3056 
3057 		bp->tx_push_size = push_size;
3058 	}
3059 
3060 	for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
3061 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3062 		struct bnxt_ring_struct *ring;
3063 		u8 qidx;
3064 
3065 		ring = &txr->tx_ring_struct;
3066 
3067 		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3068 		if (rc)
3069 			return rc;
3070 
3071 		ring->grp_idx = txr->bnapi->index;
3072 		if (bp->tx_push_size) {
3073 			dma_addr_t mapping;
3074 
3075 			/* One pre-allocated DMA buffer to back up the
3076 			 * TX push operation
3077 			 */
3078 			txr->tx_push = dma_alloc_coherent(&pdev->dev,
3079 						bp->tx_push_size,
3080 						&txr->tx_push_mapping,
3081 						GFP_KERNEL);
3082 
3083 			if (!txr->tx_push)
3084 				return -ENOMEM;
3085 
3086 			mapping = txr->tx_push_mapping +
3087 				sizeof(struct tx_push_bd);
3088 			txr->data_mapping = cpu_to_le64(mapping);
3089 		}
3090 		qidx = bp->tc_to_qidx[j];
3091 		ring->queue_id = bp->q_info[qidx].queue_id;
3092 		if (i < bp->tx_nr_rings_xdp)
3093 			continue;
3094 		if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
3095 			j++;
3096 	}
3097 	return 0;
3098 }
3099 
3100 static void bnxt_free_cp_rings(struct bnxt *bp)
3101 {
3102 	int i;
3103 
3104 	if (!bp->bnapi)
3105 		return;
3106 
3107 	for (i = 0; i < bp->cp_nr_rings; i++) {
3108 		struct bnxt_napi *bnapi = bp->bnapi[i];
3109 		struct bnxt_cp_ring_info *cpr;
3110 		struct bnxt_ring_struct *ring;
3111 		int j;
3112 
3113 		if (!bnapi)
3114 			continue;
3115 
3116 		cpr = &bnapi->cp_ring;
3117 		ring = &cpr->cp_ring_struct;
3118 
3119 		bnxt_free_ring(bp, &ring->ring_mem);
3120 
3121 		for (j = 0; j < 2; j++) {
3122 			struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3123 
3124 			if (cpr2) {
3125 				ring = &cpr2->cp_ring_struct;
3126 				bnxt_free_ring(bp, &ring->ring_mem);
3127 				kfree(cpr2);
3128 				cpr->cp_ring_arr[j] = NULL;
3129 			}
3130 		}
3131 	}
3132 }
3133 
3134 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
3135 {
3136 	struct bnxt_ring_mem_info *rmem;
3137 	struct bnxt_ring_struct *ring;
3138 	struct bnxt_cp_ring_info *cpr;
3139 	int rc;
3140 
3141 	cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
3142 	if (!cpr)
3143 		return NULL;
3144 
3145 	ring = &cpr->cp_ring_struct;
3146 	rmem = &ring->ring_mem;
3147 	rmem->nr_pages = bp->cp_nr_pages;
3148 	rmem->page_size = HW_CMPD_RING_SIZE;
3149 	rmem->pg_arr = (void **)cpr->cp_desc_ring;
3150 	rmem->dma_arr = cpr->cp_desc_mapping;
3151 	rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
3152 	rc = bnxt_alloc_ring(bp, rmem);
3153 	if (rc) {
3154 		bnxt_free_ring(bp, rmem);
3155 		kfree(cpr);
3156 		cpr = NULL;
3157 	}
3158 	return cpr;
3159 }
3160 
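/* Allocate completion ring state for every NAPI instance.  On P5 chips
 * the per-vector ring is an NQ, so dedicated RX and/or TX completion
 * sub-rings are allocated and attached via cp_ring_arr[].  map_idx
 * skips over MSI-X vectors reserved for ULPs such as the RDMA driver.
 */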
3161 static int bnxt_alloc_cp_rings(struct bnxt *bp)
3162 {
3163 	bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
3164 	int i, rc, ulp_base_vec, ulp_msix;
3165 
3166 	ulp_msix = bnxt_get_ulp_msix_num(bp);
3167 	ulp_base_vec = bnxt_get_ulp_msix_base(bp);
3168 	for (i = 0; i < bp->cp_nr_rings; i++) {
3169 		struct bnxt_napi *bnapi = bp->bnapi[i];
3170 		struct bnxt_cp_ring_info *cpr;
3171 		struct bnxt_ring_struct *ring;
3172 
3173 		if (!bnapi)
3174 			continue;
3175 
3176 		cpr = &bnapi->cp_ring;
3177 		cpr->bnapi = bnapi;
3178 		ring = &cpr->cp_ring_struct;
3179 
3180 		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3181 		if (rc)
3182 			return rc;
3183 
3184 		if (ulp_msix && i >= ulp_base_vec)
3185 			ring->map_idx = i + ulp_msix;
3186 		else
3187 			ring->map_idx = i;
3188 
3189 		if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3190 			continue;
3191 
3192 		if (i < bp->rx_nr_rings) {
3193 			struct bnxt_cp_ring_info *cpr2 =
3194 				bnxt_alloc_cp_sub_ring(bp);
3195 
3196 			cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
3197 			if (!cpr2)
3198 				return -ENOMEM;
3199 			cpr2->bnapi = bnapi;
3200 		}
3201 		if ((sh && i < bp->tx_nr_rings) ||
3202 		    (!sh && i >= bp->rx_nr_rings)) {
3203 			struct bnxt_cp_ring_info *cpr2 =
3204 				bnxt_alloc_cp_sub_ring(bp);
3205 
3206 			cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
3207 			if (!cpr2)
3208 				return -ENOMEM;
3209 			cpr2->bnapi = bnapi;
3210 		}
3211 	}
3212 	return 0;
3213 }
3214 
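/* Wire up the ring_mem descriptors (page counts, page size, descriptor
 * and DMA address arrays, and the software ring vmem) for every
 * completion, RX, RX agg, and TX ring ahead of the actual allocations.
 */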
3215 static void bnxt_init_ring_struct(struct bnxt *bp)
3216 {
3217 	int i;
3218 
3219 	for (i = 0; i < bp->cp_nr_rings; i++) {
3220 		struct bnxt_napi *bnapi = bp->bnapi[i];
3221 		struct bnxt_ring_mem_info *rmem;
3222 		struct bnxt_cp_ring_info *cpr;
3223 		struct bnxt_rx_ring_info *rxr;
3224 		struct bnxt_tx_ring_info *txr;
3225 		struct bnxt_ring_struct *ring;
3226 
3227 		if (!bnapi)
3228 			continue;
3229 
3230 		cpr = &bnapi->cp_ring;
3231 		ring = &cpr->cp_ring_struct;
3232 		rmem = &ring->ring_mem;
3233 		rmem->nr_pages = bp->cp_nr_pages;
3234 		rmem->page_size = HW_CMPD_RING_SIZE;
3235 		rmem->pg_arr = (void **)cpr->cp_desc_ring;
3236 		rmem->dma_arr = cpr->cp_desc_mapping;
3237 		rmem->vmem_size = 0;
3238 
3239 		rxr = bnapi->rx_ring;
3240 		if (!rxr)
3241 			goto skip_rx;
3242 
3243 		ring = &rxr->rx_ring_struct;
3244 		rmem = &ring->ring_mem;
3245 		rmem->nr_pages = bp->rx_nr_pages;
3246 		rmem->page_size = HW_RXBD_RING_SIZE;
3247 		rmem->pg_arr = (void **)rxr->rx_desc_ring;
3248 		rmem->dma_arr = rxr->rx_desc_mapping;
3249 		rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3250 		rmem->vmem = (void **)&rxr->rx_buf_ring;
3251 
3252 		ring = &rxr->rx_agg_ring_struct;
3253 		rmem = &ring->ring_mem;
3254 		rmem->nr_pages = bp->rx_agg_nr_pages;
3255 		rmem->page_size = HW_RXBD_RING_SIZE;
3256 		rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
3257 		rmem->dma_arr = rxr->rx_agg_desc_mapping;
3258 		rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3259 		rmem->vmem = (void **)&rxr->rx_agg_ring;
3260 
3261 skip_rx:
3262 		txr = bnapi->tx_ring;
3263 		if (!txr)
3264 			continue;
3265 
3266 		ring = &txr->tx_ring_struct;
3267 		rmem = &ring->ring_mem;
3268 		rmem->nr_pages = bp->tx_nr_pages;
3269 		rmem->page_size = HW_RXBD_RING_SIZE;
3270 		rmem->pg_arr = (void **)txr->tx_desc_ring;
3271 		rmem->dma_arr = txr->tx_desc_mapping;
3272 		rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3273 		rmem->vmem = (void **)&txr->tx_buf_ring;
3274 	}
3275 }
3276 
3277 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
3278 {
3279 	int i;
3280 	u32 prod;
3281 	struct rx_bd **rx_buf_ring;
3282 
3283 	rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
3284 	for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
3285 		int j;
3286 		struct rx_bd *rxbd;
3287 
3288 		rxbd = rx_buf_ring[i];
3289 		if (!rxbd)
3290 			continue;
3291 
3292 		for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
3293 			rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
3294 			rxbd->rx_bd_opaque = prod;
3295 		}
3296 	}
3297 }
3298 
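/* Fill one RX ring with buffers: data buffers up to rx_ring_size, agg
 * pages when aggregation rings are enabled, and one data buffer per
 * possible TPA aggregation.  A short fill only triggers a warning, but
 * failure to allocate a TPA buffer is fatal.
 */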
3299 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
3300 {
3301 	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
3302 	struct net_device *dev = bp->dev;
3303 	u32 prod;
3304 	int i;
3305 
3306 	prod = rxr->rx_prod;
3307 	for (i = 0; i < bp->rx_ring_size; i++) {
3308 		if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
3309 			netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
3310 				    ring_nr, i, bp->rx_ring_size);
3311 			break;
3312 		}
3313 		prod = NEXT_RX(prod);
3314 	}
3315 	rxr->rx_prod = prod;
3316 
3317 	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3318 		return 0;
3319 
3320 	prod = rxr->rx_agg_prod;
3321 	for (i = 0; i < bp->rx_agg_ring_size; i++) {
3322 		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
3323 			netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
3324 				    ring_nr, i, bp->rx_agg_ring_size);
3325 			break;
3326 		}
3327 		prod = NEXT_RX_AGG(prod);
3328 	}
3329 	rxr->rx_agg_prod = prod;
3330 
3331 	if (rxr->rx_tpa) {
3332 		dma_addr_t mapping;
3333 		u8 *data;
3334 
3335 		for (i = 0; i < bp->max_tpa; i++) {
3336 			data = __bnxt_alloc_rx_data(bp, &mapping, GFP_KERNEL);
3337 			if (!data)
3338 				return -ENOMEM;
3339 
3340 			rxr->rx_tpa[i].data = data;
3341 			rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
3342 			rxr->rx_tpa[i].mapping = mapping;
3343 		}
3344 	}
3345 	return 0;
3346 }
3347 
3348 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
3349 {
3350 	struct bnxt_rx_ring_info *rxr;
3351 	struct bnxt_ring_struct *ring;
3352 	u32 type;
3353 
3354 	type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
3355 		RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
3356 
3357 	if (NET_IP_ALIGN == 2)
3358 		type |= RX_BD_FLAGS_SOP;
3359 
3360 	rxr = &bp->rx_ring[ring_nr];
3361 	ring = &rxr->rx_ring_struct;
3362 	bnxt_init_rxbd_pages(ring, type);
3363 
3364 	if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
3365 		bpf_prog_add(bp->xdp_prog, 1);
3366 		rxr->xdp_prog = bp->xdp_prog;
3367 	}
3368 	ring->fw_ring_id = INVALID_HW_RING_ID;
3369 
3370 	ring = &rxr->rx_agg_ring_struct;
3371 	ring->fw_ring_id = INVALID_HW_RING_ID;
3372 
3373 	if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
3374 		type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
3375 			RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
3376 
3377 		bnxt_init_rxbd_pages(ring, type);
3378 	}
3379 
3380 	return bnxt_alloc_one_rx_ring(bp, ring_nr);
3381 }
3382 
3383 static void bnxt_init_cp_rings(struct bnxt *bp)
3384 {
3385 	int i, j;
3386 
3387 	for (i = 0; i < bp->cp_nr_rings; i++) {
3388 		struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
3389 		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3390 
3391 		ring->fw_ring_id = INVALID_HW_RING_ID;
3392 		cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3393 		cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3394 		for (j = 0; j < 2; j++) {
3395 			struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3396 
3397 			if (!cpr2)
3398 				continue;
3399 
3400 			ring = &cpr2->cp_ring_struct;
3401 			ring->fw_ring_id = INVALID_HW_RING_ID;
3402 			cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3403 			cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3404 		}
3405 	}
3406 }
3407 
3408 static int bnxt_init_rx_rings(struct bnxt *bp)
3409 {
3410 	int i, rc = 0;
3411 
3412 	if (BNXT_RX_PAGE_MODE(bp)) {
3413 		bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
3414 		bp->rx_dma_offset = XDP_PACKET_HEADROOM;
3415 	} else {
3416 		bp->rx_offset = BNXT_RX_OFFSET;
3417 		bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
3418 	}
3419 
3420 	for (i = 0; i < bp->rx_nr_rings; i++) {
3421 		rc = bnxt_init_one_rx_ring(bp, i);
3422 		if (rc)
3423 			break;
3424 	}
3425 
3426 	return rc;
3427 }
3428 
3429 static int bnxt_init_tx_rings(struct bnxt *bp)
3430 {
3431 	u16 i;
3432 
3433 	bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
3434 				   BNXT_MIN_TX_DESC_CNT);
3435 
3436 	for (i = 0; i < bp->tx_nr_rings; i++) {
3437 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3438 		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3439 
3440 		ring->fw_ring_id = INVALID_HW_RING_ID;
3441 	}
3442 
3443 	return 0;
3444 }
3445 
3446 static void bnxt_free_ring_grps(struct bnxt *bp)
3447 {
3448 	kfree(bp->grp_info);
3449 	bp->grp_info = NULL;
3450 }
3451 
3452 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
3453 {
3454 	int i;
3455 
3456 	if (irq_re_init) {
3457 		bp->grp_info = kcalloc(bp->cp_nr_rings,
3458 				       sizeof(struct bnxt_ring_grp_info),
3459 				       GFP_KERNEL);
3460 		if (!bp->grp_info)
3461 			return -ENOMEM;
3462 	}
3463 	for (i = 0; i < bp->cp_nr_rings; i++) {
3464 		if (irq_re_init)
3465 			bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3466 		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3467 		bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3468 		bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3469 		bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3470 	}
3471 	return 0;
3472 }
3473 
3474 static void bnxt_free_vnics(struct bnxt *bp)
3475 {
3476 	kfree(bp->vnic_info);
3477 	bp->vnic_info = NULL;
3478 	bp->nr_vnics = 0;
3479 }
3480 
3481 static int bnxt_alloc_vnics(struct bnxt *bp)
3482 {
3483 	int num_vnics = 1;
3484 
3485 #ifdef CONFIG_RFS_ACCEL
3486 	if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
3487 		num_vnics += bp->rx_nr_rings;
3488 #endif
3489 
3490 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3491 		num_vnics++;
3492 
3493 	bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3494 				GFP_KERNEL);
3495 	if (!bp->vnic_info)
3496 		return -ENOMEM;
3497 
3498 	bp->nr_vnics = num_vnics;
3499 	return 0;
3500 }
3501 
3502 static void bnxt_init_vnics(struct bnxt *bp)
3503 {
3504 	int i;
3505 
3506 	for (i = 0; i < bp->nr_vnics; i++) {
3507 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3508 		int j;
3509 
3510 		vnic->fw_vnic_id = INVALID_HW_RING_ID;
3511 		for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3512 			vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3513 
3514 		vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3515 
3516 		if (bp->vnic_info[i].rss_hash_key) {
3517 			if (i == 0)
3518 				prandom_bytes(vnic->rss_hash_key,
3519 					      HW_HASH_KEY_SIZE);
3520 			else
3521 				memcpy(vnic->rss_hash_key,
3522 				       bp->vnic_info[0].rss_hash_key,
3523 				       HW_HASH_KEY_SIZE);
3524 		}
3525 	}
3526 }
3527 
3528 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3529 {
3530 	int pages;
3531 
3532 	pages = ring_size / desc_per_pg;
3533 
3534 	if (!pages)
3535 		return 1;
3536 
3537 	pages++;
3538 
3539 	while (pages & (pages - 1))
3540 		pages++;
3541 
3542 	return pages;
3543 }
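
/* Editor's illustration (not in the original source): the loop above
 * rounds the page count up to the next power of 2.  Assuming 256
 * descriptors per ring page:
 *
 *	bnxt_calc_nr_ring_pages(200, 256)  returns 1 (fits in one page)
 *	bnxt_calc_nr_ring_pages(256, 256)  returns 2 (pages = 1, then ++)
 *	bnxt_calc_nr_ring_pages(1300, 256) returns 8 (5 -> 6 -> round to 8)
 *
 * A power of 2 keeps (nr_pages * desc_per_pg - 1) usable as a ring mask,
 * as done in bnxt_set_ring_params() below.
 */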
3544 
3545 void bnxt_set_tpa_flags(struct bnxt *bp)
3546 {
3547 	bp->flags &= ~BNXT_FLAG_TPA;
3548 	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3549 		return;
3550 	if (bp->dev->features & NETIF_F_LRO)
3551 		bp->flags |= BNXT_FLAG_LRO;
3552 	else if (bp->dev->features & NETIF_F_GRO_HW)
3553 		bp->flags |= BNXT_FLAG_GRO;
3554 }
3555 
3556 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3557  * be set on entry.
3558  */
3559 void bnxt_set_ring_params(struct bnxt *bp)
3560 {
3561 	u32 ring_size, rx_size, rx_space, max_rx_cmpl;
3562 	u32 agg_factor = 0, agg_ring_size = 0;
3563 
3564 	/* 8 for CRC and VLAN */
3565 	rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3566 
3567 	rx_space = rx_size + NET_SKB_PAD +
3568 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3569 
3570 	bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3571 	ring_size = bp->rx_ring_size;
3572 	bp->rx_agg_ring_size = 0;
3573 	bp->rx_agg_nr_pages = 0;
3574 
3575 	if (bp->flags & BNXT_FLAG_TPA)
3576 		agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
3577 
3578 	bp->flags &= ~BNXT_FLAG_JUMBO;
3579 	if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
3580 		u32 jumbo_factor;
3581 
3582 		bp->flags |= BNXT_FLAG_JUMBO;
3583 		jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3584 		if (jumbo_factor > agg_factor)
3585 			agg_factor = jumbo_factor;
3586 	}
3587 	agg_ring_size = ring_size * agg_factor;
3588 
3589 	if (agg_ring_size) {
3590 		bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3591 							RX_DESC_CNT);
3592 		if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3593 			u32 tmp = agg_ring_size;
3594 
3595 			bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3596 			agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3597 			netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3598 				    tmp, agg_ring_size);
3599 		}
3600 		bp->rx_agg_ring_size = agg_ring_size;
3601 		bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3602 		rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3603 		rx_space = rx_size + NET_SKB_PAD +
3604 			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3605 	}
3606 
3607 	bp->rx_buf_use_size = rx_size;
3608 	bp->rx_buf_size = rx_space;
3609 
3610 	bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3611 	bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3612 
3613 	ring_size = bp->tx_ring_size;
3614 	bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3615 	bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3616 
3617 	max_rx_cmpl = bp->rx_ring_size;
3618 	/* MAX TPA needs to be added because TPA_START completions are
3619 	 * immediately recycled, so the TPA completions are not bound by
3620 	 * the RX ring size.
3621 	 */
3622 	if (bp->flags & BNXT_FLAG_TPA)
3623 		max_rx_cmpl += bp->max_tpa;
3624 	/* RX and TPA completions are 32-byte, all others are 16-byte */
3625 	ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
3626 	bp->cp_ring_size = ring_size;
3627 
3628 	bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3629 	if (bp->cp_nr_pages > MAX_CP_PAGES) {
3630 		bp->cp_nr_pages = MAX_CP_PAGES;
3631 		bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3632 		netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3633 			    ring_size, bp->cp_ring_size);
3634 	}
3635 	bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3636 	bp->cp_ring_mask = bp->cp_bit - 1;
3637 }
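
/* Editor's worked example (assumed values, for illustration only): with
 * rx_ring_size = 2048, tx_ring_size = 2048, TPA enabled with max_tpa = 64
 * and agg_ring_size = 4096:
 *
 *	max_rx_cmpl  = 2048 + 64 = 2112
 *	cp_ring_size = 2112 * 2 + 4096 + 2048 = 10368
 *
 * The factor of 2 reflects that RX and TPA completions occupy two 16-byte
 * completion slots each, per the comment above.
 */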
3638 
3639 /* Changing allocation mode of RX rings.
3640  * TODO: Update when extending xdp_rxq_info to support allocation modes.
3641  */
3642 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
3643 {
3644 	if (page_mode) {
3645 		if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3646 			return -EOPNOTSUPP;
3647 		bp->dev->max_mtu =
3648 			min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
3649 		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3650 		bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
3651 		bp->rx_dir = DMA_BIDIRECTIONAL;
3652 		bp->rx_skb_func = bnxt_rx_page_skb;
3653 		/* Disable LRO or GRO_HW */
3654 		netdev_update_features(bp->dev);
3655 	} else {
3656 		bp->dev->max_mtu = bp->max_mtu;
3657 		bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3658 		bp->rx_dir = DMA_FROM_DEVICE;
3659 		bp->rx_skb_func = bnxt_rx_skb;
3660 	}
3661 	return 0;
3662 }
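
/* Editor's note: page mode maps RX buffers DMA_BIDIRECTIONAL so that an
 * attached XDP program can transmit the same page back out (XDP_TX)
 * without remapping; the normal SKB path only needs DMA_FROM_DEVICE.
 */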
3663 
3664 static void bnxt_free_vnic_attributes(struct bnxt *bp)
3665 {
3666 	int i;
3667 	struct bnxt_vnic_info *vnic;
3668 	struct pci_dev *pdev = bp->pdev;
3669 
3670 	if (!bp->vnic_info)
3671 		return;
3672 
3673 	for (i = 0; i < bp->nr_vnics; i++) {
3674 		vnic = &bp->vnic_info[i];
3675 
3676 		kfree(vnic->fw_grp_ids);
3677 		vnic->fw_grp_ids = NULL;
3678 
3679 		kfree(vnic->uc_list);
3680 		vnic->uc_list = NULL;
3681 
3682 		if (vnic->mc_list) {
3683 			dma_free_coherent(&pdev->dev, vnic->mc_list_size,
3684 					  vnic->mc_list, vnic->mc_list_mapping);
3685 			vnic->mc_list = NULL;
3686 		}
3687 
3688 		if (vnic->rss_table) {
3689 			dma_free_coherent(&pdev->dev, vnic->rss_table_size,
3690 					  vnic->rss_table,
3691 					  vnic->rss_table_dma_addr);
3692 			vnic->rss_table = NULL;
3693 		}
3694 
3695 		vnic->rss_hash_key = NULL;
3696 		vnic->flags = 0;
3697 	}
3698 }
3699 
3700 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3701 {
3702 	int i, rc = 0, size;
3703 	struct bnxt_vnic_info *vnic;
3704 	struct pci_dev *pdev = bp->pdev;
3705 	int max_rings;
3706 
3707 	for (i = 0; i < bp->nr_vnics; i++) {
3708 		vnic = &bp->vnic_info[i];
3709 
3710 		if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
3711 			int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
3712 
3713 			if (mem_size > 0) {
3714 				vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
3715 				if (!vnic->uc_list) {
3716 					rc = -ENOMEM;
3717 					goto out;
3718 				}
3719 			}
3720 		}
3721 
3722 		if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
3723 			vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
3724 			vnic->mc_list =
3725 				dma_alloc_coherent(&pdev->dev,
3726 						   vnic->mc_list_size,
3727 						   &vnic->mc_list_mapping,
3728 						   GFP_KERNEL);
3729 			if (!vnic->mc_list) {
3730 				rc = -ENOMEM;
3731 				goto out;
3732 			}
3733 		}
3734 
3735 		if (bp->flags & BNXT_FLAG_CHIP_P5)
3736 			goto vnic_skip_grps;
3737 
3738 		if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3739 			max_rings = bp->rx_nr_rings;
3740 		else
3741 			max_rings = 1;
3742 
3743 		vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3744 		if (!vnic->fw_grp_ids) {
3745 			rc = -ENOMEM;
3746 			goto out;
3747 		}
3748 vnic_skip_grps:
3749 		if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3750 		    !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3751 			continue;
3752 
3753 		/* Allocate rss table and hash key */
3754 		size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3755 		if (bp->flags & BNXT_FLAG_CHIP_P5)
3756 			size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
3757 
3758 		vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
3759 		vnic->rss_table = dma_alloc_coherent(&pdev->dev,
3760 						     vnic->rss_table_size,
3761 						     &vnic->rss_table_dma_addr,
3762 						     GFP_KERNEL);
3763 		if (!vnic->rss_table) {
3764 			rc = -ENOMEM;
3765 			goto out;
3766 		}
3767 
3768 		vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
3769 		vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
3770 	}
3771 	return 0;
3772 
3773 out:
3774 	return rc;
3775 }
3776 
3777 static void bnxt_free_hwrm_resources(struct bnxt *bp)
3778 {
3779 	struct pci_dev *pdev = bp->pdev;
3780 
3781 	if (bp->hwrm_cmd_resp_addr) {
3782 		dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
3783 				  bp->hwrm_cmd_resp_dma_addr);
3784 		bp->hwrm_cmd_resp_addr = NULL;
3785 	}
3786 
3787 	if (bp->hwrm_cmd_kong_resp_addr) {
3788 		dma_free_coherent(&pdev->dev, PAGE_SIZE,
3789 				  bp->hwrm_cmd_kong_resp_addr,
3790 				  bp->hwrm_cmd_kong_resp_dma_addr);
3791 		bp->hwrm_cmd_kong_resp_addr = NULL;
3792 	}
3793 }
3794 
3795 static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp)
3796 {
3797 	struct pci_dev *pdev = bp->pdev;
3798 
3799 	if (bp->hwrm_cmd_kong_resp_addr)
3800 		return 0;
3801 
3802 	bp->hwrm_cmd_kong_resp_addr =
3803 		dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3804 				   &bp->hwrm_cmd_kong_resp_dma_addr,
3805 				   GFP_KERNEL);
3806 	if (!bp->hwrm_cmd_kong_resp_addr)
3807 		return -ENOMEM;
3808 
3809 	return 0;
3810 }
3811 
3812 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
3813 {
3814 	struct pci_dev *pdev = bp->pdev;
3815 
3816 	bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3817 						   &bp->hwrm_cmd_resp_dma_addr,
3818 						   GFP_KERNEL);
3819 	if (!bp->hwrm_cmd_resp_addr)
3820 		return -ENOMEM;
3821 
3822 	return 0;
3823 }
3824 
3825 static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
3826 {
3827 	if (bp->hwrm_short_cmd_req_addr) {
3828 		struct pci_dev *pdev = bp->pdev;
3829 
3830 		dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3831 				  bp->hwrm_short_cmd_req_addr,
3832 				  bp->hwrm_short_cmd_req_dma_addr);
3833 		bp->hwrm_short_cmd_req_addr = NULL;
3834 	}
3835 }
3836 
3837 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
3838 {
3839 	struct pci_dev *pdev = bp->pdev;
3840 
3841 	if (bp->hwrm_short_cmd_req_addr)
3842 		return 0;
3843 
3844 	bp->hwrm_short_cmd_req_addr =
3845 		dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3846 				   &bp->hwrm_short_cmd_req_dma_addr,
3847 				   GFP_KERNEL);
3848 	if (!bp->hwrm_short_cmd_req_addr)
3849 		return -ENOMEM;
3850 
3851 	return 0;
3852 }
3853 
3854 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
3855 {
3856 	kfree(stats->hw_masks);
3857 	stats->hw_masks = NULL;
3858 	kfree(stats->sw_stats);
3859 	stats->sw_stats = NULL;
3860 	if (stats->hw_stats) {
3861 		dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
3862 				  stats->hw_stats_map);
3863 		stats->hw_stats = NULL;
3864 	}
3865 }
3866 
3867 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
3868 				bool alloc_masks)
3869 {
3870 	stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
3871 					     &stats->hw_stats_map, GFP_KERNEL);
3872 	if (!stats->hw_stats)
3873 		return -ENOMEM;
3874 
3875 	stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
3876 	if (!stats->sw_stats)
3877 		goto stats_mem_err;
3878 
3879 	if (alloc_masks) {
3880 		stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
3881 		if (!stats->hw_masks)
3882 			goto stats_mem_err;
3883 	}
3884 	return 0;
3885 
3886 stats_mem_err:
3887 	bnxt_free_stats_mem(bp, stats);
3888 	return -ENOMEM;
3889 }
3890 
3891 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
3892 {
3893 	int i;
3894 
3895 	for (i = 0; i < count; i++)
3896 		mask_arr[i] = mask;
3897 }
3898 
3899 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
3900 {
3901 	int i;
3902 
3903 	for (i = 0; i < count; i++)
3904 		mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
3905 }
3906 
3907 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
3908 				    struct bnxt_stats_mem *stats)
3909 {
3910 	struct hwrm_func_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
3911 	struct hwrm_func_qstats_ext_input req = {0};
3912 	__le64 *hw_masks;
3913 	int rc;
3914 
3915 	if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
3916 	    !(bp->flags & BNXT_FLAG_CHIP_P5))
3917 		return -EOPNOTSUPP;
3918 
3919 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QSTATS_EXT, -1, -1);
3920 	req.fid = cpu_to_le16(0xffff);
3921 	req.flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
3922 	mutex_lock(&bp->hwrm_cmd_lock);
3923 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3924 	if (rc)
3925 		goto qstat_exit;
3926 
3927 	hw_masks = &resp->rx_ucast_pkts;
3928 	bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
3929 
3930 qstat_exit:
3931 	mutex_unlock(&bp->hwrm_cmd_lock);
3932 	return rc;
3933 }
3934 
3935 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
3936 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
3937 
3938 static void bnxt_init_stats(struct bnxt *bp)
3939 {
3940 	struct bnxt_napi *bnapi = bp->bnapi[0];
3941 	struct bnxt_cp_ring_info *cpr;
3942 	struct bnxt_stats_mem *stats;
3943 	__le64 *rx_stats, *tx_stats;
3944 	int rc, rx_count, tx_count;
3945 	u64 *rx_masks, *tx_masks;
3946 	u64 mask;
3947 	u8 flags;
3948 
3949 	cpr = &bnapi->cp_ring;
3950 	stats = &cpr->stats;
3951 	rc = bnxt_hwrm_func_qstat_ext(bp, stats);
3952 	if (rc) {
3953 		if (bp->flags & BNXT_FLAG_CHIP_P5)
3954 			mask = (1ULL << 48) - 1;
3955 		else
3956 			mask = -1ULL;
3957 		bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
3958 	}
3959 	if (bp->flags & BNXT_FLAG_PORT_STATS) {
3960 		stats = &bp->port_stats;
3961 		rx_stats = stats->hw_stats;
3962 		rx_masks = stats->hw_masks;
3963 		rx_count = sizeof(struct rx_port_stats) / 8;
3964 		tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
3965 		tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
3966 		tx_count = sizeof(struct tx_port_stats) / 8;
3967 
3968 		flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
3969 		rc = bnxt_hwrm_port_qstats(bp, flags);
3970 		if (rc) {
3971 			mask = (1ULL << 40) - 1;
3972 
3973 			bnxt_fill_masks(rx_masks, mask, rx_count);
3974 			bnxt_fill_masks(tx_masks, mask, tx_count);
3975 		} else {
3976 			bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
3977 			bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
3978 			bnxt_hwrm_port_qstats(bp, 0);
3979 		}
3980 	}
3981 	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
3982 		stats = &bp->rx_port_stats_ext;
3983 		rx_stats = stats->hw_stats;
3984 		rx_masks = stats->hw_masks;
3985 		rx_count = sizeof(struct rx_port_stats_ext) / 8;
3986 		stats = &bp->tx_port_stats_ext;
3987 		tx_stats = stats->hw_stats;
3988 		tx_masks = stats->hw_masks;
3989 		tx_count = sizeof(struct tx_port_stats_ext) / 8;
3990 
3991 		flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
3992 		rc = bnxt_hwrm_port_qstats_ext(bp, flags);
3993 		if (rc) {
3994 			mask = (1ULL << 40) - 1;
3995 
3996 			bnxt_fill_masks(rx_masks, mask, rx_count);
3997 			if (tx_stats)
3998 				bnxt_fill_masks(tx_masks, mask, tx_count);
3999 		} else {
4000 			bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4001 			if (tx_stats)
4002 				bnxt_copy_hw_masks(tx_masks, tx_stats,
4003 						   tx_count);
4004 			bnxt_hwrm_port_qstats_ext(bp, 0);
4005 		}
4006 	}
4007 }
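
/* Editor's note on the masks set up above: each mask records a counter's
 * hardware width so wraparound can be detected.  For example, with a
 * 48-bit counter (mask = (1ULL << 48) - 1), an accumulated software value
 * can be advanced with the generic idiom:
 *
 *	sw += (hw_now - hw_prev) & mask;
 *
 * which stays correct across a single wrap.  The 40-bit fallback for port
 * counters is inferred from the code above, not from documentation.
 */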
4008 
4009 static void bnxt_free_port_stats(struct bnxt *bp)
4010 {
4011 	bp->flags &= ~BNXT_FLAG_PORT_STATS;
4012 	bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
4013 
4014 	bnxt_free_stats_mem(bp, &bp->port_stats);
4015 	bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
4016 	bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
4017 }
4018 
4019 static void bnxt_free_ring_stats(struct bnxt *bp)
4020 {
4021 	int i;
4022 
4023 	if (!bp->bnapi)
4024 		return;
4025 
4026 	for (i = 0; i < bp->cp_nr_rings; i++) {
4027 		struct bnxt_napi *bnapi = bp->bnapi[i];
4028 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4029 
4030 		bnxt_free_stats_mem(bp, &cpr->stats);
4031 	}
4032 }
4033 
4034 static int bnxt_alloc_stats(struct bnxt *bp)
4035 {
4036 	u32 size, i;
4037 	int rc;
4038 
4039 	size = bp->hw_ring_stats_size;
4040 
4041 	for (i = 0; i < bp->cp_nr_rings; i++) {
4042 		struct bnxt_napi *bnapi = bp->bnapi[i];
4043 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4044 
4045 		cpr->stats.len = size;
4046 		rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
4047 		if (rc)
4048 			return rc;
4049 
4050 		cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4051 	}
4052 
4053 	if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
4054 		return 0;
4055 
4056 	if (bp->port_stats.hw_stats)
4057 		goto alloc_ext_stats;
4058 
4059 	bp->port_stats.len = BNXT_PORT_STATS_SIZE;
4060 	rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
4061 	if (rc)
4062 		return rc;
4063 
4064 	bp->flags |= BNXT_FLAG_PORT_STATS;
4065 
4066 alloc_ext_stats:
4067 	/* Display extended statistics only if FW supports it */
4068 	if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
4069 		if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
4070 			return 0;
4071 
4072 	if (bp->rx_port_stats_ext.hw_stats)
4073 		goto alloc_tx_ext_stats;
4074 
4075 	bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
4076 	rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
4077 	/* Extended stats are optional */
4078 	if (rc)
4079 		return 0;
4080 
4081 alloc_tx_ext_stats:
4082 	if (bp->tx_port_stats_ext.hw_stats)
4083 		return 0;
4084 
4085 	if (bp->hwrm_spec_code >= 0x10902 ||
4086 	    (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
4087 		bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
4088 		rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
4089 		/* Extended stats are optional */
4090 		if (rc)
4091 			return 0;
4092 	}
4093 	bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
4094 	return 0;
4095 }
4096 
4097 static void bnxt_clear_ring_indices(struct bnxt *bp)
4098 {
4099 	int i;
4100 
4101 	if (!bp->bnapi)
4102 		return;
4103 
4104 	for (i = 0; i < bp->cp_nr_rings; i++) {
4105 		struct bnxt_napi *bnapi = bp->bnapi[i];
4106 		struct bnxt_cp_ring_info *cpr;
4107 		struct bnxt_rx_ring_info *rxr;
4108 		struct bnxt_tx_ring_info *txr;
4109 
4110 		if (!bnapi)
4111 			continue;
4112 
4113 		cpr = &bnapi->cp_ring;
4114 		cpr->cp_raw_cons = 0;
4115 
4116 		txr = bnapi->tx_ring;
4117 		if (txr) {
4118 			txr->tx_prod = 0;
4119 			txr->tx_cons = 0;
4120 		}
4121 
4122 		rxr = bnapi->rx_ring;
4123 		if (rxr) {
4124 			rxr->rx_prod = 0;
4125 			rxr->rx_agg_prod = 0;
4126 			rxr->rx_sw_agg_prod = 0;
4127 			rxr->rx_next_cons = 0;
4128 		}
4129 	}
4130 }
4131 
4132 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
4133 {
4134 #ifdef CONFIG_RFS_ACCEL
4135 	int i;
4136 
4137 	/* We hold rtnl_lock and all our NAPIs have been disabled, so it
4138 	 * is safe to delete the hash table.
4139 	 */
4140 	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
4141 		struct hlist_head *head;
4142 		struct hlist_node *tmp;
4143 		struct bnxt_ntuple_filter *fltr;
4144 
4145 		head = &bp->ntp_fltr_hash_tbl[i];
4146 		hlist_for_each_entry_safe(fltr, tmp, head, hash) {
4147 			hlist_del(&fltr->hash);
4148 			kfree(fltr);
4149 		}
4150 	}
4151 	if (irq_reinit) {
4152 		kfree(bp->ntp_fltr_bmap);
4153 		bp->ntp_fltr_bmap = NULL;
4154 	}
4155 	bp->ntp_fltr_count = 0;
4156 #endif
4157 }
4158 
4159 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
4160 {
4161 #ifdef CONFIG_RFS_ACCEL
4162 	int i, rc = 0;
4163 
4164 	if (!(bp->flags & BNXT_FLAG_RFS))
4165 		return 0;
4166 
4167 	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
4168 		INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
4169 
4170 	bp->ntp_fltr_count = 0;
4171 	bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
4172 				    sizeof(long),
4173 				    GFP_KERNEL);
4174 
4175 	if (!bp->ntp_fltr_bmap)
4176 		rc = -ENOMEM;
4177 
4178 	return rc;
4179 #else
4180 	return 0;
4181 #endif
4182 }
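
/* Editor's note: kcalloc(BITS_TO_LONGS(n), sizeof(long), GFP_KERNEL), as
 * used above, is the long-array idiom for allocating an n-bit bitmap;
 * bitmap_zalloc(n, GFP_KERNEL) would be the equivalent modern helper.
 */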
4183 
4184 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
4185 {
4186 	bnxt_free_vnic_attributes(bp);
4187 	bnxt_free_tx_rings(bp);
4188 	bnxt_free_rx_rings(bp);
4189 	bnxt_free_cp_rings(bp);
4190 	bnxt_free_ntp_fltrs(bp, irq_re_init);
4191 	if (irq_re_init) {
4192 		bnxt_free_ring_stats(bp);
4193 		if (!(bp->fw_cap & BNXT_FW_CAP_PORT_STATS_NO_RESET) ||
4194 		    test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
4195 			bnxt_free_port_stats(bp);
4196 		bnxt_free_ring_grps(bp);
4197 		bnxt_free_vnics(bp);
4198 		kfree(bp->tx_ring_map);
4199 		bp->tx_ring_map = NULL;
4200 		kfree(bp->tx_ring);
4201 		bp->tx_ring = NULL;
4202 		kfree(bp->rx_ring);
4203 		bp->rx_ring = NULL;
4204 		kfree(bp->bnapi);
4205 		bp->bnapi = NULL;
4206 	} else {
4207 		bnxt_clear_ring_indices(bp);
4208 	}
4209 }
4210 
4211 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
4212 {
4213 	int i, j, rc, size, arr_size;
4214 	void *bnapi;
4215 
4216 	if (irq_re_init) {
4217 		/* Allocate bnapi mem pointer array and mem block for
4218 		 * all queues
4219 		 */
4220 		arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
4221 				bp->cp_nr_rings);
4222 		size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
4223 		bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
4224 		if (!bnapi)
4225 			return -ENOMEM;
4226 
4227 		bp->bnapi = bnapi;
4228 		bnapi += arr_size;
4229 		for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
4230 			bp->bnapi[i] = bnapi;
4231 			bp->bnapi[i]->index = i;
4232 			bp->bnapi[i]->bp = bp;
4233 			if (bp->flags & BNXT_FLAG_CHIP_P5) {
4234 				struct bnxt_cp_ring_info *cpr =
4235 					&bp->bnapi[i]->cp_ring;
4236 
4237 				cpr->cp_ring_struct.ring_mem.flags =
4238 					BNXT_RMEM_RING_PTE_FLAG;
4239 			}
4240 		}
4241 
4242 		bp->rx_ring = kcalloc(bp->rx_nr_rings,
4243 				      sizeof(struct bnxt_rx_ring_info),
4244 				      GFP_KERNEL);
4245 		if (!bp->rx_ring)
4246 			return -ENOMEM;
4247 
4248 		for (i = 0; i < bp->rx_nr_rings; i++) {
4249 			struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4250 
4251 			if (bp->flags & BNXT_FLAG_CHIP_P5) {
4252 				rxr->rx_ring_struct.ring_mem.flags =
4253 					BNXT_RMEM_RING_PTE_FLAG;
4254 				rxr->rx_agg_ring_struct.ring_mem.flags =
4255 					BNXT_RMEM_RING_PTE_FLAG;
4256 			}
4257 			rxr->bnapi = bp->bnapi[i];
4258 			bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
4259 		}
4260 
4261 		bp->tx_ring = kcalloc(bp->tx_nr_rings,
4262 				      sizeof(struct bnxt_tx_ring_info),
4263 				      GFP_KERNEL);
4264 		if (!bp->tx_ring)
4265 			return -ENOMEM;
4266 
4267 		bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
4268 					  GFP_KERNEL);
4269 
4270 		if (!bp->tx_ring_map)
4271 			return -ENOMEM;
4272 
4273 		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4274 			j = 0;
4275 		else
4276 			j = bp->rx_nr_rings;
4277 
4278 		for (i = 0; i < bp->tx_nr_rings; i++, j++) {
4279 			struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4280 
4281 			if (bp->flags & BNXT_FLAG_CHIP_P5)
4282 				txr->tx_ring_struct.ring_mem.flags =
4283 					BNXT_RMEM_RING_PTE_FLAG;
4284 			txr->bnapi = bp->bnapi[j];
4285 			bp->bnapi[j]->tx_ring = txr;
4286 			bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
4287 			if (i >= bp->tx_nr_rings_xdp) {
4288 				txr->txq_index = i - bp->tx_nr_rings_xdp;
4289 				bp->bnapi[j]->tx_int = bnxt_tx_int;
4290 			} else {
4291 				bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
4292 				bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
4293 			}
4294 		}
4295 
4296 		rc = bnxt_alloc_stats(bp);
4297 		if (rc)
4298 			goto alloc_mem_err;
4299 		bnxt_init_stats(bp);
4300 
4301 		rc = bnxt_alloc_ntp_fltrs(bp);
4302 		if (rc)
4303 			goto alloc_mem_err;
4304 
4305 		rc = bnxt_alloc_vnics(bp);
4306 		if (rc)
4307 			goto alloc_mem_err;
4308 	}
4309 
4310 	bnxt_init_ring_struct(bp);
4311 
4312 	rc = bnxt_alloc_rx_rings(bp);
4313 	if (rc)
4314 		goto alloc_mem_err;
4315 
4316 	rc = bnxt_alloc_tx_rings(bp);
4317 	if (rc)
4318 		goto alloc_mem_err;
4319 
4320 	rc = bnxt_alloc_cp_rings(bp);
4321 	if (rc)
4322 		goto alloc_mem_err;
4323 
4324 	bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
4325 				  BNXT_VNIC_UCAST_FLAG;
4326 	rc = bnxt_alloc_vnic_attributes(bp);
4327 	if (rc)
4328 		goto alloc_mem_err;
4329 	return 0;
4330 
4331 alloc_mem_err:
4332 	bnxt_free_mem(bp, true);
4333 	return rc;
4334 }
4335 
4336 static void bnxt_disable_int(struct bnxt *bp)
4337 {
4338 	int i;
4339 
4340 	if (!bp->bnapi)
4341 		return;
4342 
4343 	for (i = 0; i < bp->cp_nr_rings; i++) {
4344 		struct bnxt_napi *bnapi = bp->bnapi[i];
4345 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4346 		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4347 
4348 		if (ring->fw_ring_id != INVALID_HW_RING_ID)
4349 			bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
4350 	}
4351 }
4352 
4353 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
4354 {
4355 	struct bnxt_napi *bnapi = bp->bnapi[n];
4356 	struct bnxt_cp_ring_info *cpr;
4357 
4358 	cpr = &bnapi->cp_ring;
4359 	return cpr->cp_ring_struct.map_idx;
4360 }
4361 
4362 static void bnxt_disable_int_sync(struct bnxt *bp)
4363 {
4364 	int i;
4365 
4366 	atomic_inc(&bp->intr_sem);
4367 
4368 	bnxt_disable_int(bp);
4369 	for (i = 0; i < bp->cp_nr_rings; i++) {
4370 		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4371 
4372 		synchronize_irq(bp->irq_tbl[map_idx].vector);
4373 	}
4374 }
4375 
4376 static void bnxt_enable_int(struct bnxt *bp)
4377 {
4378 	int i;
4379 
4380 	atomic_set(&bp->intr_sem, 0);
4381 	for (i = 0; i < bp->cp_nr_rings; i++) {
4382 		struct bnxt_napi *bnapi = bp->bnapi[i];
4383 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4384 
4385 		bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
4386 	}
4387 }
4388 
4389 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
4390 			    u16 cmpl_ring, u16 target_id)
4391 {
4392 	struct input *req = request;
4393 
4394 	req->req_type = cpu_to_le16(req_type);
4395 	req->cmpl_ring = cpu_to_le16(cmpl_ring);
4396 	req->target_id = cpu_to_le16(target_id);
4397 	if (bnxt_kong_hwrm_message(bp, req))
4398 		req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
4399 	else
4400 		req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
4401 }
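
/* Editor's note: requests normally target the primary ("ChiMP")
 * management channel; bnxt_kong_hwrm_message() steers selected requests
 * to the secondary "Kong" channel when present, which is why separate
 * response buffers are set up in bnxt_alloc_hwrm_resources() and
 * bnxt_alloc_kong_hwrm_resources() above.
 */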
4402 
4403 static int bnxt_hwrm_to_stderr(u32 hwrm_err)
4404 {
4405 	switch (hwrm_err) {
4406 	case HWRM_ERR_CODE_SUCCESS:
4407 		return 0;
4408 	case HWRM_ERR_CODE_RESOURCE_LOCKED:
4409 		return -EROFS;
4410 	case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
4411 		return -EACCES;
4412 	case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
4413 		return -ENOSPC;
4414 	case HWRM_ERR_CODE_INVALID_PARAMS:
4415 	case HWRM_ERR_CODE_INVALID_FLAGS:
4416 	case HWRM_ERR_CODE_INVALID_ENABLES:
4417 	case HWRM_ERR_CODE_UNSUPPORTED_TLV:
4418 	case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR:
4419 		return -EINVAL;
4420 	case HWRM_ERR_CODE_NO_BUFFER:
4421 		return -ENOMEM;
4422 	case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
4423 	case HWRM_ERR_CODE_BUSY:
4424 		return -EAGAIN;
4425 	case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
4426 		return -EOPNOTSUPP;
4427 	default:
4428 		return -EIO;
4429 	}
4430 }
4431 
4432 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
4433 				 int timeout, bool silent)
4434 {
4435 	int i, intr_process, rc, tmo_count;
4436 	struct input *req = msg;
4437 	u32 *data = msg;
4438 	u8 *valid;
4439 	u16 cp_ring_id, len = 0;
4440 	struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
4441 	u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
4442 	struct hwrm_short_input short_input = {0};
4443 	u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
4444 	u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
4445 	u16 dst = BNXT_HWRM_CHNL_CHIMP;
4446 
4447 	if (BNXT_NO_FW_ACCESS(bp) &&
4448 	    le16_to_cpu(req->req_type) != HWRM_FUNC_RESET)
4449 		return -EBUSY;
4450 
4451 	if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4452 		if (msg_len > bp->hwrm_max_ext_req_len ||
4453 		    !bp->hwrm_short_cmd_req_addr)
4454 			return -EINVAL;
4455 	}
4456 
4457 	if (bnxt_hwrm_kong_chnl(bp, req)) {
4458 		dst = BNXT_HWRM_CHNL_KONG;
4459 		bar_offset = BNXT_GRCPF_REG_KONG_COMM;
4460 		doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
4461 		resp = bp->hwrm_cmd_kong_resp_addr;
4462 	}
4463 
4464 	memset(resp, 0, PAGE_SIZE);
4465 	cp_ring_id = le16_to_cpu(req->cmpl_ring);
4466 	intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
4467 
4468 	req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst));
4469 	/* currently supports only one outstanding message */
4470 	if (intr_process)
4471 		bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
4472 
4473 	if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
4474 	    msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4475 		void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
4476 		u16 max_msg_len;
4477 
4478 		/* Bound the maximum extended request length for the short
4479 		 * cmd format; hwrm_max_ext_req_len is the value passed up
4480 		 * from the device, or the max supported internal length.
4481 		 */
4482 		max_msg_len = bp->hwrm_max_ext_req_len;
4483 
4484 		memcpy(short_cmd_req, req, msg_len);
4485 		if (msg_len < max_msg_len)
4486 			memset(short_cmd_req + msg_len, 0,
4487 			       max_msg_len - msg_len);
4488 
4489 		short_input.req_type = req->req_type;
4490 		short_input.signature =
4491 				cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
4492 		short_input.size = cpu_to_le16(msg_len);
4493 		short_input.req_addr =
4494 			cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
4495 
4496 		data = (u32 *)&short_input;
4497 		msg_len = sizeof(short_input);
4498 
4499 		/* Sync memory write before updating doorbell */
4500 		wmb();
4501 
4502 		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
4503 	}
4504 
4505 	/* Write request msg to hwrm channel */
4506 	__iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);
4507 
4508 	for (i = msg_len; i < max_req_len; i += 4)
4509 		writel(0, bp->bar0 + bar_offset + i);
4510 
4511 	/* Ring channel doorbell */
4512 	writel(1, bp->bar0 + doorbell_offset);
4513 
4514 	if (!pci_is_enabled(bp->pdev))
4515 		return 0;
4516 
4517 	if (!timeout)
4518 		timeout = DFLT_HWRM_CMD_TIMEOUT;
4519 	/* convert timeout to usec */
4520 	timeout *= 1000;
4521 
4522 	i = 0;
4523 	/* Short timeout for the first few iterations:
4524 	 * number of loops = number of loops for short timeout +
4525 	 * number of loops for standard timeout.
4526 	 */
4527 	tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
4528 	timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
4529 	tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
4530 
4531 	if (intr_process) {
4532 		u16 seq_id = bp->hwrm_intr_seq_id;
4533 
4534 		/* Wait until hwrm response cmpl interrupt is processed */
4535 		while (bp->hwrm_intr_seq_id != (u16)~seq_id &&
4536 		       i++ < tmo_count) {
4537 			/* Abort the wait for completion if the FW health
4538 			 * check has failed.
4539 			 */
4540 			if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4541 				return -EBUSY;
4542 			/* on first few passes, just barely sleep */
4543 			if (i < HWRM_SHORT_TIMEOUT_COUNTER)
4544 				usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4545 					     HWRM_SHORT_MAX_TIMEOUT);
4546 			else
4547 				usleep_range(HWRM_MIN_TIMEOUT,
4548 					     HWRM_MAX_TIMEOUT);
4549 		}
4550 
4551 		if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
4552 			if (!silent)
4553 				netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
4554 					   le16_to_cpu(req->req_type));
4555 			return -EBUSY;
4556 		}
4557 		len = le16_to_cpu(resp->resp_len);
4558 		valid = ((u8 *)resp) + len - 1;
4559 	} else {
4560 		int j;
4561 
4562 		/* Check if response len is updated */
4563 		for (i = 0; i < tmo_count; i++) {
4564 			/* Abort the wait for completion if the FW health
4565 			 * check has failed.
4566 			 */
4567 			if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4568 				return -EBUSY;
4569 			len = le16_to_cpu(resp->resp_len);
4570 			if (len)
4571 				break;
4572 			/* on first few passes, just barely sleep */
4573 			if (i < HWRM_SHORT_TIMEOUT_COUNTER)
4574 				usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4575 					     HWRM_SHORT_MAX_TIMEOUT);
4576 			else
4577 				usleep_range(HWRM_MIN_TIMEOUT,
4578 					     HWRM_MAX_TIMEOUT);
4579 		}
4580 
4581 		if (i >= tmo_count) {
4582 			if (!silent)
4583 				netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
4584 					   HWRM_TOTAL_TIMEOUT(i),
4585 					   le16_to_cpu(req->req_type),
4586 					   le16_to_cpu(req->seq_id), len);
4587 			return -EBUSY;
4588 		}
4589 
4590 		/* Last byte of resp contains valid bit */
4591 		valid = ((u8 *)resp) + len - 1;
4592 		for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
4593 			/* make sure we read from updated DMA memory */
4594 			dma_rmb();
4595 			if (*valid)
4596 				break;
4597 			usleep_range(1, 5);
4598 		}
4599 
4600 		if (j >= HWRM_VALID_BIT_DELAY_USEC) {
4601 			if (!silent)
4602 				netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
4603 					   HWRM_TOTAL_TIMEOUT(i),
4604 					   le16_to_cpu(req->req_type),
4605 					   le16_to_cpu(req->seq_id), len,
4606 					   *valid);
4607 			return -EBUSY;
4608 		}
4609 	}
4610 
4611 	/* Zero the valid bit for compatibility.  The valid bit in an older
4612 	 * spec may become a new field in a newer spec.  We must make sure
4613 	 * that a new field not implemented by the old spec reads as zero.
4614 	 */
4615 	*valid = 0;
4616 	rc = le16_to_cpu(resp->error_code);
4617 	if (rc && !silent)
4618 		netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
4619 			   le16_to_cpu(resp->req_type),
4620 			   le16_to_cpu(resp->seq_id), rc);
4621 	return bnxt_hwrm_to_stderr(rc);
4622 }
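
/* Editor's illustration of the polling budget above, using assumed
 * constants (DFLT_HWRM_CMD_TIMEOUT = 500 ms, HWRM_SHORT_TIMEOUT_COUNTER
 * = 5, HWRM_SHORT_MIN_TIMEOUT = 3 us, HWRM_MIN_TIMEOUT = 25 us):
 *
 *	timeout   = 500 * 1000 = 500000 us
 *	tmo_count = 5 + DIV_ROUND_UP(500000 - 5 * 3, 25) = 20005
 *
 * i.e. the first 5 polls sleep ~3-10 us and the remainder ~25-40 us.
 */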
4623 
4624 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4625 {
4626 	return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
4627 }
4628 
4629 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4630 			      int timeout)
4631 {
4632 	return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4633 }
4634 
4635 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4636 {
4637 	int rc;
4638 
4639 	mutex_lock(&bp->hwrm_cmd_lock);
4640 	rc = _hwrm_send_message(bp, msg, msg_len, timeout);
4641 	mutex_unlock(&bp->hwrm_cmd_lock);
4642 	return rc;
4643 }
4644 
4645 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4646 			     int timeout)
4647 {
4648 	int rc;
4649 
4650 	mutex_lock(&bp->hwrm_cmd_lock);
4651 	rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4652 	mutex_unlock(&bp->hwrm_cmd_lock);
4653 	return rc;
4654 }
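
/* Typical caller pattern (editor's sketch, mirroring uses in this file):
 *
 *	struct hwrm_func_drv_unrgtr_input req = {0};
 *
 *	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
 *	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 *
 * Callers that must read the response buffer use _hwrm_send_message()
 * directly and hold bp->hwrm_cmd_lock across the send and the read.
 */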
4655 
4656 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
4657 			    bool async_only)
4658 {
4659 	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
4660 	struct hwrm_func_drv_rgtr_input req = {0};
4661 	DECLARE_BITMAP(async_events_bmap, 256);
4662 	u32 *events = (u32 *)async_events_bmap;
4663 	u32 flags;
4664 	int rc, i;
4665 
4666 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
4667 
4668 	req.enables =
4669 		cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4670 			    FUNC_DRV_RGTR_REQ_ENABLES_VER |
4671 			    FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4672 
4673 	req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
4674 	flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
4675 	if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
4676 		flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
4677 	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
4678 		flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
4679 			 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
4680 	req.flags = cpu_to_le32(flags);
4681 	req.ver_maj_8b = DRV_VER_MAJ;
4682 	req.ver_min_8b = DRV_VER_MIN;
4683 	req.ver_upd_8b = DRV_VER_UPD;
4684 	req.ver_maj = cpu_to_le16(DRV_VER_MAJ);
4685 	req.ver_min = cpu_to_le16(DRV_VER_MIN);
4686 	req.ver_upd = cpu_to_le16(DRV_VER_UPD);
4687 
4688 	if (BNXT_PF(bp)) {
4689 		u32 data[8];
4690 		int i;
4691 
4692 		memset(data, 0, sizeof(data));
4693 		for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4694 			u16 cmd = bnxt_vf_req_snif[i];
4695 			unsigned int bit, idx;
4696 
4697 			idx = cmd / 32;
4698 			bit = cmd % 32;
4699 			data[idx] |= 1 << bit;
4700 		}
4701 
4702 		for (i = 0; i < 8; i++)
4703 			req.vf_req_fwd[i] = cpu_to_le32(data[i]);
4704 
4705 		req.enables |=
4706 			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4707 	}
4708 
4709 	if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4710 		req.flags |= cpu_to_le32(
4711 			FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4712 
4713 	memset(async_events_bmap, 0, sizeof(async_events_bmap));
4714 	for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
4715 		u16 event_id = bnxt_async_events_arr[i];
4716 
4717 		if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
4718 		    !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4719 			continue;
4720 		__set_bit(bnxt_async_events_arr[i], async_events_bmap);
4721 	}
4722 	if (bmap && bmap_size) {
4723 		for (i = 0; i < bmap_size; i++) {
4724 			if (test_bit(i, bmap))
4725 				__set_bit(i, async_events_bmap);
4726 		}
4727 	}
4728 	for (i = 0; i < 8; i++)
4729 		req.async_event_fwd[i] |= cpu_to_le32(events[i]);
4730 
4731 	if (async_only)
4732 		req.enables =
4733 			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4734 
4735 	mutex_lock(&bp->hwrm_cmd_lock);
4736 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4737 	if (!rc) {
4738 		set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
4739 		if (resp->flags &
4740 		    cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4741 			bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4742 	}
4743 	mutex_unlock(&bp->hwrm_cmd_lock);
4744 	return rc;
4745 }
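
/* Editor's note: the 256-bit async_events_bmap built above is forwarded
 * to firmware as eight little-endian u32 words (async_event_fwd[0..7]),
 * one bit per async event id.
 */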
4746 
4747 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4748 {
4749 	struct hwrm_func_drv_unrgtr_input req = {0};
4750 
4751 	if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
4752 		return 0;
4753 
4754 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
4755 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4756 }
4757 
4758 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4759 {
4760 	int rc = 0;
4761 	struct hwrm_tunnel_dst_port_free_input req = {0};
4762 
4763 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
4764 	req.tunnel_type = tunnel_type;
4765 
4766 	switch (tunnel_type) {
4767 	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
4768 		req.tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
4769 		bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
4770 		break;
4771 	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
4772 		req.tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
4773 		bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
4774 		break;
4775 	default:
4776 		break;
4777 	}
4778 
4779 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4780 	if (rc)
4781 		netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4782 			   rc);
4783 	return rc;
4784 }
4785 
4786 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4787 					   u8 tunnel_type)
4788 {
4789 	int rc = 0;
4790 	struct hwrm_tunnel_dst_port_alloc_input req = {0};
4791 	struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4792 
4793 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
4794 
4795 	req.tunnel_type = tunnel_type;
4796 	req.tunnel_dst_port_val = port;
4797 
4798 	mutex_lock(&bp->hwrm_cmd_lock);
4799 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4800 	if (rc) {
4801 		netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4802 			   rc);
4803 		goto err_out;
4804 	}
4805 
4806 	switch (tunnel_type) {
4807 	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
4808 		bp->vxlan_fw_dst_port_id =
4809 			le16_to_cpu(resp->tunnel_dst_port_id);
4810 		break;
4811 	case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
4812 		bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
4813 		break;
4814 	default:
4815 		break;
4816 	}
4817 
4818 err_out:
4819 	mutex_unlock(&bp->hwrm_cmd_lock);
4820 	return rc;
4821 }
4822 
4823 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4824 {
4825 	struct hwrm_cfa_l2_set_rx_mask_input req = {0};
4826 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4827 
4828 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
4829 	req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4830 
4831 	req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4832 	req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4833 	req.mask = cpu_to_le32(vnic->rx_mask);
4834 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4835 }
4836 
4837 #ifdef CONFIG_RFS_ACCEL
4838 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4839 					    struct bnxt_ntuple_filter *fltr)
4840 {
4841 	struct hwrm_cfa_ntuple_filter_free_input req = {0};
4842 
4843 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
4844 	req.ntuple_filter_id = fltr->filter_id;
4845 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4846 }
4847 
4848 #define BNXT_NTP_FLTR_FLAGS					\
4849 	(CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |	\
4850 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |	\
4851 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |	\
4852 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |	\
4853 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |	\
4854 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |	\
4855 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |	\
4856 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |	\
4857 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |	\
4858 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |		\
4859 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |	\
4860 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |		\
4861 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |	\
4862 	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
4863 
4864 #define BNXT_NTP_TUNNEL_FLTR_FLAG				\
4865 		CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
4866 
4867 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4868 					     struct bnxt_ntuple_filter *fltr)
4869 {
4870 	struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
4871 	struct hwrm_cfa_ntuple_filter_alloc_output *resp;
4872 	struct flow_keys *keys = &fltr->fkeys;
4873 	struct bnxt_vnic_info *vnic;
4874 	u32 flags = 0;
4875 	int rc = 0;
4876 
4877 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
4878 	req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
4879 
4880 	if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
4881 		flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
4882 		req.dst_id = cpu_to_le16(fltr->rxq);
4883 	} else {
4884 		vnic = &bp->vnic_info[fltr->rxq + 1];
4885 		req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
4886 	}
4887 	req.flags = cpu_to_le32(flags);
4888 	req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
4889 
4890 	req.ethertype = htons(ETH_P_IP);
4891 	memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
4892 	req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
4893 	req.ip_protocol = keys->basic.ip_proto;
4894 
4895 	if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4896 		int i;
4897 
4898 		req.ethertype = htons(ETH_P_IPV6);
4899 		req.ip_addr_type =
4900 			CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
4901 		*(struct in6_addr *)&req.src_ipaddr[0] =
4902 			keys->addrs.v6addrs.src;
4903 		*(struct in6_addr *)&req.dst_ipaddr[0] =
4904 			keys->addrs.v6addrs.dst;
4905 		for (i = 0; i < 4; i++) {
4906 			req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4907 			req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4908 		}
4909 	} else {
4910 		req.src_ipaddr[0] = keys->addrs.v4addrs.src;
4911 		req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4912 		req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4913 		req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4914 	}
4915 	if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
4916 		req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4917 		req.tunnel_type =
4918 			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4919 	}
4920 
4921 	req.src_port = keys->ports.src;
4922 	req.src_port_mask = cpu_to_be16(0xffff);
4923 	req.dst_port = keys->ports.dst;
4924 	req.dst_port_mask = cpu_to_be16(0xffff);
4925 
4926 	mutex_lock(&bp->hwrm_cmd_lock);
4927 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4928 	if (!rc) {
4929 		resp = bnxt_get_hwrm_resp_addr(bp, &req);
4930 		fltr->filter_id = resp->ntuple_filter_id;
4931 	}
4932 	mutex_unlock(&bp->hwrm_cmd_lock);
4933 	return rc;
4934 }
4935 #endif
4936 
4937 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
4938 				     u8 *mac_addr)
4939 {
4940 	int rc = 0;
4941 	struct hwrm_cfa_l2_filter_alloc_input req = {0};
4942 	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4943 
4944 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
4945 	req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
4946 	if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
4947 		req.flags |=
4948 			cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
4949 	req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
4950 	req.enables =
4951 		cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
4952 			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
4953 			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
4954 	memcpy(req.l2_addr, mac_addr, ETH_ALEN);
4955 	req.l2_addr_mask[0] = 0xff;
4956 	req.l2_addr_mask[1] = 0xff;
4957 	req.l2_addr_mask[2] = 0xff;
4958 	req.l2_addr_mask[3] = 0xff;
4959 	req.l2_addr_mask[4] = 0xff;
4960 	req.l2_addr_mask[5] = 0xff;
4961 
4962 	mutex_lock(&bp->hwrm_cmd_lock);
4963 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4964 	if (!rc)
4965 		bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
4966 							resp->l2_filter_id;
4967 	mutex_unlock(&bp->hwrm_cmd_lock);
4968 	return rc;
4969 }
4970 
4971 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
4972 {
4973 	u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
4974 	int rc = 0;
4975 
4976 	/* Any associated ntuple filters will also be cleared by firmware. */
4977 	mutex_lock(&bp->hwrm_cmd_lock);
4978 	for (i = 0; i < num_of_vnics; i++) {
4979 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4980 
4981 		for (j = 0; j < vnic->uc_filter_count; j++) {
4982 			struct hwrm_cfa_l2_filter_free_input req = {0};
4983 
4984 			bnxt_hwrm_cmd_hdr_init(bp, &req,
4985 					       HWRM_CFA_L2_FILTER_FREE, -1, -1);
4986 
4987 			req.l2_filter_id = vnic->fw_l2_filter_id[j];
4988 
4989 			rc = _hwrm_send_message(bp, &req, sizeof(req),
4990 						HWRM_CMD_TIMEOUT);
4991 		}
4992 		vnic->uc_filter_count = 0;
4993 	}
4994 	mutex_unlock(&bp->hwrm_cmd_lock);
4995 
4996 	return rc;
4997 }
4998 
4999 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
5000 {
5001 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5002 	u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
5003 	struct hwrm_vnic_tpa_cfg_input req = {0};
5004 
5005 	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
5006 		return 0;
5007 
5008 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
5009 
5010 	if (tpa_flags) {
5011 		u16 mss = bp->dev->mtu - 40;
5012 		u32 nsegs, n, segs = 0, flags;
5013 
5014 		flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
5015 			VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
5016 			VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
5017 			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
5018 			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
5019 		if (tpa_flags & BNXT_FLAG_GRO)
5020 			flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
5021 
5022 		req.flags = cpu_to_le32(flags);
5023 
5024 		req.enables =
5025 			cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
5026 				    VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
5027 				    VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
5028 
5029 		/* The number of segs is in log2 units, and the first
5030 		 * packet is not included in these units.
5031 		 */
5032 		if (mss <= BNXT_RX_PAGE_SIZE) {
5033 			n = BNXT_RX_PAGE_SIZE / mss;
5034 			nsegs = (MAX_SKB_FRAGS - 1) * n;
5035 		} else {
5036 			n = mss / BNXT_RX_PAGE_SIZE;
5037 			if (mss & (BNXT_RX_PAGE_SIZE - 1))
5038 				n++;
5039 			nsegs = (MAX_SKB_FRAGS - n) / n;
5040 		}
5041 
5042 		if (bp->flags & BNXT_FLAG_CHIP_P5) {
5043 			segs = MAX_TPA_SEGS_P5;
5044 			max_aggs = bp->max_tpa;
5045 		} else {
5046 			segs = ilog2(nsegs);
5047 		}
5048 		req.max_agg_segs = cpu_to_le16(segs);
5049 		req.max_aggs = cpu_to_le16(max_aggs);
5050 
5051 		req.min_agg_len = cpu_to_le32(512);
5052 	}
5053 	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5054 
5055 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5056 }
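
/* Editor's illustration of the segment math above (assumed values:
 * MTU 1500 so mss = 1460, BNXT_RX_PAGE_SIZE = 4096, MAX_SKB_FRAGS = 17):
 *
 *	n     = 4096 / 1460  = 2 buffers per page
 *	nsegs = (17 - 1) * 2 = 32
 *	segs  = ilog2(32)    = 5, i.e. up to 2^5 aggregated segments
 */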
5057 
5058 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
5059 {
5060 	struct bnxt_ring_grp_info *grp_info;
5061 
5062 	grp_info = &bp->grp_info[ring->grp_idx];
5063 	return grp_info->cp_fw_ring_id;
5064 }
5065 
5066 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
5067 {
5068 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
5069 		struct bnxt_napi *bnapi = rxr->bnapi;
5070 		struct bnxt_cp_ring_info *cpr;
5071 
5072 		cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
5073 		return cpr->cp_ring_struct.fw_ring_id;
5074 	} else {
5075 		return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
5076 	}
5077 }
5078 
5079 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
5080 {
5081 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
5082 		struct bnxt_napi *bnapi = txr->bnapi;
5083 		struct bnxt_cp_ring_info *cpr;
5084 
5085 		cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
5086 		return cpr->cp_ring_struct.fw_ring_id;
5087 	} else {
5088 		return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
5089 	}
5090 }
5091 
5092 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
5093 {
5094 	int entries;
5095 
5096 	if (bp->flags & BNXT_FLAG_CHIP_P5)
5097 		entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
5098 	else
5099 		entries = HW_HASH_INDEX_SIZE;
5100 
5101 	bp->rss_indir_tbl_entries = entries;
5102 	bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
5103 					  GFP_KERNEL);
5104 	if (!bp->rss_indir_tbl)
5105 		return -ENOMEM;
5106 	return 0;
5107 }
5108 
5109 static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
5110 {
5111 	u16 max_rings, max_entries, pad, i;
5112 
5113 	if (!bp->rx_nr_rings)
5114 		return;
5115 
5116 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5117 		max_rings = bp->rx_nr_rings - 1;
5118 	else
5119 		max_rings = bp->rx_nr_rings;
5120 
5121 	max_entries = bnxt_get_rxfh_indir_size(bp->dev);
5122 
5123 	for (i = 0; i < max_entries; i++)
5124 		bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
5125 
5126 	pad = bp->rss_indir_tbl_entries - max_entries;
5127 	if (pad)
5128 		memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
5129 }
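
/* Editor's note: ethtool_rxfh_indir_default(i, n) is simply i % n, so
 * with 4 RX rings the default indirection table reads 0,1,2,3,0,1,...
 * spreading flows round-robin across the rings.
 */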
5130 
5131 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
5132 {
5133 	u16 i, tbl_size, max_ring = 0;
5134 
5135 	if (!bp->rss_indir_tbl)
5136 		return 0;
5137 
5138 	tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5139 	for (i = 0; i < tbl_size; i++)
5140 		max_ring = max(max_ring, bp->rss_indir_tbl[i]);
5141 	return max_ring;
5142 }
5143 
5144 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
5145 {
5146 	if (bp->flags & BNXT_FLAG_CHIP_P5)
5147 		return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
5148 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5149 		return 2;
5150 	return 1;
5151 }
5152 
5153 static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5154 {
5155 	bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
5156 	u16 i, j;
5157 
5158 	/* Fill the RSS indirection table with ring group ids */
5159 	for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
5160 		if (!no_rss)
5161 			j = bp->rss_indir_tbl[i];
5162 		vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
5163 	}
5164 }
5165 
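/* P5 chips use a different table format: each entry is a pair of
 * (RX ring ID, companion CP ring ID) instead of a ring group ID.
 */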
5166 static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
5167 				      struct bnxt_vnic_info *vnic)
5168 {
5169 	__le16 *ring_tbl = vnic->rss_table;
5170 	struct bnxt_rx_ring_info *rxr;
5171 	u16 tbl_size, i;
5172 
5173 	tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5174 
5175 	for (i = 0; i < tbl_size; i++) {
5176 		u16 ring_id, j;
5177 
5178 		j = bp->rss_indir_tbl[i];
5179 		rxr = &bp->rx_ring[j];
5180 
5181 		ring_id = rxr->rx_ring_struct.fw_ring_id;
5182 		*ring_tbl++ = cpu_to_le16(ring_id);
5183 		ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5184 		*ring_tbl++ = cpu_to_le16(ring_id);
5185 	}
5186 }
5187 
5188 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5189 {
5190 	if (bp->flags & BNXT_FLAG_CHIP_P5)
5191 		__bnxt_fill_hw_rss_tbl_p5(bp, vnic);
5192 	else
5193 		__bnxt_fill_hw_rss_tbl(bp, vnic);
5194 }
5195 
5196 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
5197 {
5198 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5199 	struct hwrm_vnic_rss_cfg_input req = {0};
5200 
5201 	if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
5202 	    vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
5203 		return 0;
5204 
5205 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
5206 	if (set_rss) {
5207 		bnxt_fill_hw_rss_tbl(bp, vnic);
5208 		req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
5209 		req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5210 		req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
5211 		req.hash_key_tbl_addr =
5212 			cpu_to_le64(vnic->rss_hash_key_dma_addr);
5213 	}
5214 	req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5215 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5216 }
5217 
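/* P5 RSS setup.  Each RSS context covers BNXT_RSS_TABLE_ENTRIES_P5
 * rings, so one HWRM_VNIC_RSS_CFG request is sent per context, with
 * the ring table address advanced by BNXT_RSS_TABLE_SIZE_P5 each time.
 */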
5218 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
5219 {
5220 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5221 	struct hwrm_vnic_rss_cfg_input req = {0};
5222 	dma_addr_t ring_tbl_map;
5223 	u32 i, nr_ctxs;
5224 
5225 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
5226 	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5227 	if (!set_rss) {
5228 		hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5229 		return 0;
5230 	}
5231 	bnxt_fill_hw_rss_tbl(bp, vnic);
5232 	req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
5233 	req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5234 	req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
5235 	ring_tbl_map = vnic->rss_table_dma_addr;
5236 	nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
5237 	for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
5238 		int rc;
5239 
5240 		req.ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
5241 		req.ring_table_pair_index = i;
5242 		req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
5243 		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5244 		if (rc)
5245 			return rc;
5246 	}
5247 	return 0;
5248 }
5249 
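/* Configure jumbo placement and IPv4/IPv6 header-data split (HDS)
 * placement modes for the vnic.
 */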
5250 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
5251 {
5252 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5253 	struct hwrm_vnic_plcmodes_cfg_input req = {0};
5254 
5255 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
5256 	req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
5257 				VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
5258 				VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
5259 	req.enables =
5260 		cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
5261 			    VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
5262 	/* thresholds not implemented in firmware yet */
5263 	req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
5264 	req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
5265 	req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5266 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5267 }
5268 
5269 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
5270 					u16 ctx_idx)
5271 {
5272 	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
5273 
5274 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
5275 	req.rss_cos_lb_ctx_id =
5276 		cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
5277 
5278 	hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5279 	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
5280 }
5281 
5282 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
5283 {
5284 	int i, j;
5285 
5286 	for (i = 0; i < bp->nr_vnics; i++) {
5287 		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5288 
5289 		for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
5290 			if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
5291 				bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
5292 		}
5293 	}
5294 	bp->rsscos_nr_ctxs = 0;
5295 }
5296 
5297 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
5298 {
5299 	int rc;
5300 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
5301 	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
5302 						bp->hwrm_cmd_resp_addr;
5303 
5304 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
5305 			       -1);
5306 
5307 	mutex_lock(&bp->hwrm_cmd_lock);
5308 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5309 	if (!rc)
5310 		bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
5311 			le16_to_cpu(resp->rss_cos_lb_ctx_id);
5312 	mutex_unlock(&bp->hwrm_cmd_lock);
5313 
5314 	return rc;
5315 }
5316 
5317 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
5318 {
5319 	if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
5320 		return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
5321 	return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
5322 }
5323 
5324 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
5325 {
5326 	unsigned int ring = 0, grp_idx;
5327 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5328 	struct hwrm_vnic_cfg_input req = {0};
5329 	u16 def_vlan = 0;
5330 
5331 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
5332 
5333 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
5334 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
5335 
5336 		req.default_rx_ring_id =
5337 			cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
5338 		req.default_cmpl_ring_id =
5339 			cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
5340 		req.enables =
5341 			cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
5342 				    VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
5343 		goto vnic_mru;
5344 	}
5345 	req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
5346 	/* Only RSS is supported for now; COS & LB are TBD */
5347 	if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
5348 		req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5349 		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5350 					   VNIC_CFG_REQ_ENABLES_MRU);
5351 	} else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
5352 		req.rss_rule =
5353 			cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
5354 		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5355 					   VNIC_CFG_REQ_ENABLES_MRU);
5356 		req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
5357 	} else {
5358 		req.rss_rule = cpu_to_le16(0xffff);
5359 	}
5360 
5361 	if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
5362 	    (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
5363 		req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
5364 		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
5365 	} else {
5366 		req.cos_rule = cpu_to_le16(0xffff);
5367 	}
5368 
5369 	if (vnic->flags & BNXT_VNIC_RSS_FLAG)
5370 		ring = 0;
5371 	else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
5372 		ring = vnic_id - 1;
5373 	else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
5374 		ring = bp->rx_nr_rings - 1;
5375 
5376 	grp_idx = bp->rx_ring[ring].bnapi->index;
5377 	req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
5378 	req.lb_rule = cpu_to_le16(0xffff);
5379 vnic_mru:
5380 	req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
5381 
5382 	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5383 #ifdef CONFIG_BNXT_SRIOV
5384 	if (BNXT_VF(bp))
5385 		def_vlan = bp->vf.vlan;
5386 #endif
5387 	if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
5388 		req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
5389 	if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
5390 		req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
5391 
5392 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5393 }
5394 
5395 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
5396 {
5397 	if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
5398 		struct hwrm_vnic_free_input req = {0};
5399 
5400 		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
5401 		req.vnic_id =
5402 			cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
5403 
5404 		hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5405 		bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
5406 	}
5407 }
5408 
5409 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
5410 {
5411 	u16 i;
5412 
5413 	for (i = 0; i < bp->nr_vnics; i++)
5414 		bnxt_hwrm_vnic_free_one(bp, i);
5415 }
5416 
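/* Allocate a vnic in firmware.  On legacy chips the vnic is first
 * mapped to its ring groups; P5 chips do not use ring groups.
 */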
5417 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
5418 				unsigned int start_rx_ring_idx,
5419 				unsigned int nr_rings)
5420 {
5421 	int rc = 0;
5422 	unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
5423 	struct hwrm_vnic_alloc_input req = {0};
5424 	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5425 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5426 
5427 	if (bp->flags & BNXT_FLAG_CHIP_P5)
5428 		goto vnic_no_ring_grps;
5429 
5430 	/* map ring groups to this vnic */
5431 	for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
5432 		grp_idx = bp->rx_ring[i].bnapi->index;
5433 		if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
5434 			netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
5435 				   j, nr_rings);
5436 			break;
5437 		}
5438 		vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
5439 	}
5440 
5441 vnic_no_ring_grps:
5442 	for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
5443 		vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
5444 	if (vnic_id == 0)
5445 		req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
5446 
5447 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
5448 
5449 	mutex_lock(&bp->hwrm_cmd_lock);
5450 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5451 	if (!rc)
5452 		vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
5453 	mutex_unlock(&bp->hwrm_cmd_lock);
5454 	return rc;
5455 }
5456 
5457 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
5458 {
5459 	struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5460 	struct hwrm_vnic_qcaps_input req = {0};
5461 	int rc;
5462 
5463 	bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
5464 	bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
5465 	if (bp->hwrm_spec_code < 0x10600)
5466 		return 0;
5467 
5468 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
5469 	mutex_lock(&bp->hwrm_cmd_lock);
5470 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5471 	if (!rc) {
5472 		u32 flags = le32_to_cpu(resp->flags);
5473 
5474 		if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
5475 		    (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
5476 			bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
5477 		if (flags &
5478 		    VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
5479 			bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
5480 
5481 		/* Older P5 fw before EXT_HW_STATS support did not set
5482 		 * VLAN_STRIP_CAP properly.
5483 		 */
5484 		if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
5485 		    (BNXT_CHIP_P5_THOR(bp) &&
5486 		     !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
5487 			bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
5488 		bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
5489 		if (bp->max_tpa_v2) {
5490 			if (BNXT_CHIP_P5_THOR(bp))
5491 				bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
5492 			else
5493 				bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
5494 		}
5495 	}
5496 	mutex_unlock(&bp->hwrm_cmd_lock);
5497 	return rc;
5498 }
5499 
5500 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
5501 {
5502 	u16 i;
5503 	u32 rc = 0;
5504 
5505 	if (bp->flags & BNXT_FLAG_CHIP_P5)
5506 		return 0;
5507 
5508 	mutex_lock(&bp->hwrm_cmd_lock);
5509 	for (i = 0; i < bp->rx_nr_rings; i++) {
5510 		struct hwrm_ring_grp_alloc_input req = {0};
5511 		struct hwrm_ring_grp_alloc_output *resp =
5512 					bp->hwrm_cmd_resp_addr;
5513 		unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
5514 
5515 		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
5516 
5517 		req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5518 		req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5519 		req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5520 		req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
5521 
5522 		rc = _hwrm_send_message(bp, &req, sizeof(req),
5523 					HWRM_CMD_TIMEOUT);
5524 		if (rc)
5525 			break;
5526 
5527 		bp->grp_info[grp_idx].fw_grp_id =
5528 			le32_to_cpu(resp->ring_group_id);
5529 	}
5530 	mutex_unlock(&bp->hwrm_cmd_lock);
5531 	return rc;
5532 }
5533 
5534 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
5535 {
5536 	u16 i;
5537 	struct hwrm_ring_grp_free_input req = {0};
5538 
5539 	if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
5540 		return;
5541 
5542 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
5543 
5544 	mutex_lock(&bp->hwrm_cmd_lock);
5545 	for (i = 0; i < bp->cp_nr_rings; i++) {
5546 		if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5547 			continue;
5548 		req.ring_group_id =
5549 			cpu_to_le32(bp->grp_info[i].fw_grp_id);
5550 
5551 		_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5552 		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5553 	}
5554 	mutex_unlock(&bp->hwrm_cmd_lock);
5555 }
5556 
5557 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
5558 				    struct bnxt_ring_struct *ring,
5559 				    u32 ring_type, u32 map_index)
5560 {
5561 	int rc = 0, err = 0;
5562 	struct hwrm_ring_alloc_input req = {0};
5563 	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5564 	struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
5565 	struct bnxt_ring_grp_info *grp_info;
5566 	u16 ring_id;
5567 
5568 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
5569 
5570 	req.enables = 0;
5571 	if (rmem->nr_pages > 1) {
5572 		req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
5573 		/* Page size is in log2 units */
5574 		req.page_size = BNXT_PAGE_SHIFT;
5575 		req.page_tbl_depth = 1;
5576 	} else {
5577 		req.page_tbl_addr =  cpu_to_le64(rmem->dma_arr[0]);
5578 	}
5579 	req.fbo = 0;
5580 	/* Association of ring index with doorbell index and MSIX number */
5581 	req.logical_id = cpu_to_le16(map_index);
5582 
5583 	switch (ring_type) {
5584 	case HWRM_RING_ALLOC_TX: {
5585 		struct bnxt_tx_ring_info *txr;
5586 
5587 		txr = container_of(ring, struct bnxt_tx_ring_info,
5588 				   tx_ring_struct);
5589 		req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
5590 		/* Association of transmit ring with completion ring */
5591 		grp_info = &bp->grp_info[ring->grp_idx];
5592 		req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
5593 		req.length = cpu_to_le32(bp->tx_ring_mask + 1);
5594 		req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5595 		req.queue_id = cpu_to_le16(ring->queue_id);
5596 		break;
5597 	}
5598 	case HWRM_RING_ALLOC_RX:
5599 		req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5600 		req.length = cpu_to_le32(bp->rx_ring_mask + 1);
5601 		if (bp->flags & BNXT_FLAG_CHIP_P5) {
5602 			u16 flags = 0;
5603 
5604 			/* Association of rx ring with stats context */
5605 			grp_info = &bp->grp_info[ring->grp_idx];
5606 			req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
5607 			req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5608 			req.enables |= cpu_to_le32(
5609 				RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5610 			if (NET_IP_ALIGN == 2)
5611 				flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
5612 			req.flags = cpu_to_le16(flags);
5613 		}
5614 		break;
5615 	case HWRM_RING_ALLOC_AGG:
5616 		if (bp->flags & BNXT_FLAG_CHIP_P5) {
5617 			req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
5618 			/* Association of agg ring with rx ring */
5619 			grp_info = &bp->grp_info[ring->grp_idx];
5620 			req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
5621 			req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
5622 			req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5623 			req.enables |= cpu_to_le32(
5624 				RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
5625 				RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5626 		} else {
5627 			req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5628 		}
5629 		req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
5630 		break;
5631 	case HWRM_RING_ALLOC_CMPL:
5632 		req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
5633 		req.length = cpu_to_le32(bp->cp_ring_mask + 1);
5634 		if (bp->flags & BNXT_FLAG_CHIP_P5) {
5635 			/* Association of cp ring with nq */
5636 			grp_info = &bp->grp_info[map_index];
5637 			req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
5638 			req.cq_handle = cpu_to_le64(ring->handle);
5639 			req.enables |= cpu_to_le32(
5640 				RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
5641 		} else if (bp->flags & BNXT_FLAG_USING_MSIX) {
5642 			req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5643 		}
5644 		break;
5645 	case HWRM_RING_ALLOC_NQ:
5646 		req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
5647 		req.length = cpu_to_le32(bp->cp_ring_mask + 1);
5648 		if (bp->flags & BNXT_FLAG_USING_MSIX)
5649 			req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5650 		break;
5651 	default:
5652 		netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5653 			   ring_type);
5654 		return -1;
5655 	}
5656 
5657 	mutex_lock(&bp->hwrm_cmd_lock);
5658 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5659 	err = le16_to_cpu(resp->error_code);
5660 	ring_id = le16_to_cpu(resp->ring_id);
5661 	mutex_unlock(&bp->hwrm_cmd_lock);
5662 
5663 	if (rc || err) {
5664 		netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
5665 			   ring_type, rc, err);
5666 		return -EIO;
5667 	}
5668 	ring->fw_ring_id = ring_id;
5669 	return rc;
5670 }
5671 
5672 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
5673 {
5674 	int rc;
5675 
5676 	if (BNXT_PF(bp)) {
5677 		struct hwrm_func_cfg_input req = {0};
5678 
5679 		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
5680 		req.fid = cpu_to_le16(0xffff);
5681 		req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5682 		req.async_event_cr = cpu_to_le16(idx);
5683 		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5684 	} else {
5685 		struct hwrm_func_vf_cfg_input req = {0};
5686 
5687 		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
5688 		req.enables =
5689 			cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5690 		req.async_event_cr = cpu_to_le16(idx);
5691 		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5692 	}
5693 	return rc;
5694 }
5695 
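/* Compute the doorbell address and key for a newly allocated ring.
 * P5 chips use 64-bit keyed doorbells at a fixed PF/VF BAR offset;
 * legacy chips use a 32-bit doorbell at a 0x80 stride per logical
 * ring index.
 */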
5696 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5697 			u32 map_idx, u32 xid)
5698 {
5699 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
5700 		if (BNXT_PF(bp))
5701 			db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
5702 		else
5703 			db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
5704 		switch (ring_type) {
5705 		case HWRM_RING_ALLOC_TX:
5706 			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
5707 			break;
5708 		case HWRM_RING_ALLOC_RX:
5709 		case HWRM_RING_ALLOC_AGG:
5710 			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
5711 			break;
5712 		case HWRM_RING_ALLOC_CMPL:
5713 			db->db_key64 = DBR_PATH_L2;
5714 			break;
5715 		case HWRM_RING_ALLOC_NQ:
5716 			db->db_key64 = DBR_PATH_L2;
5717 			break;
5718 		}
5719 		db->db_key64 |= (u64)xid << DBR_XID_SFT;
5720 	} else {
5721 		db->doorbell = bp->bar1 + map_idx * 0x80;
5722 		switch (ring_type) {
5723 		case HWRM_RING_ALLOC_TX:
5724 			db->db_key32 = DB_KEY_TX;
5725 			break;
5726 		case HWRM_RING_ALLOC_RX:
5727 		case HWRM_RING_ALLOC_AGG:
5728 			db->db_key32 = DB_KEY_RX;
5729 			break;
5730 		case HWRM_RING_ALLOC_CMPL:
5731 			db->db_key32 = DB_KEY_CP;
5732 			break;
5733 		}
5734 	}
5735 }
5736 
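/* Allocate all rings in firmware in dependency order: NQs (or legacy
 * completion rings) first, then TX, RX and aggregation rings, binding
 * each ring to its doorbell as it is created.
 */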
5737 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5738 {
5739 	bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
5740 	int i, rc = 0;
5741 	u32 type;
5742 
5743 	if (bp->flags & BNXT_FLAG_CHIP_P5)
5744 		type = HWRM_RING_ALLOC_NQ;
5745 	else
5746 		type = HWRM_RING_ALLOC_CMPL;
5747 	for (i = 0; i < bp->cp_nr_rings; i++) {
5748 		struct bnxt_napi *bnapi = bp->bnapi[i];
5749 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5750 		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5751 		u32 map_idx = ring->map_idx;
5752 		unsigned int vector;
5753 
5754 		vector = bp->irq_tbl[map_idx].vector;
5755 		disable_irq_nosync(vector);
5756 		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5757 		if (rc) {
5758 			enable_irq(vector);
5759 			goto err_out;
5760 		}
5761 		bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5762 		bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5763 		enable_irq(vector);
5764 		bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
5765 
5766 		if (!i) {
5767 			rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5768 			if (rc)
5769 				netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5770 		}
5771 	}
5772 
5773 	type = HWRM_RING_ALLOC_TX;
5774 	for (i = 0; i < bp->tx_nr_rings; i++) {
5775 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5776 		struct bnxt_ring_struct *ring;
5777 		u32 map_idx;
5778 
5779 		if (bp->flags & BNXT_FLAG_CHIP_P5) {
5780 			struct bnxt_napi *bnapi = txr->bnapi;
5781 			struct bnxt_cp_ring_info *cpr, *cpr2;
5782 			u32 type2 = HWRM_RING_ALLOC_CMPL;
5783 
5784 			cpr = &bnapi->cp_ring;
5785 			cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5786 			ring = &cpr2->cp_ring_struct;
5787 			ring->handle = BNXT_TX_HDL;
5788 			map_idx = bnapi->index;
5789 			rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5790 			if (rc)
5791 				goto err_out;
5792 			bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5793 				    ring->fw_ring_id);
5794 			bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5795 		}
5796 		ring = &txr->tx_ring_struct;
5797 		map_idx = i;
5798 		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5799 		if (rc)
5800 			goto err_out;
5801 		bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
5802 	}
5803 
5804 	type = HWRM_RING_ALLOC_RX;
5805 	for (i = 0; i < bp->rx_nr_rings; i++) {
5806 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5807 		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5808 		struct bnxt_napi *bnapi = rxr->bnapi;
5809 		u32 map_idx = bnapi->index;
5810 
5811 		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5812 		if (rc)
5813 			goto err_out;
5814 		bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5815 		/* If we have agg rings, post agg buffers first. */
5816 		if (!agg_rings)
5817 			bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5818 		bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
5819 		if (bp->flags & BNXT_FLAG_CHIP_P5) {
5820 			struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5821 			u32 type2 = HWRM_RING_ALLOC_CMPL;
5822 			struct bnxt_cp_ring_info *cpr2;
5823 
5824 			cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5825 			ring = &cpr2->cp_ring_struct;
5826 			ring->handle = BNXT_RX_HDL;
5827 			rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5828 			if (rc)
5829 				goto err_out;
5830 			bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5831 				    ring->fw_ring_id);
5832 			bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5833 		}
5834 	}
5835 
5836 	if (agg_rings) {
5837 		type = HWRM_RING_ALLOC_AGG;
5838 		for (i = 0; i < bp->rx_nr_rings; i++) {
5839 			struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5840 			struct bnxt_ring_struct *ring =
5841 						&rxr->rx_agg_ring_struct;
5842 			u32 grp_idx = ring->grp_idx;
5843 			u32 map_idx = grp_idx + bp->rx_nr_rings;
5844 
5845 			rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5846 			if (rc)
5847 				goto err_out;
5848 
5849 			bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5850 				    ring->fw_ring_id);
5851 			bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
5852 			bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5853 			bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
5854 		}
5855 	}
5856 err_out:
5857 	return rc;
5858 }
5859 
5860 static int hwrm_ring_free_send_msg(struct bnxt *bp,
5861 				   struct bnxt_ring_struct *ring,
5862 				   u32 ring_type, int cmpl_ring_id)
5863 {
5864 	int rc;
5865 	struct hwrm_ring_free_input req = {0};
5866 	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
5867 	u16 error_code;
5868 
5869 	if (BNXT_NO_FW_ACCESS(bp))
5870 		return 0;
5871 
5872 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
5873 	req.ring_type = ring_type;
5874 	req.ring_id = cpu_to_le16(ring->fw_ring_id);
5875 
5876 	mutex_lock(&bp->hwrm_cmd_lock);
5877 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5878 	error_code = le16_to_cpu(resp->error_code);
5879 	mutex_unlock(&bp->hwrm_cmd_lock);
5880 
5881 	if (rc || error_code) {
5882 		netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
5883 			   ring_type, rc, error_code);
5884 		return -EIO;
5885 	}
5886 	return 0;
5887 }
5888 
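/* Free all rings in firmware.  When close_path is true, the associated
 * completion ring ID is passed so firmware can flush any pending
 * completions; completion/NQ rings are freed last, after interrupts
 * have been disabled.
 */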
5889 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
5890 {
5891 	u32 type;
5892 	int i;
5893 
5894 	if (!bp->bnapi)
5895 		return;
5896 
5897 	for (i = 0; i < bp->tx_nr_rings; i++) {
5898 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5899 		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
5900 
5901 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5902 			u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
5903 
5904 			hwrm_ring_free_send_msg(bp, ring,
5905 						RING_FREE_REQ_RING_TYPE_TX,
5906 						close_path ? cmpl_ring_id :
5907 						INVALID_HW_RING_ID);
5908 			ring->fw_ring_id = INVALID_HW_RING_ID;
5909 		}
5910 	}
5911 
5912 	for (i = 0; i < bp->rx_nr_rings; i++) {
5913 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5914 		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5915 		u32 grp_idx = rxr->bnapi->index;
5916 
5917 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5918 			u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5919 
5920 			hwrm_ring_free_send_msg(bp, ring,
5921 						RING_FREE_REQ_RING_TYPE_RX,
5922 						close_path ? cmpl_ring_id :
5923 						INVALID_HW_RING_ID);
5924 			ring->fw_ring_id = INVALID_HW_RING_ID;
5925 			bp->grp_info[grp_idx].rx_fw_ring_id =
5926 				INVALID_HW_RING_ID;
5927 		}
5928 	}
5929 
5930 	if (bp->flags & BNXT_FLAG_CHIP_P5)
5931 		type = RING_FREE_REQ_RING_TYPE_RX_AGG;
5932 	else
5933 		type = RING_FREE_REQ_RING_TYPE_RX;
5934 	for (i = 0; i < bp->rx_nr_rings; i++) {
5935 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5936 		struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
5937 		u32 grp_idx = rxr->bnapi->index;
5938 
5939 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5940 			u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5941 
5942 			hwrm_ring_free_send_msg(bp, ring, type,
5943 						close_path ? cmpl_ring_id :
5944 						INVALID_HW_RING_ID);
5945 			ring->fw_ring_id = INVALID_HW_RING_ID;
5946 			bp->grp_info[grp_idx].agg_fw_ring_id =
5947 				INVALID_HW_RING_ID;
5948 		}
5949 	}
5950 
5951 	/* The completion rings are about to be freed.  After that, the
5952 	 * IRQ doorbell will no longer work, so interrupts must be
5953 	 * disabled here.
5954 	 */
5955 	bnxt_disable_int_sync(bp);
5956 
5957 	if (bp->flags & BNXT_FLAG_CHIP_P5)
5958 		type = RING_FREE_REQ_RING_TYPE_NQ;
5959 	else
5960 		type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
5961 	for (i = 0; i < bp->cp_nr_rings; i++) {
5962 		struct bnxt_napi *bnapi = bp->bnapi[i];
5963 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5964 		struct bnxt_ring_struct *ring;
5965 		int j;
5966 
5967 		for (j = 0; j < 2; j++) {
5968 			struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
5969 
5970 			if (cpr2) {
5971 				ring = &cpr2->cp_ring_struct;
5972 				if (ring->fw_ring_id == INVALID_HW_RING_ID)
5973 					continue;
5974 				hwrm_ring_free_send_msg(bp, ring,
5975 					RING_FREE_REQ_RING_TYPE_L2_CMPL,
5976 					INVALID_HW_RING_ID);
5977 				ring->fw_ring_id = INVALID_HW_RING_ID;
5978 			}
5979 		}
5980 		ring = &cpr->cp_ring_struct;
5981 		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5982 			hwrm_ring_free_send_msg(bp, ring, type,
5983 						INVALID_HW_RING_ID);
5984 			ring->fw_ring_id = INVALID_HW_RING_ID;
5985 			bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
5986 		}
5987 	}
5988 }
5989 
5990 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
5991 			   bool shared);
5992 
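/* Query firmware for the resources currently reserved for this
 * function.  On P5 chips, trim the RX/TX ring counts if the reserved
 * completion rings cannot cover them.
 */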
5993 static int bnxt_hwrm_get_rings(struct bnxt *bp)
5994 {
5995 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5996 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5997 	struct hwrm_func_qcfg_input req = {0};
5998 	int rc;
5999 
6000 	if (bp->hwrm_spec_code < 0x10601)
6001 		return 0;
6002 
6003 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6004 	req.fid = cpu_to_le16(0xffff);
6005 	mutex_lock(&bp->hwrm_cmd_lock);
6006 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6007 	if (rc) {
6008 		mutex_unlock(&bp->hwrm_cmd_lock);
6009 		return rc;
6010 	}
6011 
6012 	hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6013 	if (BNXT_NEW_RM(bp)) {
6014 		u16 cp, stats;
6015 
6016 		hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
6017 		hw_resc->resv_hw_ring_grps =
6018 			le32_to_cpu(resp->alloc_hw_ring_grps);
6019 		hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
6020 		cp = le16_to_cpu(resp->alloc_cmpl_rings);
6021 		stats = le16_to_cpu(resp->alloc_stat_ctx);
6022 		hw_resc->resv_irqs = cp;
6023 		if (bp->flags & BNXT_FLAG_CHIP_P5) {
6024 			int rx = hw_resc->resv_rx_rings;
6025 			int tx = hw_resc->resv_tx_rings;
6026 
6027 			if (bp->flags & BNXT_FLAG_AGG_RINGS)
6028 				rx >>= 1;
6029 			if (cp < (rx + tx)) {
6030 				bnxt_trim_rings(bp, &rx, &tx, cp, false);
6031 				if (bp->flags & BNXT_FLAG_AGG_RINGS)
6032 					rx <<= 1;
6033 				hw_resc->resv_rx_rings = rx;
6034 				hw_resc->resv_tx_rings = tx;
6035 			}
6036 			hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
6037 			hw_resc->resv_hw_ring_grps = rx;
6038 		}
6039 		hw_resc->resv_cp_rings = cp;
6040 		hw_resc->resv_stat_ctxs = stats;
6041 	}
6042 	mutex_unlock(&bp->hwrm_cmd_lock);
6043 	return 0;
6044 }
6045 
6046 /* Caller must hold bp->hwrm_cmd_lock */
6047 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
6048 {
6049 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6050 	struct hwrm_func_qcfg_input req = {0};
6051 	int rc;
6052 
6053 	if (bp->hwrm_spec_code < 0x10601)
6054 		return 0;
6055 
6056 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6057 	req.fid = cpu_to_le16(fid);
6058 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6059 	if (!rc)
6060 		*tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6061 
6062 	return rc;
6063 }
6064 
6065 static bool bnxt_rfs_supported(struct bnxt *bp);
6066 
6067 static void
6068 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
6069 			     int tx_rings, int rx_rings, int ring_grps,
6070 			     int cp_rings, int stats, int vnics)
6071 {
6072 	u32 enables = 0;
6073 
6074 	bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
6075 	req->fid = cpu_to_le16(0xffff);
6076 	enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6077 	req->num_tx_rings = cpu_to_le16(tx_rings);
6078 	if (BNXT_NEW_RM(bp)) {
6079 		enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
6080 		enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6081 		if (bp->flags & BNXT_FLAG_CHIP_P5) {
6082 			enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
6083 			enables |= tx_rings + ring_grps ?
6084 				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6085 			enables |= rx_rings ?
6086 				FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6087 		} else {
6088 			enables |= cp_rings ?
6089 				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6090 			enables |= ring_grps ?
6091 				   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
6092 				   FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6093 		}
6094 		enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
6095 
6096 		req->num_rx_rings = cpu_to_le16(rx_rings);
6097 		if (bp->flags & BNXT_FLAG_CHIP_P5) {
6098 			req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6099 			req->num_msix = cpu_to_le16(cp_rings);
6100 			req->num_rsscos_ctxs =
6101 				cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6102 		} else {
6103 			req->num_cmpl_rings = cpu_to_le16(cp_rings);
6104 			req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6105 			req->num_rsscos_ctxs = cpu_to_le16(1);
6106 			if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
6107 			    bnxt_rfs_supported(bp))
6108 				req->num_rsscos_ctxs =
6109 					cpu_to_le16(ring_grps + 1);
6110 		}
6111 		req->num_stat_ctxs = cpu_to_le16(stats);
6112 		req->num_vnics = cpu_to_le16(vnics);
6113 	}
6114 	req->enables = cpu_to_le32(enables);
6115 }
6116 
6117 static void
6118 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
6119 			     struct hwrm_func_vf_cfg_input *req, int tx_rings,
6120 			     int rx_rings, int ring_grps, int cp_rings,
6121 			     int stats, int vnics)
6122 {
6123 	u32 enables = 0;
6124 
6125 	bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
6126 	enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6127 	enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
6128 			      FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6129 	enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6130 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
6131 		enables |= tx_rings + ring_grps ?
6132 			   FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6133 	} else {
6134 		enables |= cp_rings ?
6135 			   FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6136 		enables |= ring_grps ?
6137 			   FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
6138 	}
6139 	enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
6140 	enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
6141 
6142 	req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
6143 	req->num_tx_rings = cpu_to_le16(tx_rings);
6144 	req->num_rx_rings = cpu_to_le16(rx_rings);
6145 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
6146 		req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6147 		req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6148 	} else {
6149 		req->num_cmpl_rings = cpu_to_le16(cp_rings);
6150 		req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6151 		req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
6152 	}
6153 	req->num_stat_ctxs = cpu_to_le16(stats);
6154 	req->num_vnics = cpu_to_le16(vnics);
6155 
6156 	req->enables = cpu_to_le32(enables);
6157 }
6158 
6159 static int
6160 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6161 			   int ring_grps, int cp_rings, int stats, int vnics)
6162 {
6163 	struct hwrm_func_cfg_input req = {0};
6164 	int rc;
6165 
6166 	__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6167 				     cp_rings, stats, vnics);
6168 	if (!req.enables)
6169 		return 0;
6170 
6171 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6172 	if (rc)
6173 		return rc;
6174 
6175 	if (bp->hwrm_spec_code < 0x10601)
6176 		bp->hw_resc.resv_tx_rings = tx_rings;
6177 
6178 	return bnxt_hwrm_get_rings(bp);
6179 }
6180 
6181 static int
6182 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6183 			   int ring_grps, int cp_rings, int stats, int vnics)
6184 {
6185 	struct hwrm_func_vf_cfg_input req = {0};
6186 	int rc;
6187 
6188 	if (!BNXT_NEW_RM(bp)) {
6189 		bp->hw_resc.resv_tx_rings = tx_rings;
6190 		return 0;
6191 	}
6192 
6193 	__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6194 				     cp_rings, stats, vnics);
6195 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6196 	if (rc)
6197 		return rc;
6198 
6199 	return bnxt_hwrm_get_rings(bp);
6200 }
6201 
6202 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
6203 				   int cp, int stat, int vnic)
6204 {
6205 	if (BNXT_PF(bp))
6206 		return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
6207 						  vnic);
6208 	else
6209 		return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
6210 						  vnic);
6211 }
6212 
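/* Number of NQs (or legacy completion rings) in use, including any
 * MSI-X vectors set aside for ULPs such as the RoCE driver.
 */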
6213 int bnxt_nq_rings_in_use(struct bnxt *bp)
6214 {
6215 	int cp = bp->cp_nr_rings;
6216 	int ulp_msix, ulp_base;
6217 
6218 	ulp_msix = bnxt_get_ulp_msix_num(bp);
6219 	if (ulp_msix) {
6220 		ulp_base = bnxt_get_ulp_msix_base(bp);
6221 		cp += ulp_msix;
6222 		if ((ulp_base + ulp_msix) > cp)
6223 			cp = ulp_base + ulp_msix;
6224 	}
6225 	return cp;
6226 }
6227 
6228 static int bnxt_cp_rings_in_use(struct bnxt *bp)
6229 {
6230 	int cp;
6231 
6232 	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6233 		return bnxt_nq_rings_in_use(bp);
6234 
6235 	cp = bp->tx_nr_rings + bp->rx_nr_rings;
6236 	return cp;
6237 }
6238 
6239 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
6240 {
6241 	int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
6242 	int cp = bp->cp_nr_rings;
6243 
6244 	if (!ulp_stat)
6245 		return cp;
6246 
6247 	if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
6248 		return bnxt_get_ulp_msix_base(bp) + ulp_stat;
6249 
6250 	return cp + ulp_stat;
6251 }
6252 
6253 /* Check if a default RSS map needs to be set up.  This function is only
6254  * used on older firmware that does not require reserving RX rings.
6255  */
6256 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
6257 {
6258 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6259 
6260 	/* The RSS map is valid when the RX ring count matches resv_rx_rings */
6261 	if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
6262 		hw_resc->resv_rx_rings = bp->rx_nr_rings;
6263 		if (!netif_is_rxfh_configured(bp->dev))
6264 			bnxt_set_dflt_rss_indir_tbl(bp);
6265 	}
6266 }
6267 
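/* Compare the resources currently reserved in firmware against what
 * the present ring configuration requires.
 */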
6268 static bool bnxt_need_reserve_rings(struct bnxt *bp)
6269 {
6270 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6271 	int cp = bnxt_cp_rings_in_use(bp);
6272 	int nq = bnxt_nq_rings_in_use(bp);
6273 	int rx = bp->rx_nr_rings, stat;
6274 	int vnic = 1, grp = rx;
6275 
6276 	if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
6277 	    bp->hwrm_spec_code >= 0x10601)
6278 		return true;
6279 
6280 	/* Old firmware does not need RX ring reservations but we still
6281 	 * need to set up a default RSS map when needed.  With new firmware
6282 	 * we go through RX ring reservations first and then set up the
6283 	 * RSS map for the successfully reserved RX rings when needed.
6284 	 */
6285 	if (!BNXT_NEW_RM(bp)) {
6286 		bnxt_check_rss_tbl_no_rmgr(bp);
6287 		return false;
6288 	}
6289 	if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6290 		vnic = rx + 1;
6291 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
6292 		rx <<= 1;
6293 	stat = bnxt_get_func_stat_ctxs(bp);
6294 	if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
6295 	    hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
6296 	    (hw_resc->resv_hw_ring_grps != grp &&
6297 	     !(bp->flags & BNXT_FLAG_CHIP_P5)))
6298 		return true;
6299 	if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
6300 	    hw_resc->resv_irqs != nq)
6301 		return true;
6302 	return false;
6303 }
6304 
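/* Reserve rings in firmware for the requested configuration, then
 * adjust the driver's ring counts down to what firmware actually
 * granted.
 */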
6305 static int __bnxt_reserve_rings(struct bnxt *bp)
6306 {
6307 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6308 	int cp = bnxt_nq_rings_in_use(bp);
6309 	int tx = bp->tx_nr_rings;
6310 	int rx = bp->rx_nr_rings;
6311 	int grp, rx_rings, rc;
6312 	int vnic = 1, stat;
6313 	bool sh = false;
6314 
6315 	if (!bnxt_need_reserve_rings(bp))
6316 		return 0;
6317 
6318 	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
6319 		sh = true;
6320 	if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6321 		vnic = rx + 1;
6322 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
6323 		rx <<= 1;
6324 	grp = bp->rx_nr_rings;
6325 	stat = bnxt_get_func_stat_ctxs(bp);
6326 
6327 	rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
6328 	if (rc)
6329 		return rc;
6330 
6331 	tx = hw_resc->resv_tx_rings;
6332 	if (BNXT_NEW_RM(bp)) {
6333 		rx = hw_resc->resv_rx_rings;
6334 		cp = hw_resc->resv_irqs;
6335 		grp = hw_resc->resv_hw_ring_grps;
6336 		vnic = hw_resc->resv_vnics;
6337 		stat = hw_resc->resv_stat_ctxs;
6338 	}
6339 
6340 	rx_rings = rx;
6341 	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6342 		if (rx >= 2) {
6343 			rx_rings = rx >> 1;
6344 		} else {
6345 			if (netif_running(bp->dev))
6346 				return -ENOMEM;
6347 
6348 			bp->flags &= ~BNXT_FLAG_AGG_RINGS;
6349 			bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
6350 			bp->dev->hw_features &= ~NETIF_F_LRO;
6351 			bp->dev->features &= ~NETIF_F_LRO;
6352 			bnxt_set_ring_params(bp);
6353 		}
6354 	}
6355 	rx_rings = min_t(int, rx_rings, grp);
6356 	cp = min_t(int, cp, bp->cp_nr_rings);
6357 	if (stat > bnxt_get_ulp_stat_ctxs(bp))
6358 		stat -= bnxt_get_ulp_stat_ctxs(bp);
6359 	cp = min_t(int, cp, stat);
6360 	rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
6361 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
6362 		rx = rx_rings << 1;
6363 	cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
6364 	bp->tx_nr_rings = tx;
6365 
6366 	/* If we cannot reserve all the RX rings, reset the RSS map only
6367 	 * if absolutely necessary.
6368 	 */
6369 	if (rx_rings != bp->rx_nr_rings) {
6370 		netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
6371 			    rx_rings, bp->rx_nr_rings);
6372 		if ((bp->dev->priv_flags & IFF_RXFH_CONFIGURED) &&
6373 		    (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
6374 		     bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
6375 		     bnxt_get_max_rss_ring(bp) >= rx_rings)) {
6376 			netdev_warn(bp->dev, "RSS table entries reverting to default\n");
6377 			bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
6378 		}
6379 	}
6380 	bp->rx_nr_rings = rx_rings;
6381 	bp->cp_nr_rings = cp;
6382 
6383 	if (!tx || !rx || !cp || !grp || !vnic || !stat)
6384 		return -ENOMEM;
6385 
6386 	if (!netif_is_rxfh_configured(bp->dev))
6387 		bnxt_set_dflt_rss_indir_tbl(bp);
6388 
6389 	return rc;
6390 }
6391 
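/* The next three functions perform a dry-run reservation: the request
 * carries the *_ASSETS_TEST flags, so firmware only checks that the
 * resources are available without committing them.
 */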
6392 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6393 				    int ring_grps, int cp_rings, int stats,
6394 				    int vnics)
6395 {
6396 	struct hwrm_func_vf_cfg_input req = {0};
6397 	u32 flags;
6398 
6399 	if (!BNXT_NEW_RM(bp))
6400 		return 0;
6401 
6402 	__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6403 				     cp_rings, stats, vnics);
6404 	flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
6405 		FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6406 		FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6407 		FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6408 		FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
6409 		FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
6410 	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6411 		flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6412 
6413 	req.flags = cpu_to_le32(flags);
6414 	return hwrm_send_message_silent(bp, &req, sizeof(req),
6415 					HWRM_CMD_TIMEOUT);
6416 }
6417 
6418 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6419 				    int ring_grps, int cp_rings, int stats,
6420 				    int vnics)
6421 {
6422 	struct hwrm_func_cfg_input req = {0};
6423 	u32 flags;
6424 
6425 	__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6426 				     cp_rings, stats, vnics);
6427 	flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
6428 	if (BNXT_NEW_RM(bp)) {
6429 		flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6430 			 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6431 			 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6432 			 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
6433 		if (bp->flags & BNXT_FLAG_CHIP_P5)
6434 			flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
6435 				 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
6436 		else
6437 			flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6438 	}
6439 
6440 	req.flags = cpu_to_le32(flags);
6441 	return hwrm_send_message_silent(bp, &req, sizeof(req),
6442 					HWRM_CMD_TIMEOUT);
6443 }
6444 
6445 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6446 				 int ring_grps, int cp_rings, int stats,
6447 				 int vnics)
6448 {
6449 	if (bp->hwrm_spec_code < 0x10801)
6450 		return 0;
6451 
6452 	if (BNXT_PF(bp))
6453 		return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
6454 						ring_grps, cp_rings, stats,
6455 						vnics);
6456 
6457 	return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6458 					cp_rings, stats, vnics);
6459 }
6460 
6461 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
6462 {
6463 	struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6464 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6465 	struct hwrm_ring_aggint_qcaps_input req = {0};
6466 	int rc;
6467 
6468 	coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
6469 	coal_cap->num_cmpl_dma_aggr_max = 63;
6470 	coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
6471 	coal_cap->cmpl_aggr_dma_tmr_max = 65535;
6472 	coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
6473 	coal_cap->int_lat_tmr_min_max = 65535;
6474 	coal_cap->int_lat_tmr_max_max = 65535;
6475 	coal_cap->num_cmpl_aggr_int_max = 65535;
6476 	coal_cap->timer_units = 80;
6477 
6478 	if (bp->hwrm_spec_code < 0x10902)
6479 		return;
6480 
6481 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1);
6482 	mutex_lock(&bp->hwrm_cmd_lock);
6483 	rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6484 	if (!rc) {
6485 		coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
6486 		coal_cap->nq_params = le32_to_cpu(resp->nq_params);
6487 		coal_cap->num_cmpl_dma_aggr_max =
6488 			le16_to_cpu(resp->num_cmpl_dma_aggr_max);
6489 		coal_cap->num_cmpl_dma_aggr_during_int_max =
6490 			le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
6491 		coal_cap->cmpl_aggr_dma_tmr_max =
6492 			le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
6493 		coal_cap->cmpl_aggr_dma_tmr_during_int_max =
6494 			le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
6495 		coal_cap->int_lat_tmr_min_max =
6496 			le16_to_cpu(resp->int_lat_tmr_min_max);
6497 		coal_cap->int_lat_tmr_max_max =
6498 			le16_to_cpu(resp->int_lat_tmr_max_max);
6499 		coal_cap->num_cmpl_aggr_int_max =
6500 			le16_to_cpu(resp->num_cmpl_aggr_int_max);
6501 		coal_cap->timer_units = le16_to_cpu(resp->timer_units);
6502 	}
6503 	mutex_unlock(&bp->hwrm_cmd_lock);
6504 }
6505 
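/* Convert microseconds to device timer ticks.  timer_units is in
 * nanoseconds (80 ns unless firmware reports a different value).
 */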
6506 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
6507 {
6508 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6509 
6510 	return usec * 1000 / coal_cap->timer_units;
6511 }
6512 
6513 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
6514 	struct bnxt_coal *hw_coal,
6515 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
6516 {
6517 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6518 	u32 cmpl_params = coal_cap->cmpl_params;
6519 	u16 val, tmr, max, flags = 0;
6520 
6521 	max = hw_coal->bufs_per_record * 128;
6522 	if (hw_coal->budget)
6523 		max = hw_coal->bufs_per_record * hw_coal->budget;
6524 	max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
6525 
6526 	val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
6527 	req->num_cmpl_aggr_int = cpu_to_le16(val);
6528 
6529 	val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
6530 	req->num_cmpl_dma_aggr = cpu_to_le16(val);
6531 
6532 	val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
6533 		      coal_cap->num_cmpl_dma_aggr_during_int_max);
6534 	req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
6535 
6536 	tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6537 	tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
6538 	req->int_lat_tmr_max = cpu_to_le16(tmr);
6539 
6540 	/* min timer set to 1/2 of interrupt timer */
6541 	if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
6542 		val = tmr / 2;
6543 		val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
6544 		req->int_lat_tmr_min = cpu_to_le16(val);
6545 		req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6546 	}
6547 
6548 	/* buf timer set to 1/4 of interrupt timer */
6549 	val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
6550 	req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
6551 
6552 	if (cmpl_params &
6553 	    RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
6554 		tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6555 		val = clamp_t(u16, tmr, 1,
6556 			      coal_cap->cmpl_aggr_dma_tmr_during_int_max);
6557 		req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
6558 		req->enables |=
6559 			cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
6560 	}
6561 
6562 	if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
6563 		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
6564 	if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
6565 	    hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
6566 		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
6567 	req->flags = cpu_to_le16(flags);
6568 	req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
6569 }
6570 
6571 /* Caller holds bp->hwrm_cmd_lock */
6572 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
6573 				   struct bnxt_coal *hw_coal)
6574 {
6575 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
6576 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6577 	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6578 	u32 nq_params = coal_cap->nq_params;
6579 	u16 tmr;
6580 
6581 	if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
6582 		return 0;
6583 
6584 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
6585 			       -1, -1);
6586 	req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
6587 	req.flags =
6588 		cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
6589 
6590 	tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
6591 	tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
6592 	req.int_lat_tmr_min = cpu_to_le16(tmr);
6593 	req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6594 	return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6595 }
6596 
6597 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
6598 {
6599 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
6600 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6601 	struct bnxt_coal coal;
6602 
6603 	/* Tick values are in microseconds.
6604 	 * 1 coal_buf x bufs_per_record = 1 completion record.
6605 	 */
6606 	memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
6607 
6608 	coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
6609 	coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
6610 
6611 	if (!bnapi->rx_ring)
6612 		return -ENODEV;
6613 
6614 	bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6615 			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6616 
6617 	bnxt_hwrm_set_coal_params(bp, &coal, &req_rx);
6618 
6619 	req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6620 
6621 	return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
6622 				 HWRM_CMD_TIMEOUT);
6623 }
6624 
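/* Program interrupt coalescing on every completion ring, using the RX
 * or TX parameter set as appropriate.  P5 rings that carry both RX and
 * TX get both, and the shared NQ is programmed last.
 */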
6625 int bnxt_hwrm_set_coal(struct bnxt *bp)
6626 {
6627 	int i, rc = 0;
6628 	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
6629 							   req_tx = {0}, *req;
6630 
6631 	bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6632 			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6633 	bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
6634 			       HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6635 
6636 	bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx);
6637 	bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx);
6638 
6639 	mutex_lock(&bp->hwrm_cmd_lock);
6640 	for (i = 0; i < bp->cp_nr_rings; i++) {
6641 		struct bnxt_napi *bnapi = bp->bnapi[i];
6642 		struct bnxt_coal *hw_coal;
6643 		u16 ring_id;
6644 
6645 		req = &req_rx;
6646 		if (!bnapi->rx_ring) {
6647 			ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6648 			req = &req_tx;
6649 		} else {
6650 			ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
6651 		}
6652 		req->ring_id = cpu_to_le16(ring_id);
6653 
6654 		rc = _hwrm_send_message(bp, req, sizeof(*req),
6655 					HWRM_CMD_TIMEOUT);
6656 		if (rc)
6657 			break;
6658 
6659 		if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6660 			continue;
6661 
6662 		if (bnapi->rx_ring && bnapi->tx_ring) {
6663 			req = &req_tx;
6664 			ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6665 			req->ring_id = cpu_to_le16(ring_id);
6666 			rc = _hwrm_send_message(bp, req, sizeof(*req),
6667 						HWRM_CMD_TIMEOUT);
6668 			if (rc)
6669 				break;
6670 		}
6671 		if (bnapi->rx_ring)
6672 			hw_coal = &bp->rx_coal;
6673 		else
6674 			hw_coal = &bp->tx_coal;
6675 		__bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
6676 	}
6677 	mutex_unlock(&bp->hwrm_cmd_lock);
6678 	return rc;
6679 }
6680 
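/* Free all firmware statistics contexts.  On firmware older than major
 * version 21, the counters are explicitly cleared first, presumably so
 * that stale values do not survive a reallocation of the same context.
 */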
static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
{
	struct hwrm_stat_ctx_clr_stats_input req0 = {0};
	struct hwrm_stat_ctx_free_input req = {0};
	int i;

	if (!bp->bnapi)
		return;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		return;

	bnxt_hwrm_cmd_hdr_init(bp, &req0, HWRM_STAT_CTX_CLR_STATS, -1, -1);
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
			req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
			if (BNXT_FW_MAJ(bp) <= 20) {
				req0.stat_ctx_id = req.stat_ctx_id;
				_hwrm_send_message(bp, &req0, sizeof(req0),
						   HWRM_CMD_TIMEOUT);
			}
			_hwrm_send_message(bp, &req, sizeof(req),
					   HWRM_CMD_TIMEOUT);

			cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
		}
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
}

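/* Allocate one firmware statistics context per completion ring, pointing
 * firmware at the ring's stats DMA buffer and telling it how often (in ms)
 * to refresh the counters.
 */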
static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
{
	int rc = 0, i;
	struct hwrm_stat_ctx_alloc_input req = {0};
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);

	req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
	req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;

		req.stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);

		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;

		cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);

		bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

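/* Query the current function configuration: VF VLAN, DCBX/LLDP agent
 * presence, NPAR partition type, bridge (EVB) mode, maximum MTU, and the
 * usable L2 doorbell BAR size.
 */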
static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	u32 min_db_offset = 0;
	u16 flags;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
	req.fid = cpu_to_le16(0xffff);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto func_qcfg_exit;

#ifdef CONFIG_BNXT_SRIOV
	if (BNXT_VF(bp)) {
		struct bnxt_vf_info *vf = &bp->vf;

		vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
	} else {
		bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
	}
#endif
	flags = le16_to_cpu(resp->flags);
	if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
		     FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
		bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
		if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
			bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
	}
	if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
		bp->flags |= BNXT_FLAG_MULTI_HOST;
	if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
		bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;

	switch (resp->port_partition_type) {
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
	case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
		bp->port_partition_type = resp->port_partition_type;
		break;
	}
	if (bp->hwrm_spec_code < 0x10707 ||
	    resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
		bp->br_mode = BRIDGE_MODE_VEB;
	else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
		bp->br_mode = BRIDGE_MODE_VEPA;
	else
		bp->br_mode = BRIDGE_MODE_UNDEF;

	bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
	if (!bp->max_mtu)
		bp->max_mtu = BNXT_MAX_MTU;

	if (bp->db_size)
		goto func_qcfg_exit;

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		if (BNXT_PF(bp))
			min_db_offset = DB_PF_OFFSET_P5;
		else
			min_db_offset = DB_VF_OFFSET_P5;
	}
	bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
				 1024);
	if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
	    bp->db_size <= min_db_offset)
		bp->db_size = pci_resource_len(bp->pdev, 2);

func_qcfg_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

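/* Query how much host memory firmware wants backed for each context type
 * (QP, SRQ, CQ, VNIC, stats, TQM, MRAV, TIM) and cache the limits in a
 * newly allocated bnxt_ctx_mem_info.  Only meaningful on HWRM 1.9.2+ PFs.
 */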
static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
{
	struct hwrm_func_backing_store_qcaps_input req = {0};
	struct hwrm_func_backing_store_qcaps_output *resp =
		bp->hwrm_cmd_resp_addr;
	int rc;

	if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		struct bnxt_ctx_pg_info *ctx_pg;
		struct bnxt_ctx_mem_info *ctx;
		int i, tqm_rings;

		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
		if (!ctx) {
			rc = -ENOMEM;
			goto ctx_err;
		}
		ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
		ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
		ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
		ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
		ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
		ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
		ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
		ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
		ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
		ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
		ctx->vnic_max_vnic_entries =
			le16_to_cpu(resp->vnic_max_vnic_entries);
		ctx->vnic_max_ring_table_entries =
			le16_to_cpu(resp->vnic_max_ring_table_entries);
		ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
		ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
		ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
		ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
		ctx->tqm_min_entries_per_ring =
			le32_to_cpu(resp->tqm_min_entries_per_ring);
		ctx->tqm_max_entries_per_ring =
			le32_to_cpu(resp->tqm_max_entries_per_ring);
		ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
		if (!ctx->tqm_entries_multiple)
			ctx->tqm_entries_multiple = 1;
		ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
		ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
		ctx->mrav_num_entries_units =
			le16_to_cpu(resp->mrav_num_entries_units);
		ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
		ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
		ctx->ctx_kind_initializer = resp->ctx_kind_initializer;
		ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
		if (!ctx->tqm_fp_rings_count)
			ctx->tqm_fp_rings_count = bp->max_q;
		else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
			ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;

		tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
		ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
		if (!ctx_pg) {
			kfree(ctx);
			rc = -ENOMEM;
			goto ctx_err;
		}
		for (i = 0; i < tqm_rings; i++, ctx_pg++)
			ctx->tqm_mem[i] = ctx_pg;
		bp->ctx = ctx;
	} else {
		rc = 0;
	}
ctx_err:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
				  __le64 *pg_dir)
{
	BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
	if (rmem->depth >= 1) {
		if (rmem->depth == 2)
			*pg_attr |= 2;
		else
			*pg_attr |= 1;
		*pg_dir = cpu_to_le64(rmem->pg_tbl_map);
	} else {
		*pg_dir = cpu_to_le64(rmem->dma_arr[0]);
	}
}

#define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES			\
	(FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP |		\
	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ |		\
	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ |		\
	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC |		\
	 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)

static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
{
	struct hwrm_func_backing_store_cfg_input req = {0};
	struct bnxt_ctx_mem_info *ctx = bp->ctx;
	struct bnxt_ctx_pg_info *ctx_pg;
	__le32 *num_entries;
	__le64 *pg_dir;
	u32 flags = 0;
	u8 *pg_attr;
	u32 ena;
	int i;

	if (!ctx)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
	req.enables = cpu_to_le32(enables);

	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
		ctx_pg = &ctx->qp_mem;
		req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
		req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
		req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
		req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req.qpc_pg_size_qpc_lvl,
				      &req.qpc_page_dir);
	}
	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
		ctx_pg = &ctx->srq_mem;
		req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
		req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
		req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req.srq_pg_size_srq_lvl,
				      &req.srq_page_dir);
	}
	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
		ctx_pg = &ctx->cq_mem;
		req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
		req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
		req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
				      &req.cq_page_dir);
	}
	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
		ctx_pg = &ctx->vnic_mem;
		req.vnic_num_vnic_entries =
			cpu_to_le16(ctx->vnic_max_vnic_entries);
		req.vnic_num_ring_table_entries =
			cpu_to_le16(ctx->vnic_max_ring_table_entries);
		req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req.vnic_pg_size_vnic_lvl,
				      &req.vnic_page_dir);
	}
	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
		ctx_pg = &ctx->stat_mem;
		req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
		req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req.stat_pg_size_stat_lvl,
				      &req.stat_page_dir);
	}
	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
		ctx_pg = &ctx->mrav_mem;
		req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
		if (ctx->mrav_num_entries_units)
			flags |=
			FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
		req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req.mrav_pg_size_mrav_lvl,
				      &req.mrav_page_dir);
	}
	if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
		ctx_pg = &ctx->tim_mem;
		req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
		req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req.tim_pg_size_tim_lvl,
				      &req.tim_page_dir);
	}
	for (i = 0, num_entries = &req.tqm_sp_num_entries,
	     pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
	     pg_dir = &req.tqm_sp_page_dir,
	     ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
	     i < BNXT_MAX_TQM_RINGS;
	     i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
		if (!(enables & ena))
			continue;

		req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
		ctx_pg = ctx->tqm_mem[i];
		*num_entries = cpu_to_le32(ctx_pg->entries);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
	}
	req.flags = cpu_to_le32(flags);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
				  struct bnxt_ctx_pg_info *ctx_pg)
{
	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;

	rmem->page_size = BNXT_PAGE_SIZE;
	rmem->pg_arr = ctx_pg->ctx_pg_arr;
	rmem->dma_arr = ctx_pg->ctx_dma_arr;
	rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
	if (rmem->depth >= 1)
		rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
	return bnxt_alloc_ring(bp, rmem);
}

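/* Allocate the backing-store pages for one context type.  Regions that fit
 * in MAX_CTX_PAGES use a single-level ring; larger regions (or callers
 * requesting depth > 1) get a two-level page directory of per-table blocks.
 */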
static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
				  struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
				  u8 depth, bool use_init_val)
{
	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
	int rc;

	if (!mem_size)
		return -EINVAL;

	ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
	if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
		ctx_pg->nr_pages = 0;
		return -EINVAL;
	}
	if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
		int nr_tbls, i;

		rmem->depth = 2;
		ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
					     GFP_KERNEL);
		if (!ctx_pg->ctx_pg_tbl)
			return -ENOMEM;
		nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
		rmem->nr_pages = nr_tbls;
		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
		if (rc)
			return rc;
		for (i = 0; i < nr_tbls; i++) {
			struct bnxt_ctx_pg_info *pg_tbl;

			pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
			if (!pg_tbl)
				return -ENOMEM;
			ctx_pg->ctx_pg_tbl[i] = pg_tbl;
			rmem = &pg_tbl->ring_mem;
			rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
			rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
			rmem->depth = 1;
			rmem->nr_pages = MAX_CTX_PAGES;
			if (use_init_val)
				rmem->init_val = bp->ctx->ctx_kind_initializer;
			if (i == (nr_tbls - 1)) {
				int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;

				if (rem)
					rmem->nr_pages = rem;
			}
			rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
			if (rc)
				break;
		}
	} else {
		rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
		if (rmem->nr_pages > 1 || depth)
			rmem->depth = 1;
		if (use_init_val)
			rmem->init_val = bp->ctx->ctx_kind_initializer;
		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
	}
	return rc;
}

static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
				  struct bnxt_ctx_pg_info *ctx_pg)
{
	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;

	if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
	    ctx_pg->ctx_pg_tbl) {
		int i, nr_tbls = rmem->nr_pages;

		for (i = 0; i < nr_tbls; i++) {
			struct bnxt_ctx_pg_info *pg_tbl;
			struct bnxt_ring_mem_info *rmem2;

			pg_tbl = ctx_pg->ctx_pg_tbl[i];
			if (!pg_tbl)
				continue;
			rmem2 = &pg_tbl->ring_mem;
			bnxt_free_ring(bp, rmem2);
			ctx_pg->ctx_pg_arr[i] = NULL;
			kfree(pg_tbl);
			ctx_pg->ctx_pg_tbl[i] = NULL;
		}
		kfree(ctx_pg->ctx_pg_tbl);
		ctx_pg->ctx_pg_tbl = NULL;
	}
	bnxt_free_ring(bp, rmem);
	ctx_pg->nr_pages = 0;
}

static void bnxt_free_ctx_mem(struct bnxt *bp)
{
	struct bnxt_ctx_mem_info *ctx = bp->ctx;
	int i;

	if (!ctx)
		return;

	if (ctx->tqm_mem[0]) {
		for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
			bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
		kfree(ctx->tqm_mem[0]);
		ctx->tqm_mem[0] = NULL;
	}

	bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
	bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
	bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
	bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
	bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
	bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
	bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
	ctx->flags &= ~BNXT_CTX_FLAG_INITED;
}

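/* Size and allocate all backing-store regions, then hand their page
 * directories to firmware.  When RoCE is supported (and this is not a
 * kdump kernel), extra QP/SRQ entries plus MRAV and TIM contexts are
 * reserved as well.
 */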
static int bnxt_alloc_ctx_mem(struct bnxt *bp)
{
	struct bnxt_ctx_pg_info *ctx_pg;
	struct bnxt_ctx_mem_info *ctx;
	u32 mem_size, ena, entries;
	u32 entries_sp, min;
	u32 num_mr, num_ah;
	u32 extra_srqs = 0;
	u32 extra_qps = 0;
	u8 pg_lvl = 1;
	int i, rc;

	rc = bnxt_hwrm_func_backing_store_qcaps(bp);
	if (rc) {
		netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
			   rc);
		return rc;
	}
	ctx = bp->ctx;
	if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
		return 0;

	if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
		pg_lvl = 2;
		extra_qps = 65536;
		extra_srqs = 8192;
	}

	ctx_pg = &ctx->qp_mem;
	ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
			  extra_qps;
	mem_size = ctx->qp_entry_size * ctx_pg->entries;
	rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
	if (rc)
		return rc;

	ctx_pg = &ctx->srq_mem;
	ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
	mem_size = ctx->srq_entry_size * ctx_pg->entries;
	rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
	if (rc)
		return rc;

	ctx_pg = &ctx->cq_mem;
	ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
	mem_size = ctx->cq_entry_size * ctx_pg->entries;
	rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
	if (rc)
		return rc;

	ctx_pg = &ctx->vnic_mem;
	ctx_pg->entries = ctx->vnic_max_vnic_entries +
			  ctx->vnic_max_ring_table_entries;
	mem_size = ctx->vnic_entry_size * ctx_pg->entries;
	rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
	if (rc)
		return rc;

	ctx_pg = &ctx->stat_mem;
	ctx_pg->entries = ctx->stat_max_entries;
	mem_size = ctx->stat_entry_size * ctx_pg->entries;
	rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
	if (rc)
		return rc;

	ena = 0;
	if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
		goto skip_rdma;

	ctx_pg = &ctx->mrav_mem;
	/* 128K extra is needed to accommodate static AH context
	 * allocation by f/w.
	 */
	num_mr = 1024 * 256;
	num_ah = 1024 * 128;
	ctx_pg->entries = num_mr + num_ah;
	mem_size = ctx->mrav_entry_size * ctx_pg->entries;
	rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, true);
	if (rc)
		return rc;
	ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
	if (ctx->mrav_num_entries_units)
		ctx_pg->entries =
			((num_mr / ctx->mrav_num_entries_units) << 16) |
			 (num_ah / ctx->mrav_num_entries_units);

	ctx_pg = &ctx->tim_mem;
	ctx_pg->entries = ctx->qp_mem.entries;
	mem_size = ctx->tim_entry_size * ctx_pg->entries;
	rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
	if (rc)
		return rc;
	ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;

skip_rdma:
	min = ctx->tqm_min_entries_per_ring;
	entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
		     2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
	entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
	entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
	entries = roundup(entries, ctx->tqm_entries_multiple);
	entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
	for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
		ctx_pg = ctx->tqm_mem[i];
		ctx_pg->entries = i ? entries : entries_sp;
		mem_size = ctx->tqm_entry_size * ctx_pg->entries;
		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
		if (rc)
			return rc;
		ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
	}
	ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
	rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
	if (rc) {
		netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
			   rc);
		return rc;
	}
	ctx->flags |= BNXT_CTX_FLAG_INITED;
	return 0;
}

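/* Query min/max resource counts (rings, VNICs, stat contexts, etc.) from
 * firmware.  With @all set, the full set of limits is cached in
 * bp->hw_resc; otherwise only max_tx_sch_inputs is refreshed.
 */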
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
{
	struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_resource_qcaps_input req = {0};
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
	req.fid = cpu_to_le16(0xffff);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message_silent(bp, &req, sizeof(req),
				       HWRM_CMD_TIMEOUT);
	if (rc)
		goto hwrm_func_resc_qcaps_exit;

	hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
	if (!all)
		goto hwrm_func_resc_qcaps_exit;

	hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
	hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
	hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
	hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
	hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
	hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
	hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
	hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
	hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
	hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
	hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
	hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
	hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
	hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
	hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
	hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		u16 max_msix = le16_to_cpu(resp->max_msix);

		hw_resc->max_nqs = max_msix;
		hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
	}

	if (BNXT_PF(bp)) {
		struct bnxt_pf_info *pf = &bp->pf;

		pf->vf_resv_strategy =
			le16_to_cpu(resp->vf_reservation_strategy);
		if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
			pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
	}
hwrm_func_resc_qcaps_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

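/* Query function capabilities and record them as driver flag and fw_cap
 * bits, resource maxima, and the function's identity (FID, port ID, MAC
 * address) for either the PF or the VF.
 */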
static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	u32 flags, flags_ext;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
	req.fid = cpu_to_le16(0xffff);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto hwrm_func_qcaps_exit;

	flags = le32_to_cpu(resp->flags);
	if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
		bp->flags |= BNXT_FLAG_ROCEV1_CAP;
	if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
		bp->flags |= BNXT_FLAG_ROCEV2_CAP;
	if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
	if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
		bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
	if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
	if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
		bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
	if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
		bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
	if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
		bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;

	flags_ext = le32_to_cpu(resp->flags_ext);
	if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;

	bp->tx_push_thresh = 0;
	if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
	    BNXT_FW_MAJ(bp) > 217)
		bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;

	hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
	hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
	hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
	hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
	hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
	if (!hw_resc->max_hw_ring_grps)
		hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
	hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
	hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
	hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);

	if (BNXT_PF(bp)) {
		struct bnxt_pf_info *pf = &bp->pf;

		pf->fw_fid = le16_to_cpu(resp->fid);
		pf->port_id = le16_to_cpu(resp->port_id);
		memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
		pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
		pf->max_vfs = le16_to_cpu(resp->max_vfs);
		pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
		pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
		pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
		pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
		pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
		pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
		bp->flags &= ~BNXT_FLAG_WOL_CAP;
		if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
			bp->flags |= BNXT_FLAG_WOL_CAP;
	} else {
#ifdef CONFIG_BNXT_SRIOV
		struct bnxt_vf_info *vf = &bp->vf;

		vf->fw_fid = le16_to_cpu(resp->fid);
		memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
#endif
	}

hwrm_func_qcaps_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);

static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc;

	rc = __bnxt_hwrm_func_qcaps(bp);
	if (rc)
		return rc;
	rc = bnxt_hwrm_queue_qportcfg(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
		return rc;
	}
	if (bp->hwrm_spec_code >= 0x10803) {
		rc = bnxt_alloc_ctx_mem(bp);
		if (rc)
			return rc;
		rc = bnxt_hwrm_func_resc_qcaps(bp, true);
		if (!rc)
			bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
	}
	return 0;
}

static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
{
	struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
	struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
	int rc = 0;
	u32 flags;

	if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
		return 0;

	resp = bp->hwrm_cmd_resp_addr;
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto hwrm_cfa_adv_qcaps_exit;

	flags = le32_to_cpu(resp->flags);
	if (flags &
	    CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;

hwrm_cfa_adv_qcaps_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int __bnxt_alloc_fw_health(struct bnxt *bp)
{
	if (bp->fw_health)
		return 0;

	bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
	if (!bp->fw_health)
		return -ENOMEM;

	return 0;
}

static int bnxt_alloc_fw_health(struct bnxt *bp)
{
	int rc;

	if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
	    !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
		return 0;

	rc = __bnxt_alloc_fw_health(bp);
	if (rc) {
		bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
		return rc;
	}

	return 0;
}

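/* The firmware health registers live in GRC space; point a BAR0 register
 * window at the containing region so they can be read with plain readl().
 */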
static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
{
	writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
					 BNXT_GRCPF_REG_WINDOW_BASE_OUT +
					 BNXT_FW_HEALTH_WIN_MAP_OFF);
}

static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
{
	void __iomem *hs;
	u32 status_loc;
	u32 reg_type;
	u32 sig;

	__bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
	hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);

	sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
	if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
		if (bp->fw_health)
			bp->fw_health->status_reliable = false;
		return;
	}

	if (__bnxt_alloc_fw_health(bp)) {
		netdev_warn(bp->dev, "no memory for firmware status checks\n");
		return;
	}

	status_loc = readl(hs + offsetof(struct hcomm_status, fw_status_loc));
	bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
	reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
	if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
		__bnxt_map_fw_health_reg(bp, status_loc);
		bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
			BNXT_FW_HEALTH_WIN_OFF(status_loc);
	}

	bp->fw_health->status_reliable = true;
}

static int bnxt_map_fw_health_regs(struct bnxt *bp)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	u32 reg_base = 0xffffffff;
	int i;

	/* Only pre-map the monitoring GRC registers using window 3 */
	for (i = 0; i < 4; i++) {
		u32 reg = fw_health->regs[i];

		if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
			continue;
		if (reg_base == 0xffffffff)
			reg_base = reg & BNXT_GRC_BASE_MASK;
		if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
			return -ERANGE;
		fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
	}
	if (reg_base == 0xffffffff)
		return 0;

	__bnxt_map_fw_health_reg(bp, reg_base);
	return 0;
}

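/* Fetch the firmware error-recovery parameters: polling and wait intervals
 * (in deciseconds), the health/heartbeat/reset-counter register locations,
 * and the register write sequence used to reset the chip.
 */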
static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
{
	struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_fw_health *fw_health = bp->fw_health;
	struct hwrm_error_recovery_qcfg_input req = {0};
	int rc, i;

	if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto err_recovery_out;
	fw_health->flags = le32_to_cpu(resp->flags);
	if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
	    !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
		rc = -EINVAL;
		goto err_recovery_out;
	}
	fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
	fw_health->master_func_wait_dsecs =
		le32_to_cpu(resp->master_func_wait_period);
	fw_health->normal_func_wait_dsecs =
		le32_to_cpu(resp->normal_func_wait_period);
	fw_health->post_reset_wait_dsecs =
		le32_to_cpu(resp->master_func_wait_period_after_reset);
	fw_health->post_reset_max_wait_dsecs =
		le32_to_cpu(resp->max_bailout_time_after_reset);
	fw_health->regs[BNXT_FW_HEALTH_REG] =
		le32_to_cpu(resp->fw_health_status_reg);
	fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
		le32_to_cpu(resp->fw_heartbeat_reg);
	fw_health->regs[BNXT_FW_RESET_CNT_REG] =
		le32_to_cpu(resp->fw_reset_cnt_reg);
	fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
		le32_to_cpu(resp->reset_inprogress_reg);
	fw_health->fw_reset_inprog_reg_mask =
		le32_to_cpu(resp->reset_inprogress_reg_mask);
	fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
	if (fw_health->fw_reset_seq_cnt >= 16) {
		rc = -EINVAL;
		goto err_recovery_out;
	}
	for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
		fw_health->fw_reset_seq_regs[i] =
			le32_to_cpu(resp->reset_reg[i]);
		fw_health->fw_reset_seq_vals[i] =
			le32_to_cpu(resp->reset_reg_val[i]);
		fw_health->fw_reset_seq_delay_msec[i] =
			resp->delay_after_reset[i];
	}
err_recovery_out:
	mutex_unlock(&bp->hwrm_cmd_lock);
	if (!rc)
		rc = bnxt_map_fw_health_regs(bp);
	if (rc)
		bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
	return rc;
}

static int bnxt_hwrm_func_reset(struct bnxt *bp)
{
	struct hwrm_func_reset_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
	req.enables = 0;

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
}

static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
{
	struct hwrm_nvm_get_dev_info_output nvm_info;

	if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
		snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
			 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
			 nvm_info.nvm_cfg_ver_upd);
}

static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_queue_qportcfg_input req = {0};
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
	u8 i, j, *qptr;
	bool no_rdma;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto qportcfg_exit;

	if (!resp->max_configurable_queues) {
		rc = -EINVAL;
		goto qportcfg_exit;
	}
	bp->max_tc = resp->max_configurable_queues;
	bp->max_lltc = resp->max_configurable_lossless_queues;
	if (bp->max_tc > BNXT_MAX_QUEUE)
		bp->max_tc = BNXT_MAX_QUEUE;

	no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
	qptr = &resp->queue_id0;
	for (i = 0, j = 0; i < bp->max_tc; i++) {
		bp->q_info[j].queue_id = *qptr;
		bp->q_ids[i] = *qptr++;
		bp->q_info[j].queue_profile = *qptr++;
		bp->tc_to_qidx[j] = j;
		if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
		    (no_rdma && BNXT_PF(bp)))
			j++;
	}
	bp->max_q = bp->max_tc;
	bp->max_tc = max_t(u8, j, 1);

	if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
		bp->max_tc = 1;

	if (bp->max_lltc > bp->max_tc)
		bp->max_lltc = bp->max_tc;

qportcfg_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent)
{
	struct hwrm_ver_get_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT,
				   silent);
	return rc;
}

static int bnxt_hwrm_ver_get(struct bnxt *bp)
{
	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
	u16 fw_maj, fw_min, fw_bld, fw_rsv;
	u32 dev_caps_cfg, hwrm_ver;
	int rc, len;

	bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = __bnxt_hwrm_ver_get(bp, false);
	if (rc)
		goto hwrm_ver_get_exit;

	memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));

	bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
			     resp->hwrm_intf_min_8b << 8 |
			     resp->hwrm_intf_upd_8b;
	if (resp->hwrm_intf_maj_8b < 1) {
		netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
			    resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
			    resp->hwrm_intf_upd_8b);
		netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
	}

	hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
			HWRM_VERSION_UPDATE;

	if (bp->hwrm_spec_code > hwrm_ver)
		snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
			 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
			 HWRM_VERSION_UPDATE);
	else
		snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
			 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
			 resp->hwrm_intf_upd_8b);

	fw_maj = le16_to_cpu(resp->hwrm_fw_major);
	if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
		fw_min = le16_to_cpu(resp->hwrm_fw_minor);
		fw_bld = le16_to_cpu(resp->hwrm_fw_build);
		fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
		len = FW_VER_STR_LEN;
	} else {
		fw_maj = resp->hwrm_fw_maj_8b;
		fw_min = resp->hwrm_fw_min_8b;
		fw_bld = resp->hwrm_fw_bld_8b;
		fw_rsv = resp->hwrm_fw_rsvd_8b;
		len = BC_HWRM_STR_LEN;
	}
	bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
	snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
		 fw_rsv);

	if (strlen(resp->active_pkg_name)) {
		int fw_ver_len = strlen(bp->fw_ver_str);

		snprintf(bp->fw_ver_str + fw_ver_len,
			 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
			 resp->active_pkg_name);
		bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
	}

	bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
	if (!bp->hwrm_cmd_timeout)
		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;

	if (resp->hwrm_intf_maj_8b >= 1) {
		bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
		bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
	}
	if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
		bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;

	bp->chip_num = le16_to_cpu(resp->chip_num);
	bp->chip_rev = resp->chip_rev;
	if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
	    !resp->chip_metal)
		bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;

	dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
	if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
	    (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
		bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;

	if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;

	if (dev_caps_cfg &
	    VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;

	if (dev_caps_cfg &
	    VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;

	if (dev_caps_cfg &
	    VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;

hwrm_ver_get_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

int bnxt_hwrm_fw_set_time(struct bnxt *bp)
{
	struct hwrm_fw_set_time_input req = {0};
	struct tm tm;
	time64_t now = ktime_get_real_seconds();

	if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
	    bp->hwrm_spec_code < 0x10400)
		return -EOPNOTSUPP;

	time64_to_tm(now, 0, &tm);
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
	req.year = cpu_to_le16(1900 + tm.tm_year);
	req.month = 1 + tm.tm_mon;
	req.day = tm.tm_mday;
	req.hour = tm.tm_hour;
	req.minute = tm.tm_min;
	req.second = tm.tm_sec;
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

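/* Fold one hardware counter into its 64-bit software mirror.  @mask is the
 * width of the hardware counter; if the new value is below the previously
 * seen value, the counter wrapped, so one full period is added.
 */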
static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
{
	u64 sw_tmp;

	hw &= mask;
	sw_tmp = (*sw & ~mask) | hw;
	if (hw < (*sw & mask))
		sw_tmp += mask + 1;
	WRITE_ONCE(*sw, sw_tmp);
}

static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
				    int count, bool ignore_zero)
{
	int i;

	for (i = 0; i < count; i++) {
		u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));

		if (ignore_zero && !hw)
			continue;

		if (masks[i] == -1ULL)
			sw_stats[i] = hw;
		else
			bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
	}
}

static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
{
	if (!stats->hw_stats)
		return;

	__bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
				stats->hw_masks, stats->len / 8, false);
}

static void bnxt_accumulate_all_stats(struct bnxt *bp)
{
	struct bnxt_stats_mem *ring0_stats;
	bool ignore_zero = false;
	int i;

	/* Chip bug.  Counter intermittently becomes 0. */
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		ignore_zero = true;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_stats_mem *stats;

		cpr = &bnapi->cp_ring;
		stats = &cpr->stats;
		if (!i)
			ring0_stats = stats;
		__bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
					ring0_stats->hw_masks,
					ring0_stats->len / 8, ignore_zero);
	}
	if (bp->flags & BNXT_FLAG_PORT_STATS) {
		struct bnxt_stats_mem *stats = &bp->port_stats;
		__le64 *hw_stats = stats->hw_stats;
		u64 *sw_stats = stats->sw_stats;
		u64 *masks = stats->hw_masks;
		int cnt;

		cnt = sizeof(struct rx_port_stats) / 8;
		__bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);

		hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
		sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
		masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
		cnt = sizeof(struct tx_port_stats) / 8;
		__bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
	}
	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
		bnxt_accumulate_stats(&bp->rx_port_stats_ext);
		bnxt_accumulate_stats(&bp->tx_port_stats_ext);
	}
}

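/* Ask firmware to DMA the port-level RX/TX statistics into the driver's
 * port stats buffer.  A non-zero @flags value is only honored when the
 * firmware supports extended hardware stats.
 */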
static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
{
	struct bnxt_pf_info *pf = &bp->pf;
	struct hwrm_port_qstats_input req = {0};

	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
		return 0;

	if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
		return -EOPNOTSUPP;

	req.flags = flags;
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
	req.port_id = cpu_to_le16(pf->port_id);
	req.tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
					    BNXT_TX_PORT_STATS_BYTE_OFFSET);
	req.rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
{
	struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
	struct hwrm_port_qstats_ext_input req = {0};
	struct bnxt_pf_info *pf = &bp->pf;
	u32 tx_stat_size;
	int rc;

	if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
		return 0;

	if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
		return -EOPNOTSUPP;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
	req.flags = flags;
	req.port_id = cpu_to_le16(pf->port_id);
	req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
	req.rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
	tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
		       sizeof(struct tx_port_stats_ext) : 0;
	req.tx_stat_size = cpu_to_le16(tx_stat_size);
	req.tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
		bp->fw_tx_stats_ext_size = tx_stat_size ?
			le16_to_cpu(resp->tx_stat_size) / 8 : 0;
	} else {
		bp->fw_rx_stats_ext_size = 0;
		bp->fw_tx_stats_ext_size = 0;
	}
	if (flags)
		goto qstats_done;

	if (bp->fw_tx_stats_ext_size <=
	    offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
		mutex_unlock(&bp->hwrm_cmd_lock);
		bp->pri2cos_valid = 0;
		return rc;
	}

	bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
	req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);

	rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT);
	if (!rc) {
		struct hwrm_queue_pri2cos_qcfg_output *resp2;
		u8 *pri2cos;
		int i, j;

		resp2 = bp->hwrm_cmd_resp_addr;
		pri2cos = &resp2->pri0_cos_queue_id;
		for (i = 0; i < 8; i++) {
			u8 queue_id = pri2cos[i];
			u8 queue_idx;

			/* Per port queue IDs start from 0, 10, 20, etc */
			queue_idx = queue_id % 10;
			if (queue_idx > BNXT_MAX_QUEUE) {
				bp->pri2cos_valid = false;
				goto qstats_done;
			}
			for (j = 0; j < bp->max_q; j++) {
				if (bp->q_ids[j] == queue_id)
					bp->pri2cos_idx[i] = queue_idx;
			}
		}
		bp->pri2cos_valid = 1;
	}
qstats_done:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
{
	if (bp->vxlan_fw_dst_port_id != INVALID_HW_RING_ID)
		bnxt_hwrm_tunnel_dst_port_free(
			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
	if (bp->nge_fw_dst_port_id != INVALID_HW_RING_ID)
		bnxt_hwrm_tunnel_dst_port_free(
			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
}

static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
{
	int rc, i;
	u32 tpa_flags = 0;

	if (set_tpa)
		tpa_flags = bp->flags & BNXT_FLAG_TPA;
	else if (BNXT_NO_FW_ACCESS(bp))
		return 0;
	for (i = 0; i < bp->nr_vnics; i++) {
		rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
		if (rc) {
			netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
				   i, rc);
			return rc;
		}
	}
	return 0;
}

static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->nr_vnics; i++)
		bnxt_hwrm_vnic_set_rss(bp, i, false);
}

static void bnxt_clear_vnic(struct bnxt *bp)
{
	if (!bp->vnic_info)
		return;

	bnxt_hwrm_clear_vnic_filter(bp);
	if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
		/* clear all RSS settings before freeing the vnic ctx */
		bnxt_hwrm_clear_vnic_rss(bp);
		bnxt_hwrm_vnic_ctx_free(bp);
	}
	/* before freeing the vnic, undo the vnic TPA settings */
	if (bp->flags & BNXT_FLAG_TPA)
		bnxt_set_tpa(bp, false);
	bnxt_hwrm_vnic_free(bp);
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		bnxt_hwrm_vnic_ctx_free(bp);
}

static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
				    bool irq_re_init)
{
	bnxt_clear_vnic(bp);
	bnxt_hwrm_ring_free(bp, close_path);
	bnxt_hwrm_ring_grp_free(bp);
	if (irq_re_init) {
		bnxt_hwrm_stat_ctx_free(bp);
		bnxt_hwrm_free_tunnel_ports(bp);
	}
}

static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
{
	struct hwrm_func_cfg_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(0xffff);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
	if (br_mode == BRIDGE_MODE_VEB)
		req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
	else if (br_mode == BRIDGE_MODE_VEPA)
		req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
	else
		return -EINVAL;
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
{
	struct hwrm_func_cfg_input req = {0};

	if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(0xffff);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
	req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
	if (size == 128)
		req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

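/* Set up one VNIC on pre-P5 chips: allocate its RSS (and COS, on Nitro A0)
 * contexts, configure the default ring group, enable RSS hashing, and turn
 * on header-data split when aggregation rings are in use.
 */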
__bnxt_setup_vnic(struct bnxt * bp,u16 vnic_id)8126 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8127 {
8128 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
8129 	int rc;
8130 
8131 	if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
8132 		goto skip_rss_ctx;
8133 
8134 	/* allocate context for vnic */
8135 	rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
8136 	if (rc) {
8137 		netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8138 			   vnic_id, rc);
8139 		goto vnic_setup_err;
8140 	}
8141 	bp->rsscos_nr_ctxs++;
8142 
8143 	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8144 		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
8145 		if (rc) {
8146 			netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
8147 				   vnic_id, rc);
8148 			goto vnic_setup_err;
8149 		}
8150 		bp->rsscos_nr_ctxs++;
8151 	}
8152 
8153 skip_rss_ctx:
8154 	/* configure default vnic, ring grp */
8155 	rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8156 	if (rc) {
8157 		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8158 			   vnic_id, rc);
8159 		goto vnic_setup_err;
8160 	}
8161 
8162 	/* Enable RSS hashing on vnic */
8163 	rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
8164 	if (rc) {
8165 		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
8166 			   vnic_id, rc);
8167 		goto vnic_setup_err;
8168 	}
8169 
8170 	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8171 		rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8172 		if (rc) {
8173 			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8174 				   vnic_id, rc);
8175 		}
8176 	}
8177 
8178 vnic_setup_err:
8179 	return rc;
8180 }
8181 
__bnxt_setup_vnic_p5(struct bnxt * bp,u16 vnic_id)8182 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
8183 {
8184 	int rc, i, nr_ctxs;
8185 
8186 	nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
8187 	for (i = 0; i < nr_ctxs; i++) {
8188 		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
8189 		if (rc) {
8190 			netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
8191 				   vnic_id, i, rc);
8192 			break;
8193 		}
8194 		bp->rsscos_nr_ctxs++;
8195 	}
8196 	if (i < nr_ctxs)
8197 		return -ENOMEM;
8198 
8199 	rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
8200 	if (rc) {
8201 		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
8202 			   vnic_id, rc);
8203 		return rc;
8204 	}
8205 	rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8206 	if (rc) {
8207 		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8208 			   vnic_id, rc);
8209 		return rc;
8210 	}
8211 	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8212 		rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8213 		if (rc) {
8214 			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8215 				   vnic_id, rc);
8216 		}
8217 	}
8218 	return rc;
8219 }
8220 
bnxt_setup_vnic(struct bnxt * bp,u16 vnic_id)8221 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8222 {
8223 	if (bp->flags & BNXT_FLAG_CHIP_P5)
8224 		return __bnxt_setup_vnic_p5(bp, vnic_id);
8225 	else
8226 		return __bnxt_setup_vnic(bp, vnic_id);
8227 }
8228 
bnxt_alloc_rfs_vnics(struct bnxt * bp)8229 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
8230 {
8231 #ifdef CONFIG_RFS_ACCEL
8232 	int i, rc = 0;
8233 
8234 	if (bp->flags & BNXT_FLAG_CHIP_P5)
8235 		return 0;
8236 
8237 	for (i = 0; i < bp->rx_nr_rings; i++) {
8238 		struct bnxt_vnic_info *vnic;
8239 		u16 vnic_id = i + 1;
8240 		u16 ring_id = i;
8241 
8242 		if (vnic_id >= bp->nr_vnics)
8243 			break;
8244 
8245 		vnic = &bp->vnic_info[vnic_id];
8246 		vnic->flags |= BNXT_VNIC_RFS_FLAG;
8247 		if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8248 			vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
8249 		rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
8250 		if (rc) {
8251 			netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8252 				   vnic_id, rc);
8253 			break;
8254 		}
8255 		rc = bnxt_setup_vnic(bp, vnic_id);
8256 		if (rc)
8257 			break;
8258 	}
8259 	return rc;
8260 #else
8261 	return 0;
8262 #endif
8263 }
8264 
8265 /* Allow PF and VF with default VLAN to be in promiscuous mode */
bnxt_promisc_ok(struct bnxt * bp)8266 static bool bnxt_promisc_ok(struct bnxt *bp)
8267 {
8268 #ifdef CONFIG_BNXT_SRIOV
8269 	if (BNXT_VF(bp) && !bp->vf.vlan)
8270 		return false;
8271 #endif
8272 	return true;
8273 }
8274 
8275 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
8276 {
8277 	int rc = 0;
8278 
8279 	rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
8280 	if (rc) {
8281 		netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8282 			   rc);
8283 		return rc;
8284 	}
8285 
8286 	rc = bnxt_hwrm_vnic_cfg(bp, 1);
8287 	if (rc) {
8288 		netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8289 			   rc);
8290 		return rc;
8291 	}
8292 	return rc;
8293 }
8294 
8295 static int bnxt_cfg_rx_mode(struct bnxt *);
8296 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
8297 
8298 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
8299 {
8300 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8301 	int rc = 0;
8302 	unsigned int rx_nr_rings = bp->rx_nr_rings;
8303 
8304 	if (irq_re_init) {
8305 		rc = bnxt_hwrm_stat_ctx_alloc(bp);
8306 		if (rc) {
8307 			netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
8308 				   rc);
8309 			goto err_out;
8310 		}
8311 	}
8312 
8313 	rc = bnxt_hwrm_ring_alloc(bp);
8314 	if (rc) {
8315 		netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
8316 		goto err_out;
8317 	}
8318 
8319 	rc = bnxt_hwrm_ring_grp_alloc(bp);
8320 	if (rc) {
8321 		netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
8322 		goto err_out;
8323 	}
8324 
8325 	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8326 		rx_nr_rings--;
8327 
8328 	/* default vnic 0 */
8329 	rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
8330 	if (rc) {
8331 		netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
8332 		goto err_out;
8333 	}
8334 
8335 	rc = bnxt_setup_vnic(bp, 0);
8336 	if (rc)
8337 		goto err_out;
8338 
8339 	if (bp->flags & BNXT_FLAG_RFS) {
8340 		rc = bnxt_alloc_rfs_vnics(bp);
8341 		if (rc)
8342 			goto err_out;
8343 	}
8344 
8345 	if (bp->flags & BNXT_FLAG_TPA) {
8346 		rc = bnxt_set_tpa(bp, true);
8347 		if (rc)
8348 			goto err_out;
8349 	}
8350 
8351 	if (BNXT_VF(bp))
8352 		bnxt_update_vf_mac(bp);
8353 
8354 	/* Filter for default vnic 0 */
8355 	rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
8356 	if (rc) {
8357 		netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
8358 		goto err_out;
8359 	}
8360 	vnic->uc_filter_count = 1;
8361 
8362 	vnic->rx_mask = 0;
8363 	if (bp->dev->flags & IFF_BROADCAST)
8364 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
8365 
8366 	if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
8367 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8368 
8369 	if (bp->dev->flags & IFF_ALLMULTI) {
8370 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8371 		vnic->mc_list_count = 0;
8372 	} else {
8373 		u32 mask = 0;
8374 
8375 		bnxt_mc_list_updated(bp, &mask);
8376 		vnic->rx_mask |= mask;
8377 	}
8378 
8379 	rc = bnxt_cfg_rx_mode(bp);
8380 	if (rc)
8381 		goto err_out;
8382 
8383 	rc = bnxt_hwrm_set_coal(bp);
8384 	if (rc)
8385 		netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
8386 				rc);
8387 
8388 	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8389 		rc = bnxt_setup_nitroa0_vnic(bp);
8390 		if (rc)
8391 			netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
8392 				   rc);
8393 	}
8394 
8395 	if (BNXT_VF(bp)) {
8396 		bnxt_hwrm_func_qcfg(bp);
8397 		netdev_update_features(bp->dev);
8398 	}
8399 
8400 	return 0;
8401 
8402 err_out:
8403 	bnxt_hwrm_resource_free(bp, 0, true);
8404 
8405 	return rc;
8406 }
8407 
8408 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
8409 {
8410 	bnxt_hwrm_resource_free(bp, 1, irq_re_init);
8411 	return 0;
8412 }
8413 
8414 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
8415 {
8416 	bnxt_init_cp_rings(bp);
8417 	bnxt_init_rx_rings(bp);
8418 	bnxt_init_tx_rings(bp);
8419 	bnxt_init_ring_grps(bp, irq_re_init);
8420 	bnxt_init_vnics(bp);
8421 
8422 	return bnxt_init_chip(bp, irq_re_init);
8423 }
8424 
8425 static int bnxt_set_real_num_queues(struct bnxt *bp)
8426 {
8427 	int rc;
8428 	struct net_device *dev = bp->dev;
8429 
8430 	rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
8431 					  bp->tx_nr_rings_xdp);
8432 	if (rc)
8433 		return rc;
8434 
8435 	rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
8436 	if (rc)
8437 		return rc;
8438 
8439 #ifdef CONFIG_RFS_ACCEL
8440 	if (bp->flags & BNXT_FLAG_RFS)
8441 		dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
8442 #endif
8443 
8444 	return rc;
8445 }
8446 
8447 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
8448 			   bool shared)
8449 {
8450 	int _rx = *rx, _tx = *tx;
8451 
8452 	if (shared) {
8453 		*rx = min_t(int, _rx, max);
8454 		*tx = min_t(int, _tx, max);
8455 	} else {
8456 		if (max < 2)
8457 			return -ENOMEM;
8458 
8459 		while (_rx + _tx > max) {
8460 			if (_rx > _tx && _rx > 1)
8461 				_rx--;
8462 			else if (_tx > 1)
8463 				_tx--;
8464 		}
8465 		*rx = _rx;
8466 		*tx = _tx;
8467 	}
8468 	return 0;
8469 }
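
/* Worked example (values made up): with shared completion rings, RX and TX
 * are only clamped individually, e.g. max = 5, rx = 6, tx = 4 gives rx = 5,
 * tx = 4.  Without sharing, each RX and TX ring needs its own completion
 * ring, so the pair is shrunk until rx + tx <= max, trimming the larger
 * side first:
 *
 *	max = 5, rx = 4, tx = 4:  (4,4) -> (4,3) -> (3,3) -> (3,2)
 */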
8470 
8471 static void bnxt_setup_msix(struct bnxt *bp)
8472 {
8473 	const int len = sizeof(bp->irq_tbl[0].name);
8474 	struct net_device *dev = bp->dev;
8475 	int tcs, i;
8476 
8477 	tcs = netdev_get_num_tc(dev);
8478 	if (tcs) {
8479 		int i, off, count;
8480 
8481 		for (i = 0; i < tcs; i++) {
8482 			count = bp->tx_nr_rings_per_tc;
8483 			off = i * count;
8484 			netdev_set_tc_queue(dev, i, count, off);
8485 		}
8486 	}
8487 
8488 	for (i = 0; i < bp->cp_nr_rings; i++) {
8489 		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8490 		char *attr;
8491 
8492 		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
8493 			attr = "TxRx";
8494 		else if (i < bp->rx_nr_rings)
8495 			attr = "rx";
8496 		else
8497 			attr = "tx";
8498 
8499 		snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
8500 			 attr, i);
8501 		bp->irq_tbl[map_idx].handler = bnxt_msix;
8502 	}
8503 }
8504 
8505 static void bnxt_setup_inta(struct bnxt *bp)
8506 {
8507 	const int len = sizeof(bp->irq_tbl[0].name);
8508 
8509 	if (netdev_get_num_tc(bp->dev))
8510 		netdev_reset_tc(bp->dev);
8511 
8512 	snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
8513 		 0);
8514 	bp->irq_tbl[0].handler = bnxt_inta;
8515 }
8516 
8517 static int bnxt_init_int_mode(struct bnxt *bp);
8518 
8519 static int bnxt_setup_int_mode(struct bnxt *bp)
8520 {
8521 	int rc;
8522 
8523 	if (!bp->irq_tbl) {
8524 		rc = bnxt_init_int_mode(bp);
8525 		if (rc || !bp->irq_tbl)
8526 			return rc ?: -ENODEV;
8527 	}
8528 
8529 	if (bp->flags & BNXT_FLAG_USING_MSIX)
8530 		bnxt_setup_msix(bp);
8531 	else
8532 		bnxt_setup_inta(bp);
8533 
8534 	rc = bnxt_set_real_num_queues(bp);
8535 	return rc;
8536 }
8537 
8538 #ifdef CONFIG_RFS_ACCEL
8539 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
8540 {
8541 	return bp->hw_resc.max_rsscos_ctxs;
8542 }
8543 
8544 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
8545 {
8546 	return bp->hw_resc.max_vnics;
8547 }
8548 #endif
8549 
8550 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
8551 {
8552 	return bp->hw_resc.max_stat_ctxs;
8553 }
8554 
8555 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
8556 {
8557 	return bp->hw_resc.max_cp_rings;
8558 }
8559 
8560 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
8561 {
8562 	unsigned int cp = bp->hw_resc.max_cp_rings;
8563 
8564 	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8565 		cp -= bnxt_get_ulp_msix_num(bp);
8566 
8567 	return cp;
8568 }
8569 
8570 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
8571 {
8572 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8573 
8574 	if (bp->flags & BNXT_FLAG_CHIP_P5)
8575 		return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
8576 
8577 	return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
8578 }
8579 
8580 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
8581 {
8582 	bp->hw_resc.max_irqs = max_irqs;
8583 }
8584 
8585 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
8586 {
8587 	unsigned int cp;
8588 
8589 	cp = bnxt_get_max_func_cp_rings_for_en(bp);
8590 	if (bp->flags & BNXT_FLAG_CHIP_P5)
8591 		return cp - bp->rx_nr_rings - bp->tx_nr_rings;
8592 	else
8593 		return cp - bp->cp_nr_rings;
8594 }
8595 
8596 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
8597 {
8598 	return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
8599 }
8600 
8601 int bnxt_get_avail_msix(struct bnxt *bp, int num)
8602 {
8603 	int max_cp = bnxt_get_max_func_cp_rings(bp);
8604 	int max_irq = bnxt_get_max_func_irqs(bp);
8605 	int total_req = bp->cp_nr_rings + num;
8606 	int max_idx, avail_msix;
8607 
8608 	max_idx = bp->total_irqs;
8609 	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8610 		max_idx = min_t(int, bp->total_irqs, max_cp);
8611 	avail_msix = max_idx - bp->cp_nr_rings;
8612 	if (!BNXT_NEW_RM(bp) || avail_msix >= num)
8613 		return avail_msix;
8614 
8615 	if (max_irq < total_req) {
8616 		num = max_irq - bp->cp_nr_rings;
8617 		if (num <= 0)
8618 			return 0;
8619 	}
8620 	return num;
8621 }
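
/* Worked example (values made up): with 16 vectors currently enabled,
 * 8 completion rings in use, and the resource manager capping this
 * function at 12 IRQs, a request for 10 more vectors returns 4:
 * avail_msix = 16 - 8 = 8 is less than 10, so num is clamped to
 * max_irq - cp_nr_rings = 12 - 8 = 4.
 */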
8622 
8623 static int bnxt_get_num_msix(struct bnxt *bp)
8624 {
8625 	if (!BNXT_NEW_RM(bp))
8626 		return bnxt_get_max_func_irqs(bp);
8627 
8628 	return bnxt_nq_rings_in_use(bp);
8629 }
8630 
8631 static int bnxt_init_msix(struct bnxt *bp)
8632 {
8633 	int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
8634 	struct msix_entry *msix_ent;
8635 
8636 	total_vecs = bnxt_get_num_msix(bp);
8637 	max = bnxt_get_max_func_irqs(bp);
8638 	if (total_vecs > max)
8639 		total_vecs = max;
8640 
8641 	if (!total_vecs)
8642 		return 0;
8643 
8644 	msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
8645 	if (!msix_ent)
8646 		return -ENOMEM;
8647 
8648 	for (i = 0; i < total_vecs; i++) {
8649 		msix_ent[i].entry = i;
8650 		msix_ent[i].vector = 0;
8651 	}
8652 
8653 	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
8654 		min = 2;
8655 
8656 	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
8657 	ulp_msix = bnxt_get_ulp_msix_num(bp);
8658 	if (total_vecs < 0 || total_vecs < ulp_msix) {
8659 		rc = -ENODEV;
8660 		goto msix_setup_exit;
8661 	}
8662 
8663 	bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
8664 	if (bp->irq_tbl) {
8665 		for (i = 0; i < total_vecs; i++)
8666 			bp->irq_tbl[i].vector = msix_ent[i].vector;
8667 
8668 		bp->total_irqs = total_vecs;
8669 		/* Trim rings based on the number of vectors allocated */
8670 		rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
8671 				     total_vecs - ulp_msix, min == 1);
8672 		if (rc)
8673 			goto msix_setup_exit;
8674 
8675 		bp->cp_nr_rings = (min == 1) ?
8676 				  max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
8677 				  bp->tx_nr_rings + bp->rx_nr_rings;
8678 
8679 	} else {
8680 		rc = -ENOMEM;
8681 		goto msix_setup_exit;
8682 	}
8683 	bp->flags |= BNXT_FLAG_USING_MSIX;
8684 	kfree(msix_ent);
8685 	return 0;
8686 
8687 msix_setup_exit:
8688 	netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
8689 	kfree(bp->irq_tbl);
8690 	bp->irq_tbl = NULL;
8691 	pci_disable_msix(bp->pdev);
8692 	kfree(msix_ent);
8693 	return rc;
8694 }
8695 
8696 static int bnxt_init_inta(struct bnxt *bp)
8697 {
8698 	bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
8699 	if (!bp->irq_tbl)
8700 		return -ENOMEM;
8701 
8702 	bp->total_irqs = 1;
8703 	bp->rx_nr_rings = 1;
8704 	bp->tx_nr_rings = 1;
8705 	bp->cp_nr_rings = 1;
8706 	bp->flags |= BNXT_FLAG_SHARED_RINGS;
8707 	bp->irq_tbl[0].vector = bp->pdev->irq;
8708 	return 0;
8709 }
8710 
8711 static int bnxt_init_int_mode(struct bnxt *bp)
8712 {
8713 	int rc = -ENODEV;
8714 
8715 	if (bp->flags & BNXT_FLAG_MSIX_CAP)
8716 		rc = bnxt_init_msix(bp);
8717 
8718 	if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
8719 		/* fallback to INTA */
8720 		rc = bnxt_init_inta(bp);
8721 	}
8722 	return rc;
8723 }
8724 
8725 static void bnxt_clear_int_mode(struct bnxt *bp)
8726 {
8727 	if (bp->flags & BNXT_FLAG_USING_MSIX)
8728 		pci_disable_msix(bp->pdev);
8729 
8730 	kfree(bp->irq_tbl);
8731 	bp->irq_tbl = NULL;
8732 	bp->flags &= ~BNXT_FLAG_USING_MSIX;
8733 }
8734 
8735 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
8736 {
8737 	int tcs = netdev_get_num_tc(bp->dev);
8738 	bool irq_cleared = false;
8739 	int rc;
8740 
8741 	if (!bnxt_need_reserve_rings(bp))
8742 		return 0;
8743 
8744 	if (irq_re_init && BNXT_NEW_RM(bp) &&
8745 	    bnxt_get_num_msix(bp) != bp->total_irqs) {
8746 		bnxt_ulp_irq_stop(bp);
8747 		bnxt_clear_int_mode(bp);
8748 		irq_cleared = true;
8749 	}
8750 	rc = __bnxt_reserve_rings(bp);
8751 	if (irq_cleared) {
8752 		if (!rc)
8753 			rc = bnxt_init_int_mode(bp);
8754 		bnxt_ulp_irq_restart(bp, rc);
8755 	}
8756 	if (rc) {
8757 		netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
8758 		return rc;
8759 	}
8760 	if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
8761 		netdev_err(bp->dev, "tx ring reservation failure\n");
8762 		netdev_reset_tc(bp->dev);
8763 		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
8764 		return -ENOMEM;
8765 	}
8766 	return 0;
8767 }
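
/* The TC check above keeps the queue layout consistent: the reserved TX
 * rings must still divide evenly among the traffic classes, e.g. 2 TCs at
 * 4 rings per TC need exactly 8 TX rings.  If the reservation came back
 * trimmed, the TC config is reset rather than left inconsistent.
 */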
8768 
8769 static void bnxt_free_irq(struct bnxt *bp)
8770 {
8771 	struct bnxt_irq *irq;
8772 	int i;
8773 
8774 #ifdef CONFIG_RFS_ACCEL
8775 	free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
8776 	bp->dev->rx_cpu_rmap = NULL;
8777 #endif
8778 	if (!bp->irq_tbl || !bp->bnapi)
8779 		return;
8780 
8781 	for (i = 0; i < bp->cp_nr_rings; i++) {
8782 		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8783 
8784 		irq = &bp->irq_tbl[map_idx];
8785 		if (irq->requested) {
8786 			if (irq->have_cpumask) {
8787 				irq_set_affinity_hint(irq->vector, NULL);
8788 				free_cpumask_var(irq->cpu_mask);
8789 				irq->have_cpumask = 0;
8790 			}
8791 			free_irq(irq->vector, bp->bnapi[i]);
8792 		}
8793 
8794 		irq->requested = 0;
8795 	}
8796 }
8797 
8798 static int bnxt_request_irq(struct bnxt *bp)
8799 {
8800 	int i, j, rc = 0;
8801 	unsigned long flags = 0;
8802 #ifdef CONFIG_RFS_ACCEL
8803 	struct cpu_rmap *rmap;
8804 #endif
8805 
8806 	rc = bnxt_setup_int_mode(bp);
8807 	if (rc) {
8808 		netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
8809 			   rc);
8810 		return rc;
8811 	}
8812 #ifdef CONFIG_RFS_ACCEL
8813 	rmap = bp->dev->rx_cpu_rmap;
8814 #endif
8815 	if (!(bp->flags & BNXT_FLAG_USING_MSIX))
8816 		flags = IRQF_SHARED;
8817 
8818 	for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
8819 		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8820 		struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
8821 
8822 #ifdef CONFIG_RFS_ACCEL
8823 		if (rmap && bp->bnapi[i]->rx_ring) {
8824 			rc = irq_cpu_rmap_add(rmap, irq->vector);
8825 			if (rc)
8826 				netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
8827 					    j);
8828 			j++;
8829 		}
8830 #endif
8831 		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
8832 				 bp->bnapi[i]);
8833 		if (rc)
8834 			break;
8835 
8836 		irq->requested = 1;
8837 
8838 		if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
8839 			int numa_node = dev_to_node(&bp->pdev->dev);
8840 
8841 			irq->have_cpumask = 1;
8842 			cpumask_set_cpu(cpumask_local_spread(i, numa_node),
8843 					irq->cpu_mask);
8844 			rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
8845 			if (rc) {
8846 				netdev_warn(bp->dev,
8847 					    "Set affinity failed, IRQ = %d\n",
8848 					    irq->vector);
8849 				break;
8850 			}
8851 		}
8852 	}
8853 	return rc;
8854 }
8855 
8856 static void bnxt_del_napi(struct bnxt *bp)
8857 {
8858 	int i;
8859 
8860 	if (!bp->bnapi)
8861 		return;
8862 
8863 	for (i = 0; i < bp->cp_nr_rings; i++) {
8864 		struct bnxt_napi *bnapi = bp->bnapi[i];
8865 
8866 		__netif_napi_del(&bnapi->napi);
8867 	}
8868 	/* We called __netif_napi_del(), so we need to respect an RCU
8869 	 * grace period before freeing the napi structures.
8870 	 */
8871 	synchronize_net();
8872 }
8873 
8874 static void bnxt_init_napi(struct bnxt *bp)
8875 {
8876 	int i;
8877 	unsigned int cp_nr_rings = bp->cp_nr_rings;
8878 	struct bnxt_napi *bnapi;
8879 
8880 	if (bp->flags & BNXT_FLAG_USING_MSIX) {
8881 		int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
8882 
8883 		if (bp->flags & BNXT_FLAG_CHIP_P5)
8884 			poll_fn = bnxt_poll_p5;
8885 		else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8886 			cp_nr_rings--;
8887 		for (i = 0; i < cp_nr_rings; i++) {
8888 			bnapi = bp->bnapi[i];
8889 			netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
8890 		}
8891 		if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8892 			bnapi = bp->bnapi[cp_nr_rings];
8893 			netif_napi_add(bp->dev, &bnapi->napi,
8894 				       bnxt_poll_nitroa0, 64);
8895 		}
8896 	} else {
8897 		bnapi = bp->bnapi[0];
8898 		netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
8899 	}
8900 }
8901 
8902 static void bnxt_disable_napi(struct bnxt *bp)
8903 {
8904 	int i;
8905 
8906 	if (!bp->bnapi)
8907 		return;
8908 
8909 	for (i = 0; i < bp->cp_nr_rings; i++) {
8910 		struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
8911 
8912 		napi_disable(&bp->bnapi[i]->napi);
8913 		if (bp->bnapi[i]->rx_ring)
8914 			cancel_work_sync(&cpr->dim.work);
8915 	}
8916 }
8917 
8918 static void bnxt_enable_napi(struct bnxt *bp)
8919 {
8920 	int i;
8921 
8922 	for (i = 0; i < bp->cp_nr_rings; i++) {
8923 		struct bnxt_napi *bnapi = bp->bnapi[i];
8924 		struct bnxt_cp_ring_info *cpr;
8925 
8926 		cpr = &bnapi->cp_ring;
8927 		if (bnapi->in_reset)
8928 			cpr->sw_stats.rx.rx_resets++;
8929 		bnapi->in_reset = false;
8930 
8931 		if (bnapi->rx_ring) {
8932 			INIT_WORK(&cpr->dim.work, bnxt_dim_work);
8933 			cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
8934 		}
8935 		napi_enable(&bnapi->napi);
8936 	}
8937 }
8938 
8939 void bnxt_tx_disable(struct bnxt *bp)
8940 {
8941 	int i;
8942 	struct bnxt_tx_ring_info *txr;
8943 
8944 	if (bp->tx_ring) {
8945 		for (i = 0; i < bp->tx_nr_rings; i++) {
8946 			txr = &bp->tx_ring[i];
8947 			WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
8948 		}
8949 	}
8950 	/* Make sure napi polls see @dev_state change */
8951 	synchronize_net();
8952 	/* Drop carrier first to prevent TX timeout */
8953 	netif_carrier_off(bp->dev);
8954 	/* Stop all TX queues */
8955 	netif_tx_disable(bp->dev);
8956 }
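
/* The WRITE_ONCE()/synchronize_net() pair above is a publish-then-wait
 * pattern: the CLOSING state is published first, then synchronize_net()
 * waits out any NAPI poller still running with the old value, so no poller
 * can re-wake a queue after netif_tx_disable() has stopped it.
 */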
8957 
8958 void bnxt_tx_enable(struct bnxt *bp)
8959 {
8960 	int i;
8961 	struct bnxt_tx_ring_info *txr;
8962 
8963 	for (i = 0; i < bp->tx_nr_rings; i++) {
8964 		txr = &bp->tx_ring[i];
8965 		WRITE_ONCE(txr->dev_state, 0);
8966 	}
8967 	/* Make sure napi polls see @dev_state change */
8968 	synchronize_net();
8969 	netif_tx_wake_all_queues(bp->dev);
8970 	if (bp->link_info.link_up)
8971 		netif_carrier_on(bp->dev);
8972 }
8973 
8974 static char *bnxt_report_fec(struct bnxt_link_info *link_info)
8975 {
8976 	u8 active_fec = link_info->active_fec_sig_mode &
8977 			PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
8978 
8979 	switch (active_fec) {
8980 	default:
8981 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
8982 		return "None";
8983 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
8984 		return "Clause 74 BaseR";
8985 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
8986 		return "Clause 91 RS(528,514)";
8987 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
8988 		return "Clause 91 RS544_1XN";
8989 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
8990 		return "Clause 91 RS(544,514)";
8991 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
8992 		return "Clause 91 RS272_1XN";
8993 	case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
8994 		return "Clause 91 RS(272,257)";
8995 	}
8996 }
8997 
8998 static void bnxt_report_link(struct bnxt *bp)
8999 {
9000 	if (bp->link_info.link_up) {
9001 		const char *duplex;
9002 		const char *flow_ctrl;
9003 		u32 speed;
9004 		u16 fec;
9005 
9006 		netif_carrier_on(bp->dev);
9007 		speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
9008 		if (speed == SPEED_UNKNOWN) {
9009 			netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
9010 			return;
9011 		}
9012 		if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
9013 			duplex = "full";
9014 		else
9015 			duplex = "half";
9016 		if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
9017 			flow_ctrl = "ON - receive & transmit";
9018 		else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
9019 			flow_ctrl = "ON - transmit";
9020 		else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
9021 			flow_ctrl = "ON - receive";
9022 		else
9023 			flow_ctrl = "none";
9024 		netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
9025 			    speed, duplex, flow_ctrl);
9026 		if (bp->flags & BNXT_FLAG_EEE_CAP)
9027 			netdev_info(bp->dev, "EEE is %s\n",
9028 				    bp->eee.eee_active ? "active" :
9029 							 "not active");
9030 		fec = bp->link_info.fec_cfg;
9031 		if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
9032 			netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
9033 				    (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
9034 				    bnxt_report_fec(&bp->link_info));
9035 	} else {
9036 		netif_carrier_off(bp->dev);
9037 		netdev_err(bp->dev, "NIC Link is Down\n");
9038 	}
9039 }
9040 
9041 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
9042 {
9043 	if (!resp->supported_speeds_auto_mode &&
9044 	    !resp->supported_speeds_force_mode &&
9045 	    !resp->supported_pam4_speeds_auto_mode &&
9046 	    !resp->supported_pam4_speeds_force_mode)
9047 		return true;
9048 	return false;
9049 }
9050 
9051 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
9052 {
9053 	int rc = 0;
9054 	struct hwrm_port_phy_qcaps_input req = {0};
9055 	struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
9056 	struct bnxt_link_info *link_info = &bp->link_info;
9057 
9058 	bp->flags &= ~BNXT_FLAG_EEE_CAP;
9059 	if (bp->test_info)
9060 		bp->test_info->flags &= ~(BNXT_TEST_FL_EXT_LPBK |
9061 					  BNXT_TEST_FL_AN_PHY_LPBK);
9062 	if (bp->hwrm_spec_code < 0x10201)
9063 		return 0;
9064 
9065 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
9066 
9067 	mutex_lock(&bp->hwrm_cmd_lock);
9068 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9069 	if (rc)
9070 		goto hwrm_phy_qcaps_exit;
9071 
9072 	if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
9073 		struct ethtool_eee *eee = &bp->eee;
9074 		u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
9075 
9076 		bp->flags |= BNXT_FLAG_EEE_CAP;
9077 		eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9078 		bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
9079 				 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
9080 		bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
9081 				 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
9082 	}
9083 	if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) {
9084 		if (bp->test_info)
9085 			bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK;
9086 	}
9087 	if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED) {
9088 		if (bp->test_info)
9089 			bp->test_info->flags |= BNXT_TEST_FL_AN_PHY_LPBK;
9090 	}
9091 	if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED) {
9092 		if (BNXT_PF(bp))
9093 			bp->fw_cap |= BNXT_FW_CAP_SHARED_PORT_CFG;
9094 	}
9095 	if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET)
9096 		bp->fw_cap |= BNXT_FW_CAP_PORT_STATS_NO_RESET;
9097 
9098 	if (bp->hwrm_spec_code >= 0x10a01) {
9099 		if (bnxt_phy_qcaps_no_speed(resp)) {
9100 			link_info->phy_state = BNXT_PHY_STATE_DISABLED;
9101 			netdev_warn(bp->dev, "Ethernet link disabled\n");
9102 		} else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
9103 			link_info->phy_state = BNXT_PHY_STATE_ENABLED;
9104 			netdev_info(bp->dev, "Ethernet link enabled\n");
9105 			/* Phy re-enabled, reprobe the speeds */
9106 			link_info->support_auto_speeds = 0;
9107 			link_info->support_pam4_auto_speeds = 0;
9108 		}
9109 	}
9110 	if (resp->supported_speeds_auto_mode)
9111 		link_info->support_auto_speeds =
9112 			le16_to_cpu(resp->supported_speeds_auto_mode);
9113 	if (resp->supported_pam4_speeds_auto_mode)
9114 		link_info->support_pam4_auto_speeds =
9115 			le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
9116 
9117 	bp->port_count = resp->port_cnt;
9118 
9119 hwrm_phy_qcaps_exit:
9120 	mutex_unlock(&bp->hwrm_cmd_lock);
9121 	return rc;
9122 }
9123 
9124 static bool bnxt_support_dropped(u16 advertising, u16 supported)
9125 {
9126 	u16 diff = advertising ^ supported;
9127 
9128 	return ((supported | diff) != supported);
9129 }
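
/* The XOR/OR test above is equivalent to (advertising & ~supported) != 0,
 * i.e. "is any advertised speed no longer supported?".  Worked example
 * (values made up): advertising = 0x5, supported = 0x4 gives diff = 0x1,
 * and (0x4 | 0x1) != 0x4, so the function returns true.
 */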
9130 
9131 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
9132 {
9133 	int rc = 0;
9134 	struct bnxt_link_info *link_info = &bp->link_info;
9135 	struct hwrm_port_phy_qcfg_input req = {0};
9136 	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
9137 	u8 link_up = link_info->link_up;
9138 	bool support_changed = false;
9139 
9140 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
9141 
9142 	mutex_lock(&bp->hwrm_cmd_lock);
9143 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9144 	if (rc) {
9145 		mutex_unlock(&bp->hwrm_cmd_lock);
9146 		return rc;
9147 	}
9148 
9149 	memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
9150 	link_info->phy_link_status = resp->link;
9151 	link_info->duplex = resp->duplex_cfg;
9152 	if (bp->hwrm_spec_code >= 0x10800)
9153 		link_info->duplex = resp->duplex_state;
9154 	link_info->pause = resp->pause;
9155 	link_info->auto_mode = resp->auto_mode;
9156 	link_info->auto_pause_setting = resp->auto_pause;
9157 	link_info->lp_pause = resp->link_partner_adv_pause;
9158 	link_info->force_pause_setting = resp->force_pause;
9159 	link_info->duplex_setting = resp->duplex_cfg;
9160 	if (link_info->phy_link_status == BNXT_LINK_LINK)
9161 		link_info->link_speed = le16_to_cpu(resp->link_speed);
9162 	else
9163 		link_info->link_speed = 0;
9164 	link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
9165 	link_info->force_pam4_link_speed =
9166 		le16_to_cpu(resp->force_pam4_link_speed);
9167 	link_info->support_speeds = le16_to_cpu(resp->support_speeds);
9168 	link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
9169 	link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
9170 	link_info->auto_pam4_link_speeds =
9171 		le16_to_cpu(resp->auto_pam4_link_speed_mask);
9172 	link_info->lp_auto_link_speeds =
9173 		le16_to_cpu(resp->link_partner_adv_speeds);
9174 	link_info->lp_auto_pam4_link_speeds =
9175 		resp->link_partner_pam4_adv_speeds;
9176 	link_info->preemphasis = le32_to_cpu(resp->preemphasis);
9177 	link_info->phy_ver[0] = resp->phy_maj;
9178 	link_info->phy_ver[1] = resp->phy_min;
9179 	link_info->phy_ver[2] = resp->phy_bld;
9180 	link_info->media_type = resp->media_type;
9181 	link_info->phy_type = resp->phy_type;
9182 	link_info->transceiver = resp->xcvr_pkg_type;
9183 	link_info->phy_addr = resp->eee_config_phy_addr &
9184 			      PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
9185 	link_info->module_status = resp->module_status;
9186 
9187 	if (bp->flags & BNXT_FLAG_EEE_CAP) {
9188 		struct ethtool_eee *eee = &bp->eee;
9189 		u16 fw_speeds;
9190 
9191 		eee->eee_active = 0;
9192 		if (resp->eee_config_phy_addr &
9193 		    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
9194 			eee->eee_active = 1;
9195 			fw_speeds = le16_to_cpu(
9196 				resp->link_partner_adv_eee_link_speed_mask);
9197 			eee->lp_advertised =
9198 				_bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9199 		}
9200 
9201 		/* Pull initial EEE config */
9202 		if (!chng_link_state) {
9203 			if (resp->eee_config_phy_addr &
9204 			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
9205 				eee->eee_enabled = 1;
9206 
9207 			fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
9208 			eee->advertised =
9209 				_bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9210 
9211 			if (resp->eee_config_phy_addr &
9212 			    PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
9213 				__le32 tmr;
9214 
9215 				eee->tx_lpi_enabled = 1;
9216 				tmr = resp->xcvr_identifier_type_tx_lpi_timer;
9217 				eee->tx_lpi_timer = le32_to_cpu(tmr) &
9218 					PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
9219 			}
9220 		}
9221 	}
9222 
9223 	link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
9224 	if (bp->hwrm_spec_code >= 0x10504) {
9225 		link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
9226 		link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
9227 	}
9228 	/* TODO: need to add more logic to report VF link */
9229 	if (chng_link_state) {
9230 		if (link_info->phy_link_status == BNXT_LINK_LINK)
9231 			link_info->link_up = 1;
9232 		else
9233 			link_info->link_up = 0;
9234 		if (link_up != link_info->link_up)
9235 			bnxt_report_link(bp);
9236 	} else {
9237 		/* always link down if not required to update link state */
9238 		link_info->link_up = 0;
9239 	}
9240 	mutex_unlock(&bp->hwrm_cmd_lock);
9241 
9242 	if (!BNXT_PHY_CFG_ABLE(bp))
9243 		return 0;
9244 
9245 	/* Check if any advertised speeds are no longer supported. The caller
9246 	 * holds the link_lock mutex, so we can modify link_info settings.
9247 	 */
9248 	if (bnxt_support_dropped(link_info->advertising,
9249 				 link_info->support_auto_speeds)) {
9250 		link_info->advertising = link_info->support_auto_speeds;
9251 		support_changed = true;
9252 	}
9253 	if (bnxt_support_dropped(link_info->advertising_pam4,
9254 				 link_info->support_pam4_auto_speeds)) {
9255 		link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
9256 		support_changed = true;
9257 	}
9258 	if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
9259 		bnxt_hwrm_set_link_setting(bp, true, false);
9260 	return 0;
9261 }
9262 
9263 static void bnxt_get_port_module_status(struct bnxt *bp)
9264 {
9265 	struct bnxt_link_info *link_info = &bp->link_info;
9266 	struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
9267 	u8 module_status;
9268 
9269 	if (bnxt_update_link(bp, true))
9270 		return;
9271 
9272 	module_status = link_info->module_status;
9273 	switch (module_status) {
9274 	case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
9275 	case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
9276 	case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
9277 		netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
9278 			    bp->pf.port_id);
9279 		if (bp->hwrm_spec_code >= 0x10201) {
9280 			netdev_warn(bp->dev, "Module part number %s\n",
9281 				    resp->phy_vendor_partnumber);
9282 		}
9283 		if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
9284 			netdev_warn(bp->dev, "TX is disabled\n");
9285 		if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
9286 			netdev_warn(bp->dev, "SFP+ module is shutdown\n");
9287 	}
9288 }
9289 
9290 static void
9291 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9292 {
9293 	if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
9294 		if (bp->hwrm_spec_code >= 0x10201)
9295 			req->auto_pause =
9296 				PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
9297 		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9298 			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
9299 		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9300 			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
9301 		req->enables |=
9302 			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9303 	} else {
9304 		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9305 			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
9306 		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9307 			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
9308 		req->enables |=
9309 			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
9310 		if (bp->hwrm_spec_code >= 0x10201) {
9311 			req->auto_pause = req->force_pause;
9312 			req->enables |= cpu_to_le32(
9313 				PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9314 		}
9315 	}
9316 }
9317 
9318 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9319 {
9320 	if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
9321 		req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
9322 		if (bp->link_info.advertising) {
9323 			req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
9324 			req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
9325 		}
9326 		if (bp->link_info.advertising_pam4) {
9327 			req->enables |=
9328 				cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
9329 			req->auto_link_pam4_speed_mask =
9330 				cpu_to_le16(bp->link_info.advertising_pam4);
9331 		}
9332 		req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
9333 		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
9334 	} else {
9335 		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
9336 		if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
9337 			req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9338 			req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
9339 		} else {
9340 			req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9341 		}
9342 	}
9343 
9344 	/* Tell the ChiMP firmware that the setting takes effect immediately */
9345 	req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
9346 }
9347 
9348 int bnxt_hwrm_set_pause(struct bnxt *bp)
9349 {
9350 	struct hwrm_port_phy_cfg_input req = {0};
9351 	int rc;
9352 
9353 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9354 	bnxt_hwrm_set_pause_common(bp, &req);
9355 
9356 	if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
9357 	    bp->link_info.force_link_chng)
9358 		bnxt_hwrm_set_link_common(bp, &req);
9359 
9360 	mutex_lock(&bp->hwrm_cmd_lock);
9361 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9362 	if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
9363 		/* Since changing the pause setting doesn't trigger any link
9364 		 * change event, the driver needs to update the current pause
9365 		 * result upon successful return of the phy_cfg command.
9366 		 */
9367 		bp->link_info.pause =
9368 		bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
9369 		bp->link_info.auto_pause_setting = 0;
9370 		if (!bp->link_info.force_link_chng)
9371 			bnxt_report_link(bp);
9372 	}
9373 	bp->link_info.force_link_chng = false;
9374 	mutex_unlock(&bp->hwrm_cmd_lock);
9375 	return rc;
9376 }
9377 
9378 static void bnxt_hwrm_set_eee(struct bnxt *bp,
9379 			      struct hwrm_port_phy_cfg_input *req)
9380 {
9381 	struct ethtool_eee *eee = &bp->eee;
9382 
9383 	if (eee->eee_enabled) {
9384 		u16 eee_speeds;
9385 		u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
9386 
9387 		if (eee->tx_lpi_enabled)
9388 			flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
9389 		else
9390 			flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
9391 
9392 		req->flags |= cpu_to_le32(flags);
9393 		eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
9394 		req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
9395 		req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
9396 	} else {
9397 		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
9398 	}
9399 }
9400 
9401 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
9402 {
9403 	struct hwrm_port_phy_cfg_input req = {0};
9404 
9405 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9406 	if (set_pause)
9407 		bnxt_hwrm_set_pause_common(bp, &req);
9408 
9409 	bnxt_hwrm_set_link_common(bp, &req);
9410 
9411 	if (set_eee)
9412 		bnxt_hwrm_set_eee(bp, &req);
9413 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9414 }
9415 
9416 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
9417 {
9418 	struct hwrm_port_phy_cfg_input req = {0};
9419 
9420 	if (!BNXT_SINGLE_PF(bp))
9421 		return 0;
9422 
9423 	if (pci_num_vf(bp->pdev))
9424 		return 0;
9425 
9426 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
9427 	req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
9428 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9429 }
9430 
9431 static int bnxt_fw_init_one(struct bnxt *bp);
9432 
9433 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
9434 {
9435 	struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
9436 	struct hwrm_func_drv_if_change_input req = {0};
9437 	bool fw_reset = !bp->irq_tbl;
9438 	bool resc_reinit = false;
9439 	u32 flags = 0;
9440 	int rc;
9441 
9442 	if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
9443 		return 0;
9444 
9445 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
9446 	if (up)
9447 		req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
9448 	mutex_lock(&bp->hwrm_cmd_lock);
9449 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9450 	if (!rc)
9451 		flags = le32_to_cpu(resp->flags);
9452 	mutex_unlock(&bp->hwrm_cmd_lock);
9453 	if (rc)
9454 		return rc;
9455 
9456 	if (!up)
9457 		return 0;
9458 
9459 	if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
9460 		resc_reinit = true;
9461 	if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
9462 		fw_reset = true;
9463 
9464 	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
9465 		netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
9466 		set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9467 		return -ENODEV;
9468 	}
9469 	if (resc_reinit || fw_reset) {
9470 		if (fw_reset) {
9471 			if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
9472 				bnxt_ulp_stop(bp);
9473 			bnxt_free_ctx_mem(bp);
9474 			kfree(bp->ctx);
9475 			bp->ctx = NULL;
9476 			bnxt_dcb_free(bp);
9477 			rc = bnxt_fw_init_one(bp);
9478 			if (rc) {
9479 				set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9480 				return rc;
9481 			}
9482 			bnxt_clear_int_mode(bp);
9483 			rc = bnxt_init_int_mode(bp);
9484 			if (rc) {
9485 				netdev_err(bp->dev, "init int mode failed\n");
9486 				return rc;
9487 			}
9488 			set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
9489 		}
9490 		if (BNXT_NEW_RM(bp)) {
9491 			struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9492 
9493 			rc = bnxt_hwrm_func_resc_qcaps(bp, true);
9494 			hw_resc->resv_cp_rings = 0;
9495 			hw_resc->resv_stat_ctxs = 0;
9496 			hw_resc->resv_irqs = 0;
9497 			hw_resc->resv_tx_rings = 0;
9498 			hw_resc->resv_rx_rings = 0;
9499 			hw_resc->resv_hw_ring_grps = 0;
9500 			hw_resc->resv_vnics = 0;
9501 			if (!fw_reset) {
9502 				bp->tx_nr_rings = 0;
9503 				bp->rx_nr_rings = 0;
9504 			}
9505 		}
9506 	}
9507 	return 0;
9508 }
9509 
9510 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
9511 {
9512 	struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
9513 	struct hwrm_port_led_qcaps_input req = {0};
9514 	struct bnxt_pf_info *pf = &bp->pf;
9515 	int rc;
9516 
9517 	bp->num_leds = 0;
9518 	if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
9519 		return 0;
9520 
9521 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
9522 	req.port_id = cpu_to_le16(pf->port_id);
9523 	mutex_lock(&bp->hwrm_cmd_lock);
9524 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9525 	if (rc) {
9526 		mutex_unlock(&bp->hwrm_cmd_lock);
9527 		return rc;
9528 	}
9529 	if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
9530 		int i;
9531 
9532 		bp->num_leds = resp->num_leds;
9533 		memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
9534 						 bp->num_leds);
9535 		for (i = 0; i < bp->num_leds; i++) {
9536 			struct bnxt_led_info *led = &bp->leds[i];
9537 			__le16 caps = led->led_state_caps;
9538 
9539 			if (!led->led_group_id ||
9540 			    !BNXT_LED_ALT_BLINK_CAP(caps)) {
9541 				bp->num_leds = 0;
9542 				break;
9543 			}
9544 		}
9545 	}
9546 	mutex_unlock(&bp->hwrm_cmd_lock);
9547 	return 0;
9548 }
9549 
9550 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
9551 {
9552 	struct hwrm_wol_filter_alloc_input req = {0};
9553 	struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
9554 	int rc;
9555 
9556 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
9557 	req.port_id = cpu_to_le16(bp->pf.port_id);
9558 	req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
9559 	req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
9560 	memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
9561 	mutex_lock(&bp->hwrm_cmd_lock);
9562 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9563 	if (!rc)
9564 		bp->wol_filter_id = resp->wol_filter_id;
9565 	mutex_unlock(&bp->hwrm_cmd_lock);
9566 	return rc;
9567 }
9568 
9569 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
9570 {
9571 	struct hwrm_wol_filter_free_input req = {0};
9572 
9573 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
9574 	req.port_id = cpu_to_le16(bp->pf.port_id);
9575 	req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
9576 	req.wol_filter_id = bp->wol_filter_id;
9577 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9578 }
9579 
9580 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
9581 {
9582 	struct hwrm_wol_filter_qcfg_input req = {0};
9583 	struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
9584 	u16 next_handle = 0;
9585 	int rc;
9586 
9587 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
9588 	req.port_id = cpu_to_le16(bp->pf.port_id);
9589 	req.handle = cpu_to_le16(handle);
9590 	mutex_lock(&bp->hwrm_cmd_lock);
9591 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9592 	if (!rc) {
9593 		next_handle = le16_to_cpu(resp->next_handle);
9594 		if (next_handle != 0) {
9595 			if (resp->wol_type ==
9596 			    WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
9597 				bp->wol = 1;
9598 				bp->wol_filter_id = resp->wol_filter_id;
9599 			}
9600 		}
9601 	}
9602 	mutex_unlock(&bp->hwrm_cmd_lock);
9603 	return next_handle;
9604 }
9605 
9606 static void bnxt_get_wol_settings(struct bnxt *bp)
9607 {
9608 	u16 handle = 0;
9609 
9610 	bp->wol = 0;
9611 	if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
9612 		return;
9613 
9614 	do {
9615 		handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
9616 	} while (handle && handle != 0xffff);
9617 }
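
/* WOL filters are enumerated as a handle chain: each WOL_FILTER_QCFG reply
 * carries the next handle, and the walk starts at handle 0 and stops on 0
 * or the 0xffff terminator, e.g. 0 -> 3 -> 7 -> 0xffff (handle values made
 * up for illustration).
 */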
9618 
9619 #ifdef CONFIG_BNXT_HWMON
9620 static ssize_t bnxt_show_temp(struct device *dev,
9621 			      struct device_attribute *devattr, char *buf)
9622 {
9623 	struct hwrm_temp_monitor_query_input req = {0};
9624 	struct hwrm_temp_monitor_query_output *resp;
9625 	struct bnxt *bp = dev_get_drvdata(dev);
9626 	u32 len = 0;
9627 	int rc;
9628 
9629 	resp = bp->hwrm_cmd_resp_addr;
9630 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
9631 	mutex_lock(&bp->hwrm_cmd_lock);
9632 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9633 	if (!rc)
9634 		len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
9635 	mutex_unlock(&bp->hwrm_cmd_lock);
9636 	if (rc)
9637 		return rc;
9638 	return len;
9639 }
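
/* Firmware reports the temperature in whole degrees Celsius, while the
 * hwmon sysfs ABI expects temp1_input in millidegrees, hence the multiply
 * by 1000 above (e.g. a reading of 48 is shown as "48000").
 */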
9640 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
9641 
9642 static struct attribute *bnxt_attrs[] = {
9643 	&sensor_dev_attr_temp1_input.dev_attr.attr,
9644 	NULL
9645 };
9646 ATTRIBUTE_GROUPS(bnxt);
9647 
9648 static void bnxt_hwmon_close(struct bnxt *bp)
9649 {
9650 	if (bp->hwmon_dev) {
9651 		hwmon_device_unregister(bp->hwmon_dev);
9652 		bp->hwmon_dev = NULL;
9653 	}
9654 }
9655 
9656 static void bnxt_hwmon_open(struct bnxt *bp)
9657 {
9658 	struct hwrm_temp_monitor_query_input req = {0};
9659 	struct pci_dev *pdev = bp->pdev;
9660 	int rc;
9661 
9662 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
9663 	rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9664 	if (rc == -EACCES || rc == -EOPNOTSUPP) {
9665 		bnxt_hwmon_close(bp);
9666 		return;
9667 	}
9668 
9669 	if (bp->hwmon_dev)
9670 		return;
9671 
9672 	bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
9673 							  DRV_MODULE_NAME, bp,
9674 							  bnxt_groups);
9675 	if (IS_ERR(bp->hwmon_dev)) {
9676 		bp->hwmon_dev = NULL;
9677 		dev_warn(&pdev->dev, "Cannot register hwmon device\n");
9678 	}
9679 }
9680 #else
9681 static void bnxt_hwmon_close(struct bnxt *bp)
9682 {
9683 }
9684 
9685 static void bnxt_hwmon_open(struct bnxt *bp)
9686 {
9687 }
9688 #endif
9689 
9690 static bool bnxt_eee_config_ok(struct bnxt *bp)
9691 {
9692 	struct ethtool_eee *eee = &bp->eee;
9693 	struct bnxt_link_info *link_info = &bp->link_info;
9694 
9695 	if (!(bp->flags & BNXT_FLAG_EEE_CAP))
9696 		return true;
9697 
9698 	if (eee->eee_enabled) {
9699 		u32 advertising =
9700 			_bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
9701 
9702 		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
9703 			eee->eee_enabled = 0;
9704 			return false;
9705 		}
9706 		if (eee->advertised & ~advertising) {
9707 			eee->advertised = advertising & eee->supported;
9708 			return false;
9709 		}
9710 	}
9711 	return true;
9712 }
9713 
9714 static int bnxt_update_phy_setting(struct bnxt *bp)
9715 {
9716 	int rc;
9717 	bool update_link = false;
9718 	bool update_pause = false;
9719 	bool update_eee = false;
9720 	struct bnxt_link_info *link_info = &bp->link_info;
9721 
9722 	rc = bnxt_update_link(bp, true);
9723 	if (rc) {
9724 		netdev_err(bp->dev, "failed to update link (rc: %x)\n",
9725 			   rc);
9726 		return rc;
9727 	}
9728 	if (!BNXT_SINGLE_PF(bp))
9729 		return 0;
9730 
9731 	if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
9732 	    (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
9733 	    link_info->req_flow_ctrl)
9734 		update_pause = true;
9735 	if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
9736 	    link_info->force_pause_setting != link_info->req_flow_ctrl)
9737 		update_pause = true;
9738 	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
9739 		if (BNXT_AUTO_MODE(link_info->auto_mode))
9740 			update_link = true;
9741 		if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
9742 		    link_info->req_link_speed != link_info->force_link_speed)
9743 			update_link = true;
9744 		else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
9745 			 link_info->req_link_speed != link_info->force_pam4_link_speed)
9746 			update_link = true;
9747 		if (link_info->req_duplex != link_info->duplex_setting)
9748 			update_link = true;
9749 	} else {
9750 		if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
9751 			update_link = true;
9752 		if (link_info->advertising != link_info->auto_link_speeds ||
9753 		    link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
9754 			update_link = true;
9755 	}
9756 
9757 	/* The last close may have shut down the link, so we need to call
9758 	 * PHY_CFG to bring it back up.
9759 	 */
9760 	if (!bp->link_info.link_up)
9761 		update_link = true;
9762 
9763 	if (!bnxt_eee_config_ok(bp))
9764 		update_eee = true;
9765 
9766 	if (update_link)
9767 		rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
9768 	else if (update_pause)
9769 		rc = bnxt_hwrm_set_pause(bp);
9770 	if (rc) {
9771 		netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
9772 			   rc);
9773 		return rc;
9774 	}
9775 
9776 	return rc;
9777 }
9778 
9779 /* Common routine to pre-map certain register blocks to different GRC windows.
9780  * A PF has 16 4K windows and a VF has 4 4K windows.  However, only 15 windows
9781  * in the PF and 3 windows in the VF can be customized to map in different
9782  * register blocks.
9783  */
9784 static void bnxt_preset_reg_win(struct bnxt *bp)
9785 {
9786 	if (BNXT_PF(bp)) {
9787 		/* CAG registers map to GRC window #4 */
9788 		writel(BNXT_CAG_REG_BASE,
9789 		       bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
9790 	}
9791 }
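
/* Presumably there is one 32-bit window base register per configurable
 * window starting at BNXT_GRCPF_REG_WINDOW_BASE_OUT; window #4 would then
 * sit at byte offset (4 - 1) * 4 = 12, matching the "+ 12" used above.
 */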
9792 
9793 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
9794 
9795 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
9796 {
9797 	int rc = 0;
9798 
9799 	bnxt_preset_reg_win(bp);
9800 	netif_carrier_off(bp->dev);
9801 	if (irq_re_init) {
9802 		/* Reserve rings now if none were reserved at driver probe. */
9803 		rc = bnxt_init_dflt_ring_mode(bp);
9804 		if (rc) {
9805 			netdev_err(bp->dev, "Failed to reserve default rings at open\n");
9806 			return rc;
9807 		}
9808 	}
9809 	rc = bnxt_reserve_rings(bp, irq_re_init);
9810 	if (rc)
9811 		return rc;
9812 	if ((bp->flags & BNXT_FLAG_RFS) &&
9813 	    !(bp->flags & BNXT_FLAG_USING_MSIX)) {
9814 		/* disable RFS if falling back to INTA */
9815 		bp->dev->hw_features &= ~NETIF_F_NTUPLE;
9816 		bp->flags &= ~BNXT_FLAG_RFS;
9817 	}
9818 
9819 	rc = bnxt_alloc_mem(bp, irq_re_init);
9820 	if (rc) {
9821 		netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
9822 		goto open_err_free_mem;
9823 	}
9824 
9825 	if (irq_re_init) {
9826 		bnxt_init_napi(bp);
9827 		rc = bnxt_request_irq(bp);
9828 		if (rc) {
9829 			netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
9830 			goto open_err_irq;
9831 		}
9832 	}
9833 
9834 	rc = bnxt_init_nic(bp, irq_re_init);
9835 	if (rc) {
9836 		netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
9837 		goto open_err_irq;
9838 	}
9839 
9840 	bnxt_enable_napi(bp);
9841 	bnxt_debug_dev_init(bp);
9842 
9843 	if (link_re_init) {
9844 		mutex_lock(&bp->link_lock);
9845 		rc = bnxt_update_phy_setting(bp);
9846 		mutex_unlock(&bp->link_lock);
9847 		if (rc) {
9848 			netdev_warn(bp->dev, "failed to update phy settings\n");
9849 			if (BNXT_SINGLE_PF(bp)) {
9850 				bp->link_info.phy_retry = true;
9851 				bp->link_info.phy_retry_expires =
9852 					jiffies + 5 * HZ;
9853 			}
9854 		}
9855 	}
9856 
9857 	if (irq_re_init)
9858 		udp_tunnel_nic_reset_ntf(bp->dev);
9859 
9860 	set_bit(BNXT_STATE_OPEN, &bp->state);
9861 	bnxt_enable_int(bp);
9862 	/* Enable TX queues */
9863 	bnxt_tx_enable(bp);
9864 	mod_timer(&bp->timer, jiffies + bp->current_interval);
9865 	/* Poll link status and check for SFP+ module status */
9866 	bnxt_get_port_module_status(bp);
9867 
9868 	/* VF-reps may need to be re-opened after the PF is re-opened */
9869 	if (BNXT_PF(bp))
9870 		bnxt_vf_reps_open(bp);
9871 	return 0;
9872 
9873 open_err_irq:
9874 	bnxt_del_napi(bp);
9875 
9876 open_err_free_mem:
9877 	bnxt_free_skbs(bp);
9878 	bnxt_free_irq(bp);
9879 	bnxt_free_mem(bp, true);
9880 	return rc;
9881 }
9882 
9883 /* rtnl_lock held */
9884 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
9885 {
9886 	int rc = 0;
9887 
9888 	if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
9889 		rc = -EIO;
9890 	if (!rc)
9891 		rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
9892 	if (rc) {
9893 		netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
9894 		dev_close(bp->dev);
9895 	}
9896 	return rc;
9897 }
9898 
9899 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
9900  * NAPI, IRQ, and TX are not enabled.  This is mainly used for offline
9901  * self tests.
9902  */
9903 int bnxt_half_open_nic(struct bnxt *bp)
9904 {
9905 	int rc = 0;
9906 
9907 	if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
9908 		netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
9909 		rc = -ENODEV;
9910 		goto half_open_err;
9911 	}
9912 
9913 	rc = bnxt_alloc_mem(bp, false);
9914 	if (rc) {
9915 		netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
9916 		goto half_open_err;
9917 	}
9918 	rc = bnxt_init_nic(bp, false);
9919 	if (rc) {
9920 		netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
9921 		goto half_open_err;
9922 	}
9923 	return 0;
9924 
9925 half_open_err:
9926 	bnxt_free_skbs(bp);
9927 	bnxt_free_mem(bp, false);
9928 	dev_close(bp->dev);
9929 	return rc;
9930 }
9931 
9932 /* rtnl_lock held, this call can only be made after a previous successful
9933  * call to bnxt_half_open_nic().
9934  */
9935 void bnxt_half_close_nic(struct bnxt *bp)
9936 {
9937 	bnxt_hwrm_resource_free(bp, false, false);
9938 	bnxt_free_skbs(bp);
9939 	bnxt_free_mem(bp, false);
9940 }
9941 
9942 static void bnxt_reenable_sriov(struct bnxt *bp)
9943 {
9944 	if (BNXT_PF(bp)) {
9945 		struct bnxt_pf_info *pf = &bp->pf;
9946 		int n = pf->active_vfs;
9947 
9948 		if (n)
9949 			bnxt_cfg_hw_sriov(bp, &n, true);
9950 	}
9951 }
9952 
9953 static int bnxt_open(struct net_device *dev)
9954 {
9955 	struct bnxt *bp = netdev_priv(dev);
9956 	int rc;
9957 
9958 	if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
9959 		netdev_err(bp->dev, "A previous firmware reset did not complete, aborting\n");
9960 		return -ENODEV;
9961 	}
9962 
9963 	rc = bnxt_hwrm_if_change(bp, true);
9964 	if (rc)
9965 		return rc;
9966 	rc = __bnxt_open_nic(bp, true, true);
9967 	if (rc) {
9968 		bnxt_hwrm_if_change(bp, false);
9969 	} else {
9970 		if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
9971 			if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
9972 				bnxt_ulp_start(bp, 0);
9973 				bnxt_reenable_sriov(bp);
9974 			}
9975 		}
9976 		bnxt_hwmon_open(bp);
9977 	}
9978 
9979 	return rc;
9980 }
9981 
9982 static bool bnxt_drv_busy(struct bnxt *bp)
9983 {
9984 	return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
9985 		test_bit(BNXT_STATE_READ_STATS, &bp->state));
9986 }
9987 
9988 static void bnxt_get_ring_stats(struct bnxt *bp,
9989 				struct rtnl_link_stats64 *stats);
9990 
9991 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
9992 			     bool link_re_init)
9993 {
9994 	/* Close the VF-reps before closing PF */
9995 	if (BNXT_PF(bp))
9996 		bnxt_vf_reps_close(bp);
9997 
9998 	/* Change device state to avoid TX queue wake-ups */
9999 	bnxt_tx_disable(bp);
10000 
10001 	clear_bit(BNXT_STATE_OPEN, &bp->state);
10002 	smp_mb__after_atomic();
10003 	while (bnxt_drv_busy(bp))
10004 		msleep(20);
10005 
10006 	/* Flush rings and disable interrupts */
10007 	bnxt_shutdown_nic(bp, irq_re_init);
10008 
10009 	/* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
10010 
10011 	bnxt_debug_dev_exit(bp);
10012 	bnxt_disable_napi(bp);
10013 	del_timer_sync(&bp->timer);
10014 	bnxt_free_skbs(bp);
10015 
10016 	/* Save ring stats before shutdown */
10017 	if (bp->bnapi && irq_re_init)
10018 		bnxt_get_ring_stats(bp, &bp->net_stats_prev);
10019 	if (irq_re_init) {
10020 		bnxt_free_irq(bp);
10021 		bnxt_del_napi(bp);
10022 	}
10023 	bnxt_free_mem(bp, irq_re_init);
10024 }
10025 
10026 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10027 {
10028 	int rc = 0;
10029 
10030 	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10031 		/* If we get here, it means firmware reset is in progress
10032 		 * while we are trying to close.  We can safely proceed with
10033 		 * the close because we are holding rtnl_lock().  Some firmware
10034 		 * messages may fail as we proceed to close.  We set the
10035 		 * ABORT_ERR flag here so that the FW reset thread will later
10036 		 * abort when it gets the rtnl_lock() and sees the flag.
10037 		 */
10038 		netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
10039 		set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10040 	}
10041 
10042 #ifdef CONFIG_BNXT_SRIOV
10043 	if (bp->sriov_cfg) {
10044 		rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
10045 						      !bp->sriov_cfg,
10046 						      BNXT_SRIOV_CFG_WAIT_TMO);
10047 		if (!rc)
10048 			netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
10049 	}
10050 #endif
10051 	__bnxt_close_nic(bp, irq_re_init, link_re_init);
10052 	return rc < 0 ? rc : 0;
10053 }
10054 
10055 static int bnxt_close(struct net_device *dev)
10056 {
10057 	struct bnxt *bp = netdev_priv(dev);
10058 
10059 	bnxt_hwmon_close(bp);
10060 	bnxt_close_nic(bp, true, true);
10061 	bnxt_hwrm_shutdown_link(bp);
10062 	bnxt_hwrm_if_change(bp, false);
10063 	return 0;
10064 }
10065 
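/* Read a PHY register through the HWRM_PORT_PHY_MDIO_READ firmware
 * command.  Handles both clause 22 (5-bit register address) and
 * clause 45 (prtad/devad encoded in phy_addr) access; requires HWRM
 * spec 1.10.0 or newer.
 */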
10066 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
10067 				   u16 *val)
10068 {
10069 	struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr;
10070 	struct hwrm_port_phy_mdio_read_input req = {0};
10071 	int rc;
10072 
10073 	if (bp->hwrm_spec_code < 0x10a00)
10074 		return -EOPNOTSUPP;
10075 
10076 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1);
10077 	req.port_id = cpu_to_le16(bp->pf.port_id);
10078 	req.phy_addr = phy_addr;
10079 	req.reg_addr = cpu_to_le16(reg & 0x1f);
10080 	if (mdio_phy_id_is_c45(phy_addr)) {
10081 		req.cl45_mdio = 1;
10082 		req.phy_addr = mdio_phy_id_prtad(phy_addr);
10083 		req.dev_addr = mdio_phy_id_devad(phy_addr);
10084 		req.reg_addr = cpu_to_le16(reg);
10085 	}
10086 
10087 	mutex_lock(&bp->hwrm_cmd_lock);
10088 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10089 	if (!rc)
10090 		*val = le16_to_cpu(resp->reg_data);
10091 	mutex_unlock(&bp->hwrm_cmd_lock);
10092 	return rc;
10093 }
10094 
10095 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
10096 				    u16 val)
10097 {
10098 	struct hwrm_port_phy_mdio_write_input req = {0};
10099 
10100 	if (bp->hwrm_spec_code < 0x10a00)
10101 		return -EOPNOTSUPP;
10102 
10103 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1);
10104 	req.port_id = cpu_to_le16(bp->pf.port_id);
10105 	req.phy_addr = phy_addr;
10106 	req.reg_addr = cpu_to_le16(reg & 0x1f);
10107 	if (mdio_phy_id_is_c45(phy_addr)) {
10108 		req.cl45_mdio = 1;
10109 		req.phy_addr = mdio_phy_id_prtad(phy_addr);
10110 		req.dev_addr = mdio_phy_id_devad(phy_addr);
10111 		req.reg_addr = cpu_to_le16(reg);
10112 	}
10113 	req.reg_data = cpu_to_le16(val);
10114 
10115 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10116 }
10117 
10118 /* rtnl_lock held */
10119 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10120 {
10121 	struct mii_ioctl_data *mdio = if_mii(ifr);
10122 	struct bnxt *bp = netdev_priv(dev);
10123 	int rc;
10124 
10125 	switch (cmd) {
10126 	case SIOCGMIIPHY:
10127 		mdio->phy_id = bp->link_info.phy_addr;
10128 
10129 		fallthrough;
10130 	case SIOCGMIIREG: {
10131 		u16 mii_regval = 0;
10132 
10133 		if (!netif_running(dev))
10134 			return -EAGAIN;
10135 
10136 		rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
10137 					     &mii_regval);
10138 		mdio->val_out = mii_regval;
10139 		return rc;
10140 	}
10141 
10142 	case SIOCSMIIREG:
10143 		if (!netif_running(dev))
10144 			return -EAGAIN;
10145 
10146 		return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
10147 						mdio->val_in);
10148 
10149 	default:
10150 		/* do nothing */
10151 		break;
10152 	}
10153 	return -EOPNOTSUPP;
10154 }
10155 
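/* Sum the per-completion-ring software counters into the standard
 * rtnl_link_stats64 structure.  Unicast, multicast and broadcast
 * counters are folded into the combined packet and byte totals.
 */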
10156 static void bnxt_get_ring_stats(struct bnxt *bp,
10157 				struct rtnl_link_stats64 *stats)
10158 {
10159 	int i;
10160 
10161 	for (i = 0; i < bp->cp_nr_rings; i++) {
10162 		struct bnxt_napi *bnapi = bp->bnapi[i];
10163 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
10164 		u64 *sw = cpr->stats.sw_stats;
10165 
10166 		stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
10167 		stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10168 		stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
10169 
10170 		stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
10171 		stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
10172 		stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
10173 
10174 		stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
10175 		stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
10176 		stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
10177 
10178 		stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
10179 		stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
10180 		stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
10181 
10182 		stats->rx_missed_errors +=
10183 			BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
10184 
10185 		stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10186 
10187 		stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
10188 	}
10189 }
10190 
10191 static void bnxt_add_prev_stats(struct bnxt *bp,
10192 				struct rtnl_link_stats64 *stats)
10193 {
10194 	struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
10195 
10196 	stats->rx_packets += prev_stats->rx_packets;
10197 	stats->tx_packets += prev_stats->tx_packets;
10198 	stats->rx_bytes += prev_stats->rx_bytes;
10199 	stats->tx_bytes += prev_stats->tx_bytes;
10200 	stats->rx_missed_errors += prev_stats->rx_missed_errors;
10201 	stats->multicast += prev_stats->multicast;
10202 	stats->tx_dropped += prev_stats->tx_dropped;
10203 }
10204 
10205 static void
10206 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
10207 {
10208 	struct bnxt *bp = netdev_priv(dev);
10209 
10210 	set_bit(BNXT_STATE_READ_STATS, &bp->state);
10211 	/* Make sure bnxt_close_nic() sees that we are reading stats before
10212 	 * we check the BNXT_STATE_OPEN flag.
10213 	 */
10214 	smp_mb__after_atomic();
10215 	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10216 		clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10217 		*stats = bp->net_stats_prev;
10218 		return;
10219 	}
10220 
10221 	bnxt_get_ring_stats(bp, stats);
10222 	bnxt_add_prev_stats(bp, stats);
10223 
10224 	if (bp->flags & BNXT_FLAG_PORT_STATS) {
10225 		u64 *rx = bp->port_stats.sw_stats;
10226 		u64 *tx = bp->port_stats.sw_stats +
10227 			  BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10228 
10229 		stats->rx_crc_errors =
10230 			BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
10231 		stats->rx_frame_errors =
10232 			BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
10233 		stats->rx_length_errors =
10234 			BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
10235 			BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
10236 			BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
10237 		stats->rx_errors =
10238 			BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
10239 			BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
10240 		stats->collisions =
10241 			BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
10242 		stats->tx_fifo_errors =
10243 			BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
10244 		stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
10245 	}
10246 	clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10247 }
10248 
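/* Copy the netdev multicast list into the default VNIC and report
 * whether firmware needs a rx mask update.  If the list exceeds
 * BNXT_MAX_MC_ADDRS, fall back to the ALL_MCAST rx mask instead of
 * programming individual addresses.
 */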
10249 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
10250 {
10251 	struct net_device *dev = bp->dev;
10252 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10253 	struct netdev_hw_addr *ha;
10254 	u8 *haddr;
10255 	int mc_count = 0;
10256 	bool update = false;
10257 	int off = 0;
10258 
10259 	netdev_for_each_mc_addr(ha, dev) {
10260 		if (mc_count >= BNXT_MAX_MC_ADDRS) {
10261 			*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10262 			vnic->mc_list_count = 0;
10263 			return false;
10264 		}
10265 		haddr = ha->addr;
10266 		if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
10267 			memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
10268 			update = true;
10269 		}
10270 		off += ETH_ALEN;
10271 		mc_count++;
10272 	}
10273 	if (mc_count)
10274 		*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
10275 
10276 	if (mc_count != vnic->mc_list_count) {
10277 		vnic->mc_list_count = mc_count;
10278 		update = true;
10279 	}
10280 	return update;
10281 }
10282 
10283 static bool bnxt_uc_list_updated(struct bnxt *bp)
10284 {
10285 	struct net_device *dev = bp->dev;
10286 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10287 	struct netdev_hw_addr *ha;
10288 	int off = 0;
10289 
10290 	if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
10291 		return true;
10292 
10293 	netdev_for_each_uc_addr(ha, dev) {
10294 		if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
10295 			return true;
10296 
10297 		off += ETH_ALEN;
10298 	}
10299 	return false;
10300 }
10301 
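/* ndo_set_rx_mode handler.  This runs in atomic context, so it only
 * computes the new rx mask here and defers the sleeping firmware
 * update to bnxt_cfg_rx_mode() via the sp task workqueue.
 */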
10302 static void bnxt_set_rx_mode(struct net_device *dev)
10303 {
10304 	struct bnxt *bp = netdev_priv(dev);
10305 	struct bnxt_vnic_info *vnic;
10306 	bool mc_update = false;
10307 	bool uc_update;
10308 	u32 mask;
10309 
10310 	if (!test_bit(BNXT_STATE_OPEN, &bp->state))
10311 		return;
10312 
10313 	vnic = &bp->vnic_info[0];
10314 	mask = vnic->rx_mask;
10315 	mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
10316 		  CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
10317 		  CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
10318 		  CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
10319 
10320 	if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
10321 		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10322 
10323 	uc_update = bnxt_uc_list_updated(bp);
10324 
10325 	if (dev->flags & IFF_BROADCAST)
10326 		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
10327 	if (dev->flags & IFF_ALLMULTI) {
10328 		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10329 		vnic->mc_list_count = 0;
10330 	} else {
10331 		mc_update = bnxt_mc_list_updated(bp, &mask);
10332 	}
10333 
10334 	if (mask != vnic->rx_mask || uc_update || mc_update) {
10335 		vnic->rx_mask = mask;
10336 
10337 		set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
10338 		bnxt_queue_sp_work(bp);
10339 	}
10340 }
10341 
10342 static int bnxt_cfg_rx_mode(struct bnxt *bp)
10343 {
10344 	struct net_device *dev = bp->dev;
10345 	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10346 	struct netdev_hw_addr *ha;
10347 	int i, off = 0, rc;
10348 	bool uc_update;
10349 
10350 	netif_addr_lock_bh(dev);
10351 	uc_update = bnxt_uc_list_updated(bp);
10352 	netif_addr_unlock_bh(dev);
10353 
10354 	if (!uc_update)
10355 		goto skip_uc;
10356 
10357 	mutex_lock(&bp->hwrm_cmd_lock);
10358 	for (i = 1; i < vnic->uc_filter_count; i++) {
10359 		struct hwrm_cfa_l2_filter_free_input req = {0};
10360 
10361 		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
10362 				       -1);
10363 
10364 		req.l2_filter_id = vnic->fw_l2_filter_id[i];
10365 
10366 		rc = _hwrm_send_message(bp, &req, sizeof(req),
10367 					HWRM_CMD_TIMEOUT);
10368 	}
10369 	mutex_unlock(&bp->hwrm_cmd_lock);
10370 
10371 	vnic->uc_filter_count = 1;
10372 
10373 	netif_addr_lock_bh(dev);
10374 	if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
10375 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10376 	} else {
10377 		netdev_for_each_uc_addr(ha, dev) {
10378 			memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
10379 			off += ETH_ALEN;
10380 			vnic->uc_filter_count++;
10381 		}
10382 	}
10383 	netif_addr_unlock_bh(dev);
10384 
10385 	for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
10386 		rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
10387 		if (rc) {
10388 			netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
10389 				   rc);
10390 			vnic->uc_filter_count = i;
10391 			return rc;
10392 		}
10393 	}
10394 
10395 skip_uc:
10396 	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10397 	if (rc && vnic->mc_list_count) {
10398 		netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
10399 			    rc);
10400 		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10401 		vnic->mc_list_count = 0;
10402 		rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
10403 	}
10404 	if (rc)
10405 		netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
10406 			   rc);
10407 
10408 	return rc;
10409 }
10410 
10411 static bool bnxt_can_reserve_rings(struct bnxt *bp)
10412 {
10413 #ifdef CONFIG_BNXT_SRIOV
10414 	if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
10415 		struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
10416 
10417 		/* If no minimum rings were provisioned by the PF, don't
10418 		 * reserve rings by default when the device is down.
10419 		 */
10420 		if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
10421 			return true;
10422 
10423 		if (!netif_running(bp->dev))
10424 			return false;
10425 	}
10426 #endif
10427 	return true;
10428 }
10429 
10430 /* If the chip and firmware support RFS */
10431 static bool bnxt_rfs_supported(struct bnxt *bp)
10432 {
10433 	if (bp->flags & BNXT_FLAG_CHIP_P5) {
10434 		if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
10435 			return true;
10436 		return false;
10437 	}
10438 	/* 212 firmware is broken for aRFS */
10439 	if (BNXT_FW_MAJ(bp) == 212)
10440 		return false;
10441 	if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
10442 		return true;
10443 	if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10444 		return true;
10445 	return false;
10446 }
10447 
10448 /* If runtime conditions support RFS */
10449 static bool bnxt_rfs_capable(struct bnxt *bp)
10450 {
10451 #ifdef CONFIG_RFS_ACCEL
10452 	int vnics, max_vnics, max_rss_ctxs;
10453 
10454 	if (bp->flags & BNXT_FLAG_CHIP_P5)
10455 		return bnxt_rfs_supported(bp);
10456 	if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
10457 		return false;
10458 
10459 	vnics = 1 + bp->rx_nr_rings;
10460 	max_vnics = bnxt_get_max_func_vnics(bp);
10461 	max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
10462 
10463 	/* RSS contexts not a limiting factor */
10464 	if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
10465 		max_rss_ctxs = max_vnics;
10466 	if (vnics > max_vnics || vnics > max_rss_ctxs) {
10467 		if (bp->rx_nr_rings > 1)
10468 			netdev_warn(bp->dev,
10469 				    "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
10470 				    min(max_rss_ctxs - 1, max_vnics - 1));
10471 		return false;
10472 	}
10473 
10474 	if (!BNXT_NEW_RM(bp))
10475 		return true;
10476 
10477 	if (vnics == bp->hw_resc.resv_vnics)
10478 		return true;
10479 
10480 	bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
10481 	if (vnics <= bp->hw_resc.resv_vnics)
10482 		return true;
10483 
10484 	netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
10485 	bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
10486 	return false;
10487 #else
10488 	return false;
10489 #endif
10490 }
10491 
10492 static netdev_features_t bnxt_fix_features(struct net_device *dev,
10493 					   netdev_features_t features)
10494 {
10495 	struct bnxt *bp = netdev_priv(dev);
10496 	netdev_features_t vlan_features;
10497 
10498 	if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
10499 		features &= ~NETIF_F_NTUPLE;
10500 
10501 	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10502 		features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
10503 
10504 	if (!(features & NETIF_F_GRO))
10505 		features &= ~NETIF_F_GRO_HW;
10506 
10507 	if (features & NETIF_F_GRO_HW)
10508 		features &= ~NETIF_F_LRO;
10509 
10510 	/* Both CTAG and STAG VLAN acceleration on the RX side have to be
10511 	 * turned on or off together.
10512 	 */
10513 	vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
10514 	if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
10515 		if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
10516 			features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
10517 		else if (vlan_features)
10518 			features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
10519 	}
10520 #ifdef CONFIG_BNXT_SRIOV
10521 	if (BNXT_VF(bp) && bp->vf.vlan)
10522 		features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
10523 #endif
10524 	return features;
10525 }
10526 
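/* ndo_set_features handler.  Switching between LRO and hardware GRO
 * can be applied on the fly with bnxt_set_tpa(); turning TPA fully on
 * or off, TPA changes on P5 chips, and all non-TPA flag changes need a
 * close/open cycle to take effect.
 */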
10527 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
10528 {
10529 	struct bnxt *bp = netdev_priv(dev);
10530 	u32 flags = bp->flags;
10531 	u32 changes;
10532 	int rc = 0;
10533 	bool re_init = false;
10534 	bool update_tpa = false;
10535 
10536 	flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
10537 	if (features & NETIF_F_GRO_HW)
10538 		flags |= BNXT_FLAG_GRO;
10539 	else if (features & NETIF_F_LRO)
10540 		flags |= BNXT_FLAG_LRO;
10541 
10542 	if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
10543 		flags &= ~BNXT_FLAG_TPA;
10544 
10545 	if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
10546 		flags |= BNXT_FLAG_STRIP_VLAN;
10547 
10548 	if (features & NETIF_F_NTUPLE)
10549 		flags |= BNXT_FLAG_RFS;
10550 
10551 	changes = flags ^ bp->flags;
10552 	if (changes & BNXT_FLAG_TPA) {
10553 		update_tpa = true;
10554 		if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
10555 		    (flags & BNXT_FLAG_TPA) == 0 ||
10556 		    (bp->flags & BNXT_FLAG_CHIP_P5))
10557 			re_init = true;
10558 	}
10559 
10560 	if (changes & ~BNXT_FLAG_TPA)
10561 		re_init = true;
10562 
10563 	if (flags != bp->flags) {
10564 		u32 old_flags = bp->flags;
10565 
10566 		if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10567 			bp->flags = flags;
10568 			if (update_tpa)
10569 				bnxt_set_ring_params(bp);
10570 			return rc;
10571 		}
10572 
10573 		if (re_init) {
10574 			bnxt_close_nic(bp, false, false);
10575 			bp->flags = flags;
10576 			if (update_tpa)
10577 				bnxt_set_ring_params(bp);
10578 
10579 			return bnxt_open_nic(bp, false, false);
10580 		}
10581 		if (update_tpa) {
10582 			bp->flags = flags;
10583 			rc = bnxt_set_tpa(bp,
10584 					  (flags & BNXT_FLAG_TPA) ?
10585 					  true : false);
10586 			if (rc)
10587 				bp->flags = old_flags;
10588 		}
10589 	}
10590 	return rc;
10591 }
10592 
10593 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
10594 			 u32 *reg_buf)
10595 {
10596 	struct hwrm_dbg_read_direct_output *resp = bp->hwrm_cmd_resp_addr;
10597 	struct hwrm_dbg_read_direct_input req = {0};
10598 	__le32 *dbg_reg_buf;
10599 	dma_addr_t mapping;
10600 	int rc, i;
10601 
10602 	dbg_reg_buf = dma_alloc_coherent(&bp->pdev->dev, num_words * 4,
10603 					 &mapping, GFP_KERNEL);
10604 	if (!dbg_reg_buf)
10605 		return -ENOMEM;
10606 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_READ_DIRECT, -1, -1);
10607 	req.host_dest_addr = cpu_to_le64(mapping);
10608 	req.read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
10609 	req.read_len32 = cpu_to_le32(num_words);
10610 	mutex_lock(&bp->hwrm_cmd_lock);
10611 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10612 	if (rc || resp->error_code) {
10613 		rc = -EIO;
10614 		goto dbg_rd_reg_exit;
10615 	}
10616 	for (i = 0; i < num_words; i++)
10617 		reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
10618 
10619 dbg_rd_reg_exit:
10620 	mutex_unlock(&bp->hwrm_cmd_lock);
10621 	dma_free_coherent(&bp->pdev->dev, num_words * 4, dbg_reg_buf, mapping);
10622 	return rc;
10623 }
10624 
10625 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
10626 				       u32 ring_id, u32 *prod, u32 *cons)
10627 {
10628 	struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
10629 	struct hwrm_dbg_ring_info_get_input req = {0};
10630 	int rc;
10631 
10632 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
10633 	req.ring_type = ring_type;
10634 	req.fw_ring_id = cpu_to_le32(ring_id);
10635 	mutex_lock(&bp->hwrm_cmd_lock);
10636 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10637 	if (!rc) {
10638 		*prod = le32_to_cpu(resp->producer_index);
10639 		*cons = le32_to_cpu(resp->consumer_index);
10640 	}
10641 	mutex_unlock(&bp->hwrm_cmd_lock);
10642 	return rc;
10643 }
10644 
10645 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
10646 {
10647 	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
10648 	int i = bnapi->index;
10649 
10650 	if (!txr)
10651 		return;
10652 
10653 	netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
10654 		    i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
10655 		    txr->tx_cons);
10656 }
10657 
10658 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
10659 {
10660 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
10661 	int i = bnapi->index;
10662 
10663 	if (!rxr)
10664 		return;
10665 
10666 	netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
10667 		    i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
10668 		    rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
10669 		    rxr->rx_sw_agg_prod);
10670 }
10671 
10672 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
10673 {
10674 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
10675 	int i = bnapi->index;
10676 
10677 	netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
10678 		    i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
10679 }
10680 
10681 static void bnxt_dbg_dump_states(struct bnxt *bp)
10682 {
10683 	int i;
10684 	struct bnxt_napi *bnapi;
10685 
10686 	for (i = 0; i < bp->cp_nr_rings; i++) {
10687 		bnapi = bp->bnapi[i];
10688 		if (netif_msg_drv(bp)) {
10689 			bnxt_dump_tx_sw_state(bnapi);
10690 			bnxt_dump_rx_sw_state(bnapi);
10691 			bnxt_dump_cp_sw_state(bnapi);
10692 		}
10693 	}
10694 }
10695 
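/* Ask firmware to reset one RX ring group.  The command is issued on
 * the ring's own completion ring and identifies the target by its
 * firmware ring group id.
 */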
10696 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
10697 {
10698 	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
10699 	struct hwrm_ring_reset_input req = {0};
10700 	struct bnxt_napi *bnapi = rxr->bnapi;
10701 	struct bnxt_cp_ring_info *cpr;
10702 	u16 cp_ring_id;
10703 
10704 	cpr = &bnapi->cp_ring;
10705 	cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
10706 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_RESET, cp_ring_id, -1);
10707 	req.ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
10708 	req.ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
10709 	return hwrm_send_message_silent(bp, &req, sizeof(req),
10710 					HWRM_CMD_TIMEOUT);
10711 }
10712 
10713 static void bnxt_reset_task(struct bnxt *bp, bool silent)
10714 {
10715 	if (!silent)
10716 		bnxt_dbg_dump_states(bp);
10717 	if (netif_running(bp->dev)) {
10718 		int rc;
10719 
10720 		if (silent) {
10721 			bnxt_close_nic(bp, false, false);
10722 			bnxt_open_nic(bp, false, false);
10723 		} else {
10724 			bnxt_ulp_stop(bp);
10725 			bnxt_close_nic(bp, true, false);
10726 			rc = bnxt_open_nic(bp, true, false);
10727 			bnxt_ulp_start(bp, rc);
10728 		}
10729 	}
10730 }
10731 
10732 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
10733 {
10734 	struct bnxt *bp = netdev_priv(dev);
10735 
10736 	netdev_err(bp->dev,  "TX timeout detected, starting reset task!\n");
10737 	set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
10738 	bnxt_queue_sp_work(bp);
10739 }
10740 
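/* Periodic firmware health poll, called from bnxt_timer().  When the
 * tmr_counter countdown expires, compare the heartbeat and reset
 * counters with their last-seen values; a stalled heartbeat or an
 * unexpected reset count change schedules the FW exception handler.
 */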
10741 static void bnxt_fw_health_check(struct bnxt *bp)
10742 {
10743 	struct bnxt_fw_health *fw_health = bp->fw_health;
10744 	u32 val;
10745 
10746 	if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10747 		return;
10748 
10749 	/* Make sure it is enabled before checking the tmr_counter. */
10750 	smp_rmb();
10751 	if (fw_health->tmr_counter) {
10752 		fw_health->tmr_counter--;
10753 		return;
10754 	}
10755 
10756 	val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
10757 	if (val == fw_health->last_fw_heartbeat)
10758 		goto fw_reset;
10759 
10760 	fw_health->last_fw_heartbeat = val;
10761 
10762 	val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
10763 	if (val != fw_health->last_fw_reset_cnt)
10764 		goto fw_reset;
10765 
10766 	fw_health->tmr_counter = fw_health->tmr_multiplier;
10767 	return;
10768 
10769 fw_reset:
10770 	set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
10771 	bnxt_queue_sp_work(bp);
10772 }
10773 
10774 static void bnxt_timer(struct timer_list *t)
10775 {
10776 	struct bnxt *bp = from_timer(bp, t, timer);
10777 	struct net_device *dev = bp->dev;
10778 
10779 	if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
10780 		return;
10781 
10782 	if (atomic_read(&bp->intr_sem) != 0)
10783 		goto bnxt_restart_timer;
10784 
10785 	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
10786 		bnxt_fw_health_check(bp);
10787 
10788 	if (bp->link_info.link_up && bp->stats_coal_ticks) {
10789 		set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
10790 		bnxt_queue_sp_work(bp);
10791 	}
10792 
10793 	if (bnxt_tc_flower_enabled(bp)) {
10794 		set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
10795 		bnxt_queue_sp_work(bp);
10796 	}
10797 
10798 #ifdef CONFIG_RFS_ACCEL
10799 	if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) {
10800 		set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
10801 		bnxt_queue_sp_work(bp);
10802 	}
10803 #endif /*CONFIG_RFS_ACCEL*/
10804 
10805 	if (bp->link_info.phy_retry) {
10806 		if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
10807 			bp->link_info.phy_retry = false;
10808 			netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
10809 		} else {
10810 			set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
10811 			bnxt_queue_sp_work(bp);
10812 		}
10813 	}
10814 
10815 	if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
10816 	    netif_carrier_ok(dev)) {
10817 		set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
10818 		bnxt_queue_sp_work(bp);
10819 	}
10820 bnxt_restart_timer:
10821 	mod_timer(&bp->timer, jiffies + bp->current_interval);
10822 }
10823 
10824 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
10825 {
10826 	/* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
10827 	 * set.  If the device is being closed, bnxt_close() may be holding
10828 	 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear.  So we
10829 	 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
10830 	 */
10831 	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10832 	rtnl_lock();
10833 }
10834 
10835 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
10836 {
10837 	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10838 	rtnl_unlock();
10839 }
10840 
10841 /* Only called from bnxt_sp_task() */
10842 static void bnxt_reset(struct bnxt *bp, bool silent)
10843 {
10844 	bnxt_rtnl_lock_sp(bp);
10845 	if (test_bit(BNXT_STATE_OPEN, &bp->state))
10846 		bnxt_reset_task(bp, silent);
10847 	bnxt_rtnl_unlock_sp(bp);
10848 }
10849 
10850 /* Only called from bnxt_sp_task() */
10851 static void bnxt_rx_ring_reset(struct bnxt *bp)
10852 {
10853 	int i;
10854 
10855 	bnxt_rtnl_lock_sp(bp);
10856 	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10857 		bnxt_rtnl_unlock_sp(bp);
10858 		return;
10859 	}
10860 	/* Disable and flush TPA before resetting the RX ring */
10861 	if (bp->flags & BNXT_FLAG_TPA)
10862 		bnxt_set_tpa(bp, false);
10863 	for (i = 0; i < bp->rx_nr_rings; i++) {
10864 		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
10865 		struct bnxt_cp_ring_info *cpr;
10866 		int rc;
10867 
10868 		if (!rxr->bnapi->in_reset)
10869 			continue;
10870 
10871 		rc = bnxt_hwrm_rx_ring_reset(bp, i);
10872 		if (rc) {
10873 			if (rc == -EINVAL || rc == -EOPNOTSUPP)
10874 				netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
10875 			else
10876 				netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
10877 					    rc);
10878 			bnxt_reset_task(bp, true);
10879 			break;
10880 		}
10881 		bnxt_free_one_rx_ring_skbs(bp, i);
10882 		rxr->rx_prod = 0;
10883 		rxr->rx_agg_prod = 0;
10884 		rxr->rx_sw_agg_prod = 0;
10885 		rxr->rx_next_cons = 0;
10886 		rxr->bnapi->in_reset = false;
10887 		bnxt_alloc_one_rx_ring(bp, i);
10888 		cpr = &rxr->bnapi->cp_ring;
10889 		cpr->sw_stats.rx.rx_resets++;
10890 		if (bp->flags & BNXT_FLAG_AGG_RINGS)
10891 			bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
10892 		bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
10893 	}
10894 	if (bp->flags & BNXT_FLAG_TPA)
10895 		bnxt_set_tpa(bp, true);
10896 	bnxt_rtnl_unlock_sp(bp);
10897 }
10898 
10899 static void bnxt_fw_reset_close(struct bnxt *bp)
10900 {
10901 	bnxt_ulp_stop(bp);
10902 	/* When the firmware is in a fatal state, disable the PCI device to
10903 	 * prevent any potential bad DMAs before freeing kernel memory.
10904 	 */
10905 	if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
10906 		pci_disable_device(bp->pdev);
10907 	__bnxt_close_nic(bp, true, false);
10908 	bnxt_clear_int_mode(bp);
10909 	bnxt_hwrm_func_drv_unrgtr(bp);
10910 	if (pci_is_enabled(bp->pdev))
10911 		pci_disable_device(bp->pdev);
10912 	bnxt_free_ctx_mem(bp);
10913 	kfree(bp->ctx);
10914 	bp->ctx = NULL;
10915 }
10916 
10917 static bool is_bnxt_fw_ok(struct bnxt *bp)
10918 {
10919 	struct bnxt_fw_health *fw_health = bp->fw_health;
10920 	bool no_heartbeat = false, has_reset = false;
10921 	u32 val;
10922 
10923 	val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
10924 	if (val == fw_health->last_fw_heartbeat)
10925 		no_heartbeat = true;
10926 
10927 	val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
10928 	if (val != fw_health->last_fw_reset_cnt)
10929 		has_reset = true;
10930 
10931 	if (!no_heartbeat && has_reset)
10932 		return true;
10933 
10934 	return false;
10935 }
10936 
10937 /* rtnl_lock is acquired before calling this function */
10938 static void bnxt_force_fw_reset(struct bnxt *bp)
10939 {
10940 	struct bnxt_fw_health *fw_health = bp->fw_health;
10941 	u32 wait_dsecs;
10942 
10943 	if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
10944 	    test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10945 		return;
10946 
10947 	set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10948 	bnxt_fw_reset_close(bp);
10949 	wait_dsecs = fw_health->master_func_wait_dsecs;
10950 	if (fw_health->master) {
10951 		if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
10952 			wait_dsecs = 0;
10953 		bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
10954 	} else {
10955 		bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
10956 		wait_dsecs = fw_health->normal_func_wait_dsecs;
10957 		bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
10958 	}
10959 
10960 	bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
10961 	bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
10962 	bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
10963 }
10964 
10965 void bnxt_fw_exception(struct bnxt *bp)
10966 {
10967 	netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
10968 	set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
10969 	bnxt_rtnl_lock_sp(bp);
10970 	bnxt_force_fw_reset(bp);
10971 	bnxt_rtnl_unlock_sp(bp);
10972 }
10973 
10974 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
10975  * < 0 on error.
10976  */
10977 static int bnxt_get_registered_vfs(struct bnxt *bp)
10978 {
10979 #ifdef CONFIG_BNXT_SRIOV
10980 	int rc;
10981 
10982 	if (!BNXT_PF(bp))
10983 		return 0;
10984 
10985 	rc = bnxt_hwrm_func_qcfg(bp);
10986 	if (rc) {
10987 		netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
10988 		return rc;
10989 	}
10990 	if (bp->pf.registered_vfs)
10991 		return bp->pf.registered_vfs;
10992 	if (bp->sriov_cfg)
10993 		return 1;
10994 #endif
10995 	return 0;
10996 }
10997 
10998 void bnxt_fw_reset(struct bnxt *bp)
10999 {
11000 	bnxt_rtnl_lock_sp(bp);
11001 	if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
11002 	    !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
11003 		int n = 0, tmo;
11004 
11005 		set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11006 		if (bp->pf.active_vfs &&
11007 		    !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
11008 			n = bnxt_get_registered_vfs(bp);
11009 		if (n < 0) {
11010 			netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
11011 				   n);
11012 			clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11013 			dev_close(bp->dev);
11014 			goto fw_reset_exit;
11015 		} else if (n > 0) {
11016 			u16 vf_tmo_dsecs = n * 10;
11017 
11018 			if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
11019 				bp->fw_reset_max_dsecs = vf_tmo_dsecs;
11020 			bp->fw_reset_state =
11021 				BNXT_FW_RESET_STATE_POLL_VF;
11022 			bnxt_queue_fw_reset_work(bp, HZ / 10);
11023 			goto fw_reset_exit;
11024 		}
11025 		bnxt_fw_reset_close(bp);
11026 		if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11027 			bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
11028 			tmo = HZ / 10;
11029 		} else {
11030 			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11031 			tmo = bp->fw_reset_min_dsecs * HZ / 10;
11032 		}
11033 		bnxt_queue_fw_reset_work(bp, tmo);
11034 	}
11035 fw_reset_exit:
11036 	bnxt_rtnl_unlock_sp(bp);
11037 }
11038 
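/* P5 chips only: look for completion rings that have work pending but
 * whose raw consumer index has not advanced since the last check,
 * which indicates a missed interrupt.  Query the firmware ring state
 * for diagnostics and count the event in missed_irqs.
 */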
11039 static void bnxt_chk_missed_irq(struct bnxt *bp)
11040 {
11041 	int i;
11042 
11043 	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
11044 		return;
11045 
11046 	for (i = 0; i < bp->cp_nr_rings; i++) {
11047 		struct bnxt_napi *bnapi = bp->bnapi[i];
11048 		struct bnxt_cp_ring_info *cpr;
11049 		u32 fw_ring_id;
11050 		int j;
11051 
11052 		if (!bnapi)
11053 			continue;
11054 
11055 		cpr = &bnapi->cp_ring;
11056 		for (j = 0; j < 2; j++) {
11057 			struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
11058 			u32 val[2];
11059 
11060 			if (!cpr2 || cpr2->has_more_work ||
11061 			    !bnxt_has_work(bp, cpr2))
11062 				continue;
11063 
11064 			if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
11065 				cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
11066 				continue;
11067 			}
11068 			fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
11069 			bnxt_dbg_hwrm_ring_info_get(bp,
11070 				DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
11071 				fw_ring_id, &val[0], &val[1]);
11072 			cpr->sw_stats.cmn.missed_irqs++;
11073 		}
11074 	}
11075 }
11076 
11077 static void bnxt_cfg_ntp_filters(struct bnxt *);
11078 
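/* Seed the ethtool link settings from the current PHY configuration:
 * in autoneg mode, copy the advertised NRZ and PAM4 speed masks; in
 * forced mode, copy the forced speed and signal mode.  Flow control
 * follows either the autoneg pause setting or the forced pause
 * setting.
 */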
11079 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
11080 {
11081 	struct bnxt_link_info *link_info = &bp->link_info;
11082 
11083 	if (BNXT_AUTO_MODE(link_info->auto_mode)) {
11084 		link_info->autoneg = BNXT_AUTONEG_SPEED;
11085 		if (bp->hwrm_spec_code >= 0x10201) {
11086 			if (link_info->auto_pause_setting &
11087 			    PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
11088 				link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11089 		} else {
11090 			link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11091 		}
11092 		link_info->advertising = link_info->auto_link_speeds;
11093 		link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
11094 	} else {
11095 		link_info->req_link_speed = link_info->force_link_speed;
11096 		link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
11097 		if (link_info->force_pam4_link_speed) {
11098 			link_info->req_link_speed =
11099 				link_info->force_pam4_link_speed;
11100 			link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
11101 		}
11102 		link_info->req_duplex = link_info->duplex_setting;
11103 	}
11104 	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
11105 		link_info->req_flow_ctrl =
11106 			link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
11107 	else
11108 		link_info->req_flow_ctrl = link_info->force_pause_setting;
11109 }
11110 
11111 static void bnxt_sp_task(struct work_struct *work)
11112 {
11113 	struct bnxt *bp = container_of(work, struct bnxt, sp_task);
11114 
11115 	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11116 	smp_mb__after_atomic();
11117 	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11118 		clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11119 		return;
11120 	}
11121 
11122 	if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
11123 		bnxt_cfg_rx_mode(bp);
11124 
11125 	if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
11126 		bnxt_cfg_ntp_filters(bp);
11127 	if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
11128 		bnxt_hwrm_exec_fwd_req(bp);
11129 	if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
11130 		bnxt_hwrm_port_qstats(bp, 0);
11131 		bnxt_hwrm_port_qstats_ext(bp, 0);
11132 		bnxt_accumulate_all_stats(bp);
11133 	}
11134 
11135 	if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
11136 		int rc;
11137 
11138 		mutex_lock(&bp->link_lock);
11139 		if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
11140 				       &bp->sp_event))
11141 			bnxt_hwrm_phy_qcaps(bp);
11142 
11143 		rc = bnxt_update_link(bp, true);
11144 		if (rc)
11145 			netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
11146 				   rc);
11147 
11148 		if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
11149 				       &bp->sp_event))
11150 			bnxt_init_ethtool_link_settings(bp);
11151 		mutex_unlock(&bp->link_lock);
11152 	}
11153 	if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
11154 		int rc;
11155 
11156 		mutex_lock(&bp->link_lock);
11157 		rc = bnxt_update_phy_setting(bp);
11158 		mutex_unlock(&bp->link_lock);
11159 		if (rc) {
11160 			netdev_warn(bp->dev, "update phy settings retry failed\n");
11161 		} else {
11162 			bp->link_info.phy_retry = false;
11163 			netdev_info(bp->dev, "update phy settings retry succeeded\n");
11164 		}
11165 	}
11166 	if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
11167 		mutex_lock(&bp->link_lock);
11168 		bnxt_get_port_module_status(bp);
11169 		mutex_unlock(&bp->link_lock);
11170 	}
11171 
11172 	if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
11173 		bnxt_tc_flow_stats_work(bp);
11174 
11175 	if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
11176 		bnxt_chk_missed_irq(bp);
11177 
11178 	/* The functions below will clear BNXT_STATE_IN_SP_TASK.  They
11179 	 * must be the last functions to be called before exiting.
11180 	 */
11181 	if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
11182 		bnxt_reset(bp, false);
11183 
11184 	if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
11185 		bnxt_reset(bp, true);
11186 
11187 	if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
11188 		bnxt_rx_ring_reset(bp);
11189 
11190 	if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event))
11191 		bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT);
11192 
11193 	if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
11194 		if (!is_bnxt_fw_ok(bp))
11195 			bnxt_devlink_health_report(bp,
11196 						   BNXT_FW_EXCEPTION_SP_EVENT);
11197 	}
11198 
11199 	smp_mb__before_atomic();
11200 	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11201 }
11202 
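/* Check whether the TX/completion rings, stat contexts and VNICs
 * implied by the requested (tx, rx, tcs, tx_xdp) configuration fit
 * within the resources firmware can provide, without actually
 * committing the reservation.
 */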
11203 /* Under rtnl_lock */
11204 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
11205 		     int tx_xdp)
11206 {
11207 	int max_rx, max_tx, tx_sets = 1;
11208 	int tx_rings_needed, stats;
11209 	int rx_rings = rx;
11210 	int cp, vnics, rc;
11211 
11212 	if (tcs)
11213 		tx_sets = tcs;
11214 
11215 	rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
11216 	if (rc)
11217 		return rc;
11218 
11219 	if (max_rx < rx)
11220 		return -ENOMEM;
11221 
11222 	tx_rings_needed = tx * tx_sets + tx_xdp;
11223 	if (max_tx < tx_rings_needed)
11224 		return -ENOMEM;
11225 
11226 	vnics = 1;
11227 	if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
11228 		vnics += rx_rings;
11229 
11230 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
11231 		rx_rings <<= 1;
11232 	cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
11233 	stats = cp;
11234 	if (BNXT_NEW_RM(bp)) {
11235 		cp += bnxt_get_ulp_msix_num(bp);
11236 		stats += bnxt_get_ulp_stat_ctxs(bp);
11237 	}
11238 	return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
11239 				     stats, vnics);
11240 }
11241 
11242 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
11243 {
11244 	if (bp->bar2) {
11245 		pci_iounmap(pdev, bp->bar2);
11246 		bp->bar2 = NULL;
11247 	}
11248 
11249 	if (bp->bar1) {
11250 		pci_iounmap(pdev, bp->bar1);
11251 		bp->bar1 = NULL;
11252 	}
11253 
11254 	if (bp->bar0) {
11255 		pci_iounmap(pdev, bp->bar0);
11256 		bp->bar0 = NULL;
11257 	}
11258 }
11259 
11260 static void bnxt_cleanup_pci(struct bnxt *bp)
11261 {
11262 	bnxt_unmap_bars(bp, bp->pdev);
11263 	pci_release_regions(bp->pdev);
11264 	if (pci_is_enabled(bp->pdev))
11265 		pci_disable_device(bp->pdev);
11266 }
11267 
11268 static void bnxt_init_dflt_coal(struct bnxt *bp)
11269 {
11270 	struct bnxt_coal *coal;
11271 
11272 	/* Tick values in microseconds.
11273 	 * 1 coal_buf x bufs_per_record = 1 completion record.
11274 	 */
11275 	coal = &bp->rx_coal;
11276 	coal->coal_ticks = 10;
11277 	coal->coal_bufs = 30;
11278 	coal->coal_ticks_irq = 1;
11279 	coal->coal_bufs_irq = 2;
11280 	coal->idle_thresh = 50;
11281 	coal->bufs_per_record = 2;
11282 	coal->budget = 64;		/* NAPI budget */
11283 
11284 	coal = &bp->tx_coal;
11285 	coal->coal_ticks = 28;
11286 	coal->coal_bufs = 30;
11287 	coal->coal_ticks_irq = 2;
11288 	coal->coal_bufs_irq = 2;
11289 	coal->bufs_per_record = 1;
11290 
11291 	bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
11292 }
11293 
11294 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
11295 {
11296 #ifdef CONFIG_TEE_BNXT_FW
11297 	int rc = tee_bnxt_fw_load();
11298 
11299 	if (rc)
11300 		netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
11301 
11302 	return rc;
11303 #else
11304 	netdev_err(bp->dev, "OP-TEE not supported\n");
11305 	return -ENODEV;
11306 #endif
11307 }
11308 
11309 static int bnxt_fw_init_one_p1(struct bnxt *bp)
11310 {
11311 	int rc;
11312 
11313 	bp->fw_cap = 0;
11314 	rc = bnxt_hwrm_ver_get(bp);
11315 	bnxt_try_map_fw_health_reg(bp);
11316 	if (rc) {
11317 		if (bp->fw_health && bp->fw_health->status_reliable) {
11318 			u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
11319 
11320 			netdev_err(bp->dev,
11321 				   "Firmware not responding, status: 0x%x\n",
11322 				   sts);
11323 			if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
11324 				netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
11325 				rc = bnxt_fw_reset_via_optee(bp);
11326 				if (!rc)
11327 					rc = bnxt_hwrm_ver_get(bp);
11328 			}
11329 		}
11330 		if (rc)
11331 			return rc;
11332 	}
11333 
11334 	if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
11335 		rc = bnxt_alloc_kong_hwrm_resources(bp);
11336 		if (rc)
11337 			bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
11338 	}
11339 
11340 	if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
11341 	    bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
11342 		rc = bnxt_alloc_hwrm_short_cmd_req(bp);
11343 		if (rc)
11344 			return rc;
11345 	}
11346 	bnxt_nvm_cfg_ver_get(bp);
11347 
11348 	rc = bnxt_hwrm_func_reset(bp);
11349 	if (rc)
11350 		return -ENODEV;
11351 
11352 	bnxt_hwrm_fw_set_time(bp);
11353 	return 0;
11354 }
11355 
11356 static int bnxt_fw_init_one_p2(struct bnxt *bp)
11357 {
11358 	int rc;
11359 
11360 	/* Get the MAX capabilities for this function */
11361 	rc = bnxt_hwrm_func_qcaps(bp);
11362 	if (rc) {
11363 		netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
11364 			   rc);
11365 		return -ENODEV;
11366 	}
11367 
11368 	rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
11369 	if (rc)
11370 		netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
11371 			    rc);
11372 
11373 	if (bnxt_alloc_fw_health(bp)) {
11374 		netdev_warn(bp->dev, "no memory for firmware error recovery\n");
11375 	} else {
11376 		rc = bnxt_hwrm_error_recovery_qcfg(bp);
11377 		if (rc)
11378 			netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
11379 				    rc);
11380 	}
11381 
11382 	rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
11383 	if (rc)
11384 		return -ENODEV;
11385 
11386 	bnxt_hwrm_func_qcfg(bp);
11387 	bnxt_hwrm_vnic_qcaps(bp);
11388 	bnxt_hwrm_port_led_qcaps(bp);
11389 	bnxt_ethtool_init(bp);
11390 	bnxt_dcb_init(bp);
11391 	return 0;
11392 }
11393 
11394 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
11395 {
11396 	bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
11397 	bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
11398 			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
11399 			   VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
11400 			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
11401 	if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
11402 		bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
11403 		bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
11404 				    VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
11405 	}
11406 }
11407 
11408 static void bnxt_set_dflt_rfs(struct bnxt *bp)
11409 {
11410 	struct net_device *dev = bp->dev;
11411 
11412 	dev->hw_features &= ~NETIF_F_NTUPLE;
11413 	dev->features &= ~NETIF_F_NTUPLE;
11414 	bp->flags &= ~BNXT_FLAG_RFS;
11415 	if (bnxt_rfs_supported(bp)) {
11416 		dev->hw_features |= NETIF_F_NTUPLE;
11417 		if (bnxt_rfs_capable(bp)) {
11418 			bp->flags |= BNXT_FLAG_RFS;
11419 			dev->features |= NETIF_F_NTUPLE;
11420 		}
11421 	}
11422 }
11423 
11424 static void bnxt_fw_init_one_p3(struct bnxt *bp)
11425 {
11426 	struct pci_dev *pdev = bp->pdev;
11427 
11428 	bnxt_set_dflt_rss_hash_type(bp);
11429 	bnxt_set_dflt_rfs(bp);
11430 
11431 	bnxt_get_wol_settings(bp);
11432 	if (bp->flags & BNXT_FLAG_WOL_CAP)
11433 		device_set_wakeup_enable(&pdev->dev, bp->wol);
11434 	else
11435 		device_set_wakeup_capable(&pdev->dev, false);
11436 
11437 	bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
11438 	bnxt_hwrm_coal_params_qcaps(bp);
11439 }
11440 
11441 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
11442 
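/* One-shot firmware init: phase 1 establishes HWRM communication and
 * resets the function, phase 2 queries capabilities and registers the
 * driver, then the PHY is probed, the MAC approved, the devlink health
 * reporters refreshed, and phase 3 applies RSS/RFS/WoL defaults.
 */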
11443 static int bnxt_fw_init_one(struct bnxt *bp)
11444 {
11445 	int rc;
11446 
11447 	rc = bnxt_fw_init_one_p1(bp);
11448 	if (rc) {
11449 		netdev_err(bp->dev, "Firmware init phase 1 failed\n");
11450 		return rc;
11451 	}
11452 	rc = bnxt_fw_init_one_p2(bp);
11453 	if (rc) {
11454 		netdev_err(bp->dev, "Firmware init phase 2 failed\n");
11455 		return rc;
11456 	}
11457 	rc = bnxt_probe_phy(bp, false);
11458 	if (rc)
11459 		return rc;
11460 	rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
11461 	if (rc)
11462 		return rc;
11463 
11464 	/* In case fw capabilities have changed, destroy the unneeded
11465 	 * reporters and create newly capable ones.
11466 	 */
11467 	bnxt_dl_fw_reporters_destroy(bp, false);
11468 	bnxt_dl_fw_reporters_create(bp);
11469 	bnxt_fw_init_one_p3(bp);
11470 	return 0;
11471 }
11472 
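/* Execute one step of the firmware-provided host reset sequence:
 * write the step's value to a PCI config, GRC window or BAR register,
 * then apply the step's delay.  The dummy config-space read before the
 * delay flushes the posted write.
 */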
11473 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
11474 {
11475 	struct bnxt_fw_health *fw_health = bp->fw_health;
11476 	u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
11477 	u32 val = fw_health->fw_reset_seq_vals[reg_idx];
11478 	u32 reg_type, reg_off, delay_msecs;
11479 
11480 	delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
11481 	reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
11482 	reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
11483 	switch (reg_type) {
11484 	case BNXT_FW_HEALTH_REG_TYPE_CFG:
11485 		pci_write_config_dword(bp->pdev, reg_off, val);
11486 		break;
11487 	case BNXT_FW_HEALTH_REG_TYPE_GRC:
11488 		writel(reg_off & BNXT_GRC_BASE_MASK,
11489 		       bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
11490 		reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
11491 		fallthrough;
11492 	case BNXT_FW_HEALTH_REG_TYPE_BAR0:
11493 		writel(val, bp->bar0 + reg_off);
11494 		break;
11495 	case BNXT_FW_HEALTH_REG_TYPE_BAR1:
11496 		writel(val, bp->bar1 + reg_off);
11497 		break;
11498 	}
11499 	if (delay_msecs) {
11500 		pci_read_config_dword(bp->pdev, 0, &val);
11501 		msleep(delay_msecs);
11502 	}
11503 }
11504 
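/* Perform the actual firmware reset using whichever mechanism the
 * firmware advertised: an OP-TEE firmware reload, a host-driven
 * register write sequence, or a graceful HWRM_FW_RESET command to the
 * embedded processor.
 */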
11505 static void bnxt_reset_all(struct bnxt *bp)
11506 {
11507 	struct bnxt_fw_health *fw_health = bp->fw_health;
11508 	int i, rc;
11509 
11510 	if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11511 		bnxt_fw_reset_via_optee(bp);
11512 		bp->fw_reset_timestamp = jiffies;
11513 		return;
11514 	}
11515 
11516 	if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
11517 		for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
11518 			bnxt_fw_reset_writel(bp, i);
11519 	} else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
11520 		struct hwrm_fw_reset_input req = {0};
11521 
11522 		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
11523 		req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
11524 		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
11525 		req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
11526 		req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
11527 		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
11528 		if (rc)
11529 			netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
11530 	}
11531 	bp->fw_reset_timestamp = jiffies;
11532 }
11533 
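/* Firmware reset state machine, run from a delayed workqueue.  Typical
 * flow: POLL_VF (wait for VFs to unregister) -> close the NIC ->
 * POLL_FW_DOWN or RESET_FW -> ENABLE_DEV (re-enable the PCI device) ->
 * POLL_FW (wait for firmware to respond) -> OPENING (reopen under
 * rtnl_lock).  Each state re-queues the work until its condition is
 * met; unrecoverable errors abort the reset and close the device.
 */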
11534 static void bnxt_fw_reset_task(struct work_struct *work)
11535 {
11536 	struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
11537 	int rc;
11538 
11539 	if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
11540 		netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
11541 		return;
11542 	}
11543 
11544 	switch (bp->fw_reset_state) {
11545 	case BNXT_FW_RESET_STATE_POLL_VF: {
11546 		int n = bnxt_get_registered_vfs(bp);
11547 		int tmo;
11548 
11549 		if (n < 0) {
11550 			netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
11551 				   n, jiffies_to_msecs(jiffies -
11552 				   bp->fw_reset_timestamp));
11553 			goto fw_reset_abort;
11554 		} else if (n > 0) {
11555 			if (time_after(jiffies, bp->fw_reset_timestamp +
11556 				       (bp->fw_reset_max_dsecs * HZ / 10))) {
11557 				clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11558 				bp->fw_reset_state = 0;
11559 				netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
11560 					   n);
11561 				return;
11562 			}
11563 			bnxt_queue_fw_reset_work(bp, HZ / 10);
11564 			return;
11565 		}
11566 		bp->fw_reset_timestamp = jiffies;
11567 		rtnl_lock();
11568 		if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
11569 			rtnl_unlock();
11570 			goto fw_reset_abort;
11571 		}
11572 		bnxt_fw_reset_close(bp);
		if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
			bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
			tmo = HZ / 10;
		} else {
			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
			tmo = bp->fw_reset_min_dsecs * HZ / 10;
		}
		rtnl_unlock();
		bnxt_queue_fw_reset_work(bp, tmo);
		return;
	}
	case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
		u32 val;

		val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
		if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
		    !time_after(jiffies, bp->fw_reset_timestamp +
		    (bp->fw_reset_max_dsecs * HZ / 10))) {
			bnxt_queue_fw_reset_work(bp, HZ / 5);
			return;
		}

		if (!bp->fw_health->master) {
			u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;

			bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
			bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
			return;
		}
		bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
	}
		fallthrough;
	case BNXT_FW_RESET_STATE_RESET_FW:
		bnxt_reset_all(bp);
		bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
		bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
		return;
	case BNXT_FW_RESET_STATE_ENABLE_DEV:
		if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
			u32 val;

			val = bnxt_fw_health_readl(bp,
						   BNXT_FW_RESET_INPROG_REG);
			if (val)
				netdev_warn(bp->dev, "FW reset inprog %x after min wait time.\n",
					    val);
		}
		clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
		if (pci_enable_device(bp->pdev)) {
			netdev_err(bp->dev, "Cannot re-enable PCI device\n");
			goto fw_reset_abort;
		}
		pci_set_master(bp->pdev);
		bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
		fallthrough;
	case BNXT_FW_RESET_STATE_POLL_FW:
		bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
		rc = __bnxt_hwrm_ver_get(bp, true);
		if (rc) {
			if (time_after(jiffies, bp->fw_reset_timestamp +
				       (bp->fw_reset_max_dsecs * HZ / 10))) {
				netdev_err(bp->dev, "Firmware reset aborted\n");
				goto fw_reset_abort_status;
			}
			bnxt_queue_fw_reset_work(bp, HZ / 5);
			return;
		}
		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
		bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
		fallthrough;
	case BNXT_FW_RESET_STATE_OPENING:
		while (!rtnl_trylock()) {
			bnxt_queue_fw_reset_work(bp, HZ / 10);
			return;
		}
		rc = bnxt_open(bp->dev);
		if (rc) {
			netdev_err(bp->dev, "bnxt_open_nic() failed\n");
			clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
			dev_close(bp->dev);
		}

		if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
		    bp->fw_health->enabled) {
			bp->fw_health->last_fw_reset_cnt =
				bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
		}
		bp->fw_reset_state = 0;
		/* Make sure fw_reset_state is 0 before clearing the flag */
		smp_mb__before_atomic();
		clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
		bnxt_ulp_start(bp, rc);
		if (!rc)
			bnxt_reenable_sriov(bp);
		bnxt_dl_health_recovery_done(bp);
		bnxt_dl_health_status_update(bp, true);
		rtnl_unlock();
		break;
	}
	return;

fw_reset_abort_status:
	if (bp->fw_health->status_reliable ||
	    (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
		u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);

		netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
	}
fw_reset_abort:
	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
	if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
		bnxt_dl_health_status_update(bp, false);
	bp->fw_reset_state = 0;
	rtnl_lock();
	dev_close(bp->dev);
	rtnl_unlock();
}
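
/* Illustrative sketch (not part of the driver): the firmware-reset state
 * machine above advances by re-queueing its own delayed work.  Assuming the
 * fw_reset_task delayed work initialized in bnxt_init_board() below, the
 * re-arm helper could look roughly like:
 *
 *	static void bnxt_queue_fw_reset_work(struct bnxt *bp,
 *					     unsigned long delay)
 *	{
 *		queue_delayed_work(system_wq, &bp->fw_reset_task, delay);
 *	}
 *
 * Each state either re-queues itself with a poll interval (HZ / 5, HZ / 10)
 * or falls through to the next case, so the whole reset sequence runs
 * without ever blocking a kernel thread.
 */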

static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	int rc;
	struct bnxt *bp = netdev_priv(dev);

	SET_NETDEV_DEV(dev, &pdev->dev);

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		goto init_err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto init_err_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto init_err_disable;
	}

	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
		rc = -EIO;
		goto init_err_release;
	}

	pci_set_master(pdev);

	bp->dev = dev;
	bp->pdev = pdev;

	/* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
	 * determines the BAR size.
	 */
	bp->bar0 = pci_ioremap_bar(pdev, 0);
	if (!bp->bar0) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		rc = -ENOMEM;
		goto init_err_release;
	}

	bp->bar2 = pci_ioremap_bar(pdev, 4);
	if (!bp->bar2) {
		dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
		rc = -ENOMEM;
		goto init_err_release;
	}

	pci_enable_pcie_error_reporting(pdev);

	INIT_WORK(&bp->sp_task, bnxt_sp_task);
	INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);

	spin_lock_init(&bp->ntp_fltr_lock);
#if BITS_PER_LONG == 32
	spin_lock_init(&bp->db_lock);
#endif

	bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
	bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;

	bnxt_init_dflt_coal(bp);

	timer_setup(&bp->timer, bnxt_timer, 0);
	bp->current_interval = BNXT_TIMER_INTERVAL;

	bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
	bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;

	clear_bit(BNXT_STATE_OPEN, &bp->state);
	return 0;

init_err_release:
	bnxt_unmap_bars(bp, pdev);
	pci_release_regions(pdev);

init_err_disable:
	pci_disable_device(pdev);

init_err:
	return rc;
}

/* rtnl_lock held */
static int bnxt_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(addr->sa_data, dev->dev_addr))
		return 0;

	rc = bnxt_approve_mac(bp, addr->sa_data, true);
	if (rc)
		return rc;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		bnxt_close_nic(bp, false, false);
		rc = bnxt_open_nic(bp, false, false);
	}

	return rc;
}

/* rtnl_lock held */
static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnxt *bp = netdev_priv(dev);

	if (netif_running(dev))
		bnxt_close_nic(bp, true, false);

	dev->mtu = new_mtu;
	bnxt_set_ring_params(bp);

	if (netif_running(dev))
		return bnxt_open_nic(bp, true, false);

	return 0;
}

int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
{
	struct bnxt *bp = netdev_priv(dev);
	bool sh = false;
	int rc;

	if (tc > bp->max_tc) {
		netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
			   tc, bp->max_tc);
		return -EINVAL;
	}

	if (netdev_get_num_tc(dev) == tc)
		return 0;

	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
		sh = true;

	rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
			      sh, tc, bp->tx_nr_rings_xdp);
	if (rc)
		return rc;

	/* Needs to close the device and do hw resource re-allocations */
	if (netif_running(bp->dev))
		bnxt_close_nic(bp, true, false);

	if (tc) {
		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
		netdev_set_num_tc(dev, tc);
	} else {
		bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
		netdev_reset_tc(dev);
	}
	bp->tx_nr_rings += bp->tx_nr_rings_xdp;
	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
			       bp->tx_nr_rings + bp->rx_nr_rings;

	if (netif_running(bp->dev))
		return bnxt_open_nic(bp, true, false);

	return 0;
}
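
/* Usage example (hedged): bnxt_setup_mq_tc() is normally reached from
 * userspace through the mqprio qdisc; the interface name below is only an
 * illustration:
 *
 *	tc qdisc add dev eth0 root mqprio num_tc 4 \
 *		map 0 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3 hw 1
 *
 * This requests 4 traffic classes, scaling bp->tx_nr_rings to 4x the per-TC
 * ring count and reopening the NIC as seen above.
 */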

static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	struct bnxt *bp = cb_priv;

	if (!bnxt_tc_flower_enabled(bp) ||
	    !tc_cls_can_offload_and_chain0(bp->dev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

LIST_HEAD(bnxt_block_cb_list);

static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
			 void *type_data)
{
	struct bnxt *bp = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &bnxt_block_cb_list,
						  bnxt_setup_tc_block_cb,
						  bp, bp, true);
	case TC_SETUP_QDISC_MQPRIO: {
		struct tc_mqprio_qopt *mqprio = type_data;

		mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

		return bnxt_setup_mq_tc(dev, mqprio->num_tc);
	}
	default:
		return -EOPNOTSUPP;
	}
}

#ifdef CONFIG_RFS_ACCEL
static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
			    struct bnxt_ntuple_filter *f2)
{
	struct flow_keys *keys1 = &f1->fkeys;
	struct flow_keys *keys2 = &f2->fkeys;

	if (keys1->basic.n_proto != keys2->basic.n_proto ||
	    keys1->basic.ip_proto != keys2->basic.ip_proto)
		return false;

	if (keys1->basic.n_proto == htons(ETH_P_IP)) {
		if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
		    keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
			return false;
	} else {
		if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
			   sizeof(keys1->addrs.v6addrs.src)) ||
		    memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
			   sizeof(keys1->addrs.v6addrs.dst)))
			return false;
	}

	if (keys1->ports.ports == keys2->ports.ports &&
	    keys1->control.flags == keys2->control.flags &&
	    ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
	    ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
		return true;

	return false;
}

static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
			      u16 rxq_index, u32 flow_id)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ntuple_filter *fltr, *new_fltr;
	struct flow_keys *fkeys;
	struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
	int rc = 0, idx, bit_id, l2_idx = 0;
	struct hlist_head *head;
	u32 flags;

	if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
		int off = 0, j;

		netif_addr_lock_bh(dev);
		for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
			if (ether_addr_equal(eth->h_dest,
					     vnic->uc_list + off)) {
				l2_idx = j + 1;
				break;
			}
		}
		netif_addr_unlock_bh(dev);
		if (!l2_idx)
			return -EINVAL;
	}
	new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
	if (!new_fltr)
		return -ENOMEM;

	fkeys = &new_fltr->fkeys;
	if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
		rc = -EPROTONOSUPPORT;
		goto err_free;
	}

	if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
	     fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
	    ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
	     (fkeys->basic.ip_proto != IPPROTO_UDP))) {
		rc = -EPROTONOSUPPORT;
		goto err_free;
	}
	if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
	    bp->hwrm_spec_code < 0x10601) {
		rc = -EPROTONOSUPPORT;
		goto err_free;
	}
	flags = fkeys->control.flags;
	if (((flags & FLOW_DIS_ENCAPSULATION) &&
	     bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
		rc = -EPROTONOSUPPORT;
		goto err_free;
	}

	memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
	memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);

	idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
	head = &bp->ntp_fltr_hash_tbl[idx];
	rcu_read_lock();
	hlist_for_each_entry_rcu(fltr, head, hash) {
		if (bnxt_fltr_match(fltr, new_fltr)) {
			rcu_read_unlock();
			rc = 0;
			goto err_free;
		}
	}
	rcu_read_unlock();

	spin_lock_bh(&bp->ntp_fltr_lock);
	bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
					 BNXT_NTP_FLTR_MAX_FLTR, 0);
	if (bit_id < 0) {
		spin_unlock_bh(&bp->ntp_fltr_lock);
		rc = -ENOMEM;
		goto err_free;
	}

	new_fltr->sw_id = (u16)bit_id;
	new_fltr->flow_id = flow_id;
	new_fltr->l2_fltr_idx = l2_idx;
	new_fltr->rxq = rxq_index;
	hlist_add_head_rcu(&new_fltr->hash, head);
	bp->ntp_fltr_count++;
	spin_unlock_bh(&bp->ntp_fltr_lock);

	set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
	bnxt_queue_sp_work(bp);

	return new_fltr->sw_id;

err_free:
	kfree(new_fltr);
	return rc;
}
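
/* Usage note (hedged): this ndo is only exercised when accelerated RFS is
 * active, which needs CONFIG_RFS_ACCEL plus runtime configuration along the
 * lines of:
 *
 *	echo 32768 > /proc/sys/net/core/rps_sock_flow_entries
 *	ethtool -K eth0 ntuple on
 *
 * The stack then calls bnxt_rx_flow_steer() per flow; the sw_id it returns
 * is handed back through rps_may_expire_flow() in bnxt_cfg_ntp_filters()
 * below so that idle filters can be aged out.
 */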

static void bnxt_cfg_ntp_filters(struct bnxt *bp)
{
	int i;

	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;
		struct hlist_node *tmp;
		struct bnxt_ntuple_filter *fltr;
		int rc;

		head = &bp->ntp_fltr_hash_tbl[i];
		hlist_for_each_entry_safe(fltr, tmp, head, hash) {
			bool del = false;

			if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
				if (rps_may_expire_flow(bp->dev, fltr->rxq,
							fltr->flow_id,
							fltr->sw_id)) {
					bnxt_hwrm_cfa_ntuple_filter_free(bp,
									 fltr);
					del = true;
				}
			} else {
				rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
								       fltr);
				if (rc)
					del = true;
				else
					set_bit(BNXT_FLTR_VALID, &fltr->state);
			}

			if (del) {
				spin_lock_bh(&bp->ntp_fltr_lock);
				hlist_del_rcu(&fltr->hash);
				bp->ntp_fltr_count--;
				spin_unlock_bh(&bp->ntp_fltr_lock);
				synchronize_rcu();
				clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
				kfree(fltr);
			}
		}
	}
	if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
		netdev_info(bp->dev, "Received PF driver unload event!\n");
}

#else

static void bnxt_cfg_ntp_filters(struct bnxt *bp)
{
}

#endif /* CONFIG_RFS_ACCEL */

static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
{
	struct bnxt *bp = netdev_priv(netdev);
	struct udp_tunnel_info ti;
	unsigned int cmd;

	udp_tunnel_nic_get_port(netdev, table, 0, &ti);
	if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
	else
		cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;

	if (ti.port)
		return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);

	return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
}

static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
	.sync_table	= bnxt_udp_tunnel_sync,
	.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
			  UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
	.tables		= {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
	},
};
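
/* Note (hedged summary): given the table above, the udp_tunnel_nic core
 * owns one VXLAN slot and one GENEVE slot and calls bnxt_udp_tunnel_sync()
 * whenever a slot changes; ti.port == 0 in the callback means the port is
 * being released, hence the alloc/free split at the end of the function.
 * UDP_TUNNEL_NIC_INFO_OPEN_ONLY defers these notifications until the
 * device is open.
 */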

static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
			       struct net_device *dev, u32 filter_mask,
			       int nlflags)
{
	struct bnxt *bp = netdev_priv(dev);

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
				       nlflags, filter_mask, NULL);
}

static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
			       u16 flags, struct netlink_ext_ack *extack)
{
	struct bnxt *bp = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem, rc = 0;

	if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		u16 mode;

		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (mode == bp->br_mode)
			break;

		rc = bnxt_hwrm_set_br_mode(bp, mode);
		if (!rc)
			bp->br_mode = mode;
		break;
	}
	return rc;
}

int bnxt_get_port_parent_id(struct net_device *dev,
			    struct netdev_phys_item_id *ppid)
{
	struct bnxt *bp = netdev_priv(dev);

	if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return -EOPNOTSUPP;

	/* The PF and its VF-reps only support the switchdev framework */
	if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
		return -EOPNOTSUPP;

	ppid->id_len = sizeof(bp->dsn);
	memcpy(ppid->id, bp->dsn, ppid->id_len);

	return 0;
}

static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	return &bp->dl_port;
}

static const struct net_device_ops bnxt_netdev_ops = {
	.ndo_open		= bnxt_open,
	.ndo_start_xmit		= bnxt_start_xmit,
	.ndo_stop		= bnxt_close,
	.ndo_get_stats64	= bnxt_get_stats64,
	.ndo_set_rx_mode	= bnxt_set_rx_mode,
	.ndo_do_ioctl		= bnxt_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnxt_change_mac_addr,
	.ndo_change_mtu		= bnxt_change_mtu,
	.ndo_fix_features	= bnxt_fix_features,
	.ndo_set_features	= bnxt_set_features,
	.ndo_tx_timeout		= bnxt_tx_timeout,
#ifdef CONFIG_BNXT_SRIOV
	.ndo_get_vf_config	= bnxt_get_vf_config,
	.ndo_set_vf_mac		= bnxt_set_vf_mac,
	.ndo_set_vf_vlan	= bnxt_set_vf_vlan,
	.ndo_set_vf_rate	= bnxt_set_vf_bw,
	.ndo_set_vf_link_state	= bnxt_set_vf_link_state,
	.ndo_set_vf_spoofchk	= bnxt_set_vf_spoofchk,
	.ndo_set_vf_trust	= bnxt_set_vf_trust,
#endif
	.ndo_setup_tc           = bnxt_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= bnxt_rx_flow_steer,
#endif
	.ndo_udp_tunnel_add	= udp_tunnel_nic_add_port,
	.ndo_udp_tunnel_del	= udp_tunnel_nic_del_port,
	.ndo_bpf		= bnxt_xdp,
	.ndo_xdp_xmit		= bnxt_xdp_xmit,
	.ndo_bridge_getlink	= bnxt_bridge_getlink,
	.ndo_bridge_setlink	= bnxt_bridge_setlink,
	.ndo_get_devlink_port	= bnxt_get_devlink_port,
};

static void bnxt_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	if (BNXT_PF(bp))
		bnxt_sriov_disable(bp);

	if (BNXT_PF(bp))
		devlink_port_type_clear(&bp->dl_port);
	pci_disable_pcie_error_reporting(pdev);
	unregister_netdev(dev);
	clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
	/* Flush any pending tasks */
	cancel_work_sync(&bp->sp_task);
	cancel_delayed_work_sync(&bp->fw_reset_task);
	bp->sp_event = 0;

	bnxt_dl_fw_reporters_destroy(bp, true);
	bnxt_dl_unregister(bp);
	bnxt_shutdown_tc(bp);

	bnxt_clear_int_mode(bp);
	bnxt_hwrm_func_drv_unrgtr(bp);
	bnxt_free_hwrm_resources(bp);
	bnxt_free_hwrm_short_cmd_req(bp);
	bnxt_ethtool_free(bp);
	bnxt_dcb_free(bp);
	kfree(bp->edev);
	bp->edev = NULL;
	kfree(bp->fw_health);
	bp->fw_health = NULL;
	bnxt_cleanup_pci(bp);
	bnxt_free_ctx_mem(bp);
	kfree(bp->ctx);
	bp->ctx = NULL;
	kfree(bp->rss_indir_tbl);
	bp->rss_indir_tbl = NULL;
	bnxt_free_port_stats(bp);
	free_netdev(dev);
}

static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
{
	int rc = 0;
	struct bnxt_link_info *link_info = &bp->link_info;

	rc = bnxt_hwrm_phy_qcaps(bp);
	if (rc) {
		netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
			   rc);
		return rc;
	}
	if (!fw_dflt)
		return 0;

	rc = bnxt_update_link(bp, false);
	if (rc) {
		netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
			   rc);
		return rc;
	}

	/* Older firmware does not have supported_auto_speeds, so assume
	 * that all supported speeds can be autonegotiated.
	 */
	if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
		link_info->support_auto_speeds = link_info->support_speeds;

	bnxt_init_ethtool_link_settings(bp);
	return 0;
}

static int bnxt_get_max_irq(struct pci_dev *pdev)
{
	u16 ctrl;

	if (!pdev->msix_cap)
		return 1;

	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
}

static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
				int *max_cp)
{
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int max_ring_grps = 0, max_irq;

	*max_tx = hw_resc->max_tx_rings;
	*max_rx = hw_resc->max_rx_rings;
	*max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
	max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
			bnxt_get_ulp_msix_num(bp),
			hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
		*max_cp = min_t(int, *max_cp, max_irq);
	max_ring_grps = hw_resc->max_hw_ring_grps;
	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
		*max_cp -= 1;
		*max_rx -= 2;
	}
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		*max_rx >>= 1;
	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
		/* On P5 chips, the max_cp output parameter is the number of
		 * available NQs.
		 */
		*max_cp = max_irq;
	}
	*max_rx = min_t(int, *max_rx, max_ring_grps);
}

int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
{
	int rx, tx, cp;

	_bnxt_get_max_rings(bp, &rx, &tx, &cp);
	*max_rx = rx;
	*max_tx = tx;
	if (!rx || !tx || !cp)
		return -ENOMEM;

	return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
}

static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
			       bool shared)
{
	int rc;

	rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
	if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
		/* Not enough rings, try disabling agg rings. */
		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
		rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
		if (rc) {
			/* set BNXT_FLAG_AGG_RINGS back for consistency */
			bp->flags |= BNXT_FLAG_AGG_RINGS;
			return rc;
		}
		bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
		bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
		bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
		bnxt_set_ring_params(bp);
	}

	if (bp->flags & BNXT_FLAG_ROCE_CAP) {
		int max_cp, max_stat, max_irq;

		/* Reserve minimum resources for RoCE */
		max_cp = bnxt_get_max_func_cp_rings(bp);
		max_stat = bnxt_get_max_func_stat_ctxs(bp);
		max_irq = bnxt_get_max_func_irqs(bp);
		if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
		    max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
		    max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
			return 0;

		max_cp -= BNXT_MIN_ROCE_CP_RINGS;
		max_irq -= BNXT_MIN_ROCE_CP_RINGS;
		max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
		max_cp = min_t(int, max_cp, max_irq);
		max_cp = min_t(int, max_cp, max_stat);
		rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
		if (rc)
			rc = 0;
	}
	return rc;
}

/* In the initial default shared ring setting, each shared ring must have an
 * RX/TX ring pair.
 */
static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
{
	bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
	bp->rx_nr_rings = bp->cp_nr_rings;
	bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
}
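
/* Worked example (illustrative): with 8 TX rings per TC and 4 RX rings in
 * shared mode, min(8, 4) = 4, so the trim above leaves 4 completion rings,
 * 4 RX rings and 4 TX rings, i.e. one RX/TX pair per shared completion
 * ring.
 */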

static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
{
	int dflt_rings, max_rx_rings, max_tx_rings, rc;

	if (!bnxt_can_reserve_rings(bp))
		return 0;

	if (sh)
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
	dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
	/* Reduce default rings on multi-port cards so that total default
	 * rings do not exceed CPU count.
	 */
	if (bp->port_count > 1) {
		int max_rings =
			max_t(int, num_online_cpus() / bp->port_count, 1);

		dflt_rings = min_t(int, dflt_rings, max_rings);
	}
	rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
	if (rc)
		return rc;
	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
	if (sh)
		bnxt_trim_dflt_sh_rings(bp);
	else
		bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;

	rc = __bnxt_reserve_rings(bp);
	if (rc)
		netdev_warn(bp->dev, "Unable to reserve tx rings\n");
	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
	if (sh)
		bnxt_trim_dflt_sh_rings(bp);

	/* Rings may have been trimmed, re-reserve the trimmed rings. */
	if (bnxt_need_reserve_rings(bp)) {
		rc = __bnxt_reserve_rings(bp);
		if (rc)
			netdev_warn(bp->dev, "2nd rings reservation failed.\n");
		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
	}
	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		bp->rx_nr_rings++;
		bp->cp_nr_rings++;
	}
	if (rc) {
		bp->tx_nr_rings = 0;
		bp->rx_nr_rings = 0;
	}
	return rc;
}

static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
{
	int rc;

	if (bp->tx_nr_rings)
		return 0;

	bnxt_ulp_irq_stop(bp);
	bnxt_clear_int_mode(bp);
	rc = bnxt_set_dflt_rings(bp, true);
	if (rc) {
		netdev_err(bp->dev, "Not enough rings available.\n");
		goto init_dflt_ring_err;
	}
	rc = bnxt_init_int_mode(bp);
	if (rc)
		goto init_dflt_ring_err;

	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
	if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
		bp->flags |= BNXT_FLAG_RFS;
		bp->dev->features |= NETIF_F_NTUPLE;
	}
init_dflt_ring_err:
	bnxt_ulp_irq_restart(bp, rc);
	return rc;
}

int bnxt_restore_pf_fw_resources(struct bnxt *bp)
{
	int rc;

	ASSERT_RTNL();
	bnxt_hwrm_func_qcaps(bp);

	if (netif_running(bp->dev))
		__bnxt_close_nic(bp, true, false);

	bnxt_ulp_irq_stop(bp);
	bnxt_clear_int_mode(bp);
	rc = bnxt_init_int_mode(bp);
	bnxt_ulp_irq_restart(bp, rc);

	if (netif_running(bp->dev)) {
		if (rc)
			dev_close(bp->dev);
		else
			rc = bnxt_open_nic(bp, true, false);
	}

	return rc;
}

static int bnxt_init_mac_addr(struct bnxt *bp)
{
	int rc = 0;

	if (BNXT_PF(bp)) {
		memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
	} else {
#ifdef CONFIG_BNXT_SRIOV
		struct bnxt_vf_info *vf = &bp->vf;
		bool strict_approval = true;

		if (is_valid_ether_addr(vf->mac_addr)) {
			/* overwrite netdev dev_addr with admin VF MAC */
			memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
			/* Older PF driver or firmware may not approve this
			 * correctly.
			 */
			strict_approval = false;
		} else {
			eth_hw_addr_random(bp->dev);
		}
		rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
#endif
	}
	return rc;
}

#define BNXT_VPD_LEN	512
static void bnxt_vpd_read_info(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;
	int i, len, pos, ro_size, size;
	ssize_t vpd_size;
	u8 *vpd_data;

	vpd_data = kmalloc(BNXT_VPD_LEN, GFP_KERNEL);
	if (!vpd_data)
		return;

	vpd_size = pci_read_vpd(pdev, 0, BNXT_VPD_LEN, vpd_data);
	if (vpd_size <= 0) {
		netdev_err(bp->dev, "Unable to read VPD\n");
		goto exit;
	}

	i = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
	if (i < 0) {
		netdev_err(bp->dev, "VPD READ-Only not found\n");
		goto exit;
	}

	ro_size = pci_vpd_lrdt_size(&vpd_data[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	if (i + ro_size > vpd_size)
		goto exit;

	pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
					PCI_VPD_RO_KEYWORD_PARTNO);
	if (pos < 0)
		goto read_sn;

	len = pci_vpd_info_field_size(&vpd_data[pos]);
	pos += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len + pos > vpd_size)
		goto read_sn;

	size = min(len, BNXT_VPD_FLD_LEN - 1);
	memcpy(bp->board_partno, &vpd_data[pos], size);

read_sn:
	pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size,
					PCI_VPD_RO_KEYWORD_SERIALNO);
	if (pos < 0)
		goto exit;

	len = pci_vpd_info_field_size(&vpd_data[pos]);
	pos += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len + pos > vpd_size)
		goto exit;

	size = min(len, BNXT_VPD_FLD_LEN - 1);
	memcpy(bp->board_serialno, &vpd_data[pos], size);
exit:
	kfree(vpd_data);
}
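
/* VPD layout reference (hedged sketch): the read-only section parsed above
 * is a large-resource tag followed by keyword fields, roughly:
 *
 *	[0x90][len lo][len hi]		read-only data tag (PCI_VPD_LRDT_RO_DATA)
 *	'P' 'N' [len] <part number>	PCI_VPD_RO_KEYWORD_PARTNO
 *	'S' 'N' [len] <serial number>	PCI_VPD_RO_KEYWORD_SERIALNO
 *
 * Each field is copied with a bound of BNXT_VPD_FLD_LEN - 1 so that the
 * bp->board_partno/board_serialno buffers stay NUL-terminated.
 */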

static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
{
	struct pci_dev *pdev = bp->pdev;
	u64 qword;

	qword = pci_get_dsn(pdev);
	if (!qword) {
		netdev_info(bp->dev, "Unable to read adapter's DSN\n");
		return -EOPNOTSUPP;
	}

	put_unaligned_le64(qword, dsn);

	bp->flags |= BNXT_FLAG_DSN_VALID;
	return 0;
}
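
/* Byte-order example (illustrative): pci_get_dsn() returns the DSN
 * capability's two dwords as a single u64, and put_unaligned_le64() stores
 * it least-significant byte first; a DSN of 0x0011223344556677 would land
 * in dsn[] as { 0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11, 0x00 }.
 */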

static int bnxt_map_db_bar(struct bnxt *bp)
{
	if (!bp->db_size)
		return -ENODEV;
	bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
	if (!bp->bar1)
		return -ENOMEM;
	return 0;
}
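
/* Note (hedged): the driver maps three regions from what appear to be
 * 64-bit BARs, which consume two BAR slots each, hence resources 0, 2 and
 * 4: bp->bar0 (BAR 0, device registers, mapped in bnxt_init_board()),
 * bp->bar1 (BAR 2, doorbells, mapped here once firmware has reported
 * bp->db_size) and bp->bar2 (BAR 4, also mapped in bnxt_init_board()).
 */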

static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct bnxt *bp;
	int rc, max_irqs;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	/* Clear any pending DMA transactions from crash kernel
	 * while loading driver in capture kernel.
	 */
	if (is_kdump_kernel()) {
		pci_clear_master(pdev);
		pcie_flr(pdev);
	}

	max_irqs = bnxt_get_max_irq(pdev);
	dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
	if (!dev)
		return -ENOMEM;

	bp = netdev_priv(dev);
	bp->msg_enable = BNXT_DEF_MSG_ENABLE;
	bnxt_set_max_func_irqs(bp, max_irqs);

	if (bnxt_vf_pciid(ent->driver_data))
		bp->flags |= BNXT_FLAG_VF;

	if (pdev->msix_cap)
		bp->flags |= BNXT_FLAG_MSIX_CAP;

	rc = bnxt_init_board(pdev, dev);
	if (rc < 0)
		goto init_err_free;

	dev->netdev_ops = &bnxt_netdev_ops;
	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
	dev->ethtool_ops = &bnxt_ethtool_ops;
	pci_set_drvdata(pdev, dev);

	if (BNXT_PF(bp))
		bnxt_vpd_read_info(bp);

	rc = bnxt_alloc_hwrm_resources(bp);
	if (rc)
		goto init_err_pci_clean;

	mutex_init(&bp->hwrm_cmd_lock);
	mutex_init(&bp->link_lock);

	rc = bnxt_fw_init_one_p1(bp);
	if (rc)
		goto init_err_pci_clean;

	if (BNXT_CHIP_P5(bp)) {
		bp->flags |= BNXT_FLAG_CHIP_P5;
		if (BNXT_CHIP_SR2(bp))
			bp->flags |= BNXT_FLAG_CHIP_SR2;
	}

	rc = bnxt_alloc_rss_indir_tbl(bp);
	if (rc)
		goto init_err_pci_clean;

	rc = bnxt_fw_init_one_p2(bp);
	if (rc)
		goto init_err_pci_clean;

	rc = bnxt_map_db_bar(bp);
	if (rc) {
		dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
			rc);
		goto init_err_pci_clean;
	}

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			   NETIF_F_TSO | NETIF_F_TSO6 |
			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			   NETIF_F_GSO_IPXIP4 |
			   NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			   NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
			   NETIF_F_RXCSUM | NETIF_F_GRO;

	if (BNXT_SUPPORTS_TPA(bp))
		dev->hw_features |= NETIF_F_LRO;

	dev->hw_enc_features =
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			NETIF_F_TSO | NETIF_F_TSO6 |
			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
	dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;

	dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
				    NETIF_F_GSO_GRE_CSUM;
	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
	if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
		dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
	if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
		dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
	if (BNXT_SUPPORTS_TPA(bp))
		dev->hw_features |= NETIF_F_GRO_HW;
	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
	if (dev->features & NETIF_F_GRO_HW)
		dev->features &= ~NETIF_F_LRO;
	dev->priv_flags |= IFF_UNICAST_FLT;

#ifdef CONFIG_BNXT_SRIOV
	init_waitqueue_head(&bp->sriov_cfg_wait);
	mutex_init(&bp->sriov_lock);
#endif
	if (BNXT_SUPPORTS_TPA(bp)) {
		bp->gro_func = bnxt_gro_func_5730x;
		if (BNXT_CHIP_P4(bp))
			bp->gro_func = bnxt_gro_func_5731x;
		else if (BNXT_CHIP_P5(bp))
			bp->gro_func = bnxt_gro_func_5750x;
	}
	if (!BNXT_CHIP_P4_PLUS(bp))
		bp->flags |= BNXT_FLAG_DOUBLE_DB;

	bp->ulp_probe = bnxt_ulp_probe;

	rc = bnxt_init_mac_addr(bp);
	if (rc) {
		dev_err(&pdev->dev, "Unable to initialize mac address.\n");
		rc = -EADDRNOTAVAIL;
		goto init_err_pci_clean;
	}

	if (BNXT_PF(bp)) {
		/* Read the adapter's DSN to use as the eswitch switch_id */
		rc = bnxt_pcie_dsn_get(bp, bp->dsn);
	}

	/* MTU range: 60 - FW defined max */
	dev->min_mtu = ETH_ZLEN;
	dev->max_mtu = bp->max_mtu;

	rc = bnxt_probe_phy(bp, true);
	if (rc)
		goto init_err_pci_clean;

	bnxt_set_rx_skb_mode(bp, false);
	bnxt_set_tpa_flags(bp);
	bnxt_set_ring_params(bp);
	rc = bnxt_set_dflt_rings(bp, true);
	if (rc) {
		netdev_err(bp->dev, "Not enough rings available.\n");
		rc = -ENOMEM;
		goto init_err_pci_clean;
	}

	bnxt_fw_init_one_p3(bp);

	if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
		bp->flags |= BNXT_FLAG_STRIP_VLAN;

	rc = bnxt_init_int_mode(bp);
	if (rc)
		goto init_err_pci_clean;

	/* No TC has been set yet and rings may have been trimmed due to
	 * limited MSIX, so we re-initialize the TX rings per TC.
	 */
	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;

	if (BNXT_PF(bp)) {
		if (!bnxt_pf_wq) {
			bnxt_pf_wq =
				create_singlethread_workqueue("bnxt_pf_wq");
			if (!bnxt_pf_wq) {
				dev_err(&pdev->dev, "Unable to create workqueue.\n");
				rc = -ENOMEM;
				goto init_err_pci_clean;
			}
		}
		rc = bnxt_init_tc(bp);
		if (rc)
			netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
				   rc);
	}

	bnxt_dl_register(bp);

	rc = register_netdev(dev);
	if (rc)
		goto init_err_cleanup;

	if (BNXT_PF(bp))
		devlink_port_type_eth_set(&bp->dl_port, bp->dev);
	bnxt_dl_fw_reporters_create(bp);

	netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    (long)pci_resource_start(pdev, 0), dev->dev_addr);
	pcie_print_link_status(pdev);

	pci_save_state(pdev);
	return 0;

init_err_cleanup:
	bnxt_dl_unregister(bp);
	bnxt_shutdown_tc(bp);
	bnxt_clear_int_mode(bp);

init_err_pci_clean:
	bnxt_hwrm_func_drv_unrgtr(bp);
	bnxt_free_hwrm_short_cmd_req(bp);
	bnxt_free_hwrm_resources(bp);
	bnxt_ethtool_free(bp);
	kfree(bp->fw_health);
	bp->fw_health = NULL;
	bnxt_cleanup_pci(bp);
	bnxt_free_ctx_mem(bp);
	kfree(bp->ctx);
	bp->ctx = NULL;
	kfree(bp->rss_indir_tbl);
	bp->rss_indir_tbl = NULL;

init_err_free:
	free_netdev(dev);
	return rc;
}

static void bnxt_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp;

	if (!dev)
		return;

	rtnl_lock();
	bp = netdev_priv(dev);
	if (!bp)
		goto shutdown_exit;

	if (netif_running(dev))
		dev_close(dev);

	bnxt_ulp_shutdown(bp);
	bnxt_clear_int_mode(bp);
	pci_disable_device(pdev);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, bp->wol);
		pci_set_power_state(pdev, PCI_D3hot);
	}

shutdown_exit:
	rtnl_unlock();
}

#ifdef CONFIG_PM_SLEEP
static int bnxt_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	rtnl_lock();
	bnxt_ulp_stop(bp);
	if (netif_running(dev)) {
		netif_device_detach(dev);
		rc = bnxt_close(dev);
	}
	bnxt_hwrm_func_drv_unrgtr(bp);
	pci_disable_device(bp->pdev);
	bnxt_free_ctx_mem(bp);
	kfree(bp->ctx);
	bp->ctx = NULL;
	rtnl_unlock();
	return rc;
}

static int bnxt_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	rtnl_lock();
	rc = pci_enable_device(bp->pdev);
	if (rc) {
		netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
			   rc);
		goto resume_exit;
	}
	pci_set_master(bp->pdev);
	if (bnxt_hwrm_ver_get(bp)) {
		rc = -ENODEV;
		goto resume_exit;
	}
	rc = bnxt_hwrm_func_reset(bp);
	if (rc) {
		rc = -EBUSY;
		goto resume_exit;
	}

	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc)
		goto resume_exit;

	if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
		rc = -ENODEV;
		goto resume_exit;
	}

	bnxt_get_wol_settings(bp);
	if (netif_running(dev)) {
		rc = bnxt_open(dev);
		if (!rc)
			netif_device_attach(dev);
	}

resume_exit:
	bnxt_ulp_start(bp, rc);
	if (!rc)
		bnxt_reenable_sriov(bp);
	rtnl_unlock();
	return rc;
}

static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
#define BNXT_PM_OPS (&bnxt_pm_ops)

#else

#define BNXT_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * bnxt_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();
	netif_device_detach(netdev);

	bnxt_ulp_stop(bp);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (state == pci_channel_io_frozen)
		set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);

	if (netif_running(netdev))
		bnxt_close(netdev);

	if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	bnxt_free_ctx_mem(bp);
	kfree(bp->ctx);
	bp->ctx = NULL;
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnxt_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int err = 0, off;

	netdev_info(bp->dev, "PCI Slot Reset\n");

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
	} else {
		pci_set_master(pdev);
		/* Upon fatal error, the device's internal logic that latches
		 * the BAR values is reset and is only restored by rewriting
		 * the BARs.
		 *
		 * As pci_restore_state() does not re-write the BARs if the
		 * value is the same as the previously saved value, the driver
		 * needs to write the BARs to 0 to force a restore after a
		 * fatal error.
		 */
		if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
				       &bp->state)) {
			for (off = PCI_BASE_ADDRESS_0;
			     off <= PCI_BASE_ADDRESS_5; off += 4)
				pci_write_config_dword(bp->pdev, off, 0);
		}
		pci_restore_state(pdev);
		pci_save_state(pdev);

		err = bnxt_hwrm_func_reset(bp);
		if (!err)
			result = PCI_ERS_RESULT_RECOVERED;
	}

	rtnl_unlock();

	return result;
}

/**
 * bnxt_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void bnxt_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int err;

	netdev_info(bp->dev, "PCI Slot Resume\n");
	rtnl_lock();

	err = bnxt_hwrm_func_qcaps(bp);
	if (!err && netif_running(netdev))
		err = bnxt_open(netdev);

	bnxt_ulp_start(bp, err);
	if (!err) {
		bnxt_reenable_sriov(bp);
		netif_device_attach(netdev);
	}

	rtnl_unlock();
}

static const struct pci_error_handlers bnxt_err_handler = {
	.error_detected	= bnxt_io_error_detected,
	.slot_reset	= bnxt_io_slot_reset,
	.resume		= bnxt_io_resume
};
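
/* Recovery flow (hedged summary): on an uncorrectable PCI error the AER
 * core walks these callbacks in order: bnxt_io_error_detected() detaches
 * and quiesces the device, bnxt_io_slot_reset() re-enables it and restores
 * config space after the bus reset, and bnxt_io_resume() reopens the NIC
 * and reattaches it, so each handler above covers one stage of recovery.
 */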

static struct pci_driver bnxt_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnxt_pci_tbl,
	.probe		= bnxt_init_one,
	.remove		= bnxt_remove_one,
	.shutdown	= bnxt_shutdown,
	.driver.pm	= BNXT_PM_OPS,
	.err_handler	= &bnxt_err_handler,
#if defined(CONFIG_BNXT_SRIOV)
	.sriov_configure = bnxt_sriov_configure,
#endif
};

static int __init bnxt_init(void)
{
	bnxt_debug_init();
	return pci_register_driver(&bnxt_pci_driver);
}

static void __exit bnxt_exit(void)
{
	pci_unregister_driver(&bnxt_pci_driver);
	if (bnxt_pf_wq)
		destroy_workqueue(bnxt_pf_wq);
	bnxt_debug_exit();
}

module_init(bnxt_init);
module_exit(bnxt_exit);