1 /* Broadcom NetXtreme-C/E network driver.
2 *
3 * Copyright (c) 2014-2016 Broadcom Corporation
4 * Copyright (c) 2016-2019 Broadcom Limited
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 */
10
11 #include <linux/module.h>
12
13 #include <linux/stringify.h>
14 #include <linux/kernel.h>
15 #include <linux/timer.h>
16 #include <linux/errno.h>
17 #include <linux/ioport.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/interrupt.h>
21 #include <linux/pci.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/skbuff.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/bitops.h>
27 #include <linux/io.h>
28 #include <linux/irq.h>
29 #include <linux/delay.h>
30 #include <asm/byteorder.h>
31 #include <asm/page.h>
32 #include <linux/time.h>
33 #include <linux/mii.h>
34 #include <linux/mdio.h>
35 #include <linux/if.h>
36 #include <linux/if_vlan.h>
37 #include <linux/if_bridge.h>
38 #include <linux/rtc.h>
39 #include <linux/bpf.h>
40 #include <net/ip.h>
41 #include <net/tcp.h>
42 #include <net/udp.h>
43 #include <net/checksum.h>
44 #include <net/ip6_checksum.h>
45 #include <net/udp_tunnel.h>
46 #include <linux/workqueue.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/log2.h>
50 #include <linux/aer.h>
51 #include <linux/bitmap.h>
52 #include <linux/cpu_rmap.h>
53 #include <linux/cpumask.h>
54 #include <net/pkt_cls.h>
55 #include <linux/hwmon.h>
56 #include <linux/hwmon-sysfs.h>
57 #include <net/page_pool.h>
58
59 #include "bnxt_hsi.h"
60 #include "bnxt.h"
61 #include "bnxt_ulp.h"
62 #include "bnxt_sriov.h"
63 #include "bnxt_ethtool.h"
64 #include "bnxt_dcb.h"
65 #include "bnxt_xdp.h"
66 #include "bnxt_vfr.h"
67 #include "bnxt_tc.h"
68 #include "bnxt_devlink.h"
69 #include "bnxt_debugfs.h"
70
71 #define BNXT_TX_TIMEOUT (5 * HZ)
72
73 static const char version[] =
74 "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
75
76 MODULE_LICENSE("GPL");
77 MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
78 MODULE_VERSION(DRV_MODULE_VERSION);
79
80 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
81 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
82 #define BNXT_RX_COPY_THRESH 256
83
84 #define BNXT_TX_PUSH_THRESH 164
85
86 enum board_idx {
87 BCM57301,
88 BCM57302,
89 BCM57304,
90 BCM57417_NPAR,
91 BCM58700,
92 BCM57311,
93 BCM57312,
94 BCM57402,
95 BCM57404,
96 BCM57406,
97 BCM57402_NPAR,
98 BCM57407,
99 BCM57412,
100 BCM57414,
101 BCM57416,
102 BCM57417,
103 BCM57412_NPAR,
104 BCM57314,
105 BCM57417_SFP,
106 BCM57416_SFP,
107 BCM57404_NPAR,
108 BCM57406_NPAR,
109 BCM57407_SFP,
110 BCM57407_NPAR,
111 BCM57414_NPAR,
112 BCM57416_NPAR,
113 BCM57452,
114 BCM57454,
115 BCM5745x_NPAR,
116 BCM57508,
117 BCM57504,
118 BCM57502,
119 BCM57508_NPAR,
120 BCM57504_NPAR,
121 BCM57502_NPAR,
122 BCM58802,
123 BCM58804,
124 BCM58808,
125 NETXTREME_E_VF,
126 NETXTREME_C_VF,
127 NETXTREME_S_VF,
128 NETXTREME_E_P5_VF,
129 };
130
131 /* indexed by enum above */
132 static const struct {
133 char *name;
134 } board_info[] = {
135 [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
136 [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
137 [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
138 [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
139 [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
140 [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
141 [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
142 [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
143 [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
144 [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
145 [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
146 [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
147 [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
148 [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
149 [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
150 [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
151 [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
152 [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
153 [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
154 [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
155 [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
156 [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
157 [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
158 [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
159 [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
160 [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
161 [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
162 [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
163 [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
164 [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
165 [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
166 [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
167 [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
168 [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
169 [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
170 [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
171 [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
172 [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
173 [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
174 [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
175 [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
176 [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
177 };
178
179 static const struct pci_device_id bnxt_pci_tbl[] = {
180 { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
181 { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
182 { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
183 { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
184 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
185 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
186 { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
187 { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
188 { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
189 { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
190 { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
191 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
192 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
193 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
194 { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
195 { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
196 { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
197 { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
198 { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
199 { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
200 { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
201 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
202 { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
203 { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
204 { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
205 { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
206 { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
207 { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
208 { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
209 { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
210 { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
211 { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
212 { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
213 { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
214 { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
215 { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
216 { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
217 { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
218 { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
219 { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
220 { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
221 { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
222 { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
223 { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
224 { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
225 { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
226 #ifdef CONFIG_BNXT_SRIOV
227 { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
228 { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
229 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
230 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
231 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
232 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
233 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
234 { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
235 { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
236 { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
237 { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
238 #endif
239 { 0 }
240 };
241
242 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
243
244 static const u16 bnxt_vf_req_snif[] = {
245 HWRM_FUNC_CFG,
246 HWRM_FUNC_VF_CFG,
247 HWRM_PORT_PHY_QCFG,
248 HWRM_CFA_L2_FILTER_ALLOC,
249 };
250
251 static const u16 bnxt_async_events_arr[] = {
252 ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
253 ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
254 ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
255 ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
256 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
257 ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
258 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
259 };
260
261 static struct workqueue_struct *bnxt_pf_wq;
262
263 static bool bnxt_vf_pciid(enum board_idx idx)
264 {
265 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
266 idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF);
267 }
268
269 #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
270 #define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
271 #define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS)
272
273 #define BNXT_CP_DB_IRQ_DIS(db) \
274 writel(DB_CP_IRQ_DIS_FLAGS, db)
275
276 #define BNXT_DB_CQ(db, idx) \
277 writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)
278
279 #define BNXT_DB_NQ_P5(db, idx) \
280 writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)
281
282 #define BNXT_DB_CQ_ARM(db, idx) \
283 writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)
284
285 #define BNXT_DB_NQ_ARM_P5(db, idx) \
286 writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)
287
288 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
289 {
290 if (bp->flags & BNXT_FLAG_CHIP_P5)
291 BNXT_DB_NQ_P5(db, idx);
292 else
293 BNXT_DB_CQ(db, idx);
294 }
295
296 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
297 {
298 if (bp->flags & BNXT_FLAG_CHIP_P5)
299 BNXT_DB_NQ_ARM_P5(db, idx);
300 else
301 BNXT_DB_CQ_ARM(db, idx);
302 }
303
304 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
305 {
306 if (bp->flags & BNXT_FLAG_CHIP_P5)
307 writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
308 db->doorbell);
309 else
310 BNXT_DB_CQ(db, idx);
311 }
312
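/* TX BD length hint flags, indexed by the packet length in 512-byte units. */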
313 const u16 bnxt_lhint_arr[] = {
314 TX_BD_FLAGS_LHINT_512_AND_SMALLER,
315 TX_BD_FLAGS_LHINT_512_TO_1023,
316 TX_BD_FLAGS_LHINT_1024_TO_2047,
317 TX_BD_FLAGS_LHINT_1024_TO_2047,
318 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
319 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
320 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
321 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
322 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
323 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
324 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
325 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
326 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
327 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
328 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
329 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
330 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
331 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
332 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
333 };
334
335 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
336 {
337 struct metadata_dst *md_dst = skb_metadata_dst(skb);
338
339 if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
340 return 0;
341
342 return md_dst->u.port_info.port_id;
343 }
344
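/* Main transmit routine.  Small packets sent on an empty ring are written
 * inline through the doorbell BAR ("push" mode); all other packets are
 * DMA-mapped into long TX BDs, with the extended BD carrying checksum,
 * LSO and VLAN metadata.
 */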
345 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
346 {
347 struct bnxt *bp = netdev_priv(dev);
348 struct tx_bd *txbd;
349 struct tx_bd_ext *txbd1;
350 struct netdev_queue *txq;
351 int i;
352 dma_addr_t mapping;
353 unsigned int length, pad = 0;
354 u32 len, free_size, vlan_tag_flags, cfa_action, flags;
355 u16 prod, last_frag;
356 struct pci_dev *pdev = bp->pdev;
357 struct bnxt_tx_ring_info *txr;
358 struct bnxt_sw_tx_bd *tx_buf;
359
360 i = skb_get_queue_mapping(skb);
361 if (unlikely(i >= bp->tx_nr_rings)) {
362 dev_kfree_skb_any(skb);
363 return NETDEV_TX_OK;
364 }
365
366 txq = netdev_get_tx_queue(dev, i);
367 txr = &bp->tx_ring[bp->tx_ring_map[i]];
368 prod = txr->tx_prod;
369
370 free_size = bnxt_tx_avail(bp, txr);
371 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
372 netif_tx_stop_queue(txq);
373 return NETDEV_TX_BUSY;
374 }
375
376 length = skb->len;
377 len = skb_headlen(skb);
378 last_frag = skb_shinfo(skb)->nr_frags;
379
380 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
381
382 txbd->tx_bd_opaque = prod;
383
384 tx_buf = &txr->tx_buf_ring[prod];
385 tx_buf->skb = skb;
386 tx_buf->nr_frags = last_frag;
387
388 vlan_tag_flags = 0;
389 cfa_action = bnxt_xmit_get_cfa_action(skb);
390 if (skb_vlan_tag_present(skb)) {
391 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
392 skb_vlan_tag_get(skb);
393 /* Currently supports 8021Q and 8021AD VLAN offloads.
394 * QINQ1, QINQ2, QINQ3 VLAN headers are deprecated.
395 */
396 if (skb->vlan_proto == htons(ETH_P_8021Q))
397 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
398 }
399
400 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
401 struct tx_push_buffer *tx_push_buf = txr->tx_push;
402 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
403 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
404 void __iomem *db = txr->tx_db.doorbell;
405 void *pdata = tx_push_buf->data;
406 u64 *end;
407 int j, push_len;
408
409 /* Set COAL_NOW to be ready quickly for the next push */
410 tx_push->tx_bd_len_flags_type =
411 cpu_to_le32((length << TX_BD_LEN_SHIFT) |
412 TX_BD_TYPE_LONG_TX_BD |
413 TX_BD_FLAGS_LHINT_512_AND_SMALLER |
414 TX_BD_FLAGS_COAL_NOW |
415 TX_BD_FLAGS_PACKET_END |
416 (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
417
418 if (skb->ip_summed == CHECKSUM_PARTIAL)
419 tx_push1->tx_bd_hsize_lflags =
420 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
421 else
422 tx_push1->tx_bd_hsize_lflags = 0;
423
424 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
425 tx_push1->tx_bd_cfa_action =
426 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
427
428 end = pdata + length;
429 end = PTR_ALIGN(end, 8) - 1;
430 *end = 0;
431
432 skb_copy_from_linear_data(skb, pdata, len);
433 pdata += len;
434 for (j = 0; j < last_frag; j++) {
435 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
436 void *fptr;
437
438 fptr = skb_frag_address_safe(frag);
439 if (!fptr)
440 goto normal_tx;
441
442 memcpy(pdata, fptr, skb_frag_size(frag));
443 pdata += skb_frag_size(frag);
444 }
445
446 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
447 txbd->tx_bd_haddr = txr->data_mapping;
448 prod = NEXT_TX(prod);
449 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
450 memcpy(txbd, tx_push1, sizeof(*txbd));
451 prod = NEXT_TX(prod);
452 tx_push->doorbell =
453 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
454 txr->tx_prod = prod;
455
456 tx_buf->is_push = 1;
457 netdev_tx_sent_queue(txq, skb->len);
458 wmb(); /* Sync is_push and byte queue before pushing data */
459
460 push_len = (length + sizeof(*tx_push) + 7) / 8;
461 if (push_len > 16) {
462 __iowrite64_copy(db, tx_push_buf, 16);
463 __iowrite32_copy(db + 4, tx_push_buf + 1,
464 (push_len - 16) << 1);
465 } else {
466 __iowrite64_copy(db, tx_push_buf, push_len);
467 }
468
469 goto tx_done;
470 }
471
472 normal_tx:
473 if (length < BNXT_MIN_PKT_SIZE) {
474 pad = BNXT_MIN_PKT_SIZE - length;
475 if (skb_pad(skb, pad)) {
476 /* SKB already freed. */
477 tx_buf->skb = NULL;
478 return NETDEV_TX_OK;
479 }
480 length = BNXT_MIN_PKT_SIZE;
481 }
482
483 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
484
485 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
486 dev_kfree_skb_any(skb);
487 tx_buf->skb = NULL;
488 return NETDEV_TX_OK;
489 }
490
491 dma_unmap_addr_set(tx_buf, mapping, mapping);
492 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
493 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
494
495 txbd->tx_bd_haddr = cpu_to_le64(mapping);
496
497 prod = NEXT_TX(prod);
498 txbd1 = (struct tx_bd_ext *)
499 &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
500
501 txbd1->tx_bd_hsize_lflags = 0;
502 if (skb_is_gso(skb)) {
503 u32 hdr_len;
504
505 if (skb->encapsulation)
506 hdr_len = skb_inner_network_offset(skb) +
507 skb_inner_network_header_len(skb) +
508 inner_tcp_hdrlen(skb);
509 else
510 hdr_len = skb_transport_offset(skb) +
511 tcp_hdrlen(skb);
512
513 txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
514 TX_BD_FLAGS_T_IPID |
515 (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
516 length = skb_shinfo(skb)->gso_size;
517 txbd1->tx_bd_mss = cpu_to_le32(length);
518 length += hdr_len;
519 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
520 txbd1->tx_bd_hsize_lflags =
521 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
522 txbd1->tx_bd_mss = 0;
523 }
524
525 length >>= 9;
526 if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
527 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
528 skb->len);
529 i = 0;
530 goto tx_dma_error;
531 }
532 flags |= bnxt_lhint_arr[length];
533 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
534
535 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
536 txbd1->tx_bd_cfa_action =
537 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
538 for (i = 0; i < last_frag; i++) {
539 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
540
541 prod = NEXT_TX(prod);
542 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
543
544 len = skb_frag_size(frag);
545 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
546 DMA_TO_DEVICE);
547
548 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
549 goto tx_dma_error;
550
551 tx_buf = &txr->tx_buf_ring[prod];
552 dma_unmap_addr_set(tx_buf, mapping, mapping);
553
554 txbd->tx_bd_haddr = cpu_to_le64(mapping);
555
556 flags = len << TX_BD_LEN_SHIFT;
557 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
558 }
559
560 flags &= ~TX_BD_LEN;
561 txbd->tx_bd_len_flags_type =
562 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
563 TX_BD_FLAGS_PACKET_END);
564
565 netdev_tx_sent_queue(txq, skb->len);
566
567 /* Sync BD data before updating doorbell */
568 wmb();
569
570 prod = NEXT_TX(prod);
571 txr->tx_prod = prod;
572
573 if (!netdev_xmit_more() || netif_xmit_stopped(txq))
574 bnxt_db_write(bp, &txr->tx_db, prod);
575
576 tx_done:
577
578 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
579 if (netdev_xmit_more() && !tx_buf->is_push)
580 bnxt_db_write(bp, &txr->tx_db, prod);
581
582 netif_tx_stop_queue(txq);
583
584 /* netif_tx_stop_queue() must be done before checking
585 * tx index in bnxt_tx_avail() below, because in
586 * bnxt_tx_int(), we update tx index before checking for
587 * netif_tx_queue_stopped().
588 */
589 smp_mb();
590 if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
591 netif_tx_wake_queue(txq);
592 }
593 return NETDEV_TX_OK;
594
595 tx_dma_error:
596 last_frag = i;
597
598 /* start back at beginning and unmap skb */
599 prod = txr->tx_prod;
600 tx_buf = &txr->tx_buf_ring[prod];
601 tx_buf->skb = NULL;
602 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
603 skb_headlen(skb), PCI_DMA_TODEVICE);
604 prod = NEXT_TX(prod);
605
606 /* unmap remaining mapped pages */
607 for (i = 0; i < last_frag; i++) {
608 prod = NEXT_TX(prod);
609 tx_buf = &txr->tx_buf_ring[prod];
610 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
611 skb_frag_size(&skb_shinfo(skb)->frags[i]),
612 PCI_DMA_TODEVICE);
613 }
614
615 dev_kfree_skb_any(skb);
616 return NETDEV_TX_OK;
617 }
618
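/* Reclaim up to @nr_pkts completed TX packets: unmap their DMA buffers,
 * free the skbs, and re-wake the TX queue if it was stopped and enough
 * descriptors are available again.
 */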
619 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
620 {
621 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
622 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
623 u16 cons = txr->tx_cons;
624 struct pci_dev *pdev = bp->pdev;
625 int i;
626 unsigned int tx_bytes = 0;
627
628 for (i = 0; i < nr_pkts; i++) {
629 struct bnxt_sw_tx_bd *tx_buf;
630 struct sk_buff *skb;
631 int j, last;
632
633 tx_buf = &txr->tx_buf_ring[cons];
634 cons = NEXT_TX(cons);
635 skb = tx_buf->skb;
636 tx_buf->skb = NULL;
637
638 if (tx_buf->is_push) {
639 tx_buf->is_push = 0;
640 goto next_tx_int;
641 }
642
643 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
644 skb_headlen(skb), PCI_DMA_TODEVICE);
645 last = tx_buf->nr_frags;
646
647 for (j = 0; j < last; j++) {
648 cons = NEXT_TX(cons);
649 tx_buf = &txr->tx_buf_ring[cons];
650 dma_unmap_page(
651 &pdev->dev,
652 dma_unmap_addr(tx_buf, mapping),
653 skb_frag_size(&skb_shinfo(skb)->frags[j]),
654 PCI_DMA_TODEVICE);
655 }
656
657 next_tx_int:
658 cons = NEXT_TX(cons);
659
660 tx_bytes += skb->len;
661 dev_kfree_skb_any(skb);
662 }
663
664 netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
665 txr->tx_cons = cons;
666
667 /* Need to make the tx_cons update visible to bnxt_start_xmit()
668 * before checking for netif_tx_queue_stopped(). Without the
669 * memory barrier, there is a small possibility that bnxt_start_xmit()
670 * will miss it and cause the queue to be stopped forever.
671 */
672 smp_mb();
673
674 if (unlikely(netif_tx_queue_stopped(txq)) &&
675 (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
676 __netif_tx_lock(txq, smp_processor_id());
677 if (netif_tx_queue_stopped(txq) &&
678 bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
679 txr->dev_state != BNXT_DEV_STATE_CLOSING)
680 netif_tx_wake_queue(txq);
681 __netif_tx_unlock(txq);
682 }
683 }
684
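/* Allocate an RX page from the ring's page pool and DMA-map it, returning
 * the page and storing the offset-adjusted bus address in @mapping.
 */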
685 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
686 struct bnxt_rx_ring_info *rxr,
687 gfp_t gfp)
688 {
689 struct device *dev = &bp->pdev->dev;
690 struct page *page;
691
692 page = page_pool_dev_alloc_pages(rxr->page_pool);
693 if (!page)
694 return NULL;
695
696 *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
697 DMA_ATTR_WEAK_ORDERING);
698 if (dma_mapping_error(dev, *mapping)) {
699 page_pool_recycle_direct(rxr->page_pool, page);
700 return NULL;
701 }
702 *mapping += bp->rx_dma_offset;
703 return page;
704 }
705
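/* Allocate and DMA-map a kmalloc'd RX data buffer (used when the ring is
 * not in page mode).
 */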
706 static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
707 gfp_t gfp)
708 {
709 u8 *data;
710 struct pci_dev *pdev = bp->pdev;
711
712 data = kmalloc(bp->rx_buf_size, gfp);
713 if (!data)
714 return NULL;
715
716 *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
717 bp->rx_buf_use_size, bp->rx_dir,
718 DMA_ATTR_WEAK_ORDERING);
719
720 if (dma_mapping_error(&pdev->dev, *mapping)) {
721 kfree(data);
722 data = NULL;
723 }
724 return data;
725 }
726
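/* Fill the RX BD at @prod with a fresh buffer: a full page in page mode
 * (XDP), otherwise a kmalloc'd data buffer.
 */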
727 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
728 u16 prod, gfp_t gfp)
729 {
730 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
731 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
732 dma_addr_t mapping;
733
734 if (BNXT_RX_PAGE_MODE(bp)) {
735 struct page *page =
736 __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
737
738 if (!page)
739 return -ENOMEM;
740
741 rx_buf->data = page;
742 rx_buf->data_ptr = page_address(page) + bp->rx_offset;
743 } else {
744 u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
745
746 if (!data)
747 return -ENOMEM;
748
749 rx_buf->data = data;
750 rx_buf->data_ptr = data + bp->rx_offset;
751 }
752 rx_buf->mapping = mapping;
753
754 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
755 return 0;
756 }
757
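/* Recycle the RX buffer at @cons into the current producer slot so it can
 * be reused when the packet is not passed up the stack.
 */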
758 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
759 {
760 u16 prod = rxr->rx_prod;
761 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
762 struct rx_bd *cons_bd, *prod_bd;
763
764 prod_rx_buf = &rxr->rx_buf_ring[prod];
765 cons_rx_buf = &rxr->rx_buf_ring[cons];
766
767 prod_rx_buf->data = data;
768 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
769
770 prod_rx_buf->mapping = cons_rx_buf->mapping;
771
772 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
773 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
774
775 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
776 }
777
778 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
779 {
780 u16 next, max = rxr->rx_agg_bmap_size;
781
782 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
783 if (next >= max)
784 next = find_first_zero_bit(rxr->rx_agg_bmap, max);
785 return next;
786 }
787
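/* Fill an aggregation ring BD with a new page fragment.  When PAGE_SIZE is
 * larger than BNXT_RX_PAGE_SIZE, a page is carved into multiple
 * BNXT_RX_PAGE_SIZE chunks.
 */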
788 static inline int bnxt_alloc_rx_page(struct bnxt *bp,
789 struct bnxt_rx_ring_info *rxr,
790 u16 prod, gfp_t gfp)
791 {
792 struct rx_bd *rxbd =
793 &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
794 struct bnxt_sw_rx_agg_bd *rx_agg_buf;
795 struct pci_dev *pdev = bp->pdev;
796 struct page *page;
797 dma_addr_t mapping;
798 u16 sw_prod = rxr->rx_sw_agg_prod;
799 unsigned int offset = 0;
800
801 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
802 page = rxr->rx_page;
803 if (!page) {
804 page = alloc_page(gfp);
805 if (!page)
806 return -ENOMEM;
807 rxr->rx_page = page;
808 rxr->rx_page_offset = 0;
809 }
810 offset = rxr->rx_page_offset;
811 rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
812 if (rxr->rx_page_offset == PAGE_SIZE)
813 rxr->rx_page = NULL;
814 else
815 get_page(page);
816 } else {
817 page = alloc_page(gfp);
818 if (!page)
819 return -ENOMEM;
820 }
821
822 mapping = dma_map_page_attrs(&pdev->dev, page, offset,
823 BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
824 DMA_ATTR_WEAK_ORDERING);
825 if (dma_mapping_error(&pdev->dev, mapping)) {
826 __free_page(page);
827 return -EIO;
828 }
829
830 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
831 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
832
833 __set_bit(sw_prod, rxr->rx_agg_bmap);
834 rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
835 rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
836
837 rx_agg_buf->page = page;
838 rx_agg_buf->offset = offset;
839 rx_agg_buf->mapping = mapping;
840 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
841 rxbd->rx_bd_opaque = sw_prod;
842 return 0;
843 }
844
845 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
846 struct bnxt_cp_ring_info *cpr,
847 u16 cp_cons, u16 curr)
848 {
849 struct rx_agg_cmp *agg;
850
851 cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
852 agg = (struct rx_agg_cmp *)
853 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
854 return agg;
855 }
856
857 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
858 struct bnxt_rx_ring_info *rxr,
859 u16 agg_id, u16 curr)
860 {
861 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
862
863 return &tpa_info->agg_arr[curr];
864 }
865
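/* Return @agg_bufs aggregation buffers, starting at @start, to the
 * aggregation ring so they can be reused after an aborted packet.
 */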
866 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
867 u16 start, u32 agg_bufs, bool tpa)
868 {
869 struct bnxt_napi *bnapi = cpr->bnapi;
870 struct bnxt *bp = bnapi->bp;
871 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
872 u16 prod = rxr->rx_agg_prod;
873 u16 sw_prod = rxr->rx_sw_agg_prod;
874 bool p5_tpa = false;
875 u32 i;
876
877 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
878 p5_tpa = true;
879
880 for (i = 0; i < agg_bufs; i++) {
881 u16 cons;
882 struct rx_agg_cmp *agg;
883 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
884 struct rx_bd *prod_bd;
885 struct page *page;
886
887 if (p5_tpa)
888 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
889 else
890 agg = bnxt_get_agg(bp, cpr, idx, start + i);
891 cons = agg->rx_agg_cmp_opaque;
892 __clear_bit(cons, rxr->rx_agg_bmap);
893
894 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
895 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
896
897 __set_bit(sw_prod, rxr->rx_agg_bmap);
898 prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
899 cons_rx_buf = &rxr->rx_agg_ring[cons];
900
901 /* It is possible for sw_prod to be equal to cons, so
902 * set cons_rx_buf->page to NULL first.
903 */
904 page = cons_rx_buf->page;
905 cons_rx_buf->page = NULL;
906 prod_rx_buf->page = page;
907 prod_rx_buf->offset = cons_rx_buf->offset;
908
909 prod_rx_buf->mapping = cons_rx_buf->mapping;
910
911 prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
912
913 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
914 prod_bd->rx_bd_opaque = sw_prod;
915
916 prod = NEXT_RX_AGG(prod);
917 sw_prod = NEXT_RX_AGG(sw_prod);
918 }
919 rxr->rx_agg_prod = prod;
920 rxr->rx_sw_agg_prod = sw_prod;
921 }
922
923 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
924 struct bnxt_rx_ring_info *rxr,
925 u16 cons, void *data, u8 *data_ptr,
926 dma_addr_t dma_addr,
927 unsigned int offset_and_len)
928 {
929 unsigned int payload = offset_and_len >> 16;
930 unsigned int len = offset_and_len & 0xffff;
931 skb_frag_t *frag;
932 struct page *page = data;
933 u16 prod = rxr->rx_prod;
934 struct sk_buff *skb;
935 int off, err;
936
937 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
938 if (unlikely(err)) {
939 bnxt_reuse_rx_data(rxr, cons, data);
940 return NULL;
941 }
942 dma_addr -= bp->rx_dma_offset;
943 dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
944 DMA_ATTR_WEAK_ORDERING);
945
946 if (unlikely(!payload))
947 payload = eth_get_headlen(bp->dev, data_ptr, len);
948
949 skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
950 if (!skb) {
951 __free_page(page);
952 return NULL;
953 }
954
955 off = (void *)data_ptr - page_address(page);
956 skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
957 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
958 payload + NET_IP_ALIGN);
959
960 frag = &skb_shinfo(skb)->frags[0];
961 skb_frag_size_sub(frag, payload);
962 skb_frag_off_add(frag, payload);
963 skb->data_len -= payload;
964 skb->tail += payload;
965
966 return skb;
967 }
968
969 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
970 struct bnxt_rx_ring_info *rxr, u16 cons,
971 void *data, u8 *data_ptr,
972 dma_addr_t dma_addr,
973 unsigned int offset_and_len)
974 {
975 u16 prod = rxr->rx_prod;
976 struct sk_buff *skb;
977 int err;
978
979 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
980 if (unlikely(err)) {
981 bnxt_reuse_rx_data(rxr, cons, data);
982 return NULL;
983 }
984
985 skb = build_skb(data, 0);
986 dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
987 bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
988 if (!skb) {
989 kfree(data);
990 return NULL;
991 }
992
993 skb_reserve(skb, bp->rx_offset);
994 skb_put(skb, offset_and_len & 0xffff);
995 return skb;
996 }
997
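/* Attach the packet's completed aggregation pages to @skb as page
 * fragments.  Returns NULL and recycles the buffers if a replacement page
 * cannot be allocated.
 */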
998 static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
999 struct bnxt_cp_ring_info *cpr,
1000 struct sk_buff *skb, u16 idx,
1001 u32 agg_bufs, bool tpa)
1002 {
1003 struct bnxt_napi *bnapi = cpr->bnapi;
1004 struct pci_dev *pdev = bp->pdev;
1005 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1006 u16 prod = rxr->rx_agg_prod;
1007 bool p5_tpa = false;
1008 u32 i;
1009
1010 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
1011 p5_tpa = true;
1012
1013 for (i = 0; i < agg_bufs; i++) {
1014 u16 cons, frag_len;
1015 struct rx_agg_cmp *agg;
1016 struct bnxt_sw_rx_agg_bd *cons_rx_buf;
1017 struct page *page;
1018 dma_addr_t mapping;
1019
1020 if (p5_tpa)
1021 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
1022 else
1023 agg = bnxt_get_agg(bp, cpr, idx, i);
1024 cons = agg->rx_agg_cmp_opaque;
1025 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
1026 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
1027
1028 cons_rx_buf = &rxr->rx_agg_ring[cons];
1029 skb_fill_page_desc(skb, i, cons_rx_buf->page,
1030 cons_rx_buf->offset, frag_len);
1031 __clear_bit(cons, rxr->rx_agg_bmap);
1032
1033 /* It is possible for bnxt_alloc_rx_page() to allocate
1034 * a sw_prod index that equals the cons index, so we
1035 * need to clear the cons entry now.
1036 */
1037 mapping = cons_rx_buf->mapping;
1038 page = cons_rx_buf->page;
1039 cons_rx_buf->page = NULL;
1040
1041 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
1042 struct skb_shared_info *shinfo;
1043 unsigned int nr_frags;
1044
1045 shinfo = skb_shinfo(skb);
1046 nr_frags = --shinfo->nr_frags;
1047 __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
1048
1049 dev_kfree_skb(skb);
1050
1051 cons_rx_buf->page = page;
1052
1053 /* Update prod since possibly some pages have been
1054 * allocated already.
1055 */
1056 rxr->rx_agg_prod = prod;
1057 bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
1058 return NULL;
1059 }
1060
1061 dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
1062 PCI_DMA_FROMDEVICE,
1063 DMA_ATTR_WEAK_ORDERING);
1064
1065 skb->data_len += frag_len;
1066 skb->len += frag_len;
1067 skb->truesize += PAGE_SIZE;
1068
1069 prod = NEXT_RX_AGG(prod);
1070 }
1071 rxr->rx_agg_prod = prod;
1072 return skb;
1073 }
1074
1075 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1076 u8 agg_bufs, u32 *raw_cons)
1077 {
1078 u16 last;
1079 struct rx_agg_cmp *agg;
1080
1081 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1082 last = RING_CMP(*raw_cons);
1083 agg = (struct rx_agg_cmp *)
1084 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1085 return RX_AGG_CMP_VALID(agg, *raw_cons);
1086 }
1087
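/* Copy a small received packet into a freshly allocated skb so the
 * original RX buffer can be reused in place.
 */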
1088 static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
1089 unsigned int len,
1090 dma_addr_t mapping)
1091 {
1092 struct bnxt *bp = bnapi->bp;
1093 struct pci_dev *pdev = bp->pdev;
1094 struct sk_buff *skb;
1095
1096 skb = napi_alloc_skb(&bnapi->napi, len);
1097 if (!skb)
1098 return NULL;
1099
1100 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
1101 bp->rx_dir);
1102
1103 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1104 len + NET_IP_ALIGN);
1105
1106 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
1107 bp->rx_dir);
1108
1109 skb_put(skb, len);
1110 return skb;
1111 }
1112
1113 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1114 u32 *raw_cons, void *cmp)
1115 {
1116 struct rx_cmp *rxcmp = cmp;
1117 u32 tmp_raw_cons = *raw_cons;
1118 u8 cmp_type, agg_bufs = 0;
1119
1120 cmp_type = RX_CMP_TYPE(rxcmp);
1121
1122 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1123 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1124 RX_CMP_AGG_BUFS) >>
1125 RX_CMP_AGG_BUFS_SHIFT;
1126 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1127 struct rx_tpa_end_cmp *tpa_end = cmp;
1128
1129 if (bp->flags & BNXT_FLAG_CHIP_P5)
1130 return 0;
1131
1132 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1133 }
1134
1135 if (agg_bufs) {
1136 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1137 return -EBUSY;
1138 }
1139 *raw_cons = tmp_raw_cons;
1140 return 0;
1141 }
1142
1143 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
1144 {
1145 if (BNXT_PF(bp))
1146 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
1147 else
1148 schedule_delayed_work(&bp->fw_reset_task, delay);
1149 }
1150
1151 static void bnxt_queue_sp_work(struct bnxt *bp)
1152 {
1153 if (BNXT_PF(bp))
1154 queue_work(bnxt_pf_wq, &bp->sp_task);
1155 else
1156 schedule_work(&bp->sp_task);
1157 }
1158
1159 static void bnxt_cancel_sp_work(struct bnxt *bp)
1160 {
1161 if (BNXT_PF(bp))
1162 flush_workqueue(bnxt_pf_wq);
1163 else
1164 cancel_work_sync(&bp->sp_task);
1165 }
1166
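/* Schedule an RX ring reset through the slow-path task when the ring has
 * gotten out of sync with the hardware.
 */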
1167 static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
1168 {
1169 if (!rxr->bnapi->in_reset) {
1170 rxr->bnapi->in_reset = true;
1171 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
1172 bnxt_queue_sp_work(bp);
1173 }
1174 rxr->rx_next_cons = 0xffff;
1175 }
1176
1177 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1178 {
1179 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1180 u16 idx = agg_id & MAX_TPA_P5_MASK;
1181
1182 if (test_bit(idx, map->agg_idx_bmap))
1183 idx = find_first_zero_bit(map->agg_idx_bmap,
1184 BNXT_AGG_IDX_BMAP_SIZE);
1185 __set_bit(idx, map->agg_idx_bmap);
1186 map->agg_id_tbl[agg_id] = idx;
1187 return idx;
1188 }
1189
1190 static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1191 {
1192 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1193
1194 __clear_bit(idx, map->agg_idx_bmap);
1195 }
1196
1197 static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1198 {
1199 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1200
1201 return map->agg_id_tbl[agg_id];
1202 }
1203
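/* Handle a TPA (hardware GRO/LRO) start completion: stash the packet's
 * first buffer and metadata in the per-aggregation tpa_info and refill the
 * RX ring slot.
 */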
1204 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1205 struct rx_tpa_start_cmp *tpa_start,
1206 struct rx_tpa_start_cmp_ext *tpa_start1)
1207 {
1208 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1209 struct bnxt_tpa_info *tpa_info;
1210 u16 cons, prod, agg_id;
1211 struct rx_bd *prod_bd;
1212 dma_addr_t mapping;
1213
1214 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1215 agg_id = TPA_START_AGG_ID_P5(tpa_start);
1216 agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1217 } else {
1218 agg_id = TPA_START_AGG_ID(tpa_start);
1219 }
1220 cons = tpa_start->rx_tpa_start_cmp_opaque;
1221 prod = rxr->rx_prod;
1222 cons_rx_buf = &rxr->rx_buf_ring[cons];
1223 prod_rx_buf = &rxr->rx_buf_ring[prod];
1224 tpa_info = &rxr->rx_tpa[agg_id];
1225
1226 if (unlikely(cons != rxr->rx_next_cons ||
1227 TPA_START_ERROR(tpa_start))) {
1228 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1229 cons, rxr->rx_next_cons,
1230 TPA_START_ERROR_CODE(tpa_start1));
1231 bnxt_sched_reset(bp, rxr);
1232 return;
1233 }
1234 /* Store cfa_code in tpa_info to use in tpa_end
1235 * completion processing.
1236 */
1237 tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1238 prod_rx_buf->data = tpa_info->data;
1239 prod_rx_buf->data_ptr = tpa_info->data_ptr;
1240
1241 mapping = tpa_info->mapping;
1242 prod_rx_buf->mapping = mapping;
1243
1244 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1245
1246 prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1247
1248 tpa_info->data = cons_rx_buf->data;
1249 tpa_info->data_ptr = cons_rx_buf->data_ptr;
1250 cons_rx_buf->data = NULL;
1251 tpa_info->mapping = cons_rx_buf->mapping;
1252
1253 tpa_info->len =
1254 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1255 RX_TPA_START_CMP_LEN_SHIFT;
1256 if (likely(TPA_START_HASH_VALID(tpa_start))) {
1257 u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
1258
1259 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1260 tpa_info->gso_type = SKB_GSO_TCPV4;
1261 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1262 if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
1263 tpa_info->gso_type = SKB_GSO_TCPV6;
1264 tpa_info->rss_hash =
1265 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1266 } else {
1267 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1268 tpa_info->gso_type = 0;
1269 if (netif_msg_rx_err(bp))
1270 netdev_warn(bp->dev, "TPA packet without valid hash\n");
1271 }
1272 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1273 tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1274 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1275 tpa_info->agg_count = 0;
1276
1277 rxr->rx_prod = NEXT_RX(prod);
1278 cons = NEXT_RX(cons);
1279 rxr->rx_next_cons = NEXT_RX(cons);
1280 cons_rx_buf = &rxr->rx_buf_ring[cons];
1281
1282 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1283 rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1284 cons_rx_buf->data = NULL;
1285 }
1286
1287 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1288 {
1289 if (agg_bufs)
1290 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
1291 }
1292
1293 #ifdef CONFIG_INET
1294 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1295 {
1296 struct udphdr *uh = NULL;
1297
1298 if (ip_proto == htons(ETH_P_IP)) {
1299 struct iphdr *iph = (struct iphdr *)skb->data;
1300
1301 if (iph->protocol == IPPROTO_UDP)
1302 uh = (struct udphdr *)(iph + 1);
1303 } else {
1304 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1305
1306 if (iph->nexthdr == IPPROTO_UDP)
1307 uh = (struct udphdr *)(iph + 1);
1308 }
1309 if (uh) {
1310 if (uh->check)
1311 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1312 else
1313 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1314 }
1315 }
1316 #endif
1317
1318 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1319 int payload_off, int tcp_ts,
1320 struct sk_buff *skb)
1321 {
1322 #ifdef CONFIG_INET
1323 struct tcphdr *th;
1324 int len, nw_off;
1325 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1326 u32 hdr_info = tpa_info->hdr_info;
1327 bool loopback = false;
1328
1329 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1330 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1331 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1332
1333 /* If the packet is an internal loopback packet, the offsets will
1334 * have an extra 4 bytes.
1335 */
1336 if (inner_mac_off == 4) {
1337 loopback = true;
1338 } else if (inner_mac_off > 4) {
1339 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1340 ETH_HLEN - 2));
1341
1342 /* We only support inner IPv4/IPv6.  If we don't see the
1343 * correct protocol ID, it must be a loopback packet where
1344 * the offsets are off by 4.
1345 */
1346 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1347 loopback = true;
1348 }
1349 if (loopback) {
1350 /* internal loopback packet, subtract all offsets by 4 */
1351 inner_ip_off -= 4;
1352 inner_mac_off -= 4;
1353 outer_ip_off -= 4;
1354 }
1355
1356 nw_off = inner_ip_off - ETH_HLEN;
1357 skb_set_network_header(skb, nw_off);
1358 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1359 struct ipv6hdr *iph = ipv6_hdr(skb);
1360
1361 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1362 len = skb->len - skb_transport_offset(skb);
1363 th = tcp_hdr(skb);
1364 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1365 } else {
1366 struct iphdr *iph = ip_hdr(skb);
1367
1368 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1369 len = skb->len - skb_transport_offset(skb);
1370 th = tcp_hdr(skb);
1371 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1372 }
1373
1374 if (inner_mac_off) { /* tunnel */
1375 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1376 ETH_HLEN - 2));
1377
1378 bnxt_gro_tunnel(skb, proto);
1379 }
1380 #endif
1381 return skb;
1382 }
1383
1384 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1385 int payload_off, int tcp_ts,
1386 struct sk_buff *skb)
1387 {
1388 #ifdef CONFIG_INET
1389 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1390 u32 hdr_info = tpa_info->hdr_info;
1391 int iphdr_len, nw_off;
1392
1393 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1394 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1395 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1396
1397 nw_off = inner_ip_off - ETH_HLEN;
1398 skb_set_network_header(skb, nw_off);
1399 iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1400 sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1401 skb_set_transport_header(skb, nw_off + iphdr_len);
1402
1403 if (inner_mac_off) { /* tunnel */
1404 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1405 ETH_HLEN - 2));
1406
1407 bnxt_gro_tunnel(skb, proto);
1408 }
1409 #endif
1410 return skb;
1411 }
1412
1413 #define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
1414 #define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1415
1416 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1417 int payload_off, int tcp_ts,
1418 struct sk_buff *skb)
1419 {
1420 #ifdef CONFIG_INET
1421 struct tcphdr *th;
1422 int len, nw_off, tcp_opt_len = 0;
1423
1424 if (tcp_ts)
1425 tcp_opt_len = 12;
1426
1427 if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1428 struct iphdr *iph;
1429
1430 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1431 ETH_HLEN;
1432 skb_set_network_header(skb, nw_off);
1433 iph = ip_hdr(skb);
1434 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1435 len = skb->len - skb_transport_offset(skb);
1436 th = tcp_hdr(skb);
1437 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1438 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1439 struct ipv6hdr *iph;
1440
1441 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1442 ETH_HLEN;
1443 skb_set_network_header(skb, nw_off);
1444 iph = ipv6_hdr(skb);
1445 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1446 len = skb->len - skb_transport_offset(skb);
1447 th = tcp_hdr(skb);
1448 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1449 } else {
1450 dev_kfree_skb_any(skb);
1451 return NULL;
1452 }
1453
1454 if (nw_off) /* tunnel */
1455 bnxt_gro_tunnel(skb, skb->protocol);
1456 #endif
1457 return skb;
1458 }
1459
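/* Finish GRO processing of an aggregated TPA packet: set gso_size/gso_type
 * and call the chip-specific gro_func to fix up the headers before
 * tcp_gro_complete().
 */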
1460 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1461 struct bnxt_tpa_info *tpa_info,
1462 struct rx_tpa_end_cmp *tpa_end,
1463 struct rx_tpa_end_cmp_ext *tpa_end1,
1464 struct sk_buff *skb)
1465 {
1466 #ifdef CONFIG_INET
1467 int payload_off;
1468 u16 segs;
1469
1470 segs = TPA_END_TPA_SEGS(tpa_end);
1471 if (segs == 1)
1472 return skb;
1473
1474 NAPI_GRO_CB(skb)->count = segs;
1475 skb_shinfo(skb)->gso_size =
1476 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1477 skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1478 if (bp->flags & BNXT_FLAG_CHIP_P5)
1479 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1480 else
1481 payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1482 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1483 if (likely(skb))
1484 tcp_gro_complete(skb);
1485 #endif
1486 return skb;
1487 }
1488
1489 /* Given the cfa_code of a received packet determine which
1490 * netdev (vf-rep or PF) the packet is destined to.
1491 */
1492 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1493 {
1494 struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1495
1496 /* if vf-rep dev is NULL, the packet must belong to the PF */
1497 return dev ? dev : bp->dev;
1498 }
1499
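/* Handle a TPA end completion: build the skb for the aggregated packet,
 * attach any aggregation buffers, and apply RSS hash, VLAN, checksum and
 * GRO handling before returning it to the caller.
 */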
1500 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1501 struct bnxt_cp_ring_info *cpr,
1502 u32 *raw_cons,
1503 struct rx_tpa_end_cmp *tpa_end,
1504 struct rx_tpa_end_cmp_ext *tpa_end1,
1505 u8 *event)
1506 {
1507 struct bnxt_napi *bnapi = cpr->bnapi;
1508 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1509 u8 *data_ptr, agg_bufs;
1510 unsigned int len;
1511 struct bnxt_tpa_info *tpa_info;
1512 dma_addr_t mapping;
1513 struct sk_buff *skb;
1514 u16 idx = 0, agg_id;
1515 void *data;
1516 bool gro;
1517
1518 if (unlikely(bnapi->in_reset)) {
1519 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1520
1521 if (rc < 0)
1522 return ERR_PTR(-EBUSY);
1523 return NULL;
1524 }
1525
1526 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1527 agg_id = TPA_END_AGG_ID_P5(tpa_end);
1528 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1529 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1530 tpa_info = &rxr->rx_tpa[agg_id];
1531 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1532 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1533 agg_bufs, tpa_info->agg_count);
1534 agg_bufs = tpa_info->agg_count;
1535 }
1536 tpa_info->agg_count = 0;
1537 *event |= BNXT_AGG_EVENT;
1538 bnxt_free_agg_idx(rxr, agg_id);
1539 idx = agg_id;
1540 gro = !!(bp->flags & BNXT_FLAG_GRO);
1541 } else {
1542 agg_id = TPA_END_AGG_ID(tpa_end);
1543 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1544 tpa_info = &rxr->rx_tpa[agg_id];
1545 idx = RING_CMP(*raw_cons);
1546 if (agg_bufs) {
1547 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1548 return ERR_PTR(-EBUSY);
1549
1550 *event |= BNXT_AGG_EVENT;
1551 idx = NEXT_CMP(idx);
1552 }
1553 gro = !!TPA_END_GRO(tpa_end);
1554 }
1555 data = tpa_info->data;
1556 data_ptr = tpa_info->data_ptr;
1557 prefetch(data_ptr);
1558 len = tpa_info->len;
1559 mapping = tpa_info->mapping;
1560
1561 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1562 bnxt_abort_tpa(cpr, idx, agg_bufs);
1563 if (agg_bufs > MAX_SKB_FRAGS)
1564 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1565 agg_bufs, (int)MAX_SKB_FRAGS);
1566 return NULL;
1567 }
1568
1569 if (len <= bp->rx_copy_thresh) {
1570 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1571 if (!skb) {
1572 bnxt_abort_tpa(cpr, idx, agg_bufs);
1573 return NULL;
1574 }
1575 } else {
1576 u8 *new_data;
1577 dma_addr_t new_mapping;
1578
1579 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1580 if (!new_data) {
1581 bnxt_abort_tpa(cpr, idx, agg_bufs);
1582 return NULL;
1583 }
1584
1585 tpa_info->data = new_data;
1586 tpa_info->data_ptr = new_data + bp->rx_offset;
1587 tpa_info->mapping = new_mapping;
1588
1589 skb = build_skb(data, 0);
1590 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1591 bp->rx_buf_use_size, bp->rx_dir,
1592 DMA_ATTR_WEAK_ORDERING);
1593
1594 if (!skb) {
1595 kfree(data);
1596 bnxt_abort_tpa(cpr, idx, agg_bufs);
1597 return NULL;
1598 }
1599 skb_reserve(skb, bp->rx_offset);
1600 skb_put(skb, len);
1601 }
1602
1603 if (agg_bufs) {
1604 skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
1605 if (!skb) {
1606 /* Page reuse already handled by bnxt_rx_pages(). */
1607 return NULL;
1608 }
1609 }
1610
1611 skb->protocol =
1612 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
1613
1614 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1615 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1616
1617 if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1618 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1619 u16 vlan_proto = tpa_info->metadata >>
1620 RX_CMP_FLAGS2_METADATA_TPID_SFT;
1621 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1622
1623 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1624 }
1625
1626 skb_checksum_none_assert(skb);
1627 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1628 skb->ip_summed = CHECKSUM_UNNECESSARY;
1629 skb->csum_level =
1630 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1631 }
1632
1633 if (gro)
1634 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1635
1636 return skb;
1637 }
1638
1639 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1640 struct rx_agg_cmp *rx_agg)
1641 {
1642 u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1643 struct bnxt_tpa_info *tpa_info;
1644
1645 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1646 tpa_info = &rxr->rx_tpa[agg_id];
1647 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1648 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1649 }
1650
1651 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1652 struct sk_buff *skb)
1653 {
1654 if (skb->dev != bp->dev) {
1655 /* this packet belongs to a vf-rep */
1656 bnxt_vf_rep_rx(bp, skb);
1657 return;
1658 }
1659 skb_record_rx_queue(skb, bnapi->index);
1660 napi_gro_receive(&bnapi->napi, skb);
1661 }
1662
1663 /* returns the following:
1664 * 1 - 1 packet successfully received
1665 * 0 - successful TPA_START, packet not completed yet
1666 * -EBUSY - completion ring does not have all the agg buffers yet
1667 * -ENOMEM - packet aborted due to out of memory
1668 * -EIO - packet aborted due to hw error indicated in BD
1669 */
1670 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1671 u32 *raw_cons, u8 *event)
1672 {
1673 struct bnxt_napi *bnapi = cpr->bnapi;
1674 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1675 struct net_device *dev = bp->dev;
1676 struct rx_cmp *rxcmp;
1677 struct rx_cmp_ext *rxcmp1;
1678 u32 tmp_raw_cons = *raw_cons;
1679 u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1680 struct bnxt_sw_rx_bd *rx_buf;
1681 unsigned int len;
1682 u8 *data_ptr, agg_bufs, cmp_type;
1683 dma_addr_t dma_addr;
1684 struct sk_buff *skb;
1685 void *data;
1686 int rc = 0;
1687 u32 misc;
1688
1689 rxcmp = (struct rx_cmp *)
1690 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1691
1692 cmp_type = RX_CMP_TYPE(rxcmp);
1693
1694 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
1695 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1696 goto next_rx_no_prod_no_len;
1697 }
1698
1699 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1700 cp_cons = RING_CMP(tmp_raw_cons);
1701 rxcmp1 = (struct rx_cmp_ext *)
1702 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1703
1704 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1705 return -EBUSY;
1706
1707 prod = rxr->rx_prod;
1708
1709 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1710 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1711 (struct rx_tpa_start_cmp_ext *)rxcmp1);
1712
1713 *event |= BNXT_RX_EVENT;
1714 goto next_rx_no_prod_no_len;
1715
1716 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1717 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
1718 (struct rx_tpa_end_cmp *)rxcmp,
1719 (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
1720
1721 if (IS_ERR(skb))
1722 return -EBUSY;
1723
1724 rc = -ENOMEM;
1725 if (likely(skb)) {
1726 bnxt_deliver_skb(bp, bnapi, skb);
1727 rc = 1;
1728 }
1729 *event |= BNXT_RX_EVENT;
1730 goto next_rx_no_prod_no_len;
1731 }
1732
1733 cons = rxcmp->rx_cmp_opaque;
1734 if (unlikely(cons != rxr->rx_next_cons)) {
1735 int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
1736
1737 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1738 cons, rxr->rx_next_cons);
1739 bnxt_sched_reset(bp, rxr);
1740 return rc1;
1741 }
1742 rx_buf = &rxr->rx_buf_ring[cons];
1743 data = rx_buf->data;
1744 data_ptr = rx_buf->data_ptr;
1745 prefetch(data_ptr);
1746
1747 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1748 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
1749
1750 if (agg_bufs) {
1751 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1752 return -EBUSY;
1753
1754 cp_cons = NEXT_CMP(cp_cons);
1755 *event |= BNXT_AGG_EVENT;
1756 }
1757 *event |= BNXT_RX_EVENT;
1758
1759 rx_buf->data = NULL;
1760 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1761 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1762
1763 bnxt_reuse_rx_data(rxr, cons, data);
1764 if (agg_bufs)
1765 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
1766 false);
1767
1768 rc = -EIO;
1769 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
1770 bnapi->cp_ring.rx_buf_errors++;
1771 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
1772 netdev_warn(bp->dev, "RX buffer error %x\n",
1773 rx_err);
1774 bnxt_sched_reset(bp, rxr);
1775 }
1776 }
1777 goto next_rx_no_len;
1778 }
1779
1780 len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
1781 dma_addr = rx_buf->mapping;
1782
1783 if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1784 rc = 1;
1785 goto next_rx;
1786 }
1787
1788 if (len <= bp->rx_copy_thresh) {
1789 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
1790 bnxt_reuse_rx_data(rxr, cons, data);
1791 if (!skb) {
1792 if (agg_bufs)
1793 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
1794 agg_bufs, false);
1795 rc = -ENOMEM;
1796 goto next_rx;
1797 }
1798 } else {
1799 u32 payload;
1800
1801 if (rx_buf->data_ptr == data_ptr)
1802 payload = misc & RX_CMP_PAYLOAD_OFFSET;
1803 else
1804 payload = 0;
1805 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
1806 payload | len);
1807 if (!skb) {
1808 rc = -ENOMEM;
1809 goto next_rx;
1810 }
1811 }
1812
1813 if (agg_bufs) {
1814 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
1815 if (!skb) {
1816 rc = -ENOMEM;
1817 goto next_rx;
1818 }
1819 }
1820
1821 if (RX_CMP_HASH_VALID(rxcmp)) {
1822 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1823 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1824
1825 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1826 if (hash_type != 1 && hash_type != 3)
1827 type = PKT_HASH_TYPE_L3;
1828 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1829 }
1830
1831 cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1832 skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
1833
1834 if ((rxcmp1->rx_cmp_flags2 &
1835 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1836 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1837 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1838 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1839 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
1840
1841 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1842 }
1843
1844 skb_checksum_none_assert(skb);
1845 if (RX_CMP_L4_CS_OK(rxcmp1)) {
1846 if (dev->features & NETIF_F_RXCSUM) {
1847 skb->ip_summed = CHECKSUM_UNNECESSARY;
1848 skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1849 }
1850 } else {
1851 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1852 if (dev->features & NETIF_F_RXCSUM)
1853 bnapi->cp_ring.rx_l4_csum_errors++;
1854 }
1855 }
1856
1857 bnxt_deliver_skb(bp, bnapi, skb);
1858 rc = 1;
1859
1860 next_rx:
1861 cpr->rx_packets += 1;
1862 cpr->rx_bytes += len;
1863
1864 next_rx_no_len:
1865 rxr->rx_prod = NEXT_RX(prod);
1866 rxr->rx_next_cons = NEXT_RX(cons);
1867
1868 next_rx_no_prod_no_len:
1869 *raw_cons = tmp_raw_cons;
1870
1871 return rc;
1872 }
1873
1874 /* In netpoll mode, if we are using a combined completion ring, we need to
1875 * discard the rx packets and recycle the buffers.
1876 */
1877 static int bnxt_force_rx_discard(struct bnxt *bp,
1878 struct bnxt_cp_ring_info *cpr,
1879 u32 *raw_cons, u8 *event)
1880 {
1881 u32 tmp_raw_cons = *raw_cons;
1882 struct rx_cmp_ext *rxcmp1;
1883 struct rx_cmp *rxcmp;
1884 u16 cp_cons;
1885 u8 cmp_type;
1886
1887 cp_cons = RING_CMP(tmp_raw_cons);
1888 rxcmp = (struct rx_cmp *)
1889 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1890
1891 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1892 cp_cons = RING_CMP(tmp_raw_cons);
1893 rxcmp1 = (struct rx_cmp_ext *)
1894 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1895
1896 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1897 return -EBUSY;
1898
1899 cmp_type = RX_CMP_TYPE(rxcmp);
1900 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1901 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1902 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1903 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1904 struct rx_tpa_end_cmp_ext *tpa_end1;
1905
1906 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
1907 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
1908 cpu_to_le32(RX_TPA_END_CMP_ERRORS);
1909 }
1910 return bnxt_rx_pkt(bp, cpr, raw_cons, event);
1911 }
1912
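/* Read a firmware health register; the register descriptor encodes whether
 * it lives in PCI config space, GRC space (read through its BAR0 mapping),
 * BAR0 or BAR1.
 */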
1913 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
1914 {
1915 struct bnxt_fw_health *fw_health = bp->fw_health;
1916 u32 reg = fw_health->regs[reg_idx];
1917 u32 reg_type, reg_off, val = 0;
1918
1919 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
1920 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
1921 switch (reg_type) {
1922 case BNXT_FW_HEALTH_REG_TYPE_CFG:
1923 pci_read_config_dword(bp->pdev, reg_off, &val);
1924 break;
1925 case BNXT_FW_HEALTH_REG_TYPE_GRC:
1926 reg_off = fw_health->mapped_regs[reg_idx];
1927 /* fall through */
1928 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
1929 val = readl(bp->bar0 + reg_off);
1930 break;
1931 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
1932 val = readl(bp->bar1 + reg_off);
1933 break;
1934 }
1935 if (reg_idx == BNXT_FW_RESET_INPROG_REG)
1936 val &= fw_health->fw_reset_inprog_reg_mask;
1937 return val;
1938 }
1939
1940 #define BNXT_GET_EVENT_PORT(data) \
1941 ((data) & \
1942 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
1943
1944 static int bnxt_async_event_process(struct bnxt *bp,
1945 struct hwrm_async_event_cmpl *cmpl)
1946 {
1947 u16 event_id = le16_to_cpu(cmpl->event_id);
1948
1949 /* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
1950 switch (event_id) {
1951 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
1952 u32 data1 = le32_to_cpu(cmpl->event_data1);
1953 struct bnxt_link_info *link_info = &bp->link_info;
1954
1955 if (BNXT_VF(bp))
1956 goto async_event_process_exit;
1957
1958 /* print unsupported speed warning in forced speed mode only */
1959 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
1960 (data1 & 0x20000)) {
1961 u16 fw_speed = link_info->force_link_speed;
1962 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
1963
1964 if (speed != SPEED_UNKNOWN)
1965 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
1966 speed);
1967 }
1968 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
1969 }
1970 /* fall through */
1971 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
1972 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
1973 break;
1974 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
1975 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
1976 break;
1977 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
1978 u32 data1 = le32_to_cpu(cmpl->event_data1);
1979 u16 port_id = BNXT_GET_EVENT_PORT(data1);
1980
1981 if (BNXT_VF(bp))
1982 break;
1983
1984 if (bp->pf.port_id != port_id)
1985 break;
1986
1987 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
1988 break;
1989 }
1990 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
1991 if (BNXT_PF(bp))
1992 goto async_event_process_exit;
1993 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
1994 break;
1995 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
1996 u32 data1 = le32_to_cpu(cmpl->event_data1);
1997
1998 if (!bp->fw_health)
1999 goto async_event_process_exit;
2000
2001 bp->fw_reset_timestamp = jiffies;
2002 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2003 if (!bp->fw_reset_min_dsecs)
2004 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2005 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2006 if (!bp->fw_reset_max_dsecs)
2007 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2008 if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2009 netdev_warn(bp->dev, "Firmware fatal reset event received\n");
2010 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2011 } else {
2012 netdev_warn(bp->dev, "Firmware non-fatal reset event received, max wait time %d msec\n",
2013 bp->fw_reset_max_dsecs * 100);
2014 }
2015 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2016 break;
2017 }
2018 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2019 struct bnxt_fw_health *fw_health = bp->fw_health;
2020 u32 data1 = le32_to_cpu(cmpl->event_data1);
2021
2022 if (!fw_health)
2023 goto async_event_process_exit;
2024
2025 fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
2026 fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2027 if (!fw_health->enabled)
2028 break;
2029
2030 if (netif_msg_drv(bp))
2031 netdev_info(bp->dev, "Error recovery info: error recovery[%d], master[%d], reset count[0x%x], health status: 0x%x\n",
2032 fw_health->enabled, fw_health->master,
2033 bnxt_fw_health_readl(bp,
2034 BNXT_FW_RESET_CNT_REG),
2035 bnxt_fw_health_readl(bp,
2036 BNXT_FW_HEALTH_REG));
2037 fw_health->tmr_multiplier =
2038 DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2039 bp->current_interval * 10);
2040 fw_health->tmr_counter = fw_health->tmr_multiplier;
2041 fw_health->last_fw_heartbeat =
2042 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2043 fw_health->last_fw_reset_cnt =
2044 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2045 goto async_event_process_exit;
2046 }
2047 default:
2048 goto async_event_process_exit;
2049 }
2050 bnxt_queue_sp_work(bp);
2051 async_event_process_exit:
2052 bnxt_ulp_async_events(bp, cmpl);
2053 return 0;
2054 }
2055
2056 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2057 {
2058 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2059 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2060 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2061 (struct hwrm_fwd_req_cmpl *)txcmp;
2062
2063 switch (cmpl_type) {
2064 case CMPL_BASE_TYPE_HWRM_DONE:
2065 seq_id = le16_to_cpu(h_cmpl->sequence_id);
2066 if (seq_id == bp->hwrm_intr_seq_id)
2067 bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id;
2068 else
2069 netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
2070 break;
2071
2072 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2073 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2074
2075 if ((vf_id < bp->pf.first_vf_id) ||
2076 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2077 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2078 vf_id);
2079 return -EINVAL;
2080 }
2081
2082 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2083 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
2084 bnxt_queue_sp_work(bp);
2085 break;
2086
2087 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2088 bnxt_async_event_process(bp,
2089 (struct hwrm_async_event_cmpl *)txcmp);
2090
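/* fall through */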
2091 default:
2092 break;
2093 }
2094
2095 return 0;
2096 }
2097
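/* MSI-X handler: count the event and let NAPI do the completion work. */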
2098 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2099 {
2100 struct bnxt_napi *bnapi = dev_instance;
2101 struct bnxt *bp = bnapi->bp;
2102 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2103 u32 cons = RING_CMP(cpr->cp_raw_cons);
2104
2105 cpr->event_ctr++;
2106 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2107 napi_schedule(&bnapi->napi);
2108 return IRQ_HANDLED;
2109 }
2110
2111 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2112 {
2113 u32 raw_cons = cpr->cp_raw_cons;
2114 u16 cons = RING_CMP(raw_cons);
2115 struct tx_cmp *txcmp;
2116
2117 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2118
2119 return TX_CMP_VALID(txcmp, raw_cons);
2120 }
2121
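/* Legacy INTx handler: when no work is pending, check the legacy status
 * register to filter out interrupts that are not ours, then disable the
 * ring IRQ and schedule NAPI.
 */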
2122 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2123 {
2124 struct bnxt_napi *bnapi = dev_instance;
2125 struct bnxt *bp = bnapi->bp;
2126 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2127 u32 cons = RING_CMP(cpr->cp_raw_cons);
2128 u32 int_status;
2129
2130 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2131
2132 if (!bnxt_has_work(bp, cpr)) {
2133 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
2134 /* return if erroneous interrupt */
2135 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2136 return IRQ_NONE;
2137 }
2138
2139 /* disable ring IRQ */
2140 BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
2141
2142 /* Return here if interrupt is shared and is disabled. */
2143 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2144 return IRQ_HANDLED;
2145
2146 napi_schedule(&bnapi->napi);
2147 return IRQ_HANDLED;
2148 }
2149
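/* Main completion ring processing loop: count TX completions, hand RX/TPA
 * completions to bnxt_rx_pkt() (or discard them when budget is 0), dispatch
 * HWRM completions, and stop once the NAPI budget is consumed.  TX reclaim
 * and the RX/aggregation doorbells are deferred to __bnxt_poll_work_done().
 */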
2150 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2151 int budget)
2152 {
2153 struct bnxt_napi *bnapi = cpr->bnapi;
2154 u32 raw_cons = cpr->cp_raw_cons;
2155 u32 cons;
2156 int tx_pkts = 0;
2157 int rx_pkts = 0;
2158 u8 event = 0;
2159 struct tx_cmp *txcmp;
2160
2161 cpr->has_more_work = 0;
2162 while (1) {
2163 int rc;
2164
2165 cons = RING_CMP(raw_cons);
2166 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2167
2168 if (!TX_CMP_VALID(txcmp, raw_cons))
2169 break;
2170
2171 /* The valid test of the entry must be done first before
2172 * reading any further.
2173 */
2174 dma_rmb();
2175 cpr->had_work_done = 1;
2176 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
2177 tx_pkts++;
2178 /* return full budget so NAPI will complete. */
2179 if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
2180 rx_pkts = budget;
2181 raw_cons = NEXT_RAW_CMP(raw_cons);
2182 if (budget)
2183 cpr->has_more_work = 1;
2184 break;
2185 }
2186 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2187 if (likely(budget))
2188 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2189 else
2190 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2191 &event);
2192 if (likely(rc >= 0))
2193 rx_pkts += rc;
2194 /* Increment rx_pkts when rc is -ENOMEM to count towards
2195 * the NAPI budget. Otherwise, we may potentially loop
2196 * here forever if we consistently cannot allocate
2197 * buffers.
2198 */
2199 else if (rc == -ENOMEM && budget)
2200 rx_pkts++;
2201 else if (rc == -EBUSY) /* partial completion */
2202 break;
2203 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
2204 CMPL_BASE_TYPE_HWRM_DONE) ||
2205 (TX_CMP_TYPE(txcmp) ==
2206 CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
2207 (TX_CMP_TYPE(txcmp) ==
2208 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
2209 bnxt_hwrm_handler(bp, txcmp);
2210 }
2211 raw_cons = NEXT_RAW_CMP(raw_cons);
2212
2213 if (rx_pkts && rx_pkts == budget) {
2214 cpr->has_more_work = 1;
2215 break;
2216 }
2217 }
2218
2219 if (event & BNXT_REDIRECT_EVENT)
2220 xdp_do_flush_map();
2221
2222 if (event & BNXT_TX_EVENT) {
2223 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
2224 u16 prod = txr->tx_prod;
2225
2226 /* Sync BD data before updating doorbell */
2227 wmb();
2228
2229 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
2230 }
2231
2232 cpr->cp_raw_cons = raw_cons;
2233 bnapi->tx_pkts += tx_pkts;
2234 bnapi->events |= event;
2235 return rx_pkts;
2236 }
2237
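/* Finish work deferred by __bnxt_poll_work(): reclaim completed TX buffers
 * and ring the RX and aggregation ring doorbells.
 */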
2238 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
2239 {
2240 if (bnapi->tx_pkts) {
2241 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
2242 bnapi->tx_pkts = 0;
2243 }
2244
2245 if (bnapi->events & BNXT_RX_EVENT) {
2246 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2247
2248 if (bnapi->events & BNXT_AGG_EVENT)
2249 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2250 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2251 }
2252 bnapi->events = 0;
2253 }
2254
2255 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2256 int budget)
2257 {
2258 struct bnxt_napi *bnapi = cpr->bnapi;
2259 int rx_pkts;
2260
2261 rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2262
2263 /* ACK completion ring before freeing tx ring and producing new
2264 * buffers in rx/agg rings to prevent overflowing the completion
2265 * ring.
2266 */
2267 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2268
2269 __bnxt_poll_work_done(bp, bnapi);
2270 return rx_pkts;
2271 }
2272
2273 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2274 {
2275 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2276 struct bnxt *bp = bnapi->bp;
2277 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2278 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2279 struct tx_cmp *txcmp;
2280 struct rx_cmp_ext *rxcmp1;
2281 u32 cp_cons, tmp_raw_cons;
2282 u32 raw_cons = cpr->cp_raw_cons;
2283 u32 rx_pkts = 0;
2284 u8 event = 0;
2285
2286 while (1) {
2287 int rc;
2288
2289 cp_cons = RING_CMP(raw_cons);
2290 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2291
2292 if (!TX_CMP_VALID(txcmp, raw_cons))
2293 break;
2294
2295 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2296 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2297 cp_cons = RING_CMP(tmp_raw_cons);
2298 rxcmp1 = (struct rx_cmp_ext *)
2299 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2300
2301 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2302 break;
2303
2304 /* force an error to recycle the buffer */
2305 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2306 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2307
2308 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2309 if (likely(rc == -EIO) && budget)
2310 rx_pkts++;
2311 else if (rc == -EBUSY) /* partial completion */
2312 break;
2313 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2314 CMPL_BASE_TYPE_HWRM_DONE)) {
2315 bnxt_hwrm_handler(bp, txcmp);
2316 } else {
2317 netdev_err(bp->dev,
2318 "Invalid completion received on special ring\n");
2319 }
2320 raw_cons = NEXT_RAW_CMP(raw_cons);
2321
2322 if (rx_pkts == budget)
2323 break;
2324 }
2325
2326 cpr->cp_raw_cons = raw_cons;
2327 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2328 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2329
2330 if (event & BNXT_AGG_EVENT)
2331 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2332
2333 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
2334 napi_complete_done(napi, rx_pkts);
2335 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2336 }
2337 return rx_pkts;
2338 }
2339
2340 static int bnxt_poll(struct napi_struct *napi, int budget)
2341 {
2342 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2343 struct bnxt *bp = bnapi->bp;
2344 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2345 int work_done = 0;
2346
2347 while (1) {
2348 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
2349
2350 if (work_done >= budget) {
2351 if (!budget)
2352 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2353 break;
2354 }
2355
2356 if (!bnxt_has_work(bp, cpr)) {
2357 if (napi_complete_done(napi, work_done))
2358 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2359 break;
2360 }
2361 }
2362 if (bp->flags & BNXT_FLAG_DIM) {
2363 struct dim_sample dim_sample = {};
2364
2365 dim_update_sample(cpr->event_ctr,
2366 cpr->rx_packets,
2367 cpr->rx_bytes,
2368 &dim_sample);
2369 net_dim(&cpr->dim, dim_sample);
2370 }
2371 return work_done;
2372 }
2373
2374 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2375 {
2376 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2377 int i, work_done = 0;
2378
2379 for (i = 0; i < 2; i++) {
2380 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2381
2382 if (cpr2) {
2383 work_done += __bnxt_poll_work(bp, cpr2,
2384 budget - work_done);
2385 cpr->has_more_work |= cpr2->has_more_work;
2386 }
2387 }
2388 return work_done;
2389 }
2390
2391 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2392 u64 dbr_type, bool all)
2393 {
2394 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2395 int i;
2396
2397 for (i = 0; i < 2; i++) {
2398 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2399 struct bnxt_db_info *db;
2400
2401 if (cpr2 && (all || cpr2->had_work_done)) {
2402 db = &cpr2->cp_db;
2403 writeq(db->db_key64 | dbr_type |
2404 RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2405 cpr2->had_work_done = 0;
2406 }
2407 }
2408 __bnxt_poll_work_done(bp, bnapi);
2409 }
2410
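/* NAPI poll for P5 chips: notification queue (NQ) entries point at the
 * RX/TX completion rings in cp_ring_arr[]; each notification is serviced
 * with __bnxt_poll_work() and the CQ/NQ doorbells are re-armed when done.
 */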
2411 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2412 {
2413 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2414 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2415 u32 raw_cons = cpr->cp_raw_cons;
2416 struct bnxt *bp = bnapi->bp;
2417 struct nqe_cn *nqcmp;
2418 int work_done = 0;
2419 u32 cons;
2420
2421 if (cpr->has_more_work) {
2422 cpr->has_more_work = 0;
2423 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2424 if (cpr->has_more_work) {
2425 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, false);
2426 return work_done;
2427 }
2428 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, true);
2429 if (napi_complete_done(napi, work_done))
2430 BNXT_DB_NQ_ARM_P5(&cpr->cp_db, cpr->cp_raw_cons);
2431 return work_done;
2432 }
2433 while (1) {
2434 cons = RING_CMP(raw_cons);
2435 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2436
2437 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
2438 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
2439 false);
2440 cpr->cp_raw_cons = raw_cons;
2441 if (napi_complete_done(napi, work_done))
2442 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2443 cpr->cp_raw_cons);
2444 return work_done;
2445 }
2446
2447 /* The valid test of the entry must be done first before
2448 * reading any further.
2449 */
2450 dma_rmb();
2451
2452 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2453 u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2454 struct bnxt_cp_ring_info *cpr2;
2455
2456 cpr2 = cpr->cp_ring_arr[idx];
2457 work_done += __bnxt_poll_work(bp, cpr2,
2458 budget - work_done);
2459 cpr->has_more_work = cpr2->has_more_work;
2460 } else {
2461 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2462 }
2463 raw_cons = NEXT_RAW_CMP(raw_cons);
2464 if (cpr->has_more_work)
2465 break;
2466 }
2467 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, true);
2468 cpr->cp_raw_cons = raw_cons;
2469 return work_done;
2470 }
2471
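/* Unmap and free all TX buffers (including XDP_REDIRECT frames) still
 * queued on the TX rings.
 */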
2472 static void bnxt_free_tx_skbs(struct bnxt *bp)
2473 {
2474 int i, max_idx;
2475 struct pci_dev *pdev = bp->pdev;
2476
2477 if (!bp->tx_ring)
2478 return;
2479
2480 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2481 for (i = 0; i < bp->tx_nr_rings; i++) {
2482 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2483 int j;
2484
2485 for (j = 0; j < max_idx;) {
2486 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2487 struct sk_buff *skb;
2488 int k, last;
2489
2490 if (i < bp->tx_nr_rings_xdp &&
2491 tx_buf->action == XDP_REDIRECT) {
2492 dma_unmap_single(&pdev->dev,
2493 dma_unmap_addr(tx_buf, mapping),
2494 dma_unmap_len(tx_buf, len),
2495 PCI_DMA_TODEVICE);
2496 xdp_return_frame(tx_buf->xdpf);
2497 tx_buf->action = 0;
2498 tx_buf->xdpf = NULL;
2499 j++;
2500 continue;
2501 }
2502
2503 skb = tx_buf->skb;
2504 if (!skb) {
2505 j++;
2506 continue;
2507 }
2508
2509 tx_buf->skb = NULL;
2510
2511 if (tx_buf->is_push) {
2512 dev_kfree_skb(skb);
2513 j += 2;
2514 continue;
2515 }
2516
2517 dma_unmap_single(&pdev->dev,
2518 dma_unmap_addr(tx_buf, mapping),
2519 skb_headlen(skb),
2520 PCI_DMA_TODEVICE);
2521
2522 last = tx_buf->nr_frags;
2523 j += 2;
2524 for (k = 0; k < last; k++, j++) {
2525 int ring_idx = j & bp->tx_ring_mask;
2526 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2527
2528 tx_buf = &txr->tx_buf_ring[ring_idx];
2529 dma_unmap_page(
2530 &pdev->dev,
2531 dma_unmap_addr(tx_buf, mapping),
2532 skb_frag_size(frag), PCI_DMA_TODEVICE);
2533 }
2534 dev_kfree_skb(skb);
2535 }
2536 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2537 }
2538 }
2539
2540 static void bnxt_free_rx_skbs(struct bnxt *bp)
2541 {
2542 int i, max_idx, max_agg_idx;
2543 struct pci_dev *pdev = bp->pdev;
2544
2545 if (!bp->rx_ring)
2546 return;
2547
2548 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2549 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2550 for (i = 0; i < bp->rx_nr_rings; i++) {
2551 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2552 struct bnxt_tpa_idx_map *map;
2553 int j;
2554
2555 if (rxr->rx_tpa) {
2556 for (j = 0; j < bp->max_tpa; j++) {
2557 struct bnxt_tpa_info *tpa_info =
2558 &rxr->rx_tpa[j];
2559 u8 *data = tpa_info->data;
2560
2561 if (!data)
2562 continue;
2563
2564 dma_unmap_single_attrs(&pdev->dev,
2565 tpa_info->mapping,
2566 bp->rx_buf_use_size,
2567 bp->rx_dir,
2568 DMA_ATTR_WEAK_ORDERING);
2569
2570 tpa_info->data = NULL;
2571
2572 kfree(data);
2573 }
2574 }
2575
2576 for (j = 0; j < max_idx; j++) {
2577 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
2578 dma_addr_t mapping = rx_buf->mapping;
2579 void *data = rx_buf->data;
2580
2581 if (!data)
2582 continue;
2583
2584 rx_buf->data = NULL;
2585
2586 if (BNXT_RX_PAGE_MODE(bp)) {
2587 mapping -= bp->rx_dma_offset;
2588 dma_unmap_page_attrs(&pdev->dev, mapping,
2589 PAGE_SIZE, bp->rx_dir,
2590 DMA_ATTR_WEAK_ORDERING);
2591 page_pool_recycle_direct(rxr->page_pool, data);
2592 } else {
2593 dma_unmap_single_attrs(&pdev->dev, mapping,
2594 bp->rx_buf_use_size,
2595 bp->rx_dir,
2596 DMA_ATTR_WEAK_ORDERING);
2597 kfree(data);
2598 }
2599 }
2600
2601 for (j = 0; j < max_agg_idx; j++) {
2602 struct bnxt_sw_rx_agg_bd *rx_agg_buf =
2603 &rxr->rx_agg_ring[j];
2604 struct page *page = rx_agg_buf->page;
2605
2606 if (!page)
2607 continue;
2608
2609 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2610 BNXT_RX_PAGE_SIZE,
2611 PCI_DMA_FROMDEVICE,
2612 DMA_ATTR_WEAK_ORDERING);
2613
2614 rx_agg_buf->page = NULL;
2615 __clear_bit(j, rxr->rx_agg_bmap);
2616
2617 __free_page(page);
2618 }
2619 if (rxr->rx_page) {
2620 __free_page(rxr->rx_page);
2621 rxr->rx_page = NULL;
2622 }
2623 map = rxr->rx_tpa_idx_map;
2624 if (map)
2625 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
2626 }
2627 }
2628
2629 static void bnxt_free_skbs(struct bnxt *bp)
2630 {
2631 bnxt_free_tx_skbs(bp);
2632 bnxt_free_rx_skbs(bp);
2633 }
2634
2635 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2636 {
2637 struct pci_dev *pdev = bp->pdev;
2638 int i;
2639
2640 for (i = 0; i < rmem->nr_pages; i++) {
2641 if (!rmem->pg_arr[i])
2642 continue;
2643
2644 dma_free_coherent(&pdev->dev, rmem->page_size,
2645 rmem->pg_arr[i], rmem->dma_arr[i]);
2646
2647 rmem->pg_arr[i] = NULL;
2648 }
2649 if (rmem->pg_tbl) {
2650 size_t pg_tbl_size = rmem->nr_pages * 8;
2651
2652 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2653 pg_tbl_size = rmem->page_size;
2654 dma_free_coherent(&pdev->dev, pg_tbl_size,
2655 rmem->pg_tbl, rmem->pg_tbl_map);
2656 rmem->pg_tbl = NULL;
2657 }
2658 if (rmem->vmem_size && *rmem->vmem) {
2659 vfree(*rmem->vmem);
2660 *rmem->vmem = NULL;
2661 }
2662 }
2663
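/* Allocate the DMA-coherent pages backing a ring, the page table (with PTE
 * valid/last bits) when the ring spans multiple pages or levels, and the
 * optional software context area (vmem).
 */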
2664 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2665 {
2666 struct pci_dev *pdev = bp->pdev;
2667 u64 valid_bit = 0;
2668 int i;
2669
2670 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2671 valid_bit = PTU_PTE_VALID;
2672 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
2673 size_t pg_tbl_size = rmem->nr_pages * 8;
2674
2675 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2676 pg_tbl_size = rmem->page_size;
2677 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
2678 &rmem->pg_tbl_map,
2679 GFP_KERNEL);
2680 if (!rmem->pg_tbl)
2681 return -ENOMEM;
2682 }
2683
2684 for (i = 0; i < rmem->nr_pages; i++) {
2685 u64 extra_bits = valid_bit;
2686
2687 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2688 rmem->page_size,
2689 &rmem->dma_arr[i],
2690 GFP_KERNEL);
2691 if (!rmem->pg_arr[i])
2692 return -ENOMEM;
2693
2694 if (rmem->nr_pages > 1 || rmem->depth > 0) {
2695 if (i == rmem->nr_pages - 2 &&
2696 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2697 extra_bits |= PTU_PTE_NEXT_TO_LAST;
2698 else if (i == rmem->nr_pages - 1 &&
2699 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2700 extra_bits |= PTU_PTE_LAST;
2701 rmem->pg_tbl[i] =
2702 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
2703 }
2704 }
2705
2706 if (rmem->vmem_size) {
2707 *rmem->vmem = vzalloc(rmem->vmem_size);
2708 if (!(*rmem->vmem))
2709 return -ENOMEM;
2710 }
2711 return 0;
2712 }
2713
2714 static void bnxt_free_tpa_info(struct bnxt *bp)
2715 {
2716 int i;
2717
2718 for (i = 0; i < bp->rx_nr_rings; i++) {
2719 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2720
2721 kfree(rxr->rx_tpa_idx_map);
2722 rxr->rx_tpa_idx_map = NULL;
2723 if (rxr->rx_tpa) {
2724 kfree(rxr->rx_tpa[0].agg_arr);
2725 rxr->rx_tpa[0].agg_arr = NULL;
2726 }
2727 kfree(rxr->rx_tpa);
2728 rxr->rx_tpa = NULL;
2729 }
2730 }
2731
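/* Allocate per-ring TPA session state.  On P5 chips this also includes a
 * shared array of aggregation entries (MAX_SKB_FRAGS per session) and the
 * aggregation ID lookup map.
 */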
2732 static int bnxt_alloc_tpa_info(struct bnxt *bp)
2733 {
2734 int i, j, total_aggs = 0;
2735
2736 bp->max_tpa = MAX_TPA;
2737 if (bp->flags & BNXT_FLAG_CHIP_P5) {
2738 if (!bp->max_tpa_v2)
2739 return 0;
2740 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
2741 total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
2742 }
2743
2744 for (i = 0; i < bp->rx_nr_rings; i++) {
2745 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2746 struct rx_agg_cmp *agg;
2747
2748 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
2749 GFP_KERNEL);
2750 if (!rxr->rx_tpa)
2751 return -ENOMEM;
2752
2753 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
2754 continue;
2755 agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
2756 rxr->rx_tpa[0].agg_arr = agg;
2757 if (!agg)
2758 return -ENOMEM;
2759 for (j = 1; j < bp->max_tpa; j++)
2760 rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
2761 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
2762 GFP_KERNEL);
2763 if (!rxr->rx_tpa_idx_map)
2764 return -ENOMEM;
2765 }
2766 return 0;
2767 }
2768
2769 static void bnxt_free_rx_rings(struct bnxt *bp)
2770 {
2771 int i;
2772
2773 if (!bp->rx_ring)
2774 return;
2775
2776 bnxt_free_tpa_info(bp);
2777 for (i = 0; i < bp->rx_nr_rings; i++) {
2778 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2779 struct bnxt_ring_struct *ring;
2780
2781 if (rxr->xdp_prog)
2782 bpf_prog_put(rxr->xdp_prog);
2783
2784 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
2785 xdp_rxq_info_unreg(&rxr->xdp_rxq);
2786
2787 page_pool_destroy(rxr->page_pool);
2788 rxr->page_pool = NULL;
2789
2790 kfree(rxr->rx_agg_bmap);
2791 rxr->rx_agg_bmap = NULL;
2792
2793 ring = &rxr->rx_ring_struct;
2794 bnxt_free_ring(bp, &ring->ring_mem);
2795
2796 ring = &rxr->rx_agg_ring_struct;
2797 bnxt_free_ring(bp, &ring->ring_mem);
2798 }
2799 }
2800
2801 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
2802 struct bnxt_rx_ring_info *rxr)
2803 {
2804 struct page_pool_params pp = { 0 };
2805
2806 pp.pool_size = bp->rx_ring_size;
2807 pp.nid = dev_to_node(&bp->pdev->dev);
2808 pp.dev = &bp->pdev->dev;
2809 pp.dma_dir = DMA_BIDIRECTIONAL;
2810
2811 rxr->page_pool = page_pool_create(&pp);
2812 if (IS_ERR(rxr->page_pool)) {
2813 int err = PTR_ERR(rxr->page_pool);
2814
2815 rxr->page_pool = NULL;
2816 return err;
2817 }
2818 return 0;
2819 }
2820
2821 static int bnxt_alloc_rx_rings(struct bnxt *bp)
2822 {
2823 int i, rc = 0, agg_rings = 0;
2824
2825 if (!bp->rx_ring)
2826 return -ENOMEM;
2827
2828 if (bp->flags & BNXT_FLAG_AGG_RINGS)
2829 agg_rings = 1;
2830
2831 for (i = 0; i < bp->rx_nr_rings; i++) {
2832 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2833 struct bnxt_ring_struct *ring;
2834
2835 ring = &rxr->rx_ring_struct;
2836
2837 rc = bnxt_alloc_rx_page_pool(bp, rxr);
2838 if (rc)
2839 return rc;
2840
2841 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
2842 if (rc < 0)
2843 return rc;
2844
2845 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
2846 MEM_TYPE_PAGE_POOL,
2847 rxr->page_pool);
2848 if (rc) {
2849 xdp_rxq_info_unreg(&rxr->xdp_rxq);
2850 return rc;
2851 }
2852
2853 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2854 if (rc)
2855 return rc;
2856
2857 ring->grp_idx = i;
2858 if (agg_rings) {
2859 u16 mem_size;
2860
2861 ring = &rxr->rx_agg_ring_struct;
2862 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2863 if (rc)
2864 return rc;
2865
2866 ring->grp_idx = i;
2867 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
2868 mem_size = rxr->rx_agg_bmap_size / 8;
2869 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
2870 if (!rxr->rx_agg_bmap)
2871 return -ENOMEM;
2872 }
2873 }
2874 if (bp->flags & BNXT_FLAG_TPA)
2875 rc = bnxt_alloc_tpa_info(bp);
2876 return rc;
2877 }
2878
2879 static void bnxt_free_tx_rings(struct bnxt *bp)
2880 {
2881 int i;
2882 struct pci_dev *pdev = bp->pdev;
2883
2884 if (!bp->tx_ring)
2885 return;
2886
2887 for (i = 0; i < bp->tx_nr_rings; i++) {
2888 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2889 struct bnxt_ring_struct *ring;
2890
2891 if (txr->tx_push) {
2892 dma_free_coherent(&pdev->dev, bp->tx_push_size,
2893 txr->tx_push, txr->tx_push_mapping);
2894 txr->tx_push = NULL;
2895 }
2896
2897 ring = &txr->tx_ring_struct;
2898
2899 bnxt_free_ring(bp, &ring->ring_mem);
2900 }
2901 }
2902
2903 static int bnxt_alloc_tx_rings(struct bnxt *bp)
2904 {
2905 int i, j, rc;
2906 struct pci_dev *pdev = bp->pdev;
2907
2908 bp->tx_push_size = 0;
2909 if (bp->tx_push_thresh) {
2910 int push_size;
2911
2912 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
2913 bp->tx_push_thresh);
2914
2915 if (push_size > 256) {
2916 push_size = 0;
2917 bp->tx_push_thresh = 0;
2918 }
2919
2920 bp->tx_push_size = push_size;
2921 }
2922
2923 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
2924 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2925 struct bnxt_ring_struct *ring;
2926 u8 qidx;
2927
2928 ring = &txr->tx_ring_struct;
2929
2930 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2931 if (rc)
2932 return rc;
2933
2934 ring->grp_idx = txr->bnapi->index;
2935 if (bp->tx_push_size) {
2936 dma_addr_t mapping;
2937
2938 /* One pre-allocated DMA buffer to back up
2939 * the TX push operation
2940 */
2941 txr->tx_push = dma_alloc_coherent(&pdev->dev,
2942 bp->tx_push_size,
2943 &txr->tx_push_mapping,
2944 GFP_KERNEL);
2945
2946 if (!txr->tx_push)
2947 return -ENOMEM;
2948
2949 mapping = txr->tx_push_mapping +
2950 sizeof(struct tx_push_bd);
2951 txr->data_mapping = cpu_to_le64(mapping);
2952 }
2953 qidx = bp->tc_to_qidx[j];
2954 ring->queue_id = bp->q_info[qidx].queue_id;
2955 if (i < bp->tx_nr_rings_xdp)
2956 continue;
2957 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
2958 j++;
2959 }
2960 return 0;
2961 }
2962
2963 static void bnxt_free_cp_rings(struct bnxt *bp)
2964 {
2965 int i;
2966
2967 if (!bp->bnapi)
2968 return;
2969
2970 for (i = 0; i < bp->cp_nr_rings; i++) {
2971 struct bnxt_napi *bnapi = bp->bnapi[i];
2972 struct bnxt_cp_ring_info *cpr;
2973 struct bnxt_ring_struct *ring;
2974 int j;
2975
2976 if (!bnapi)
2977 continue;
2978
2979 cpr = &bnapi->cp_ring;
2980 ring = &cpr->cp_ring_struct;
2981
2982 bnxt_free_ring(bp, &ring->ring_mem);
2983
2984 for (j = 0; j < 2; j++) {
2985 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
2986
2987 if (cpr2) {
2988 ring = &cpr2->cp_ring_struct;
2989 bnxt_free_ring(bp, &ring->ring_mem);
2990 kfree(cpr2);
2991 cpr->cp_ring_arr[j] = NULL;
2992 }
2993 }
2994 }
2995 }
2996
2997 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
2998 {
2999 struct bnxt_ring_mem_info *rmem;
3000 struct bnxt_ring_struct *ring;
3001 struct bnxt_cp_ring_info *cpr;
3002 int rc;
3003
3004 cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
3005 if (!cpr)
3006 return NULL;
3007
3008 ring = &cpr->cp_ring_struct;
3009 rmem = &ring->ring_mem;
3010 rmem->nr_pages = bp->cp_nr_pages;
3011 rmem->page_size = HW_CMPD_RING_SIZE;
3012 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3013 rmem->dma_arr = cpr->cp_desc_mapping;
3014 rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
3015 rc = bnxt_alloc_ring(bp, rmem);
3016 if (rc) {
3017 bnxt_free_ring(bp, rmem);
3018 kfree(cpr);
3019 cpr = NULL;
3020 }
3021 return cpr;
3022 }
3023
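/* Allocate the per-NAPI completion (or, on P5 chips, notification) rings.
 * On P5 chips each one also gets dedicated RX and/or TX completion
 * sub-rings.
 */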
3024 static int bnxt_alloc_cp_rings(struct bnxt *bp)
3025 {
3026 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
3027 int i, rc, ulp_base_vec, ulp_msix;
3028
3029 ulp_msix = bnxt_get_ulp_msix_num(bp);
3030 ulp_base_vec = bnxt_get_ulp_msix_base(bp);
3031 for (i = 0; i < bp->cp_nr_rings; i++) {
3032 struct bnxt_napi *bnapi = bp->bnapi[i];
3033 struct bnxt_cp_ring_info *cpr;
3034 struct bnxt_ring_struct *ring;
3035
3036 if (!bnapi)
3037 continue;
3038
3039 cpr = &bnapi->cp_ring;
3040 cpr->bnapi = bnapi;
3041 ring = &cpr->cp_ring_struct;
3042
3043 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3044 if (rc)
3045 return rc;
3046
3047 if (ulp_msix && i >= ulp_base_vec)
3048 ring->map_idx = i + ulp_msix;
3049 else
3050 ring->map_idx = i;
3051
3052 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3053 continue;
3054
3055 if (i < bp->rx_nr_rings) {
3056 struct bnxt_cp_ring_info *cpr2 =
3057 bnxt_alloc_cp_sub_ring(bp);
3058
3059 cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
3060 if (!cpr2)
3061 return -ENOMEM;
3062 cpr2->bnapi = bnapi;
3063 }
3064 if ((sh && i < bp->tx_nr_rings) ||
3065 (!sh && i >= bp->rx_nr_rings)) {
3066 struct bnxt_cp_ring_info *cpr2 =
3067 bnxt_alloc_cp_sub_ring(bp);
3068
3069 cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
3070 if (!cpr2)
3071 return -ENOMEM;
3072 cpr2->bnapi = bnapi;
3073 }
3074 }
3075 return 0;
3076 }
3077
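/* Fill in the ring_mem descriptors (page counts, page sizes, descriptor
 * arrays and software buffer arrays) for every completion, RX, aggregation
 * and TX ring.
 */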
3078 static void bnxt_init_ring_struct(struct bnxt *bp)
3079 {
3080 int i;
3081
3082 for (i = 0; i < bp->cp_nr_rings; i++) {
3083 struct bnxt_napi *bnapi = bp->bnapi[i];
3084 struct bnxt_ring_mem_info *rmem;
3085 struct bnxt_cp_ring_info *cpr;
3086 struct bnxt_rx_ring_info *rxr;
3087 struct bnxt_tx_ring_info *txr;
3088 struct bnxt_ring_struct *ring;
3089
3090 if (!bnapi)
3091 continue;
3092
3093 cpr = &bnapi->cp_ring;
3094 ring = &cpr->cp_ring_struct;
3095 rmem = &ring->ring_mem;
3096 rmem->nr_pages = bp->cp_nr_pages;
3097 rmem->page_size = HW_CMPD_RING_SIZE;
3098 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3099 rmem->dma_arr = cpr->cp_desc_mapping;
3100 rmem->vmem_size = 0;
3101
3102 rxr = bnapi->rx_ring;
3103 if (!rxr)
3104 goto skip_rx;
3105
3106 ring = &rxr->rx_ring_struct;
3107 rmem = &ring->ring_mem;
3108 rmem->nr_pages = bp->rx_nr_pages;
3109 rmem->page_size = HW_RXBD_RING_SIZE;
3110 rmem->pg_arr = (void **)rxr->rx_desc_ring;
3111 rmem->dma_arr = rxr->rx_desc_mapping;
3112 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3113 rmem->vmem = (void **)&rxr->rx_buf_ring;
3114
3115 ring = &rxr->rx_agg_ring_struct;
3116 rmem = &ring->ring_mem;
3117 rmem->nr_pages = bp->rx_agg_nr_pages;
3118 rmem->page_size = HW_RXBD_RING_SIZE;
3119 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
3120 rmem->dma_arr = rxr->rx_agg_desc_mapping;
3121 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3122 rmem->vmem = (void **)&rxr->rx_agg_ring;
3123
3124 skip_rx:
3125 txr = bnapi->tx_ring;
3126 if (!txr)
3127 continue;
3128
3129 ring = &txr->tx_ring_struct;
3130 rmem = &ring->ring_mem;
3131 rmem->nr_pages = bp->tx_nr_pages;
3132 rmem->page_size = HW_RXBD_RING_SIZE;
3133 rmem->pg_arr = (void **)txr->tx_desc_ring;
3134 rmem->dma_arr = txr->tx_desc_mapping;
3135 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3136 rmem->vmem = (void **)&txr->tx_buf_ring;
3137 }
3138 }
3139
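/* Write the BD type/flags into every RX BD on the ring and record the
 * producer index in the opaque field.
 */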
3140 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
3141 {
3142 int i;
3143 u32 prod;
3144 struct rx_bd **rx_buf_ring;
3145
3146 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
3147 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
3148 int j;
3149 struct rx_bd *rxbd;
3150
3151 rxbd = rx_buf_ring[i];
3152 if (!rxbd)
3153 continue;
3154
3155 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
3156 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
3157 rxbd->rx_bd_opaque = prod;
3158 }
3159 }
3160 }
3161
3162 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
3163 {
3164 struct net_device *dev = bp->dev;
3165 struct bnxt_rx_ring_info *rxr;
3166 struct bnxt_ring_struct *ring;
3167 u32 prod, type;
3168 int i;
3169
3170 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
3171 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
3172
3173 if (NET_IP_ALIGN == 2)
3174 type |= RX_BD_FLAGS_SOP;
3175
3176 rxr = &bp->rx_ring[ring_nr];
3177 ring = &rxr->rx_ring_struct;
3178 bnxt_init_rxbd_pages(ring, type);
3179
3180 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
3181 rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1);
3182 if (IS_ERR(rxr->xdp_prog)) {
3183 int rc = PTR_ERR(rxr->xdp_prog);
3184
3185 rxr->xdp_prog = NULL;
3186 return rc;
3187 }
3188 }
3189 prod = rxr->rx_prod;
3190 for (i = 0; i < bp->rx_ring_size; i++) {
3191 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
3192 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
3193 ring_nr, i, bp->rx_ring_size);
3194 break;
3195 }
3196 prod = NEXT_RX(prod);
3197 }
3198 rxr->rx_prod = prod;
3199 ring->fw_ring_id = INVALID_HW_RING_ID;
3200
3201 ring = &rxr->rx_agg_ring_struct;
3202 ring->fw_ring_id = INVALID_HW_RING_ID;
3203
3204 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3205 return 0;
3206
3207 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
3208 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
3209
3210 bnxt_init_rxbd_pages(ring, type);
3211
3212 prod = rxr->rx_agg_prod;
3213 for (i = 0; i < bp->rx_agg_ring_size; i++) {
3214 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
3215 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
3216 ring_nr, i, bp->rx_agg_ring_size);
3217 break;
3218 }
3219 prod = NEXT_RX_AGG(prod);
3220 }
3221 rxr->rx_agg_prod = prod;
3222
3223 if (bp->flags & BNXT_FLAG_TPA) {
3224 if (rxr->rx_tpa) {
3225 u8 *data;
3226 dma_addr_t mapping;
3227
3228 for (i = 0; i < bp->max_tpa; i++) {
3229 data = __bnxt_alloc_rx_data(bp, &mapping,
3230 GFP_KERNEL);
3231 if (!data)
3232 return -ENOMEM;
3233
3234 rxr->rx_tpa[i].data = data;
3235 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
3236 rxr->rx_tpa[i].mapping = mapping;
3237 }
3238 } else {
3239 netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
3240 return -ENOMEM;
3241 }
3242 }
3243
3244 return 0;
3245 }
3246
3247 static void bnxt_init_cp_rings(struct bnxt *bp)
3248 {
3249 int i, j;
3250
3251 for (i = 0; i < bp->cp_nr_rings; i++) {
3252 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
3253 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3254
3255 ring->fw_ring_id = INVALID_HW_RING_ID;
3256 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3257 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3258 for (j = 0; j < 2; j++) {
3259 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3260
3261 if (!cpr2)
3262 continue;
3263
3264 ring = &cpr2->cp_ring_struct;
3265 ring->fw_ring_id = INVALID_HW_RING_ID;
3266 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3267 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3268 }
3269 }
3270 }
3271
3272 static int bnxt_init_rx_rings(struct bnxt *bp)
3273 {
3274 int i, rc = 0;
3275
3276 if (BNXT_RX_PAGE_MODE(bp)) {
3277 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
3278 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
3279 } else {
3280 bp->rx_offset = BNXT_RX_OFFSET;
3281 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
3282 }
3283
3284 for (i = 0; i < bp->rx_nr_rings; i++) {
3285 rc = bnxt_init_one_rx_ring(bp, i);
3286 if (rc)
3287 break;
3288 }
3289
3290 return rc;
3291 }
3292
3293 static int bnxt_init_tx_rings(struct bnxt *bp)
3294 {
3295 u16 i;
3296
3297 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
3298 MAX_SKB_FRAGS + 1);
3299
3300 for (i = 0; i < bp->tx_nr_rings; i++) {
3301 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3302 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3303
3304 ring->fw_ring_id = INVALID_HW_RING_ID;
3305 }
3306
3307 return 0;
3308 }
3309
3310 static void bnxt_free_ring_grps(struct bnxt *bp)
3311 {
3312 kfree(bp->grp_info);
3313 bp->grp_info = NULL;
3314 }
3315
3316 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
3317 {
3318 int i;
3319
3320 if (irq_re_init) {
3321 bp->grp_info = kcalloc(bp->cp_nr_rings,
3322 sizeof(struct bnxt_ring_grp_info),
3323 GFP_KERNEL);
3324 if (!bp->grp_info)
3325 return -ENOMEM;
3326 }
3327 for (i = 0; i < bp->cp_nr_rings; i++) {
3328 if (irq_re_init)
3329 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3330 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3331 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3332 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3333 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3334 }
3335 return 0;
3336 }
3337
3338 static void bnxt_free_vnics(struct bnxt *bp)
3339 {
3340 kfree(bp->vnic_info);
3341 bp->vnic_info = NULL;
3342 bp->nr_vnics = 0;
3343 }
3344
3345 static int bnxt_alloc_vnics(struct bnxt *bp)
3346 {
3347 int num_vnics = 1;
3348
3349 #ifdef CONFIG_RFS_ACCEL
3350 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
3351 num_vnics += bp->rx_nr_rings;
3352 #endif
3353
3354 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3355 num_vnics++;
3356
3357 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3358 GFP_KERNEL);
3359 if (!bp->vnic_info)
3360 return -ENOMEM;
3361
3362 bp->nr_vnics = num_vnics;
3363 return 0;
3364 }
3365
3366 static void bnxt_init_vnics(struct bnxt *bp)
3367 {
3368 int i;
3369
3370 for (i = 0; i < bp->nr_vnics; i++) {
3371 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3372 int j;
3373
3374 vnic->fw_vnic_id = INVALID_HW_RING_ID;
3375 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3376 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3377
3378 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3379
3380 if (bp->vnic_info[i].rss_hash_key) {
3381 if (i == 0)
3382 prandom_bytes(vnic->rss_hash_key,
3383 HW_HASH_KEY_SIZE);
3384 else
3385 memcpy(vnic->rss_hash_key,
3386 bp->vnic_info[0].rss_hash_key,
3387 HW_HASH_KEY_SIZE);
3388 }
3389 }
3390 }
3391
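/* Return the number of ring pages (a power of two) needed to hold
 * ring_size descriptors.
 */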
3392 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3393 {
3394 int pages;
3395
3396 pages = ring_size / desc_per_pg;
3397
3398 if (!pages)
3399 return 1;
3400
3401 pages++;
3402
3403 while (pages & (pages - 1))
3404 pages++;
3405
3406 return pages;
3407 }
3408
3409 void bnxt_set_tpa_flags(struct bnxt *bp)
3410 {
3411 bp->flags &= ~BNXT_FLAG_TPA;
3412 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3413 return;
3414 if (bp->dev->features & NETIF_F_LRO)
3415 bp->flags |= BNXT_FLAG_LRO;
3416 else if (bp->dev->features & NETIF_F_GRO_HW)
3417 bp->flags |= BNXT_FLAG_GRO;
3418 }
3419
3420 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3421 * be set on entry.
3422 */
3423 void bnxt_set_ring_params(struct bnxt *bp)
3424 {
3425 u32 ring_size, rx_size, rx_space;
3426 u32 agg_factor = 0, agg_ring_size = 0;
3427
3428 /* 8 for CRC and VLAN */
3429 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3430
3431 rx_space = rx_size + NET_SKB_PAD +
3432 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3433
3434 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3435 ring_size = bp->rx_ring_size;
3436 bp->rx_agg_ring_size = 0;
3437 bp->rx_agg_nr_pages = 0;
3438
3439 if (bp->flags & BNXT_FLAG_TPA)
3440 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
3441
3442 bp->flags &= ~BNXT_FLAG_JUMBO;
3443 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
3444 u32 jumbo_factor;
3445
3446 bp->flags |= BNXT_FLAG_JUMBO;
3447 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3448 if (jumbo_factor > agg_factor)
3449 agg_factor = jumbo_factor;
3450 }
3451 agg_ring_size = ring_size * agg_factor;
3452
3453 if (agg_ring_size) {
3454 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3455 RX_DESC_CNT);
3456 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3457 u32 tmp = agg_ring_size;
3458
3459 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3460 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3461 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3462 tmp, agg_ring_size);
3463 }
3464 bp->rx_agg_ring_size = agg_ring_size;
3465 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3466 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3467 rx_space = rx_size + NET_SKB_PAD +
3468 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3469 }
3470
3471 bp->rx_buf_use_size = rx_size;
3472 bp->rx_buf_size = rx_space;
3473
3474 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3475 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3476
3477 ring_size = bp->tx_ring_size;
3478 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3479 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3480
3481 ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
3482 bp->cp_ring_size = ring_size;
3483
3484 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3485 if (bp->cp_nr_pages > MAX_CP_PAGES) {
3486 bp->cp_nr_pages = MAX_CP_PAGES;
3487 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3488 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3489 ring_size, bp->cp_ring_size);
3490 }
3491 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3492 bp->cp_ring_mask = bp->cp_bit - 1;
3493 }
3494
3495 /* Changing allocation mode of RX rings.
3496 * TODO: Update when extending xdp_rxq_info to support allocation modes.
3497 */
3498 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
3499 {
3500 if (page_mode) {
3501 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3502 return -EOPNOTSUPP;
3503 bp->dev->max_mtu =
3504 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
3505 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3506 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
3507 bp->rx_dir = DMA_BIDIRECTIONAL;
3508 bp->rx_skb_func = bnxt_rx_page_skb;
3509 /* Disable LRO or GRO_HW */
3510 netdev_update_features(bp->dev);
3511 } else {
3512 bp->dev->max_mtu = bp->max_mtu;
3513 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3514 bp->rx_dir = DMA_FROM_DEVICE;
3515 bp->rx_skb_func = bnxt_rx_skb;
3516 }
3517 return 0;
3518 }
3519
3520 static void bnxt_free_vnic_attributes(struct bnxt *bp)
3521 {
3522 int i;
3523 struct bnxt_vnic_info *vnic;
3524 struct pci_dev *pdev = bp->pdev;
3525
3526 if (!bp->vnic_info)
3527 return;
3528
3529 for (i = 0; i < bp->nr_vnics; i++) {
3530 vnic = &bp->vnic_info[i];
3531
3532 kfree(vnic->fw_grp_ids);
3533 vnic->fw_grp_ids = NULL;
3534
3535 kfree(vnic->uc_list);
3536 vnic->uc_list = NULL;
3537
3538 if (vnic->mc_list) {
3539 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
3540 vnic->mc_list, vnic->mc_list_mapping);
3541 vnic->mc_list = NULL;
3542 }
3543
3544 if (vnic->rss_table) {
3545 dma_free_coherent(&pdev->dev, PAGE_SIZE,
3546 vnic->rss_table,
3547 vnic->rss_table_dma_addr);
3548 vnic->rss_table = NULL;
3549 }
3550
3551 vnic->rss_hash_key = NULL;
3552 vnic->flags = 0;
3553 }
3554 }
3555
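/* Allocate the per-VNIC unicast/multicast filter lists, the firmware ring
 * group ID array, and a DMA page holding the RSS indirection table followed
 * by the hash key.
 */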
3556 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3557 {
3558 int i, rc = 0, size;
3559 struct bnxt_vnic_info *vnic;
3560 struct pci_dev *pdev = bp->pdev;
3561 int max_rings;
3562
3563 for (i = 0; i < bp->nr_vnics; i++) {
3564 vnic = &bp->vnic_info[i];
3565
3566 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
3567 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
3568
3569 if (mem_size > 0) {
3570 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
3571 if (!vnic->uc_list) {
3572 rc = -ENOMEM;
3573 goto out;
3574 }
3575 }
3576 }
3577
3578 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
3579 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
3580 vnic->mc_list =
3581 dma_alloc_coherent(&pdev->dev,
3582 vnic->mc_list_size,
3583 &vnic->mc_list_mapping,
3584 GFP_KERNEL);
3585 if (!vnic->mc_list) {
3586 rc = -ENOMEM;
3587 goto out;
3588 }
3589 }
3590
3591 if (bp->flags & BNXT_FLAG_CHIP_P5)
3592 goto vnic_skip_grps;
3593
3594 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3595 max_rings = bp->rx_nr_rings;
3596 else
3597 max_rings = 1;
3598
3599 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3600 if (!vnic->fw_grp_ids) {
3601 rc = -ENOMEM;
3602 goto out;
3603 }
3604 vnic_skip_grps:
3605 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3606 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3607 continue;
3608
3609 /* Allocate rss table and hash key */
3610 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3611 &vnic->rss_table_dma_addr,
3612 GFP_KERNEL);
3613 if (!vnic->rss_table) {
3614 rc = -ENOMEM;
3615 goto out;
3616 }
3617
3618 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3619
3620 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
3621 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
3622 }
3623 return 0;
3624
3625 out:
3626 return rc;
3627 }
3628
3629 static void bnxt_free_hwrm_resources(struct bnxt *bp)
3630 {
3631 struct pci_dev *pdev = bp->pdev;
3632
3633 if (bp->hwrm_cmd_resp_addr) {
3634 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
3635 bp->hwrm_cmd_resp_dma_addr);
3636 bp->hwrm_cmd_resp_addr = NULL;
3637 }
3638
3639 if (bp->hwrm_cmd_kong_resp_addr) {
3640 dma_free_coherent(&pdev->dev, PAGE_SIZE,
3641 bp->hwrm_cmd_kong_resp_addr,
3642 bp->hwrm_cmd_kong_resp_dma_addr);
3643 bp->hwrm_cmd_kong_resp_addr = NULL;
3644 }
3645 }
3646
3647 static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp)
3648 {
3649 struct pci_dev *pdev = bp->pdev;
3650
3651 if (bp->hwrm_cmd_kong_resp_addr)
3652 return 0;
3653
3654 bp->hwrm_cmd_kong_resp_addr =
3655 dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3656 &bp->hwrm_cmd_kong_resp_dma_addr,
3657 GFP_KERNEL);
3658 if (!bp->hwrm_cmd_kong_resp_addr)
3659 return -ENOMEM;
3660
3661 return 0;
3662 }
3663
3664 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
3665 {
3666 struct pci_dev *pdev = bp->pdev;
3667
3668 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3669 &bp->hwrm_cmd_resp_dma_addr,
3670 GFP_KERNEL);
3671 if (!bp->hwrm_cmd_resp_addr)
3672 return -ENOMEM;
3673
3674 return 0;
3675 }
3676
3677 static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
3678 {
3679 if (bp->hwrm_short_cmd_req_addr) {
3680 struct pci_dev *pdev = bp->pdev;
3681
3682 dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3683 bp->hwrm_short_cmd_req_addr,
3684 bp->hwrm_short_cmd_req_dma_addr);
3685 bp->hwrm_short_cmd_req_addr = NULL;
3686 }
3687 }
3688
3689 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
3690 {
3691 struct pci_dev *pdev = bp->pdev;
3692
3693 if (bp->hwrm_short_cmd_req_addr)
3694 return 0;
3695
3696 bp->hwrm_short_cmd_req_addr =
3697 dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3698 &bp->hwrm_short_cmd_req_dma_addr,
3699 GFP_KERNEL);
3700 if (!bp->hwrm_short_cmd_req_addr)
3701 return -ENOMEM;
3702
3703 return 0;
3704 }
3705
3706 static void bnxt_free_port_stats(struct bnxt *bp)
3707 {
3708 struct pci_dev *pdev = bp->pdev;
3709
3710 bp->flags &= ~BNXT_FLAG_PORT_STATS;
3711 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
3712
3713 if (bp->hw_rx_port_stats) {
3714 dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
3715 bp->hw_rx_port_stats,
3716 bp->hw_rx_port_stats_map);
3717 bp->hw_rx_port_stats = NULL;
3718 }
3719
3720 if (bp->hw_tx_port_stats_ext) {
3721 dma_free_coherent(&pdev->dev, sizeof(struct tx_port_stats_ext),
3722 bp->hw_tx_port_stats_ext,
3723 bp->hw_tx_port_stats_ext_map);
3724 bp->hw_tx_port_stats_ext = NULL;
3725 }
3726
3727 if (bp->hw_rx_port_stats_ext) {
3728 dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
3729 bp->hw_rx_port_stats_ext,
3730 bp->hw_rx_port_stats_ext_map);
3731 bp->hw_rx_port_stats_ext = NULL;
3732 }
3733
3734 if (bp->hw_pcie_stats) {
3735 dma_free_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
3736 bp->hw_pcie_stats, bp->hw_pcie_stats_map);
3737 bp->hw_pcie_stats = NULL;
3738 }
3739 }
3740
3741 static void bnxt_free_ring_stats(struct bnxt *bp)
3742 {
3743 struct pci_dev *pdev = bp->pdev;
3744 int size, i;
3745
3746 if (!bp->bnapi)
3747 return;
3748
3749 size = bp->hw_ring_stats_size;
3750
3751 for (i = 0; i < bp->cp_nr_rings; i++) {
3752 struct bnxt_napi *bnapi = bp->bnapi[i];
3753 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3754
3755 if (cpr->hw_stats) {
3756 dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
3757 cpr->hw_stats_map);
3758 cpr->hw_stats = NULL;
3759 }
3760 }
3761 }
3762
3763 static int bnxt_alloc_stats(struct bnxt *bp)
3764 {
3765 u32 size, i;
3766 struct pci_dev *pdev = bp->pdev;
3767
3768 size = bp->hw_ring_stats_size;
3769
3770 for (i = 0; i < bp->cp_nr_rings; i++) {
3771 struct bnxt_napi *bnapi = bp->bnapi[i];
3772 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3773
3774 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
3775 &cpr->hw_stats_map,
3776 GFP_KERNEL);
3777 if (!cpr->hw_stats)
3778 return -ENOMEM;
3779
3780 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
3781 }
3782
3783 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
3784 return 0;
3785
3786 if (bp->hw_rx_port_stats)
3787 goto alloc_ext_stats;
3788
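/* The rx and tx port stats share one DMA block: the tx stats start
 * 512 bytes after the end of the rx stats structure.
 */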
3789 bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
3790 sizeof(struct tx_port_stats) + 1024;
3791
3792 bp->hw_rx_port_stats =
3793 dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
3794 &bp->hw_rx_port_stats_map,
3795 GFP_KERNEL);
3796 if (!bp->hw_rx_port_stats)
3797 return -ENOMEM;
3798
3799 bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) + 512;
3800 bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
3801 sizeof(struct rx_port_stats) + 512;
3802 bp->flags |= BNXT_FLAG_PORT_STATS;
3803
3804 alloc_ext_stats:
3805 /* Display extended statistics only if FW supports it */
3806 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
3807 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
3808 return 0;
3809
3810 if (bp->hw_rx_port_stats_ext)
3811 goto alloc_tx_ext_stats;
3812
3813 bp->hw_rx_port_stats_ext =
3814 dma_alloc_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
3815 &bp->hw_rx_port_stats_ext_map, GFP_KERNEL);
3816 if (!bp->hw_rx_port_stats_ext)
3817 return 0;
3818
3819 alloc_tx_ext_stats:
3820 if (bp->hw_tx_port_stats_ext)
3821 goto alloc_pcie_stats;
3822
3823 if (bp->hwrm_spec_code >= 0x10902 ||
3824 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
3825 bp->hw_tx_port_stats_ext =
3826 dma_alloc_coherent(&pdev->dev,
3827 sizeof(struct tx_port_stats_ext),
3828 &bp->hw_tx_port_stats_ext_map,
3829 GFP_KERNEL);
3830 }
3831 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
3832
3833 alloc_pcie_stats:
3834 if (bp->hw_pcie_stats ||
3835 !(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
3836 return 0;
3837
3838 bp->hw_pcie_stats =
3839 dma_alloc_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
3840 &bp->hw_pcie_stats_map, GFP_KERNEL);
3841 if (!bp->hw_pcie_stats)
3842 return 0;
3843
3844 bp->flags |= BNXT_FLAG_PCIE_STATS;
3845 return 0;
3846 }
3847
3848 static void bnxt_clear_ring_indices(struct bnxt *bp)
3849 {
3850 int i;
3851
3852 if (!bp->bnapi)
3853 return;
3854
3855 for (i = 0; i < bp->cp_nr_rings; i++) {
3856 struct bnxt_napi *bnapi = bp->bnapi[i];
3857 struct bnxt_cp_ring_info *cpr;
3858 struct bnxt_rx_ring_info *rxr;
3859 struct bnxt_tx_ring_info *txr;
3860
3861 if (!bnapi)
3862 continue;
3863
3864 cpr = &bnapi->cp_ring;
3865 cpr->cp_raw_cons = 0;
3866
3867 txr = bnapi->tx_ring;
3868 if (txr) {
3869 txr->tx_prod = 0;
3870 txr->tx_cons = 0;
3871 }
3872
3873 rxr = bnapi->rx_ring;
3874 if (rxr) {
3875 rxr->rx_prod = 0;
3876 rxr->rx_agg_prod = 0;
3877 rxr->rx_sw_agg_prod = 0;
3878 rxr->rx_next_cons = 0;
3879 }
3880 }
3881 }
3882
3883 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
3884 {
3885 #ifdef CONFIG_RFS_ACCEL
3886 int i;
3887
3888 /* We are under rtnl_lock and all of our NAPIs have been disabled,
3889 * so it is safe to delete the hash table.
3890 */
3891 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
3892 struct hlist_head *head;
3893 struct hlist_node *tmp;
3894 struct bnxt_ntuple_filter *fltr;
3895
3896 head = &bp->ntp_fltr_hash_tbl[i];
3897 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
3898 hlist_del(&fltr->hash);
3899 kfree(fltr);
3900 }
3901 }
3902 if (irq_reinit) {
3903 kfree(bp->ntp_fltr_bmap);
3904 bp->ntp_fltr_bmap = NULL;
3905 }
3906 bp->ntp_fltr_count = 0;
3907 #endif
3908 }
3909
3910 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
3911 {
3912 #ifdef CONFIG_RFS_ACCEL
3913 int i, rc = 0;
3914
3915 if (!(bp->flags & BNXT_FLAG_RFS))
3916 return 0;
3917
3918 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
3919 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
3920
3921 bp->ntp_fltr_count = 0;
3922 bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
3923 sizeof(long),
3924 GFP_KERNEL);
3925
3926 if (!bp->ntp_fltr_bmap)
3927 rc = -ENOMEM;
3928
3929 return rc;
3930 #else
3931 return 0;
3932 #endif
3933 }
3934
3935 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
3936 {
3937 bnxt_free_vnic_attributes(bp);
3938 bnxt_free_tx_rings(bp);
3939 bnxt_free_rx_rings(bp);
3940 bnxt_free_cp_rings(bp);
3941 bnxt_free_ntp_fltrs(bp, irq_re_init);
3942 if (irq_re_init) {
3943 bnxt_free_ring_stats(bp);
3944 bnxt_free_ring_grps(bp);
3945 bnxt_free_vnics(bp);
3946 kfree(bp->tx_ring_map);
3947 bp->tx_ring_map = NULL;
3948 kfree(bp->tx_ring);
3949 bp->tx_ring = NULL;
3950 kfree(bp->rx_ring);
3951 bp->rx_ring = NULL;
3952 kfree(bp->bnapi);
3953 bp->bnapi = NULL;
3954 } else {
3955 bnxt_clear_ring_indices(bp);
3956 }
3957 }
3958
3959 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
3960 {
3961 int i, j, rc, size, arr_size;
3962 void *bnapi;
3963
3964 if (irq_re_init) {
3965 /* Allocate bnapi mem pointer array and mem block for
3966 * all queues
3967 */
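/* One allocation holds the pointer array first, followed by the
 * cache-aligned bnxt_napi structures themselves.
 */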
3968 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
3969 bp->cp_nr_rings);
3970 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
3971 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
3972 if (!bnapi)
3973 return -ENOMEM;
3974
3975 bp->bnapi = bnapi;
3976 bnapi += arr_size;
3977 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
3978 bp->bnapi[i] = bnapi;
3979 bp->bnapi[i]->index = i;
3980 bp->bnapi[i]->bp = bp;
3981 if (bp->flags & BNXT_FLAG_CHIP_P5) {
3982 struct bnxt_cp_ring_info *cpr =
3983 &bp->bnapi[i]->cp_ring;
3984
3985 cpr->cp_ring_struct.ring_mem.flags =
3986 BNXT_RMEM_RING_PTE_FLAG;
3987 }
3988 }
3989
3990 bp->rx_ring = kcalloc(bp->rx_nr_rings,
3991 sizeof(struct bnxt_rx_ring_info),
3992 GFP_KERNEL);
3993 if (!bp->rx_ring)
3994 return -ENOMEM;
3995
3996 for (i = 0; i < bp->rx_nr_rings; i++) {
3997 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3998
3999 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4000 rxr->rx_ring_struct.ring_mem.flags =
4001 BNXT_RMEM_RING_PTE_FLAG;
4002 rxr->rx_agg_ring_struct.ring_mem.flags =
4003 BNXT_RMEM_RING_PTE_FLAG;
4004 }
4005 rxr->bnapi = bp->bnapi[i];
4006 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
4007 }
4008
4009 bp->tx_ring = kcalloc(bp->tx_nr_rings,
4010 sizeof(struct bnxt_tx_ring_info),
4011 GFP_KERNEL);
4012 if (!bp->tx_ring)
4013 return -ENOMEM;
4014
4015 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
4016 GFP_KERNEL);
4017
4018 if (!bp->tx_ring_map)
4019 return -ENOMEM;
4020
4021 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4022 j = 0;
4023 else
4024 j = bp->rx_nr_rings;
4025
4026 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
4027 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4028
4029 if (bp->flags & BNXT_FLAG_CHIP_P5)
4030 txr->tx_ring_struct.ring_mem.flags =
4031 BNXT_RMEM_RING_PTE_FLAG;
4032 txr->bnapi = bp->bnapi[j];
4033 bp->bnapi[j]->tx_ring = txr;
4034 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
4035 if (i >= bp->tx_nr_rings_xdp) {
4036 txr->txq_index = i - bp->tx_nr_rings_xdp;
4037 bp->bnapi[j]->tx_int = bnxt_tx_int;
4038 } else {
4039 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
4040 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
4041 }
4042 }
4043
4044 rc = bnxt_alloc_stats(bp);
4045 if (rc)
4046 goto alloc_mem_err;
4047
4048 rc = bnxt_alloc_ntp_fltrs(bp);
4049 if (rc)
4050 goto alloc_mem_err;
4051
4052 rc = bnxt_alloc_vnics(bp);
4053 if (rc)
4054 goto alloc_mem_err;
4055 }
4056
4057 bnxt_init_ring_struct(bp);
4058
4059 rc = bnxt_alloc_rx_rings(bp);
4060 if (rc)
4061 goto alloc_mem_err;
4062
4063 rc = bnxt_alloc_tx_rings(bp);
4064 if (rc)
4065 goto alloc_mem_err;
4066
4067 rc = bnxt_alloc_cp_rings(bp);
4068 if (rc)
4069 goto alloc_mem_err;
4070
4071 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
4072 BNXT_VNIC_UCAST_FLAG;
4073 rc = bnxt_alloc_vnic_attributes(bp);
4074 if (rc)
4075 goto alloc_mem_err;
4076 return 0;
4077
4078 alloc_mem_err:
4079 bnxt_free_mem(bp, true);
4080 return rc;
4081 }
4082
4083 static void bnxt_disable_int(struct bnxt *bp)
4084 {
4085 int i;
4086
4087 if (!bp->bnapi)
4088 return;
4089
4090 for (i = 0; i < bp->cp_nr_rings; i++) {
4091 struct bnxt_napi *bnapi = bp->bnapi[i];
4092 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4093 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4094
4095 if (ring->fw_ring_id != INVALID_HW_RING_ID)
4096 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
4097 }
4098 }
4099
4100 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
4101 {
4102 struct bnxt_napi *bnapi = bp->bnapi[n];
4103 struct bnxt_cp_ring_info *cpr;
4104
4105 cpr = &bnapi->cp_ring;
4106 return cpr->cp_ring_struct.map_idx;
4107 }
4108
4109 static void bnxt_disable_int_sync(struct bnxt *bp)
4110 {
4111 int i;
4112
4113 atomic_inc(&bp->intr_sem);
4114
4115 bnxt_disable_int(bp);
4116 for (i = 0; i < bp->cp_nr_rings; i++) {
4117 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4118
4119 synchronize_irq(bp->irq_tbl[map_idx].vector);
4120 }
4121 }
4122
4123 static void bnxt_enable_int(struct bnxt *bp)
4124 {
4125 int i;
4126
4127 atomic_set(&bp->intr_sem, 0);
4128 for (i = 0; i < bp->cp_nr_rings; i++) {
4129 struct bnxt_napi *bnapi = bp->bnapi[i];
4130 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4131
4132 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
4133 }
4134 }
4135
4136 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
4137 u16 cmpl_ring, u16 target_id)
4138 {
4139 struct input *req = request;
4140
4141 req->req_type = cpu_to_le16(req_type);
4142 req->cmpl_ring = cpu_to_le16(cmpl_ring);
4143 req->target_id = cpu_to_le16(target_id);
4144 if (bnxt_kong_hwrm_message(bp, req))
4145 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
4146 else
4147 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
4148 }
4149
4150 static int bnxt_hwrm_to_stderr(u32 hwrm_err)
4151 {
4152 switch (hwrm_err) {
4153 case HWRM_ERR_CODE_SUCCESS:
4154 return 0;
4155 case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
4156 return -EACCES;
4157 case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
4158 return -ENOSPC;
4159 case HWRM_ERR_CODE_INVALID_PARAMS:
4160 case HWRM_ERR_CODE_INVALID_FLAGS:
4161 case HWRM_ERR_CODE_INVALID_ENABLES:
4162 case HWRM_ERR_CODE_UNSUPPORTED_TLV:
4163 case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR:
4164 return -EINVAL;
4165 case HWRM_ERR_CODE_NO_BUFFER:
4166 return -ENOMEM;
4167 case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
4168 return -EAGAIN;
4169 case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
4170 return -EOPNOTSUPP;
4171 default:
4172 return -EIO;
4173 }
4174 }
4175
4176 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
4177 int timeout, bool silent)
4178 {
4179 int i, intr_process, rc, tmo_count;
4180 struct input *req = msg;
4181 u32 *data = msg;
4182 __le32 *resp_len;
4183 u8 *valid;
4184 u16 cp_ring_id, len = 0;
4185 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
4186 u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
4187 struct hwrm_short_input short_input = {0};
4188 u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
4189 u8 *resp_addr = (u8 *)bp->hwrm_cmd_resp_addr;
4190 u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
4191 u16 dst = BNXT_HWRM_CHNL_CHIMP;
4192
4193 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4194 return -EBUSY;
4195
4196 if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4197 if (msg_len > bp->hwrm_max_ext_req_len ||
4198 !bp->hwrm_short_cmd_req_addr)
4199 return -EINVAL;
4200 }
4201
4202 if (bnxt_hwrm_kong_chnl(bp, req)) {
4203 dst = BNXT_HWRM_CHNL_KONG;
4204 bar_offset = BNXT_GRCPF_REG_KONG_COMM;
4205 doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
4206 resp = bp->hwrm_cmd_kong_resp_addr;
4207 resp_addr = (u8 *)bp->hwrm_cmd_kong_resp_addr;
4208 }
4209
4210 memset(resp, 0, PAGE_SIZE);
4211 cp_ring_id = le16_to_cpu(req->cmpl_ring);
4212 intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
4213
4214 req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst));
4215 /* currently supports only one outstanding message */
4216 if (intr_process)
4217 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
4218
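/* Use the short command format when the firmware supports it or when
 * the request exceeds the normal maximum length: the full request is
 * copied into a separate DMA buffer and only a short descriptor
 * referencing that buffer is written to the communication channel.
 */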
4219 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
4220 msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4221 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
4222 u16 max_msg_len;
4223
4224 /* Set the boundary for the maximum extended request length for the
4225 * short cmd format. If it was passed up from the device, use the
4226 * max supported internal req length.
4227 */
4228 max_msg_len = bp->hwrm_max_ext_req_len;
4229
4230 memcpy(short_cmd_req, req, msg_len);
4231 if (msg_len < max_msg_len)
4232 memset(short_cmd_req + msg_len, 0,
4233 max_msg_len - msg_len);
4234
4235 short_input.req_type = req->req_type;
4236 short_input.signature =
4237 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
4238 short_input.size = cpu_to_le16(msg_len);
4239 short_input.req_addr =
4240 cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
4241
4242 data = (u32 *)&short_input;
4243 msg_len = sizeof(short_input);
4244
4245 /* Sync memory write before updating doorbell */
4246 wmb();
4247
4248 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
4249 }
4250
4251 /* Write request msg to hwrm channel */
4252 __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);
4253
4254 for (i = msg_len; i < max_req_len; i += 4)
4255 writel(0, bp->bar0 + bar_offset + i);
4256
4257 /* Ring channel doorbell */
4258 writel(1, bp->bar0 + doorbell_offset);
4259
4260 if (!pci_is_enabled(bp->pdev))
4261 return 0;
4262
4263 if (!timeout)
4264 timeout = DFLT_HWRM_CMD_TIMEOUT;
4265 /* convert timeout to usec */
4266 timeout *= 1000;
4267
4268 i = 0;
4269 /* Short timeout for the first few iterations:
4270 * number of loops = number of loops for short timeout +
4271 * number of loops for standard timeout.
4272 */
4273 tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
4274 timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
4275 tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
4276 resp_len = (__le32 *)(resp_addr + HWRM_RESP_LEN_OFFSET);
4277
4278 if (intr_process) {
4279 u16 seq_id = bp->hwrm_intr_seq_id;
4280
4281 /* Wait until hwrm response cmpl interrupt is processed */
4282 while (bp->hwrm_intr_seq_id != (u16)~seq_id &&
4283 i++ < tmo_count) {
4284 /* on first few passes, just barely sleep */
4285 if (i < HWRM_SHORT_TIMEOUT_COUNTER)
4286 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4287 HWRM_SHORT_MAX_TIMEOUT);
4288 else
4289 usleep_range(HWRM_MIN_TIMEOUT,
4290 HWRM_MAX_TIMEOUT);
4291 }
4292
4293 if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
4294 if (!silent)
4295 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
4296 le16_to_cpu(req->req_type));
4297 return -EBUSY;
4298 }
4299 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
4300 HWRM_RESP_LEN_SFT;
4301 valid = resp_addr + len - 1;
4302 } else {
4303 int j;
4304
4305 /* Check if response len is updated */
4306 for (i = 0; i < tmo_count; i++) {
4307 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
4308 HWRM_RESP_LEN_SFT;
4309 if (len)
4310 break;
4311 /* on first few passes, just barely sleep */
4312 if (i < HWRM_SHORT_TIMEOUT_COUNTER)
4313 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4314 HWRM_SHORT_MAX_TIMEOUT);
4315 else
4316 usleep_range(HWRM_MIN_TIMEOUT,
4317 HWRM_MAX_TIMEOUT);
4318 }
4319
4320 if (i >= tmo_count) {
4321 if (!silent)
4322 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
4323 HWRM_TOTAL_TIMEOUT(i),
4324 le16_to_cpu(req->req_type),
4325 le16_to_cpu(req->seq_id), len);
4326 return -EBUSY;
4327 }
4328
4329 /* Last byte of resp contains valid bit */
4330 valid = resp_addr + len - 1;
4331 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
4332 /* make sure we read from updated DMA memory */
4333 dma_rmb();
4334 if (*valid)
4335 break;
4336 usleep_range(1, 5);
4337 }
4338
4339 if (j >= HWRM_VALID_BIT_DELAY_USEC) {
4340 if (!silent)
4341 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
4342 HWRM_TOTAL_TIMEOUT(i),
4343 le16_to_cpu(req->req_type),
4344 le16_to_cpu(req->seq_id), len,
4345 *valid);
4346 return -EBUSY;
4347 }
4348 }
4349
4350 /* Zero the valid bit for compatibility. The valid bit in an older
4351 * spec may become a new field in a newer spec. We must make sure
4352 * that a new field not implemented by the old spec reads as zero.
4353 */
4354 *valid = 0;
4355 rc = le16_to_cpu(resp->error_code);
4356 if (rc && !silent)
4357 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
4358 le16_to_cpu(resp->req_type),
4359 le16_to_cpu(resp->seq_id), rc);
4360 return bnxt_hwrm_to_stderr(rc);
4361 }
4362
4363 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4364 {
4365 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
4366 }
4367
4368 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4369 int timeout)
4370 {
4371 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4372 }
4373
4374 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4375 {
4376 int rc;
4377
4378 mutex_lock(&bp->hwrm_cmd_lock);
4379 rc = _hwrm_send_message(bp, msg, msg_len, timeout);
4380 mutex_unlock(&bp->hwrm_cmd_lock);
4381 return rc;
4382 }
4383
4384 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4385 int timeout)
4386 {
4387 int rc;
4388
4389 mutex_lock(&bp->hwrm_cmd_lock);
4390 rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4391 mutex_unlock(&bp->hwrm_cmd_lock);
4392 return rc;
4393 }
4394
4395 int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
4396 int bmap_size)
4397 {
4398 struct hwrm_func_drv_rgtr_input req = {0};
4399 DECLARE_BITMAP(async_events_bmap, 256);
4400 u32 *events = (u32 *)async_events_bmap;
4401 int i;
4402
4403 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
4404
4405 req.enables =
4406 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4407
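/* Build the async event forwarding bitmap from the driver's default
 * event list plus any caller-supplied bitmap, then copy it into the
 * eight 32-bit async_event_fwd words of the request.
 */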
4408 memset(async_events_bmap, 0, sizeof(async_events_bmap));
4409 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
4410 u16 event_id = bnxt_async_events_arr[i];
4411
4412 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
4413 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4414 continue;
4415 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
4416 }
4417 if (bmap && bmap_size) {
4418 for (i = 0; i < bmap_size; i++) {
4419 if (test_bit(i, bmap))
4420 __set_bit(i, async_events_bmap);
4421 }
4422 }
4423
4424 for (i = 0; i < 8; i++)
4425 req.async_event_fwd[i] |= cpu_to_le32(events[i]);
4426
4427 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4428 }
4429
4430 static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
4431 {
4432 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
4433 struct hwrm_func_drv_rgtr_input req = {0};
4434 u32 flags;
4435 int rc;
4436
4437 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
4438
4439 req.enables =
4440 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4441 FUNC_DRV_RGTR_REQ_ENABLES_VER);
4442
4443 req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
4444 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
4445 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
4446 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
4447 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
4448 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT;
4449 req.flags = cpu_to_le32(flags);
4450 req.ver_maj_8b = DRV_VER_MAJ;
4451 req.ver_min_8b = DRV_VER_MIN;
4452 req.ver_upd_8b = DRV_VER_UPD;
4453 req.ver_maj = cpu_to_le16(DRV_VER_MAJ);
4454 req.ver_min = cpu_to_le16(DRV_VER_MIN);
4455 req.ver_upd = cpu_to_le16(DRV_VER_UPD);
4456
4457 if (BNXT_PF(bp)) {
4458 u32 data[8];
4459 int i;
4460
4461 memset(data, 0, sizeof(data));
4462 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4463 u16 cmd = bnxt_vf_req_snif[i];
4464 unsigned int bit, idx;
4465
4466 idx = cmd / 32;
4467 bit = cmd % 32;
4468 data[idx] |= 1 << bit;
4469 }
4470
4471 for (i = 0; i < 8; i++)
4472 req.vf_req_fwd[i] = cpu_to_le32(data[i]);
4473
4474 req.enables |=
4475 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4476 }
4477
4478 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4479 req.flags |= cpu_to_le32(
4480 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4481
4482 mutex_lock(&bp->hwrm_cmd_lock);
4483 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4484 if (!rc && (resp->flags &
4485 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED)))
4486 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4487 mutex_unlock(&bp->hwrm_cmd_lock);
4488 return rc;
4489 }
4490
4491 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4492 {
4493 struct hwrm_func_drv_unrgtr_input req = {0};
4494
4495 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
4496 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4497 }
4498
4499 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4500 {
4501 u32 rc = 0;
4502 struct hwrm_tunnel_dst_port_free_input req = {0};
4503
4504 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
4505 req.tunnel_type = tunnel_type;
4506
4507 switch (tunnel_type) {
4508 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
4509 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
4510 break;
4511 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
4512 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
4513 break;
4514 default:
4515 break;
4516 }
4517
4518 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4519 if (rc)
4520 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4521 rc);
4522 return rc;
4523 }
4524
4525 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4526 u8 tunnel_type)
4527 {
4528 u32 rc = 0;
4529 struct hwrm_tunnel_dst_port_alloc_input req = {0};
4530 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4531
4532 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
4533
4534 req.tunnel_type = tunnel_type;
4535 req.tunnel_dst_port_val = port;
4536
4537 mutex_lock(&bp->hwrm_cmd_lock);
4538 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4539 if (rc) {
4540 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4541 rc);
4542 goto err_out;
4543 }
4544
4545 switch (tunnel_type) {
4546 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
4547 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
4548 break;
4549 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
4550 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
4551 break;
4552 default:
4553 break;
4554 }
4555
4556 err_out:
4557 mutex_unlock(&bp->hwrm_cmd_lock);
4558 return rc;
4559 }
4560
4561 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4562 {
4563 struct hwrm_cfa_l2_set_rx_mask_input req = {0};
4564 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4565
4566 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
4567 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4568
4569 req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4570 req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4571 req.mask = cpu_to_le32(vnic->rx_mask);
4572 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4573 }
4574
4575 #ifdef CONFIG_RFS_ACCEL
4576 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4577 struct bnxt_ntuple_filter *fltr)
4578 {
4579 struct hwrm_cfa_ntuple_filter_free_input req = {0};
4580
4581 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
4582 req.ntuple_filter_id = fltr->filter_id;
4583 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4584 }
4585
4586 #define BNXT_NTP_FLTR_FLAGS \
4587 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
4588 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
4589 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
4590 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
4591 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
4592 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
4593 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
4594 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
4595 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
4596 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
4597 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
4598 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
4599 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
4600 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
4601
4602 #define BNXT_NTP_TUNNEL_FLTR_FLAG \
4603 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
4604
4605 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4606 struct bnxt_ntuple_filter *fltr)
4607 {
4608 struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
4609 struct hwrm_cfa_ntuple_filter_alloc_output *resp;
4610 struct flow_keys *keys = &fltr->fkeys;
4611 struct bnxt_vnic_info *vnic;
4612 u32 dst_ena = 0;
4613 int rc = 0;
4614
4615 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
4616 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
4617
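/* If the firmware supports RFS ring table indexing, steer the flow to
 * a ring index within the default VNIC; otherwise use the VNIC
 * dedicated to the target rx ring.
 */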
4618 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX) {
4619 dst_ena = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX;
4620 req.rfs_ring_tbl_idx = cpu_to_le16(fltr->rxq);
4621 vnic = &bp->vnic_info[0];
4622 } else {
4623 vnic = &bp->vnic_info[fltr->rxq + 1];
4624 }
4625 req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
4626 req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS | dst_ena);
4627
4628 req.ethertype = htons(ETH_P_IP);
4629 memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
4630 req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
4631 req.ip_protocol = keys->basic.ip_proto;
4632
4633 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4634 int i;
4635
4636 req.ethertype = htons(ETH_P_IPV6);
4637 req.ip_addr_type =
4638 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
4639 *(struct in6_addr *)&req.src_ipaddr[0] =
4640 keys->addrs.v6addrs.src;
4641 *(struct in6_addr *)&req.dst_ipaddr[0] =
4642 keys->addrs.v6addrs.dst;
4643 for (i = 0; i < 4; i++) {
4644 req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4645 req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4646 }
4647 } else {
4648 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
4649 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4650 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4651 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4652 }
4653 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
4654 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4655 req.tunnel_type =
4656 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4657 }
4658
4659 req.src_port = keys->ports.src;
4660 req.src_port_mask = cpu_to_be16(0xffff);
4661 req.dst_port = keys->ports.dst;
4662 req.dst_port_mask = cpu_to_be16(0xffff);
4663
4664 mutex_lock(&bp->hwrm_cmd_lock);
4665 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4666 if (!rc) {
4667 resp = bnxt_get_hwrm_resp_addr(bp, &req);
4668 fltr->filter_id = resp->ntuple_filter_id;
4669 }
4670 mutex_unlock(&bp->hwrm_cmd_lock);
4671 return rc;
4672 }
4673 #endif
4674
4675 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
4676 u8 *mac_addr)
4677 {
4678 u32 rc = 0;
4679 struct hwrm_cfa_l2_filter_alloc_input req = {0};
4680 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4681
4682 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
4683 req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
4684 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
4685 req.flags |=
4686 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
4687 req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
4688 req.enables =
4689 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
4690 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
4691 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
4692 memcpy(req.l2_addr, mac_addr, ETH_ALEN);
4693 req.l2_addr_mask[0] = 0xff;
4694 req.l2_addr_mask[1] = 0xff;
4695 req.l2_addr_mask[2] = 0xff;
4696 req.l2_addr_mask[3] = 0xff;
4697 req.l2_addr_mask[4] = 0xff;
4698 req.l2_addr_mask[5] = 0xff;
4699
4700 mutex_lock(&bp->hwrm_cmd_lock);
4701 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4702 if (!rc)
4703 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
4704 resp->l2_filter_id;
4705 mutex_unlock(&bp->hwrm_cmd_lock);
4706 return rc;
4707 }
4708
4709 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
4710 {
4711 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
4712 int rc = 0;
4713
4714 /* Any associated ntuple filters will also be cleared by firmware. */
4715 mutex_lock(&bp->hwrm_cmd_lock);
4716 for (i = 0; i < num_of_vnics; i++) {
4717 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4718
4719 for (j = 0; j < vnic->uc_filter_count; j++) {
4720 struct hwrm_cfa_l2_filter_free_input req = {0};
4721
4722 bnxt_hwrm_cmd_hdr_init(bp, &req,
4723 HWRM_CFA_L2_FILTER_FREE, -1, -1);
4724
4725 req.l2_filter_id = vnic->fw_l2_filter_id[j];
4726
4727 rc = _hwrm_send_message(bp, &req, sizeof(req),
4728 HWRM_CMD_TIMEOUT);
4729 }
4730 vnic->uc_filter_count = 0;
4731 }
4732 mutex_unlock(&bp->hwrm_cmd_lock);
4733
4734 return rc;
4735 }
4736
4737 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
4738 {
4739 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4740 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
4741 struct hwrm_vnic_tpa_cfg_input req = {0};
4742
4743 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
4744 return 0;
4745
4746 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
4747
4748 if (tpa_flags) {
4749 u16 mss = bp->dev->mtu - 40;
4750 u32 nsegs, n, segs = 0, flags;
4751
4752 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
4753 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
4754 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
4755 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
4756 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
4757 if (tpa_flags & BNXT_FLAG_GRO)
4758 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
4759
4760 req.flags = cpu_to_le32(flags);
4761
4762 req.enables =
4763 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
4764 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
4765 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
4766
4767 /* The number of segs is in log2 units, and the first packet is
4768 * not counted as part of these units.
4769 */
4770 if (mss <= BNXT_RX_PAGE_SIZE) {
4771 n = BNXT_RX_PAGE_SIZE / mss;
4772 nsegs = (MAX_SKB_FRAGS - 1) * n;
4773 } else {
4774 n = mss / BNXT_RX_PAGE_SIZE;
4775 if (mss & (BNXT_RX_PAGE_SIZE - 1))
4776 n++;
4777 nsegs = (MAX_SKB_FRAGS - n) / n;
4778 }
4779
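/* P5 chips use a fixed log2 segment count and the driver's max_tpa
 * limit; other chips derive the segment count from nsegs above.
 */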
4780 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4781 segs = MAX_TPA_SEGS_P5;
4782 max_aggs = bp->max_tpa;
4783 } else {
4784 segs = ilog2(nsegs);
4785 }
4786 req.max_agg_segs = cpu_to_le16(segs);
4787 req.max_aggs = cpu_to_le16(max_aggs);
4788
4789 req.min_agg_len = cpu_to_le32(512);
4790 }
4791 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4792
4793 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4794 }
4795
4796 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
4797 {
4798 struct bnxt_ring_grp_info *grp_info;
4799
4800 grp_info = &bp->grp_info[ring->grp_idx];
4801 return grp_info->cp_fw_ring_id;
4802 }
4803
4804 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
4805 {
4806 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4807 struct bnxt_napi *bnapi = rxr->bnapi;
4808 struct bnxt_cp_ring_info *cpr;
4809
4810 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
4811 return cpr->cp_ring_struct.fw_ring_id;
4812 } else {
4813 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
4814 }
4815 }
4816
4817 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
4818 {
4819 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4820 struct bnxt_napi *bnapi = txr->bnapi;
4821 struct bnxt_cp_ring_info *cpr;
4822
4823 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
4824 return cpr->cp_ring_struct.fw_ring_id;
4825 } else {
4826 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
4827 }
4828 }
4829
4830 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
4831 {
4832 u32 i, j, max_rings;
4833 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4834 struct hwrm_vnic_rss_cfg_input req = {0};
4835
4836 if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
4837 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
4838 return 0;
4839
4840 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
4841 if (set_rss) {
4842 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
4843 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
4844 if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
4845 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4846 max_rings = bp->rx_nr_rings - 1;
4847 else
4848 max_rings = bp->rx_nr_rings;
4849 } else {
4850 max_rings = 1;
4851 }
4852
4853 /* Fill the RSS indirection table with ring group ids */
4854 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
4855 if (j == max_rings)
4856 j = 0;
4857 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
4858 }
4859
4860 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
4861 req.hash_key_tbl_addr =
4862 cpu_to_le64(vnic->rss_hash_key_dma_addr);
4863 }
4864 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
4865 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4866 }
4867
4868 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
4869 {
4870 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4871 u32 i, j, k, nr_ctxs, max_rings = bp->rx_nr_rings;
4872 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
4873 struct hwrm_vnic_rss_cfg_input req = {0};
4874
4875 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
4876 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4877 if (!set_rss) {
4878 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4879 return 0;
4880 }
4881 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
4882 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
4883 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
4884 req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
4885 nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
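/* Each RSS context covers 64 (rx ring, rx completion ring) ID pairs
 * in the ring table; the rx ring index wraps around after max_rings.
 */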
4886 for (i = 0, k = 0; i < nr_ctxs; i++) {
4887 __le16 *ring_tbl = vnic->rss_table;
4888 int rc;
4889
4890 req.ring_table_pair_index = i;
4891 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
4892 for (j = 0; j < 64; j++) {
4893 u16 ring_id;
4894
4895 ring_id = rxr->rx_ring_struct.fw_ring_id;
4896 *ring_tbl++ = cpu_to_le16(ring_id);
4897 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
4898 *ring_tbl++ = cpu_to_le16(ring_id);
4899 rxr++;
4900 k++;
4901 if (k == max_rings) {
4902 k = 0;
4903 rxr = &bp->rx_ring[0];
4904 }
4905 }
4906 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4907 if (rc)
4908 return rc;
4909 }
4910 return 0;
4911 }
4912
4913 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
4914 {
4915 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4916 struct hwrm_vnic_plcmodes_cfg_input req = {0};
4917
4918 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
4919 req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
4920 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
4921 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
4922 req.enables =
4923 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
4924 VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
4925 /* thresholds not implemented in firmware yet */
4926 req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
4927 req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
4928 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4929 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4930 }
4931
4932 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
4933 u16 ctx_idx)
4934 {
4935 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
4936
4937 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
4938 req.rss_cos_lb_ctx_id =
4939 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
4940
4941 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4942 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
4943 }
4944
4945 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
4946 {
4947 int i, j;
4948
4949 for (i = 0; i < bp->nr_vnics; i++) {
4950 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4951
4952 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
4953 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
4954 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
4955 }
4956 }
4957 bp->rsscos_nr_ctxs = 0;
4958 }
4959
4960 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
4961 {
4962 int rc;
4963 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
4964 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
4965 bp->hwrm_cmd_resp_addr;
4966
4967 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
4968 -1);
4969
4970 mutex_lock(&bp->hwrm_cmd_lock);
4971 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4972 if (!rc)
4973 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
4974 le16_to_cpu(resp->rss_cos_lb_ctx_id);
4975 mutex_unlock(&bp->hwrm_cmd_lock);
4976
4977 return rc;
4978 }
4979
4980 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
4981 {
4982 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
4983 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
4984 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
4985 }
4986
4987 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
4988 {
4989 unsigned int ring = 0, grp_idx;
4990 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4991 struct hwrm_vnic_cfg_input req = {0};
4992 u16 def_vlan = 0;
4993
4994 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
4995
4996 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4997 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
4998
4999 req.default_rx_ring_id =
5000 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
5001 req.default_cmpl_ring_id =
5002 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
5003 req.enables =
5004 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
5005 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
5006 goto vnic_mru;
5007 }
5008 req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
5009 /* Only RSS is supported for now; TBD: COS & LB */
5010 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
5011 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5012 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5013 VNIC_CFG_REQ_ENABLES_MRU);
5014 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
5015 req.rss_rule =
5016 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
5017 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5018 VNIC_CFG_REQ_ENABLES_MRU);
5019 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
5020 } else {
5021 req.rss_rule = cpu_to_le16(0xffff);
5022 }
5023
5024 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
5025 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
5026 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
5027 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
5028 } else {
5029 req.cos_rule = cpu_to_le16(0xffff);
5030 }
5031
5032 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
5033 ring = 0;
5034 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
5035 ring = vnic_id - 1;
5036 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
5037 ring = bp->rx_nr_rings - 1;
5038
5039 grp_idx = bp->rx_ring[ring].bnapi->index;
5040 req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
5041 req.lb_rule = cpu_to_le16(0xffff);
5042 vnic_mru:
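/* The MRU adds the Ethernet header, FCS and one VLAN tag on top of the MTU. */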
5043 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
5044 VLAN_HLEN);
5045
5046 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5047 #ifdef CONFIG_BNXT_SRIOV
5048 if (BNXT_VF(bp))
5049 def_vlan = bp->vf.vlan;
5050 #endif
5051 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
5052 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
5053 if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
5054 req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
5055
5056 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5057 }
5058
5059 static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
5060 {
5061 u32 rc = 0;
5062
5063 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
5064 struct hwrm_vnic_free_input req = {0};
5065
5066 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
5067 req.vnic_id =
5068 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
5069
5070 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5071 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
5072 }
5073 return rc;
5074 }
5075
5076 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
5077 {
5078 u16 i;
5079
5080 for (i = 0; i < bp->nr_vnics; i++)
5081 bnxt_hwrm_vnic_free_one(bp, i);
5082 }
5083
5084 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
5085 unsigned int start_rx_ring_idx,
5086 unsigned int nr_rings)
5087 {
5088 int rc = 0;
5089 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
5090 struct hwrm_vnic_alloc_input req = {0};
5091 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5092 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5093
5094 if (bp->flags & BNXT_FLAG_CHIP_P5)
5095 goto vnic_no_ring_grps;
5096
5097 /* map ring groups to this vnic */
5098 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
5099 grp_idx = bp->rx_ring[i].bnapi->index;
5100 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
5101 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
5102 j, nr_rings);
5103 break;
5104 }
5105 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
5106 }
5107
5108 vnic_no_ring_grps:
5109 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
5110 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
5111 if (vnic_id == 0)
5112 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
5113
5114 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
5115
5116 mutex_lock(&bp->hwrm_cmd_lock);
5117 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5118 if (!rc)
5119 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
5120 mutex_unlock(&bp->hwrm_cmd_lock);
5121 return rc;
5122 }
5123
5124 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
5125 {
5126 struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5127 struct hwrm_vnic_qcaps_input req = {0};
5128 int rc;
5129
5130 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
5131 bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
5132 if (bp->hwrm_spec_code < 0x10600)
5133 return 0;
5134
5135 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
5136 mutex_lock(&bp->hwrm_cmd_lock);
5137 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5138 if (!rc) {
5139 u32 flags = le32_to_cpu(resp->flags);
5140
5141 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
5142 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
5143 bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
5144 if (flags &
5145 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
5146 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
5147 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
5148 if (bp->max_tpa_v2)
5149 bp->hw_ring_stats_size =
5150 sizeof(struct ctx_hw_stats_ext);
5151 }
5152 mutex_unlock(&bp->hwrm_cmd_lock);
5153 return rc;
5154 }
5155
5156 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
5157 {
5158 u16 i;
5159 u32 rc = 0;
5160
5161 if (bp->flags & BNXT_FLAG_CHIP_P5)
5162 return 0;
5163
5164 mutex_lock(&bp->hwrm_cmd_lock);
5165 for (i = 0; i < bp->rx_nr_rings; i++) {
5166 struct hwrm_ring_grp_alloc_input req = {0};
5167 struct hwrm_ring_grp_alloc_output *resp =
5168 bp->hwrm_cmd_resp_addr;
5169 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
5170
5171 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
5172
5173 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5174 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5175 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5176 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
5177
5178 rc = _hwrm_send_message(bp, &req, sizeof(req),
5179 HWRM_CMD_TIMEOUT);
5180 if (rc)
5181 break;
5182
5183 bp->grp_info[grp_idx].fw_grp_id =
5184 le32_to_cpu(resp->ring_group_id);
5185 }
5186 mutex_unlock(&bp->hwrm_cmd_lock);
5187 return rc;
5188 }
5189
5190 static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
5191 {
5192 u16 i;
5193 u32 rc = 0;
5194 struct hwrm_ring_grp_free_input req = {0};
5195
5196 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
5197 return 0;
5198
5199 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
5200
5201 mutex_lock(&bp->hwrm_cmd_lock);
5202 for (i = 0; i < bp->cp_nr_rings; i++) {
5203 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5204 continue;
5205 req.ring_group_id =
5206 cpu_to_le32(bp->grp_info[i].fw_grp_id);
5207
5208 rc = _hwrm_send_message(bp, &req, sizeof(req),
5209 HWRM_CMD_TIMEOUT);
5210 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5211 }
5212 mutex_unlock(&bp->hwrm_cmd_lock);
5213 return rc;
5214 }
5215
5216 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
5217 struct bnxt_ring_struct *ring,
5218 u32 ring_type, u32 map_index)
5219 {
5220 int rc = 0, err = 0;
5221 struct hwrm_ring_alloc_input req = {0};
5222 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5223 struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
5224 struct bnxt_ring_grp_info *grp_info;
5225 u16 ring_id;
5226
5227 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
5228
5229 req.enables = 0;
5230 if (rmem->nr_pages > 1) {
5231 req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
5232 /* Page size is in log2 units */
5233 req.page_size = BNXT_PAGE_SHIFT;
5234 req.page_tbl_depth = 1;
5235 } else {
5236 req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
5237 }
5238 req.fbo = 0;
5239 /* Association of ring index with doorbell index and MSIX number */
5240 req.logical_id = cpu_to_le16(map_index);
5241
5242 switch (ring_type) {
5243 case HWRM_RING_ALLOC_TX: {
5244 struct bnxt_tx_ring_info *txr;
5245
5246 txr = container_of(ring, struct bnxt_tx_ring_info,
5247 tx_ring_struct);
5248 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
5249 /* Association of transmit ring with completion ring */
5250 grp_info = &bp->grp_info[ring->grp_idx];
5251 req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
5252 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
5253 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5254 req.queue_id = cpu_to_le16(ring->queue_id);
5255 break;
5256 }
5257 case HWRM_RING_ALLOC_RX:
5258 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5259 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
5260 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5261 u16 flags = 0;
5262
5263 /* Association of rx ring with stats context */
5264 grp_info = &bp->grp_info[ring->grp_idx];
5265 req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
5266 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5267 req.enables |= cpu_to_le32(
5268 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5269 if (NET_IP_ALIGN == 2)
5270 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
5271 req.flags = cpu_to_le16(flags);
5272 }
5273 break;
5274 case HWRM_RING_ALLOC_AGG:
5275 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5276 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
5277 /* Association of agg ring with rx ring */
5278 grp_info = &bp->grp_info[ring->grp_idx];
5279 req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
5280 req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
5281 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5282 req.enables |= cpu_to_le32(
5283 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
5284 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5285 } else {
5286 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5287 }
5288 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
5289 break;
5290 case HWRM_RING_ALLOC_CMPL:
5291 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
5292 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
5293 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5294 /* Association of cp ring with nq */
5295 grp_info = &bp->grp_info[map_index];
5296 req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
5297 req.cq_handle = cpu_to_le64(ring->handle);
5298 req.enables |= cpu_to_le32(
5299 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
5300 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
5301 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5302 }
5303 break;
5304 case HWRM_RING_ALLOC_NQ:
5305 req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
5306 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
5307 if (bp->flags & BNXT_FLAG_USING_MSIX)
5308 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5309 break;
5310 default:
5311 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5312 ring_type);
5313 return -1;
5314 }
5315
5316 mutex_lock(&bp->hwrm_cmd_lock);
5317 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5318 err = le16_to_cpu(resp->error_code);
5319 ring_id = le16_to_cpu(resp->ring_id);
5320 mutex_unlock(&bp->hwrm_cmd_lock);
5321
5322 if (rc || err) {
5323 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
5324 ring_type, rc, err);
5325 return -EIO;
5326 }
5327 ring->fw_ring_id = ring_id;
5328 return rc;
5329 }
5330
5331 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
5332 {
5333 int rc;
5334
5335 if (BNXT_PF(bp)) {
5336 struct hwrm_func_cfg_input req = {0};
5337
5338 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
5339 req.fid = cpu_to_le16(0xffff);
5340 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5341 req.async_event_cr = cpu_to_le16(idx);
5342 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5343 } else {
5344 struct hwrm_func_vf_cfg_input req = {0};
5345
5346 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
5347 req.enables =
5348 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5349 req.async_event_cr = cpu_to_le16(idx);
5350 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5351 }
5352 return rc;
5353 }
5354
5355 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5356 u32 map_idx, u32 xid)
5357 {
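/* P5 chips use 64-bit doorbells at a fixed BAR offset (PF vs VF);
 * older chips use 32-bit doorbells spaced 0x80 apart per MSIX index.
 */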
5358 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5359 if (BNXT_PF(bp))
5360 db->doorbell = bp->bar1 + 0x10000;
5361 else
5362 db->doorbell = bp->bar1 + 0x4000;
5363 switch (ring_type) {
5364 case HWRM_RING_ALLOC_TX:
5365 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
5366 break;
5367 case HWRM_RING_ALLOC_RX:
5368 case HWRM_RING_ALLOC_AGG:
5369 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
5370 break;
5371 case HWRM_RING_ALLOC_CMPL:
5372 db->db_key64 = DBR_PATH_L2;
5373 break;
5374 case HWRM_RING_ALLOC_NQ:
5375 db->db_key64 = DBR_PATH_L2;
5376 break;
5377 }
5378 db->db_key64 |= (u64)xid << DBR_XID_SFT;
5379 } else {
5380 db->doorbell = bp->bar1 + map_idx * 0x80;
5381 switch (ring_type) {
5382 case HWRM_RING_ALLOC_TX:
5383 db->db_key32 = DB_KEY_TX;
5384 break;
5385 case HWRM_RING_ALLOC_RX:
5386 case HWRM_RING_ALLOC_AGG:
5387 db->db_key32 = DB_KEY_RX;
5388 break;
5389 case HWRM_RING_ALLOC_CMPL:
5390 db->db_key32 = DB_KEY_CP;
5391 break;
5392 }
5393 }
5394 }
5395
5396 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5397 {
5398 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
5399 int i, rc = 0;
5400 u32 type;
5401
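/* Allocate firmware rings in dependency order: completion/NQ rings
 * first, then TX, RX and finally aggregation rings, since the later
 * rings reference the earlier ones.
 */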
5402 if (bp->flags & BNXT_FLAG_CHIP_P5)
5403 type = HWRM_RING_ALLOC_NQ;
5404 else
5405 type = HWRM_RING_ALLOC_CMPL;
5406 for (i = 0; i < bp->cp_nr_rings; i++) {
5407 struct bnxt_napi *bnapi = bp->bnapi[i];
5408 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5409 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5410 u32 map_idx = ring->map_idx;
5411 unsigned int vector;
5412
5413 vector = bp->irq_tbl[map_idx].vector;
5414 disable_irq_nosync(vector);
5415 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5416 if (rc) {
5417 enable_irq(vector);
5418 goto err_out;
5419 }
5420 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5421 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5422 enable_irq(vector);
5423 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
5424
5425 if (!i) {
5426 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5427 if (rc)
5428 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5429 }
5430 }
5431
5432 type = HWRM_RING_ALLOC_TX;
5433 for (i = 0; i < bp->tx_nr_rings; i++) {
5434 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5435 struct bnxt_ring_struct *ring;
5436 u32 map_idx;
5437
5438 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5439 struct bnxt_napi *bnapi = txr->bnapi;
5440 struct bnxt_cp_ring_info *cpr, *cpr2;
5441 u32 type2 = HWRM_RING_ALLOC_CMPL;
5442
5443 cpr = &bnapi->cp_ring;
5444 cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5445 ring = &cpr2->cp_ring_struct;
5446 ring->handle = BNXT_TX_HDL;
5447 map_idx = bnapi->index;
5448 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5449 if (rc)
5450 goto err_out;
5451 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5452 ring->fw_ring_id);
5453 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5454 }
5455 ring = &txr->tx_ring_struct;
5456 map_idx = i;
5457 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5458 if (rc)
5459 goto err_out;
5460 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
5461 }
5462
5463 type = HWRM_RING_ALLOC_RX;
5464 for (i = 0; i < bp->rx_nr_rings; i++) {
5465 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5466 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5467 struct bnxt_napi *bnapi = rxr->bnapi;
5468 u32 map_idx = bnapi->index;
5469
5470 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5471 if (rc)
5472 goto err_out;
5473 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5474 /* If we have agg rings, post agg buffers first. */
5475 if (!agg_rings)
5476 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5477 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
5478 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5479 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5480 u32 type2 = HWRM_RING_ALLOC_CMPL;
5481 struct bnxt_cp_ring_info *cpr2;
5482
5483 cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5484 ring = &cpr2->cp_ring_struct;
5485 ring->handle = BNXT_RX_HDL;
5486 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5487 if (rc)
5488 goto err_out;
5489 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5490 ring->fw_ring_id);
5491 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5492 }
5493 }
5494
5495 if (agg_rings) {
5496 type = HWRM_RING_ALLOC_AGG;
5497 for (i = 0; i < bp->rx_nr_rings; i++) {
5498 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5499 struct bnxt_ring_struct *ring =
5500 &rxr->rx_agg_ring_struct;
5501 u32 grp_idx = ring->grp_idx;
5502 u32 map_idx = grp_idx + bp->rx_nr_rings;
5503
5504 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5505 if (rc)
5506 goto err_out;
5507
5508 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5509 ring->fw_ring_id);
5510 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
5511 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5512 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
5513 }
5514 }
5515 err_out:
5516 return rc;
5517 }
5518
5519 static int hwrm_ring_free_send_msg(struct bnxt *bp,
5520 struct bnxt_ring_struct *ring,
5521 u32 ring_type, int cmpl_ring_id)
5522 {
5523 int rc;
5524 struct hwrm_ring_free_input req = {0};
5525 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
5526 u16 error_code;
5527
5528 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
5529 return 0;
5530
5531 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
5532 req.ring_type = ring_type;
5533 req.ring_id = cpu_to_le16(ring->fw_ring_id);
5534
5535 mutex_lock(&bp->hwrm_cmd_lock);
5536 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5537 error_code = le16_to_cpu(resp->error_code);
5538 mutex_unlock(&bp->hwrm_cmd_lock);
5539
5540 if (rc || error_code) {
5541 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
5542 ring_type, rc, error_code);
5543 return -EIO;
5544 }
5545 return 0;
5546 }
5547
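/* Free the TX, RX and aggregation rings first, then disable interrupts
 * and free the completion/NQ rings last, since the IRQ doorbells depend
 * on the completion rings.
 */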
5548 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
5549 {
5550 u32 type;
5551 int i;
5552
5553 if (!bp->bnapi)
5554 return;
5555
5556 for (i = 0; i < bp->tx_nr_rings; i++) {
5557 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5558 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
5559
5560 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5561 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
5562
5563 hwrm_ring_free_send_msg(bp, ring,
5564 RING_FREE_REQ_RING_TYPE_TX,
5565 close_path ? cmpl_ring_id :
5566 INVALID_HW_RING_ID);
5567 ring->fw_ring_id = INVALID_HW_RING_ID;
5568 }
5569 }
5570
5571 for (i = 0; i < bp->rx_nr_rings; i++) {
5572 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5573 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5574 u32 grp_idx = rxr->bnapi->index;
5575
5576 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5577 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5578
5579 hwrm_ring_free_send_msg(bp, ring,
5580 RING_FREE_REQ_RING_TYPE_RX,
5581 close_path ? cmpl_ring_id :
5582 INVALID_HW_RING_ID);
5583 ring->fw_ring_id = INVALID_HW_RING_ID;
5584 bp->grp_info[grp_idx].rx_fw_ring_id =
5585 INVALID_HW_RING_ID;
5586 }
5587 }
5588
5589 if (bp->flags & BNXT_FLAG_CHIP_P5)
5590 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
5591 else
5592 type = RING_FREE_REQ_RING_TYPE_RX;
5593 for (i = 0; i < bp->rx_nr_rings; i++) {
5594 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5595 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
5596 u32 grp_idx = rxr->bnapi->index;
5597
5598 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5599 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5600
5601 hwrm_ring_free_send_msg(bp, ring, type,
5602 close_path ? cmpl_ring_id :
5603 INVALID_HW_RING_ID);
5604 ring->fw_ring_id = INVALID_HW_RING_ID;
5605 bp->grp_info[grp_idx].agg_fw_ring_id =
5606 INVALID_HW_RING_ID;
5607 }
5608 }
5609
5610 /* The completion rings are about to be freed. After that the
5611 * IRQ doorbell will not work anymore. So we need to disable
5612 * IRQ here.
5613 */
5614 bnxt_disable_int_sync(bp);
5615
5616 if (bp->flags & BNXT_FLAG_CHIP_P5)
5617 type = RING_FREE_REQ_RING_TYPE_NQ;
5618 else
5619 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
5620 for (i = 0; i < bp->cp_nr_rings; i++) {
5621 struct bnxt_napi *bnapi = bp->bnapi[i];
5622 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5623 struct bnxt_ring_struct *ring;
5624 int j;
5625
5626 for (j = 0; j < 2; j++) {
5627 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
5628
5629 if (cpr2) {
5630 ring = &cpr2->cp_ring_struct;
5631 if (ring->fw_ring_id == INVALID_HW_RING_ID)
5632 continue;
5633 hwrm_ring_free_send_msg(bp, ring,
5634 RING_FREE_REQ_RING_TYPE_L2_CMPL,
5635 INVALID_HW_RING_ID);
5636 ring->fw_ring_id = INVALID_HW_RING_ID;
5637 }
5638 }
5639 ring = &cpr->cp_ring_struct;
5640 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5641 hwrm_ring_free_send_msg(bp, ring, type,
5642 INVALID_HW_RING_ID);
5643 ring->fw_ring_id = INVALID_HW_RING_ID;
5644 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
5645 }
5646 }
5647 }
5648
5649 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
5650 bool shared);
5651
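/* Query the firmware for the resources currently reserved for this
 * function and cache them in bp->hw_resc.
 */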
5652 static int bnxt_hwrm_get_rings(struct bnxt *bp)
5653 {
5654 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5655 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5656 struct hwrm_func_qcfg_input req = {0};
5657 int rc;
5658
5659 if (bp->hwrm_spec_code < 0x10601)
5660 return 0;
5661
5662 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5663 req.fid = cpu_to_le16(0xffff);
5664 mutex_lock(&bp->hwrm_cmd_lock);
5665 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5666 if (rc) {
5667 mutex_unlock(&bp->hwrm_cmd_lock);
5668 return rc;
5669 }
5670
5671 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
5672 if (BNXT_NEW_RM(bp)) {
5673 u16 cp, stats;
5674
5675 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
5676 hw_resc->resv_hw_ring_grps =
5677 le32_to_cpu(resp->alloc_hw_ring_grps);
5678 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
5679 cp = le16_to_cpu(resp->alloc_cmpl_rings);
5680 stats = le16_to_cpu(resp->alloc_stat_ctx);
5681 hw_resc->resv_irqs = cp;
5682 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5683 int rx = hw_resc->resv_rx_rings;
5684 int tx = hw_resc->resv_tx_rings;
5685
5686 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5687 rx >>= 1;
5688 if (cp < (rx + tx)) {
5689 bnxt_trim_rings(bp, &rx, &tx, cp, false);
5690 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5691 rx <<= 1;
5692 hw_resc->resv_rx_rings = rx;
5693 hw_resc->resv_tx_rings = tx;
5694 }
5695 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
5696 hw_resc->resv_hw_ring_grps = rx;
5697 }
5698 hw_resc->resv_cp_rings = cp;
5699 hw_resc->resv_stat_ctxs = stats;
5700 }
5701 mutex_unlock(&bp->hwrm_cmd_lock);
5702 return 0;
5703 }
5704
5705 /* Caller must hold bp->hwrm_cmd_lock */
5706 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
5707 {
5708 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5709 struct hwrm_func_qcfg_input req = {0};
5710 int rc;
5711
5712 if (bp->hwrm_spec_code < 0x10601)
5713 return 0;
5714
5715 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5716 req.fid = cpu_to_le16(fid);
5717 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5718 if (!rc)
5719 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
5720
5721 return rc;
5722 }
5723
5724 static bool bnxt_rfs_supported(struct bnxt *bp);
5725
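/* Build (but do not send) a HWRM_FUNC_CFG request that reserves the
 * requested rings and related resources for the PF.
 */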
5726 static void
5727 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
5728 int tx_rings, int rx_rings, int ring_grps,
5729 int cp_rings, int stats, int vnics)
5730 {
5731 u32 enables = 0;
5732
5733 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
5734 req->fid = cpu_to_le16(0xffff);
5735 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
5736 req->num_tx_rings = cpu_to_le16(tx_rings);
5737 if (BNXT_NEW_RM(bp)) {
5738 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
5739 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5740 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5741 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
5742 enables |= tx_rings + ring_grps ?
5743 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
5744 enables |= rx_rings ?
5745 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5746 } else {
5747 enables |= cp_rings ?
5748 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
5749 enables |= ring_grps ?
5750 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
5751 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5752 }
5753 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
5754
5755 req->num_rx_rings = cpu_to_le16(rx_rings);
5756 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5757 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
5758 req->num_msix = cpu_to_le16(cp_rings);
5759 req->num_rsscos_ctxs =
5760 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
5761 } else {
5762 req->num_cmpl_rings = cpu_to_le16(cp_rings);
5763 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
5764 req->num_rsscos_ctxs = cpu_to_le16(1);
5765 if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
5766 bnxt_rfs_supported(bp))
5767 req->num_rsscos_ctxs =
5768 cpu_to_le16(ring_grps + 1);
5769 }
5770 req->num_stat_ctxs = cpu_to_le16(stats);
5771 req->num_vnics = cpu_to_le16(vnics);
5772 }
5773 req->enables = cpu_to_le32(enables);
5774 }
5775
5776 static void
5777 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
5778 struct hwrm_func_vf_cfg_input *req, int tx_rings,
5779 int rx_rings, int ring_grps, int cp_rings,
5780 int stats, int vnics)
5781 {
5782 u32 enables = 0;
5783
5784 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
5785 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
5786 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
5787 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5788 enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5789 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5790 enables |= tx_rings + ring_grps ?
5791 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
5792 } else {
5793 enables |= cp_rings ?
5794 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
5795 enables |= ring_grps ?
5796 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
5797 }
5798 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
5799 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
5800
5801 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
5802 req->num_tx_rings = cpu_to_le16(tx_rings);
5803 req->num_rx_rings = cpu_to_le16(rx_rings);
5804 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5805 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
5806 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
5807 } else {
5808 req->num_cmpl_rings = cpu_to_le16(cp_rings);
5809 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
5810 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
5811 }
5812 req->num_stat_ctxs = cpu_to_le16(stats);
5813 req->num_vnics = cpu_to_le16(vnics);
5814
5815 req->enables = cpu_to_le32(enables);
5816 }
5817
5818 static int
5819 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
5820 int ring_grps, int cp_rings, int stats, int vnics)
5821 {
5822 struct hwrm_func_cfg_input req = {0};
5823 int rc;
5824
5825 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
5826 cp_rings, stats, vnics);
5827 if (!req.enables)
5828 return 0;
5829
5830 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5831 if (rc)
5832 return rc;
5833
5834 if (bp->hwrm_spec_code < 0x10601)
5835 bp->hw_resc.resv_tx_rings = tx_rings;
5836
5837 rc = bnxt_hwrm_get_rings(bp);
5838 return rc;
5839 }
5840
5841 static int
5842 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
5843 int ring_grps, int cp_rings, int stats, int vnics)
5844 {
5845 struct hwrm_func_vf_cfg_input req = {0};
5846 int rc;
5847
5848 if (!BNXT_NEW_RM(bp)) {
5849 bp->hw_resc.resv_tx_rings = tx_rings;
5850 return 0;
5851 }
5852
5853 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
5854 cp_rings, stats, vnics);
5855 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5856 if (rc)
5857 return rc;
5858
5859 rc = bnxt_hwrm_get_rings(bp);
5860 return rc;
5861 }
5862
5863 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
5864 int cp, int stat, int vnic)
5865 {
5866 if (BNXT_PF(bp))
5867 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
5868 vnic);
5869 else
5870 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
5871 vnic);
5872 }
5873
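/* Number of NQs (completion rings on older chips) in use, including any
 * MSI-X vectors set aside for the RDMA (ULP) driver.
 */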
5874 int bnxt_nq_rings_in_use(struct bnxt *bp)
5875 {
5876 int cp = bp->cp_nr_rings;
5877 int ulp_msix, ulp_base;
5878
5879 ulp_msix = bnxt_get_ulp_msix_num(bp);
5880 if (ulp_msix) {
5881 ulp_base = bnxt_get_ulp_msix_base(bp);
5882 cp += ulp_msix;
5883 if ((ulp_base + ulp_msix) > cp)
5884 cp = ulp_base + ulp_msix;
5885 }
5886 return cp;
5887 }
5888
5889 static int bnxt_cp_rings_in_use(struct bnxt *bp)
5890 {
5891 int cp;
5892
5893 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
5894 return bnxt_nq_rings_in_use(bp);
5895
5896 cp = bp->tx_nr_rings + bp->rx_nr_rings;
5897 return cp;
5898 }
5899
5900 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
5901 {
5902 int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
5903 int cp = bp->cp_nr_rings;
5904
5905 if (!ulp_stat)
5906 return cp;
5907
5908 if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
5909 return bnxt_get_ulp_msix_base(bp) + ulp_stat;
5910
5911 return cp + ulp_stat;
5912 }
5913
5914 static bool bnxt_need_reserve_rings(struct bnxt *bp)
5915 {
5916 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5917 int cp = bnxt_cp_rings_in_use(bp);
5918 int nq = bnxt_nq_rings_in_use(bp);
5919 int rx = bp->rx_nr_rings, stat;
5920 int vnic = 1, grp = rx;
5921
5922 if (bp->hwrm_spec_code < 0x10601)
5923 return false;
5924
5925 if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
5926 return true;
5927
5928 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
5929 vnic = rx + 1;
5930 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5931 rx <<= 1;
5932 stat = bnxt_get_func_stat_ctxs(bp);
5933 if (BNXT_NEW_RM(bp) &&
5934 (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
5935 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
5936 (hw_resc->resv_hw_ring_grps != grp &&
5937 !(bp->flags & BNXT_FLAG_CHIP_P5))))
5938 return true;
5939 if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
5940 hw_resc->resv_irqs != nq)
5941 return true;
5942 return false;
5943 }
5944
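/* Reserve rings and related resources with the firmware, then trim the
 * driver's ring counts down to what was actually granted.
 */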
5945 static int __bnxt_reserve_rings(struct bnxt *bp)
5946 {
5947 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5948 int cp = bnxt_nq_rings_in_use(bp);
5949 int tx = bp->tx_nr_rings;
5950 int rx = bp->rx_nr_rings;
5951 int grp, rx_rings, rc;
5952 int vnic = 1, stat;
5953 bool sh = false;
5954
5955 if (!bnxt_need_reserve_rings(bp))
5956 return 0;
5957
5958 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5959 sh = true;
5960 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
5961 vnic = rx + 1;
5962 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5963 rx <<= 1;
5964 grp = bp->rx_nr_rings;
5965 stat = bnxt_get_func_stat_ctxs(bp);
5966
5967 rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
5968 if (rc)
5969 return rc;
5970
5971 tx = hw_resc->resv_tx_rings;
5972 if (BNXT_NEW_RM(bp)) {
5973 rx = hw_resc->resv_rx_rings;
5974 cp = hw_resc->resv_irqs;
5975 grp = hw_resc->resv_hw_ring_grps;
5976 vnic = hw_resc->resv_vnics;
5977 stat = hw_resc->resv_stat_ctxs;
5978 }
5979
5980 rx_rings = rx;
5981 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
5982 if (rx >= 2) {
5983 rx_rings = rx >> 1;
5984 } else {
5985 if (netif_running(bp->dev))
5986 return -ENOMEM;
5987
5988 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
5989 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
5990 bp->dev->hw_features &= ~NETIF_F_LRO;
5991 bp->dev->features &= ~NETIF_F_LRO;
5992 bnxt_set_ring_params(bp);
5993 }
5994 }
5995 rx_rings = min_t(int, rx_rings, grp);
5996 cp = min_t(int, cp, bp->cp_nr_rings);
5997 if (stat > bnxt_get_ulp_stat_ctxs(bp))
5998 stat -= bnxt_get_ulp_stat_ctxs(bp);
5999 cp = min_t(int, cp, stat);
6000 rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
6001 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6002 rx = rx_rings << 1;
6003 cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
6004 bp->tx_nr_rings = tx;
6005 bp->rx_nr_rings = rx_rings;
6006 bp->cp_nr_rings = cp;
6007
6008 if (!tx || !rx || !cp || !grp || !vnic || !stat)
6009 return -ENOMEM;
6010
6011 return rc;
6012 }
6013
6014 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6015 int ring_grps, int cp_rings, int stats,
6016 int vnics)
6017 {
6018 struct hwrm_func_vf_cfg_input req = {0};
6019 u32 flags;
6020 int rc;
6021
6022 if (!BNXT_NEW_RM(bp))
6023 return 0;
6024
6025 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6026 cp_rings, stats, vnics);
6027 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
6028 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6029 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6030 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6031 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
6032 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
6033 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6034 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6035
6036 req.flags = cpu_to_le32(flags);
6037 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6038 return rc;
6039 }
6040
6041 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6042 int ring_grps, int cp_rings, int stats,
6043 int vnics)
6044 {
6045 struct hwrm_func_cfg_input req = {0};
6046 u32 flags;
6047 int rc;
6048
6049 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6050 cp_rings, stats, vnics);
6051 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
6052 if (BNXT_NEW_RM(bp)) {
6053 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6054 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6055 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6056 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
6057 if (bp->flags & BNXT_FLAG_CHIP_P5)
6058 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
6059 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
6060 else
6061 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6062 }
6063
6064 req.flags = cpu_to_le32(flags);
6065 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6066 return rc;
6067 }
6068
6069 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6070 int ring_grps, int cp_rings, int stats,
6071 int vnics)
6072 {
6073 if (bp->hwrm_spec_code < 0x10801)
6074 return 0;
6075
6076 if (BNXT_PF(bp))
6077 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
6078 ring_grps, cp_rings, stats,
6079 vnics);
6080
6081 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6082 cp_rings, stats, vnics);
6083 }
6084
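/* Query the interrupt coalescing capabilities from the firmware.
 * Firmware with a HWRM spec older than 1.9.2 does not support the
 * query, so conservative legacy defaults are used instead.
 */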
6085 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
6086 {
6087 struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6088 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6089 struct hwrm_ring_aggint_qcaps_input req = {0};
6090 int rc;
6091
6092 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
6093 coal_cap->num_cmpl_dma_aggr_max = 63;
6094 coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
6095 coal_cap->cmpl_aggr_dma_tmr_max = 65535;
6096 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
6097 coal_cap->int_lat_tmr_min_max = 65535;
6098 coal_cap->int_lat_tmr_max_max = 65535;
6099 coal_cap->num_cmpl_aggr_int_max = 65535;
6100 coal_cap->timer_units = 80;
6101
6102 if (bp->hwrm_spec_code < 0x10902)
6103 return;
6104
6105 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1);
6106 mutex_lock(&bp->hwrm_cmd_lock);
6107 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6108 if (!rc) {
6109 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
6110 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
6111 coal_cap->num_cmpl_dma_aggr_max =
6112 le16_to_cpu(resp->num_cmpl_dma_aggr_max);
6113 coal_cap->num_cmpl_dma_aggr_during_int_max =
6114 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
6115 coal_cap->cmpl_aggr_dma_tmr_max =
6116 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
6117 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
6118 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
6119 coal_cap->int_lat_tmr_min_max =
6120 le16_to_cpu(resp->int_lat_tmr_min_max);
6121 coal_cap->int_lat_tmr_max_max =
6122 le16_to_cpu(resp->int_lat_tmr_max_max);
6123 coal_cap->num_cmpl_aggr_int_max =
6124 le16_to_cpu(resp->num_cmpl_aggr_int_max);
6125 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
6126 }
6127 mutex_unlock(&bp->hwrm_cmd_lock);
6128 }
6129
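/* Convert microseconds to coalescing timer ticks using the timer_units
 * value (nanoseconds per tick) reported by the firmware.
 */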
6130 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
6131 {
6132 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6133
6134 return usec * 1000 / coal_cap->timer_units;
6135 }
6136
6137 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
6138 struct bnxt_coal *hw_coal,
6139 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
6140 {
6141 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6142 u32 cmpl_params = coal_cap->cmpl_params;
6143 u16 val, tmr, max, flags = 0;
6144
6145 max = hw_coal->bufs_per_record * 128;
6146 if (hw_coal->budget)
6147 max = hw_coal->bufs_per_record * hw_coal->budget;
6148 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
6149
6150 val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
6151 req->num_cmpl_aggr_int = cpu_to_le16(val);
6152
6153 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
6154 req->num_cmpl_dma_aggr = cpu_to_le16(val);
6155
6156 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
6157 coal_cap->num_cmpl_dma_aggr_during_int_max);
6158 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
6159
6160 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6161 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
6162 req->int_lat_tmr_max = cpu_to_le16(tmr);
6163
6164 /* min timer set to 1/2 of interrupt timer */
6165 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
6166 val = tmr / 2;
6167 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
6168 req->int_lat_tmr_min = cpu_to_le16(val);
6169 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6170 }
6171
6172 /* buf timer set to 1/4 of interrupt timer */
6173 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
6174 req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
6175
6176 if (cmpl_params &
6177 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
6178 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6179 val = clamp_t(u16, tmr, 1,
6180 coal_cap->cmpl_aggr_dma_tmr_during_int_max);
6181 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
6182 req->enables |=
6183 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
6184 }
6185
6186 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
6187 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
6188 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
6189 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
6190 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
6191 req->flags = cpu_to_le16(flags);
6192 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
6193 }
6194
6195 /* Caller holds bp->hwrm_cmd_lock */
6196 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
6197 struct bnxt_coal *hw_coal)
6198 {
6199 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
6200 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6201 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6202 u32 nq_params = coal_cap->nq_params;
6203 u16 tmr;
6204
6205 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
6206 return 0;
6207
6208 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
6209 -1, -1);
6210 req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
6211 req.flags =
6212 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
6213
6214 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
6215 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
6216 req.int_lat_tmr_min = cpu_to_le16(tmr);
6217 req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6218 return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6219 }
6220
6221 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
6222 {
6223 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
6224 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6225 struct bnxt_coal coal;
6226
6227 /* Tick values in microseconds.
6228 * 1 coal_buf x bufs_per_record = 1 completion record.
6229 */
6230 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
6231
6232 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
6233 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
6234
6235 if (!bnapi->rx_ring)
6236 return -ENODEV;
6237
6238 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6239 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6240
6241 bnxt_hwrm_set_coal_params(bp, &coal, &req_rx);
6242
6243 req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6244
6245 return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
6246 HWRM_CMD_TIMEOUT);
6247 }
6248
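/* Program RX and TX interrupt coalescing parameters on every completion
 * ring, and on P5 chips also set the NQ coalescing parameters.
 */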
6249 int bnxt_hwrm_set_coal(struct bnxt *bp)
6250 {
6251 int i, rc = 0;
6252 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
6253 req_tx = {0}, *req;
6254
6255 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6256 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6257 bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
6258 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6259
6260 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx);
6261 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx);
6262
6263 mutex_lock(&bp->hwrm_cmd_lock);
6264 for (i = 0; i < bp->cp_nr_rings; i++) {
6265 struct bnxt_napi *bnapi = bp->bnapi[i];
6266 struct bnxt_coal *hw_coal;
6267 u16 ring_id;
6268
6269 req = &req_rx;
6270 if (!bnapi->rx_ring) {
6271 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6272 req = &req_tx;
6273 } else {
6274 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
6275 }
6276 req->ring_id = cpu_to_le16(ring_id);
6277
6278 rc = _hwrm_send_message(bp, req, sizeof(*req),
6279 HWRM_CMD_TIMEOUT);
6280 if (rc)
6281 break;
6282
6283 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6284 continue;
6285
6286 if (bnapi->rx_ring && bnapi->tx_ring) {
6287 req = &req_tx;
6288 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6289 req->ring_id = cpu_to_le16(ring_id);
6290 rc = _hwrm_send_message(bp, req, sizeof(*req),
6291 HWRM_CMD_TIMEOUT);
6292 if (rc)
6293 break;
6294 }
6295 if (bnapi->rx_ring)
6296 hw_coal = &bp->rx_coal;
6297 else
6298 hw_coal = &bp->tx_coal;
6299 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
6300 }
6301 mutex_unlock(&bp->hwrm_cmd_lock);
6302 return rc;
6303 }
6304
6305 static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
6306 {
6307 int rc = 0, i;
6308 struct hwrm_stat_ctx_free_input req = {0};
6309
6310 if (!bp->bnapi)
6311 return 0;
6312
6313 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6314 return 0;
6315
6316 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
6317
6318 mutex_lock(&bp->hwrm_cmd_lock);
6319 for (i = 0; i < bp->cp_nr_rings; i++) {
6320 struct bnxt_napi *bnapi = bp->bnapi[i];
6321 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6322
6323 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
6324 req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
6325
6326 rc = _hwrm_send_message(bp, &req, sizeof(req),
6327 HWRM_CMD_TIMEOUT);
6328
6329 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
6330 }
6331 }
6332 mutex_unlock(&bp->hwrm_cmd_lock);
6333 return rc;
6334 }
6335
6336 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
6337 {
6338 int rc = 0, i;
6339 struct hwrm_stat_ctx_alloc_input req = {0};
6340 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
6341
6342 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6343 return 0;
6344
6345 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
6346
6347 req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
6348 req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
6349
6350 mutex_lock(&bp->hwrm_cmd_lock);
6351 for (i = 0; i < bp->cp_nr_rings; i++) {
6352 struct bnxt_napi *bnapi = bp->bnapi[i];
6353 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6354
6355 req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
6356
6357 rc = _hwrm_send_message(bp, &req, sizeof(req),
6358 HWRM_CMD_TIMEOUT);
6359 if (rc)
6360 break;
6361
6362 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
6363
6364 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
6365 }
6366 mutex_unlock(&bp->hwrm_cmd_lock);
6367 return rc;
6368 }
6369
6370 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
6371 {
6372 struct hwrm_func_qcfg_input req = {0};
6373 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6374 u16 flags;
6375 int rc;
6376
6377 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6378 req.fid = cpu_to_le16(0xffff);
6379 mutex_lock(&bp->hwrm_cmd_lock);
6380 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6381 if (rc)
6382 goto func_qcfg_exit;
6383
6384 #ifdef CONFIG_BNXT_SRIOV
6385 if (BNXT_VF(bp)) {
6386 struct bnxt_vf_info *vf = &bp->vf;
6387
6388 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
6389 } else {
6390 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
6391 }
6392 #endif
6393 flags = le16_to_cpu(resp->flags);
6394 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
6395 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
6396 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
6397 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
6398 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
6399 }
6400 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
6401 bp->flags |= BNXT_FLAG_MULTI_HOST;
6402
6403 switch (resp->port_partition_type) {
6404 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
6405 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
6406 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
6407 bp->port_partition_type = resp->port_partition_type;
6408 break;
6409 }
6410 if (bp->hwrm_spec_code < 0x10707 ||
6411 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
6412 bp->br_mode = BRIDGE_MODE_VEB;
6413 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
6414 bp->br_mode = BRIDGE_MODE_VEPA;
6415 else
6416 bp->br_mode = BRIDGE_MODE_UNDEF;
6417
6418 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
6419 if (!bp->max_mtu)
6420 bp->max_mtu = BNXT_MAX_MTU;
6421
6422 func_qcfg_exit:
6423 mutex_unlock(&bp->hwrm_cmd_lock);
6424 return rc;
6425 }
6426
6427 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
6428 {
6429 struct hwrm_func_backing_store_qcaps_input req = {0};
6430 struct hwrm_func_backing_store_qcaps_output *resp =
6431 bp->hwrm_cmd_resp_addr;
6432 int rc;
6433
6434 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
6435 return 0;
6436
6437 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
6438 mutex_lock(&bp->hwrm_cmd_lock);
6439 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6440 if (!rc) {
6441 struct bnxt_ctx_pg_info *ctx_pg;
6442 struct bnxt_ctx_mem_info *ctx;
6443 int i;
6444
6445 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
6446 if (!ctx) {
6447 rc = -ENOMEM;
6448 goto ctx_err;
6449 }
6450 ctx_pg = kzalloc(sizeof(*ctx_pg) * (bp->max_q + 1), GFP_KERNEL);
6451 if (!ctx_pg) {
6452 kfree(ctx);
6453 rc = -ENOMEM;
6454 goto ctx_err;
6455 }
6456 for (i = 0; i < bp->max_q + 1; i++, ctx_pg++)
6457 ctx->tqm_mem[i] = ctx_pg;
6458
6459 bp->ctx = ctx;
6460 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
6461 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
6462 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
6463 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
6464 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
6465 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
6466 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
6467 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
6468 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
6469 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
6470 ctx->vnic_max_vnic_entries =
6471 le16_to_cpu(resp->vnic_max_vnic_entries);
6472 ctx->vnic_max_ring_table_entries =
6473 le16_to_cpu(resp->vnic_max_ring_table_entries);
6474 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
6475 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
6476 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
6477 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
6478 ctx->tqm_min_entries_per_ring =
6479 le32_to_cpu(resp->tqm_min_entries_per_ring);
6480 ctx->tqm_max_entries_per_ring =
6481 le32_to_cpu(resp->tqm_max_entries_per_ring);
6482 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
6483 if (!ctx->tqm_entries_multiple)
6484 ctx->tqm_entries_multiple = 1;
6485 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
6486 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
6487 ctx->mrav_num_entries_units =
6488 le16_to_cpu(resp->mrav_num_entries_units);
6489 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
6490 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
6491 } else {
6492 rc = 0;
6493 }
6494 ctx_err:
6495 mutex_unlock(&bp->hwrm_cmd_lock);
6496 return rc;
6497 }
6498
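/* Encode the backing store page size and indirection level (0, 1 or 2
 * levels of page tables) and fill in the directory/page address for a
 * context memory region.
 */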
6499 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
6500 __le64 *pg_dir)
6501 {
6502 u8 pg_size = 0;
6503
6504 if (BNXT_PAGE_SHIFT == 13)
6505 pg_size = 1 << 4;
6506 else if (BNXT_PAGE_SIZE == 16)
6507 pg_size = 2 << 4;
6508
6509 *pg_attr = pg_size;
6510 if (rmem->depth >= 1) {
6511 if (rmem->depth == 2)
6512 *pg_attr |= 2;
6513 else
6514 *pg_attr |= 1;
6515 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
6516 } else {
6517 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
6518 }
6519 }
6520
6521 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
6522 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
6523 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
6524 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
6525 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
6526 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
6527
6528 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
6529 {
6530 struct hwrm_func_backing_store_cfg_input req = {0};
6531 struct bnxt_ctx_mem_info *ctx = bp->ctx;
6532 struct bnxt_ctx_pg_info *ctx_pg;
6533 __le32 *num_entries;
6534 __le64 *pg_dir;
6535 u32 flags = 0;
6536 u8 *pg_attr;
6537 int i, rc;
6538 u32 ena;
6539
6540 if (!ctx)
6541 return 0;
6542
6543 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
6544 req.enables = cpu_to_le32(enables);
6545
6546 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
6547 ctx_pg = &ctx->qp_mem;
6548 req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
6549 req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
6550 req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
6551 req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
6552 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6553 &req.qpc_pg_size_qpc_lvl,
6554 &req.qpc_page_dir);
6555 }
6556 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
6557 ctx_pg = &ctx->srq_mem;
6558 req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
6559 req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
6560 req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
6561 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6562 &req.srq_pg_size_srq_lvl,
6563 &req.srq_page_dir);
6564 }
6565 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
6566 ctx_pg = &ctx->cq_mem;
6567 req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
6568 req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
6569 req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
6570 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
6571 &req.cq_page_dir);
6572 }
6573 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
6574 ctx_pg = &ctx->vnic_mem;
6575 req.vnic_num_vnic_entries =
6576 cpu_to_le16(ctx->vnic_max_vnic_entries);
6577 req.vnic_num_ring_table_entries =
6578 cpu_to_le16(ctx->vnic_max_ring_table_entries);
6579 req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
6580 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6581 &req.vnic_pg_size_vnic_lvl,
6582 &req.vnic_page_dir);
6583 }
6584 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
6585 ctx_pg = &ctx->stat_mem;
6586 req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
6587 req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
6588 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6589 &req.stat_pg_size_stat_lvl,
6590 &req.stat_page_dir);
6591 }
6592 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
6593 ctx_pg = &ctx->mrav_mem;
6594 req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
6595 if (ctx->mrav_num_entries_units)
6596 flags |=
6597 FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
6598 req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
6599 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6600 &req.mrav_pg_size_mrav_lvl,
6601 &req.mrav_page_dir);
6602 }
6603 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
6604 ctx_pg = &ctx->tim_mem;
6605 req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
6606 req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
6607 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6608 &req.tim_pg_size_tim_lvl,
6609 &req.tim_page_dir);
6610 }
6611 for (i = 0, num_entries = &req.tqm_sp_num_entries,
6612 pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
6613 pg_dir = &req.tqm_sp_page_dir,
6614 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
6615 i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
6616 if (!(enables & ena))
6617 continue;
6618
6619 req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
6620 ctx_pg = ctx->tqm_mem[i];
6621 *num_entries = cpu_to_le32(ctx_pg->entries);
6622 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
6623 }
6624 req.flags = cpu_to_le32(flags);
6625 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6626 return rc;
6627 }
6628
6629 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
6630 struct bnxt_ctx_pg_info *ctx_pg)
6631 {
6632 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6633
6634 rmem->page_size = BNXT_PAGE_SIZE;
6635 rmem->pg_arr = ctx_pg->ctx_pg_arr;
6636 rmem->dma_arr = ctx_pg->ctx_dma_arr;
6637 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
6638 if (rmem->depth >= 1)
6639 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
6640 return bnxt_alloc_ring(bp, rmem);
6641 }
6642
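/* Allocate context memory of the requested size, using up to two levels
 * of page tables when the region does not fit in a single level.
 */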
6643 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
6644 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
6645 u8 depth)
6646 {
6647 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6648 int rc;
6649
6650 if (!mem_size)
6651 return 0;
6652
6653 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
6654 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
6655 ctx_pg->nr_pages = 0;
6656 return -EINVAL;
6657 }
6658 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
6659 int nr_tbls, i;
6660
6661 rmem->depth = 2;
6662 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
6663 GFP_KERNEL);
6664 if (!ctx_pg->ctx_pg_tbl)
6665 return -ENOMEM;
6666 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
6667 rmem->nr_pages = nr_tbls;
6668 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
6669 if (rc)
6670 return rc;
6671 for (i = 0; i < nr_tbls; i++) {
6672 struct bnxt_ctx_pg_info *pg_tbl;
6673
6674 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
6675 if (!pg_tbl)
6676 return -ENOMEM;
6677 ctx_pg->ctx_pg_tbl[i] = pg_tbl;
6678 rmem = &pg_tbl->ring_mem;
6679 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
6680 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
6681 rmem->depth = 1;
6682 rmem->nr_pages = MAX_CTX_PAGES;
6683 if (i == (nr_tbls - 1)) {
6684 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
6685
6686 if (rem)
6687 rmem->nr_pages = rem;
6688 }
6689 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
6690 if (rc)
6691 break;
6692 }
6693 } else {
6694 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
6695 if (rmem->nr_pages > 1 || depth)
6696 rmem->depth = 1;
6697 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
6698 }
6699 return rc;
6700 }
6701
6702 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
6703 struct bnxt_ctx_pg_info *ctx_pg)
6704 {
6705 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6706
6707 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
6708 ctx_pg->ctx_pg_tbl) {
6709 int i, nr_tbls = rmem->nr_pages;
6710
6711 for (i = 0; i < nr_tbls; i++) {
6712 struct bnxt_ctx_pg_info *pg_tbl;
6713 struct bnxt_ring_mem_info *rmem2;
6714
6715 pg_tbl = ctx_pg->ctx_pg_tbl[i];
6716 if (!pg_tbl)
6717 continue;
6718 rmem2 = &pg_tbl->ring_mem;
6719 bnxt_free_ring(bp, rmem2);
6720 ctx_pg->ctx_pg_arr[i] = NULL;
6721 kfree(pg_tbl);
6722 ctx_pg->ctx_pg_tbl[i] = NULL;
6723 }
6724 kfree(ctx_pg->ctx_pg_tbl);
6725 ctx_pg->ctx_pg_tbl = NULL;
6726 }
6727 bnxt_free_ring(bp, rmem);
6728 ctx_pg->nr_pages = 0;
6729 }
6730
6731 static void bnxt_free_ctx_mem(struct bnxt *bp)
6732 {
6733 struct bnxt_ctx_mem_info *ctx = bp->ctx;
6734 int i;
6735
6736 if (!ctx)
6737 return;
6738
6739 if (ctx->tqm_mem[0]) {
6740 for (i = 0; i < bp->max_q + 1; i++)
6741 bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
6742 kfree(ctx->tqm_mem[0]);
6743 ctx->tqm_mem[0] = NULL;
6744 }
6745
6746 bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
6747 bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
6748 bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
6749 bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
6750 bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
6751 bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
6752 bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
6753 ctx->flags &= ~BNXT_CTX_FLAG_INITED;
6754 }
6755
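/* Size and allocate all firmware backing store context memory (QP, SRQ,
 * CQ, VNIC, stats, MR/AV, TIM and TQM rings) and configure it with the
 * firmware.  Extra QP/SRQ entries are added when RoCE is supported.
 */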
6756 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
6757 {
6758 struct bnxt_ctx_pg_info *ctx_pg;
6759 struct bnxt_ctx_mem_info *ctx;
6760 u32 mem_size, ena, entries;
6761 u32 num_mr, num_ah;
6762 u32 extra_srqs = 0;
6763 u32 extra_qps = 0;
6764 u8 pg_lvl = 1;
6765 int i, rc;
6766
6767 rc = bnxt_hwrm_func_backing_store_qcaps(bp);
6768 if (rc) {
6769 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
6770 rc);
6771 return rc;
6772 }
6773 ctx = bp->ctx;
6774 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
6775 return 0;
6776
6777 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
6778 pg_lvl = 2;
6779 extra_qps = 65536;
6780 extra_srqs = 8192;
6781 }
6782
6783 ctx_pg = &ctx->qp_mem;
6784 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
6785 extra_qps;
6786 mem_size = ctx->qp_entry_size * ctx_pg->entries;
6787 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
6788 if (rc)
6789 return rc;
6790
6791 ctx_pg = &ctx->srq_mem;
6792 ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
6793 mem_size = ctx->srq_entry_size * ctx_pg->entries;
6794 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
6795 if (rc)
6796 return rc;
6797
6798 ctx_pg = &ctx->cq_mem;
6799 ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
6800 mem_size = ctx->cq_entry_size * ctx_pg->entries;
6801 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl);
6802 if (rc)
6803 return rc;
6804
6805 ctx_pg = &ctx->vnic_mem;
6806 ctx_pg->entries = ctx->vnic_max_vnic_entries +
6807 ctx->vnic_max_ring_table_entries;
6808 mem_size = ctx->vnic_entry_size * ctx_pg->entries;
6809 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
6810 if (rc)
6811 return rc;
6812
6813 ctx_pg = &ctx->stat_mem;
6814 ctx_pg->entries = ctx->stat_max_entries;
6815 mem_size = ctx->stat_entry_size * ctx_pg->entries;
6816 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
6817 if (rc)
6818 return rc;
6819
6820 ena = 0;
6821 if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
6822 goto skip_rdma;
6823
6824 ctx_pg = &ctx->mrav_mem;
6825 /* 128K extra is needed to accommodate static AH context
6826 * allocation by f/w.
6827 */
6828 num_mr = 1024 * 256;
6829 num_ah = 1024 * 128;
6830 ctx_pg->entries = num_mr + num_ah;
6831 mem_size = ctx->mrav_entry_size * ctx_pg->entries;
6832 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2);
6833 if (rc)
6834 return rc;
6835 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
6836 if (ctx->mrav_num_entries_units)
6837 ctx_pg->entries =
6838 ((num_mr / ctx->mrav_num_entries_units) << 16) |
6839 (num_ah / ctx->mrav_num_entries_units);
6840
6841 ctx_pg = &ctx->tim_mem;
6842 ctx_pg->entries = ctx->qp_mem.entries;
6843 mem_size = ctx->tim_entry_size * ctx_pg->entries;
6844 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
6845 if (rc)
6846 return rc;
6847 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
6848
6849 skip_rdma:
6850 entries = ctx->qp_max_l2_entries + extra_qps;
6851 entries = roundup(entries, ctx->tqm_entries_multiple);
6852 entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring,
6853 ctx->tqm_max_entries_per_ring);
6854 for (i = 0; i < bp->max_q + 1; i++) {
6855 ctx_pg = ctx->tqm_mem[i];
6856 ctx_pg->entries = entries;
6857 mem_size = ctx->tqm_entry_size * entries;
6858 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1);
6859 if (rc)
6860 return rc;
6861 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
6862 }
6863 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
6864 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
6865 if (rc)
6866 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
6867 rc);
6868 else
6869 ctx->flags |= BNXT_CTX_FLAG_INITED;
6870
6871 return 0;
6872 }
6873
6874 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
6875 {
6876 struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6877 struct hwrm_func_resource_qcaps_input req = {0};
6878 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6879 int rc;
6880
6881 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
6882 req.fid = cpu_to_le16(0xffff);
6883
6884 mutex_lock(&bp->hwrm_cmd_lock);
6885 rc = _hwrm_send_message_silent(bp, &req, sizeof(req),
6886 HWRM_CMD_TIMEOUT);
6887 if (rc)
6888 goto hwrm_func_resc_qcaps_exit;
6889
6890 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
6891 if (!all)
6892 goto hwrm_func_resc_qcaps_exit;
6893
6894 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
6895 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
6896 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
6897 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
6898 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
6899 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
6900 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
6901 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
6902 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
6903 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
6904 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
6905 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
6906 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
6907 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
6908 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
6909 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
6910
6911 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6912 u16 max_msix = le16_to_cpu(resp->max_msix);
6913
6914 hw_resc->max_nqs = max_msix;
6915 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
6916 }
6917
6918 if (BNXT_PF(bp)) {
6919 struct bnxt_pf_info *pf = &bp->pf;
6920
6921 pf->vf_resv_strategy =
6922 le16_to_cpu(resp->vf_reservation_strategy);
6923 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
6924 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
6925 }
6926 hwrm_func_resc_qcaps_exit:
6927 mutex_unlock(&bp->hwrm_cmd_lock);
6928 return rc;
6929 }
6930
6931 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
6932 {
6933 int rc = 0;
6934 struct hwrm_func_qcaps_input req = {0};
6935 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6936 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6937 u32 flags;
6938
6939 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
6940 req.fid = cpu_to_le16(0xffff);
6941
6942 mutex_lock(&bp->hwrm_cmd_lock);
6943 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6944 if (rc)
6945 goto hwrm_func_qcaps_exit;
6946
6947 flags = le32_to_cpu(resp->flags);
6948 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
6949 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
6950 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
6951 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
6952 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
6953 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
6954 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
6955 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
6956 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
6957 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
6958 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
6959 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
6960
6961 bp->tx_push_thresh = 0;
6962 if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
6963 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
6964
6965 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
6966 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
6967 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
6968 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
6969 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
6970 if (!hw_resc->max_hw_ring_grps)
6971 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
6972 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
6973 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
6974 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
6975
6976 if (BNXT_PF(bp)) {
6977 struct bnxt_pf_info *pf = &bp->pf;
6978
6979 pf->fw_fid = le16_to_cpu(resp->fid);
6980 pf->port_id = le16_to_cpu(resp->port_id);
6981 bp->dev->dev_port = pf->port_id;
6982 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
6983 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
6984 pf->max_vfs = le16_to_cpu(resp->max_vfs);
6985 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
6986 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
6987 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
6988 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
6989 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
6990 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
6991 bp->flags &= ~BNXT_FLAG_WOL_CAP;
6992 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
6993 bp->flags |= BNXT_FLAG_WOL_CAP;
6994 } else {
6995 #ifdef CONFIG_BNXT_SRIOV
6996 struct bnxt_vf_info *vf = &bp->vf;
6997
6998 vf->fw_fid = le16_to_cpu(resp->fid);
6999 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
7000 #endif
7001 }
7002
7003 hwrm_func_qcaps_exit:
7004 mutex_unlock(&bp->hwrm_cmd_lock);
7005 return rc;
7006 }
7007
7008 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
7009
7010 static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
7011 {
7012 int rc;
7013
7014 rc = __bnxt_hwrm_func_qcaps(bp);
7015 if (rc)
7016 return rc;
7017 rc = bnxt_hwrm_queue_qportcfg(bp);
7018 if (rc) {
7019 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
7020 return rc;
7021 }
7022 if (bp->hwrm_spec_code >= 0x10803) {
7023 rc = bnxt_alloc_ctx_mem(bp);
7024 if (rc)
7025 return rc;
7026 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
7027 if (!rc)
7028 bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
7029 }
7030 return 0;
7031 }
7032
7033 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
7034 {
7035 struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
7036 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
7037 int rc = 0;
7038 u32 flags;
7039
7040 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
7041 return 0;
7042
7043 resp = bp->hwrm_cmd_resp_addr;
7044 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1);
7045
7046 mutex_lock(&bp->hwrm_cmd_lock);
7047 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7048 if (rc)
7049 goto hwrm_cfa_adv_qcaps_exit;
7050
7051 flags = le32_to_cpu(resp->flags);
7052 if (flags &
7053 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED)
7054 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX;
7055
7056 hwrm_cfa_adv_qcaps_exit:
7057 mutex_unlock(&bp->hwrm_cmd_lock);
7058 return rc;
7059 }
7060
7061 static int bnxt_map_fw_health_regs(struct bnxt *bp)
7062 {
7063 struct bnxt_fw_health *fw_health = bp->fw_health;
7064 u32 reg_base = 0xffffffff;
7065 int i;
7066
7067 /* Only pre-map the monitoring GRC registers using window 3 */
7068 for (i = 0; i < 4; i++) {
7069 u32 reg = fw_health->regs[i];
7070
7071 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
7072 continue;
7073 if (reg_base == 0xffffffff)
7074 reg_base = reg & BNXT_GRC_BASE_MASK;
7075 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
7076 return -ERANGE;
7077 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_BASE +
7078 (reg & BNXT_GRC_OFFSET_MASK);
7079 }
7080 if (reg_base == 0xffffffff)
7081 return 0;
7082
7083 writel(reg_base, bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT +
7084 BNXT_FW_HEALTH_WIN_MAP_OFF);
7085 return 0;
7086 }
7087
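/* Query the firmware error recovery configuration and cache it in
 * bp->fw_health: polling frequency, master/normal function wait times,
 * the health/heartbeat/reset-count/reset-in-progress register locations
 * and the register write sequence used to reset the firmware.  On any
 * failure, the BNXT_FW_CAP_ERROR_RECOVERY capability is cleared.
 */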
7088 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
7089 {
7090 struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
7091 struct bnxt_fw_health *fw_health = bp->fw_health;
7092 struct hwrm_error_recovery_qcfg_input req = {0};
7093 int rc, i;
7094
7095 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7096 return 0;
7097
7098 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1);
7099 mutex_lock(&bp->hwrm_cmd_lock);
7100 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7101 if (rc)
7102 goto err_recovery_out;
7103 fw_health->flags = le32_to_cpu(resp->flags);
7104 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
7105 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
7106 rc = -EINVAL;
7107 goto err_recovery_out;
7108 }
7109 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
7110 fw_health->master_func_wait_dsecs =
7111 le32_to_cpu(resp->master_func_wait_period);
7112 fw_health->normal_func_wait_dsecs =
7113 le32_to_cpu(resp->normal_func_wait_period);
7114 fw_health->post_reset_wait_dsecs =
7115 le32_to_cpu(resp->master_func_wait_period_after_reset);
7116 fw_health->post_reset_max_wait_dsecs =
7117 le32_to_cpu(resp->max_bailout_time_after_reset);
7118 fw_health->regs[BNXT_FW_HEALTH_REG] =
7119 le32_to_cpu(resp->fw_health_status_reg);
7120 fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
7121 le32_to_cpu(resp->fw_heartbeat_reg);
7122 fw_health->regs[BNXT_FW_RESET_CNT_REG] =
7123 le32_to_cpu(resp->fw_reset_cnt_reg);
7124 fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
7125 le32_to_cpu(resp->reset_inprogress_reg);
7126 fw_health->fw_reset_inprog_reg_mask =
7127 le32_to_cpu(resp->reset_inprogress_reg_mask);
7128 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
7129 if (fw_health->fw_reset_seq_cnt >= 16) {
7130 rc = -EINVAL;
7131 goto err_recovery_out;
7132 }
7133 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
7134 fw_health->fw_reset_seq_regs[i] =
7135 le32_to_cpu(resp->reset_reg[i]);
7136 fw_health->fw_reset_seq_vals[i] =
7137 le32_to_cpu(resp->reset_reg_val[i]);
7138 fw_health->fw_reset_seq_delay_msec[i] =
7139 resp->delay_after_reset[i];
7140 }
7141 err_recovery_out:
7142 mutex_unlock(&bp->hwrm_cmd_lock);
7143 if (!rc)
7144 rc = bnxt_map_fw_health_regs(bp);
7145 if (rc)
7146 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7147 return rc;
7148 }
7149
7150 static int bnxt_hwrm_func_reset(struct bnxt *bp)
7151 {
7152 struct hwrm_func_reset_input req = {0};
7153
7154 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
7155 req.enables = 0;
7156
7157 return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
7158 }
7159
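/* Query the hardware CoS queue configuration.  Cache the queue IDs and
 * queue profiles, derive bp->max_q and bp->max_tc, and clamp the number
 * of lossless queues to the number of usable TCs.
 */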
7160 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
7161 {
7162 int rc = 0;
7163 struct hwrm_queue_qportcfg_input req = {0};
7164 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
7165 u8 i, j, *qptr;
7166 bool no_rdma;
7167
7168 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
7169
7170 mutex_lock(&bp->hwrm_cmd_lock);
7171 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7172 if (rc)
7173 goto qportcfg_exit;
7174
7175 if (!resp->max_configurable_queues) {
7176 rc = -EINVAL;
7177 goto qportcfg_exit;
7178 }
7179 bp->max_tc = resp->max_configurable_queues;
7180 bp->max_lltc = resp->max_configurable_lossless_queues;
7181 if (bp->max_tc > BNXT_MAX_QUEUE)
7182 bp->max_tc = BNXT_MAX_QUEUE;
7183
7184 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
7185 qptr = &resp->queue_id0;
7186 for (i = 0, j = 0; i < bp->max_tc; i++) {
7187 bp->q_info[j].queue_id = *qptr;
7188 bp->q_ids[i] = *qptr++;
7189 bp->q_info[j].queue_profile = *qptr++;
7190 bp->tc_to_qidx[j] = j;
7191 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
7192 (no_rdma && BNXT_PF(bp)))
7193 j++;
7194 }
7195 bp->max_q = bp->max_tc;
7196 bp->max_tc = max_t(u8, j, 1);
7197
7198 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
7199 bp->max_tc = 1;
7200
7201 if (bp->max_lltc > bp->max_tc)
7202 bp->max_lltc = bp->max_tc;
7203
7204 qportcfg_exit:
7205 mutex_unlock(&bp->hwrm_cmd_lock);
7206 return rc;
7207 }
7208
7209 static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent)
7210 {
7211 struct hwrm_ver_get_input req = {0};
7212 int rc;
7213
7214 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
7215 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
7216 req.hwrm_intf_min = HWRM_VERSION_MINOR;
7217 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
7218
7219 rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT,
7220 silent);
7221 return rc;
7222 }
7223
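/* Query the HWRM/firmware version.  Cache the interface spec code, the
 * firmware version string (including the active package name), the
 * command timeout, the maximum request lengths, the chip number and the
 * capability bits advertised in dev_caps_cfg.
 */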
7224 static int bnxt_hwrm_ver_get(struct bnxt *bp)
7225 {
7226 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
7227 u32 dev_caps_cfg;
7228 int rc;
7229
7230 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
7231 mutex_lock(&bp->hwrm_cmd_lock);
7232 rc = __bnxt_hwrm_ver_get(bp, false);
7233 if (rc)
7234 goto hwrm_ver_get_exit;
7235
7236 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
7237
7238 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
7239 resp->hwrm_intf_min_8b << 8 |
7240 resp->hwrm_intf_upd_8b;
7241 if (resp->hwrm_intf_maj_8b < 1) {
7242 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
7243 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7244 resp->hwrm_intf_upd_8b);
7245 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
7246 }
7247 snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
7248 resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
7249 resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
7250
7251 if (strlen(resp->active_pkg_name)) {
7252 int fw_ver_len = strlen(bp->fw_ver_str);
7253
7254 snprintf(bp->fw_ver_str + fw_ver_len,
7255 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
7256 resp->active_pkg_name);
7257 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
7258 }
7259
7260 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
7261 if (!bp->hwrm_cmd_timeout)
7262 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
7263
7264 if (resp->hwrm_intf_maj_8b >= 1) {
7265 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
7266 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
7267 }
7268 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
7269 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
7270
7271 bp->chip_num = le16_to_cpu(resp->chip_num);
7272 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
7273 !resp->chip_metal)
7274 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
7275
7276 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
7277 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
7278 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
7279 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
7280
7281 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
7282 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
7283
7284 if (dev_caps_cfg &
7285 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
7286 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
7287
7288 if (dev_caps_cfg &
7289 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
7290 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
7291
7292 if (dev_caps_cfg &
7293 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
7294 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
7295
7296 hwrm_ver_get_exit:
7297 mutex_unlock(&bp->hwrm_cmd_lock);
7298 return rc;
7299 }
7300
7301 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
7302 {
7303 struct hwrm_fw_set_time_input req = {0};
7304 struct tm tm;
7305 time64_t now = ktime_get_real_seconds();
7306
7307 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
7308 bp->hwrm_spec_code < 0x10400)
7309 return -EOPNOTSUPP;
7310
7311 time64_to_tm(now, 0, &tm);
7312 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
7313 req.year = cpu_to_le16(1900 + tm.tm_year);
7314 req.month = 1 + tm.tm_mon;
7315 req.day = tm.tm_mday;
7316 req.hour = tm.tm_hour;
7317 req.minute = tm.tm_min;
7318 req.second = tm.tm_sec;
7319 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7320 }
7321
7322 static int bnxt_hwrm_port_qstats(struct bnxt *bp)
7323 {
7324 int rc;
7325 struct bnxt_pf_info *pf = &bp->pf;
7326 struct hwrm_port_qstats_input req = {0};
7327
7328 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
7329 return 0;
7330
7331 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
7332 req.port_id = cpu_to_le16(pf->port_id);
7333 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
7334 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
7335 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7336 return rc;
7337 }
7338
7339 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
7340 {
7341 struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
7342 struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
7343 struct hwrm_port_qstats_ext_input req = {0};
7344 struct bnxt_pf_info *pf = &bp->pf;
7345 u32 tx_stat_size;
7346 int rc;
7347
7348 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
7349 return 0;
7350
7351 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
7352 req.port_id = cpu_to_le16(pf->port_id);
7353 req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
7354 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
7355 tx_stat_size = bp->hw_tx_port_stats_ext ?
7356 sizeof(*bp->hw_tx_port_stats_ext) : 0;
7357 req.tx_stat_size = cpu_to_le16(tx_stat_size);
7358 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
7359 mutex_lock(&bp->hwrm_cmd_lock);
7360 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7361 if (!rc) {
7362 bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
7363 bp->fw_tx_stats_ext_size = tx_stat_size ?
7364 le16_to_cpu(resp->tx_stat_size) / 8 : 0;
7365 } else {
7366 bp->fw_rx_stats_ext_size = 0;
7367 bp->fw_tx_stats_ext_size = 0;
7368 }
7369 if (bp->fw_tx_stats_ext_size <=
7370 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
7371 mutex_unlock(&bp->hwrm_cmd_lock);
7372 bp->pri2cos_valid = 0;
7373 return rc;
7374 }
7375
7376 bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
7377 req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
7378
7379 rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT);
7380 if (!rc) {
7381 struct hwrm_queue_pri2cos_qcfg_output *resp2;
7382 u8 *pri2cos;
7383 int i, j;
7384
7385 resp2 = bp->hwrm_cmd_resp_addr;
7386 pri2cos = &resp2->pri0_cos_queue_id;
7387 for (i = 0; i < 8; i++) {
7388 u8 queue_id = pri2cos[i];
7389
7390 for (j = 0; j < bp->max_q; j++) {
7391 if (bp->q_ids[j] == queue_id)
7392 bp->pri2cos[i] = j;
7393 }
7394 }
7395 bp->pri2cos_valid = 1;
7396 }
7397 mutex_unlock(&bp->hwrm_cmd_lock);
7398 return rc;
7399 }
7400
7401 static int bnxt_hwrm_pcie_qstats(struct bnxt *bp)
7402 {
7403 struct hwrm_pcie_qstats_input req = {0};
7404
7405 if (!(bp->flags & BNXT_FLAG_PCIE_STATS))
7406 return 0;
7407
7408 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1);
7409 req.pcie_stat_size = cpu_to_le16(sizeof(struct pcie_ctx_hw_stats));
7410 req.pcie_stat_host_addr = cpu_to_le64(bp->hw_pcie_stats_map);
7411 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7412 }
7413
7414 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
7415 {
7416 if (bp->vxlan_port_cnt) {
7417 bnxt_hwrm_tunnel_dst_port_free(
7418 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
7419 }
7420 bp->vxlan_port_cnt = 0;
7421 if (bp->nge_port_cnt) {
7422 bnxt_hwrm_tunnel_dst_port_free(
7423 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
7424 }
7425 bp->nge_port_cnt = 0;
7426 }
7427
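/* Enable or disable TPA (hardware receive aggregation) on every VNIC.
 * Disabling is skipped when the firmware is in a fatal state since the
 * hardware can no longer process the request.
 */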
7428 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
7429 {
7430 int rc, i;
7431 u32 tpa_flags = 0;
7432
7433 if (set_tpa)
7434 tpa_flags = bp->flags & BNXT_FLAG_TPA;
7435 else if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
7436 return 0;
7437 for (i = 0; i < bp->nr_vnics; i++) {
7438 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
7439 if (rc) {
7440 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
7441 i, rc);
7442 return rc;
7443 }
7444 }
7445 return 0;
7446 }
7447
7448 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
7449 {
7450 int i;
7451
7452 for (i = 0; i < bp->nr_vnics; i++)
7453 bnxt_hwrm_vnic_set_rss(bp, i, false);
7454 }
7455
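/* Tear down VNIC state in the order the firmware expects: clear the L2
 * filters first, then the RSS settings and contexts (before the VNICs
 * on pre-P5 chips), undo the TPA settings, and finally free the VNICs.
 */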
7456 static void bnxt_clear_vnic(struct bnxt *bp)
7457 {
7458 if (!bp->vnic_info)
7459 return;
7460
7461 bnxt_hwrm_clear_vnic_filter(bp);
7462 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
7463 /* clear all RSS settings before freeing the vnic ctx */
7464 bnxt_hwrm_clear_vnic_rss(bp);
7465 bnxt_hwrm_vnic_ctx_free(bp);
7466 }
7467 /* before freeing the vnic, undo the vnic TPA settings */
7468 if (bp->flags & BNXT_FLAG_TPA)
7469 bnxt_set_tpa(bp, false);
7470 bnxt_hwrm_vnic_free(bp);
7471 if (bp->flags & BNXT_FLAG_CHIP_P5)
7472 bnxt_hwrm_vnic_ctx_free(bp);
7473 }
7474
7475 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
7476 bool irq_re_init)
7477 {
7478 bnxt_clear_vnic(bp);
7479 bnxt_hwrm_ring_free(bp, close_path);
7480 bnxt_hwrm_ring_grp_free(bp);
7481 if (irq_re_init) {
7482 bnxt_hwrm_stat_ctx_free(bp);
7483 bnxt_hwrm_free_tunnel_ports(bp);
7484 }
7485 }
7486
7487 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
7488 {
7489 struct hwrm_func_cfg_input req = {0};
7490 int rc;
7491
7492 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
7493 req.fid = cpu_to_le16(0xffff);
7494 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
7495 if (br_mode == BRIDGE_MODE_VEB)
7496 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
7497 else if (br_mode == BRIDGE_MODE_VEPA)
7498 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
7499 else
7500 return -EINVAL;
7501 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7502 return rc;
7503 }
7504
7505 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
7506 {
7507 struct hwrm_func_cfg_input req = {0};
7508 int rc;
7509
7510 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
7511 return 0;
7512
7513 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
7514 req.fid = cpu_to_le16(0xffff);
7515 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
7516 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
7517 if (size == 128)
7518 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
7519
7520 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7521 return rc;
7522 }
7523
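/* Set up one VNIC on pre-P5 chips: allocate its RSS context (plus a
 * second context on Nitro A0), configure the VNIC and its ring group,
 * enable RSS, and enable header-data split when aggregation rings are
 * in use.
 */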
7524 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
7525 {
7526 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
7527 int rc;
7528
7529 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
7530 goto skip_rss_ctx;
7531
7532 /* allocate context for vnic */
7533 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
7534 if (rc) {
7535 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
7536 vnic_id, rc);
7537 goto vnic_setup_err;
7538 }
7539 bp->rsscos_nr_ctxs++;
7540
7541 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7542 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
7543 if (rc) {
7544 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
7545 vnic_id, rc);
7546 goto vnic_setup_err;
7547 }
7548 bp->rsscos_nr_ctxs++;
7549 }
7550
7551 skip_rss_ctx:
7552 /* configure default vnic, ring grp */
7553 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
7554 if (rc) {
7555 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
7556 vnic_id, rc);
7557 goto vnic_setup_err;
7558 }
7559
7560 /* Enable RSS hashing on vnic */
7561 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
7562 if (rc) {
7563 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
7564 vnic_id, rc);
7565 goto vnic_setup_err;
7566 }
7567
7568 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
7569 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
7570 if (rc) {
7571 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
7572 vnic_id, rc);
7573 }
7574 }
7575
7576 vnic_setup_err:
7577 return rc;
7578 }
7579
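/* P5 variant of VNIC setup: one RSS context is allocated for every 64
 * RX rings, then RSS, the VNIC configuration and header-data split are
 * applied.
 */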
7580 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
7581 {
7582 int rc, i, nr_ctxs;
7583
7584 nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
7585 for (i = 0; i < nr_ctxs; i++) {
7586 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
7587 if (rc) {
7588 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
7589 vnic_id, i, rc);
7590 break;
7591 }
7592 bp->rsscos_nr_ctxs++;
7593 }
7594 if (i < nr_ctxs)
7595 return -ENOMEM;
7596
7597 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
7598 if (rc) {
7599 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
7600 vnic_id, rc);
7601 return rc;
7602 }
7603 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
7604 if (rc) {
7605 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
7606 vnic_id, rc);
7607 return rc;
7608 }
7609 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
7610 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
7611 if (rc) {
7612 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
7613 vnic_id, rc);
7614 }
7615 }
7616 return rc;
7617 }
7618
7619 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
7620 {
7621 if (bp->flags & BNXT_FLAG_CHIP_P5)
7622 return __bnxt_setup_vnic_p5(bp, vnic_id);
7623 else
7624 return __bnxt_setup_vnic(bp, vnic_id);
7625 }
7626
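/* For accelerated RFS on pre-P5 chips, allocate and set up one
 * additional VNIC per RX ring (VNIC 0 remains the default VNIC).
 */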
7627 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
7628 {
7629 #ifdef CONFIG_RFS_ACCEL
7630 int i, rc = 0;
7631
7632 if (bp->flags & BNXT_FLAG_CHIP_P5)
7633 return 0;
7634
7635 for (i = 0; i < bp->rx_nr_rings; i++) {
7636 struct bnxt_vnic_info *vnic;
7637 u16 vnic_id = i + 1;
7638 u16 ring_id = i;
7639
7640 if (vnic_id >= bp->nr_vnics)
7641 break;
7642
7643 vnic = &bp->vnic_info[vnic_id];
7644 vnic->flags |= BNXT_VNIC_RFS_FLAG;
7645 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
7646 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
7647 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
7648 if (rc) {
7649 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
7650 vnic_id, rc);
7651 break;
7652 }
7653 rc = bnxt_setup_vnic(bp, vnic_id);
7654 if (rc)
7655 break;
7656 }
7657 return rc;
7658 #else
7659 return 0;
7660 #endif
7661 }
7662
7663 /* Allow PF and VF with default VLAN to be in promiscuous mode */
7664 static bool bnxt_promisc_ok(struct bnxt *bp)
7665 {
7666 #ifdef CONFIG_BNXT_SRIOV
7667 if (BNXT_VF(bp) && !bp->vf.vlan)
7668 return false;
7669 #endif
7670 return true;
7671 }
7672
7673 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
7674 {
7675 int rc = 0;
7676
7677 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
7678 if (rc) {
7679 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
7680 rc);
7681 return rc;
7682 }
7683
7684 rc = bnxt_hwrm_vnic_cfg(bp, 1);
7685 if (rc) {
7686 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
7687 rc);
7688 return rc;
7689 }
7690 return rc;
7691 }
7692
7693 static int bnxt_cfg_rx_mode(struct bnxt *);
7694 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
7695
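/* Bring up the data path with the firmware: allocate stat contexts, HW
 * rings and ring groups, set up the default VNIC 0 (plus RFS VNICs and
 * TPA if enabled), program the unicast MAC filter and RX mask for
 * VNIC 0, and apply the interrupt coalescing settings.
 */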
7696 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
7697 {
7698 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
7699 int rc = 0;
7700 unsigned int rx_nr_rings = bp->rx_nr_rings;
7701
7702 if (irq_re_init) {
7703 rc = bnxt_hwrm_stat_ctx_alloc(bp);
7704 if (rc) {
7705 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
7706 rc);
7707 goto err_out;
7708 }
7709 }
7710
7711 rc = bnxt_hwrm_ring_alloc(bp);
7712 if (rc) {
7713 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
7714 goto err_out;
7715 }
7716
7717 rc = bnxt_hwrm_ring_grp_alloc(bp);
7718 if (rc) {
7719 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
7720 goto err_out;
7721 }
7722
7723 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
7724 rx_nr_rings--;
7725
7726 /* default vnic 0 */
7727 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
7728 if (rc) {
7729 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
7730 goto err_out;
7731 }
7732
7733 rc = bnxt_setup_vnic(bp, 0);
7734 if (rc)
7735 goto err_out;
7736
7737 if (bp->flags & BNXT_FLAG_RFS) {
7738 rc = bnxt_alloc_rfs_vnics(bp);
7739 if (rc)
7740 goto err_out;
7741 }
7742
7743 if (bp->flags & BNXT_FLAG_TPA) {
7744 rc = bnxt_set_tpa(bp, true);
7745 if (rc)
7746 goto err_out;
7747 }
7748
7749 if (BNXT_VF(bp))
7750 bnxt_update_vf_mac(bp);
7751
7752 /* Filter for default vnic 0 */
7753 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
7754 if (rc) {
7755 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
7756 goto err_out;
7757 }
7758 vnic->uc_filter_count = 1;
7759
7760 vnic->rx_mask = 0;
7761 if (bp->dev->flags & IFF_BROADCAST)
7762 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
7763
7764 if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
7765 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
7766
7767 if (bp->dev->flags & IFF_ALLMULTI) {
7768 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
7769 vnic->mc_list_count = 0;
7770 } else {
7771 u32 mask = 0;
7772
7773 bnxt_mc_list_updated(bp, &mask);
7774 vnic->rx_mask |= mask;
7775 }
7776
7777 rc = bnxt_cfg_rx_mode(bp);
7778 if (rc)
7779 goto err_out;
7780
7781 rc = bnxt_hwrm_set_coal(bp);
7782 if (rc)
7783 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
7784 rc);
7785
7786 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7787 rc = bnxt_setup_nitroa0_vnic(bp);
7788 if (rc)
7789 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
7790 rc);
7791 }
7792
7793 if (BNXT_VF(bp)) {
7794 bnxt_hwrm_func_qcfg(bp);
7795 netdev_update_features(bp->dev);
7796 }
7797
7798 return 0;
7799
7800 err_out:
7801 bnxt_hwrm_resource_free(bp, 0, true);
7802
7803 return rc;
7804 }
7805
7806 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
7807 {
7808 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
7809 return 0;
7810 }
7811
7812 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
7813 {
7814 bnxt_init_cp_rings(bp);
7815 bnxt_init_rx_rings(bp);
7816 bnxt_init_tx_rings(bp);
7817 bnxt_init_ring_grps(bp, irq_re_init);
7818 bnxt_init_vnics(bp);
7819
7820 return bnxt_init_chip(bp, irq_re_init);
7821 }
7822
7823 static int bnxt_set_real_num_queues(struct bnxt *bp)
7824 {
7825 int rc;
7826 struct net_device *dev = bp->dev;
7827
7828 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
7829 bp->tx_nr_rings_xdp);
7830 if (rc)
7831 return rc;
7832
7833 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
7834 if (rc)
7835 return rc;
7836
7837 #ifdef CONFIG_RFS_ACCEL
7838 if (bp->flags & BNXT_FLAG_RFS)
7839 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
7840 #endif
7841
7842 return rc;
7843 }
7844
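/* Trim the requested RX and TX ring counts so that they fit within
 * "max" completion rings.  In shared-ring mode each count is simply
 * capped at max; otherwise the larger of the two is decremented until
 * the sum fits.
 */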
7845 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7846 bool shared)
7847 {
7848 int _rx = *rx, _tx = *tx;
7849
7850 if (shared) {
7851 *rx = min_t(int, _rx, max);
7852 *tx = min_t(int, _tx, max);
7853 } else {
7854 if (max < 2)
7855 return -ENOMEM;
7856
7857 while (_rx + _tx > max) {
7858 if (_rx > _tx && _rx > 1)
7859 _rx--;
7860 else if (_tx > 1)
7861 _tx--;
7862 }
7863 *rx = _rx;
7864 *tx = _tx;
7865 }
7866 return 0;
7867 }
7868
7869 static void bnxt_setup_msix(struct bnxt *bp)
7870 {
7871 const int len = sizeof(bp->irq_tbl[0].name);
7872 struct net_device *dev = bp->dev;
7873 int tcs, i;
7874
7875 tcs = netdev_get_num_tc(dev);
7876 if (tcs > 1) {
7877 int i, off, count;
7878
7879 for (i = 0; i < tcs; i++) {
7880 count = bp->tx_nr_rings_per_tc;
7881 off = i * count;
7882 netdev_set_tc_queue(dev, i, count, off);
7883 }
7884 }
7885
7886 for (i = 0; i < bp->cp_nr_rings; i++) {
7887 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7888 char *attr;
7889
7890 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
7891 attr = "TxRx";
7892 else if (i < bp->rx_nr_rings)
7893 attr = "rx";
7894 else
7895 attr = "tx";
7896
7897 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
7898 attr, i);
7899 bp->irq_tbl[map_idx].handler = bnxt_msix;
7900 }
7901 }
7902
7903 static void bnxt_setup_inta(struct bnxt *bp)
7904 {
7905 const int len = sizeof(bp->irq_tbl[0].name);
7906
7907 if (netdev_get_num_tc(bp->dev))
7908 netdev_reset_tc(bp->dev);
7909
7910 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
7911 0);
7912 bp->irq_tbl[0].handler = bnxt_inta;
7913 }
7914
7915 static int bnxt_setup_int_mode(struct bnxt *bp)
7916 {
7917 int rc;
7918
7919 if (bp->flags & BNXT_FLAG_USING_MSIX)
7920 bnxt_setup_msix(bp);
7921 else
7922 bnxt_setup_inta(bp);
7923
7924 rc = bnxt_set_real_num_queues(bp);
7925 return rc;
7926 }
7927
7928 #ifdef CONFIG_RFS_ACCEL
7929 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
7930 {
7931 return bp->hw_resc.max_rsscos_ctxs;
7932 }
7933
7934 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
7935 {
7936 return bp->hw_resc.max_vnics;
7937 }
7938 #endif
7939
7940 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
7941 {
7942 return bp->hw_resc.max_stat_ctxs;
7943 }
7944
7945 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
7946 {
7947 return bp->hw_resc.max_cp_rings;
7948 }
7949
7950 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
7951 {
7952 unsigned int cp = bp->hw_resc.max_cp_rings;
7953
7954 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
7955 cp -= bnxt_get_ulp_msix_num(bp);
7956
7957 return cp;
7958 }
7959
7960 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
7961 {
7962 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7963
7964 if (bp->flags & BNXT_FLAG_CHIP_P5)
7965 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
7966
7967 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
7968 }
7969
7970 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
7971 {
7972 bp->hw_resc.max_irqs = max_irqs;
7973 }
7974
7975 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
7976 {
7977 unsigned int cp;
7978
7979 cp = bnxt_get_max_func_cp_rings_for_en(bp);
7980 if (bp->flags & BNXT_FLAG_CHIP_P5)
7981 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
7982 else
7983 return cp - bp->cp_nr_rings;
7984 }
7985
7986 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
7987 {
7988 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
7989 }
7990
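/* Return how many MSI-X vectors, beyond those used by the L2 rings,
 * can be made available to the ULP (RDMA) driver.
 */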
7991 int bnxt_get_avail_msix(struct bnxt *bp, int num)
7992 {
7993 int max_cp = bnxt_get_max_func_cp_rings(bp);
7994 int max_irq = bnxt_get_max_func_irqs(bp);
7995 int total_req = bp->cp_nr_rings + num;
7996 int max_idx, avail_msix;
7997
7998 max_idx = bp->total_irqs;
7999 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8000 max_idx = min_t(int, bp->total_irqs, max_cp);
8001 avail_msix = max_idx - bp->cp_nr_rings;
8002 if (!BNXT_NEW_RM(bp) || avail_msix >= num)
8003 return avail_msix;
8004
8005 if (max_irq < total_req) {
8006 num = max_irq - bp->cp_nr_rings;
8007 if (num <= 0)
8008 return 0;
8009 }
8010 return num;
8011 }
8012
8013 static int bnxt_get_num_msix(struct bnxt *bp)
8014 {
8015 if (!BNXT_NEW_RM(bp))
8016 return bnxt_get_max_func_irqs(bp);
8017
8018 return bnxt_nq_rings_in_use(bp);
8019 }
8020
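/* Enable MSI-X: request up to the supported number of vectors, record
 * the vector table, and trim the RX/TX ring counts to the vectors
 * actually granted (less any vectors reserved for the ULP driver).
 */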
8021 static int bnxt_init_msix(struct bnxt *bp)
8022 {
8023 int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
8024 struct msix_entry *msix_ent;
8025
8026 total_vecs = bnxt_get_num_msix(bp);
8027 max = bnxt_get_max_func_irqs(bp);
8028 if (total_vecs > max)
8029 total_vecs = max;
8030
8031 if (!total_vecs)
8032 return 0;
8033
8034 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
8035 if (!msix_ent)
8036 return -ENOMEM;
8037
8038 for (i = 0; i < total_vecs; i++) {
8039 msix_ent[i].entry = i;
8040 msix_ent[i].vector = 0;
8041 }
8042
8043 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
8044 min = 2;
8045
8046 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
8047 ulp_msix = bnxt_get_ulp_msix_num(bp);
8048 if (total_vecs < 0 || total_vecs < ulp_msix) {
8049 rc = -ENODEV;
8050 goto msix_setup_exit;
8051 }
8052
8053 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
8054 if (bp->irq_tbl) {
8055 for (i = 0; i < total_vecs; i++)
8056 bp->irq_tbl[i].vector = msix_ent[i].vector;
8057
8058 bp->total_irqs = total_vecs;
8059 /* Trim rings based upon num of vectors allocated */
8060 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
8061 total_vecs - ulp_msix, min == 1);
8062 if (rc)
8063 goto msix_setup_exit;
8064
8065 bp->cp_nr_rings = (min == 1) ?
8066 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
8067 bp->tx_nr_rings + bp->rx_nr_rings;
8068
8069 } else {
8070 rc = -ENOMEM;
8071 goto msix_setup_exit;
8072 }
8073 bp->flags |= BNXT_FLAG_USING_MSIX;
8074 kfree(msix_ent);
8075 return 0;
8076
8077 msix_setup_exit:
8078 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
8079 kfree(bp->irq_tbl);
8080 bp->irq_tbl = NULL;
8081 pci_disable_msix(bp->pdev);
8082 kfree(msix_ent);
8083 return rc;
8084 }
8085
8086 static int bnxt_init_inta(struct bnxt *bp)
8087 {
8088 bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
8089 if (!bp->irq_tbl)
8090 return -ENOMEM;
8091
8092 bp->total_irqs = 1;
8093 bp->rx_nr_rings = 1;
8094 bp->tx_nr_rings = 1;
8095 bp->cp_nr_rings = 1;
8096 bp->flags |= BNXT_FLAG_SHARED_RINGS;
8097 bp->irq_tbl[0].vector = bp->pdev->irq;
8098 return 0;
8099 }
8100
8101 static int bnxt_init_int_mode(struct bnxt *bp)
8102 {
8103 int rc = 0;
8104
8105 if (bp->flags & BNXT_FLAG_MSIX_CAP)
8106 rc = bnxt_init_msix(bp);
8107
8108 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
8109 /* fallback to INTA */
8110 rc = bnxt_init_inta(bp);
8111 }
8112 return rc;
8113 }
8114
8115 static void bnxt_clear_int_mode(struct bnxt *bp)
8116 {
8117 if (bp->flags & BNXT_FLAG_USING_MSIX)
8118 pci_disable_msix(bp->pdev);
8119
8120 kfree(bp->irq_tbl);
8121 bp->irq_tbl = NULL;
8122 bp->flags &= ~BNXT_FLAG_USING_MSIX;
8123 }
8124
8125 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
8126 {
8127 int tcs = netdev_get_num_tc(bp->dev);
8128 bool irq_cleared = false;
8129 int rc;
8130
8131 if (!bnxt_need_reserve_rings(bp))
8132 return 0;
8133
8134 if (irq_re_init && BNXT_NEW_RM(bp) &&
8135 bnxt_get_num_msix(bp) != bp->total_irqs) {
8136 bnxt_ulp_irq_stop(bp);
8137 bnxt_clear_int_mode(bp);
8138 irq_cleared = true;
8139 }
8140 rc = __bnxt_reserve_rings(bp);
8141 if (irq_cleared) {
8142 if (!rc)
8143 rc = bnxt_init_int_mode(bp);
8144 bnxt_ulp_irq_restart(bp, rc);
8145 }
8146 if (rc) {
8147 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
8148 return rc;
8149 }
8150 if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
8151 netdev_err(bp->dev, "tx ring reservation failure\n");
8152 netdev_reset_tc(bp->dev);
8153 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
8154 return -ENOMEM;
8155 }
8156 return 0;
8157 }
8158
8159 static void bnxt_free_irq(struct bnxt *bp)
8160 {
8161 struct bnxt_irq *irq;
8162 int i;
8163
8164 #ifdef CONFIG_RFS_ACCEL
8165 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
8166 bp->dev->rx_cpu_rmap = NULL;
8167 #endif
8168 if (!bp->irq_tbl || !bp->bnapi)
8169 return;
8170
8171 for (i = 0; i < bp->cp_nr_rings; i++) {
8172 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8173
8174 irq = &bp->irq_tbl[map_idx];
8175 if (irq->requested) {
8176 if (irq->have_cpumask) {
8177 irq_set_affinity_hint(irq->vector, NULL);
8178 free_cpumask_var(irq->cpu_mask);
8179 irq->have_cpumask = 0;
8180 }
8181 free_irq(irq->vector, bp->bnapi[i]);
8182 }
8183
8184 irq->requested = 0;
8185 }
8186 }
8187
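/* Request one IRQ per completion ring.  Vectors for RX rings are also
 * added to the RFS CPU reverse map, and each vector gets a CPU affinity
 * hint spread across the device's NUMA node.
 */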
8188 static int bnxt_request_irq(struct bnxt *bp)
8189 {
8190 int i, j, rc = 0;
8191 unsigned long flags = 0;
8192 #ifdef CONFIG_RFS_ACCEL
8193 struct cpu_rmap *rmap;
8194 #endif
8195
8196 rc = bnxt_setup_int_mode(bp);
8197 if (rc) {
8198 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
8199 rc);
8200 return rc;
8201 }
8202 #ifdef CONFIG_RFS_ACCEL
8203 rmap = bp->dev->rx_cpu_rmap;
8204 #endif
8205 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
8206 flags = IRQF_SHARED;
8207
8208 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
8209 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8210 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
8211
8212 #ifdef CONFIG_RFS_ACCEL
8213 if (rmap && bp->bnapi[i]->rx_ring) {
8214 rc = irq_cpu_rmap_add(rmap, irq->vector);
8215 if (rc)
8216 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
8217 j);
8218 j++;
8219 }
8220 #endif
8221 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
8222 bp->bnapi[i]);
8223 if (rc)
8224 break;
8225
8226 irq->requested = 1;
8227
8228 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
8229 int numa_node = dev_to_node(&bp->pdev->dev);
8230
8231 irq->have_cpumask = 1;
8232 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
8233 irq->cpu_mask);
8234 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
8235 if (rc) {
8236 netdev_warn(bp->dev,
8237 "Set affinity failed, IRQ = %d\n",
8238 irq->vector);
8239 break;
8240 }
8241 }
8242 }
8243 return rc;
8244 }
8245
8246 static void bnxt_del_napi(struct bnxt *bp)
8247 {
8248 int i;
8249
8250 if (!bp->bnapi)
8251 return;
8252
8253 for (i = 0; i < bp->cp_nr_rings; i++) {
8254 struct bnxt_napi *bnapi = bp->bnapi[i];
8255
8256 napi_hash_del(&bnapi->napi);
8257 netif_napi_del(&bnapi->napi);
8258 }
8259 /* We called napi_hash_del() before netif_napi_del(), so we need
8260 * to respect an RCU grace period before freeing napi structures.
8261 */
8262 synchronize_net();
8263 }
8264
8265 static void bnxt_init_napi(struct bnxt *bp)
8266 {
8267 int i;
8268 unsigned int cp_nr_rings = bp->cp_nr_rings;
8269 struct bnxt_napi *bnapi;
8270
8271 if (bp->flags & BNXT_FLAG_USING_MSIX) {
8272 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
8273
8274 if (bp->flags & BNXT_FLAG_CHIP_P5)
8275 poll_fn = bnxt_poll_p5;
8276 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8277 cp_nr_rings--;
8278 for (i = 0; i < cp_nr_rings; i++) {
8279 bnapi = bp->bnapi[i];
8280 netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
8281 }
8282 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8283 bnapi = bp->bnapi[cp_nr_rings];
8284 netif_napi_add(bp->dev, &bnapi->napi,
8285 bnxt_poll_nitroa0, 64);
8286 }
8287 } else {
8288 bnapi = bp->bnapi[0];
8289 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
8290 }
8291 }
8292
8293 static void bnxt_disable_napi(struct bnxt *bp)
8294 {
8295 int i;
8296
8297 if (!bp->bnapi)
8298 return;
8299
8300 for (i = 0; i < bp->cp_nr_rings; i++) {
8301 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
8302
8303 if (bp->bnapi[i]->rx_ring)
8304 cancel_work_sync(&cpr->dim.work);
8305
8306 napi_disable(&bp->bnapi[i]->napi);
8307 }
8308 }
8309
8310 static void bnxt_enable_napi(struct bnxt *bp)
8311 {
8312 int i;
8313
8314 for (i = 0; i < bp->cp_nr_rings; i++) {
8315 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
8316 bp->bnapi[i]->in_reset = false;
8317
8318 if (bp->bnapi[i]->rx_ring) {
8319 INIT_WORK(&cpr->dim.work, bnxt_dim_work);
8320 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
8321 }
8322 napi_enable(&bp->bnapi[i]->napi);
8323 }
8324 }
8325
8326 void bnxt_tx_disable(struct bnxt *bp)
8327 {
8328 int i;
8329 struct bnxt_tx_ring_info *txr;
8330
8331 if (bp->tx_ring) {
8332 for (i = 0; i < bp->tx_nr_rings; i++) {
8333 txr = &bp->tx_ring[i];
8334 txr->dev_state = BNXT_DEV_STATE_CLOSING;
8335 }
8336 }
8337 /* Stop all TX queues */
8338 netif_tx_disable(bp->dev);
8339 netif_carrier_off(bp->dev);
8340 }
8341
8342 void bnxt_tx_enable(struct bnxt *bp)
8343 {
8344 int i;
8345 struct bnxt_tx_ring_info *txr;
8346
8347 for (i = 0; i < bp->tx_nr_rings; i++) {
8348 txr = &bp->tx_ring[i];
8349 txr->dev_state = 0;
8350 }
8351 netif_tx_wake_all_queues(bp->dev);
8352 if (bp->link_info.link_up)
8353 netif_carrier_on(bp->dev);
8354 }
8355
8356 static void bnxt_report_link(struct bnxt *bp)
8357 {
8358 if (bp->link_info.link_up) {
8359 const char *duplex;
8360 const char *flow_ctrl;
8361 u32 speed;
8362 u16 fec;
8363
8364 netif_carrier_on(bp->dev);
8365 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
8366 duplex = "full";
8367 else
8368 duplex = "half";
8369 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
8370 flow_ctrl = "ON - receive & transmit";
8371 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
8372 flow_ctrl = "ON - transmit";
8373 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
8374 flow_ctrl = "ON - receive";
8375 else
8376 flow_ctrl = "none";
8377 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
8378 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
8379 speed, duplex, flow_ctrl);
8380 if (bp->flags & BNXT_FLAG_EEE_CAP)
8381 netdev_info(bp->dev, "EEE is %s\n",
8382 bp->eee.eee_active ? "active" :
8383 "not active");
8384 fec = bp->link_info.fec_cfg;
8385 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
8386 netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
8387 (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
8388 (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
8389 (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
8390 } else {
8391 netif_carrier_off(bp->dev);
8392 netdev_err(bp->dev, "NIC Link is Down\n");
8393 }
8394 }
8395
8396 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
8397 {
8398 int rc = 0;
8399 struct hwrm_port_phy_qcaps_input req = {0};
8400 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
8401 struct bnxt_link_info *link_info = &bp->link_info;
8402
8403 bp->flags &= ~BNXT_FLAG_EEE_CAP;
8404 if (bp->test_info)
8405 bp->test_info->flags &= ~BNXT_TEST_FL_EXT_LPBK;
8406 if (bp->hwrm_spec_code < 0x10201)
8407 return 0;
8408
8409 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
8410
8411 mutex_lock(&bp->hwrm_cmd_lock);
8412 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8413 if (rc)
8414 goto hwrm_phy_qcaps_exit;
8415
8416 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
8417 struct ethtool_eee *eee = &bp->eee;
8418 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
8419
8420 bp->flags |= BNXT_FLAG_EEE_CAP;
8421 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
8422 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
8423 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
8424 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
8425 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
8426 }
8427 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) {
8428 if (bp->test_info)
8429 bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK;
8430 }
8431 if (resp->supported_speeds_auto_mode)
8432 link_info->support_auto_speeds =
8433 le16_to_cpu(resp->supported_speeds_auto_mode);
8434
8435 bp->port_count = resp->port_cnt;
8436
8437 hwrm_phy_qcaps_exit:
8438 mutex_unlock(&bp->hwrm_cmd_lock);
8439 return rc;
8440 }
8441
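/* Query the PHY with HWRM_PORT_PHY_QCFG and cache the link state,
 * speed, pause, FEC and EEE settings.  If chng_link_state is set, the
 * carrier state is updated and link changes are reported; on a
 * single-function PF, advertised speeds that are no longer supported
 * are also pruned from the advertisement.
 */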
8442 static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
8443 {
8444 int rc = 0;
8445 struct bnxt_link_info *link_info = &bp->link_info;
8446 struct hwrm_port_phy_qcfg_input req = {0};
8447 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
8448 u8 link_up = link_info->link_up;
8449 u16 diff;
8450
8451 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
8452
8453 mutex_lock(&bp->hwrm_cmd_lock);
8454 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8455 if (rc) {
8456 mutex_unlock(&bp->hwrm_cmd_lock);
8457 return rc;
8458 }
8459
8460 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
8461 link_info->phy_link_status = resp->link;
8462 link_info->duplex = resp->duplex_cfg;
8463 if (bp->hwrm_spec_code >= 0x10800)
8464 link_info->duplex = resp->duplex_state;
8465 link_info->pause = resp->pause;
8466 link_info->auto_mode = resp->auto_mode;
8467 link_info->auto_pause_setting = resp->auto_pause;
8468 link_info->lp_pause = resp->link_partner_adv_pause;
8469 link_info->force_pause_setting = resp->force_pause;
8470 link_info->duplex_setting = resp->duplex_cfg;
8471 if (link_info->phy_link_status == BNXT_LINK_LINK)
8472 link_info->link_speed = le16_to_cpu(resp->link_speed);
8473 else
8474 link_info->link_speed = 0;
8475 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
8476 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
8477 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
8478 link_info->lp_auto_link_speeds =
8479 le16_to_cpu(resp->link_partner_adv_speeds);
8480 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
8481 link_info->phy_ver[0] = resp->phy_maj;
8482 link_info->phy_ver[1] = resp->phy_min;
8483 link_info->phy_ver[2] = resp->phy_bld;
8484 link_info->media_type = resp->media_type;
8485 link_info->phy_type = resp->phy_type;
8486 link_info->transceiver = resp->xcvr_pkg_type;
8487 link_info->phy_addr = resp->eee_config_phy_addr &
8488 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
8489 link_info->module_status = resp->module_status;
8490
8491 if (bp->flags & BNXT_FLAG_EEE_CAP) {
8492 struct ethtool_eee *eee = &bp->eee;
8493 u16 fw_speeds;
8494
8495 eee->eee_active = 0;
8496 if (resp->eee_config_phy_addr &
8497 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
8498 eee->eee_active = 1;
8499 fw_speeds = le16_to_cpu(
8500 resp->link_partner_adv_eee_link_speed_mask);
8501 eee->lp_advertised =
8502 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
8503 }
8504
8505 /* Pull initial EEE config */
8506 if (!chng_link_state) {
8507 if (resp->eee_config_phy_addr &
8508 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
8509 eee->eee_enabled = 1;
8510
8511 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
8512 eee->advertised =
8513 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
8514
8515 if (resp->eee_config_phy_addr &
8516 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
8517 __le32 tmr;
8518
8519 eee->tx_lpi_enabled = 1;
8520 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
8521 eee->tx_lpi_timer = le32_to_cpu(tmr) &
8522 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
8523 }
8524 }
8525 }
8526
8527 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
8528 if (bp->hwrm_spec_code >= 0x10504)
8529 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
8530
8531 /* TODO: need to add more logic to report VF link */
8532 if (chng_link_state) {
8533 if (link_info->phy_link_status == BNXT_LINK_LINK)
8534 link_info->link_up = 1;
8535 else
8536 link_info->link_up = 0;
8537 if (link_up != link_info->link_up)
8538 bnxt_report_link(bp);
8539 } else {
8540 /* always link down if not required to update link state */
8541 link_info->link_up = 0;
8542 }
8543 mutex_unlock(&bp->hwrm_cmd_lock);
8544
8545 if (!BNXT_SINGLE_PF(bp))
8546 return 0;
8547
8548 diff = link_info->support_auto_speeds ^ link_info->advertising;
8549 if ((link_info->support_auto_speeds | diff) !=
8550 link_info->support_auto_speeds) {
8551 /* An advertised speed is no longer supported, so we need to
8552 * update the advertisement settings. Caller holds RTNL
8553 * so we can modify link settings.
8554 */
8555 link_info->advertising = link_info->support_auto_speeds;
8556 if (link_info->autoneg & BNXT_AUTONEG_SPEED)
8557 bnxt_hwrm_set_link_setting(bp, true, false);
8558 }
8559 return 0;
8560 }
8561
8562 static void bnxt_get_port_module_status(struct bnxt *bp)
8563 {
8564 struct bnxt_link_info *link_info = &bp->link_info;
8565 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
8566 u8 module_status;
8567
8568 if (bnxt_update_link(bp, true))
8569 return;
8570
8571 module_status = link_info->module_status;
8572 switch (module_status) {
8573 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
8574 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
8575 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
8576 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
8577 bp->pf.port_id);
8578 if (bp->hwrm_spec_code >= 0x10201) {
8579 netdev_warn(bp->dev, "Module part number %s\n",
8580 resp->phy_vendor_partnumber);
8581 }
8582 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
8583 netdev_warn(bp->dev, "TX is disabled\n");
8584 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
8585 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
8586 }
8587 }
8588
8589 static void
8590 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
8591 {
8592 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
8593 if (bp->hwrm_spec_code >= 0x10201)
8594 req->auto_pause =
8595 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
8596 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
8597 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
8598 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
8599 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
8600 req->enables |=
8601 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
8602 } else {
8603 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
8604 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
8605 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
8606 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
8607 req->enables |=
8608 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
8609 if (bp->hwrm_spec_code >= 0x10201) {
8610 req->auto_pause = req->force_pause;
8611 req->enables |= cpu_to_le32(
8612 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
8613 }
8614 }
8615 }
8616
8617 static void bnxt_hwrm_set_link_common(struct bnxt *bp,
8618 struct hwrm_port_phy_cfg_input *req)
8619 {
8620 u8 autoneg = bp->link_info.autoneg;
8621 u16 fw_link_speed = bp->link_info.req_link_speed;
8622 u16 advertising = bp->link_info.advertising;
8623
8624 if (autoneg & BNXT_AUTONEG_SPEED) {
8625 req->auto_mode |=
8626 PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
8627
8628 req->enables |= cpu_to_le32(
8629 PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
8630 req->auto_link_speed_mask = cpu_to_le16(advertising);
8631
8632 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
8633 req->flags |=
8634 cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
8635 } else {
8636 req->force_link_speed = cpu_to_le16(fw_link_speed);
8637 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
8638 }
8639
8640 /* tell chimp that the setting takes effect immediately */
8641 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
8642 }
8643
8644 int bnxt_hwrm_set_pause(struct bnxt *bp)
8645 {
8646 struct hwrm_port_phy_cfg_input req = {0};
8647 int rc;
8648
8649 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
8650 bnxt_hwrm_set_pause_common(bp, &req);
8651
8652 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
8653 bp->link_info.force_link_chng)
8654 bnxt_hwrm_set_link_common(bp, &req);
8655
8656 mutex_lock(&bp->hwrm_cmd_lock);
8657 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8658 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
8659 /* since changing the pause setting doesn't trigger any link
8660 * change event, the driver needs to update the current pause
8661 * result upon successful return of the phy_cfg command
8662 */
8663 bp->link_info.pause =
8664 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
8665 bp->link_info.auto_pause_setting = 0;
8666 if (!bp->link_info.force_link_chng)
8667 bnxt_report_link(bp);
8668 }
8669 bp->link_info.force_link_chng = false;
8670 mutex_unlock(&bp->hwrm_cmd_lock);
8671 return rc;
8672 }
8673
8674 static void bnxt_hwrm_set_eee(struct bnxt *bp,
8675 struct hwrm_port_phy_cfg_input *req)
8676 {
8677 struct ethtool_eee *eee = &bp->eee;
8678
8679 if (eee->eee_enabled) {
8680 u16 eee_speeds;
8681 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
8682
8683 if (eee->tx_lpi_enabled)
8684 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
8685 else
8686 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
8687
8688 req->flags |= cpu_to_le32(flags);
8689 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
8690 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
8691 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
8692 } else {
8693 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
8694 }
8695 }
8696
8697 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
8698 {
8699 struct hwrm_port_phy_cfg_input req = {0};
8700
8701 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
8702 if (set_pause)
8703 bnxt_hwrm_set_pause_common(bp, &req);
8704
8705 bnxt_hwrm_set_link_common(bp, &req);
8706
8707 if (set_eee)
8708 bnxt_hwrm_set_eee(bp, &req);
8709 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8710 }
8711
8712 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
8713 {
8714 struct hwrm_port_phy_cfg_input req = {0};
8715
8716 if (!BNXT_SINGLE_PF(bp))
8717 return 0;
8718
8719 if (pci_num_vf(bp->pdev))
8720 return 0;
8721
8722 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
8723 req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
8724 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8725 }
8726
8727 static int bnxt_fw_init_one(struct bnxt *bp);
8728
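/* Tell the firmware that the interface is going up or down.  On the way
 * up, the response flags indicate whether resources have changed or a
 * hot firmware reset has completed; in that case the context memory,
 * interrupt mode and resource reservations are re-initialized.
 */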
8729 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
8730 {
8731 struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
8732 struct hwrm_func_drv_if_change_input req = {0};
8733 bool resc_reinit = false, fw_reset = false;
8734 u32 flags = 0;
8735 int rc;
8736
8737 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
8738 return 0;
8739
8740 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
8741 if (up)
8742 req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
8743 mutex_lock(&bp->hwrm_cmd_lock);
8744 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8745 if (!rc)
8746 flags = le32_to_cpu(resp->flags);
8747 mutex_unlock(&bp->hwrm_cmd_lock);
8748 if (rc)
8749 return rc;
8750
8751 if (!up)
8752 return 0;
8753
8754 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
8755 resc_reinit = true;
8756 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
8757 fw_reset = true;
8758
8759 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
8760 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
8761 return -ENODEV;
8762 }
8763 if (resc_reinit || fw_reset) {
8764 if (fw_reset) {
8765 bnxt_free_ctx_mem(bp);
8766 kfree(bp->ctx);
8767 bp->ctx = NULL;
8768 rc = bnxt_fw_init_one(bp);
8769 if (rc) {
8770 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
8771 return rc;
8772 }
8773 bnxt_clear_int_mode(bp);
8774 rc = bnxt_init_int_mode(bp);
8775 if (rc) {
8776 netdev_err(bp->dev, "init int mode failed\n");
8777 return rc;
8778 }
8779 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
8780 }
8781 if (BNXT_NEW_RM(bp)) {
8782 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8783
8784 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
8785 hw_resc->resv_cp_rings = 0;
8786 hw_resc->resv_stat_ctxs = 0;
8787 hw_resc->resv_irqs = 0;
8788 hw_resc->resv_tx_rings = 0;
8789 hw_resc->resv_rx_rings = 0;
8790 hw_resc->resv_hw_ring_grps = 0;
8791 hw_resc->resv_vnics = 0;
8792 if (!fw_reset) {
8793 bp->tx_nr_rings = 0;
8794 bp->rx_nr_rings = 0;
8795 }
8796 }
8797 }
8798 return 0;
8799 }
8800
8801 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
8802 {
8803 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
8804 struct hwrm_port_led_qcaps_input req = {0};
8805 struct bnxt_pf_info *pf = &bp->pf;
8806 int rc;
8807
8808 bp->num_leds = 0;
8809 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
8810 return 0;
8811
8812 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
8813 req.port_id = cpu_to_le16(pf->port_id);
8814 mutex_lock(&bp->hwrm_cmd_lock);
8815 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8816 if (rc) {
8817 mutex_unlock(&bp->hwrm_cmd_lock);
8818 return rc;
8819 }
8820 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
8821 int i;
8822
8823 bp->num_leds = resp->num_leds;
8824 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
8825 bp->num_leds);
8826 for (i = 0; i < bp->num_leds; i++) {
8827 struct bnxt_led_info *led = &bp->leds[i];
8828 __le16 caps = led->led_state_caps;
8829
8830 if (!led->led_group_id ||
8831 !BNXT_LED_ALT_BLINK_CAP(caps)) {
8832 bp->num_leds = 0;
8833 break;
8834 }
8835 }
8836 }
8837 mutex_unlock(&bp->hwrm_cmd_lock);
8838 return 0;
8839 }
8840
8841 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
8842 {
8843 struct hwrm_wol_filter_alloc_input req = {0};
8844 struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
8845 int rc;
8846
8847 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
8848 req.port_id = cpu_to_le16(bp->pf.port_id);
8849 req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
8850 req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
8851 memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
8852 mutex_lock(&bp->hwrm_cmd_lock);
8853 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8854 if (!rc)
8855 bp->wol_filter_id = resp->wol_filter_id;
8856 mutex_unlock(&bp->hwrm_cmd_lock);
8857 return rc;
8858 }
8859
8860 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
8861 {
8862 struct hwrm_wol_filter_free_input req = {0};
8863 int rc;
8864
8865 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
8866 req.port_id = cpu_to_le16(bp->pf.port_id);
8867 req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
8868 req.wol_filter_id = bp->wol_filter_id;
8869 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8870 return rc;
8871 }
8872
8873 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
8874 {
8875 struct hwrm_wol_filter_qcfg_input req = {0};
8876 struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
8877 u16 next_handle = 0;
8878 int rc;
8879
8880 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
8881 req.port_id = cpu_to_le16(bp->pf.port_id);
8882 req.handle = cpu_to_le16(handle);
8883 mutex_lock(&bp->hwrm_cmd_lock);
8884 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8885 if (!rc) {
8886 next_handle = le16_to_cpu(resp->next_handle);
8887 if (next_handle != 0) {
8888 if (resp->wol_type ==
8889 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
8890 bp->wol = 1;
8891 bp->wol_filter_id = resp->wol_filter_id;
8892 }
8893 }
8894 }
8895 mutex_unlock(&bp->hwrm_cmd_lock);
8896 return next_handle;
8897 }
8898
8899 static void bnxt_get_wol_settings(struct bnxt *bp)
8900 {
8901 u16 handle = 0;
8902
8903 bp->wol = 0;
8904 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
8905 return;
8906
8907 do {
8908 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
8909 } while (handle && handle != 0xffff);
8910 }
8911
8912 #ifdef CONFIG_BNXT_HWMON
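/* hwmon "temp1_input" show handler: query the chip temperature with
 * HWRM_TEMP_MONITOR_QUERY and report it in millidegrees Celsius.
 */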
8913 static ssize_t bnxt_show_temp(struct device *dev,
8914 struct device_attribute *devattr, char *buf)
8915 {
8916 struct hwrm_temp_monitor_query_input req = {0};
8917 struct hwrm_temp_monitor_query_output *resp;
8918 struct bnxt *bp = dev_get_drvdata(dev);
8919 u32 temp = 0;
8920
8921 resp = bp->hwrm_cmd_resp_addr;
8922 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
8923 mutex_lock(&bp->hwrm_cmd_lock);
8924 if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
8925 temp = resp->temp * 1000; /* display millidegree */
8926 mutex_unlock(&bp->hwrm_cmd_lock);
8927
8928 return sprintf(buf, "%u\n", temp);
8929 }
8930 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
8931
8932 static struct attribute *bnxt_attrs[] = {
8933 &sensor_dev_attr_temp1_input.dev_attr.attr,
8934 NULL
8935 };
8936 ATTRIBUTE_GROUPS(bnxt);
8937
8938 static void bnxt_hwmon_close(struct bnxt *bp)
8939 {
8940 if (bp->hwmon_dev) {
8941 hwmon_device_unregister(bp->hwmon_dev);
8942 bp->hwmon_dev = NULL;
8943 }
8944 }
8945
8946 static void bnxt_hwmon_open(struct bnxt *bp)
8947 {
8948 struct pci_dev *pdev = bp->pdev;
8949
8950 if (bp->hwmon_dev)
8951 return;
8952
8953 bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
8954 DRV_MODULE_NAME, bp,
8955 bnxt_groups);
8956 if (IS_ERR(bp->hwmon_dev)) {
8957 bp->hwmon_dev = NULL;
8958 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
8959 }
8960 }
8961 #else
8962 static void bnxt_hwmon_close(struct bnxt *bp)
8963 {
8964 }
8965
8966 static void bnxt_hwmon_open(struct bnxt *bp)
8967 {
8968 }
8969 #endif
8970
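/* Check whether the current EEE configuration is consistent: EEE requires
 * autoneg, and the advertised EEE speeds must be a subset of the advertised
 * link speeds.  Returns false (after trimming the EEE settings) if the PHY
 * needs to be reconfigured.
 */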
8971 static bool bnxt_eee_config_ok(struct bnxt *bp)
8972 {
8973 struct ethtool_eee *eee = &bp->eee;
8974 struct bnxt_link_info *link_info = &bp->link_info;
8975
8976 if (!(bp->flags & BNXT_FLAG_EEE_CAP))
8977 return true;
8978
8979 if (eee->eee_enabled) {
8980 u32 advertising =
8981 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
8982
8983 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
8984 eee->eee_enabled = 0;
8985 return false;
8986 }
8987 if (eee->advertised & ~advertising) {
8988 eee->advertised = advertising & eee->supported;
8989 return false;
8990 }
8991 }
8992 return true;
8993 }
8994
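/* Re-sync the PHY with the requested configuration.  Reads the current link
 * state from firmware and pushes a new link/pause/EEE configuration only if
 * it has drifted from what the driver requested, or if the carrier is down.
 */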
8995 static int bnxt_update_phy_setting(struct bnxt *bp)
8996 {
8997 int rc;
8998 bool update_link = false;
8999 bool update_pause = false;
9000 bool update_eee = false;
9001 struct bnxt_link_info *link_info = &bp->link_info;
9002
9003 rc = bnxt_update_link(bp, true);
9004 if (rc) {
9005 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
9006 rc);
9007 return rc;
9008 }
9009 if (!BNXT_SINGLE_PF(bp))
9010 return 0;
9011
9012 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
9013 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
9014 link_info->req_flow_ctrl)
9015 update_pause = true;
9016 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
9017 link_info->force_pause_setting != link_info->req_flow_ctrl)
9018 update_pause = true;
9019 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
9020 if (BNXT_AUTO_MODE(link_info->auto_mode))
9021 update_link = true;
9022 if (link_info->req_link_speed != link_info->force_link_speed)
9023 update_link = true;
9024 if (link_info->req_duplex != link_info->duplex_setting)
9025 update_link = true;
9026 } else {
9027 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
9028 update_link = true;
9029 if (link_info->advertising != link_info->auto_link_speeds)
9030 update_link = true;
9031 }
9032
9033 /* The last close may have shut down the link, so we need to call
9034 * PHY_CFG to bring it back up.
9035 */
9036 if (!netif_carrier_ok(bp->dev))
9037 update_link = true;
9038
9039 if (!bnxt_eee_config_ok(bp))
9040 update_eee = true;
9041
9042 if (update_link)
9043 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
9044 else if (update_pause)
9045 rc = bnxt_hwrm_set_pause(bp);
9046 if (rc) {
9047 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
9048 rc);
9049 return rc;
9050 }
9051
9052 return rc;
9053 }
9054
9055 /* Common routine to pre-map certain register blocks to a different GRC window.
9056  * A PF has 16 4K windows and a VF has 4 4K windows, but only 15 windows in
9057  * the PF and 3 windows in the VF can be customized to map different
9058  * register blocks.
9059 */
9060 static void bnxt_preset_reg_win(struct bnxt *bp)
9061 {
9062 if (BNXT_PF(bp)) {
9063 /* CAG registers map to GRC window #4 */
9064 writel(BNXT_CAG_REG_BASE,
9065 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
9066 }
9067 }
9068
9069 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
9070
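/* Core open path: reserve rings, allocate host memory, set up NAPI and IRQs
 * when irq_re_init is set, program the NIC, optionally re-apply PHY settings,
 * then enable interrupts and the TX queues.
 */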
9071 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
9072 {
9073 int rc = 0;
9074
9075 bnxt_preset_reg_win(bp);
9076 netif_carrier_off(bp->dev);
9077 if (irq_re_init) {
9078 /* Reserve rings now if none were reserved at driver probe. */
9079 rc = bnxt_init_dflt_ring_mode(bp);
9080 if (rc) {
9081 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
9082 return rc;
9083 }
9084 }
9085 rc = bnxt_reserve_rings(bp, irq_re_init);
9086 if (rc)
9087 return rc;
9088 if ((bp->flags & BNXT_FLAG_RFS) &&
9089 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
9090 /* disable RFS if falling back to INTA */
9091 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
9092 bp->flags &= ~BNXT_FLAG_RFS;
9093 }
9094
9095 rc = bnxt_alloc_mem(bp, irq_re_init);
9096 if (rc) {
9097 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
9098 goto open_err_free_mem;
9099 }
9100
9101 if (irq_re_init) {
9102 bnxt_init_napi(bp);
9103 rc = bnxt_request_irq(bp);
9104 if (rc) {
9105 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
9106 goto open_err_irq;
9107 }
9108 }
9109
9110 bnxt_enable_napi(bp);
9111 bnxt_debug_dev_init(bp);
9112
9113 rc = bnxt_init_nic(bp, irq_re_init);
9114 if (rc) {
9115 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
9116 goto open_err;
9117 }
9118
9119 if (link_re_init) {
9120 mutex_lock(&bp->link_lock);
9121 rc = bnxt_update_phy_setting(bp);
9122 mutex_unlock(&bp->link_lock);
9123 if (rc) {
9124 netdev_warn(bp->dev, "failed to update phy settings\n");
9125 if (BNXT_SINGLE_PF(bp)) {
9126 bp->link_info.phy_retry = true;
9127 bp->link_info.phy_retry_expires =
9128 jiffies + 5 * HZ;
9129 }
9130 }
9131 }
9132
9133 if (irq_re_init)
9134 udp_tunnel_get_rx_info(bp->dev);
9135
9136 set_bit(BNXT_STATE_OPEN, &bp->state);
9137 bnxt_enable_int(bp);
9138 /* Enable TX queues */
9139 bnxt_tx_enable(bp);
9140 mod_timer(&bp->timer, jiffies + bp->current_interval);
9141 /* Poll link status and check for SFP+ module status */
9142 bnxt_get_port_module_status(bp);
9143
9144 /* VF-reps may need to be re-opened after the PF is re-opened */
9145 if (BNXT_PF(bp))
9146 bnxt_vf_reps_open(bp);
9147 return 0;
9148
9149 open_err:
9150 bnxt_debug_dev_exit(bp);
9151 bnxt_disable_napi(bp);
9152
9153 open_err_irq:
9154 bnxt_del_napi(bp);
9155
9156 open_err_free_mem:
9157 bnxt_free_skbs(bp);
9158 bnxt_free_irq(bp);
9159 bnxt_free_mem(bp, true);
9160 return rc;
9161 }
9162
9163 /* rtnl_lock held */
9164 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
9165 {
9166 int rc = 0;
9167
9168 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
9169 if (rc) {
9170 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
9171 dev_close(bp->dev);
9172 }
9173 return rc;
9174 }
9175
9176 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
9177 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline
9178 * self tests.
9179 */
9180 int bnxt_half_open_nic(struct bnxt *bp)
9181 {
9182 int rc = 0;
9183
9184 rc = bnxt_alloc_mem(bp, false);
9185 if (rc) {
9186 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
9187 goto half_open_err;
9188 }
9189 rc = bnxt_init_nic(bp, false);
9190 if (rc) {
9191 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
9192 goto half_open_err;
9193 }
9194 return 0;
9195
9196 half_open_err:
9197 bnxt_free_skbs(bp);
9198 bnxt_free_mem(bp, false);
9199 dev_close(bp->dev);
9200 return rc;
9201 }
9202
9203 /* rtnl_lock held, this call can only be made after a previous successful
9204 * call to bnxt_half_open_nic().
9205 */
9206 void bnxt_half_close_nic(struct bnxt *bp)
9207 {
9208 bnxt_hwrm_resource_free(bp, false, false);
9209 bnxt_free_skbs(bp);
9210 bnxt_free_mem(bp, false);
9211 }
9212
9213 static int bnxt_open(struct net_device *dev)
9214 {
9215 struct bnxt *bp = netdev_priv(dev);
9216 int rc;
9217
9218 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
9219 netdev_err(bp->dev, "A previous firmware reset did not complete, aborting\n");
9220 return -ENODEV;
9221 }
9222
9223 rc = bnxt_hwrm_if_change(bp, true);
9224 if (rc)
9225 return rc;
9226 rc = __bnxt_open_nic(bp, true, true);
9227 if (rc) {
9228 bnxt_hwrm_if_change(bp, false);
9229 } else {
9230 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state) &&
9231 BNXT_PF(bp)) {
9232 struct bnxt_pf_info *pf = &bp->pf;
9233 int n = pf->active_vfs;
9234
9235 if (n)
9236 bnxt_cfg_hw_sriov(bp, &n, true);
9237 }
9238 bnxt_hwmon_open(bp);
9239 }
9240
9241 return rc;
9242 }
9243
9244 static bool bnxt_drv_busy(struct bnxt *bp)
9245 {
9246 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
9247 test_bit(BNXT_STATE_READ_STATS, &bp->state));
9248 }
9249
9250 static void bnxt_get_ring_stats(struct bnxt *bp,
9251 struct rtnl_link_stats64 *stats);
9252
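/* Core close path: stop TX, wait for slow-path and stats readers to drain,
 * shut down the rings, save the ring statistics, and free IRQs, NAPI and
 * memory as dictated by irq_re_init.
 */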
9253 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
9254 bool link_re_init)
9255 {
9256 /* Close the VF-reps before closing PF */
9257 if (BNXT_PF(bp))
9258 bnxt_vf_reps_close(bp);
9259
9260 /* Change device state to avoid TX queue wake-ups */
9261 bnxt_tx_disable(bp);
9262
9263 clear_bit(BNXT_STATE_OPEN, &bp->state);
9264 smp_mb__after_atomic();
9265 while (bnxt_drv_busy(bp))
9266 msleep(20);
9267
9268 /* Flush rings and disable interrupts */
9269 bnxt_shutdown_nic(bp, irq_re_init);
9270
9271 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
9272
9273 bnxt_debug_dev_exit(bp);
9274 bnxt_disable_napi(bp);
9275 del_timer_sync(&bp->timer);
9276 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) &&
9277 pci_is_enabled(bp->pdev))
9278 pci_disable_device(bp->pdev);
9279
9280 bnxt_free_skbs(bp);
9281
9282 /* Save ring stats before shutdown */
9283 if (bp->bnapi)
9284 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
9285 if (irq_re_init) {
9286 bnxt_free_irq(bp);
9287 bnxt_del_napi(bp);
9288 }
9289 bnxt_free_mem(bp, irq_re_init);
9290 }
9291
9292 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
9293 {
9294 int rc = 0;
9295
9296 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
9297 /* If we get here, it means firmware reset is in progress
9298 * while we are trying to close. We can safely proceed with
9299 * the close because we are holding rtnl_lock(). Some firmware
9300 * messages may fail as we proceed to close. We set the
9301 * ABORT_ERR flag here so that the FW reset thread will later
9302 * abort when it gets the rtnl_lock() and sees the flag.
9303 */
9304 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
9305 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9306 }
9307
9308 #ifdef CONFIG_BNXT_SRIOV
9309 if (bp->sriov_cfg) {
9310 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
9311 !bp->sriov_cfg,
9312 BNXT_SRIOV_CFG_WAIT_TMO);
9313 if (rc)
9314 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
9315 }
9316 #endif
9317 __bnxt_close_nic(bp, irq_re_init, link_re_init);
9318 return rc;
9319 }
9320
9321 static int bnxt_close(struct net_device *dev)
9322 {
9323 struct bnxt *bp = netdev_priv(dev);
9324
9325 bnxt_hwmon_close(bp);
9326 bnxt_close_nic(bp, true, true);
9327 bnxt_hwrm_shutdown_link(bp);
9328 bnxt_hwrm_if_change(bp, false);
9329 return 0;
9330 }
9331
9332 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
9333 u16 *val)
9334 {
9335 struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr;
9336 struct hwrm_port_phy_mdio_read_input req = {0};
9337 int rc;
9338
9339 if (bp->hwrm_spec_code < 0x10a00)
9340 return -EOPNOTSUPP;
9341
9342 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1);
9343 req.port_id = cpu_to_le16(bp->pf.port_id);
9344 req.phy_addr = phy_addr;
9345 req.reg_addr = cpu_to_le16(reg & 0x1f);
9346 if (mdio_phy_id_is_c45(phy_addr)) {
9347 req.cl45_mdio = 1;
9348 req.phy_addr = mdio_phy_id_prtad(phy_addr);
9349 req.dev_addr = mdio_phy_id_devad(phy_addr);
9350 req.reg_addr = cpu_to_le16(reg);
9351 }
9352
9353 mutex_lock(&bp->hwrm_cmd_lock);
9354 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9355 if (!rc)
9356 *val = le16_to_cpu(resp->reg_data);
9357 mutex_unlock(&bp->hwrm_cmd_lock);
9358 return rc;
9359 }
9360
9361 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
9362 u16 val)
9363 {
9364 struct hwrm_port_phy_mdio_write_input req = {0};
9365
9366 if (bp->hwrm_spec_code < 0x10a00)
9367 return -EOPNOTSUPP;
9368
9369 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1);
9370 req.port_id = cpu_to_le16(bp->pf.port_id);
9371 req.phy_addr = phy_addr;
9372 req.reg_addr = cpu_to_le16(reg & 0x1f);
9373 if (mdio_phy_id_is_c45(phy_addr)) {
9374 req.cl45_mdio = 1;
9375 req.phy_addr = mdio_phy_id_prtad(phy_addr);
9376 req.dev_addr = mdio_phy_id_devad(phy_addr);
9377 req.reg_addr = cpu_to_le16(reg);
9378 }
9379 req.reg_data = cpu_to_le16(val);
9380
9381 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9382 }
9383
9384 /* rtnl_lock held */
9385 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9386 {
9387 struct mii_ioctl_data *mdio = if_mii(ifr);
9388 struct bnxt *bp = netdev_priv(dev);
9389 int rc;
9390
9391 switch (cmd) {
9392 case SIOCGMIIPHY:
9393 mdio->phy_id = bp->link_info.phy_addr;
9394
9395 /* fall through */
9396 case SIOCGMIIREG: {
9397 u16 mii_regval = 0;
9398
9399 if (!netif_running(dev))
9400 return -EAGAIN;
9401
9402 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
9403 &mii_regval);
9404 mdio->val_out = mii_regval;
9405 return rc;
9406 }
9407
9408 case SIOCSMIIREG:
9409 if (!netif_running(dev))
9410 return -EAGAIN;
9411
9412 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
9413 mdio->val_in);
9414
9415 default:
9416 /* do nothing */
9417 break;
9418 }
9419 return -EOPNOTSUPP;
9420 }
9421
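/* Sum the per-completion-ring hardware counters into rtnl_link_stats64. */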
9422 static void bnxt_get_ring_stats(struct bnxt *bp,
9423 struct rtnl_link_stats64 *stats)
9424 {
9425 int i;
9426
9428 for (i = 0; i < bp->cp_nr_rings; i++) {
9429 struct bnxt_napi *bnapi = bp->bnapi[i];
9430 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
9431 struct ctx_hw_stats *hw_stats = cpr->hw_stats;
9432
9433 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
9434 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
9435 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
9436
9437 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
9438 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
9439 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
9440
9441 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
9442 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
9443 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
9444
9445 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
9446 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
9447 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
9448
9449 stats->rx_missed_errors +=
9450 le64_to_cpu(hw_stats->rx_discard_pkts);
9451
9452 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
9453
9454 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
9455 }
9456 }
9457
9458 static void bnxt_add_prev_stats(struct bnxt *bp,
9459 struct rtnl_link_stats64 *stats)
9460 {
9461 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
9462
9463 stats->rx_packets += prev_stats->rx_packets;
9464 stats->tx_packets += prev_stats->tx_packets;
9465 stats->rx_bytes += prev_stats->rx_bytes;
9466 stats->tx_bytes += prev_stats->tx_bytes;
9467 stats->rx_missed_errors += prev_stats->rx_missed_errors;
9468 stats->multicast += prev_stats->multicast;
9469 stats->tx_dropped += prev_stats->tx_dropped;
9470 }
9471
9472 static void
9473 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
9474 {
9475 struct bnxt *bp = netdev_priv(dev);
9476
9477 set_bit(BNXT_STATE_READ_STATS, &bp->state);
9478 /* Make sure bnxt_close_nic() sees that we are reading stats before
9479 * we check the BNXT_STATE_OPEN flag.
9480 */
9481 smp_mb__after_atomic();
9482 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
9483 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
9484 *stats = bp->net_stats_prev;
9485 return;
9486 }
9487
9488 bnxt_get_ring_stats(bp, stats);
9489 bnxt_add_prev_stats(bp, stats);
9490
9491 if (bp->flags & BNXT_FLAG_PORT_STATS) {
9492 struct rx_port_stats *rx = bp->hw_rx_port_stats;
9493 struct tx_port_stats *tx = bp->hw_tx_port_stats;
9494
9495 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
9496 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
9497 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
9498 le64_to_cpu(rx->rx_ovrsz_frames) +
9499 le64_to_cpu(rx->rx_runt_frames);
9500 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
9501 le64_to_cpu(rx->rx_jbr_frames);
9502 stats->collisions = le64_to_cpu(tx->tx_total_collisions);
9503 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
9504 stats->tx_errors = le64_to_cpu(tx->tx_err);
9505 }
9506 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
9507 }
9508
9509 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
9510 {
9511 struct net_device *dev = bp->dev;
9512 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9513 struct netdev_hw_addr *ha;
9514 u8 *haddr;
9515 int mc_count = 0;
9516 bool update = false;
9517 int off = 0;
9518
9519 netdev_for_each_mc_addr(ha, dev) {
9520 if (mc_count >= BNXT_MAX_MC_ADDRS) {
9521 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
9522 vnic->mc_list_count = 0;
9523 return false;
9524 }
9525 haddr = ha->addr;
9526 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
9527 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
9528 update = true;
9529 }
9530 off += ETH_ALEN;
9531 mc_count++;
9532 }
9533 if (mc_count)
9534 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
9535
9536 if (mc_count != vnic->mc_list_count) {
9537 vnic->mc_list_count = mc_count;
9538 update = true;
9539 }
9540 return update;
9541 }
9542
9543 static bool bnxt_uc_list_updated(struct bnxt *bp)
9544 {
9545 struct net_device *dev = bp->dev;
9546 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9547 struct netdev_hw_addr *ha;
9548 int off = 0;
9549
9550 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
9551 return true;
9552
9553 netdev_for_each_uc_addr(ha, dev) {
9554 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
9555 return true;
9556
9557 off += ETH_ALEN;
9558 }
9559 return false;
9560 }
9561
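/* ndo_set_rx_mode handler.  May be called in atomic context, so it only
 * computes the new RX mask and address lists and defers the HWRM update
 * to bnxt_sp_task() via BNXT_RX_MASK_SP_EVENT.
 */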
9562 static void bnxt_set_rx_mode(struct net_device *dev)
9563 {
9564 struct bnxt *bp = netdev_priv(dev);
9565 struct bnxt_vnic_info *vnic;
9566 bool mc_update = false;
9567 bool uc_update;
9568 u32 mask;
9569
9570 if (!test_bit(BNXT_STATE_OPEN, &bp->state))
9571 return;
9572
9573 vnic = &bp->vnic_info[0];
9574 mask = vnic->rx_mask;
9575 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
9576 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
9577 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
9578 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
9579
9580 if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
9581 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
9582
9583 uc_update = bnxt_uc_list_updated(bp);
9584
9585 if (dev->flags & IFF_BROADCAST)
9586 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
9587 if (dev->flags & IFF_ALLMULTI) {
9588 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
9589 vnic->mc_list_count = 0;
9590 } else {
9591 mc_update = bnxt_mc_list_updated(bp, &mask);
9592 }
9593
9594 if (mask != vnic->rx_mask || uc_update || mc_update) {
9595 vnic->rx_mask = mask;
9596
9597 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
9598 bnxt_queue_sp_work(bp);
9599 }
9600 }
9601
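/* Apply the RX mode computed by bnxt_set_rx_mode(): re-program the unicast
 * L2 filters if the UC list changed, then update the VNIC RX mask.  Falls
 * back to ALL_MCAST if the multicast filter update fails.
 */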
9602 static int bnxt_cfg_rx_mode(struct bnxt *bp)
9603 {
9604 struct net_device *dev = bp->dev;
9605 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9606 struct netdev_hw_addr *ha;
9607 int i, off = 0, rc;
9608 bool uc_update;
9609
9610 netif_addr_lock_bh(dev);
9611 uc_update = bnxt_uc_list_updated(bp);
9612 netif_addr_unlock_bh(dev);
9613
9614 if (!uc_update)
9615 goto skip_uc;
9616
9617 mutex_lock(&bp->hwrm_cmd_lock);
9618 for (i = 1; i < vnic->uc_filter_count; i++) {
9619 struct hwrm_cfa_l2_filter_free_input req = {0};
9620
9621 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
9622 -1);
9623
9624 req.l2_filter_id = vnic->fw_l2_filter_id[i];
9625
9626 rc = _hwrm_send_message(bp, &req, sizeof(req),
9627 HWRM_CMD_TIMEOUT);
9628 }
9629 mutex_unlock(&bp->hwrm_cmd_lock);
9630
9631 vnic->uc_filter_count = 1;
9632
9633 netif_addr_lock_bh(dev);
9634 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
9635 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
9636 } else {
9637 netdev_for_each_uc_addr(ha, dev) {
9638 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
9639 off += ETH_ALEN;
9640 vnic->uc_filter_count++;
9641 }
9642 }
9643 netif_addr_unlock_bh(dev);
9644
9645 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
9646 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
9647 if (rc) {
9648 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
9649 rc);
9650 vnic->uc_filter_count = i;
9651 return rc;
9652 }
9653 }
9654
9655 skip_uc:
9656 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
9657 if (rc && vnic->mc_list_count) {
9658 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
9659 rc);
9660 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
9661 vnic->mc_list_count = 0;
9662 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
9663 }
9664 if (rc)
9665 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
9666 rc);
9667
9668 return rc;
9669 }
9670
9671 static bool bnxt_can_reserve_rings(struct bnxt *bp)
9672 {
9673 #ifdef CONFIG_BNXT_SRIOV
9674 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
9675 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9676
9677 /* No minimum rings were provisioned by the PF. Don't
9678 * reserve rings by default when device is down.
9679 */
9680 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
9681 return true;
9682
9683 if (!netif_running(bp->dev))
9684 return false;
9685 }
9686 #endif
9687 return true;
9688 }
9689
9690 /* If the chip and firmware support RFS */
9691 static bool bnxt_rfs_supported(struct bnxt *bp)
9692 {
9693 if (bp->flags & BNXT_FLAG_CHIP_P5) {
9694 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX)
9695 return true;
9696 return false;
9697 }
9698 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
9699 return true;
9700 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
9701 return true;
9702 return false;
9703 }
9704
9705 /* If runtime conditions support RFS */
9706 static bool bnxt_rfs_capable(struct bnxt *bp)
9707 {
9708 #ifdef CONFIG_RFS_ACCEL
9709 int vnics, max_vnics, max_rss_ctxs;
9710
9711 if (bp->flags & BNXT_FLAG_CHIP_P5)
9712 return bnxt_rfs_supported(bp);
9713 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
9714 return false;
9715
9716 vnics = 1 + bp->rx_nr_rings;
9717 max_vnics = bnxt_get_max_func_vnics(bp);
9718 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
9719
9720 /* RSS contexts not a limiting factor */
9721 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
9722 max_rss_ctxs = max_vnics;
9723 if (vnics > max_vnics || vnics > max_rss_ctxs) {
9724 if (bp->rx_nr_rings > 1)
9725 netdev_warn(bp->dev,
9726 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
9727 min(max_rss_ctxs - 1, max_vnics - 1));
9728 return false;
9729 }
9730
9731 if (!BNXT_NEW_RM(bp))
9732 return true;
9733
9734 if (vnics == bp->hw_resc.resv_vnics)
9735 return true;
9736
9737 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
9738 if (vnics <= bp->hw_resc.resv_vnics)
9739 return true;
9740
9741 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
9742 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
9743 return false;
9744 #else
9745 return false;
9746 #endif
9747 }
9748
9749 static netdev_features_t bnxt_fix_features(struct net_device *dev,
9750 netdev_features_t features)
9751 {
9752 struct bnxt *bp = netdev_priv(dev);
9753
9754 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
9755 features &= ~NETIF_F_NTUPLE;
9756
9757 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
9758 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
9759
9760 if (!(features & NETIF_F_GRO))
9761 features &= ~NETIF_F_GRO_HW;
9762
9763 if (features & NETIF_F_GRO_HW)
9764 features &= ~NETIF_F_LRO;
9765
9766 /* Both CTAG and STAG VLAN acceleration on the RX side have to be
9767 * turned on or off together.
9768 */
9769 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
9770 (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
9771 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
9772 features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
9773 NETIF_F_HW_VLAN_STAG_RX);
9774 else
9775 features |= NETIF_F_HW_VLAN_CTAG_RX |
9776 NETIF_F_HW_VLAN_STAG_RX;
9777 }
9778 #ifdef CONFIG_BNXT_SRIOV
9779 if (BNXT_VF(bp)) {
9780 if (bp->vf.vlan) {
9781 features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
9782 NETIF_F_HW_VLAN_STAG_RX);
9783 }
9784 }
9785 #endif
9786 return features;
9787 }
9788
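/* ndo_set_features handler.  Translate netdev features into BNXT_FLAG_*
 * bits and either update TPA in place or go through a close/open cycle
 * when the change requires ring re-initialization.
 */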
9789 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
9790 {
9791 struct bnxt *bp = netdev_priv(dev);
9792 u32 flags = bp->flags;
9793 u32 changes;
9794 int rc = 0;
9795 bool re_init = false;
9796 bool update_tpa = false;
9797
9798 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
9799 if (features & NETIF_F_GRO_HW)
9800 flags |= BNXT_FLAG_GRO;
9801 else if (features & NETIF_F_LRO)
9802 flags |= BNXT_FLAG_LRO;
9803
9804 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
9805 flags &= ~BNXT_FLAG_TPA;
9806
9807 if (features & NETIF_F_HW_VLAN_CTAG_RX)
9808 flags |= BNXT_FLAG_STRIP_VLAN;
9809
9810 if (features & NETIF_F_NTUPLE)
9811 flags |= BNXT_FLAG_RFS;
9812
9813 changes = flags ^ bp->flags;
9814 if (changes & BNXT_FLAG_TPA) {
9815 update_tpa = true;
9816 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
9817 (flags & BNXT_FLAG_TPA) == 0 ||
9818 (bp->flags & BNXT_FLAG_CHIP_P5))
9819 re_init = true;
9820 }
9821
9822 if (changes & ~BNXT_FLAG_TPA)
9823 re_init = true;
9824
9825 if (flags != bp->flags) {
9826 u32 old_flags = bp->flags;
9827
9828 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
9829 bp->flags = flags;
9830 if (update_tpa)
9831 bnxt_set_ring_params(bp);
9832 return rc;
9833 }
9834
9835 if (re_init) {
9836 bnxt_close_nic(bp, false, false);
9837 bp->flags = flags;
9838 if (update_tpa)
9839 bnxt_set_ring_params(bp);
9840
9841 return bnxt_open_nic(bp, false, false);
9842 }
9843 if (update_tpa) {
9844 bp->flags = flags;
9845 rc = bnxt_set_tpa(bp,
9846 (flags & BNXT_FLAG_TPA) ?
9847 true : false);
9848 if (rc)
9849 bp->flags = old_flags;
9850 }
9851 }
9852 return rc;
9853 }
9854
9855 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
9856 u32 ring_id, u32 *prod, u32 *cons)
9857 {
9858 struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
9859 struct hwrm_dbg_ring_info_get_input req = {0};
9860 int rc;
9861
9862 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
9863 req.ring_type = ring_type;
9864 req.fw_ring_id = cpu_to_le32(ring_id);
9865 mutex_lock(&bp->hwrm_cmd_lock);
9866 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9867 if (!rc) {
9868 *prod = le32_to_cpu(resp->producer_index);
9869 *cons = le32_to_cpu(resp->consumer_index);
9870 }
9871 mutex_unlock(&bp->hwrm_cmd_lock);
9872 return rc;
9873 }
9874
9875 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
9876 {
9877 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
9878 int i = bnapi->index;
9879
9880 if (!txr)
9881 return;
9882
9883 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
9884 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
9885 txr->tx_cons);
9886 }
9887
9888 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
9889 {
9890 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
9891 int i = bnapi->index;
9892
9893 if (!rxr)
9894 return;
9895
9896 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
9897 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
9898 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
9899 rxr->rx_sw_agg_prod);
9900 }
9901
9902 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
9903 {
9904 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
9905 int i = bnapi->index;
9906
9907 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
9908 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
9909 }
9910
9911 static void bnxt_dbg_dump_states(struct bnxt *bp)
9912 {
9913 int i;
9914 struct bnxt_napi *bnapi;
9915
9916 for (i = 0; i < bp->cp_nr_rings; i++) {
9917 bnapi = bp->bnapi[i];
9918 if (netif_msg_drv(bp)) {
9919 bnxt_dump_tx_sw_state(bnapi);
9920 bnxt_dump_rx_sw_state(bnapi);
9921 bnxt_dump_cp_sw_state(bnapi);
9922 }
9923 }
9924 }
9925
9926 static void bnxt_reset_task(struct bnxt *bp, bool silent)
9927 {
9928 if (!silent)
9929 bnxt_dbg_dump_states(bp);
9930 if (netif_running(bp->dev)) {
9931 int rc;
9932
9933 if (!silent)
9934 bnxt_ulp_stop(bp);
9935 bnxt_close_nic(bp, false, false);
9936 rc = bnxt_open_nic(bp, false, false);
9937 if (!silent && !rc)
9938 bnxt_ulp_start(bp);
9939 }
9940 }
9941
9942 static void bnxt_tx_timeout(struct net_device *dev)
9943 {
9944 struct bnxt *bp = netdev_priv(dev);
9945
9946 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
9947 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
9948 bnxt_queue_sp_work(bp);
9949 }
9950
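/* Periodic firmware health check, called from bnxt_timer().  If the
 * heartbeat register stops advancing or the reset counter changes,
 * schedule firmware exception handling on the slow path task.
 */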
9951 static void bnxt_fw_health_check(struct bnxt *bp)
9952 {
9953 struct bnxt_fw_health *fw_health = bp->fw_health;
9954 u32 val;
9955
9956 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
9957 return;
9958
9959 if (fw_health->tmr_counter) {
9960 fw_health->tmr_counter--;
9961 return;
9962 }
9963
9964 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
9965 if (val == fw_health->last_fw_heartbeat)
9966 goto fw_reset;
9967
9968 fw_health->last_fw_heartbeat = val;
9969
9970 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
9971 if (val != fw_health->last_fw_reset_cnt)
9972 goto fw_reset;
9973
9974 fw_health->tmr_counter = fw_health->tmr_multiplier;
9975 return;
9976
9977 fw_reset:
9978 set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
9979 bnxt_queue_sp_work(bp);
9980 }
9981
9982 static void bnxt_timer(struct timer_list *t)
9983 {
9984 struct bnxt *bp = from_timer(bp, t, timer);
9985 struct net_device *dev = bp->dev;
9986
9987 if (!netif_running(dev))
9988 return;
9989
9990 if (atomic_read(&bp->intr_sem) != 0)
9991 goto bnxt_restart_timer;
9992
9993 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
9994 bnxt_fw_health_check(bp);
9995
9996 if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
9997 bp->stats_coal_ticks) {
9998 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
9999 bnxt_queue_sp_work(bp);
10000 }
10001
10002 if (bnxt_tc_flower_enabled(bp)) {
10003 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
10004 bnxt_queue_sp_work(bp);
10005 }
10006
10007 if (bp->link_info.phy_retry) {
10008 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
10009 bp->link_info.phy_retry = 0;
10010 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
10011 } else {
10012 set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
10013 bnxt_queue_sp_work(bp);
10014 }
10015 }
10016
10017 if ((bp->flags & BNXT_FLAG_CHIP_P5) && netif_carrier_ok(dev)) {
10018 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
10019 bnxt_queue_sp_work(bp);
10020 }
10021 bnxt_restart_timer:
10022 mod_timer(&bp->timer, jiffies + bp->current_interval);
10023 }
10024
10025 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
10026 {
10027 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
10028 * set. If the device is being closed, bnxt_close() may be holding
10029 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
10030 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
10031 */
10032 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10033 rtnl_lock();
10034 }
10035
10036 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
10037 {
10038 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10039 rtnl_unlock();
10040 }
10041
10042 /* Only called from bnxt_sp_task() */
10043 static void bnxt_reset(struct bnxt *bp, bool silent)
10044 {
10045 bnxt_rtnl_lock_sp(bp);
10046 if (test_bit(BNXT_STATE_OPEN, &bp->state))
10047 bnxt_reset_task(bp, silent);
10048 bnxt_rtnl_unlock_sp(bp);
10049 }
10050
10051 static void bnxt_fw_reset_close(struct bnxt *bp)
10052 {
10053 __bnxt_close_nic(bp, true, false);
10054 bnxt_ulp_irq_stop(bp);
10055 bnxt_clear_int_mode(bp);
10056 bnxt_hwrm_func_drv_unrgtr(bp);
10057 bnxt_free_ctx_mem(bp);
10058 kfree(bp->ctx);
10059 bp->ctx = NULL;
10060 }
10061
10062 static bool is_bnxt_fw_ok(struct bnxt *bp)
10063 {
10064 struct bnxt_fw_health *fw_health = bp->fw_health;
10065 bool no_heartbeat = false, has_reset = false;
10066 u32 val;
10067
10068 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
10069 if (val == fw_health->last_fw_heartbeat)
10070 no_heartbeat = true;
10071
10072 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
10073 if (val != fw_health->last_fw_reset_cnt)
10074 has_reset = true;
10075
10076 if (!no_heartbeat && has_reset)
10077 return true;
10078
10079 return false;
10080 }
10081
10082 /* rtnl_lock is acquired before calling this function */
10083 static void bnxt_force_fw_reset(struct bnxt *bp)
10084 {
10085 struct bnxt_fw_health *fw_health = bp->fw_health;
10086 u32 wait_dsecs;
10087
10088 if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
10089 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10090 return;
10091
10092 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10093 bnxt_fw_reset_close(bp);
10094 wait_dsecs = fw_health->master_func_wait_dsecs;
10095 if (fw_health->master) {
10096 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
10097 wait_dsecs = 0;
10098 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
10099 } else {
10100 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
10101 wait_dsecs = fw_health->normal_func_wait_dsecs;
10102 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
10103 }
10104
10105 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
10106 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
10107 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
10108 }
10109
10110 void bnxt_fw_exception(struct bnxt *bp)
10111 {
10112 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
10113 bnxt_rtnl_lock_sp(bp);
10114 bnxt_force_fw_reset(bp);
10115 bnxt_rtnl_unlock_sp(bp);
10116 }
10117
10118 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
10119 * < 0 on error.
10120 */
10121 static int bnxt_get_registered_vfs(struct bnxt *bp)
10122 {
10123 #ifdef CONFIG_BNXT_SRIOV
10124 int rc;
10125
10126 if (!BNXT_PF(bp))
10127 return 0;
10128
10129 rc = bnxt_hwrm_func_qcfg(bp);
10130 if (rc) {
10131 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
10132 return rc;
10133 }
10134 if (bp->pf.registered_vfs)
10135 return bp->pf.registered_vfs;
10136 if (bp->sriov_cfg)
10137 return 1;
10138 #endif
10139 return 0;
10140 }
10141
10142 void bnxt_fw_reset(struct bnxt *bp)
10143 {
10144 bnxt_rtnl_lock_sp(bp);
10145 if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
10146 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10147 int n = 0, tmo;
10148
10149 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10150 if (bp->pf.active_vfs &&
10151 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
10152 n = bnxt_get_registered_vfs(bp);
10153 if (n < 0) {
10154 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
10155 n);
10156 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10157 dev_close(bp->dev);
10158 goto fw_reset_exit;
10159 } else if (n > 0) {
10160 u16 vf_tmo_dsecs = n * 10;
10161
10162 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
10163 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
10164 bp->fw_reset_state =
10165 BNXT_FW_RESET_STATE_POLL_VF;
10166 bnxt_queue_fw_reset_work(bp, HZ / 10);
10167 goto fw_reset_exit;
10168 }
10169 bnxt_fw_reset_close(bp);
10170 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
10171 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
10172 tmo = HZ / 10;
10173 } else {
10174 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
10175 tmo = bp->fw_reset_min_dsecs * HZ / 10;
10176 }
10177 bnxt_queue_fw_reset_work(bp, tmo);
10178 }
10179 fw_reset_exit:
10180 bnxt_rtnl_unlock_sp(bp);
10181 }
10182
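/* P5 chips only: look for completion rings that have work pending but whose
 * consumer index has not advanced, indicating a missed NQ interrupt, and
 * query the ring state from firmware for diagnostics.
 */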
10183 static void bnxt_chk_missed_irq(struct bnxt *bp)
10184 {
10185 int i;
10186
10187 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
10188 return;
10189
10190 for (i = 0; i < bp->cp_nr_rings; i++) {
10191 struct bnxt_napi *bnapi = bp->bnapi[i];
10192 struct bnxt_cp_ring_info *cpr;
10193 u32 fw_ring_id;
10194 int j;
10195
10196 if (!bnapi)
10197 continue;
10198
10199 cpr = &bnapi->cp_ring;
10200 for (j = 0; j < 2; j++) {
10201 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
10202 u32 val[2];
10203
10204 if (!cpr2 || cpr2->has_more_work ||
10205 !bnxt_has_work(bp, cpr2))
10206 continue;
10207
10208 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
10209 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
10210 continue;
10211 }
10212 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
10213 bnxt_dbg_hwrm_ring_info_get(bp,
10214 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
10215 fw_ring_id, &val[0], &val[1]);
10216 cpr->missed_irqs++;
10217 }
10218 }
10219 }
10220
10221 static void bnxt_cfg_ntp_filters(struct bnxt *);
10222
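/* Slow path workqueue handler.  Services the deferred events recorded in
 * bp->sp_event (RX mode, tunnel ports, statistics, link changes, resets)
 * outside of atomic context.
 */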
10223 static void bnxt_sp_task(struct work_struct *work)
10224 {
10225 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
10226
10227 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10228 smp_mb__after_atomic();
10229 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10230 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10231 return;
10232 }
10233
10234 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
10235 bnxt_cfg_rx_mode(bp);
10236
10237 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
10238 bnxt_cfg_ntp_filters(bp);
10239 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
10240 bnxt_hwrm_exec_fwd_req(bp);
10241 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
10242 bnxt_hwrm_tunnel_dst_port_alloc(
10243 bp, bp->vxlan_port,
10244 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
10245 }
10246 if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
10247 bnxt_hwrm_tunnel_dst_port_free(
10248 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
10249 }
10250 if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
10251 bnxt_hwrm_tunnel_dst_port_alloc(
10252 bp, bp->nge_port,
10253 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
10254 }
10255 if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
10256 bnxt_hwrm_tunnel_dst_port_free(
10257 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
10258 }
10259 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
10260 bnxt_hwrm_port_qstats(bp);
10261 bnxt_hwrm_port_qstats_ext(bp);
10262 bnxt_hwrm_pcie_qstats(bp);
10263 }
10264
10265 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
10266 int rc;
10267
10268 mutex_lock(&bp->link_lock);
10269 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
10270 &bp->sp_event))
10271 bnxt_hwrm_phy_qcaps(bp);
10272
10273 rc = bnxt_update_link(bp, true);
10274 mutex_unlock(&bp->link_lock);
10275 if (rc)
10276 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
10277 rc);
10278 }
10279 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
10280 int rc;
10281
10282 mutex_lock(&bp->link_lock);
10283 rc = bnxt_update_phy_setting(bp);
10284 mutex_unlock(&bp->link_lock);
10285 if (rc) {
10286 netdev_warn(bp->dev, "update phy settings retry failed\n");
10287 } else {
10288 bp->link_info.phy_retry = false;
10289 netdev_info(bp->dev, "update phy settings retry succeeded\n");
10290 }
10291 }
10292 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
10293 mutex_lock(&bp->link_lock);
10294 bnxt_get_port_module_status(bp);
10295 mutex_unlock(&bp->link_lock);
10296 }
10297
10298 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
10299 bnxt_tc_flow_stats_work(bp);
10300
10301 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
10302 bnxt_chk_missed_irq(bp);
10303
10304 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
10305 * must be the last functions to be called before exiting.
10306 */
10307 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
10308 bnxt_reset(bp, false);
10309
10310 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
10311 bnxt_reset(bp, true);
10312
10313 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event))
10314 bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT);
10315
10316 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
10317 if (!is_bnxt_fw_ok(bp))
10318 bnxt_devlink_health_report(bp,
10319 BNXT_FW_EXCEPTION_SP_EVENT);
10320 }
10321
10322 smp_mb__before_atomic();
10323 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10324 }
10325
10326 /* Under rtnl_lock */
10327 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
10328 int tx_xdp)
10329 {
10330 int max_rx, max_tx, tx_sets = 1;
10331 int tx_rings_needed, stats;
10332 int rx_rings = rx;
10333 int cp, vnics, rc;
10334
10335 if (tcs)
10336 tx_sets = tcs;
10337
10338 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
10339 if (rc)
10340 return rc;
10341
10342 if (max_rx < rx)
10343 return -ENOMEM;
10344
10345 tx_rings_needed = tx * tx_sets + tx_xdp;
10346 if (max_tx < tx_rings_needed)
10347 return -ENOMEM;
10348
10349 vnics = 1;
10350 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
10351 vnics += rx_rings;
10352
10353 if (bp->flags & BNXT_FLAG_AGG_RINGS)
10354 rx_rings <<= 1;
10355 cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
10356 stats = cp;
10357 if (BNXT_NEW_RM(bp)) {
10358 cp += bnxt_get_ulp_msix_num(bp);
10359 stats += bnxt_get_ulp_stat_ctxs(bp);
10360 }
10361 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
10362 stats, vnics);
10363 }
10364
10365 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
10366 {
10367 if (bp->bar2) {
10368 pci_iounmap(pdev, bp->bar2);
10369 bp->bar2 = NULL;
10370 }
10371
10372 if (bp->bar1) {
10373 pci_iounmap(pdev, bp->bar1);
10374 bp->bar1 = NULL;
10375 }
10376
10377 if (bp->bar0) {
10378 pci_iounmap(pdev, bp->bar0);
10379 bp->bar0 = NULL;
10380 }
10381 }
10382
10383 static void bnxt_cleanup_pci(struct bnxt *bp)
10384 {
10385 bnxt_unmap_bars(bp, bp->pdev);
10386 pci_release_regions(bp->pdev);
10387 if (pci_is_enabled(bp->pdev))
10388 pci_disable_device(bp->pdev);
10389 }
10390
10391 static void bnxt_init_dflt_coal(struct bnxt *bp)
10392 {
10393 struct bnxt_coal *coal;
10394
10395 /* Tick values in microseconds.
10396 * 1 coal_buf x bufs_per_record = 1 completion record.
10397 */
10398 coal = &bp->rx_coal;
10399 coal->coal_ticks = 10;
10400 coal->coal_bufs = 30;
10401 coal->coal_ticks_irq = 1;
10402 coal->coal_bufs_irq = 2;
10403 coal->idle_thresh = 50;
10404 coal->bufs_per_record = 2;
10405 coal->budget = 64; /* NAPI budget */
10406
10407 coal = &bp->tx_coal;
10408 coal->coal_ticks = 28;
10409 coal->coal_bufs = 30;
10410 coal->coal_ticks_irq = 2;
10411 coal->coal_bufs_irq = 2;
10412 coal->bufs_per_record = 1;
10413
10414 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
10415 }
10416
10417 static void bnxt_alloc_fw_health(struct bnxt *bp)
10418 {
10419 if (bp->fw_health)
10420 return;
10421
10422 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
10423 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
10424 return;
10425
10426 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
10427 if (!bp->fw_health) {
10428 netdev_warn(bp->dev, "Failed to allocate fw_health\n");
10429 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
10430 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
10431 }
10432 }
10433
10434 static int bnxt_fw_init_one_p1(struct bnxt *bp)
10435 {
10436 int rc;
10437
10438 bp->fw_cap = 0;
10439 rc = bnxt_hwrm_ver_get(bp);
10440 if (rc)
10441 return rc;
10442
10443 if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
10444 rc = bnxt_alloc_kong_hwrm_resources(bp);
10445 if (rc)
10446 bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
10447 }
10448
10449 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
10450 bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
10451 rc = bnxt_alloc_hwrm_short_cmd_req(bp);
10452 if (rc)
10453 return rc;
10454 }
10455 rc = bnxt_hwrm_func_reset(bp);
10456 if (rc)
10457 return -ENODEV;
10458
10459 bnxt_hwrm_fw_set_time(bp);
10460 return 0;
10461 }
10462
10463 static int bnxt_fw_init_one_p2(struct bnxt *bp)
10464 {
10465 int rc;
10466
10467 /* Get the MAX capabilities for this function */
10468 rc = bnxt_hwrm_func_qcaps(bp);
10469 if (rc) {
10470 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
10471 rc);
10472 return -ENODEV;
10473 }
10474
10475 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
10476 if (rc)
10477 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
10478 rc);
10479
10480 bnxt_alloc_fw_health(bp);
10481 rc = bnxt_hwrm_error_recovery_qcfg(bp);
10482 if (rc)
10483 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
10484 rc);
10485
10486 rc = bnxt_hwrm_func_drv_rgtr(bp);
10487 if (rc)
10488 return -ENODEV;
10489
10490 rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
10491 if (rc)
10492 return -ENODEV;
10493
10494 bnxt_hwrm_func_qcfg(bp);
10495 bnxt_hwrm_vnic_qcaps(bp);
10496 bnxt_hwrm_port_led_qcaps(bp);
10497 bnxt_ethtool_init(bp);
10498 bnxt_dcb_init(bp);
10499 return 0;
10500 }
10501
10502 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
10503 {
10504 bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
10505 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
10506 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
10507 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
10508 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
10509 if (BNXT_CHIP_P4(bp) && bp->hwrm_spec_code >= 0x10501) {
10510 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
10511 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
10512 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
10513 }
10514 }
10515
10516 static void bnxt_set_dflt_rfs(struct bnxt *bp)
10517 {
10518 struct net_device *dev = bp->dev;
10519
10520 dev->hw_features &= ~NETIF_F_NTUPLE;
10521 dev->features &= ~NETIF_F_NTUPLE;
10522 bp->flags &= ~BNXT_FLAG_RFS;
10523 if (bnxt_rfs_supported(bp)) {
10524 dev->hw_features |= NETIF_F_NTUPLE;
10525 if (bnxt_rfs_capable(bp)) {
10526 bp->flags |= BNXT_FLAG_RFS;
10527 dev->features |= NETIF_F_NTUPLE;
10528 }
10529 }
10530 }
10531
10532 static void bnxt_fw_init_one_p3(struct bnxt *bp)
10533 {
10534 struct pci_dev *pdev = bp->pdev;
10535
10536 bnxt_set_dflt_rss_hash_type(bp);
10537 bnxt_set_dflt_rfs(bp);
10538
10539 bnxt_get_wol_settings(bp);
10540 if (bp->flags & BNXT_FLAG_WOL_CAP)
10541 device_set_wakeup_enable(&pdev->dev, bp->wol);
10542 else
10543 device_set_wakeup_capable(&pdev->dev, false);
10544
10545 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
10546 bnxt_hwrm_coal_params_qcaps(bp);
10547 }
10548
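/* Run all three firmware init phases and re-create the devlink health
 * reporters.  Used when the firmware configuration must be fully redone,
 * e.g. after a firmware reset is detected in bnxt_hwrm_if_change().
 */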
10549 static int bnxt_fw_init_one(struct bnxt *bp)
10550 {
10551 int rc;
10552
10553 rc = bnxt_fw_init_one_p1(bp);
10554 if (rc) {
10555 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
10556 return rc;
10557 }
10558 rc = bnxt_fw_init_one_p2(bp);
10559 if (rc) {
10560 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
10561 return rc;
10562 }
10563 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
10564 if (rc)
10565 return rc;
10566
10567 /* In case fw capabilities have changed, destroy the unneeded
10568 * reporters and create newly capable ones.
10569 */
10570 bnxt_dl_fw_reporters_destroy(bp, false);
10571 bnxt_dl_fw_reporters_create(bp);
10572 bnxt_fw_init_one_p3(bp);
10573 return 0;
10574 }
10575
10576 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
10577 {
10578 struct bnxt_fw_health *fw_health = bp->fw_health;
10579 u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
10580 u32 val = fw_health->fw_reset_seq_vals[reg_idx];
10581 u32 reg_type, reg_off, delay_msecs;
10582
10583 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
10584 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
10585 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
10586 switch (reg_type) {
10587 case BNXT_FW_HEALTH_REG_TYPE_CFG:
10588 pci_write_config_dword(bp->pdev, reg_off, val);
10589 break;
10590 case BNXT_FW_HEALTH_REG_TYPE_GRC:
10591 writel(reg_off & BNXT_GRC_BASE_MASK,
10592 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
10593 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
10594 /* fall through */
10595 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
10596 writel(val, bp->bar0 + reg_off);
10597 break;
10598 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
10599 writel(val, bp->bar1 + reg_off);
10600 break;
10601 }
10602 if (delay_msecs) {
10603 pci_read_config_dword(bp->pdev, 0, &val);
10604 msleep(delay_msecs);
10605 }
10606 }
10607
10608 static void bnxt_reset_all(struct bnxt *bp)
10609 {
10610 struct bnxt_fw_health *fw_health = bp->fw_health;
10611 int i;
10612
10613 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
10614 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
10615 bnxt_fw_reset_writel(bp, i);
10616 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
10617 struct hwrm_fw_reset_input req = {0};
10618 int rc;
10619
10620 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
10621 req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
10622 req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
10623 req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
10624 req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
10625 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10626 if (rc)
10627 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
10628 }
10629 bp->fw_reset_timestamp = jiffies;
10630 }
10631
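/* Firmware reset state machine, run from a delayed workqueue.  Walks the
 * BNXT_FW_RESET_STATE_* states: wait for VFs to unregister, trigger the
 * reset, poll for firmware recovery, re-enable the PCI device, and finally
 * re-open the NIC.
 */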
10632 static void bnxt_fw_reset_task(struct work_struct *work)
10633 {
10634 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
10635 int rc;
10636
10637 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10638 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
10639 return;
10640 }
10641
10642 switch (bp->fw_reset_state) {
10643 case BNXT_FW_RESET_STATE_POLL_VF: {
10644 int n = bnxt_get_registered_vfs(bp);
10645 int tmo;
10646
10647 if (n < 0) {
10648 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
10649 n, jiffies_to_msecs(jiffies -
10650 bp->fw_reset_timestamp));
10651 goto fw_reset_abort;
10652 } else if (n > 0) {
10653 if (time_after(jiffies, bp->fw_reset_timestamp +
10654 (bp->fw_reset_max_dsecs * HZ / 10))) {
10655 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10656 bp->fw_reset_state = 0;
10657 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
10658 n);
10659 return;
10660 }
10661 bnxt_queue_fw_reset_work(bp, HZ / 10);
10662 return;
10663 }
10664 bp->fw_reset_timestamp = jiffies;
10665 rtnl_lock();
10666 bnxt_fw_reset_close(bp);
10667 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
10668 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
10669 tmo = HZ / 10;
10670 } else {
10671 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
10672 tmo = bp->fw_reset_min_dsecs * HZ / 10;
10673 }
10674 rtnl_unlock();
10675 bnxt_queue_fw_reset_work(bp, tmo);
10676 return;
10677 }
10678 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
10679 u32 val;
10680
10681 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
10682 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
10683 !time_after(jiffies, bp->fw_reset_timestamp +
10684 (bp->fw_reset_max_dsecs * HZ / 10))) {
10685 bnxt_queue_fw_reset_work(bp, HZ / 5);
10686 return;
10687 }
10688
10689 if (!bp->fw_health->master) {
10690 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
10691
10692 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
10693 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
10694 return;
10695 }
10696 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
10697 }
10698 /* fall through */
10699 case BNXT_FW_RESET_STATE_RESET_FW:
10700 bnxt_reset_all(bp);
10701 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
10702 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
10703 return;
10704 case BNXT_FW_RESET_STATE_ENABLE_DEV:
10705 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
10706 u32 val;
10707
10708 val = bnxt_fw_health_readl(bp,
10709 BNXT_FW_RESET_INPROG_REG);
10710 if (val)
10711 netdev_warn(bp->dev, "FW reset inprog %x after min wait time.\n",
10712 val);
10713 }
10714 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
10715 if (pci_enable_device(bp->pdev)) {
10716 netdev_err(bp->dev, "Cannot re-enable PCI device\n");
10717 goto fw_reset_abort;
10718 }
10719 pci_set_master(bp->pdev);
10720 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
10721 /* fall through */
10722 case BNXT_FW_RESET_STATE_POLL_FW:
10723 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
10724 rc = __bnxt_hwrm_ver_get(bp, true);
10725 if (rc) {
10726 if (time_after(jiffies, bp->fw_reset_timestamp +
10727 (bp->fw_reset_max_dsecs * HZ / 10))) {
10728 netdev_err(bp->dev, "Firmware reset aborted\n");
10729 goto fw_reset_abort;
10730 }
10731 bnxt_queue_fw_reset_work(bp, HZ / 5);
10732 return;
10733 }
10734 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
10735 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
10736 /* fall through */
10737 case BNXT_FW_RESET_STATE_OPENING:
10738 while (!rtnl_trylock()) {
10739 bnxt_queue_fw_reset_work(bp, HZ / 10);
10740 return;
10741 }
10742 rc = bnxt_open(bp->dev);
10743 if (rc) {
10744 netdev_err(bp->dev, "bnxt_open_nic() failed\n");
10745 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10746 dev_close(bp->dev);
10747 }
10748 bnxt_ulp_irq_restart(bp, rc);
10749 rtnl_unlock();
10750
10751 bp->fw_reset_state = 0;
10752 /* Make sure fw_reset_state is 0 before clearing the flag */
10753 smp_mb__before_atomic();
10754 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10755 break;
10756 }
10757 return;
10758
10759 fw_reset_abort:
10760 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10761 bp->fw_reset_state = 0;
10762 rtnl_lock();
10763 dev_close(bp->dev);
10764 rtnl_unlock();
10765 }
10766
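/* One-time PCI/board setup at probe time: enable the device, set the DMA
 * mask, map BAR 0 (registers), BAR 2 (doorbells) and BAR 4, initialize the
 * slow-path and firmware-reset work items, and set the default ring sizes,
 * coalescing parameters and periodic timer.
 */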
10767 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
10768 {
10769 int rc;
10770 struct bnxt *bp = netdev_priv(dev);
10771
10772 SET_NETDEV_DEV(dev, &pdev->dev);
10773
10774 /* enable device (incl. PCI PM wakeup), and bus-mastering */
10775 rc = pci_enable_device(pdev);
10776 if (rc) {
10777 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
10778 goto init_err;
10779 }
10780
10781 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10782 dev_err(&pdev->dev,
10783 "Cannot find PCI device base address, aborting\n");
10784 rc = -ENODEV;
10785 goto init_err_disable;
10786 }
10787
10788 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10789 if (rc) {
10790 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
10791 goto init_err_disable;
10792 }
10793
10794 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
10795 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
10796 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
10797 goto init_err_disable;
10798 }
10799
10800 pci_set_master(pdev);
10801
10802 bp->dev = dev;
10803 bp->pdev = pdev;
10804
10805 bp->bar0 = pci_ioremap_bar(pdev, 0);
10806 if (!bp->bar0) {
10807 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
10808 rc = -ENOMEM;
10809 goto init_err_release;
10810 }
10811
10812 bp->bar1 = pci_ioremap_bar(pdev, 2);
10813 if (!bp->bar1) {
10814 dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
10815 rc = -ENOMEM;
10816 goto init_err_release;
10817 }
10818
10819 bp->bar2 = pci_ioremap_bar(pdev, 4);
10820 if (!bp->bar2) {
10821 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
10822 rc = -ENOMEM;
10823 goto init_err_release;
10824 }
10825
10826 pci_enable_pcie_error_reporting(pdev);
10827
10828 INIT_WORK(&bp->sp_task, bnxt_sp_task);
10829 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
10830
10831 spin_lock_init(&bp->ntp_fltr_lock);
10832 #if BITS_PER_LONG == 32
10833 spin_lock_init(&bp->db_lock);
10834 #endif
10835
10836 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
10837 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
10838
10839 bnxt_init_dflt_coal(bp);
10840
10841 timer_setup(&bp->timer, bnxt_timer, 0);
10842 bp->current_interval = BNXT_TIMER_INTERVAL;
10843
10844 clear_bit(BNXT_STATE_OPEN, &bp->state);
10845 return 0;
10846
10847 init_err_release:
10848 bnxt_unmap_bars(bp, pdev);
10849 pci_release_regions(pdev);
10850
10851 init_err_disable:
10852 pci_disable_device(pdev);
10853
10854 init_err:
10855 return rc;
10856 }
10857
10858 /* rtnl_lock held */
10859 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
10860 {
10861 struct sockaddr *addr = p;
10862 struct bnxt *bp = netdev_priv(dev);
10863 int rc = 0;
10864
10865 if (!is_valid_ether_addr(addr->sa_data))
10866 return -EADDRNOTAVAIL;
10867
10868 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
10869 return 0;
10870
10871 rc = bnxt_approve_mac(bp, addr->sa_data, true);
10872 if (rc)
10873 return rc;
10874
10875 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10876 if (netif_running(dev)) {
10877 bnxt_close_nic(bp, false, false);
10878 rc = bnxt_open_nic(bp, false, false);
10879 }
10880
10881 return rc;
10882 }
10883
10884 /* rtnl_lock held */
10885 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
10886 {
10887 struct bnxt *bp = netdev_priv(dev);
10888
10889 if (netif_running(dev))
10890 bnxt_close_nic(bp, false, false);
10891
10892 dev->mtu = new_mtu;
10893 bnxt_set_ring_params(bp);
10894
10895 if (netif_running(dev))
10896 return bnxt_open_nic(bp, false, false);
10897
10898 return 0;
10899 }
10900
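/* Configure the number of mqprio traffic classes.  The requested count is
 * checked against bp->max_tc and against the available rings; if it
 * changes, the NIC is closed, the TX and completion ring counts are
 * recomputed per TC, and the NIC is reopened.
 */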
10901 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
10902 {
10903 struct bnxt *bp = netdev_priv(dev);
10904 bool sh = false;
10905 int rc;
10906
10907 if (tc > bp->max_tc) {
10908 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
10909 tc, bp->max_tc);
10910 return -EINVAL;
10911 }
10912
10913 if (netdev_get_num_tc(dev) == tc)
10914 return 0;
10915
10916 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
10917 sh = true;
10918
10919 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
10920 sh, tc, bp->tx_nr_rings_xdp);
10921 if (rc)
10922 return rc;
10923
10924 /* Needs to close the device and do hw resource re-allocations */
10925 if (netif_running(bp->dev))
10926 bnxt_close_nic(bp, true, false);
10927
10928 if (tc) {
10929 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
10930 netdev_set_num_tc(dev, tc);
10931 } else {
10932 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
10933 netdev_reset_tc(dev);
10934 }
10935 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
10936 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
10937 bp->tx_nr_rings + bp->rx_nr_rings;
10938
10939 if (netif_running(bp->dev))
10940 return bnxt_open_nic(bp, true, false);
10941
10942 return 0;
10943 }
10944
10945 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
10946 void *cb_priv)
10947 {
10948 struct bnxt *bp = cb_priv;
10949
10950 if (!bnxt_tc_flower_enabled(bp) ||
10951 !tc_cls_can_offload_and_chain0(bp->dev, type_data))
10952 return -EOPNOTSUPP;
10953
10954 switch (type) {
10955 case TC_SETUP_CLSFLOWER:
10956 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
10957 default:
10958 return -EOPNOTSUPP;
10959 }
10960 }
10961
10962 static LIST_HEAD(bnxt_block_cb_list);
10963
10964 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
10965 void *type_data)
10966 {
10967 struct bnxt *bp = netdev_priv(dev);
10968
10969 switch (type) {
10970 case TC_SETUP_BLOCK:
10971 return flow_block_cb_setup_simple(type_data,
10972 &bnxt_block_cb_list,
10973 bnxt_setup_tc_block_cb,
10974 bp, bp, true);
10975 case TC_SETUP_QDISC_MQPRIO: {
10976 struct tc_mqprio_qopt *mqprio = type_data;
10977
10978 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
10979
10980 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
10981 }
10982 default:
10983 return -EOPNOTSUPP;
10984 }
10985 }
10986
10987 #ifdef CONFIG_RFS_ACCEL
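/* Return true if two ntuple filters describe the same flow: same L3
 * protocol, IP protocol, source/destination addresses, ports, flow
 * dissector control flags and MAC addresses.
 */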
10988 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
10989 struct bnxt_ntuple_filter *f2)
10990 {
10991 struct flow_keys *keys1 = &f1->fkeys;
10992 struct flow_keys *keys2 = &f2->fkeys;
10993
10994 if (keys1->basic.n_proto != keys2->basic.n_proto ||
10995 keys1->basic.ip_proto != keys2->basic.ip_proto)
10996 return false;
10997
10998 if (keys1->basic.n_proto == htons(ETH_P_IP)) {
10999 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
11000 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
11001 return false;
11002 } else {
11003 if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
11004 sizeof(keys1->addrs.v6addrs.src)) ||
11005 memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
11006 sizeof(keys1->addrs.v6addrs.dst)))
11007 return false;
11008 }
11009
11010 if (keys1->ports.ports == keys2->ports.ports &&
11011 keys1->control.flags == keys2->control.flags &&
11012 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
11013 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
11014 return true;
11015
11016 return false;
11017 }
11018
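/* .ndo_rx_flow_steer handler for accelerated RFS.  The destination MAC is
 * matched against the device address or the VNIC unicast filter list, the
 * flow keys are dissected from the skb and checked against the supported
 * protocols (TCP/UDP over IPv4/IPv6), and duplicates are rejected against
 * the ntuple filter hash table.  A new filter gets a software ID from the
 * filter bitmap and is programmed later by bnxt_cfg_ntp_filters() via the
 * BNXT_RX_NTP_FLTR_SP_EVENT work event.
 */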
11019 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
11020 u16 rxq_index, u32 flow_id)
11021 {
11022 struct bnxt *bp = netdev_priv(dev);
11023 struct bnxt_ntuple_filter *fltr, *new_fltr;
11024 struct flow_keys *fkeys;
11025 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
11026 int rc = 0, idx, bit_id, l2_idx = 0;
11027 struct hlist_head *head;
11028
11029 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
11030 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
11031 int off = 0, j;
11032
11033 netif_addr_lock_bh(dev);
11034 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
11035 if (ether_addr_equal(eth->h_dest,
11036 vnic->uc_list + off)) {
11037 l2_idx = j + 1;
11038 break;
11039 }
11040 }
11041 netif_addr_unlock_bh(dev);
11042 if (!l2_idx)
11043 return -EINVAL;
11044 }
11045 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
11046 if (!new_fltr)
11047 return -ENOMEM;
11048
11049 fkeys = &new_fltr->fkeys;
11050 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
11051 rc = -EPROTONOSUPPORT;
11052 goto err_free;
11053 }
11054
11055 if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
11056 fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
11057 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
11058 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
11059 rc = -EPROTONOSUPPORT;
11060 goto err_free;
11061 }
11062 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
11063 bp->hwrm_spec_code < 0x10601) {
11064 rc = -EPROTONOSUPPORT;
11065 goto err_free;
11066 }
11067 if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) &&
11068 bp->hwrm_spec_code < 0x10601) {
11069 rc = -EPROTONOSUPPORT;
11070 goto err_free;
11071 }
11072
11073 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
11074 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
11075
11076 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
11077 head = &bp->ntp_fltr_hash_tbl[idx];
11078 rcu_read_lock();
11079 hlist_for_each_entry_rcu(fltr, head, hash) {
11080 if (bnxt_fltr_match(fltr, new_fltr)) {
11081 rcu_read_unlock();
11082 rc = 0;
11083 goto err_free;
11084 }
11085 }
11086 rcu_read_unlock();
11087
11088 spin_lock_bh(&bp->ntp_fltr_lock);
11089 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
11090 BNXT_NTP_FLTR_MAX_FLTR, 0);
11091 if (bit_id < 0) {
11092 spin_unlock_bh(&bp->ntp_fltr_lock);
11093 rc = -ENOMEM;
11094 goto err_free;
11095 }
11096
11097 new_fltr->sw_id = (u16)bit_id;
11098 new_fltr->flow_id = flow_id;
11099 new_fltr->l2_fltr_idx = l2_idx;
11100 new_fltr->rxq = rxq_index;
11101 hlist_add_head_rcu(&new_fltr->hash, head);
11102 bp->ntp_fltr_count++;
11103 spin_unlock_bh(&bp->ntp_fltr_lock);
11104
11105 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
11106 bnxt_queue_sp_work(bp);
11107
11108 return new_fltr->sw_id;
11109
11110 err_free:
11111 kfree(new_fltr);
11112 return rc;
11113 }
11114
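/* Slow-path work: walk the ntuple filter hash table, free filters that the
 * RPS core reports as expired, and program not-yet-valid filters into the
 * hardware via HWRM.
 */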
11115 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
11116 {
11117 int i;
11118
11119 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
11120 struct hlist_head *head;
11121 struct hlist_node *tmp;
11122 struct bnxt_ntuple_filter *fltr;
11123 int rc;
11124
11125 head = &bp->ntp_fltr_hash_tbl[i];
11126 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
11127 bool del = false;
11128
11129 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
11130 if (rps_may_expire_flow(bp->dev, fltr->rxq,
11131 fltr->flow_id,
11132 fltr->sw_id)) {
11133 bnxt_hwrm_cfa_ntuple_filter_free(bp,
11134 fltr);
11135 del = true;
11136 }
11137 } else {
11138 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
11139 fltr);
11140 if (rc)
11141 del = true;
11142 else
11143 set_bit(BNXT_FLTR_VALID, &fltr->state);
11144 }
11145
11146 if (del) {
11147 spin_lock_bh(&bp->ntp_fltr_lock);
11148 hlist_del_rcu(&fltr->hash);
11149 bp->ntp_fltr_count--;
11150 spin_unlock_bh(&bp->ntp_fltr_lock);
11151 synchronize_rcu();
11152 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
11153 kfree(fltr);
11154 }
11155 }
11156 }
11157 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
11158 netdev_info(bp->dev, "Receive PF driver unload event!");
11159 }
11160
11161 #else
11162
11163 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
11164 {
11165 }
11166
11167 #endif /* CONFIG_RFS_ACCEL */
11168
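/* UDP tunnel (VXLAN/GENEVE) port add/del handlers.  The driver tracks a
 * single port of each type with a reference count and defers the actual
 * HWRM tunnel configuration to the slow-path workqueue via sp_event bits.
 */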
11169 static void bnxt_udp_tunnel_add(struct net_device *dev,
11170 struct udp_tunnel_info *ti)
11171 {
11172 struct bnxt *bp = netdev_priv(dev);
11173
11174 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
11175 return;
11176
11177 if (!netif_running(dev))
11178 return;
11179
11180 switch (ti->type) {
11181 case UDP_TUNNEL_TYPE_VXLAN:
11182 if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
11183 return;
11184
11185 bp->vxlan_port_cnt++;
11186 if (bp->vxlan_port_cnt == 1) {
11187 bp->vxlan_port = ti->port;
11188 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
11189 bnxt_queue_sp_work(bp);
11190 }
11191 break;
11192 case UDP_TUNNEL_TYPE_GENEVE:
11193 if (bp->nge_port_cnt && bp->nge_port != ti->port)
11194 return;
11195
11196 bp->nge_port_cnt++;
11197 if (bp->nge_port_cnt == 1) {
11198 bp->nge_port = ti->port;
11199 set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
11200 }
11201 break;
11202 default:
11203 return;
11204 }
11205
11206 bnxt_queue_sp_work(bp);
11207 }
11208
11209 static void bnxt_udp_tunnel_del(struct net_device *dev,
11210 struct udp_tunnel_info *ti)
11211 {
11212 struct bnxt *bp = netdev_priv(dev);
11213
11214 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
11215 return;
11216
11217 if (!netif_running(dev))
11218 return;
11219
11220 switch (ti->type) {
11221 case UDP_TUNNEL_TYPE_VXLAN:
11222 if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
11223 return;
11224 bp->vxlan_port_cnt--;
11225
11226 if (bp->vxlan_port_cnt != 0)
11227 return;
11228
11229 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
11230 break;
11231 case UDP_TUNNEL_TYPE_GENEVE:
11232 if (!bp->nge_port_cnt || bp->nge_port != ti->port)
11233 return;
11234 bp->nge_port_cnt--;
11235
11236 if (bp->nge_port_cnt != 0)
11237 return;
11238
11239 set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
11240 break;
11241 default:
11242 return;
11243 }
11244
11245 bnxt_queue_sp_work(bp);
11246 }
11247
11248 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
11249 struct net_device *dev, u32 filter_mask,
11250 int nlflags)
11251 {
11252 struct bnxt *bp = netdev_priv(dev);
11253
11254 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
11255 nlflags, filter_mask, NULL);
11256 }
11257
11258 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
11259 u16 flags, struct netlink_ext_ack *extack)
11260 {
11261 struct bnxt *bp = netdev_priv(dev);
11262 struct nlattr *attr, *br_spec;
11263 int rem, rc = 0;
11264
11265 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
11266 return -EOPNOTSUPP;
11267
11268 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
11269 if (!br_spec)
11270 return -EINVAL;
11271
11272 nla_for_each_nested(attr, br_spec, rem) {
11273 u16 mode;
11274
11275 if (nla_type(attr) != IFLA_BRIDGE_MODE)
11276 continue;
11277
11278 if (nla_len(attr) < sizeof(mode))
11279 return -EINVAL;
11280
11281 mode = nla_get_u16(attr);
11282 if (mode == bp->br_mode)
11283 break;
11284
11285 rc = bnxt_hwrm_set_br_mode(bp, mode);
11286 if (!rc)
11287 bp->br_mode = mode;
11288 break;
11289 }
11290 return rc;
11291 }
11292
11293 int bnxt_get_port_parent_id(struct net_device *dev,
11294 struct netdev_phys_item_id *ppid)
11295 {
11296 struct bnxt *bp = netdev_priv(dev);
11297
11298 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
11299 return -EOPNOTSUPP;
11300
11301 /* The PF and its VF-reps only support the switchdev framework */
11302 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
11303 return -EOPNOTSUPP;
11304
11305 ppid->id_len = sizeof(bp->switch_id);
11306 memcpy(ppid->id, bp->switch_id, ppid->id_len);
11307
11308 return 0;
11309 }
11310
11311 static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
11312 {
11313 struct bnxt *bp = netdev_priv(dev);
11314
11315 return &bp->dl_port;
11316 }
11317
11318 static const struct net_device_ops bnxt_netdev_ops = {
11319 .ndo_open = bnxt_open,
11320 .ndo_start_xmit = bnxt_start_xmit,
11321 .ndo_stop = bnxt_close,
11322 .ndo_get_stats64 = bnxt_get_stats64,
11323 .ndo_set_rx_mode = bnxt_set_rx_mode,
11324 .ndo_do_ioctl = bnxt_ioctl,
11325 .ndo_validate_addr = eth_validate_addr,
11326 .ndo_set_mac_address = bnxt_change_mac_addr,
11327 .ndo_change_mtu = bnxt_change_mtu,
11328 .ndo_fix_features = bnxt_fix_features,
11329 .ndo_set_features = bnxt_set_features,
11330 .ndo_tx_timeout = bnxt_tx_timeout,
11331 #ifdef CONFIG_BNXT_SRIOV
11332 .ndo_get_vf_config = bnxt_get_vf_config,
11333 .ndo_set_vf_mac = bnxt_set_vf_mac,
11334 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
11335 .ndo_set_vf_rate = bnxt_set_vf_bw,
11336 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
11337 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
11338 .ndo_set_vf_trust = bnxt_set_vf_trust,
11339 #endif
11340 .ndo_setup_tc = bnxt_setup_tc,
11341 #ifdef CONFIG_RFS_ACCEL
11342 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
11343 #endif
11344 .ndo_udp_tunnel_add = bnxt_udp_tunnel_add,
11345 .ndo_udp_tunnel_del = bnxt_udp_tunnel_del,
11346 .ndo_bpf = bnxt_xdp,
11347 .ndo_xdp_xmit = bnxt_xdp_xmit,
11348 .ndo_bridge_getlink = bnxt_bridge_getlink,
11349 .ndo_bridge_setlink = bnxt_bridge_setlink,
11350 .ndo_get_devlink_port = bnxt_get_devlink_port,
11351 };
11352
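/* PCI remove: tear down SR-IOV, devlink and TC offload, unregister the
 * netdev, cancel the slow-path work, and free the HWRM, context memory and
 * PCI resources allocated at probe time.
 */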
11353 static void bnxt_remove_one(struct pci_dev *pdev)
11354 {
11355 struct net_device *dev = pci_get_drvdata(pdev);
11356 struct bnxt *bp = netdev_priv(dev);
11357
11358 if (BNXT_PF(bp))
11359 bnxt_sriov_disable(bp);
11360
11361 bnxt_dl_fw_reporters_destroy(bp, true);
11362 bnxt_dl_unregister(bp);
11363 pci_disable_pcie_error_reporting(pdev);
11364 unregister_netdev(dev);
11365 bnxt_shutdown_tc(bp);
11366 bnxt_cancel_sp_work(bp);
11367 bp->sp_event = 0;
11368
11369 bnxt_clear_int_mode(bp);
11370 bnxt_hwrm_func_drv_unrgtr(bp);
11371 bnxt_free_hwrm_resources(bp);
11372 bnxt_free_hwrm_short_cmd_req(bp);
11373 bnxt_ethtool_free(bp);
11374 bnxt_dcb_free(bp);
11375 kfree(bp->edev);
11376 bp->edev = NULL;
11377 kfree(bp->fw_health);
11378 bp->fw_health = NULL;
11379 bnxt_cleanup_pci(bp);
11380 bnxt_free_ctx_mem(bp);
11381 kfree(bp->ctx);
11382 bp->ctx = NULL;
11383 bnxt_free_port_stats(bp);
11384 free_netdev(dev);
11385 }
11386
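/* Query PHY capabilities and the current link state from firmware and,
 * when fw_dflt is set, seed the ethtool link settings (autoneg, advertised
 * speeds, flow control, forced speed/duplex) from the NVM defaults.
 */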
11387 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
11388 {
11389 int rc = 0;
11390 struct bnxt_link_info *link_info = &bp->link_info;
11391
11392 rc = bnxt_hwrm_phy_qcaps(bp);
11393 if (rc) {
11394 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
11395 rc);
11396 return rc;
11397 }
11398 rc = bnxt_update_link(bp, false);
11399 if (rc) {
11400 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
11401 rc);
11402 return rc;
11403 }
11404
11405 /* Older firmware does not have supported_auto_speeds, so assume
11406 * that all supported speeds can be autonegotiated.
11407 */
11408 if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
11409 link_info->support_auto_speeds = link_info->support_speeds;
11410
11411 if (!fw_dflt)
11412 return 0;
11413
11414 /* Initialize the ethtool setting copy with NVM settings */
11415 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
11416 link_info->autoneg = BNXT_AUTONEG_SPEED;
11417 if (bp->hwrm_spec_code >= 0x10201) {
11418 if (link_info->auto_pause_setting &
11419 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
11420 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11421 } else {
11422 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11423 }
11424 link_info->advertising = link_info->auto_link_speeds;
11425 } else {
11426 link_info->req_link_speed = link_info->force_link_speed;
11427 link_info->req_duplex = link_info->duplex_setting;
11428 }
11429 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
11430 link_info->req_flow_ctrl =
11431 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
11432 else
11433 link_info->req_flow_ctrl = link_info->force_pause_setting;
11434 return 0;
11435 }
11436
11437 static int bnxt_get_max_irq(struct pci_dev *pdev)
11438 {
11439 u16 ctrl;
11440
11441 if (!pdev->msix_cap)
11442 return 1;
11443
11444 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
11445 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
11446 }
11447
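/* Compute the maximum usable RX/TX/completion rings from the resources
 * reported by firmware, capped by the available MSI-X vectors and stat
 * contexts (minus those claimed by the RoCE ULP).  RX is halved when
 * aggregation rings are enabled, and on P5 chips *max_cp reports the
 * available NQs instead.
 */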
11448 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
11449 int *max_cp)
11450 {
11451 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
11452 int max_ring_grps = 0, max_irq;
11453
11454 *max_tx = hw_resc->max_tx_rings;
11455 *max_rx = hw_resc->max_rx_rings;
11456 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
11457 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
11458 bnxt_get_ulp_msix_num(bp),
11459 hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
11460 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
11461 *max_cp = min_t(int, *max_cp, max_irq);
11462 max_ring_grps = hw_resc->max_hw_ring_grps;
11463 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
11464 *max_cp -= 1;
11465 *max_rx -= 2;
11466 }
11467 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11468 *max_rx >>= 1;
11469 if (bp->flags & BNXT_FLAG_CHIP_P5) {
11470 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
11471 /* On P5 chips, max_cp output param should be available NQs */
11472 *max_cp = max_irq;
11473 }
11474 *max_rx = min_t(int, *max_rx, max_ring_grps);
11475 }
11476
11477 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
11478 {
11479 int rx, tx, cp;
11480
11481 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
11482 *max_rx = rx;
11483 *max_tx = tx;
11484 if (!rx || !tx || !cp)
11485 return -ENOMEM;
11486
11487 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
11488 }
11489
11490 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
11491 bool shared)
11492 {
11493 int rc;
11494
11495 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
11496 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
11497 /* Not enough rings, try disabling agg rings. */
11498 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
11499 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
11500 if (rc) {
11501 /* set BNXT_FLAG_AGG_RINGS back for consistency */
11502 bp->flags |= BNXT_FLAG_AGG_RINGS;
11503 return rc;
11504 }
11505 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
11506 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
11507 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
11508 bnxt_set_ring_params(bp);
11509 }
11510
11511 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
11512 int max_cp, max_stat, max_irq;
11513
11514 /* Reserve minimum resources for RoCE */
11515 max_cp = bnxt_get_max_func_cp_rings(bp);
11516 max_stat = bnxt_get_max_func_stat_ctxs(bp);
11517 max_irq = bnxt_get_max_func_irqs(bp);
11518 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
11519 max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
11520 max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
11521 return 0;
11522
11523 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
11524 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
11525 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
11526 max_cp = min_t(int, max_cp, max_irq);
11527 max_cp = min_t(int, max_cp, max_stat);
11528 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
11529 if (rc)
11530 rc = 0;
11531 }
11532 return rc;
11533 }
11534
11535 /* In initial default shared ring setting, each shared ring must have a
11536 * RX/TX ring pair.
11537 */
11538 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
11539 {
11540 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
11541 bp->rx_nr_rings = bp->cp_nr_rings;
11542 bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
11543 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
11544 }
11545
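/* Pick the default number of rings at probe time: one ring under kdump,
 * otherwise the default RSS queue count, scaled down on multi-port cards
 * so the total does not exceed the CPU count, and finally trimmed to what
 * can actually be reserved from firmware.
 */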
11546 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
11547 {
11548 int dflt_rings, max_rx_rings, max_tx_rings, rc;
11549
11550 if (!bnxt_can_reserve_rings(bp))
11551 return 0;
11552
11553 if (sh)
11554 bp->flags |= BNXT_FLAG_SHARED_RINGS;
11555 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
11556 /* Reduce default rings on multi-port cards so that total default
11557 * rings do not exceed CPU count.
11558 */
11559 if (bp->port_count > 1) {
11560 int max_rings =
11561 max_t(int, num_online_cpus() / bp->port_count, 1);
11562
11563 dflt_rings = min_t(int, dflt_rings, max_rings);
11564 }
11565 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
11566 if (rc)
11567 return rc;
11568 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
11569 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
11570 if (sh)
11571 bnxt_trim_dflt_sh_rings(bp);
11572 else
11573 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
11574 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
11575
11576 rc = __bnxt_reserve_rings(bp);
11577 if (rc)
11578 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
11579 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11580 if (sh)
11581 bnxt_trim_dflt_sh_rings(bp);
11582
11583 /* Rings may have been trimmed, re-reserve the trimmed rings. */
11584 if (bnxt_need_reserve_rings(bp)) {
11585 rc = __bnxt_reserve_rings(bp);
11586 if (rc)
11587 netdev_warn(bp->dev, "2nd rings reservation failed.\n");
11588 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11589 }
11590 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
11591 bp->rx_nr_rings++;
11592 bp->cp_nr_rings++;
11593 }
11594 return rc;
11595 }
11596
11597 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
11598 {
11599 int rc;
11600
11601 if (bp->tx_nr_rings)
11602 return 0;
11603
11604 bnxt_ulp_irq_stop(bp);
11605 bnxt_clear_int_mode(bp);
11606 rc = bnxt_set_dflt_rings(bp, true);
11607 if (rc) {
11608 netdev_err(bp->dev, "Not enough rings available.\n");
11609 goto init_dflt_ring_err;
11610 }
11611 rc = bnxt_init_int_mode(bp);
11612 if (rc)
11613 goto init_dflt_ring_err;
11614
11615 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11616 if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
11617 bp->flags |= BNXT_FLAG_RFS;
11618 bp->dev->features |= NETIF_F_NTUPLE;
11619 }
11620 init_dflt_ring_err:
11621 bnxt_ulp_irq_restart(bp, rc);
11622 return rc;
11623 }
11624
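/* Re-read function capabilities from firmware and re-initialize the
 * interrupt mode, closing and reopening the NIC around the change if it
 * was running.  Must be called with rtnl_lock held.
 */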
11625 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
11626 {
11627 int rc;
11628
11629 ASSERT_RTNL();
11630 bnxt_hwrm_func_qcaps(bp);
11631
11632 if (netif_running(bp->dev))
11633 __bnxt_close_nic(bp, true, false);
11634
11635 bnxt_ulp_irq_stop(bp);
11636 bnxt_clear_int_mode(bp);
11637 rc = bnxt_init_int_mode(bp);
11638 bnxt_ulp_irq_restart(bp, rc);
11639
11640 if (netif_running(bp->dev)) {
11641 if (rc)
11642 dev_close(bp->dev);
11643 else
11644 rc = bnxt_open_nic(bp, true, false);
11645 }
11646
11647 return rc;
11648 }
11649
11650 static int bnxt_init_mac_addr(struct bnxt *bp)
11651 {
11652 int rc = 0;
11653
11654 if (BNXT_PF(bp)) {
11655 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
11656 } else {
11657 #ifdef CONFIG_BNXT_SRIOV
11658 struct bnxt_vf_info *vf = &bp->vf;
11659 bool strict_approval = true;
11660
11661 if (is_valid_ether_addr(vf->mac_addr)) {
11662 /* overwrite netdev dev_addr with admin VF MAC */
11663 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
11664 /* Older PF driver or firmware may not approve this
11665 * correctly.
11666 */
11667 strict_approval = false;
11668 } else {
11669 eth_hw_addr_random(bp->dev);
11670 }
11671 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
11672 #endif
11673 }
11674 return rc;
11675 }
11676
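/* Read the 64-bit PCIe Device Serial Number from the DSN extended
 * capability (two dwords starting 4 bytes past the capability header) and
 * store it little-endian in dsn[].  The PF uses it as the eswitch
 * switch_id.
 */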
11677 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
11678 {
11679 struct pci_dev *pdev = bp->pdev;
11680 int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
11681 u32 dw;
11682
11683 if (!pos) {
11684 netdev_info(bp->dev, "Unable do read adapter's DSN");
11685 return -EOPNOTSUPP;
11686 }
11687
11688 /* DSN (two dw) is at an offset of 4 from the cap pos */
11689 pos += 4;
11690 pci_read_config_dword(pdev, pos, &dw);
11691 put_unaligned_le32(dw, &dsn[0]);
11692 pci_read_config_dword(pdev, pos + 4, &dw);
11693 put_unaligned_le32(dw, &dsn[4]);
11694 bp->flags |= BNXT_FLAG_DSN_VALID;
11695 return 0;
11696 }
11697
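/* PCI probe entry point: allocate the netdev, map the board, query
 * firmware capabilities, set up netdev features, the default MAC address
 * and rings, initialize the interrupt mode and TC offload, then register
 * the netdev and the devlink port.
 */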
11698 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
11699 {
11700 static int version_printed;
11701 struct net_device *dev;
11702 struct bnxt *bp;
11703 int rc, max_irqs;
11704
11705 if (pci_is_bridge(pdev))
11706 return -ENODEV;
11707
11708 if (version_printed++ == 0)
11709 pr_info("%s", version);
11710
11711 max_irqs = bnxt_get_max_irq(pdev);
11712 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
11713 if (!dev)
11714 return -ENOMEM;
11715
11716 bp = netdev_priv(dev);
11717 bnxt_set_max_func_irqs(bp, max_irqs);
11718
11719 if (bnxt_vf_pciid(ent->driver_data))
11720 bp->flags |= BNXT_FLAG_VF;
11721
11722 if (pdev->msix_cap)
11723 bp->flags |= BNXT_FLAG_MSIX_CAP;
11724
11725 rc = bnxt_init_board(pdev, dev);
11726 if (rc < 0)
11727 goto init_err_free;
11728
11729 dev->netdev_ops = &bnxt_netdev_ops;
11730 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
11731 dev->ethtool_ops = &bnxt_ethtool_ops;
11732 pci_set_drvdata(pdev, dev);
11733
11734 rc = bnxt_alloc_hwrm_resources(bp);
11735 if (rc)
11736 goto init_err_pci_clean;
11737
11738 mutex_init(&bp->hwrm_cmd_lock);
11739 mutex_init(&bp->link_lock);
11740
11741 rc = bnxt_fw_init_one_p1(bp);
11742 if (rc)
11743 goto init_err_pci_clean;
11744
11745 if (BNXT_CHIP_P5(bp))
11746 bp->flags |= BNXT_FLAG_CHIP_P5;
11747
11748 rc = bnxt_fw_init_one_p2(bp);
11749 if (rc)
11750 goto init_err_pci_clean;
11751
11752 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
11753 NETIF_F_TSO | NETIF_F_TSO6 |
11754 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
11755 NETIF_F_GSO_IPXIP4 |
11756 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
11757 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
11758 NETIF_F_RXCSUM | NETIF_F_GRO;
11759
11760 if (BNXT_SUPPORTS_TPA(bp))
11761 dev->hw_features |= NETIF_F_LRO;
11762
11763 dev->hw_enc_features =
11764 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
11765 NETIF_F_TSO | NETIF_F_TSO6 |
11766 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
11767 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
11768 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
11769 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
11770 NETIF_F_GSO_GRE_CSUM;
11771 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
11772 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
11773 NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
11774 if (BNXT_SUPPORTS_TPA(bp))
11775 dev->hw_features |= NETIF_F_GRO_HW;
11776 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
11777 if (dev->features & NETIF_F_GRO_HW)
11778 dev->features &= ~NETIF_F_LRO;
11779 dev->priv_flags |= IFF_UNICAST_FLT;
11780
11781 #ifdef CONFIG_BNXT_SRIOV
11782 init_waitqueue_head(&bp->sriov_cfg_wait);
11783 mutex_init(&bp->sriov_lock);
11784 #endif
11785 if (BNXT_SUPPORTS_TPA(bp)) {
11786 bp->gro_func = bnxt_gro_func_5730x;
11787 if (BNXT_CHIP_P4(bp))
11788 bp->gro_func = bnxt_gro_func_5731x;
11789 else if (BNXT_CHIP_P5(bp))
11790 bp->gro_func = bnxt_gro_func_5750x;
11791 }
11792 if (!BNXT_CHIP_P4_PLUS(bp))
11793 bp->flags |= BNXT_FLAG_DOUBLE_DB;
11794
11795 bp->ulp_probe = bnxt_ulp_probe;
11796
11797 rc = bnxt_init_mac_addr(bp);
11798 if (rc) {
11799 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
11800 rc = -EADDRNOTAVAIL;
11801 goto init_err_pci_clean;
11802 }
11803
11804 if (BNXT_PF(bp)) {
11805 /* Read the adapter's DSN to use as the eswitch switch_id */
11806 bnxt_pcie_dsn_get(bp, bp->switch_id);
11807 }
11808
11809 /* MTU range: 60 - FW defined max */
11810 dev->min_mtu = ETH_ZLEN;
11811 dev->max_mtu = bp->max_mtu;
11812
11813 rc = bnxt_probe_phy(bp, true);
11814 if (rc)
11815 goto init_err_pci_clean;
11816
11817 bnxt_set_rx_skb_mode(bp, false);
11818 bnxt_set_tpa_flags(bp);
11819 bnxt_set_ring_params(bp);
11820 rc = bnxt_set_dflt_rings(bp, true);
11821 if (rc) {
11822 netdev_err(bp->dev, "Not enough rings available.\n");
11823 rc = -ENOMEM;
11824 goto init_err_pci_clean;
11825 }
11826
11827 bnxt_fw_init_one_p3(bp);
11828
11829 if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
11830 bp->flags |= BNXT_FLAG_STRIP_VLAN;
11831
11832 rc = bnxt_init_int_mode(bp);
11833 if (rc)
11834 goto init_err_pci_clean;
11835
11836 /* No TC has been set yet and rings may have been trimmed due to
11837 * limited MSIX, so we re-initialize the TX rings per TC.
11838 */
11839 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11840
11841 if (BNXT_PF(bp)) {
11842 if (!bnxt_pf_wq) {
11843 bnxt_pf_wq =
11844 create_singlethread_workqueue("bnxt_pf_wq");
11845 if (!bnxt_pf_wq) {
11846 dev_err(&pdev->dev, "Unable to create workqueue.\n");
11847 goto init_err_pci_clean;
11848 }
11849 }
11850 bnxt_init_tc(bp);
11851 }
11852
11853 rc = register_netdev(dev);
11854 if (rc)
11855 goto init_err_cleanup_tc;
11856
11857 bnxt_dl_register(bp);
11858 bnxt_dl_fw_reporters_create(bp);
11859
11860 netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
11861 board_info[ent->driver_data].name,
11862 (long)pci_resource_start(pdev, 0), dev->dev_addr);
11863 pcie_print_link_status(pdev);
11864
11865 return 0;
11866
11867 init_err_cleanup_tc:
11868 bnxt_shutdown_tc(bp);
11869 bnxt_clear_int_mode(bp);
11870
11871 init_err_pci_clean:
11872 bnxt_free_hwrm_short_cmd_req(bp);
11873 bnxt_free_hwrm_resources(bp);
11874 bnxt_free_ctx_mem(bp);
11875 kfree(bp->ctx);
11876 bp->ctx = NULL;
11877 kfree(bp->fw_health);
11878 bp->fw_health = NULL;
11879 bnxt_cleanup_pci(bp);
11880
11881 init_err_free:
11882 free_netdev(dev);
11883 return rc;
11884 }
11885
11886 static void bnxt_shutdown(struct pci_dev *pdev)
11887 {
11888 struct net_device *dev = pci_get_drvdata(pdev);
11889 struct bnxt *bp;
11890
11891 if (!dev)
11892 return;
11893
11894 rtnl_lock();
11895 bp = netdev_priv(dev);
11896 if (!bp)
11897 goto shutdown_exit;
11898
11899 if (netif_running(dev))
11900 dev_close(dev);
11901
11902 bnxt_ulp_shutdown(bp);
11903
11904 if (system_state == SYSTEM_POWER_OFF) {
11905 bnxt_clear_int_mode(bp);
11906 pci_disable_device(pdev);
11907 pci_wake_from_d3(pdev, bp->wol);
11908 pci_set_power_state(pdev, PCI_D3hot);
11909 }
11910
11911 shutdown_exit:
11912 rtnl_unlock();
11913 }
11914
11915 #ifdef CONFIG_PM_SLEEP
11916 static int bnxt_suspend(struct device *device)
11917 {
11918 struct net_device *dev = dev_get_drvdata(device);
11919 struct bnxt *bp = netdev_priv(dev);
11920 int rc = 0;
11921
11922 rtnl_lock();
11923 if (netif_running(dev)) {
11924 netif_device_detach(dev);
11925 rc = bnxt_close(dev);
11926 }
11927 bnxt_hwrm_func_drv_unrgtr(bp);
11928 rtnl_unlock();
11929 return rc;
11930 }
11931
11932 static int bnxt_resume(struct device *device)
11933 {
11934 struct net_device *dev = dev_get_drvdata(device);
11935 struct bnxt *bp = netdev_priv(dev);
11936 int rc = 0;
11937
11938 rtnl_lock();
11939 if (bnxt_hwrm_ver_get(bp) || bnxt_hwrm_func_drv_rgtr(bp)) {
11940 rc = -ENODEV;
11941 goto resume_exit;
11942 }
11943 rc = bnxt_hwrm_func_reset(bp);
11944 if (rc) {
11945 rc = -EBUSY;
11946 goto resume_exit;
11947 }
11948 bnxt_get_wol_settings(bp);
11949 if (netif_running(dev)) {
11950 rc = bnxt_open(dev);
11951 if (!rc)
11952 netif_device_attach(dev);
11953 }
11954
11955 resume_exit:
11956 rtnl_unlock();
11957 return rc;
11958 }
11959
11960 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
11961 #define BNXT_PM_OPS (&bnxt_pm_ops)
11962
11963 #else
11964
11965 #define BNXT_PM_OPS NULL
11966
11967 #endif /* CONFIG_PM_SLEEP */
11968
11969 /**
11970 * bnxt_io_error_detected - called when PCI error is detected
11971 * @pdev: Pointer to PCI device
11972 * @state: The current pci connection state
11973 *
11974 * This function is called after a PCI bus error affecting
11975 * this device has been detected.
11976 */
11977 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
11978 pci_channel_state_t state)
11979 {
11980 struct net_device *netdev = pci_get_drvdata(pdev);
11981 struct bnxt *bp = netdev_priv(netdev);
11982
11983 netdev_info(netdev, "PCI I/O error detected\n");
11984
11985 rtnl_lock();
11986 netif_device_detach(netdev);
11987
11988 bnxt_ulp_stop(bp);
11989
11990 if (state == pci_channel_io_perm_failure) {
11991 rtnl_unlock();
11992 return PCI_ERS_RESULT_DISCONNECT;
11993 }
11994
11995 if (netif_running(netdev))
11996 bnxt_close(netdev);
11997
11998 pci_disable_device(pdev);
11999 rtnl_unlock();
12000
12001 /* Request a slot reset. */
12002 return PCI_ERS_RESULT_NEED_RESET;
12003 }
12004
12005 /**
12006 * bnxt_io_slot_reset - called after the pci bus has been reset.
12007 * @pdev: Pointer to PCI device
12008 *
12009 * Restart the card from scratch, as if from a cold-boot.
12010 * At this point, the card has experienced a hard reset,
12011 * followed by fixups by BIOS, and has its config space
12012 * set up identically to what it was at cold boot.
12013 */
12014 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
12015 {
12016 struct net_device *netdev = pci_get_drvdata(pdev);
12017 struct bnxt *bp = netdev_priv(netdev);
12018 int err = 0;
12019 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
12020
12021 netdev_info(bp->dev, "PCI Slot Reset\n");
12022
12023 rtnl_lock();
12024
12025 if (pci_enable_device(pdev)) {
12026 dev_err(&pdev->dev,
12027 "Cannot re-enable PCI device after reset.\n");
12028 } else {
12029 pci_set_master(pdev);
12030
12031 err = bnxt_hwrm_func_reset(bp);
12032 if (!err && netif_running(netdev))
12033 err = bnxt_open(netdev);
12034
12035 if (!err) {
12036 result = PCI_ERS_RESULT_RECOVERED;
12037 bnxt_ulp_start(bp);
12038 }
12039 }
12040
12041 if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
12042 dev_close(netdev);
12043
12044 rtnl_unlock();
12045
12046 return result;
12047 }
12048
12049 /**
12050 * bnxt_io_resume - called when traffic can start flowing again.
12051 * @pdev: Pointer to PCI device
12052 *
12053 * This callback is called when the error recovery driver tells
12054 * us that it's OK to resume normal operation.
12055 */
12056 static void bnxt_io_resume(struct pci_dev *pdev)
12057 {
12058 struct net_device *netdev = pci_get_drvdata(pdev);
12059
12060 rtnl_lock();
12061
12062 netif_device_attach(netdev);
12063
12064 rtnl_unlock();
12065 }
12066
12067 static const struct pci_error_handlers bnxt_err_handler = {
12068 .error_detected = bnxt_io_error_detected,
12069 .slot_reset = bnxt_io_slot_reset,
12070 .resume = bnxt_io_resume
12071 };
12072
12073 static struct pci_driver bnxt_pci_driver = {
12074 .name = DRV_MODULE_NAME,
12075 .id_table = bnxt_pci_tbl,
12076 .probe = bnxt_init_one,
12077 .remove = bnxt_remove_one,
12078 .shutdown = bnxt_shutdown,
12079 .driver.pm = BNXT_PM_OPS,
12080 .err_handler = &bnxt_err_handler,
12081 #if defined(CONFIG_BNXT_SRIOV)
12082 .sriov_configure = bnxt_sriov_configure,
12083 #endif
12084 };
12085
12086 static int __init bnxt_init(void)
12087 {
12088 bnxt_debug_init();
12089 return pci_register_driver(&bnxt_pci_driver);
12090 }
12091
12092 static void __exit bnxt_exit(void)
12093 {
12094 pci_unregister_driver(&bnxt_pci_driver);
12095 if (bnxt_pf_wq)
12096 destroy_workqueue(bnxt_pf_wq);
12097 bnxt_debug_exit();
12098 }
12099
12100 module_init(bnxt_init);
12101 module_exit(bnxt_exit);
12102