1 /* Broadcom NetXtreme-C/E network driver.
2 *
3 * Copyright (c) 2014-2016 Broadcom Corporation
4 * Copyright (c) 2016-2017 Broadcom Limited
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 */
10
11 #include <linux/module.h>
12
13 #include <linux/stringify.h>
14 #include <linux/kernel.h>
15 #include <linux/timer.h>
16 #include <linux/errno.h>
17 #include <linux/ioport.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/interrupt.h>
21 #include <linux/pci.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/skbuff.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/bitops.h>
27 #include <linux/io.h>
28 #include <linux/irq.h>
29 #include <linux/delay.h>
30 #include <asm/byteorder.h>
31 #include <asm/page.h>
32 #include <linux/time.h>
33 #include <linux/mii.h>
34 #include <linux/if.h>
35 #include <linux/if_vlan.h>
36 #include <linux/if_bridge.h>
37 #include <linux/rtc.h>
38 #include <linux/bpf.h>
39 #include <net/ip.h>
40 #include <net/tcp.h>
41 #include <net/udp.h>
42 #include <net/checksum.h>
43 #include <net/ip6_checksum.h>
44 #include <net/udp_tunnel.h>
45 #include <linux/workqueue.h>
46 #include <linux/prefetch.h>
47 #include <linux/cache.h>
48 #include <linux/log2.h>
49 #include <linux/aer.h>
50 #include <linux/bitmap.h>
51 #include <linux/cpu_rmap.h>
52 #include <linux/cpumask.h>
53 #include <net/pkt_cls.h>
54
55 #include "bnxt_hsi.h"
56 #include "bnxt.h"
57 #include "bnxt_ulp.h"
58 #include "bnxt_sriov.h"
59 #include "bnxt_ethtool.h"
60 #include "bnxt_dcb.h"
61 #include "bnxt_xdp.h"
62 #include "bnxt_vfr.h"
63 #include "bnxt_tc.h"
64
65 #define BNXT_TX_TIMEOUT (5 * HZ)
66
67 static const char version[] =
68 "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
69
70 MODULE_LICENSE("GPL");
71 MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
72 MODULE_VERSION(DRV_MODULE_VERSION);
73
74 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
75 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
76 #define BNXT_RX_COPY_THRESH 256
77
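/* Max. payload size (in bytes) for which bnxt_start_xmit() may use the TX
 * push path and copy the BDs plus packet data directly through the
 * doorbell BAR instead of letting the NIC DMA them from host memory.
 */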
78 #define BNXT_TX_PUSH_THRESH 164
79
80 enum board_idx {
81 BCM57301,
82 BCM57302,
83 BCM57304,
84 BCM57417_NPAR,
85 BCM58700,
86 BCM57311,
87 BCM57312,
88 BCM57402,
89 BCM57404,
90 BCM57406,
91 BCM57402_NPAR,
92 BCM57407,
93 BCM57412,
94 BCM57414,
95 BCM57416,
96 BCM57417,
97 BCM57412_NPAR,
98 BCM57314,
99 BCM57417_SFP,
100 BCM57416_SFP,
101 BCM57404_NPAR,
102 BCM57406_NPAR,
103 BCM57407_SFP,
104 BCM57407_NPAR,
105 BCM57414_NPAR,
106 BCM57416_NPAR,
107 BCM57452,
108 BCM57454,
109 BCM58802,
110 BCM58808,
111 NETXTREME_E_VF,
112 NETXTREME_C_VF,
113 };
114
115 /* indexed by enum above */
116 static const struct {
117 char *name;
118 } board_info[] = {
119 [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
120 [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
121 [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
122 [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
123 [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
124 [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
125 [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
126 [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
127 [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
128 [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
129 [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
130 [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
131 [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
132 [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
133 [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
134 [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
135 [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
136 [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
137 [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
138 [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
139 [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
140 [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
141 [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
142 [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
143 [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
144 [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
145 [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
146 [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
147 [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
148 [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
149 [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
150 [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
151 };
152
153 static const struct pci_device_id bnxt_pci_tbl[] = {
154 { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
155 { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
156 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
157 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
158 { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
159 { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
160 { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
161 { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
162 { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
163 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
164 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
165 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
166 { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
167 { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
168 { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
169 { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
170 { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
171 { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
172 { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
173 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
174 { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
175 { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
176 { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
177 { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
178 { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
179 { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
180 { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
181 { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
182 { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
183 { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
184 { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
185 { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
186 { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
187 { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
188 #ifdef CONFIG_BNXT_SRIOV
189 { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
190 { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
191 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
192 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
193 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
194 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
195 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
196 { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
197 #endif
198 { 0 }
199 };
200
201 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
202
203 static const u16 bnxt_vf_req_snif[] = {
204 HWRM_FUNC_CFG,
205 HWRM_PORT_PHY_QCFG,
206 HWRM_CFA_L2_FILTER_ALLOC,
207 };
208
209 static const u16 bnxt_async_events_arr[] = {
210 ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
211 ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
212 ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
213 ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
214 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
215 };
216
217 static struct workqueue_struct *bnxt_pf_wq;
218
219 static bool bnxt_vf_pciid(enum board_idx idx)
220 {
221 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF);
222 }
223
224 #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
225 #define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
226 #define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS)
227
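/* Completion ring doorbell helpers: REARM advances the consumer index and
 * re-enables the ring interrupt, BNXT_CP_DB advances the index while
 * leaving the interrupt disabled, and IRQ_DIS only masks the interrupt.
 */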
228 #define BNXT_CP_DB_REARM(db, raw_cons) \
229 writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)
230
231 #define BNXT_CP_DB(db, raw_cons) \
232 writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)
233
234 #define BNXT_CP_DB_IRQ_DIS(db) \
235 writel(DB_CP_IRQ_DIS_FLAGS, db)
236
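/* TX length hint table, indexed by the packet length in 512-byte units
 * (see "length >>= 9" in bnxt_start_xmit()).
 */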
237 const u16 bnxt_lhint_arr[] = {
238 TX_BD_FLAGS_LHINT_512_AND_SMALLER,
239 TX_BD_FLAGS_LHINT_512_TO_1023,
240 TX_BD_FLAGS_LHINT_1024_TO_2047,
241 TX_BD_FLAGS_LHINT_1024_TO_2047,
242 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
243 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
244 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
245 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
246 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
247 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
248 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
249 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
250 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
251 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
252 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
253 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
254 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
255 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
256 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
257 };
258
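/* For packets transmitted via a VF representor, return the CFA action
 * (switch port id) carried in the skb's HW port metadata dst; returns 0
 * for ordinary traffic with no such metadata.
 */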
259 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
260 {
261 struct metadata_dst *md_dst = skb_metadata_dst(skb);
262
263 if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
264 return 0;
265
266 return md_dst->u.port_info.port_id;
267 }
268
269 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
270 {
271 struct bnxt *bp = netdev_priv(dev);
272 struct tx_bd *txbd;
273 struct tx_bd_ext *txbd1;
274 struct netdev_queue *txq;
275 int i;
276 dma_addr_t mapping;
277 unsigned int length, pad = 0;
278 u32 len, free_size, vlan_tag_flags, cfa_action, flags;
279 u16 prod, last_frag;
280 struct pci_dev *pdev = bp->pdev;
281 struct bnxt_tx_ring_info *txr;
282 struct bnxt_sw_tx_bd *tx_buf;
283
284 i = skb_get_queue_mapping(skb);
285 if (unlikely(i >= bp->tx_nr_rings)) {
286 dev_kfree_skb_any(skb);
287 return NETDEV_TX_OK;
288 }
289
290 txq = netdev_get_tx_queue(dev, i);
291 txr = &bp->tx_ring[bp->tx_ring_map[i]];
292 prod = txr->tx_prod;
293
294 free_size = bnxt_tx_avail(bp, txr);
295 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
296 netif_tx_stop_queue(txq);
297 return NETDEV_TX_BUSY;
298 }
299
300 length = skb->len;
301 len = skb_headlen(skb);
302 last_frag = skb_shinfo(skb)->nr_frags;
303
304 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
305
306 txbd->tx_bd_opaque = prod;
307
308 tx_buf = &txr->tx_buf_ring[prod];
309 tx_buf->skb = skb;
310 tx_buf->nr_frags = last_frag;
311
312 vlan_tag_flags = 0;
313 cfa_action = bnxt_xmit_get_cfa_action(skb);
314 if (skb_vlan_tag_present(skb)) {
315 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
316 skb_vlan_tag_get(skb);
317 /* Currently supports 802.1Q and 802.1ad VLAN offloads;
318 * QinQ1, QinQ2, QinQ3 VLAN headers are deprecated.
319 */
320 if (skb->vlan_proto == htons(ETH_P_8021Q))
321 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
322 }
323
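/* Small packet and the TX ring is otherwise empty: use push mode and
 * write the BDs plus packet data directly through the doorbell BAR,
 * avoiding a DMA read by the NIC.
 */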
324 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
325 struct tx_push_buffer *tx_push_buf = txr->tx_push;
326 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
327 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
328 void *pdata = tx_push_buf->data;
329 u64 *end;
330 int j, push_len;
331
332 /* Set COAL_NOW to be ready quickly for the next push */
333 tx_push->tx_bd_len_flags_type =
334 cpu_to_le32((length << TX_BD_LEN_SHIFT) |
335 TX_BD_TYPE_LONG_TX_BD |
336 TX_BD_FLAGS_LHINT_512_AND_SMALLER |
337 TX_BD_FLAGS_COAL_NOW |
338 TX_BD_FLAGS_PACKET_END |
339 (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
340
341 if (skb->ip_summed == CHECKSUM_PARTIAL)
342 tx_push1->tx_bd_hsize_lflags =
343 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
344 else
345 tx_push1->tx_bd_hsize_lflags = 0;
346
347 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
348 tx_push1->tx_bd_cfa_action =
349 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
350
351 end = pdata + length;
352 end = PTR_ALIGN(end, 8) - 1;
353 *end = 0;
354
355 skb_copy_from_linear_data(skb, pdata, len);
356 pdata += len;
357 for (j = 0; j < last_frag; j++) {
358 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
359 void *fptr;
360
361 fptr = skb_frag_address_safe(frag);
362 if (!fptr)
363 goto normal_tx;
364
365 memcpy(pdata, fptr, skb_frag_size(frag));
366 pdata += skb_frag_size(frag);
367 }
368
369 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
370 txbd->tx_bd_haddr = txr->data_mapping;
371 prod = NEXT_TX(prod);
372 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
373 memcpy(txbd, tx_push1, sizeof(*txbd));
374 prod = NEXT_TX(prod);
375 tx_push->doorbell =
376 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
377 txr->tx_prod = prod;
378
379 tx_buf->is_push = 1;
380 netdev_tx_sent_queue(txq, skb->len);
381 wmb(); /* Sync is_push and byte queue before pushing data */
382
383 push_len = (length + sizeof(*tx_push) + 7) / 8;
384 if (push_len > 16) {
385 __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
386 __iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
387 (push_len - 16) << 1);
388 } else {
389 __iowrite64_copy(txr->tx_doorbell, tx_push_buf,
390 push_len);
391 }
392
393 goto tx_done;
394 }
395
396 normal_tx:
397 if (length < BNXT_MIN_PKT_SIZE) {
398 pad = BNXT_MIN_PKT_SIZE - length;
399 if (skb_pad(skb, pad)) {
400 /* SKB already freed. */
401 tx_buf->skb = NULL;
402 return NETDEV_TX_OK;
403 }
404 length = BNXT_MIN_PKT_SIZE;
405 }
406
407 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
408
409 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
410 dev_kfree_skb_any(skb);
411 tx_buf->skb = NULL;
412 return NETDEV_TX_OK;
413 }
414
415 dma_unmap_addr_set(tx_buf, mapping, mapping);
416 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
417 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
418
419 txbd->tx_bd_haddr = cpu_to_le64(mapping);
420
421 prod = NEXT_TX(prod);
422 txbd1 = (struct tx_bd_ext *)
423 &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
424
425 txbd1->tx_bd_hsize_lflags = 0;
426 if (skb_is_gso(skb)) {
427 u32 hdr_len;
428
429 if (skb->encapsulation)
430 hdr_len = skb_inner_network_offset(skb) +
431 skb_inner_network_header_len(skb) +
432 inner_tcp_hdrlen(skb);
433 else
434 hdr_len = skb_transport_offset(skb) +
435 tcp_hdrlen(skb);
436
437 txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
438 TX_BD_FLAGS_T_IPID |
439 (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
440 length = skb_shinfo(skb)->gso_size;
441 txbd1->tx_bd_mss = cpu_to_le32(length);
442 length += hdr_len;
443 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
444 txbd1->tx_bd_hsize_lflags =
445 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
446 txbd1->tx_bd_mss = 0;
447 }
448
449 length >>= 9;
450 if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
451 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
452 skb->len);
453 i = 0;
454 goto tx_dma_error;
455 }
456 flags |= bnxt_lhint_arr[length];
457 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
458
459 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
460 txbd1->tx_bd_cfa_action =
461 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
462 for (i = 0; i < last_frag; i++) {
463 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
464
465 prod = NEXT_TX(prod);
466 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
467
468 len = skb_frag_size(frag);
469 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
470 DMA_TO_DEVICE);
471
472 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
473 goto tx_dma_error;
474
475 tx_buf = &txr->tx_buf_ring[prod];
476 dma_unmap_addr_set(tx_buf, mapping, mapping);
477
478 txbd->tx_bd_haddr = cpu_to_le64(mapping);
479
480 flags = len << TX_BD_LEN_SHIFT;
481 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
482 }
483
484 flags &= ~TX_BD_LEN;
485 txbd->tx_bd_len_flags_type =
486 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
487 TX_BD_FLAGS_PACKET_END);
488
489 netdev_tx_sent_queue(txq, skb->len);
490
491 /* Sync BD data before updating doorbell */
492 wmb();
493
494 prod = NEXT_TX(prod);
495 txr->tx_prod = prod;
496
497 if (!skb->xmit_more || netif_xmit_stopped(txq))
498 bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod);
499
500 tx_done:
501
502 mmiowb();
503
504 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
505 if (skb->xmit_more && !tx_buf->is_push)
506 bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod);
507
508 netif_tx_stop_queue(txq);
509
510 /* netif_tx_stop_queue() must be done before checking
511 * tx index in bnxt_tx_avail() below, because in
512 * bnxt_tx_int(), we update tx index before checking for
513 * netif_tx_queue_stopped().
514 */
515 smp_mb();
516 if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
517 netif_tx_wake_queue(txq);
518 }
519 return NETDEV_TX_OK;
520
521 tx_dma_error:
522 last_frag = i;
523
524 /* start back at beginning and unmap skb */
525 prod = txr->tx_prod;
526 tx_buf = &txr->tx_buf_ring[prod];
527 tx_buf->skb = NULL;
528 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
529 skb_headlen(skb), PCI_DMA_TODEVICE);
530 prod = NEXT_TX(prod);
531
532 /* unmap remaining mapped pages */
533 for (i = 0; i < last_frag; i++) {
534 prod = NEXT_TX(prod);
535 tx_buf = &txr->tx_buf_ring[prod];
536 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
537 skb_frag_size(&skb_shinfo(skb)->frags[i]),
538 PCI_DMA_TODEVICE);
539 }
540
541 dev_kfree_skb_any(skb);
542 return NETDEV_TX_OK;
543 }
544
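/* Reclaim up to nr_pkts completed TX packets: unmap their buffers, free
 * the skbs, advance tx_cons, and wake the TX queue if it was stopped and
 * enough descriptors are free again.
 */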
545 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
546 {
547 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
548 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
549 u16 cons = txr->tx_cons;
550 struct pci_dev *pdev = bp->pdev;
551 int i;
552 unsigned int tx_bytes = 0;
553
554 for (i = 0; i < nr_pkts; i++) {
555 struct bnxt_sw_tx_bd *tx_buf;
556 struct sk_buff *skb;
557 int j, last;
558
559 tx_buf = &txr->tx_buf_ring[cons];
560 cons = NEXT_TX(cons);
561 skb = tx_buf->skb;
562 tx_buf->skb = NULL;
563
564 if (tx_buf->is_push) {
565 tx_buf->is_push = 0;
566 goto next_tx_int;
567 }
568
569 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
570 skb_headlen(skb), PCI_DMA_TODEVICE);
571 last = tx_buf->nr_frags;
572
573 for (j = 0; j < last; j++) {
574 cons = NEXT_TX(cons);
575 tx_buf = &txr->tx_buf_ring[cons];
576 dma_unmap_page(
577 &pdev->dev,
578 dma_unmap_addr(tx_buf, mapping),
579 skb_frag_size(&skb_shinfo(skb)->frags[j]),
580 PCI_DMA_TODEVICE);
581 }
582
583 next_tx_int:
584 cons = NEXT_TX(cons);
585
586 tx_bytes += skb->len;
587 dev_kfree_skb_any(skb);
588 }
589
590 netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
591 txr->tx_cons = cons;
592
593 /* Need to make the tx_cons update visible to bnxt_start_xmit()
594 * before checking for netif_tx_queue_stopped(). Without the
595 * memory barrier, there is a small possibility that bnxt_start_xmit()
596 * will miss it and cause the queue to be stopped forever.
597 */
598 smp_mb();
599
600 if (unlikely(netif_tx_queue_stopped(txq)) &&
601 (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
602 __netif_tx_lock(txq, smp_processor_id());
603 if (netif_tx_queue_stopped(txq) &&
604 bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
605 txr->dev_state != BNXT_DEV_STATE_CLOSING)
606 netif_tx_wake_queue(txq);
607 __netif_tx_unlock(txq);
608 }
609 }
610
611 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
612 gfp_t gfp)
613 {
614 struct device *dev = &bp->pdev->dev;
615 struct page *page;
616
617 page = alloc_page(gfp);
618 if (!page)
619 return NULL;
620
621 *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
622 DMA_ATTR_WEAK_ORDERING);
623 if (dma_mapping_error(dev, *mapping)) {
624 __free_page(page);
625 return NULL;
626 }
627 *mapping += bp->rx_dma_offset;
628 return page;
629 }
630
631 static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
632 gfp_t gfp)
633 {
634 u8 *data;
635 struct pci_dev *pdev = bp->pdev;
636
637 data = kmalloc(bp->rx_buf_size, gfp);
638 if (!data)
639 return NULL;
640
641 *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
642 bp->rx_buf_use_size, bp->rx_dir,
643 DMA_ATTR_WEAK_ORDERING);
644
645 if (dma_mapping_error(&pdev->dev, *mapping)) {
646 kfree(data);
647 data = NULL;
648 }
649 return data;
650 }
651
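/* Allocate and map one RX buffer (a full page in page/XDP mode, otherwise
 * a kmalloc'ed buffer) and program it into the RX descriptor at @prod.
 */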
652 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
653 u16 prod, gfp_t gfp)
654 {
655 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
656 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
657 dma_addr_t mapping;
658
659 if (BNXT_RX_PAGE_MODE(bp)) {
660 struct page *page = __bnxt_alloc_rx_page(bp, &mapping, gfp);
661
662 if (!page)
663 return -ENOMEM;
664
665 rx_buf->data = page;
666 rx_buf->data_ptr = page_address(page) + bp->rx_offset;
667 } else {
668 u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
669
670 if (!data)
671 return -ENOMEM;
672
673 rx_buf->data = data;
674 rx_buf->data_ptr = data + bp->rx_offset;
675 }
676 rx_buf->mapping = mapping;
677
678 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
679 return 0;
680 }
681
682 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
683 {
684 u16 prod = rxr->rx_prod;
685 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
686 struct rx_bd *cons_bd, *prod_bd;
687
688 prod_rx_buf = &rxr->rx_buf_ring[prod];
689 cons_rx_buf = &rxr->rx_buf_ring[cons];
690
691 prod_rx_buf->data = data;
692 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
693
694 prod_rx_buf->mapping = cons_rx_buf->mapping;
695
696 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
697 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
698
699 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
700 }
701
702 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
703 {
704 u16 next, max = rxr->rx_agg_bmap_size;
705
706 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
707 if (next >= max)
708 next = find_first_zero_bit(rxr->rx_agg_bmap, max);
709 return next;
710 }
711
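/* Allocate and map one aggregation ring buffer.  When PAGE_SIZE exceeds
 * BNXT_RX_PAGE_SIZE, a page is carved into multiple aggregation buffers,
 * each chunk holding its own page reference.
 */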
712 static inline int bnxt_alloc_rx_page(struct bnxt *bp,
713 struct bnxt_rx_ring_info *rxr,
714 u16 prod, gfp_t gfp)
715 {
716 struct rx_bd *rxbd =
717 &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
718 struct bnxt_sw_rx_agg_bd *rx_agg_buf;
719 struct pci_dev *pdev = bp->pdev;
720 struct page *page;
721 dma_addr_t mapping;
722 u16 sw_prod = rxr->rx_sw_agg_prod;
723 unsigned int offset = 0;
724
725 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
726 page = rxr->rx_page;
727 if (!page) {
728 page = alloc_page(gfp);
729 if (!page)
730 return -ENOMEM;
731 rxr->rx_page = page;
732 rxr->rx_page_offset = 0;
733 }
734 offset = rxr->rx_page_offset;
735 rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
736 if (rxr->rx_page_offset == PAGE_SIZE)
737 rxr->rx_page = NULL;
738 else
739 get_page(page);
740 } else {
741 page = alloc_page(gfp);
742 if (!page)
743 return -ENOMEM;
744 }
745
746 mapping = dma_map_page_attrs(&pdev->dev, page, offset,
747 BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
748 DMA_ATTR_WEAK_ORDERING);
749 if (dma_mapping_error(&pdev->dev, mapping)) {
750 __free_page(page);
751 return -EIO;
752 }
753
754 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
755 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
756
757 __set_bit(sw_prod, rxr->rx_agg_bmap);
758 rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
759 rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
760
761 rx_agg_buf->page = page;
762 rx_agg_buf->offset = offset;
763 rx_agg_buf->mapping = mapping;
764 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
765 rxbd->rx_bd_opaque = sw_prod;
766 return 0;
767 }
768
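/* Recycle @agg_bufs aggregation buffers, referenced by the completion
 * entries starting at @cp_cons, back onto the aggregation ring (used on
 * error paths and failed skb allocations).
 */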
769 static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
770 u32 agg_bufs)
771 {
772 struct bnxt *bp = bnapi->bp;
773 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
774 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
775 u16 prod = rxr->rx_agg_prod;
776 u16 sw_prod = rxr->rx_sw_agg_prod;
777 u32 i;
778
779 for (i = 0; i < agg_bufs; i++) {
780 u16 cons;
781 struct rx_agg_cmp *agg;
782 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
783 struct rx_bd *prod_bd;
784 struct page *page;
785
786 agg = (struct rx_agg_cmp *)
787 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
788 cons = agg->rx_agg_cmp_opaque;
789 __clear_bit(cons, rxr->rx_agg_bmap);
790
791 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
792 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
793
794 __set_bit(sw_prod, rxr->rx_agg_bmap);
795 prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
796 cons_rx_buf = &rxr->rx_agg_ring[cons];
797
798 /* It is possible for sw_prod to be equal to cons, so
799 * set cons_rx_buf->page to NULL first.
800 */
801 page = cons_rx_buf->page;
802 cons_rx_buf->page = NULL;
803 prod_rx_buf->page = page;
804 prod_rx_buf->offset = cons_rx_buf->offset;
805
806 prod_rx_buf->mapping = cons_rx_buf->mapping;
807
808 prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
809
810 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
811 prod_bd->rx_bd_opaque = sw_prod;
812
813 prod = NEXT_RX_AGG(prod);
814 sw_prod = NEXT_RX_AGG(sw_prod);
815 cp_cons = NEXT_CMP(cp_cons);
816 }
817 rxr->rx_agg_prod = prod;
818 rxr->rx_sw_agg_prod = sw_prod;
819 }
820
821 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
822 struct bnxt_rx_ring_info *rxr,
823 u16 cons, void *data, u8 *data_ptr,
824 dma_addr_t dma_addr,
825 unsigned int offset_and_len)
826 {
827 unsigned int payload = offset_and_len >> 16;
828 unsigned int len = offset_and_len & 0xffff;
829 struct skb_frag_struct *frag;
830 struct page *page = data;
831 u16 prod = rxr->rx_prod;
832 struct sk_buff *skb;
833 int off, err;
834
835 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
836 if (unlikely(err)) {
837 bnxt_reuse_rx_data(rxr, cons, data);
838 return NULL;
839 }
840 dma_addr -= bp->rx_dma_offset;
841 dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
842 DMA_ATTR_WEAK_ORDERING);
843
844 if (unlikely(!payload))
845 payload = eth_get_headlen(data_ptr, len);
846
847 skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
848 if (!skb) {
849 __free_page(page);
850 return NULL;
851 }
852
853 off = (void *)data_ptr - page_address(page);
854 skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
855 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
856 payload + NET_IP_ALIGN);
857
858 frag = &skb_shinfo(skb)->frags[0];
859 skb_frag_size_sub(frag, payload);
860 frag->page_offset += payload;
861 skb->data_len -= payload;
862 skb->tail += payload;
863
864 return skb;
865 }
866
867 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
868 struct bnxt_rx_ring_info *rxr, u16 cons,
869 void *data, u8 *data_ptr,
870 dma_addr_t dma_addr,
871 unsigned int offset_and_len)
872 {
873 u16 prod = rxr->rx_prod;
874 struct sk_buff *skb;
875 int err;
876
877 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
878 if (unlikely(err)) {
879 bnxt_reuse_rx_data(rxr, cons, data);
880 return NULL;
881 }
882
883 skb = build_skb(data, 0);
884 dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
885 bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
886 if (!skb) {
887 kfree(data);
888 return NULL;
889 }
890
891 skb_reserve(skb, bp->rx_offset);
892 skb_put(skb, offset_and_len & 0xffff);
893 return skb;
894 }
895
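/* Append the aggregation buffers described by @agg_bufs completion entries
 * to @skb as page fragments, replenishing the aggregation ring as we go.
 * Returns NULL (and recycles the remaining buffers) on allocation failure.
 */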
896 static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
897 struct sk_buff *skb, u16 cp_cons,
898 u32 agg_bufs)
899 {
900 struct pci_dev *pdev = bp->pdev;
901 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
902 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
903 u16 prod = rxr->rx_agg_prod;
904 u32 i;
905
906 for (i = 0; i < agg_bufs; i++) {
907 u16 cons, frag_len;
908 struct rx_agg_cmp *agg;
909 struct bnxt_sw_rx_agg_bd *cons_rx_buf;
910 struct page *page;
911 dma_addr_t mapping;
912
913 agg = (struct rx_agg_cmp *)
914 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
915 cons = agg->rx_agg_cmp_opaque;
916 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
917 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
918
919 cons_rx_buf = &rxr->rx_agg_ring[cons];
920 skb_fill_page_desc(skb, i, cons_rx_buf->page,
921 cons_rx_buf->offset, frag_len);
922 __clear_bit(cons, rxr->rx_agg_bmap);
923
924 /* It is possible for bnxt_alloc_rx_page() to allocate
925 * a sw_prod index that equals the cons index, so we
926 * need to clear the cons entry now.
927 */
928 mapping = cons_rx_buf->mapping;
929 page = cons_rx_buf->page;
930 cons_rx_buf->page = NULL;
931
932 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
933 struct skb_shared_info *shinfo;
934 unsigned int nr_frags;
935
936 shinfo = skb_shinfo(skb);
937 nr_frags = --shinfo->nr_frags;
938 __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
939
940 dev_kfree_skb(skb);
941
942 cons_rx_buf->page = page;
943
944 /* Update prod since possibly some pages have been
945 * allocated already.
946 */
947 rxr->rx_agg_prod = prod;
948 bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
949 return NULL;
950 }
951
952 dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
953 PCI_DMA_FROMDEVICE,
954 DMA_ATTR_WEAK_ORDERING);
955
956 skb->data_len += frag_len;
957 skb->len += frag_len;
958 skb->truesize += PAGE_SIZE;
959
960 prod = NEXT_RX_AGG(prod);
961 cp_cons = NEXT_CMP(cp_cons);
962 }
963 rxr->rx_agg_prod = prod;
964 return skb;
965 }
966
967 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
968 u8 agg_bufs, u32 *raw_cons)
969 {
970 u16 last;
971 struct rx_agg_cmp *agg;
972
973 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
974 last = RING_CMP(*raw_cons);
975 agg = (struct rx_agg_cmp *)
976 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
977 return RX_AGG_CMP_VALID(agg, *raw_cons);
978 }
979
980 static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
981 unsigned int len,
982 dma_addr_t mapping)
983 {
984 struct bnxt *bp = bnapi->bp;
985 struct pci_dev *pdev = bp->pdev;
986 struct sk_buff *skb;
987
988 skb = napi_alloc_skb(&bnapi->napi, len);
989 if (!skb)
990 return NULL;
991
992 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
993 bp->rx_dir);
994
995 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
996 len + NET_IP_ALIGN);
997
998 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
999 bp->rx_dir);
1000
1001 skb_put(skb, len);
1002 return skb;
1003 }
1004
1005 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
1006 u32 *raw_cons, void *cmp)
1007 {
1008 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1009 struct rx_cmp *rxcmp = cmp;
1010 u32 tmp_raw_cons = *raw_cons;
1011 u8 cmp_type, agg_bufs = 0;
1012
1013 cmp_type = RX_CMP_TYPE(rxcmp);
1014
1015 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1016 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1017 RX_CMP_AGG_BUFS) >>
1018 RX_CMP_AGG_BUFS_SHIFT;
1019 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1020 struct rx_tpa_end_cmp *tpa_end = cmp;
1021
1022 agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
1023 RX_TPA_END_CMP_AGG_BUFS) >>
1024 RX_TPA_END_CMP_AGG_BUFS_SHIFT;
1025 }
1026
1027 if (agg_bufs) {
1028 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1029 return -EBUSY;
1030 }
1031 *raw_cons = tmp_raw_cons;
1032 return 0;
1033 }
1034
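/* The slow-path task runs on a dedicated workqueue for the PF and on the
 * system workqueue for VFs.
 */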
1035 static void bnxt_queue_sp_work(struct bnxt *bp)
1036 {
1037 if (BNXT_PF(bp))
1038 queue_work(bnxt_pf_wq, &bp->sp_task);
1039 else
1040 schedule_work(&bp->sp_task);
1041 }
1042
1043 static void bnxt_cancel_sp_work(struct bnxt *bp)
1044 {
1045 if (BNXT_PF(bp))
1046 flush_workqueue(bnxt_pf_wq);
1047 else
1048 cancel_work_sync(&bp->sp_task);
1049 }
1050
1051 static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
1052 {
1053 if (!rxr->bnapi->in_reset) {
1054 rxr->bnapi->in_reset = true;
1055 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
1056 bnxt_queue_sp_work(bp);
1057 }
1058 rxr->rx_next_cons = 0xffff;
1059 }
1060
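/* Handle a TPA_START completion: the buffer at the consumer index becomes
 * the head of a hardware-aggregated (TPA) flow, so park it in
 * rx_tpa[agg_id] and put that agg_id's spare buffer back on the RX ring.
 */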
1061 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1062 struct rx_tpa_start_cmp *tpa_start,
1063 struct rx_tpa_start_cmp_ext *tpa_start1)
1064 {
1065 u8 agg_id = TPA_START_AGG_ID(tpa_start);
1066 u16 cons, prod;
1067 struct bnxt_tpa_info *tpa_info;
1068 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1069 struct rx_bd *prod_bd;
1070 dma_addr_t mapping;
1071
1072 cons = tpa_start->rx_tpa_start_cmp_opaque;
1073 prod = rxr->rx_prod;
1074 cons_rx_buf = &rxr->rx_buf_ring[cons];
1075 prod_rx_buf = &rxr->rx_buf_ring[prod];
1076 tpa_info = &rxr->rx_tpa[agg_id];
1077
1078 if (unlikely(cons != rxr->rx_next_cons)) {
1079 netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n",
1080 cons, rxr->rx_next_cons);
1081 bnxt_sched_reset(bp, rxr);
1082 return;
1083 }
1084 /* Store cfa_code in tpa_info to use in tpa_end
1085 * completion processing.
1086 */
1087 tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1088 prod_rx_buf->data = tpa_info->data;
1089 prod_rx_buf->data_ptr = tpa_info->data_ptr;
1090
1091 mapping = tpa_info->mapping;
1092 prod_rx_buf->mapping = mapping;
1093
1094 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1095
1096 prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1097
1098 tpa_info->data = cons_rx_buf->data;
1099 tpa_info->data_ptr = cons_rx_buf->data_ptr;
1100 cons_rx_buf->data = NULL;
1101 tpa_info->mapping = cons_rx_buf->mapping;
1102
1103 tpa_info->len =
1104 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1105 RX_TPA_START_CMP_LEN_SHIFT;
1106 if (likely(TPA_START_HASH_VALID(tpa_start))) {
1107 u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
1108
1109 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1110 tpa_info->gso_type = SKB_GSO_TCPV4;
1111 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1112 if (hash_type == 3)
1113 tpa_info->gso_type = SKB_GSO_TCPV6;
1114 tpa_info->rss_hash =
1115 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1116 } else {
1117 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1118 tpa_info->gso_type = 0;
1119 if (netif_msg_rx_err(bp))
1120 netdev_warn(bp->dev, "TPA packet without valid hash\n");
1121 }
1122 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1123 tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1124 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1125
1126 rxr->rx_prod = NEXT_RX(prod);
1127 cons = NEXT_RX(cons);
1128 rxr->rx_next_cons = NEXT_RX(cons);
1129 cons_rx_buf = &rxr->rx_buf_ring[cons];
1130
1131 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1132 rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1133 cons_rx_buf->data = NULL;
1134 }
1135
1136 static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
1137 u16 cp_cons, u32 agg_bufs)
1138 {
1139 if (agg_bufs)
1140 bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
1141 }
1142
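/* Chip-specific GRO fixup helpers: recompute the network/transport header
 * offsets and the TCP pseudo checksum of a hardware-aggregated packet so
 * that tcp_gro_complete() can finish the job in bnxt_gro_skb().
 */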
1143 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1144 int payload_off, int tcp_ts,
1145 struct sk_buff *skb)
1146 {
1147 #ifdef CONFIG_INET
1148 struct tcphdr *th;
1149 int len, nw_off;
1150 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1151 u32 hdr_info = tpa_info->hdr_info;
1152 bool loopback = false;
1153
1154 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1155 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1156 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1157
1158 /* If the packet is an internal loopback packet, the offsets will
1159 * have an extra 4 bytes.
1160 */
1161 if (inner_mac_off == 4) {
1162 loopback = true;
1163 } else if (inner_mac_off > 4) {
1164 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1165 ETH_HLEN - 2));
1166
1167 /* We only support inner IPv4/IPv6. If we don't see the
1168 * correct protocol ID, it must be a loopback packet where
1169 * the offsets are off by 4.
1170 */
1171 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1172 loopback = true;
1173 }
1174 if (loopback) {
1175 /* internal loopback packet, subtract all offsets by 4 */
1176 inner_ip_off -= 4;
1177 inner_mac_off -= 4;
1178 outer_ip_off -= 4;
1179 }
1180
1181 nw_off = inner_ip_off - ETH_HLEN;
1182 skb_set_network_header(skb, nw_off);
1183 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1184 struct ipv6hdr *iph = ipv6_hdr(skb);
1185
1186 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1187 len = skb->len - skb_transport_offset(skb);
1188 th = tcp_hdr(skb);
1189 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1190 } else {
1191 struct iphdr *iph = ip_hdr(skb);
1192
1193 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1194 len = skb->len - skb_transport_offset(skb);
1195 th = tcp_hdr(skb);
1196 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1197 }
1198
1199 if (inner_mac_off) { /* tunnel */
1200 struct udphdr *uh = NULL;
1201 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1202 ETH_HLEN - 2));
1203
1204 if (proto == htons(ETH_P_IP)) {
1205 struct iphdr *iph = (struct iphdr *)skb->data;
1206
1207 if (iph->protocol == IPPROTO_UDP)
1208 uh = (struct udphdr *)(iph + 1);
1209 } else {
1210 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1211
1212 if (iph->nexthdr == IPPROTO_UDP)
1213 uh = (struct udphdr *)(iph + 1);
1214 }
1215 if (uh) {
1216 if (uh->check)
1217 skb_shinfo(skb)->gso_type |=
1218 SKB_GSO_UDP_TUNNEL_CSUM;
1219 else
1220 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1221 }
1222 }
1223 #endif
1224 return skb;
1225 }
1226
1227 #define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
1228 #define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1229
1230 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1231 int payload_off, int tcp_ts,
1232 struct sk_buff *skb)
1233 {
1234 #ifdef CONFIG_INET
1235 struct tcphdr *th;
1236 int len, nw_off, tcp_opt_len = 0;
1237
1238 if (tcp_ts)
1239 tcp_opt_len = 12;
1240
1241 if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1242 struct iphdr *iph;
1243
1244 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1245 ETH_HLEN;
1246 skb_set_network_header(skb, nw_off);
1247 iph = ip_hdr(skb);
1248 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1249 len = skb->len - skb_transport_offset(skb);
1250 th = tcp_hdr(skb);
1251 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1252 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1253 struct ipv6hdr *iph;
1254
1255 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1256 ETH_HLEN;
1257 skb_set_network_header(skb, nw_off);
1258 iph = ipv6_hdr(skb);
1259 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1260 len = skb->len - skb_transport_offset(skb);
1261 th = tcp_hdr(skb);
1262 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1263 } else {
1264 dev_kfree_skb_any(skb);
1265 return NULL;
1266 }
1267
1268 if (nw_off) { /* tunnel */
1269 struct udphdr *uh = NULL;
1270
1271 if (skb->protocol == htons(ETH_P_IP)) {
1272 struct iphdr *iph = (struct iphdr *)skb->data;
1273
1274 if (iph->protocol == IPPROTO_UDP)
1275 uh = (struct udphdr *)(iph + 1);
1276 } else {
1277 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1278
1279 if (iph->nexthdr == IPPROTO_UDP)
1280 uh = (struct udphdr *)(iph + 1);
1281 }
1282 if (uh) {
1283 if (uh->check)
1284 skb_shinfo(skb)->gso_type |=
1285 SKB_GSO_UDP_TUNNEL_CSUM;
1286 else
1287 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1288 }
1289 }
1290 #endif
1291 return skb;
1292 }
1293
1294 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1295 struct bnxt_tpa_info *tpa_info,
1296 struct rx_tpa_end_cmp *tpa_end,
1297 struct rx_tpa_end_cmp_ext *tpa_end1,
1298 struct sk_buff *skb)
1299 {
1300 #ifdef CONFIG_INET
1301 int payload_off;
1302 u16 segs;
1303
1304 segs = TPA_END_TPA_SEGS(tpa_end);
1305 if (segs == 1)
1306 return skb;
1307
1308 NAPI_GRO_CB(skb)->count = segs;
1309 skb_shinfo(skb)->gso_size =
1310 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1311 skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1312 payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
1313 RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
1314 RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
1315 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1316 if (likely(skb))
1317 tcp_gro_complete(skb);
1318 #endif
1319 return skb;
1320 }
1321
1322 /* Given the cfa_code of a received packet, determine which
1323 * netdev (vf-rep or PF) the packet is destined to.
1324 */
1325 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1326 {
1327 struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1328
1329 /* if vf-rep dev is NULL, the packet must belong to the PF */
1330 return dev ? dev : bp->dev;
1331 }
1332
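/* Handle a TPA_END completion: build an skb for the aggregated packet,
 * attach any aggregation buffers, and run GRO completion if the hardware
 * aggregated with GRO.  Returns an ERR_PTR if the completion ring does not
 * yet contain all the aggregation entries.
 */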
1333 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1334 struct bnxt_napi *bnapi,
1335 u32 *raw_cons,
1336 struct rx_tpa_end_cmp *tpa_end,
1337 struct rx_tpa_end_cmp_ext *tpa_end1,
1338 u8 *event)
1339 {
1340 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1341 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1342 u8 agg_id = TPA_END_AGG_ID(tpa_end);
1343 u8 *data_ptr, agg_bufs;
1344 u16 cp_cons = RING_CMP(*raw_cons);
1345 unsigned int len;
1346 struct bnxt_tpa_info *tpa_info;
1347 dma_addr_t mapping;
1348 struct sk_buff *skb;
1349 void *data;
1350
1351 if (unlikely(bnapi->in_reset)) {
1352 int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end);
1353
1354 if (rc < 0)
1355 return ERR_PTR(-EBUSY);
1356 return NULL;
1357 }
1358
1359 tpa_info = &rxr->rx_tpa[agg_id];
1360 data = tpa_info->data;
1361 data_ptr = tpa_info->data_ptr;
1362 prefetch(data_ptr);
1363 len = tpa_info->len;
1364 mapping = tpa_info->mapping;
1365
1366 agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
1367 RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;
1368
1369 if (agg_bufs) {
1370 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1371 return ERR_PTR(-EBUSY);
1372
1373 *event |= BNXT_AGG_EVENT;
1374 cp_cons = NEXT_CMP(cp_cons);
1375 }
1376
1377 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1378 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
1379 if (agg_bufs > MAX_SKB_FRAGS)
1380 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1381 agg_bufs, (int)MAX_SKB_FRAGS);
1382 return NULL;
1383 }
1384
1385 if (len <= bp->rx_copy_thresh) {
1386 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1387 if (!skb) {
1388 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
1389 return NULL;
1390 }
1391 } else {
1392 u8 *new_data;
1393 dma_addr_t new_mapping;
1394
1395 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1396 if (!new_data) {
1397 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
1398 return NULL;
1399 }
1400
1401 tpa_info->data = new_data;
1402 tpa_info->data_ptr = new_data + bp->rx_offset;
1403 tpa_info->mapping = new_mapping;
1404
1405 skb = build_skb(data, 0);
1406 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1407 bp->rx_buf_use_size, bp->rx_dir,
1408 DMA_ATTR_WEAK_ORDERING);
1409
1410 if (!skb) {
1411 kfree(data);
1412 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
1413 return NULL;
1414 }
1415 skb_reserve(skb, bp->rx_offset);
1416 skb_put(skb, len);
1417 }
1418
1419 if (agg_bufs) {
1420 skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
1421 if (!skb) {
1422 /* Page reuse already handled by bnxt_rx_pages(). */
1423 return NULL;
1424 }
1425 }
1426
1427 skb->protocol =
1428 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
1429
1430 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1431 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1432
1433 if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1434 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1435 u16 vlan_proto = tpa_info->metadata >>
1436 RX_CMP_FLAGS2_METADATA_TPID_SFT;
1437 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK;
1438
1439 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1440 }
1441
1442 skb_checksum_none_assert(skb);
1443 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1444 skb->ip_summed = CHECKSUM_UNNECESSARY;
1445 skb->csum_level =
1446 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1447 }
1448
1449 if (TPA_END_GRO(tpa_end))
1450 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1451
1452 return skb;
1453 }
1454
1455 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1456 struct sk_buff *skb)
1457 {
1458 if (skb->dev != bp->dev) {
1459 /* this packet belongs to a vf-rep */
1460 bnxt_vf_rep_rx(bp, skb);
1461 return;
1462 }
1463 skb_record_rx_queue(skb, bnapi->index);
1464 napi_gro_receive(&bnapi->napi, skb);
1465 }
1466
1467 /* returns the following:
1468 * 1 - 1 packet successfully received
1469 * 0 - successful TPA_START, packet not completed yet
1470 * -EBUSY - completion ring does not have all the agg buffers yet
1471 * -ENOMEM - packet aborted due to out of memory
1472 * -EIO - packet aborted due to hw error indicated in BD
1473 */
1474 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
1475 u8 *event)
1476 {
1477 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1478 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1479 struct net_device *dev = bp->dev;
1480 struct rx_cmp *rxcmp;
1481 struct rx_cmp_ext *rxcmp1;
1482 u32 tmp_raw_cons = *raw_cons;
1483 u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1484 struct bnxt_sw_rx_bd *rx_buf;
1485 unsigned int len;
1486 u8 *data_ptr, agg_bufs, cmp_type;
1487 dma_addr_t dma_addr;
1488 struct sk_buff *skb;
1489 void *data;
1490 int rc = 0;
1491 u32 misc;
1492
1493 rxcmp = (struct rx_cmp *)
1494 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1495
1496 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1497 cp_cons = RING_CMP(tmp_raw_cons);
1498 rxcmp1 = (struct rx_cmp_ext *)
1499 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1500
1501 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1502 return -EBUSY;
1503
1504 cmp_type = RX_CMP_TYPE(rxcmp);
1505
1506 prod = rxr->rx_prod;
1507
1508 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1509 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1510 (struct rx_tpa_start_cmp_ext *)rxcmp1);
1511
1512 *event |= BNXT_RX_EVENT;
1513 goto next_rx_no_prod;
1514
1515 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1516 skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
1517 (struct rx_tpa_end_cmp *)rxcmp,
1518 (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
1519
1520 if (unlikely(IS_ERR(skb)))
1521 return -EBUSY;
1522
1523 rc = -ENOMEM;
1524 if (likely(skb)) {
1525 bnxt_deliver_skb(bp, bnapi, skb);
1526 rc = 1;
1527 }
1528 *event |= BNXT_RX_EVENT;
1529 goto next_rx_no_prod;
1530 }
1531
1532 cons = rxcmp->rx_cmp_opaque;
1533 if (unlikely(cons != rxr->rx_next_cons)) {
1534 int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
1535
1536 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1537 cons, rxr->rx_next_cons);
1538 bnxt_sched_reset(bp, rxr);
1539 return rc1;
1540 }
1541 rx_buf = &rxr->rx_buf_ring[cons];
1542 data = rx_buf->data;
1543 data_ptr = rx_buf->data_ptr;
1544 prefetch(data_ptr);
1545
1546 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1547 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
1548
1549 if (agg_bufs) {
1550 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1551 return -EBUSY;
1552
1553 cp_cons = NEXT_CMP(cp_cons);
1554 *event |= BNXT_AGG_EVENT;
1555 }
1556 *event |= BNXT_RX_EVENT;
1557
1558 rx_buf->data = NULL;
1559 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1560 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1561
1562 bnxt_reuse_rx_data(rxr, cons, data);
1563 if (agg_bufs)
1564 bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
1565
1566 rc = -EIO;
1567 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
1568 netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
1569 bnxt_sched_reset(bp, rxr);
1570 }
1571 goto next_rx;
1572 }
1573
1574 len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
1575 dma_addr = rx_buf->mapping;
1576
1577 if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1578 rc = 1;
1579 goto next_rx;
1580 }
1581
1582 if (len <= bp->rx_copy_thresh) {
1583 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
1584 bnxt_reuse_rx_data(rxr, cons, data);
1585 if (!skb) {
1586 if (agg_bufs)
1587 bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
1588 rc = -ENOMEM;
1589 goto next_rx;
1590 }
1591 } else {
1592 u32 payload;
1593
1594 if (rx_buf->data_ptr == data_ptr)
1595 payload = misc & RX_CMP_PAYLOAD_OFFSET;
1596 else
1597 payload = 0;
1598 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
1599 payload | len);
1600 if (!skb) {
1601 rc = -ENOMEM;
1602 goto next_rx;
1603 }
1604 }
1605
1606 if (agg_bufs) {
1607 skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
1608 if (!skb) {
1609 rc = -ENOMEM;
1610 goto next_rx;
1611 }
1612 }
1613
1614 if (RX_CMP_HASH_VALID(rxcmp)) {
1615 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1616 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1617
1618 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1619 if (hash_type != 1 && hash_type != 3)
1620 type = PKT_HASH_TYPE_L3;
1621 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1622 }
1623
1624 cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1625 skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
1626
1627 if ((rxcmp1->rx_cmp_flags2 &
1628 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1629 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1630 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1631 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK;
1632 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
1633
1634 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1635 }
1636
1637 skb_checksum_none_assert(skb);
1638 if (RX_CMP_L4_CS_OK(rxcmp1)) {
1639 if (dev->features & NETIF_F_RXCSUM) {
1640 skb->ip_summed = CHECKSUM_UNNECESSARY;
1641 skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1642 }
1643 } else {
1644 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1645 if (dev->features & NETIF_F_RXCSUM)
1646 cpr->rx_l4_csum_errors++;
1647 }
1648 }
1649
1650 bnxt_deliver_skb(bp, bnapi, skb);
1651 rc = 1;
1652
1653 next_rx:
1654 rxr->rx_prod = NEXT_RX(prod);
1655 rxr->rx_next_cons = NEXT_RX(cons);
1656
1657 next_rx_no_prod:
1658 *raw_cons = tmp_raw_cons;
1659
1660 return rc;
1661 }
1662
1663 /* In netpoll mode, if we are using a combined completion ring, we need to
1664 * discard the rx packets and recycle the buffers.
1665 */
1666 static int bnxt_force_rx_discard(struct bnxt *bp, struct bnxt_napi *bnapi,
1667 u32 *raw_cons, u8 *event)
1668 {
1669 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1670 u32 tmp_raw_cons = *raw_cons;
1671 struct rx_cmp_ext *rxcmp1;
1672 struct rx_cmp *rxcmp;
1673 u16 cp_cons;
1674 u8 cmp_type;
1675
1676 cp_cons = RING_CMP(tmp_raw_cons);
1677 rxcmp = (struct rx_cmp *)
1678 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1679
1680 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1681 cp_cons = RING_CMP(tmp_raw_cons);
1682 rxcmp1 = (struct rx_cmp_ext *)
1683 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1684
1685 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1686 return -EBUSY;
1687
1688 cmp_type = RX_CMP_TYPE(rxcmp);
1689 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1690 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1691 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1692 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1693 struct rx_tpa_end_cmp_ext *tpa_end1;
1694
1695 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
1696 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
1697 cpu_to_le32(RX_TPA_END_CMP_ERRORS);
1698 }
1699 return bnxt_rx_pkt(bp, bnapi, raw_cons, event);
1700 }
1701
1702 #define BNXT_GET_EVENT_PORT(data) \
1703 ((data) & \
1704 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
1705
1706 static int bnxt_async_event_process(struct bnxt *bp,
1707 struct hwrm_async_event_cmpl *cmpl)
1708 {
1709 u16 event_id = le16_to_cpu(cmpl->event_id);
1710
1711 /* TODO CHIMP_FW: Define event id's for link change, error etc */
1712 switch (event_id) {
1713 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
1714 u32 data1 = le32_to_cpu(cmpl->event_data1);
1715 struct bnxt_link_info *link_info = &bp->link_info;
1716
1717 if (BNXT_VF(bp))
1718 goto async_event_process_exit;
1719
1720 /* print unsupported speed warning in forced speed mode only */
1721 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
1722 (data1 & 0x20000)) {
1723 u16 fw_speed = link_info->force_link_speed;
1724 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
1725
1726 if (speed != SPEED_UNKNOWN)
1727 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
1728 speed);
1729 }
1730 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
1731 /* fall through */
1732 }
1733 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
1734 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
1735 break;
1736 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
1737 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
1738 break;
1739 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
1740 u32 data1 = le32_to_cpu(cmpl->event_data1);
1741 u16 port_id = BNXT_GET_EVENT_PORT(data1);
1742
1743 if (BNXT_VF(bp))
1744 break;
1745
1746 if (bp->pf.port_id != port_id)
1747 break;
1748
1749 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
1750 break;
1751 }
1752 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
1753 if (BNXT_PF(bp))
1754 goto async_event_process_exit;
1755 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
1756 break;
1757 default:
1758 goto async_event_process_exit;
1759 }
1760 bnxt_queue_sp_work(bp);
1761 async_event_process_exit:
1762 bnxt_ulp_async_events(bp, cmpl);
1763 return 0;
1764 }
1765
1766 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
1767 {
1768 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
1769 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
1770 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
1771 (struct hwrm_fwd_req_cmpl *)txcmp;
1772
1773 switch (cmpl_type) {
1774 case CMPL_BASE_TYPE_HWRM_DONE:
1775 seq_id = le16_to_cpu(h_cmpl->sequence_id);
1776 if (seq_id == bp->hwrm_intr_seq_id)
1777 bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
1778 else
1779 netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
1780 break;
1781
1782 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
1783 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
1784
1785 if ((vf_id < bp->pf.first_vf_id) ||
1786 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
1787 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
1788 vf_id);
1789 return -EINVAL;
1790 }
1791
1792 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
1793 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
1794 bnxt_queue_sp_work(bp);
1795 break;
1796
1797 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
1798 bnxt_async_event_process(bp,
1799 (struct hwrm_async_event_cmpl *)txcmp);
1800
1801 default:
1802 break;
1803 }
1804
1805 return 0;
1806 }
1807
1808 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
1809 {
1810 struct bnxt_napi *bnapi = dev_instance;
1811 struct bnxt *bp = bnapi->bp;
1812 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1813 u32 cons = RING_CMP(cpr->cp_raw_cons);
1814
1815 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1816 napi_schedule(&bnapi->napi);
1817 return IRQ_HANDLED;
1818 }
1819
1820 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1821 {
1822 u32 raw_cons = cpr->cp_raw_cons;
1823 u16 cons = RING_CMP(raw_cons);
1824 struct tx_cmp *txcmp;
1825
1826 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1827
1828 return TX_CMP_VALID(txcmp, raw_cons);
1829 }
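/* A hedged sketch of the valid-bit convention assumed above: the raw
 * consumer index carries one extra bit (bp->cp_bit) beyond the ring mask,
 * and hardware flips the V bit in each completion on every pass over the
 * ring.  TX_CMP_VALID() then reduces to, approximately:
 *
 *	valid = !!(txcmp->tx_cmp_errors_v & cpu_to_le32(TX_CMP_V)) ==
 *		!(raw_cons & bp->cp_bit);
 *
 * so a stale descriptor left over from the previous pass never looks valid
 * and no index register read is needed to detect pending work.
 */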
1830
1831 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
1832 {
1833 struct bnxt_napi *bnapi = dev_instance;
1834 struct bnxt *bp = bnapi->bp;
1835 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1836 u32 cons = RING_CMP(cpr->cp_raw_cons);
1837 u32 int_status;
1838
1839 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1840
1841 if (!bnxt_has_work(bp, cpr)) {
1842 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
1843 /* return if erroneous interrupt */
1844 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
1845 return IRQ_NONE;
1846 }
1847
1848 /* disable ring IRQ */
1849 BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);
1850
1851 /* Return here if interrupt is shared and is disabled. */
1852 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1853 return IRQ_HANDLED;
1854
1855 napi_schedule(&bnapi->napi);
1856 return IRQ_HANDLED;
1857 }
1858
1859 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
1860 {
1861 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1862 u32 raw_cons = cpr->cp_raw_cons;
1863 u32 cons;
1864 int tx_pkts = 0;
1865 int rx_pkts = 0;
1866 u8 event = 0;
1867 struct tx_cmp *txcmp;
1868
1869 while (1) {
1870 int rc;
1871
1872 cons = RING_CMP(raw_cons);
1873 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1874
1875 if (!TX_CMP_VALID(txcmp, raw_cons))
1876 break;
1877
1878 		/* The valid test of the entry must be done before reading
1879 		 * any further.
1880 */
1881 dma_rmb();
1882 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
1883 tx_pkts++;
1884 /* return full budget so NAPI will complete. */
1885 if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
1886 rx_pkts = budget;
1887 raw_cons = NEXT_RAW_CMP(raw_cons);
1888 break;
1889 }
1890 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
1891 if (likely(budget))
1892 rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
1893 else
1894 rc = bnxt_force_rx_discard(bp, bnapi, &raw_cons,
1895 &event);
1896 if (likely(rc >= 0))
1897 rx_pkts += rc;
1898 /* Increment rx_pkts when rc is -ENOMEM to count towards
1899 * the NAPI budget. Otherwise, we may potentially loop
1900 * here forever if we consistently cannot allocate
1901 * buffers.
1902 */
1903 else if (rc == -ENOMEM && budget)
1904 rx_pkts++;
1905 else if (rc == -EBUSY) /* partial completion */
1906 break;
1907 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
1908 CMPL_BASE_TYPE_HWRM_DONE) ||
1909 (TX_CMP_TYPE(txcmp) ==
1910 CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
1911 (TX_CMP_TYPE(txcmp) ==
1912 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
1913 bnxt_hwrm_handler(bp, txcmp);
1914 }
1915 raw_cons = NEXT_RAW_CMP(raw_cons);
1916
1917 if (rx_pkts && rx_pkts == budget)
1918 break;
1919 }
1920
1921 if (event & BNXT_TX_EVENT) {
1922 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
1923 void __iomem *db = txr->tx_doorbell;
1924 u16 prod = txr->tx_prod;
1925
1926 /* Sync BD data before updating doorbell */
1927 wmb();
1928
1929 bnxt_db_write(bp, db, DB_KEY_TX | prod);
1930 }
1931
1932 cpr->cp_raw_cons = raw_cons;
1933 /* ACK completion ring before freeing tx ring and producing new
1934 * buffers in rx/agg rings to prevent overflowing the completion
1935 * ring.
1936 */
1937 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
1938
1939 if (tx_pkts)
1940 bnapi->tx_int(bp, bnapi, tx_pkts);
1941
1942 if (event & BNXT_RX_EVENT) {
1943 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1944
1945 bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod);
1946 if (event & BNXT_AGG_EVENT)
1947 bnxt_db_write(bp, rxr->rx_agg_doorbell,
1948 DB_KEY_RX | rxr->rx_agg_prod);
1949 }
1950 return rx_pkts;
1951 }
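/* Budget accounting in bnxt_poll_work(), illustrated with assumed numbers:
 * with budget = 64 and bp->tx_wake_thresh = 255 (a 511-entry TX ring):
 *
 *	- 300 pending TX completions: the loop breaks once tx_pkts exceeds
 *	  255 and rx_pkts is reported as 64 (the full budget), so
 *	  bnxt_poll() keeps NAPI scheduled instead of completing it;
 *	- 10 RX completions of which 2 fail with -ENOMEM: the poll still
 *	  reports 10 packets of work, so allocation failures count as
 *	  progress and the loop cannot spin forever under memory pressure.
 */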
1952
1953 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
1954 {
1955 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
1956 struct bnxt *bp = bnapi->bp;
1957 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1958 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1959 struct tx_cmp *txcmp;
1960 struct rx_cmp_ext *rxcmp1;
1961 u32 cp_cons, tmp_raw_cons;
1962 u32 raw_cons = cpr->cp_raw_cons;
1963 u32 rx_pkts = 0;
1964 u8 event = 0;
1965
1966 while (1) {
1967 int rc;
1968
1969 cp_cons = RING_CMP(raw_cons);
1970 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1971
1972 if (!TX_CMP_VALID(txcmp, raw_cons))
1973 break;
1974
1975 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
1976 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
1977 cp_cons = RING_CMP(tmp_raw_cons);
1978 rxcmp1 = (struct rx_cmp_ext *)
1979 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1980
1981 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1982 break;
1983
1984 /* force an error to recycle the buffer */
1985 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1986 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1987
1988 rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
1989 if (likely(rc == -EIO) && budget)
1990 rx_pkts++;
1991 else if (rc == -EBUSY) /* partial completion */
1992 break;
1993 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
1994 CMPL_BASE_TYPE_HWRM_DONE)) {
1995 bnxt_hwrm_handler(bp, txcmp);
1996 } else {
1997 netdev_err(bp->dev,
1998 "Invalid completion received on special ring\n");
1999 }
2000 raw_cons = NEXT_RAW_CMP(raw_cons);
2001
2002 if (rx_pkts == budget)
2003 break;
2004 }
2005
2006 cpr->cp_raw_cons = raw_cons;
2007 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
2008 bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod);
2009
2010 if (event & BNXT_AGG_EVENT)
2011 bnxt_db_write(bp, rxr->rx_agg_doorbell,
2012 DB_KEY_RX | rxr->rx_agg_prod);
2013
2014 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
2015 napi_complete_done(napi, rx_pkts);
2016 BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
2017 }
2018 return rx_pkts;
2019 }
2020
2021 static int bnxt_poll(struct napi_struct *napi, int budget)
2022 {
2023 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2024 struct bnxt *bp = bnapi->bp;
2025 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2026 int work_done = 0;
2027
2028 while (1) {
2029 work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
2030
2031 if (work_done >= budget) {
2032 if (!budget)
2033 BNXT_CP_DB_REARM(cpr->cp_doorbell,
2034 cpr->cp_raw_cons);
2035 break;
2036 }
2037
2038 if (!bnxt_has_work(bp, cpr)) {
2039 if (napi_complete_done(napi, work_done))
2040 BNXT_CP_DB_REARM(cpr->cp_doorbell,
2041 cpr->cp_raw_cons);
2042 break;
2043 }
2044 }
2045 mmiowb();
2046 return work_done;
2047 }
2048
2049 static void bnxt_free_tx_skbs(struct bnxt *bp)
2050 {
2051 int i, max_idx;
2052 struct pci_dev *pdev = bp->pdev;
2053
2054 if (!bp->tx_ring)
2055 return;
2056
2057 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2058 for (i = 0; i < bp->tx_nr_rings; i++) {
2059 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2060 int j;
2061
2062 for (j = 0; j < max_idx;) {
2063 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2064 struct sk_buff *skb = tx_buf->skb;
2065 int k, last;
2066
2067 if (!skb) {
2068 j++;
2069 continue;
2070 }
2071
2072 tx_buf->skb = NULL;
2073
2074 if (tx_buf->is_push) {
2075 dev_kfree_skb(skb);
2076 j += 2;
2077 continue;
2078 }
2079
2080 dma_unmap_single(&pdev->dev,
2081 dma_unmap_addr(tx_buf, mapping),
2082 skb_headlen(skb),
2083 PCI_DMA_TODEVICE);
2084
2085 last = tx_buf->nr_frags;
2086 j += 2;
2087 for (k = 0; k < last; k++, j++) {
2088 int ring_idx = j & bp->tx_ring_mask;
2089 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2090
2091 tx_buf = &txr->tx_buf_ring[ring_idx];
2092 dma_unmap_page(
2093 &pdev->dev,
2094 dma_unmap_addr(tx_buf, mapping),
2095 skb_frag_size(frag), PCI_DMA_TODEVICE);
2096 }
2097 dev_kfree_skb(skb);
2098 }
2099 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2100 }
2101 }
2102
2103 static void bnxt_free_rx_skbs(struct bnxt *bp)
2104 {
2105 int i, max_idx, max_agg_idx;
2106 struct pci_dev *pdev = bp->pdev;
2107
2108 if (!bp->rx_ring)
2109 return;
2110
2111 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2112 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2113 for (i = 0; i < bp->rx_nr_rings; i++) {
2114 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2115 int j;
2116
2117 if (rxr->rx_tpa) {
2118 for (j = 0; j < MAX_TPA; j++) {
2119 struct bnxt_tpa_info *tpa_info =
2120 &rxr->rx_tpa[j];
2121 u8 *data = tpa_info->data;
2122
2123 if (!data)
2124 continue;
2125
2126 dma_unmap_single_attrs(&pdev->dev,
2127 tpa_info->mapping,
2128 bp->rx_buf_use_size,
2129 bp->rx_dir,
2130 DMA_ATTR_WEAK_ORDERING);
2131
2132 tpa_info->data = NULL;
2133
2134 kfree(data);
2135 }
2136 }
2137
2138 for (j = 0; j < max_idx; j++) {
2139 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
2140 dma_addr_t mapping = rx_buf->mapping;
2141 void *data = rx_buf->data;
2142
2143 if (!data)
2144 continue;
2145
2146 rx_buf->data = NULL;
2147
2148 if (BNXT_RX_PAGE_MODE(bp)) {
2149 mapping -= bp->rx_dma_offset;
2150 dma_unmap_page_attrs(&pdev->dev, mapping,
2151 PAGE_SIZE, bp->rx_dir,
2152 DMA_ATTR_WEAK_ORDERING);
2153 __free_page(data);
2154 } else {
2155 dma_unmap_single_attrs(&pdev->dev, mapping,
2156 bp->rx_buf_use_size,
2157 bp->rx_dir,
2158 DMA_ATTR_WEAK_ORDERING);
2159 kfree(data);
2160 }
2161 }
2162
2163 for (j = 0; j < max_agg_idx; j++) {
2164 struct bnxt_sw_rx_agg_bd *rx_agg_buf =
2165 &rxr->rx_agg_ring[j];
2166 struct page *page = rx_agg_buf->page;
2167
2168 if (!page)
2169 continue;
2170
2171 dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2172 BNXT_RX_PAGE_SIZE,
2173 PCI_DMA_FROMDEVICE,
2174 DMA_ATTR_WEAK_ORDERING);
2175
2176 rx_agg_buf->page = NULL;
2177 __clear_bit(j, rxr->rx_agg_bmap);
2178
2179 __free_page(page);
2180 }
2181 if (rxr->rx_page) {
2182 __free_page(rxr->rx_page);
2183 rxr->rx_page = NULL;
2184 }
2185 }
2186 }
2187
2188 static void bnxt_free_skbs(struct bnxt *bp)
2189 {
2190 bnxt_free_tx_skbs(bp);
2191 bnxt_free_rx_skbs(bp);
2192 }
2193
2194 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
2195 {
2196 struct pci_dev *pdev = bp->pdev;
2197 int i;
2198
2199 for (i = 0; i < ring->nr_pages; i++) {
2200 if (!ring->pg_arr[i])
2201 continue;
2202
2203 dma_free_coherent(&pdev->dev, ring->page_size,
2204 ring->pg_arr[i], ring->dma_arr[i]);
2205
2206 ring->pg_arr[i] = NULL;
2207 }
2208 if (ring->pg_tbl) {
2209 dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
2210 ring->pg_tbl, ring->pg_tbl_map);
2211 ring->pg_tbl = NULL;
2212 }
2213 if (ring->vmem_size && *ring->vmem) {
2214 vfree(*ring->vmem);
2215 *ring->vmem = NULL;
2216 }
2217 }
2218
2219 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
2220 {
2221 int i;
2222 struct pci_dev *pdev = bp->pdev;
2223
2224 if (ring->nr_pages > 1) {
2225 ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
2226 ring->nr_pages * 8,
2227 &ring->pg_tbl_map,
2228 GFP_KERNEL);
2229 if (!ring->pg_tbl)
2230 return -ENOMEM;
2231 }
2232
2233 for (i = 0; i < ring->nr_pages; i++) {
2234 ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2235 ring->page_size,
2236 &ring->dma_arr[i],
2237 GFP_KERNEL);
2238 if (!ring->pg_arr[i])
2239 return -ENOMEM;
2240
2241 if (ring->nr_pages > 1)
2242 ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]);
2243 }
2244
2245 if (ring->vmem_size) {
2246 *ring->vmem = vzalloc(ring->vmem_size);
2247 if (!(*ring->vmem))
2248 return -ENOMEM;
2249 }
2250 return 0;
2251 }
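/* Layout produced by bnxt_alloc_ring() for a multi-page ring, using assumed
 * sizes for illustration: with ring->nr_pages = 4 and ring->page_size = 4096,
 * four coherent DMA pages back pg_arr[0..3], and pg_tbl is a 4 * 8 = 32 byte
 * table of little-endian DMA addresses that firmware walks to find them:
 *
 *	pg_tbl[0] = cpu_to_le64(dma_arr[0]);
 *	pg_tbl[1] = cpu_to_le64(dma_arr[1]);
 *	...
 *
 * Single-page rings skip the indirection table entirely (pg_tbl is never
 * allocated).  vmem, when requested, is a separate vzalloc'ed shadow array
 * for the software-side buffer descriptors.
 */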
2252
2253 static void bnxt_free_rx_rings(struct bnxt *bp)
2254 {
2255 int i;
2256
2257 if (!bp->rx_ring)
2258 return;
2259
2260 for (i = 0; i < bp->rx_nr_rings; i++) {
2261 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2262 struct bnxt_ring_struct *ring;
2263
2264 if (rxr->xdp_prog)
2265 bpf_prog_put(rxr->xdp_prog);
2266
2267 kfree(rxr->rx_tpa);
2268 rxr->rx_tpa = NULL;
2269
2270 kfree(rxr->rx_agg_bmap);
2271 rxr->rx_agg_bmap = NULL;
2272
2273 ring = &rxr->rx_ring_struct;
2274 bnxt_free_ring(bp, ring);
2275
2276 ring = &rxr->rx_agg_ring_struct;
2277 bnxt_free_ring(bp, ring);
2278 }
2279 }
2280
2281 static int bnxt_alloc_rx_rings(struct bnxt *bp)
2282 {
2283 int i, rc, agg_rings = 0, tpa_rings = 0;
2284
2285 if (!bp->rx_ring)
2286 return -ENOMEM;
2287
2288 if (bp->flags & BNXT_FLAG_AGG_RINGS)
2289 agg_rings = 1;
2290
2291 if (bp->flags & BNXT_FLAG_TPA)
2292 tpa_rings = 1;
2293
2294 for (i = 0; i < bp->rx_nr_rings; i++) {
2295 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2296 struct bnxt_ring_struct *ring;
2297
2298 ring = &rxr->rx_ring_struct;
2299
2300 rc = bnxt_alloc_ring(bp, ring);
2301 if (rc)
2302 return rc;
2303
2304 if (agg_rings) {
2305 u16 mem_size;
2306
2307 ring = &rxr->rx_agg_ring_struct;
2308 rc = bnxt_alloc_ring(bp, ring);
2309 if (rc)
2310 return rc;
2311
2312 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
2313 mem_size = rxr->rx_agg_bmap_size / 8;
2314 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
2315 if (!rxr->rx_agg_bmap)
2316 return -ENOMEM;
2317
2318 if (tpa_rings) {
2319 rxr->rx_tpa = kcalloc(MAX_TPA,
2320 sizeof(struct bnxt_tpa_info),
2321 GFP_KERNEL);
2322 if (!rxr->rx_tpa)
2323 return -ENOMEM;
2324 }
2325 }
2326 }
2327 return 0;
2328 }
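/* Sizing of the aggregation bitmap above, with assumed ring geometry for
 * illustration: if bp->rx_agg_ring_mask = 2047, then
 *
 *	rxr->rx_agg_bmap_size = 2047 + 1 = 2048 bits;
 *	mem_size = 2048 / 8 = 256 bytes of kzalloc'ed bitmap;
 *
 * one bit per aggregation buffer slot, cleared with __clear_bit() as pages
 * are reaped (see bnxt_free_rx_skbs()).
 */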
2329
2330 static void bnxt_free_tx_rings(struct bnxt *bp)
2331 {
2332 int i;
2333 struct pci_dev *pdev = bp->pdev;
2334
2335 if (!bp->tx_ring)
2336 return;
2337
2338 for (i = 0; i < bp->tx_nr_rings; i++) {
2339 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2340 struct bnxt_ring_struct *ring;
2341
2342 if (txr->tx_push) {
2343 dma_free_coherent(&pdev->dev, bp->tx_push_size,
2344 txr->tx_push, txr->tx_push_mapping);
2345 txr->tx_push = NULL;
2346 }
2347
2348 ring = &txr->tx_ring_struct;
2349
2350 bnxt_free_ring(bp, ring);
2351 }
2352 }
2353
2354 static int bnxt_alloc_tx_rings(struct bnxt *bp)
2355 {
2356 int i, j, rc;
2357 struct pci_dev *pdev = bp->pdev;
2358
2359 bp->tx_push_size = 0;
2360 if (bp->tx_push_thresh) {
2361 int push_size;
2362
2363 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
2364 bp->tx_push_thresh);
2365
2366 if (push_size > 256) {
2367 push_size = 0;
2368 bp->tx_push_thresh = 0;
2369 }
2370
2371 bp->tx_push_size = push_size;
2372 }
2373
2374 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
2375 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2376 struct bnxt_ring_struct *ring;
2377
2378 ring = &txr->tx_ring_struct;
2379
2380 rc = bnxt_alloc_ring(bp, ring);
2381 if (rc)
2382 return rc;
2383
2384 if (bp->tx_push_size) {
2385 dma_addr_t mapping;
2386
2387 			/* One pre-allocated DMA buffer to back up the
2388 			 * TX push operation
2389 */
2390 txr->tx_push = dma_alloc_coherent(&pdev->dev,
2391 bp->tx_push_size,
2392 &txr->tx_push_mapping,
2393 GFP_KERNEL);
2394
2395 if (!txr->tx_push)
2396 return -ENOMEM;
2397
2398 mapping = txr->tx_push_mapping +
2399 sizeof(struct tx_push_bd);
2400 txr->data_mapping = cpu_to_le64(mapping);
2401
2402 memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
2403 }
2404 ring->queue_id = bp->q_info[j].queue_id;
2405 if (i < bp->tx_nr_rings_xdp)
2406 continue;
2407 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
2408 j++;
2409 }
2410 return 0;
2411 }
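/* TX push buffer layout set up above, shown with an assumed threshold for
 * illustration: with bp->tx_push_thresh = 164, the per-ring coherent buffer
 * is
 *
 *	push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) + 164)
 *
 * bytes (push mode is disabled outright if this exceeds 256).  The first
 * sizeof(struct tx_push_bd) bytes hold the BDs and the inlined packet data
 * follows, which is why
 *
 *	txr->data_mapping = txr->tx_push_mapping + sizeof(struct tx_push_bd);
 *
 * is the DMA address advertised for the packet bytes.
 */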
2412
2413 static void bnxt_free_cp_rings(struct bnxt *bp)
2414 {
2415 int i;
2416
2417 if (!bp->bnapi)
2418 return;
2419
2420 for (i = 0; i < bp->cp_nr_rings; i++) {
2421 struct bnxt_napi *bnapi = bp->bnapi[i];
2422 struct bnxt_cp_ring_info *cpr;
2423 struct bnxt_ring_struct *ring;
2424
2425 if (!bnapi)
2426 continue;
2427
2428 cpr = &bnapi->cp_ring;
2429 ring = &cpr->cp_ring_struct;
2430
2431 bnxt_free_ring(bp, ring);
2432 }
2433 }
2434
2435 static int bnxt_alloc_cp_rings(struct bnxt *bp)
2436 {
2437 int i, rc;
2438
2439 for (i = 0; i < bp->cp_nr_rings; i++) {
2440 struct bnxt_napi *bnapi = bp->bnapi[i];
2441 struct bnxt_cp_ring_info *cpr;
2442 struct bnxt_ring_struct *ring;
2443
2444 if (!bnapi)
2445 continue;
2446
2447 cpr = &bnapi->cp_ring;
2448 ring = &cpr->cp_ring_struct;
2449
2450 rc = bnxt_alloc_ring(bp, ring);
2451 if (rc)
2452 return rc;
2453 }
2454 return 0;
2455 }
2456
2457 static void bnxt_init_ring_struct(struct bnxt *bp)
2458 {
2459 int i;
2460
2461 for (i = 0; i < bp->cp_nr_rings; i++) {
2462 struct bnxt_napi *bnapi = bp->bnapi[i];
2463 struct bnxt_cp_ring_info *cpr;
2464 struct bnxt_rx_ring_info *rxr;
2465 struct bnxt_tx_ring_info *txr;
2466 struct bnxt_ring_struct *ring;
2467
2468 if (!bnapi)
2469 continue;
2470
2471 cpr = &bnapi->cp_ring;
2472 ring = &cpr->cp_ring_struct;
2473 ring->nr_pages = bp->cp_nr_pages;
2474 ring->page_size = HW_CMPD_RING_SIZE;
2475 ring->pg_arr = (void **)cpr->cp_desc_ring;
2476 ring->dma_arr = cpr->cp_desc_mapping;
2477 ring->vmem_size = 0;
2478
2479 rxr = bnapi->rx_ring;
2480 if (!rxr)
2481 goto skip_rx;
2482
2483 ring = &rxr->rx_ring_struct;
2484 ring->nr_pages = bp->rx_nr_pages;
2485 ring->page_size = HW_RXBD_RING_SIZE;
2486 ring->pg_arr = (void **)rxr->rx_desc_ring;
2487 ring->dma_arr = rxr->rx_desc_mapping;
2488 ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
2489 ring->vmem = (void **)&rxr->rx_buf_ring;
2490
2491 ring = &rxr->rx_agg_ring_struct;
2492 ring->nr_pages = bp->rx_agg_nr_pages;
2493 ring->page_size = HW_RXBD_RING_SIZE;
2494 ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
2495 ring->dma_arr = rxr->rx_agg_desc_mapping;
2496 ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
2497 ring->vmem = (void **)&rxr->rx_agg_ring;
2498
2499 skip_rx:
2500 txr = bnapi->tx_ring;
2501 if (!txr)
2502 continue;
2503
2504 ring = &txr->tx_ring_struct;
2505 ring->nr_pages = bp->tx_nr_pages;
2506 ring->page_size = HW_RXBD_RING_SIZE;
2507 ring->pg_arr = (void **)txr->tx_desc_ring;
2508 ring->dma_arr = txr->tx_desc_mapping;
2509 ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
2510 ring->vmem = (void **)&txr->tx_buf_ring;
2511 }
2512 }
2513
2514 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
2515 {
2516 int i;
2517 u32 prod;
2518 struct rx_bd **rx_buf_ring;
2519
2520 rx_buf_ring = (struct rx_bd **)ring->pg_arr;
2521 for (i = 0, prod = 0; i < ring->nr_pages; i++) {
2522 int j;
2523 struct rx_bd *rxbd;
2524
2525 rxbd = rx_buf_ring[i];
2526 if (!rxbd)
2527 continue;
2528
2529 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
2530 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
2531 rxbd->rx_bd_opaque = prod;
2532 }
2533 }
2534 }
2535
2536 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
2537 {
2538 struct net_device *dev = bp->dev;
2539 struct bnxt_rx_ring_info *rxr;
2540 struct bnxt_ring_struct *ring;
2541 u32 prod, type;
2542 int i;
2543
2544 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
2545 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
2546
2547 if (NET_IP_ALIGN == 2)
2548 type |= RX_BD_FLAGS_SOP;
2549
2550 rxr = &bp->rx_ring[ring_nr];
2551 ring = &rxr->rx_ring_struct;
2552 bnxt_init_rxbd_pages(ring, type);
2553
2554 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
2555 rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1);
2556 if (IS_ERR(rxr->xdp_prog)) {
2557 int rc = PTR_ERR(rxr->xdp_prog);
2558
2559 rxr->xdp_prog = NULL;
2560 return rc;
2561 }
2562 }
2563 prod = rxr->rx_prod;
2564 for (i = 0; i < bp->rx_ring_size; i++) {
2565 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
2566 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
2567 ring_nr, i, bp->rx_ring_size);
2568 break;
2569 }
2570 prod = NEXT_RX(prod);
2571 }
2572 rxr->rx_prod = prod;
2573 ring->fw_ring_id = INVALID_HW_RING_ID;
2574
2575 ring = &rxr->rx_agg_ring_struct;
2576 ring->fw_ring_id = INVALID_HW_RING_ID;
2577
2578 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
2579 return 0;
2580
2581 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
2582 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
2583
2584 bnxt_init_rxbd_pages(ring, type);
2585
2586 prod = rxr->rx_agg_prod;
2587 for (i = 0; i < bp->rx_agg_ring_size; i++) {
2588 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
2589 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
2590 				    ring_nr, i, bp->rx_agg_ring_size);
2591 break;
2592 }
2593 prod = NEXT_RX_AGG(prod);
2594 }
2595 rxr->rx_agg_prod = prod;
2596
2597 if (bp->flags & BNXT_FLAG_TPA) {
2598 if (rxr->rx_tpa) {
2599 u8 *data;
2600 dma_addr_t mapping;
2601
2602 for (i = 0; i < MAX_TPA; i++) {
2603 data = __bnxt_alloc_rx_data(bp, &mapping,
2604 GFP_KERNEL);
2605 if (!data)
2606 return -ENOMEM;
2607
2608 rxr->rx_tpa[i].data = data;
2609 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
2610 rxr->rx_tpa[i].mapping = mapping;
2611 }
2612 } else {
2613 netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
2614 return -ENOMEM;
2615 }
2616 }
2617
2618 return 0;
2619 }
2620
2621 static void bnxt_init_cp_rings(struct bnxt *bp)
2622 {
2623 int i;
2624
2625 for (i = 0; i < bp->cp_nr_rings; i++) {
2626 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
2627 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
2628
2629 ring->fw_ring_id = INVALID_HW_RING_ID;
2630 }
2631 }
2632
2633 static int bnxt_init_rx_rings(struct bnxt *bp)
2634 {
2635 int i, rc = 0;
2636
2637 if (BNXT_RX_PAGE_MODE(bp)) {
2638 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
2639 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
2640 } else {
2641 bp->rx_offset = BNXT_RX_OFFSET;
2642 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
2643 }
2644
2645 for (i = 0; i < bp->rx_nr_rings; i++) {
2646 rc = bnxt_init_one_rx_ring(bp, i);
2647 if (rc)
2648 break;
2649 }
2650
2651 return rc;
2652 }
2653
2654 static int bnxt_init_tx_rings(struct bnxt *bp)
2655 {
2656 u16 i;
2657
2658 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
2659 MAX_SKB_FRAGS + 1);
2660
2661 for (i = 0; i < bp->tx_nr_rings; i++) {
2662 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2663 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
2664
2665 ring->fw_ring_id = INVALID_HW_RING_ID;
2666 }
2667
2668 return 0;
2669 }
2670
2671 static void bnxt_free_ring_grps(struct bnxt *bp)
2672 {
2673 kfree(bp->grp_info);
2674 bp->grp_info = NULL;
2675 }
2676
2677 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
2678 {
2679 int i;
2680
2681 if (irq_re_init) {
2682 bp->grp_info = kcalloc(bp->cp_nr_rings,
2683 sizeof(struct bnxt_ring_grp_info),
2684 GFP_KERNEL);
2685 if (!bp->grp_info)
2686 return -ENOMEM;
2687 }
2688 for (i = 0; i < bp->cp_nr_rings; i++) {
2689 if (irq_re_init)
2690 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
2691 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
2692 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
2693 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
2694 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
2695 }
2696 return 0;
2697 }
2698
2699 static void bnxt_free_vnics(struct bnxt *bp)
2700 {
2701 kfree(bp->vnic_info);
2702 bp->vnic_info = NULL;
2703 bp->nr_vnics = 0;
2704 }
2705
2706 static int bnxt_alloc_vnics(struct bnxt *bp)
2707 {
2708 int num_vnics = 1;
2709
2710 #ifdef CONFIG_RFS_ACCEL
2711 if (bp->flags & BNXT_FLAG_RFS)
2712 num_vnics += bp->rx_nr_rings;
2713 #endif
2714
2715 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
2716 num_vnics++;
2717
2718 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
2719 GFP_KERNEL);
2720 if (!bp->vnic_info)
2721 return -ENOMEM;
2722
2723 bp->nr_vnics = num_vnics;
2724 return 0;
2725 }
2726
2727 static void bnxt_init_vnics(struct bnxt *bp)
2728 {
2729 int i;
2730
2731 for (i = 0; i < bp->nr_vnics; i++) {
2732 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2733
2734 vnic->fw_vnic_id = INVALID_HW_RING_ID;
2735 vnic->fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
2736 vnic->fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
2737 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
2738
2739 if (bp->vnic_info[i].rss_hash_key) {
2740 if (i == 0)
2741 prandom_bytes(vnic->rss_hash_key,
2742 HW_HASH_KEY_SIZE);
2743 else
2744 memcpy(vnic->rss_hash_key,
2745 bp->vnic_info[0].rss_hash_key,
2746 HW_HASH_KEY_SIZE);
2747 }
2748 }
2749 }
2750
2751 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
2752 {
2753 int pages;
2754
2755 pages = ring_size / desc_per_pg;
2756
2757 if (!pages)
2758 return 1;
2759
2760 pages++;
2761
2762 while (pages & (pages - 1))
2763 pages++;
2764
2765 return pages;
2766 }
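/* Worked examples for bnxt_calc_nr_ring_pages(), assuming desc_per_pg = 256:
 *
 *	ring_size = 200 -> 200 / 256 = 0          -> 1 page
 *	ring_size = 511 -> 511 / 256 = 1 -> +1    -> 2 pages
 *	ring_size = 767 -> 767 / 256 = 2 -> +1 = 3 -> round up -> 4 pages
 *
 * i.e. the smallest power-of-two page count whose total descriptor capacity
 * strictly exceeds ring_size, which keeps the corresponding ring mask a
 * simple (nr_pages * desc_per_pg) - 1.
 */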
2767
2768 void bnxt_set_tpa_flags(struct bnxt *bp)
2769 {
2770 bp->flags &= ~BNXT_FLAG_TPA;
2771 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
2772 return;
2773 if (bp->dev->features & NETIF_F_LRO)
2774 bp->flags |= BNXT_FLAG_LRO;
2775 if (bp->dev->features & NETIF_F_GRO)
2776 bp->flags |= BNXT_FLAG_GRO;
2777 }
2778
2779 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
2780 * be set on entry.
2781 */
2782 void bnxt_set_ring_params(struct bnxt *bp)
2783 {
2784 u32 ring_size, rx_size, rx_space;
2785 u32 agg_factor = 0, agg_ring_size = 0;
2786
2787 /* 8 for CRC and VLAN */
2788 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
2789
2790 rx_space = rx_size + NET_SKB_PAD +
2791 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2792
2793 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
2794 ring_size = bp->rx_ring_size;
2795 bp->rx_agg_ring_size = 0;
2796 bp->rx_agg_nr_pages = 0;
2797
2798 if (bp->flags & BNXT_FLAG_TPA)
2799 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
2800
2801 bp->flags &= ~BNXT_FLAG_JUMBO;
2802 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
2803 u32 jumbo_factor;
2804
2805 bp->flags |= BNXT_FLAG_JUMBO;
2806 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
2807 if (jumbo_factor > agg_factor)
2808 agg_factor = jumbo_factor;
2809 }
2810 agg_ring_size = ring_size * agg_factor;
2811
2812 if (agg_ring_size) {
2813 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
2814 RX_DESC_CNT);
2815 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
2816 u32 tmp = agg_ring_size;
2817
2818 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
2819 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
2820 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
2821 tmp, agg_ring_size);
2822 }
2823 bp->rx_agg_ring_size = agg_ring_size;
2824 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
2825 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
2826 rx_space = rx_size + NET_SKB_PAD +
2827 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2828 }
2829
2830 bp->rx_buf_use_size = rx_size;
2831 bp->rx_buf_size = rx_space;
2832
2833 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
2834 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
2835
2836 ring_size = bp->tx_ring_size;
2837 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
2838 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
2839
2840 ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
2841 bp->cp_ring_size = ring_size;
2842
2843 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
2844 if (bp->cp_nr_pages > MAX_CP_PAGES) {
2845 bp->cp_nr_pages = MAX_CP_PAGES;
2846 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
2847 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
2848 ring_size, bp->cp_ring_size);
2849 }
2850 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
2851 bp->cp_ring_mask = bp->cp_bit - 1;
2852 }
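/* An illustration of the sizing above with assumed values (4K pages,
 * MTU 1500, NET_SKB_PAD = 64, NET_IP_ALIGN = 2):
 *
 *	rx_size  = SKB_DATA_ALIGN(1500 + ETH_HLEN + 2 + 8) = 1536
 *	rx_space = rx_size + 64 +
 *		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 *
 * With TPA enabled, agg_factor = min(4, 65536 / BNXT_RX_PAGE_SIZE) and the
 * aggregation ring holds ring_size * agg_factor entries, after which the
 * data buffers shrink back to the copy-break size (BNXT_RX_COPY_THRESH)
 * because large payloads land in aggregation pages.  The completion ring
 * must absorb every producer, hence
 * rx_ring_size * (2 + agg_factor) + tx_ring_size entries.
 */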
2853
2854 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
2855 {
2856 if (page_mode) {
2857 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
2858 return -EOPNOTSUPP;
2859 bp->dev->max_mtu = BNXT_MAX_PAGE_MODE_MTU;
2860 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
2861 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
2862 bp->dev->hw_features &= ~NETIF_F_LRO;
2863 bp->dev->features &= ~NETIF_F_LRO;
2864 bp->rx_dir = DMA_BIDIRECTIONAL;
2865 bp->rx_skb_func = bnxt_rx_page_skb;
2866 } else {
2867 bp->dev->max_mtu = BNXT_MAX_MTU;
2868 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
2869 bp->rx_dir = DMA_FROM_DEVICE;
2870 bp->rx_skb_func = bnxt_rx_skb;
2871 }
2872 return 0;
2873 }
2874
2875 static void bnxt_free_vnic_attributes(struct bnxt *bp)
2876 {
2877 int i;
2878 struct bnxt_vnic_info *vnic;
2879 struct pci_dev *pdev = bp->pdev;
2880
2881 if (!bp->vnic_info)
2882 return;
2883
2884 for (i = 0; i < bp->nr_vnics; i++) {
2885 vnic = &bp->vnic_info[i];
2886
2887 kfree(vnic->fw_grp_ids);
2888 vnic->fw_grp_ids = NULL;
2889
2890 kfree(vnic->uc_list);
2891 vnic->uc_list = NULL;
2892
2893 if (vnic->mc_list) {
2894 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
2895 vnic->mc_list, vnic->mc_list_mapping);
2896 vnic->mc_list = NULL;
2897 }
2898
2899 if (vnic->rss_table) {
2900 dma_free_coherent(&pdev->dev, PAGE_SIZE,
2901 vnic->rss_table,
2902 vnic->rss_table_dma_addr);
2903 vnic->rss_table = NULL;
2904 }
2905
2906 vnic->rss_hash_key = NULL;
2907 vnic->flags = 0;
2908 }
2909 }
2910
2911 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
2912 {
2913 int i, rc = 0, size;
2914 struct bnxt_vnic_info *vnic;
2915 struct pci_dev *pdev = bp->pdev;
2916 int max_rings;
2917
2918 for (i = 0; i < bp->nr_vnics; i++) {
2919 vnic = &bp->vnic_info[i];
2920
2921 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
2922 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
2923
2924 if (mem_size > 0) {
2925 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
2926 if (!vnic->uc_list) {
2927 rc = -ENOMEM;
2928 goto out;
2929 }
2930 }
2931 }
2932
2933 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
2934 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
2935 vnic->mc_list =
2936 dma_alloc_coherent(&pdev->dev,
2937 vnic->mc_list_size,
2938 &vnic->mc_list_mapping,
2939 GFP_KERNEL);
2940 if (!vnic->mc_list) {
2941 rc = -ENOMEM;
2942 goto out;
2943 }
2944 }
2945
2946 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
2947 max_rings = bp->rx_nr_rings;
2948 else
2949 max_rings = 1;
2950
2951 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
2952 if (!vnic->fw_grp_ids) {
2953 rc = -ENOMEM;
2954 goto out;
2955 }
2956
2957 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
2958 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
2959 continue;
2960
2961 /* Allocate rss table and hash key */
2962 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
2963 &vnic->rss_table_dma_addr,
2964 GFP_KERNEL);
2965 if (!vnic->rss_table) {
2966 rc = -ENOMEM;
2967 goto out;
2968 }
2969
2970 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
2971
2972 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
2973 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
2974 }
2975 return 0;
2976
2977 out:
2978 return rc;
2979 }
2980
2981 static void bnxt_free_hwrm_resources(struct bnxt *bp)
2982 {
2983 struct pci_dev *pdev = bp->pdev;
2984
2985 if (bp->hwrm_cmd_resp_addr) {
2986 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
2987 bp->hwrm_cmd_resp_dma_addr);
2988 bp->hwrm_cmd_resp_addr = NULL;
2989 }
2990 if (bp->hwrm_dbg_resp_addr) {
2991 dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
2992 bp->hwrm_dbg_resp_addr,
2993 bp->hwrm_dbg_resp_dma_addr);
2994
2995 bp->hwrm_dbg_resp_addr = NULL;
2996 }
2997 }
2998
2999 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
3000 {
3001 struct pci_dev *pdev = bp->pdev;
3002
3003 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3004 &bp->hwrm_cmd_resp_dma_addr,
3005 GFP_KERNEL);
3006 if (!bp->hwrm_cmd_resp_addr)
3007 return -ENOMEM;
3008 bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
3009 HWRM_DBG_REG_BUF_SIZE,
3010 &bp->hwrm_dbg_resp_dma_addr,
3011 GFP_KERNEL);
3012 if (!bp->hwrm_dbg_resp_addr)
3013 netdev_warn(bp->dev, "fail to alloc debug register dma mem\n");
3014
3015 return 0;
3016 }
3017
3018 static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
3019 {
3020 if (bp->hwrm_short_cmd_req_addr) {
3021 struct pci_dev *pdev = bp->pdev;
3022
3023 dma_free_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN,
3024 bp->hwrm_short_cmd_req_addr,
3025 bp->hwrm_short_cmd_req_dma_addr);
3026 bp->hwrm_short_cmd_req_addr = NULL;
3027 }
3028 }
3029
3030 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
3031 {
3032 struct pci_dev *pdev = bp->pdev;
3033
3034 bp->hwrm_short_cmd_req_addr =
3035 dma_alloc_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN,
3036 &bp->hwrm_short_cmd_req_dma_addr,
3037 GFP_KERNEL);
3038 if (!bp->hwrm_short_cmd_req_addr)
3039 return -ENOMEM;
3040
3041 return 0;
3042 }
3043
3044 static void bnxt_free_stats(struct bnxt *bp)
3045 {
3046 u32 size, i;
3047 struct pci_dev *pdev = bp->pdev;
3048
3049 if (bp->hw_rx_port_stats) {
3050 dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
3051 bp->hw_rx_port_stats,
3052 bp->hw_rx_port_stats_map);
3053 bp->hw_rx_port_stats = NULL;
3054 bp->flags &= ~BNXT_FLAG_PORT_STATS;
3055 }
3056
3057 if (!bp->bnapi)
3058 return;
3059
3060 size = sizeof(struct ctx_hw_stats);
3061
3062 for (i = 0; i < bp->cp_nr_rings; i++) {
3063 struct bnxt_napi *bnapi = bp->bnapi[i];
3064 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3065
3066 if (cpr->hw_stats) {
3067 dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
3068 cpr->hw_stats_map);
3069 cpr->hw_stats = NULL;
3070 }
3071 }
3072 }
3073
3074 static int bnxt_alloc_stats(struct bnxt *bp)
3075 {
3076 u32 size, i;
3077 struct pci_dev *pdev = bp->pdev;
3078
3079 size = sizeof(struct ctx_hw_stats);
3080
3081 for (i = 0; i < bp->cp_nr_rings; i++) {
3082 struct bnxt_napi *bnapi = bp->bnapi[i];
3083 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3084
3085 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
3086 &cpr->hw_stats_map,
3087 GFP_KERNEL);
3088 if (!cpr->hw_stats)
3089 return -ENOMEM;
3090
3091 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
3092 }
3093
3094 if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) {
3095 bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
3096 sizeof(struct tx_port_stats) + 1024;
3097
3098 bp->hw_rx_port_stats =
3099 dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
3100 &bp->hw_rx_port_stats_map,
3101 GFP_KERNEL);
3102 if (!bp->hw_rx_port_stats)
3103 return -ENOMEM;
3104
3105 bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) +
3106 512;
3107 bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
3108 sizeof(struct rx_port_stats) + 512;
3109 bp->flags |= BNXT_FLAG_PORT_STATS;
3110 }
3111 return 0;
3112 }
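/* Layout of the single port-stats DMA block allocated above; the offsets are
 * a sketch of what the code computes, not firmware documentation:
 *
 *	hw_rx_port_stats_map                : struct rx_port_stats
 *	  + sizeof(struct rx_port_stats)    : 512 bytes of padding
 *	  + 512                             : struct tx_port_stats
 *	  (hw_port_stats_size adds 1024 bytes of padding in total)
 *
 * bp->hw_tx_port_stats points into the same coherent buffer, so one
 * allocation and one DMA mapping cover both directions.
 */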
3113
3114 static void bnxt_clear_ring_indices(struct bnxt *bp)
3115 {
3116 int i;
3117
3118 if (!bp->bnapi)
3119 return;
3120
3121 for (i = 0; i < bp->cp_nr_rings; i++) {
3122 struct bnxt_napi *bnapi = bp->bnapi[i];
3123 struct bnxt_cp_ring_info *cpr;
3124 struct bnxt_rx_ring_info *rxr;
3125 struct bnxt_tx_ring_info *txr;
3126
3127 if (!bnapi)
3128 continue;
3129
3130 cpr = &bnapi->cp_ring;
3131 cpr->cp_raw_cons = 0;
3132
3133 txr = bnapi->tx_ring;
3134 if (txr) {
3135 txr->tx_prod = 0;
3136 txr->tx_cons = 0;
3137 }
3138
3139 rxr = bnapi->rx_ring;
3140 if (rxr) {
3141 rxr->rx_prod = 0;
3142 rxr->rx_agg_prod = 0;
3143 rxr->rx_sw_agg_prod = 0;
3144 rxr->rx_next_cons = 0;
3145 }
3146 }
3147 }
3148
3149 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
3150 {
3151 #ifdef CONFIG_RFS_ACCEL
3152 int i;
3153
3154 	/* We are under rtnl_lock and all our NAPIs have been disabled, so
3155 	 * it is safe to delete the hash table.
3156 */
3157 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
3158 struct hlist_head *head;
3159 struct hlist_node *tmp;
3160 struct bnxt_ntuple_filter *fltr;
3161
3162 head = &bp->ntp_fltr_hash_tbl[i];
3163 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
3164 hlist_del(&fltr->hash);
3165 kfree(fltr);
3166 }
3167 }
3168 if (irq_reinit) {
3169 kfree(bp->ntp_fltr_bmap);
3170 bp->ntp_fltr_bmap = NULL;
3171 }
3172 bp->ntp_fltr_count = 0;
3173 #endif
3174 }
3175
3176 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
3177 {
3178 #ifdef CONFIG_RFS_ACCEL
3179 int i, rc = 0;
3180
3181 if (!(bp->flags & BNXT_FLAG_RFS))
3182 return 0;
3183
3184 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
3185 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
3186
3187 bp->ntp_fltr_count = 0;
3188 bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
3189 sizeof(long),
3190 GFP_KERNEL);
3191
3192 if (!bp->ntp_fltr_bmap)
3193 rc = -ENOMEM;
3194
3195 return rc;
3196 #else
3197 return 0;
3198 #endif
3199 }
3200
3201 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
3202 {
3203 bnxt_free_vnic_attributes(bp);
3204 bnxt_free_tx_rings(bp);
3205 bnxt_free_rx_rings(bp);
3206 bnxt_free_cp_rings(bp);
3207 bnxt_free_ntp_fltrs(bp, irq_re_init);
3208 if (irq_re_init) {
3209 bnxt_free_stats(bp);
3210 bnxt_free_ring_grps(bp);
3211 bnxt_free_vnics(bp);
3212 kfree(bp->tx_ring_map);
3213 bp->tx_ring_map = NULL;
3214 kfree(bp->tx_ring);
3215 bp->tx_ring = NULL;
3216 kfree(bp->rx_ring);
3217 bp->rx_ring = NULL;
3218 kfree(bp->bnapi);
3219 bp->bnapi = NULL;
3220 } else {
3221 bnxt_clear_ring_indices(bp);
3222 }
3223 }
3224
3225 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
3226 {
3227 int i, j, rc, size, arr_size;
3228 void *bnapi;
3229
3230 if (irq_re_init) {
3231 /* Allocate bnapi mem pointer array and mem block for
3232 * all queues
3233 */
3234 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
3235 bp->cp_nr_rings);
3236 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
3237 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
3238 if (!bnapi)
3239 return -ENOMEM;
3240
3241 bp->bnapi = bnapi;
3242 bnapi += arr_size;
3243 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
3244 bp->bnapi[i] = bnapi;
3245 bp->bnapi[i]->index = i;
3246 bp->bnapi[i]->bp = bp;
3247 }
3248
3249 bp->rx_ring = kcalloc(bp->rx_nr_rings,
3250 sizeof(struct bnxt_rx_ring_info),
3251 GFP_KERNEL);
3252 if (!bp->rx_ring)
3253 return -ENOMEM;
3254
3255 for (i = 0; i < bp->rx_nr_rings; i++) {
3256 bp->rx_ring[i].bnapi = bp->bnapi[i];
3257 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
3258 }
3259
3260 bp->tx_ring = kcalloc(bp->tx_nr_rings,
3261 sizeof(struct bnxt_tx_ring_info),
3262 GFP_KERNEL);
3263 if (!bp->tx_ring)
3264 return -ENOMEM;
3265
3266 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
3267 GFP_KERNEL);
3268
3269 if (!bp->tx_ring_map)
3270 return -ENOMEM;
3271
3272 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
3273 j = 0;
3274 else
3275 j = bp->rx_nr_rings;
3276
3277 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
3278 bp->tx_ring[i].bnapi = bp->bnapi[j];
3279 bp->bnapi[j]->tx_ring = &bp->tx_ring[i];
3280 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
3281 if (i >= bp->tx_nr_rings_xdp) {
3282 bp->tx_ring[i].txq_index = i -
3283 bp->tx_nr_rings_xdp;
3284 bp->bnapi[j]->tx_int = bnxt_tx_int;
3285 } else {
3286 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
3287 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
3288 }
3289 }
3290
3291 rc = bnxt_alloc_stats(bp);
3292 if (rc)
3293 goto alloc_mem_err;
3294
3295 rc = bnxt_alloc_ntp_fltrs(bp);
3296 if (rc)
3297 goto alloc_mem_err;
3298
3299 rc = bnxt_alloc_vnics(bp);
3300 if (rc)
3301 goto alloc_mem_err;
3302 }
3303
3304 bnxt_init_ring_struct(bp);
3305
3306 rc = bnxt_alloc_rx_rings(bp);
3307 if (rc)
3308 goto alloc_mem_err;
3309
3310 rc = bnxt_alloc_tx_rings(bp);
3311 if (rc)
3312 goto alloc_mem_err;
3313
3314 rc = bnxt_alloc_cp_rings(bp);
3315 if (rc)
3316 goto alloc_mem_err;
3317
3318 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
3319 BNXT_VNIC_UCAST_FLAG;
3320 rc = bnxt_alloc_vnic_attributes(bp);
3321 if (rc)
3322 goto alloc_mem_err;
3323 return 0;
3324
3325 alloc_mem_err:
3326 bnxt_free_mem(bp, true);
3327 return rc;
3328 }
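/* Shape of the bnapi allocation above, with an assumed bp->cp_nr_rings = 4
 * for illustration: a single kzalloc() provides
 *
 *	[ 4 x struct bnxt_napi *  (arr_size, L1 aligned) ]
 *	[ struct bnxt_napi #0     (size,     L1 aligned) ]
 *	[ struct bnxt_napi #1 ] [ #2 ] [ #3 ]
 *
 * bp->bnapi points at the pointer array and each bp->bnapi[i] is fixed up to
 * point at its slot in the trailing block, so all of the per-ring NAPI state
 * lives in one allocation.
 */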
3329
3330 static void bnxt_disable_int(struct bnxt *bp)
3331 {
3332 int i;
3333
3334 if (!bp->bnapi)
3335 return;
3336
3337 for (i = 0; i < bp->cp_nr_rings; i++) {
3338 struct bnxt_napi *bnapi = bp->bnapi[i];
3339 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3340 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3341
3342 if (ring->fw_ring_id != INVALID_HW_RING_ID)
3343 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
3344 }
3345 }
3346
3347 static void bnxt_disable_int_sync(struct bnxt *bp)
3348 {
3349 int i;
3350
3351 atomic_inc(&bp->intr_sem);
3352
3353 bnxt_disable_int(bp);
3354 for (i = 0; i < bp->cp_nr_rings; i++)
3355 synchronize_irq(bp->irq_tbl[i].vector);
3356 }
3357
3358 static void bnxt_enable_int(struct bnxt *bp)
3359 {
3360 int i;
3361
3362 atomic_set(&bp->intr_sem, 0);
3363 for (i = 0; i < bp->cp_nr_rings; i++) {
3364 struct bnxt_napi *bnapi = bp->bnapi[i];
3365 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3366
3367 BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
3368 }
3369 }
3370
3371 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
3372 u16 cmpl_ring, u16 target_id)
3373 {
3374 struct input *req = request;
3375
3376 req->req_type = cpu_to_le16(req_type);
3377 req->cmpl_ring = cpu_to_le16(cmpl_ring);
3378 req->target_id = cpu_to_le16(target_id);
3379 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
3380 }
3381
3382 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
3383 int timeout, bool silent)
3384 {
3385 int i, intr_process, rc, tmo_count;
3386 struct input *req = msg;
3387 u32 *data = msg;
3388 __le32 *resp_len, *valid;
3389 u16 cp_ring_id, len = 0;
3390 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
3391 u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
3392
3393 req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
3394 memset(resp, 0, PAGE_SIZE);
3395 cp_ring_id = le16_to_cpu(req->cmpl_ring);
3396 intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
3397
3398 if (bp->flags & BNXT_FLAG_SHORT_CMD) {
3399 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
3400 struct hwrm_short_input short_input = {0};
3401
3402 memcpy(short_cmd_req, req, msg_len);
3403 memset(short_cmd_req + msg_len, 0, BNXT_HWRM_MAX_REQ_LEN -
3404 msg_len);
3405
3406 short_input.req_type = req->req_type;
3407 short_input.signature =
3408 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
3409 short_input.size = cpu_to_le16(msg_len);
3410 short_input.req_addr =
3411 cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
3412
3413 data = (u32 *)&short_input;
3414 msg_len = sizeof(short_input);
3415
3416 /* Sync memory write before updating doorbell */
3417 wmb();
3418
3419 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
3420 }
3421
3422 /* Write request msg to hwrm channel */
3423 __iowrite32_copy(bp->bar0, data, msg_len / 4);
3424
3425 for (i = msg_len; i < max_req_len; i += 4)
3426 writel(0, bp->bar0 + i);
3427
3428 /* currently supports only one outstanding message */
3429 if (intr_process)
3430 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
3431
3432 /* Ring channel doorbell */
3433 writel(1, bp->bar0 + 0x100);
3434
3435 if (!timeout)
3436 timeout = DFLT_HWRM_CMD_TIMEOUT;
3437
3438 i = 0;
3439 tmo_count = timeout * 40;
3440 if (intr_process) {
3441 /* Wait until hwrm response cmpl interrupt is processed */
3442 while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
3443 i++ < tmo_count) {
3444 usleep_range(25, 40);
3445 }
3446
3447 if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
3448 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
3449 le16_to_cpu(req->req_type));
3450 return -1;
3451 }
3452 } else {
3453 /* Check if response len is updated */
3454 resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
3455 for (i = 0; i < tmo_count; i++) {
3456 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
3457 HWRM_RESP_LEN_SFT;
3458 if (len)
3459 break;
3460 usleep_range(25, 40);
3461 }
3462
3463 if (i >= tmo_count) {
3464 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
3465 timeout, le16_to_cpu(req->req_type),
3466 le16_to_cpu(req->seq_id), len);
3467 return -1;
3468 }
3469
3470 /* Last word of resp contains valid bit */
3471 valid = bp->hwrm_cmd_resp_addr + len - 4;
3472 for (i = 0; i < 5; i++) {
3473 if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
3474 break;
3475 udelay(1);
3476 }
3477
3478 if (i >= 5) {
3479 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
3480 timeout, le16_to_cpu(req->req_type),
3481 le16_to_cpu(req->seq_id), len, *valid);
3482 return -1;
3483 }
3484 }
3485
3486 rc = le16_to_cpu(resp->error_code);
3487 if (rc && !silent)
3488 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
3489 le16_to_cpu(resp->req_type),
3490 le16_to_cpu(resp->seq_id), rc);
3491 return rc;
3492 }
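/* A condensed sketch of the HWRM exchange implemented above (the offsets are
 * simply the ones this function uses, not a full interface description):
 *
 *	1. write the request (or the short-command descriptor) into BAR0
 *	   with __iowrite32_copy(), zero-padding up to max_req_len;
 *	2. ring the channel doorbell:  writel(1, bp->bar0 + 0x100);
 *	3. wait for completion: either the completion-ring interrupt path
 *	   marks bp->hwrm_intr_seq_id invalid (see bnxt_hwrm_handler()), or
 *	   we poll the response buffer until its length field and final
 *	   valid word are set;
 *	4. the DMA'ed struct hwrm_err_output at bp->hwrm_cmd_resp_addr
 *	   carries the firmware error_code returned to the caller.
 *
 * Callers serialize on bp->hwrm_cmd_lock, so only one request is ever
 * outstanding (see hwrm_send_message()).
 */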
3493
3494 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3495 {
3496 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
3497 }
3498
3499 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
3500 int timeout)
3501 {
3502 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
3503 }
3504
3505 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3506 {
3507 int rc;
3508
3509 mutex_lock(&bp->hwrm_cmd_lock);
3510 rc = _hwrm_send_message(bp, msg, msg_len, timeout);
3511 mutex_unlock(&bp->hwrm_cmd_lock);
3512 return rc;
3513 }
3514
3515 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
3516 int timeout)
3517 {
3518 int rc;
3519
3520 mutex_lock(&bp->hwrm_cmd_lock);
3521 rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
3522 mutex_unlock(&bp->hwrm_cmd_lock);
3523 return rc;
3524 }
3525
3526 int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
3527 int bmap_size)
3528 {
3529 struct hwrm_func_drv_rgtr_input req = {0};
3530 DECLARE_BITMAP(async_events_bmap, 256);
3531 u32 *events = (u32 *)async_events_bmap;
3532 int i;
3533
3534 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
3535
3536 req.enables =
3537 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
3538
3539 memset(async_events_bmap, 0, sizeof(async_events_bmap));
3540 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
3541 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
3542
3543 if (bmap && bmap_size) {
3544 for (i = 0; i < bmap_size; i++) {
3545 if (test_bit(i, bmap))
3546 __set_bit(i, async_events_bmap);
3547 }
3548 }
3549
3550 for (i = 0; i < 8; i++)
3551 req.async_event_fwd[i] |= cpu_to_le32(events[i]);
3552
3553 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3554 }
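/* Encoding used above, shown with an assumed event id on a little-endian
 * host: registering async event 33 (0x21) sets bit 33 of the 256-bit bitmap,
 * which is equivalent to
 *
 *	__set_bit(33, async_events_bmap);
 *	req.async_event_fwd[33 / 32] |= cpu_to_le32(1 << (33 % 32));
 *
 * i.e. events[1] carries bit 1.  The optional bmap/bmap_size arguments let
 * callers (e.g. the ULP layer) OR additional event ids on top of
 * bnxt_async_events_arr[].
 */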
3555
3556 static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
3557 {
3558 struct hwrm_func_drv_rgtr_input req = {0};
3559
3560 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
3561
3562 req.enables =
3563 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
3564 FUNC_DRV_RGTR_REQ_ENABLES_VER);
3565
3566 req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
3567 req.ver_maj = DRV_VER_MAJ;
3568 req.ver_min = DRV_VER_MIN;
3569 req.ver_upd = DRV_VER_UPD;
3570
3571 if (BNXT_PF(bp)) {
3572 u32 data[8];
3573 int i;
3574
3575 memset(data, 0, sizeof(data));
3576 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
3577 u16 cmd = bnxt_vf_req_snif[i];
3578 unsigned int bit, idx;
3579
3580 idx = cmd / 32;
3581 bit = cmd % 32;
3582 data[idx] |= 1 << bit;
3583 }
3584
3585 for (i = 0; i < 8; i++)
3586 req.vf_req_fwd[i] = cpu_to_le32(data[i]);
3587
3588 req.enables |=
3589 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
3590 }
3591
3592 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3593 }
3594
3595 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
3596 {
3597 struct hwrm_func_drv_unrgtr_input req = {0};
3598
3599 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
3600 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3601 }
3602
3603 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
3604 {
3605 u32 rc = 0;
3606 struct hwrm_tunnel_dst_port_free_input req = {0};
3607
3608 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
3609 req.tunnel_type = tunnel_type;
3610
3611 switch (tunnel_type) {
3612 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
3613 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
3614 break;
3615 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
3616 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
3617 break;
3618 default:
3619 break;
3620 }
3621
3622 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3623 if (rc)
3624 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
3625 rc);
3626 return rc;
3627 }
3628
3629 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
3630 u8 tunnel_type)
3631 {
3632 u32 rc = 0;
3633 struct hwrm_tunnel_dst_port_alloc_input req = {0};
3634 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3635
3636 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
3637
3638 req.tunnel_type = tunnel_type;
3639 req.tunnel_dst_port_val = port;
3640
3641 mutex_lock(&bp->hwrm_cmd_lock);
3642 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3643 if (rc) {
3644 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
3645 rc);
3646 goto err_out;
3647 }
3648
3649 switch (tunnel_type) {
3650 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
3651 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
3652 break;
3653 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
3654 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
3655 break;
3656 default:
3657 break;
3658 }
3659
3660 err_out:
3661 mutex_unlock(&bp->hwrm_cmd_lock);
3662 return rc;
3663 }
3664
3665 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
3666 {
3667 struct hwrm_cfa_l2_set_rx_mask_input req = {0};
3668 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3669
3670 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
3671 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
3672
3673 req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
3674 req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
3675 req.mask = cpu_to_le32(vnic->rx_mask);
3676 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3677 }
3678
3679 #ifdef CONFIG_RFS_ACCEL
3680 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
3681 struct bnxt_ntuple_filter *fltr)
3682 {
3683 struct hwrm_cfa_ntuple_filter_free_input req = {0};
3684
3685 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
3686 req.ntuple_filter_id = fltr->filter_id;
3687 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3688 }
3689
3690 #define BNXT_NTP_FLTR_FLAGS \
3691 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
3692 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
3693 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
3694 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
3695 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
3696 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
3697 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
3698 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
3699 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
3700 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
3701 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
3702 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
3703 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
3704 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
3705
3706 #define BNXT_NTP_TUNNEL_FLTR_FLAG \
3707 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
3708
3709 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
3710 struct bnxt_ntuple_filter *fltr)
3711 {
3712 int rc = 0;
3713 struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
3714 struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3715 bp->hwrm_cmd_resp_addr;
3716 struct flow_keys *keys = &fltr->fkeys;
3717 struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
3718
3719 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
3720 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
3721
3722 req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
3723
3724 req.ethertype = htons(ETH_P_IP);
3725 memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
3726 req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
3727 req.ip_protocol = keys->basic.ip_proto;
3728
3729 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
3730 int i;
3731
3732 req.ethertype = htons(ETH_P_IPV6);
3733 req.ip_addr_type =
3734 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
3735 *(struct in6_addr *)&req.src_ipaddr[0] =
3736 keys->addrs.v6addrs.src;
3737 *(struct in6_addr *)&req.dst_ipaddr[0] =
3738 keys->addrs.v6addrs.dst;
3739 for (i = 0; i < 4; i++) {
3740 req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
3741 req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
3742 }
3743 } else {
3744 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
3745 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
3746 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
3747 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
3748 }
3749 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
3750 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
3751 req.tunnel_type =
3752 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
3753 }
3754
3755 req.src_port = keys->ports.src;
3756 req.src_port_mask = cpu_to_be16(0xffff);
3757 req.dst_port = keys->ports.dst;
3758 req.dst_port_mask = cpu_to_be16(0xffff);
3759
3760 req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
3761 mutex_lock(&bp->hwrm_cmd_lock);
3762 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3763 if (!rc)
3764 fltr->filter_id = resp->ntuple_filter_id;
3765 mutex_unlock(&bp->hwrm_cmd_lock);
3766 return rc;
3767 }
3768 #endif
3769
3770 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
3771 u8 *mac_addr)
3772 {
3773 u32 rc = 0;
3774 struct hwrm_cfa_l2_filter_alloc_input req = {0};
3775 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3776
3777 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
3778 req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
3779 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
3780 req.flags |=
3781 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
3782 req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
3783 req.enables =
3784 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
3785 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
3786 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
3787 memcpy(req.l2_addr, mac_addr, ETH_ALEN);
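/* An all-ones l2_addr_mask requests an exact match on all 6 bytes of the
 * MAC address.
 */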
3788 req.l2_addr_mask[0] = 0xff;
3789 req.l2_addr_mask[1] = 0xff;
3790 req.l2_addr_mask[2] = 0xff;
3791 req.l2_addr_mask[3] = 0xff;
3792 req.l2_addr_mask[4] = 0xff;
3793 req.l2_addr_mask[5] = 0xff;
3794
3795 mutex_lock(&bp->hwrm_cmd_lock);
3796 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3797 if (!rc)
3798 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
3799 resp->l2_filter_id;
3800 mutex_unlock(&bp->hwrm_cmd_lock);
3801 return rc;
3802 }
3803
3804 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
3805 {
3806 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
3807 int rc = 0;
3808
3809 /* Any associated ntuple filters will also be cleared by firmware. */
3810 mutex_lock(&bp->hwrm_cmd_lock);
3811 for (i = 0; i < num_of_vnics; i++) {
3812 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3813
3814 for (j = 0; j < vnic->uc_filter_count; j++) {
3815 struct hwrm_cfa_l2_filter_free_input req = {0};
3816
3817 bnxt_hwrm_cmd_hdr_init(bp, &req,
3818 HWRM_CFA_L2_FILTER_FREE, -1, -1);
3819
3820 req.l2_filter_id = vnic->fw_l2_filter_id[j];
3821
3822 rc = _hwrm_send_message(bp, &req, sizeof(req),
3823 HWRM_CMD_TIMEOUT);
3824 }
3825 vnic->uc_filter_count = 0;
3826 }
3827 mutex_unlock(&bp->hwrm_cmd_lock);
3828
3829 return rc;
3830 }
3831
3832 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
3833 {
3834 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3835 struct hwrm_vnic_tpa_cfg_input req = {0};
3836
3837 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
3838 return 0;
3839
3840 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
3841
3842 if (tpa_flags) {
3843 u16 mss = bp->dev->mtu - 40;
3844 u32 nsegs, n, segs = 0, flags;
3845
3846 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
3847 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
3848 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
3849 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
3850 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
3851 if (tpa_flags & BNXT_FLAG_GRO)
3852 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
3853
3854 req.flags = cpu_to_le32(flags);
3855
3856 req.enables =
3857 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
3858 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
3859 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
3860
3861 		/* The number of segs is in log2 units, and the first
3862 		 * packet is not counted in these units.
3863 		 */
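/* Worked example (assuming a 1500-byte MTU, 4K BNXT_RX_PAGE_SIZE and
 * MAX_SKB_FRAGS == 17): mss = 1460, n = 4096 / 1460 = 2,
 * nsegs = (17 - 1) * 2 = 32, so max_agg_segs = ilog2(32) = 5.
 */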
3864 if (mss <= BNXT_RX_PAGE_SIZE) {
3865 n = BNXT_RX_PAGE_SIZE / mss;
3866 nsegs = (MAX_SKB_FRAGS - 1) * n;
3867 } else {
3868 n = mss / BNXT_RX_PAGE_SIZE;
3869 if (mss & (BNXT_RX_PAGE_SIZE - 1))
3870 n++;
3871 nsegs = (MAX_SKB_FRAGS - n) / n;
3872 }
3873
3874 segs = ilog2(nsegs);
3875 req.max_agg_segs = cpu_to_le16(segs);
3876 req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
3877
3878 req.min_agg_len = cpu_to_le32(512);
3879 }
3880 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
3881
3882 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3883 }
3884
3885 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
3886 {
3887 u32 i, j, max_rings;
3888 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3889 struct hwrm_vnic_rss_cfg_input req = {0};
3890
3891 if (vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
3892 return 0;
3893
3894 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
3895 if (set_rss) {
3896 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
3897 if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
3898 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3899 max_rings = bp->rx_nr_rings - 1;
3900 else
3901 max_rings = bp->rx_nr_rings;
3902 } else {
3903 max_rings = 1;
3904 }
3905
3906 /* Fill the RSS indirection table with ring group ids */
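/* e.g. with max_rings == 4 the table repeats the group ids of rings
 * 0, 1, 2, 3, 0, 1, ... across all HW_HASH_INDEX_SIZE entries.
 */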
3907 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
3908 if (j == max_rings)
3909 j = 0;
3910 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
3911 }
3912
3913 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
3914 req.hash_key_tbl_addr =
3915 cpu_to_le64(vnic->rss_hash_key_dma_addr);
3916 }
3917 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
3918 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3919 }
3920
3921 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
3922 {
3923 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3924 struct hwrm_vnic_plcmodes_cfg_input req = {0};
3925
3926 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
3927 req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
3928 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
3929 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
3930 req.enables =
3931 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
3932 VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
3933 /* thresholds not implemented in firmware yet */
3934 req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
3935 req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
3936 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
3937 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3938 }
3939
3940 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
3941 u16 ctx_idx)
3942 {
3943 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
3944
3945 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
3946 req.rss_cos_lb_ctx_id =
3947 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
3948
3949 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3950 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
3951 }
3952
3953 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
3954 {
3955 int i, j;
3956
3957 for (i = 0; i < bp->nr_vnics; i++) {
3958 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3959
3960 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
3961 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
3962 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
3963 }
3964 }
3965 bp->rsscos_nr_ctxs = 0;
3966 }
3967
3968 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
3969 {
3970 int rc;
3971 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
3972 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
3973 bp->hwrm_cmd_resp_addr;
3974
3975 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
3976 -1);
3977
3978 mutex_lock(&bp->hwrm_cmd_lock);
3979 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3980 if (!rc)
3981 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
3982 le16_to_cpu(resp->rss_cos_lb_ctx_id);
3983 mutex_unlock(&bp->hwrm_cmd_lock);
3984
3985 return rc;
3986 }
3987
3988 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
3989 {
3990 unsigned int ring = 0, grp_idx;
3991 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3992 struct hwrm_vnic_cfg_input req = {0};
3993 u16 def_vlan = 0;
3994
3995 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
3996
3997 req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
3998 	/* Only RSS is supported for now; TBD: COS & LB */
3999 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
4000 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
4001 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
4002 VNIC_CFG_REQ_ENABLES_MRU);
4003 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
4004 req.rss_rule =
4005 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
4006 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
4007 VNIC_CFG_REQ_ENABLES_MRU);
4008 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
4009 } else {
4010 req.rss_rule = cpu_to_le16(0xffff);
4011 }
4012
4013 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
4014 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
4015 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
4016 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
4017 } else {
4018 req.cos_rule = cpu_to_le16(0xffff);
4019 }
4020
4021 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
4022 ring = 0;
4023 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
4024 ring = vnic_id - 1;
4025 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
4026 ring = bp->rx_nr_rings - 1;
4027
4028 grp_idx = bp->rx_ring[ring].bnapi->index;
4029 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4030 req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
4031
4032 req.lb_rule = cpu_to_le16(0xffff);
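/* The MRU covers the MTU plus the Ethernet header (14), FCS (4) and one
 * VLAN tag (4), e.g. 1500 + 14 + 4 + 4 = 1522 for a standard MTU.
 */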
4033 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
4034 VLAN_HLEN);
4035
4036 #ifdef CONFIG_BNXT_SRIOV
4037 if (BNXT_VF(bp))
4038 def_vlan = bp->vf.vlan;
4039 #endif
4040 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
4041 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
4042 if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
4043 req.flags |=
4044 cpu_to_le32(VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE);
4045
4046 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4047 }
4048
4049 static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
4050 {
4051 u32 rc = 0;
4052
4053 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
4054 struct hwrm_vnic_free_input req = {0};
4055
4056 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
4057 req.vnic_id =
4058 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
4059
4060 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4061 if (rc)
4062 return rc;
4063 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
4064 }
4065 return rc;
4066 }
4067
4068 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
4069 {
4070 u16 i;
4071
4072 for (i = 0; i < bp->nr_vnics; i++)
4073 bnxt_hwrm_vnic_free_one(bp, i);
4074 }
4075
4076 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
4077 unsigned int start_rx_ring_idx,
4078 unsigned int nr_rings)
4079 {
4080 int rc = 0;
4081 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
4082 struct hwrm_vnic_alloc_input req = {0};
4083 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4084
4085 /* map ring groups to this vnic */
4086 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
4087 grp_idx = bp->rx_ring[i].bnapi->index;
4088 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
4089 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
4090 j, nr_rings);
4091 break;
4092 }
4093 bp->vnic_info[vnic_id].fw_grp_ids[j] =
4094 bp->grp_info[grp_idx].fw_grp_id;
4095 }
4096
4097 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
4098 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
4099 if (vnic_id == 0)
4100 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
4101
4102 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
4103
4104 mutex_lock(&bp->hwrm_cmd_lock);
4105 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4106 if (!rc)
4107 bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id);
4108 mutex_unlock(&bp->hwrm_cmd_lock);
4109 return rc;
4110 }
4111
4112 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
4113 {
4114 struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4115 struct hwrm_vnic_qcaps_input req = {0};
4116 int rc;
4117
4118 if (bp->hwrm_spec_code < 0x10600)
4119 return 0;
4120
4121 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
4122 mutex_lock(&bp->hwrm_cmd_lock);
4123 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4124 if (!rc) {
4125 if (resp->flags &
4126 cpu_to_le32(VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
4127 bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
4128 }
4129 mutex_unlock(&bp->hwrm_cmd_lock);
4130 return rc;
4131 }
4132
4133 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
4134 {
4135 u16 i;
4136 u32 rc = 0;
4137
4138 mutex_lock(&bp->hwrm_cmd_lock);
4139 for (i = 0; i < bp->rx_nr_rings; i++) {
4140 struct hwrm_ring_grp_alloc_input req = {0};
4141 struct hwrm_ring_grp_alloc_output *resp =
4142 bp->hwrm_cmd_resp_addr;
4143 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
4144
4145 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
4146
4147 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
4148 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
4149 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
4150 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
4151
4152 rc = _hwrm_send_message(bp, &req, sizeof(req),
4153 HWRM_CMD_TIMEOUT);
4154 if (rc)
4155 break;
4156
4157 bp->grp_info[grp_idx].fw_grp_id =
4158 le32_to_cpu(resp->ring_group_id);
4159 }
4160 mutex_unlock(&bp->hwrm_cmd_lock);
4161 return rc;
4162 }
4163
4164 static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
4165 {
4166 u16 i;
4167 u32 rc = 0;
4168 struct hwrm_ring_grp_free_input req = {0};
4169
4170 if (!bp->grp_info)
4171 return 0;
4172
4173 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
4174
4175 mutex_lock(&bp->hwrm_cmd_lock);
4176 for (i = 0; i < bp->cp_nr_rings; i++) {
4177 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
4178 continue;
4179 req.ring_group_id =
4180 cpu_to_le32(bp->grp_info[i].fw_grp_id);
4181
4182 rc = _hwrm_send_message(bp, &req, sizeof(req),
4183 HWRM_CMD_TIMEOUT);
4184 if (rc)
4185 break;
4186 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
4187 }
4188 mutex_unlock(&bp->hwrm_cmd_lock);
4189 return rc;
4190 }
4191
4192 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
4193 struct bnxt_ring_struct *ring,
4194 u32 ring_type, u32 map_index,
4195 u32 stats_ctx_id)
4196 {
4197 int rc = 0, err = 0;
4198 struct hwrm_ring_alloc_input req = {0};
4199 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4200 u16 ring_id;
4201
4202 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
4203
4204 req.enables = 0;
4205 if (ring->nr_pages > 1) {
4206 req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map);
4207 /* Page size is in log2 units */
4208 req.page_size = BNXT_PAGE_SHIFT;
4209 req.page_tbl_depth = 1;
4210 } else {
4211 req.page_tbl_addr = cpu_to_le64(ring->dma_arr[0]);
4212 }
4213 req.fbo = 0;
4214 /* Association of ring index with doorbell index and MSIX number */
4215 req.logical_id = cpu_to_le16(map_index);
4216
4217 switch (ring_type) {
4218 case HWRM_RING_ALLOC_TX:
4219 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
4220 /* Association of transmit ring with completion ring */
4221 req.cmpl_ring_id =
4222 cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id);
4223 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
4224 req.stat_ctx_id = cpu_to_le32(stats_ctx_id);
4225 req.queue_id = cpu_to_le16(ring->queue_id);
4226 break;
4227 case HWRM_RING_ALLOC_RX:
4228 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
4229 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
4230 break;
4231 case HWRM_RING_ALLOC_AGG:
4232 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
4233 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
4234 break;
4235 case HWRM_RING_ALLOC_CMPL:
4236 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
4237 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
4238 if (bp->flags & BNXT_FLAG_USING_MSIX)
4239 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
4240 break;
4241 default:
4242 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
4243 ring_type);
4244 return -1;
4245 }
4246
4247 mutex_lock(&bp->hwrm_cmd_lock);
4248 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4249 err = le16_to_cpu(resp->error_code);
4250 ring_id = le16_to_cpu(resp->ring_id);
4251 mutex_unlock(&bp->hwrm_cmd_lock);
4252
4253 if (rc || err) {
4254 switch (ring_type) {
4255 case RING_FREE_REQ_RING_TYPE_L2_CMPL:
4256 netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
4257 rc, err);
4258 return -1;
4259
4260 case RING_FREE_REQ_RING_TYPE_RX:
4261 netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n",
4262 rc, err);
4263 return -1;
4264
4265 case RING_FREE_REQ_RING_TYPE_TX:
4266 netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n",
4267 rc, err);
4268 return -1;
4269
4270 default:
4271 netdev_err(bp->dev, "Invalid ring\n");
4272 return -1;
4273 }
4274 }
4275 ring->fw_ring_id = ring_id;
4276 return rc;
4277 }
4278
4279 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
4280 {
4281 int rc;
4282
4283 if (BNXT_PF(bp)) {
4284 struct hwrm_func_cfg_input req = {0};
4285
4286 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
4287 req.fid = cpu_to_le16(0xffff);
4288 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
4289 req.async_event_cr = cpu_to_le16(idx);
4290 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4291 } else {
4292 struct hwrm_func_vf_cfg_input req = {0};
4293
4294 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
4295 req.enables =
4296 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
4297 req.async_event_cr = cpu_to_le16(idx);
4298 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4299 }
4300 return rc;
4301 }
4302
4303 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
4304 {
4305 int i, rc = 0;
4306
4307 for (i = 0; i < bp->cp_nr_rings; i++) {
4308 struct bnxt_napi *bnapi = bp->bnapi[i];
4309 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4310 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4311
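/* Doorbell registers are laid out at a 0x80-byte stride in BAR 1; the
 * completion ring doorbell below is indexed by the ring's position.
 */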
4312 cpr->cp_doorbell = bp->bar1 + i * 0x80;
4313 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i,
4314 INVALID_STATS_CTX_ID);
4315 if (rc)
4316 goto err_out;
4317 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
4318 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
4319
4320 if (!i) {
4321 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
4322 if (rc)
4323 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
4324 }
4325 }
4326
4327 for (i = 0; i < bp->tx_nr_rings; i++) {
4328 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4329 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
4330 u32 map_idx = txr->bnapi->index;
4331 u16 fw_stats_ctx = bp->grp_info[map_idx].fw_stats_ctx;
4332
4333 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX,
4334 map_idx, fw_stats_ctx);
4335 if (rc)
4336 goto err_out;
4337 txr->tx_doorbell = bp->bar1 + map_idx * 0x80;
4338 }
4339
4340 for (i = 0; i < bp->rx_nr_rings; i++) {
4341 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4342 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
4343 u32 map_idx = rxr->bnapi->index;
4344
4345 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX,
4346 map_idx, INVALID_STATS_CTX_ID);
4347 if (rc)
4348 goto err_out;
4349 rxr->rx_doorbell = bp->bar1 + map_idx * 0x80;
4350 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
4351 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
4352 }
4353
4354 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
4355 for (i = 0; i < bp->rx_nr_rings; i++) {
4356 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4357 struct bnxt_ring_struct *ring =
4358 &rxr->rx_agg_ring_struct;
4359 u32 grp_idx = rxr->bnapi->index;
4360 u32 map_idx = grp_idx + bp->rx_nr_rings;
4361
4362 rc = hwrm_ring_alloc_send_msg(bp, ring,
4363 HWRM_RING_ALLOC_AGG,
4364 map_idx,
4365 INVALID_STATS_CTX_ID);
4366 if (rc)
4367 goto err_out;
4368
4369 rxr->rx_agg_doorbell = bp->bar1 + map_idx * 0x80;
4370 writel(DB_KEY_RX | rxr->rx_agg_prod,
4371 rxr->rx_agg_doorbell);
4372 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
4373 }
4374 }
4375 err_out:
4376 return rc;
4377 }
4378
4379 static int hwrm_ring_free_send_msg(struct bnxt *bp,
4380 struct bnxt_ring_struct *ring,
4381 u32 ring_type, int cmpl_ring_id)
4382 {
4383 int rc;
4384 struct hwrm_ring_free_input req = {0};
4385 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
4386 u16 error_code;
4387
4388 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
4389 req.ring_type = ring_type;
4390 req.ring_id = cpu_to_le16(ring->fw_ring_id);
4391
4392 mutex_lock(&bp->hwrm_cmd_lock);
4393 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4394 error_code = le16_to_cpu(resp->error_code);
4395 mutex_unlock(&bp->hwrm_cmd_lock);
4396
4397 if (rc || error_code) {
4398 switch (ring_type) {
4399 case RING_FREE_REQ_RING_TYPE_L2_CMPL:
4400 netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
4401 rc);
4402 return rc;
4403 case RING_FREE_REQ_RING_TYPE_RX:
4404 netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n",
4405 rc);
4406 return rc;
4407 case RING_FREE_REQ_RING_TYPE_TX:
4408 netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n",
4409 rc);
4410 return rc;
4411 default:
4412 netdev_err(bp->dev, "Invalid ring\n");
4413 return -1;
4414 }
4415 }
4416 return 0;
4417 }
4418
4419 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
4420 {
4421 int i;
4422
4423 if (!bp->bnapi)
4424 return;
4425
4426 for (i = 0; i < bp->tx_nr_rings; i++) {
4427 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4428 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
4429 u32 grp_idx = txr->bnapi->index;
4430 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
4431
4432 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
4433 hwrm_ring_free_send_msg(bp, ring,
4434 RING_FREE_REQ_RING_TYPE_TX,
4435 close_path ? cmpl_ring_id :
4436 INVALID_HW_RING_ID);
4437 ring->fw_ring_id = INVALID_HW_RING_ID;
4438 }
4439 }
4440
4441 for (i = 0; i < bp->rx_nr_rings; i++) {
4442 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4443 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
4444 u32 grp_idx = rxr->bnapi->index;
4445 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
4446
4447 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
4448 hwrm_ring_free_send_msg(bp, ring,
4449 RING_FREE_REQ_RING_TYPE_RX,
4450 close_path ? cmpl_ring_id :
4451 INVALID_HW_RING_ID);
4452 ring->fw_ring_id = INVALID_HW_RING_ID;
4453 bp->grp_info[grp_idx].rx_fw_ring_id =
4454 INVALID_HW_RING_ID;
4455 }
4456 }
4457
4458 for (i = 0; i < bp->rx_nr_rings; i++) {
4459 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4460 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
4461 u32 grp_idx = rxr->bnapi->index;
4462 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
4463
4464 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
4465 hwrm_ring_free_send_msg(bp, ring,
4466 RING_FREE_REQ_RING_TYPE_RX,
4467 close_path ? cmpl_ring_id :
4468 INVALID_HW_RING_ID);
4469 ring->fw_ring_id = INVALID_HW_RING_ID;
4470 bp->grp_info[grp_idx].agg_fw_ring_id =
4471 INVALID_HW_RING_ID;
4472 }
4473 }
4474
4475 /* The completion rings are about to be freed. After that the
4476 * IRQ doorbell will not work anymore. So we need to disable
4477 * IRQ here.
4478 */
4479 bnxt_disable_int_sync(bp);
4480
4481 for (i = 0; i < bp->cp_nr_rings; i++) {
4482 struct bnxt_napi *bnapi = bp->bnapi[i];
4483 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4484 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4485
4486 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
4487 hwrm_ring_free_send_msg(bp, ring,
4488 RING_FREE_REQ_RING_TYPE_L2_CMPL,
4489 INVALID_HW_RING_ID);
4490 ring->fw_ring_id = INVALID_HW_RING_ID;
4491 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
4492 }
4493 }
4494 }
4495
4496 /* Caller must hold bp->hwrm_cmd_lock */
4497 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
4498 {
4499 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4500 struct hwrm_func_qcfg_input req = {0};
4501 int rc;
4502
4503 if (bp->hwrm_spec_code < 0x10601)
4504 return 0;
4505
4506 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
4507 req.fid = cpu_to_le16(fid);
4508 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4509 if (!rc)
4510 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
4511
4512 return rc;
4513 }
4514
4515 static int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings)
4516 {
4517 struct hwrm_func_cfg_input req = {0};
4518 int rc;
4519
4520 if (bp->hwrm_spec_code < 0x10601)
4521 return 0;
4522
4523 if (BNXT_VF(bp))
4524 return 0;
4525
4526 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
4527 req.fid = cpu_to_le16(0xffff);
4528 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS);
4529 req.num_tx_rings = cpu_to_le16(*tx_rings);
4530 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4531 if (rc)
4532 return rc;
4533
4534 mutex_lock(&bp->hwrm_cmd_lock);
4535 rc = __bnxt_hwrm_get_tx_rings(bp, 0xffff, tx_rings);
4536 mutex_unlock(&bp->hwrm_cmd_lock);
4537 if (!rc)
4538 bp->tx_reserved_rings = *tx_rings;
4539 return rc;
4540 }
4541
4542 static int bnxt_hwrm_check_tx_rings(struct bnxt *bp, int tx_rings)
4543 {
4544 struct hwrm_func_cfg_input req = {0};
4545 int rc;
4546
4547 if (bp->hwrm_spec_code < 0x10801)
4548 return 0;
4549
4550 if (BNXT_VF(bp))
4551 return 0;
4552
4553 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
4554 req.fid = cpu_to_le16(0xffff);
4555 req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST);
4556 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS);
4557 req.num_tx_rings = cpu_to_le16(tx_rings);
4558 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4559 if (rc)
4560 return -ENOMEM;
4561 return 0;
4562 }
4563
4564 static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs,
4565 u32 buf_tmrs, u16 flags,
4566 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
4567 {
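/* max_bufs and buf_tmrs each pack two 16-bit values: the low 16 bits are
 * used for num_cmpl_dma_aggr/cmpl_aggr_dma_tmr and the high 16 bits for
 * the *_during_int variants (see the << 16 packing in bnxt_hwrm_set_coal()).
 */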
4568 req->flags = cpu_to_le16(flags);
4569 req->num_cmpl_dma_aggr = cpu_to_le16((u16)max_bufs);
4570 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(max_bufs >> 16);
4571 req->cmpl_aggr_dma_tmr = cpu_to_le16((u16)buf_tmrs);
4572 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmrs >> 16);
4573 /* Minimum time between 2 interrupts set to buf_tmr x 2 */
4574 req->int_lat_tmr_min = cpu_to_le16((u16)buf_tmrs * 2);
4575 req->int_lat_tmr_max = cpu_to_le16((u16)buf_tmrs * 4);
4576 req->num_cmpl_aggr_int = cpu_to_le16((u16)max_bufs * 4);
4577 }
4578
4579 int bnxt_hwrm_set_coal(struct bnxt *bp)
4580 {
4581 int i, rc = 0;
4582 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
4583 req_tx = {0}, *req;
4584 u16 max_buf, max_buf_irq;
4585 u16 buf_tmr, buf_tmr_irq;
4586 u32 flags;
4587
4588 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
4589 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
4590 bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
4591 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
4592
4593 /* Each rx completion (2 records) should be DMAed immediately.
4594 * DMA 1/4 of the completion buffers at a time.
4595 */
4596 max_buf = min_t(u16, bp->rx_coal_bufs / 4, 2);
4597 /* max_buf must not be zero */
4598 max_buf = clamp_t(u16, max_buf, 1, 63);
4599 max_buf_irq = clamp_t(u16, bp->rx_coal_bufs_irq, 1, 63);
4600 buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks);
4601 /* buf timer set to 1/4 of interrupt timer */
4602 buf_tmr = max_t(u16, buf_tmr / 4, 1);
4603 buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks_irq);
4604 buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
4605
4606 flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
4607
4608 /* RING_IDLE generates more IRQs for lower latency. Enable it only
4609 * if coal_ticks is less than 25 us.
4610 */
4611 if (bp->rx_coal_ticks < 25)
4612 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
4613
4614 bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
4615 buf_tmr_irq << 16 | buf_tmr, flags, &req_rx);
4616
4617 /* max_buf must not be zero */
4618 max_buf = clamp_t(u16, bp->tx_coal_bufs, 1, 63);
4619 max_buf_irq = clamp_t(u16, bp->tx_coal_bufs_irq, 1, 63);
4620 buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks);
4621 /* buf timer set to 1/4 of interrupt timer */
4622 buf_tmr = max_t(u16, buf_tmr / 4, 1);
4623 buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks_irq);
4624 buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
4625
4626 flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
4627 bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
4628 buf_tmr_irq << 16 | buf_tmr, flags, &req_tx);
4629
4630 mutex_lock(&bp->hwrm_cmd_lock);
4631 for (i = 0; i < bp->cp_nr_rings; i++) {
4632 struct bnxt_napi *bnapi = bp->bnapi[i];
4633
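/* Rings with an rx component use the rx coalescing template; tx-only
 * rings fall back to the tx template.
 */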
4634 req = &req_rx;
4635 if (!bnapi->rx_ring)
4636 req = &req_tx;
4637 req->ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
4638
4639 rc = _hwrm_send_message(bp, req, sizeof(*req),
4640 HWRM_CMD_TIMEOUT);
4641 if (rc)
4642 break;
4643 }
4644 mutex_unlock(&bp->hwrm_cmd_lock);
4645 return rc;
4646 }
4647
4648 static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
4649 {
4650 int rc = 0, i;
4651 struct hwrm_stat_ctx_free_input req = {0};
4652
4653 if (!bp->bnapi)
4654 return 0;
4655
4656 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4657 return 0;
4658
4659 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
4660
4661 mutex_lock(&bp->hwrm_cmd_lock);
4662 for (i = 0; i < bp->cp_nr_rings; i++) {
4663 struct bnxt_napi *bnapi = bp->bnapi[i];
4664 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4665
4666 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
4667 req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
4668
4669 rc = _hwrm_send_message(bp, &req, sizeof(req),
4670 HWRM_CMD_TIMEOUT);
4671 if (rc)
4672 break;
4673
4674 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4675 }
4676 }
4677 mutex_unlock(&bp->hwrm_cmd_lock);
4678 return rc;
4679 }
4680
4681 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
4682 {
4683 int rc = 0, i;
4684 struct hwrm_stat_ctx_alloc_input req = {0};
4685 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4686
4687 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4688 return 0;
4689
4690 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
4691
4692 req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
4693
4694 mutex_lock(&bp->hwrm_cmd_lock);
4695 for (i = 0; i < bp->cp_nr_rings; i++) {
4696 struct bnxt_napi *bnapi = bp->bnapi[i];
4697 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4698
4699 req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
4700
4701 rc = _hwrm_send_message(bp, &req, sizeof(req),
4702 HWRM_CMD_TIMEOUT);
4703 if (rc)
4704 break;
4705
4706 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
4707
4708 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
4709 }
4710 mutex_unlock(&bp->hwrm_cmd_lock);
4711 return rc;
4712 }
4713
4714 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
4715 {
4716 struct hwrm_func_qcfg_input req = {0};
4717 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4718 u16 flags;
4719 int rc;
4720
4721 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
4722 req.fid = cpu_to_le16(0xffff);
4723 mutex_lock(&bp->hwrm_cmd_lock);
4724 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4725 if (rc)
4726 goto func_qcfg_exit;
4727
4728 #ifdef CONFIG_BNXT_SRIOV
4729 if (BNXT_VF(bp)) {
4730 struct bnxt_vf_info *vf = &bp->vf;
4731
4732 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
4733 }
4734 #endif
4735 flags = le16_to_cpu(resp->flags);
4736 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
4737 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
4738 bp->flags |= BNXT_FLAG_FW_LLDP_AGENT;
4739 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
4740 bp->flags |= BNXT_FLAG_FW_DCBX_AGENT;
4741 }
4742 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
4743 bp->flags |= BNXT_FLAG_MULTI_HOST;
4744
4745 switch (resp->port_partition_type) {
4746 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
4747 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
4748 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
4749 bp->port_partition_type = resp->port_partition_type;
4750 break;
4751 }
4752 if (bp->hwrm_spec_code < 0x10707 ||
4753 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
4754 bp->br_mode = BRIDGE_MODE_VEB;
4755 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
4756 bp->br_mode = BRIDGE_MODE_VEPA;
4757 else
4758 bp->br_mode = BRIDGE_MODE_UNDEF;
4759
4760 func_qcfg_exit:
4761 mutex_unlock(&bp->hwrm_cmd_lock);
4762 return rc;
4763 }
4764
4765 static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
4766 {
4767 int rc = 0;
4768 struct hwrm_func_qcaps_input req = {0};
4769 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4770
4771 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
4772 req.fid = cpu_to_le16(0xffff);
4773
4774 mutex_lock(&bp->hwrm_cmd_lock);
4775 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4776 if (rc)
4777 goto hwrm_func_qcaps_exit;
4778
4779 if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED))
4780 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
4781 if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED))
4782 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
4783
4784 bp->tx_push_thresh = 0;
4785 if (resp->flags &
4786 cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
4787 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
4788
4789 if (BNXT_PF(bp)) {
4790 struct bnxt_pf_info *pf = &bp->pf;
4791
4792 pf->fw_fid = le16_to_cpu(resp->fid);
4793 pf->port_id = le16_to_cpu(resp->port_id);
4794 bp->dev->dev_port = pf->port_id;
4795 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
4796 pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
4797 pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
4798 pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
4799 pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
4800 pf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
4801 if (!pf->max_hw_ring_grps)
4802 pf->max_hw_ring_grps = pf->max_tx_rings;
4803 pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
4804 pf->max_vnics = le16_to_cpu(resp->max_vnics);
4805 pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
4806 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
4807 pf->max_vfs = le16_to_cpu(resp->max_vfs);
4808 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
4809 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
4810 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
4811 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
4812 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
4813 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
4814 if (resp->flags &
4815 cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED))
4816 bp->flags |= BNXT_FLAG_WOL_CAP;
4817 } else {
4818 #ifdef CONFIG_BNXT_SRIOV
4819 struct bnxt_vf_info *vf = &bp->vf;
4820
4821 vf->fw_fid = le16_to_cpu(resp->fid);
4822
4823 vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
4824 vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
4825 vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
4826 vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
4827 vf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
4828 if (!vf->max_hw_ring_grps)
4829 vf->max_hw_ring_grps = vf->max_tx_rings;
4830 vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
4831 vf->max_vnics = le16_to_cpu(resp->max_vnics);
4832 vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
4833
4834 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
4835 #endif
4836 }
4837
4838 hwrm_func_qcaps_exit:
4839 mutex_unlock(&bp->hwrm_cmd_lock);
4840 return rc;
4841 }
4842
4843 static int bnxt_hwrm_func_reset(struct bnxt *bp)
4844 {
4845 struct hwrm_func_reset_input req = {0};
4846
4847 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
4848 req.enables = 0;
4849
4850 return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
4851 }
4852
4853 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
4854 {
4855 int rc = 0;
4856 struct hwrm_queue_qportcfg_input req = {0};
4857 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
4858 u8 i, *qptr;
4859
4860 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
4861
4862 mutex_lock(&bp->hwrm_cmd_lock);
4863 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4864 if (rc)
4865 goto qportcfg_exit;
4866
4867 if (!resp->max_configurable_queues) {
4868 rc = -EINVAL;
4869 goto qportcfg_exit;
4870 }
4871 bp->max_tc = resp->max_configurable_queues;
4872 bp->max_lltc = resp->max_configurable_lossless_queues;
4873 if (bp->max_tc > BNXT_MAX_QUEUE)
4874 bp->max_tc = BNXT_MAX_QUEUE;
4875
4876 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
4877 bp->max_tc = 1;
4878
4879 if (bp->max_lltc > bp->max_tc)
4880 bp->max_lltc = bp->max_tc;
4881
4882 qptr = &resp->queue_id0;
4883 for (i = 0; i < bp->max_tc; i++) {
4884 bp->q_info[i].queue_id = *qptr++;
4885 bp->q_info[i].queue_profile = *qptr++;
4886 }
4887
4888 qportcfg_exit:
4889 mutex_unlock(&bp->hwrm_cmd_lock);
4890 return rc;
4891 }
4892
4893 static int bnxt_hwrm_ver_get(struct bnxt *bp)
4894 {
4895 int rc;
4896 struct hwrm_ver_get_input req = {0};
4897 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
4898 u32 dev_caps_cfg;
4899
4900 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
4901 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
4902 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
4903 req.hwrm_intf_min = HWRM_VERSION_MINOR;
4904 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
4905 mutex_lock(&bp->hwrm_cmd_lock);
4906 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4907 if (rc)
4908 goto hwrm_ver_get_exit;
4909
4910 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
4911
4912 bp->hwrm_spec_code = resp->hwrm_intf_maj << 16 |
4913 resp->hwrm_intf_min << 8 | resp->hwrm_intf_upd;
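/* e.g. HWRM interface 1.6.0 is encoded as 0x10600, which is the form
 * compared against throughout the driver (see bnxt_hwrm_vnic_qcaps()).
 */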
4914 if (resp->hwrm_intf_maj < 1) {
4915 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
4916 resp->hwrm_intf_maj, resp->hwrm_intf_min,
4917 resp->hwrm_intf_upd);
4918 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
4919 }
4920 snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d/%d.%d.%d",
4921 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
4922 resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
4923
4924 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
4925 if (!bp->hwrm_cmd_timeout)
4926 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
4927
4928 if (resp->hwrm_intf_maj >= 1)
4929 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
4930
4931 bp->chip_num = le16_to_cpu(resp->chip_num);
4932 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
4933 !resp->chip_metal)
4934 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
4935
4936 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
4937 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
4938 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
4939 bp->flags |= BNXT_FLAG_SHORT_CMD;
4940
4941 hwrm_ver_get_exit:
4942 mutex_unlock(&bp->hwrm_cmd_lock);
4943 return rc;
4944 }
4945
4946 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
4947 {
4948 #if IS_ENABLED(CONFIG_RTC_LIB)
4949 struct hwrm_fw_set_time_input req = {0};
4950 struct rtc_time tm;
4951 struct timeval tv;
4952
4953 if (bp->hwrm_spec_code < 0x10400)
4954 return -EOPNOTSUPP;
4955
4956 do_gettimeofday(&tv);
4957 rtc_time_to_tm(tv.tv_sec, &tm);
4958 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
4959 req.year = cpu_to_le16(1900 + tm.tm_year);
4960 req.month = 1 + tm.tm_mon;
4961 req.day = tm.tm_mday;
4962 req.hour = tm.tm_hour;
4963 req.minute = tm.tm_min;
4964 req.second = tm.tm_sec;
4965 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4966 #else
4967 return -EOPNOTSUPP;
4968 #endif
4969 }
4970
4971 static int bnxt_hwrm_port_qstats(struct bnxt *bp)
4972 {
4973 int rc;
4974 struct bnxt_pf_info *pf = &bp->pf;
4975 struct hwrm_port_qstats_input req = {0};
4976
4977 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
4978 return 0;
4979
4980 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
4981 req.port_id = cpu_to_le16(pf->port_id);
4982 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
4983 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
4984 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4985 return rc;
4986 }
4987
4988 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
4989 {
4990 if (bp->vxlan_port_cnt) {
4991 bnxt_hwrm_tunnel_dst_port_free(
4992 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
4993 }
4994 bp->vxlan_port_cnt = 0;
4995 if (bp->nge_port_cnt) {
4996 bnxt_hwrm_tunnel_dst_port_free(
4997 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
4998 }
4999 bp->nge_port_cnt = 0;
5000 }
5001
5002 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
5003 {
5004 int rc, i;
5005 u32 tpa_flags = 0;
5006
5007 if (set_tpa)
5008 tpa_flags = bp->flags & BNXT_FLAG_TPA;
5009 for (i = 0; i < bp->nr_vnics; i++) {
5010 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
5011 if (rc) {
5012 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
5013 i, rc);
5014 return rc;
5015 }
5016 }
5017 return 0;
5018 }
5019
5020 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
5021 {
5022 int i;
5023
5024 for (i = 0; i < bp->nr_vnics; i++)
5025 bnxt_hwrm_vnic_set_rss(bp, i, false);
5026 }
5027
5028 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
5029 bool irq_re_init)
5030 {
5031 if (bp->vnic_info) {
5032 bnxt_hwrm_clear_vnic_filter(bp);
5033 		/* clear all RSS settings before freeing the vnic ctx */
5034 bnxt_hwrm_clear_vnic_rss(bp);
5035 bnxt_hwrm_vnic_ctx_free(bp);
5036 		/* before freeing the vnic, undo the vnic TPA settings */
5037 if (bp->flags & BNXT_FLAG_TPA)
5038 bnxt_set_tpa(bp, false);
5039 bnxt_hwrm_vnic_free(bp);
5040 }
5041 bnxt_hwrm_ring_free(bp, close_path);
5042 bnxt_hwrm_ring_grp_free(bp);
5043 if (irq_re_init) {
5044 bnxt_hwrm_stat_ctx_free(bp);
5045 bnxt_hwrm_free_tunnel_ports(bp);
5046 }
5047 }
5048
5049 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
5050 {
5051 struct hwrm_func_cfg_input req = {0};
5052 int rc;
5053
5054 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
5055 req.fid = cpu_to_le16(0xffff);
5056 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
5057 if (br_mode == BRIDGE_MODE_VEB)
5058 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
5059 else if (br_mode == BRIDGE_MODE_VEPA)
5060 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
5061 else
5062 return -EINVAL;
5063 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5064 if (rc)
5065 rc = -EIO;
5066 return rc;
5067 }
5068
5069 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
5070 {
5071 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5072 int rc;
5073
5074 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
5075 goto skip_rss_ctx;
5076
5077 /* allocate context for vnic */
5078 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
5079 if (rc) {
5080 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
5081 vnic_id, rc);
5082 goto vnic_setup_err;
5083 }
5084 bp->rsscos_nr_ctxs++;
5085
5086 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
5087 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
5088 if (rc) {
5089 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
5090 vnic_id, rc);
5091 goto vnic_setup_err;
5092 }
5093 bp->rsscos_nr_ctxs++;
5094 }
5095
5096 skip_rss_ctx:
5097 /* configure default vnic, ring grp */
5098 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
5099 if (rc) {
5100 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
5101 vnic_id, rc);
5102 goto vnic_setup_err;
5103 }
5104
5105 /* Enable RSS hashing on vnic */
5106 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
5107 if (rc) {
5108 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
5109 vnic_id, rc);
5110 goto vnic_setup_err;
5111 }
5112
5113 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
5114 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
5115 if (rc) {
5116 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
5117 vnic_id, rc);
5118 }
5119 }
5120
5121 vnic_setup_err:
5122 return rc;
5123 }
5124
5125 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
5126 {
5127 #ifdef CONFIG_RFS_ACCEL
5128 int i, rc = 0;
5129
5130 for (i = 0; i < bp->rx_nr_rings; i++) {
5131 struct bnxt_vnic_info *vnic;
5132 u16 vnic_id = i + 1;
5133 u16 ring_id = i;
5134
5135 if (vnic_id >= bp->nr_vnics)
5136 break;
5137
5138 vnic = &bp->vnic_info[vnic_id];
5139 vnic->flags |= BNXT_VNIC_RFS_FLAG;
5140 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
5141 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
5142 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
5143 if (rc) {
5144 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
5145 vnic_id, rc);
5146 break;
5147 }
5148 rc = bnxt_setup_vnic(bp, vnic_id);
5149 if (rc)
5150 break;
5151 }
5152 return rc;
5153 #else
5154 return 0;
5155 #endif
5156 }
5157
5158 /* Allow PF and VF with default VLAN to be in promiscuous mode */
5159 static bool bnxt_promisc_ok(struct bnxt *bp)
5160 {
5161 #ifdef CONFIG_BNXT_SRIOV
5162 if (BNXT_VF(bp) && !bp->vf.vlan)
5163 return false;
5164 #endif
5165 return true;
5166 }
5167
5168 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
5169 {
5170 unsigned int rc = 0;
5171
5172 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
5173 if (rc) {
5174 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
5175 rc);
5176 return rc;
5177 }
5178
5179 rc = bnxt_hwrm_vnic_cfg(bp, 1);
5180 if (rc) {
5181 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
5182 rc);
5183 return rc;
5184 }
5185 return rc;
5186 }
5187
5188 static int bnxt_cfg_rx_mode(struct bnxt *);
5189 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
5190
5191 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
5192 {
5193 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
5194 int rc = 0;
5195 unsigned int rx_nr_rings = bp->rx_nr_rings;
5196
5197 if (irq_re_init) {
5198 rc = bnxt_hwrm_stat_ctx_alloc(bp);
5199 if (rc) {
5200 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
5201 rc);
5202 goto err_out;
5203 }
5204 if (bp->tx_reserved_rings != bp->tx_nr_rings) {
5205 int tx = bp->tx_nr_rings;
5206
5207 if (bnxt_hwrm_reserve_tx_rings(bp, &tx) ||
5208 tx < bp->tx_nr_rings) {
5209 rc = -ENOMEM;
5210 goto err_out;
5211 }
5212 }
5213 }
5214
5215 rc = bnxt_hwrm_ring_alloc(bp);
5216 if (rc) {
5217 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
5218 goto err_out;
5219 }
5220
5221 rc = bnxt_hwrm_ring_grp_alloc(bp);
5222 if (rc) {
5223 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
5224 goto err_out;
5225 }
5226
5227 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5228 rx_nr_rings--;
5229
5230 /* default vnic 0 */
5231 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
5232 if (rc) {
5233 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
5234 goto err_out;
5235 }
5236
5237 rc = bnxt_setup_vnic(bp, 0);
5238 if (rc)
5239 goto err_out;
5240
5241 if (bp->flags & BNXT_FLAG_RFS) {
5242 rc = bnxt_alloc_rfs_vnics(bp);
5243 if (rc)
5244 goto err_out;
5245 }
5246
5247 if (bp->flags & BNXT_FLAG_TPA) {
5248 rc = bnxt_set_tpa(bp, true);
5249 if (rc)
5250 goto err_out;
5251 }
5252
5253 if (BNXT_VF(bp))
5254 bnxt_update_vf_mac(bp);
5255
5256 /* Filter for default vnic 0 */
5257 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
5258 if (rc) {
5259 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
5260 goto err_out;
5261 }
5262 vnic->uc_filter_count = 1;
5263
5264 vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
5265
5266 if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
5267 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
5268
5269 if (bp->dev->flags & IFF_ALLMULTI) {
5270 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
5271 vnic->mc_list_count = 0;
5272 } else {
5273 u32 mask = 0;
5274
5275 bnxt_mc_list_updated(bp, &mask);
5276 vnic->rx_mask |= mask;
5277 }
5278
5279 rc = bnxt_cfg_rx_mode(bp);
5280 if (rc)
5281 goto err_out;
5282
5283 rc = bnxt_hwrm_set_coal(bp);
5284 if (rc)
5285 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
5286 rc);
5287
5288 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
5289 rc = bnxt_setup_nitroa0_vnic(bp);
5290 if (rc)
5291 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
5292 rc);
5293 }
5294
5295 if (BNXT_VF(bp)) {
5296 bnxt_hwrm_func_qcfg(bp);
5297 netdev_update_features(bp->dev);
5298 }
5299
5300 return 0;
5301
5302 err_out:
5303 bnxt_hwrm_resource_free(bp, 0, true);
5304
5305 return rc;
5306 }
5307
5308 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
5309 {
5310 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
5311 return 0;
5312 }
5313
5314 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
5315 {
5316 bnxt_init_cp_rings(bp);
5317 bnxt_init_rx_rings(bp);
5318 bnxt_init_tx_rings(bp);
5319 bnxt_init_ring_grps(bp, irq_re_init);
5320 bnxt_init_vnics(bp);
5321
5322 return bnxt_init_chip(bp, irq_re_init);
5323 }
5324
5325 static int bnxt_set_real_num_queues(struct bnxt *bp)
5326 {
5327 int rc;
5328 struct net_device *dev = bp->dev;
5329
5330 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
5331 bp->tx_nr_rings_xdp);
5332 if (rc)
5333 return rc;
5334
5335 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
5336 if (rc)
5337 return rc;
5338
5339 #ifdef CONFIG_RFS_ACCEL
5340 if (bp->flags & BNXT_FLAG_RFS)
5341 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
5342 #endif
5343
5344 return rc;
5345 }
5346
5347 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
5348 bool shared)
5349 {
5350 int _rx = *rx, _tx = *tx;
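/* e.g. in the non-shared case rx = 4, tx = 4, max = 6 is trimmed one ring
 * at a time down to rx = 3, tx = 3.
 */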
5351
5352 if (shared) {
5353 *rx = min_t(int, _rx, max);
5354 *tx = min_t(int, _tx, max);
5355 } else {
5356 if (max < 2)
5357 return -ENOMEM;
5358
5359 while (_rx + _tx > max) {
5360 if (_rx > _tx && _rx > 1)
5361 _rx--;
5362 else if (_tx > 1)
5363 _tx--;
5364 }
5365 *rx = _rx;
5366 *tx = _tx;
5367 }
5368 return 0;
5369 }
5370
5371 static void bnxt_setup_msix(struct bnxt *bp)
5372 {
5373 const int len = sizeof(bp->irq_tbl[0].name);
5374 struct net_device *dev = bp->dev;
5375 int tcs, i;
5376
5377 tcs = netdev_get_num_tc(dev);
5378 if (tcs) {
5379 int i, off, count;
5380
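/* e.g. with 2 TCs and bp->tx_nr_rings_per_tc == 4, TC 0 maps tx queues
 * 0-3 and TC 1 maps tx queues 4-7.
 */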
5381 for (i = 0; i < tcs; i++) {
5382 count = bp->tx_nr_rings_per_tc;
5383 off = i * count;
5384 netdev_set_tc_queue(dev, i, count, off);
5385 }
5386 }
5387
5388 for (i = 0; i < bp->cp_nr_rings; i++) {
5389 char *attr;
5390
5391 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5392 attr = "TxRx";
5393 else if (i < bp->rx_nr_rings)
5394 attr = "rx";
5395 else
5396 attr = "tx";
5397
5398 snprintf(bp->irq_tbl[i].name, len, "%s-%s-%d", dev->name, attr,
5399 i);
5400 bp->irq_tbl[i].handler = bnxt_msix;
5401 }
5402 }
5403
5404 static void bnxt_setup_inta(struct bnxt *bp)
5405 {
5406 const int len = sizeof(bp->irq_tbl[0].name);
5407
5408 if (netdev_get_num_tc(bp->dev))
5409 netdev_reset_tc(bp->dev);
5410
5411 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
5412 0);
5413 bp->irq_tbl[0].handler = bnxt_inta;
5414 }
5415
5416 static int bnxt_setup_int_mode(struct bnxt *bp)
5417 {
5418 int rc;
5419
5420 if (bp->flags & BNXT_FLAG_USING_MSIX)
5421 bnxt_setup_msix(bp);
5422 else
5423 bnxt_setup_inta(bp);
5424
5425 rc = bnxt_set_real_num_queues(bp);
5426 return rc;
5427 }
5428
5429 #ifdef CONFIG_RFS_ACCEL
5430 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
5431 {
5432 #if defined(CONFIG_BNXT_SRIOV)
5433 if (BNXT_VF(bp))
5434 return bp->vf.max_rsscos_ctxs;
5435 #endif
5436 return bp->pf.max_rsscos_ctxs;
5437 }
5438
5439 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
5440 {
5441 #if defined(CONFIG_BNXT_SRIOV)
5442 if (BNXT_VF(bp))
5443 return bp->vf.max_vnics;
5444 #endif
5445 return bp->pf.max_vnics;
5446 }
5447 #endif
5448
5449 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
5450 {
5451 #if defined(CONFIG_BNXT_SRIOV)
5452 if (BNXT_VF(bp))
5453 return bp->vf.max_stat_ctxs;
5454 #endif
5455 return bp->pf.max_stat_ctxs;
5456 }
5457
5458 void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max)
5459 {
5460 #if defined(CONFIG_BNXT_SRIOV)
5461 if (BNXT_VF(bp))
5462 bp->vf.max_stat_ctxs = max;
5463 else
5464 #endif
5465 bp->pf.max_stat_ctxs = max;
5466 }
5467
5468 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
5469 {
5470 #if defined(CONFIG_BNXT_SRIOV)
5471 if (BNXT_VF(bp))
5472 return bp->vf.max_cp_rings;
5473 #endif
5474 return bp->pf.max_cp_rings;
5475 }
5476
5477 void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max)
5478 {
5479 #if defined(CONFIG_BNXT_SRIOV)
5480 if (BNXT_VF(bp))
5481 bp->vf.max_cp_rings = max;
5482 else
5483 #endif
5484 bp->pf.max_cp_rings = max;
5485 }
5486
5487 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
5488 {
5489 #if defined(CONFIG_BNXT_SRIOV)
5490 if (BNXT_VF(bp))
5491 return min_t(unsigned int, bp->vf.max_irqs,
5492 bp->vf.max_cp_rings);
5493 #endif
5494 return min_t(unsigned int, bp->pf.max_irqs, bp->pf.max_cp_rings);
5495 }
5496
5497 void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
5498 {
5499 #if defined(CONFIG_BNXT_SRIOV)
5500 if (BNXT_VF(bp))
5501 bp->vf.max_irqs = max_irqs;
5502 else
5503 #endif
5504 bp->pf.max_irqs = max_irqs;
5505 }
5506
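/* Enable MSI-X with up to the number of vectors this function supports,
 * then trim the RX/TX ring counts to the vectors actually granted.  On
 * failure the IRQ table is freed and MSI-X is disabled so the caller can
 * fall back to INTA.
 */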
5507 static int bnxt_init_msix(struct bnxt *bp)
5508 {
5509 int i, total_vecs, rc = 0, min = 1;
5510 struct msix_entry *msix_ent;
5511
5512 total_vecs = bnxt_get_max_func_irqs(bp);
5513 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
5514 if (!msix_ent)
5515 return -ENOMEM;
5516
5517 for (i = 0; i < total_vecs; i++) {
5518 msix_ent[i].entry = i;
5519 msix_ent[i].vector = 0;
5520 }
5521
5522 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
5523 min = 2;
5524
5525 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
5526 if (total_vecs < 0) {
5527 rc = -ENODEV;
5528 goto msix_setup_exit;
5529 }
5530
5531 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
5532 if (bp->irq_tbl) {
5533 for (i = 0; i < total_vecs; i++)
5534 bp->irq_tbl[i].vector = msix_ent[i].vector;
5535
5536 bp->total_irqs = total_vecs;
5537 /* Trim rings based on the number of vectors allocated */
5538 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
5539 total_vecs, min == 1);
5540 if (rc)
5541 goto msix_setup_exit;
5542
5543 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
5544 bp->cp_nr_rings = (min == 1) ?
5545 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
5546 bp->tx_nr_rings + bp->rx_nr_rings;
5547
5548 } else {
5549 rc = -ENOMEM;
5550 goto msix_setup_exit;
5551 }
5552 bp->flags |= BNXT_FLAG_USING_MSIX;
5553 kfree(msix_ent);
5554 return 0;
5555
5556 msix_setup_exit:
5557 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
5558 kfree(bp->irq_tbl);
5559 bp->irq_tbl = NULL;
5560 pci_disable_msix(bp->pdev);
5561 kfree(msix_ent);
5562 return rc;
5563 }
5564
5565 static int bnxt_init_inta(struct bnxt *bp)
5566 {
5567 bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
5568 if (!bp->irq_tbl)
5569 return -ENOMEM;
5570
5571 bp->total_irqs = 1;
5572 bp->rx_nr_rings = 1;
5573 bp->tx_nr_rings = 1;
5574 bp->cp_nr_rings = 1;
5575 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
5576 bp->flags |= BNXT_FLAG_SHARED_RINGS;
5577 bp->irq_tbl[0].vector = bp->pdev->irq;
5578 return 0;
5579 }
5580
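/* Select the interrupt mode: use MSI-X if the device advertises the
 * capability; if that is unavailable or fails, a PF falls back to a single
 * INTA vector with shared rings.
 */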
5581 static int bnxt_init_int_mode(struct bnxt *bp)
5582 {
5583 int rc = 0;
5584
5585 if (bp->flags & BNXT_FLAG_MSIX_CAP)
5586 rc = bnxt_init_msix(bp);
5587
5588 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
5589 /* fall back to INTA */
5590 rc = bnxt_init_inta(bp);
5591 }
5592 return rc;
5593 }
5594
5595 static void bnxt_clear_int_mode(struct bnxt *bp)
5596 {
5597 if (bp->flags & BNXT_FLAG_USING_MSIX)
5598 pci_disable_msix(bp->pdev);
5599
5600 kfree(bp->irq_tbl);
5601 bp->irq_tbl = NULL;
5602 bp->flags &= ~BNXT_FLAG_USING_MSIX;
5603 }
5604
5605 static void bnxt_free_irq(struct bnxt *bp)
5606 {
5607 struct bnxt_irq *irq;
5608 int i;
5609
5610 #ifdef CONFIG_RFS_ACCEL
5611 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
5612 bp->dev->rx_cpu_rmap = NULL;
5613 #endif
5614 if (!bp->irq_tbl)
5615 return;
5616
5617 for (i = 0; i < bp->cp_nr_rings; i++) {
5618 irq = &bp->irq_tbl[i];
5619 if (irq->requested) {
5620 if (irq->have_cpumask) {
5621 irq_set_affinity_hint(irq->vector, NULL);
5622 free_cpumask_var(irq->cpu_mask);
5623 irq->have_cpumask = 0;
5624 }
5625 free_irq(irq->vector, bp->bnapi[i]);
5626 }
5627
5628 irq->requested = 0;
5629 }
5630 }
5631
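/* Request one IRQ per completion ring.  RX-capable vectors are also added
 * to the aRFS CPU reverse map, and an affinity hint spreads the vectors
 * across CPUs local to the device's NUMA node.
 */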
5632 static int bnxt_request_irq(struct bnxt *bp)
5633 {
5634 int i, j, rc = 0;
5635 unsigned long flags = 0;
5636 #ifdef CONFIG_RFS_ACCEL
5637 struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap;
5638 #endif
5639
5640 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
5641 flags = IRQF_SHARED;
5642
5643 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
5644 struct bnxt_irq *irq = &bp->irq_tbl[i];
5645 #ifdef CONFIG_RFS_ACCEL
5646 if (rmap && bp->bnapi[i]->rx_ring) {
5647 rc = irq_cpu_rmap_add(rmap, irq->vector);
5648 if (rc)
5649 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
5650 j);
5651 j++;
5652 }
5653 #endif
5654 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5655 bp->bnapi[i]);
5656 if (rc)
5657 break;
5658
5659 irq->requested = 1;
5660
5661 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
5662 int numa_node = dev_to_node(&bp->pdev->dev);
5663
5664 irq->have_cpumask = 1;
5665 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
5666 irq->cpu_mask);
5667 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
5668 if (rc) {
5669 netdev_warn(bp->dev,
5670 "Set affinity failed, IRQ = %d\n",
5671 irq->vector);
5672 break;
5673 }
5674 }
5675 }
5676 return rc;
5677 }
5678
5679 static void bnxt_del_napi(struct bnxt *bp)
5680 {
5681 int i;
5682
5683 if (!bp->bnapi)
5684 return;
5685
5686 for (i = 0; i < bp->cp_nr_rings; i++) {
5687 struct bnxt_napi *bnapi = bp->bnapi[i];
5688
5689 napi_hash_del(&bnapi->napi);
5690 netif_napi_del(&bnapi->napi);
5691 }
5692 /* We called napi_hash_del() before netif_napi_del(), so we need
5693 * to respect an RCU grace period before freeing the napi structures.
5694 */
5695 synchronize_net();
5696 }
5697
5698 static void bnxt_init_napi(struct bnxt *bp)
5699 {
5700 int i;
5701 unsigned int cp_nr_rings = bp->cp_nr_rings;
5702 struct bnxt_napi *bnapi;
5703
5704 if (bp->flags & BNXT_FLAG_USING_MSIX) {
5705 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5706 cp_nr_rings--;
5707 for (i = 0; i < cp_nr_rings; i++) {
5708 bnapi = bp->bnapi[i];
5709 netif_napi_add(bp->dev, &bnapi->napi,
5710 bnxt_poll, 64);
5711 }
5712 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
5713 bnapi = bp->bnapi[cp_nr_rings];
5714 netif_napi_add(bp->dev, &bnapi->napi,
5715 bnxt_poll_nitroa0, 64);
5716 }
5717 } else {
5718 bnapi = bp->bnapi[0];
5719 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
5720 }
5721 }
5722
5723 static void bnxt_disable_napi(struct bnxt *bp)
5724 {
5725 int i;
5726
5727 if (!bp->bnapi)
5728 return;
5729
5730 for (i = 0; i < bp->cp_nr_rings; i++)
5731 napi_disable(&bp->bnapi[i]->napi);
5732 }
5733
5734 static void bnxt_enable_napi(struct bnxt *bp)
5735 {
5736 int i;
5737
5738 for (i = 0; i < bp->cp_nr_rings; i++) {
5739 bp->bnapi[i]->in_reset = false;
5740 napi_enable(&bp->bnapi[i]->napi);
5741 }
5742 }
5743
5744 void bnxt_tx_disable(struct bnxt *bp)
5745 {
5746 int i;
5747 struct bnxt_tx_ring_info *txr;
5748
5749 if (bp->tx_ring) {
5750 for (i = 0; i < bp->tx_nr_rings; i++) {
5751 txr = &bp->tx_ring[i];
5752 txr->dev_state = BNXT_DEV_STATE_CLOSING;
5753 }
5754 }
5755 /* Stop all TX queues */
5756 netif_tx_disable(bp->dev);
5757 netif_carrier_off(bp->dev);
5758 }
5759
5760 void bnxt_tx_enable(struct bnxt *bp)
5761 {
5762 int i;
5763 struct bnxt_tx_ring_info *txr;
5764
5765 for (i = 0; i < bp->tx_nr_rings; i++) {
5766 txr = &bp->tx_ring[i];
5767 txr->dev_state = 0;
5768 }
5769 netif_tx_wake_all_queues(bp->dev);
5770 if (bp->link_info.link_up)
5771 netif_carrier_on(bp->dev);
5772 }
5773
5774 static void bnxt_report_link(struct bnxt *bp)
5775 {
5776 if (bp->link_info.link_up) {
5777 const char *duplex;
5778 const char *flow_ctrl;
5779 u32 speed;
5780 u16 fec;
5781
5782 netif_carrier_on(bp->dev);
5783 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
5784 duplex = "full";
5785 else
5786 duplex = "half";
5787 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
5788 flow_ctrl = "ON - receive & transmit";
5789 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
5790 flow_ctrl = "ON - transmit";
5791 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
5792 flow_ctrl = "ON - receive";
5793 else
5794 flow_ctrl = "none";
5795 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
5796 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
5797 speed, duplex, flow_ctrl);
5798 if (bp->flags & BNXT_FLAG_EEE_CAP)
5799 netdev_info(bp->dev, "EEE is %s\n",
5800 bp->eee.eee_active ? "active" :
5801 "not active");
5802 fec = bp->link_info.fec_cfg;
5803 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
5804 netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
5805 (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
5806 (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
5807 (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
5808 } else {
5809 netif_carrier_off(bp->dev);
5810 netdev_err(bp->dev, "NIC Link is Down\n");
5811 }
5812 }
5813
5814 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
5815 {
5816 int rc = 0;
5817 struct hwrm_port_phy_qcaps_input req = {0};
5818 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5819 struct bnxt_link_info *link_info = &bp->link_info;
5820
5821 if (bp->hwrm_spec_code < 0x10201)
5822 return 0;
5823
5824 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
5825
5826 mutex_lock(&bp->hwrm_cmd_lock);
5827 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5828 if (rc)
5829 goto hwrm_phy_qcaps_exit;
5830
5831 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
5832 struct ethtool_eee *eee = &bp->eee;
5833 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
5834
5835 bp->flags |= BNXT_FLAG_EEE_CAP;
5836 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
5837 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
5838 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
5839 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
5840 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
5841 }
5842 if (resp->supported_speeds_auto_mode)
5843 link_info->support_auto_speeds =
5844 le16_to_cpu(resp->supported_speeds_auto_mode);
5845
5846 bp->port_count = resp->port_cnt;
5847
5848 hwrm_phy_qcaps_exit:
5849 mutex_unlock(&bp->hwrm_cmd_lock);
5850 return rc;
5851 }
5852
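/* Query the current PHY state from firmware (HWRM_PORT_PHY_QCFG) and
 * refresh the cached link_info (speed, duplex, pause, EEE, FEC).  When
 * chng_link_state is set, a change in link status is reported through
 * bnxt_report_link(); otherwise the link is left marked down.
 */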
5853 static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
5854 {
5855 int rc = 0;
5856 struct bnxt_link_info *link_info = &bp->link_info;
5857 struct hwrm_port_phy_qcfg_input req = {0};
5858 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5859 u8 link_up = link_info->link_up;
5860 u16 diff;
5861
5862 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
5863
5864 mutex_lock(&bp->hwrm_cmd_lock);
5865 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5866 if (rc) {
5867 mutex_unlock(&bp->hwrm_cmd_lock);
5868 return rc;
5869 }
5870
5871 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
5872 link_info->phy_link_status = resp->link;
5873 link_info->duplex = resp->duplex_cfg;
5874 if (bp->hwrm_spec_code >= 0x10800)
5875 link_info->duplex = resp->duplex_state;
5876 link_info->pause = resp->pause;
5877 link_info->auto_mode = resp->auto_mode;
5878 link_info->auto_pause_setting = resp->auto_pause;
5879 link_info->lp_pause = resp->link_partner_adv_pause;
5880 link_info->force_pause_setting = resp->force_pause;
5881 link_info->duplex_setting = resp->duplex_cfg;
5882 if (link_info->phy_link_status == BNXT_LINK_LINK)
5883 link_info->link_speed = le16_to_cpu(resp->link_speed);
5884 else
5885 link_info->link_speed = 0;
5886 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
5887 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
5888 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
5889 link_info->lp_auto_link_speeds =
5890 le16_to_cpu(resp->link_partner_adv_speeds);
5891 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
5892 link_info->phy_ver[0] = resp->phy_maj;
5893 link_info->phy_ver[1] = resp->phy_min;
5894 link_info->phy_ver[2] = resp->phy_bld;
5895 link_info->media_type = resp->media_type;
5896 link_info->phy_type = resp->phy_type;
5897 link_info->transceiver = resp->xcvr_pkg_type;
5898 link_info->phy_addr = resp->eee_config_phy_addr &
5899 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
5900 link_info->module_status = resp->module_status;
5901
5902 if (bp->flags & BNXT_FLAG_EEE_CAP) {
5903 struct ethtool_eee *eee = &bp->eee;
5904 u16 fw_speeds;
5905
5906 eee->eee_active = 0;
5907 if (resp->eee_config_phy_addr &
5908 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
5909 eee->eee_active = 1;
5910 fw_speeds = le16_to_cpu(
5911 resp->link_partner_adv_eee_link_speed_mask);
5912 eee->lp_advertised =
5913 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
5914 }
5915
5916 /* Pull initial EEE config */
5917 if (!chng_link_state) {
5918 if (resp->eee_config_phy_addr &
5919 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
5920 eee->eee_enabled = 1;
5921
5922 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
5923 eee->advertised =
5924 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
5925
5926 if (resp->eee_config_phy_addr &
5927 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
5928 __le32 tmr;
5929
5930 eee->tx_lpi_enabled = 1;
5931 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
5932 eee->tx_lpi_timer = le32_to_cpu(tmr) &
5933 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
5934 }
5935 }
5936 }
5937
5938 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
5939 if (bp->hwrm_spec_code >= 0x10504)
5940 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
5941
5942 /* TODO: need to add more logic to report VF link */
5943 if (chng_link_state) {
5944 if (link_info->phy_link_status == BNXT_LINK_LINK)
5945 link_info->link_up = 1;
5946 else
5947 link_info->link_up = 0;
5948 if (link_up != link_info->link_up)
5949 bnxt_report_link(bp);
5950 } else {
5951 /* always link down if not required to update link state */
5952 link_info->link_up = 0;
5953 }
5954 mutex_unlock(&bp->hwrm_cmd_lock);
5955
5956 if (!BNXT_SINGLE_PF(bp))
5957 return 0;
5958
5959 diff = link_info->support_auto_speeds ^ link_info->advertising;
5960 if ((link_info->support_auto_speeds | diff) !=
5961 link_info->support_auto_speeds) {
5962 /* An advertised speed is no longer supported, so we need to
5963 * update the advertisement settings. Caller holds RTNL
5964 * so we can modify link settings.
5965 */
5966 link_info->advertising = link_info->support_auto_speeds;
5967 if (link_info->autoneg & BNXT_AUTONEG_SPEED)
5968 bnxt_hwrm_set_link_setting(bp, true, false);
5969 }
5970 return 0;
5971 }
5972
5973 static void bnxt_get_port_module_status(struct bnxt *bp)
5974 {
5975 struct bnxt_link_info *link_info = &bp->link_info;
5976 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
5977 u8 module_status;
5978
5979 if (bnxt_update_link(bp, true))
5980 return;
5981
5982 module_status = link_info->module_status;
5983 switch (module_status) {
5984 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
5985 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
5986 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
5987 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
5988 bp->pf.port_id);
5989 if (bp->hwrm_spec_code >= 0x10201) {
5990 netdev_warn(bp->dev, "Module part number %s\n",
5991 resp->phy_vendor_partnumber);
5992 }
5993 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
5994 netdev_warn(bp->dev, "TX is disabled\n");
5995 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
5996 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
5997 }
5998 }
5999
6000 static void
6001 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
6002 {
6003 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
6004 if (bp->hwrm_spec_code >= 0x10201)
6005 req->auto_pause =
6006 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
6007 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
6008 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
6009 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
6010 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
6011 req->enables |=
6012 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
6013 } else {
6014 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
6015 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
6016 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
6017 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
6018 req->enables |=
6019 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
6020 if (bp->hwrm_spec_code >= 0x10201) {
6021 req->auto_pause = req->force_pause;
6022 req->enables |= cpu_to_le32(
6023 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
6024 }
6025 }
6026 }
6027
6028 static void bnxt_hwrm_set_link_common(struct bnxt *bp,
6029 struct hwrm_port_phy_cfg_input *req)
6030 {
6031 u8 autoneg = bp->link_info.autoneg;
6032 u16 fw_link_speed = bp->link_info.req_link_speed;
6033 u16 advertising = bp->link_info.advertising;
6034
6035 if (autoneg & BNXT_AUTONEG_SPEED) {
6036 req->auto_mode |=
6037 PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
6038
6039 req->enables |= cpu_to_le32(
6040 PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
6041 req->auto_link_speed_mask = cpu_to_le16(advertising);
6042
6043 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
6044 req->flags |=
6045 cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
6046 } else {
6047 req->force_link_speed = cpu_to_le16(fw_link_speed);
6048 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
6049 }
6050
6051 /* tell chimp that the setting takes effect immediately */
6052 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
6053 }
6054
6055 int bnxt_hwrm_set_pause(struct bnxt *bp)
6056 {
6057 struct hwrm_port_phy_cfg_input req = {0};
6058 int rc;
6059
6060 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
6061 bnxt_hwrm_set_pause_common(bp, &req);
6062
6063 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
6064 bp->link_info.force_link_chng)
6065 bnxt_hwrm_set_link_common(bp, &req);
6066
6067 mutex_lock(&bp->hwrm_cmd_lock);
6068 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6069 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
6070 /* since changing the pause setting doesn't trigger any link
6071 * change event, the driver needs to update the current pause
6072 * result upon successful return of the phy_cfg command
6073 */
6074 bp->link_info.pause =
6075 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
6076 bp->link_info.auto_pause_setting = 0;
6077 if (!bp->link_info.force_link_chng)
6078 bnxt_report_link(bp);
6079 }
6080 bp->link_info.force_link_chng = false;
6081 mutex_unlock(&bp->hwrm_cmd_lock);
6082 return rc;
6083 }
6084
6085 static void bnxt_hwrm_set_eee(struct bnxt *bp,
6086 struct hwrm_port_phy_cfg_input *req)
6087 {
6088 struct ethtool_eee *eee = &bp->eee;
6089
6090 if (eee->eee_enabled) {
6091 u16 eee_speeds;
6092 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
6093
6094 if (eee->tx_lpi_enabled)
6095 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
6096 else
6097 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
6098
6099 req->flags |= cpu_to_le32(flags);
6100 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
6101 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
6102 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
6103 } else {
6104 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
6105 }
6106 }
6107
6108 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
6109 {
6110 struct hwrm_port_phy_cfg_input req = {0};
6111
6112 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
6113 if (set_pause)
6114 bnxt_hwrm_set_pause_common(bp, &req);
6115
6116 bnxt_hwrm_set_link_common(bp, &req);
6117
6118 if (set_eee)
6119 bnxt_hwrm_set_eee(bp, &req);
6120 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6121 }
6122
6123 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
6124 {
6125 struct hwrm_port_phy_cfg_input req = {0};
6126
6127 if (!BNXT_SINGLE_PF(bp))
6128 return 0;
6129
6130 if (pci_num_vf(bp->pdev))
6131 return 0;
6132
6133 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
6134 req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
6135 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6136 }
6137
6138 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
6139 {
6140 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6141 struct hwrm_port_led_qcaps_input req = {0};
6142 struct bnxt_pf_info *pf = &bp->pf;
6143 int rc;
6144
6145 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
6146 return 0;
6147
6148 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
6149 req.port_id = cpu_to_le16(pf->port_id);
6150 mutex_lock(&bp->hwrm_cmd_lock);
6151 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6152 if (rc) {
6153 mutex_unlock(&bp->hwrm_cmd_lock);
6154 return rc;
6155 }
6156 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
6157 int i;
6158
6159 bp->num_leds = resp->num_leds;
6160 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
6161 bp->num_leds);
6162 for (i = 0; i < bp->num_leds; i++) {
6163 struct bnxt_led_info *led = &bp->leds[i];
6164 __le16 caps = led->led_state_caps;
6165
6166 if (!led->led_group_id ||
6167 !BNXT_LED_ALT_BLINK_CAP(caps)) {
6168 bp->num_leds = 0;
6169 break;
6170 }
6171 }
6172 }
6173 mutex_unlock(&bp->hwrm_cmd_lock);
6174 return 0;
6175 }
6176
6177 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
6178 {
6179 struct hwrm_wol_filter_alloc_input req = {0};
6180 struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
6181 int rc;
6182
6183 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
6184 req.port_id = cpu_to_le16(bp->pf.port_id);
6185 req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
6186 req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
6187 memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
6188 mutex_lock(&bp->hwrm_cmd_lock);
6189 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6190 if (!rc)
6191 bp->wol_filter_id = resp->wol_filter_id;
6192 mutex_unlock(&bp->hwrm_cmd_lock);
6193 return rc;
6194 }
6195
6196 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
6197 {
6198 struct hwrm_wol_filter_free_input req = {0};
6199 int rc;
6200
6201 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
6202 req.port_id = cpu_to_le16(bp->pf.port_id);
6203 req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
6204 req.wol_filter_id = bp->wol_filter_id;
6205 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6206 return rc;
6207 }
6208
6209 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
6210 {
6211 struct hwrm_wol_filter_qcfg_input req = {0};
6212 struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6213 u16 next_handle = 0;
6214 int rc;
6215
6216 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
6217 req.port_id = cpu_to_le16(bp->pf.port_id);
6218 req.handle = cpu_to_le16(handle);
6219 mutex_lock(&bp->hwrm_cmd_lock);
6220 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6221 if (!rc) {
6222 next_handle = le16_to_cpu(resp->next_handle);
6223 if (next_handle != 0) {
6224 if (resp->wol_type ==
6225 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
6226 bp->wol = 1;
6227 bp->wol_filter_id = resp->wol_filter_id;
6228 }
6229 }
6230 }
6231 mutex_unlock(&bp->hwrm_cmd_lock);
6232 return next_handle;
6233 }
6234
6235 static void bnxt_get_wol_settings(struct bnxt *bp)
6236 {
6237 u16 handle = 0;
6238
6239 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
6240 return;
6241
6242 do {
6243 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
6244 } while (handle && handle != 0xffff);
6245 }
6246
6247 static bool bnxt_eee_config_ok(struct bnxt *bp)
6248 {
6249 struct ethtool_eee *eee = &bp->eee;
6250 struct bnxt_link_info *link_info = &bp->link_info;
6251
6252 if (!(bp->flags & BNXT_FLAG_EEE_CAP))
6253 return true;
6254
6255 if (eee->eee_enabled) {
6256 u32 advertising =
6257 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
6258
6259 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
6260 eee->eee_enabled = 0;
6261 return false;
6262 }
6263 if (eee->advertised & ~advertising) {
6264 eee->advertised = advertising & eee->supported;
6265 return false;
6266 }
6267 }
6268 return true;
6269 }
6270
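/* Compare the requested pause, speed and EEE settings against the state
 * reported by firmware and reconfigure the PHY only for what has changed,
 * or when a previous close left the link down.
 */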
6271 static int bnxt_update_phy_setting(struct bnxt *bp)
6272 {
6273 int rc;
6274 bool update_link = false;
6275 bool update_pause = false;
6276 bool update_eee = false;
6277 struct bnxt_link_info *link_info = &bp->link_info;
6278
6279 rc = bnxt_update_link(bp, true);
6280 if (rc) {
6281 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
6282 rc);
6283 return rc;
6284 }
6285 if (!BNXT_SINGLE_PF(bp))
6286 return 0;
6287
6288 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
6289 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
6290 link_info->req_flow_ctrl)
6291 update_pause = true;
6292 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
6293 link_info->force_pause_setting != link_info->req_flow_ctrl)
6294 update_pause = true;
6295 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
6296 if (BNXT_AUTO_MODE(link_info->auto_mode))
6297 update_link = true;
6298 if (link_info->req_link_speed != link_info->force_link_speed)
6299 update_link = true;
6300 if (link_info->req_duplex != link_info->duplex_setting)
6301 update_link = true;
6302 } else {
6303 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
6304 update_link = true;
6305 if (link_info->advertising != link_info->auto_link_speeds)
6306 update_link = true;
6307 }
6308
6309 /* The last close may have shut down the link, so we need to call
6310 * PHY_CFG to bring it back up.
6311 */
6312 if (!netif_carrier_ok(bp->dev))
6313 update_link = true;
6314
6315 if (!bnxt_eee_config_ok(bp))
6316 update_eee = true;
6317
6318 if (update_link)
6319 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
6320 else if (update_pause)
6321 rc = bnxt_hwrm_set_pause(bp);
6322 if (rc) {
6323 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
6324 rc);
6325 return rc;
6326 }
6327
6328 return rc;
6329 }
6330
6331 /* Common routine to pre-map certain register blocks to different GRC windows.
6332 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
6333 * in the PF and 3 windows in the VF can be customized to map in different
6334 * register blocks.
6335 */
6336 static void bnxt_preset_reg_win(struct bnxt *bp)
6337 {
6338 if (BNXT_PF(bp)) {
6339 /* CAG registers map to GRC window #4 */
6340 writel(BNXT_CAG_REG_BASE,
6341 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
6342 }
6343 }
6344
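/* Core open path: set up the interrupt mode, allocate rings and buffers,
 * register NAPI and request IRQs, initialize the NIC through firmware,
 * update the PHY settings if requested, then enable interrupts and the TX
 * queues and start the periodic timer.
 */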
6345 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
6346 {
6347 int rc = 0;
6348
6349 bnxt_preset_reg_win(bp);
6350 netif_carrier_off(bp->dev);
6351 if (irq_re_init) {
6352 rc = bnxt_setup_int_mode(bp);
6353 if (rc) {
6354 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
6355 rc);
6356 return rc;
6357 }
6358 }
6359 if ((bp->flags & BNXT_FLAG_RFS) &&
6360 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
6361 /* disable RFS if falling back to INTA */
6362 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
6363 bp->flags &= ~BNXT_FLAG_RFS;
6364 }
6365
6366 rc = bnxt_alloc_mem(bp, irq_re_init);
6367 if (rc) {
6368 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
6369 goto open_err_free_mem;
6370 }
6371
6372 if (irq_re_init) {
6373 bnxt_init_napi(bp);
6374 rc = bnxt_request_irq(bp);
6375 if (rc) {
6376 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
6377 goto open_err_irq;
6378 }
6379 }
6380
6381 bnxt_enable_napi(bp);
6382
6383 rc = bnxt_init_nic(bp, irq_re_init);
6384 if (rc) {
6385 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
6386 goto open_err;
6387 }
6388
6389 if (link_re_init) {
6390 mutex_lock(&bp->link_lock);
6391 rc = bnxt_update_phy_setting(bp);
6392 mutex_unlock(&bp->link_lock);
6393 if (rc)
6394 netdev_warn(bp->dev, "failed to update phy settings\n");
6395 }
6396
6397 if (irq_re_init)
6398 udp_tunnel_get_rx_info(bp->dev);
6399
6400 set_bit(BNXT_STATE_OPEN, &bp->state);
6401 bnxt_enable_int(bp);
6402 /* Enable TX queues */
6403 bnxt_tx_enable(bp);
6404 mod_timer(&bp->timer, jiffies + bp->current_interval);
6405 /* Poll link status and check for SFP+ module status */
6406 bnxt_get_port_module_status(bp);
6407
6408 /* VF-reps may need to be re-opened after the PF is re-opened */
6409 if (BNXT_PF(bp))
6410 bnxt_vf_reps_open(bp);
6411 return 0;
6412
6413 open_err:
6414 bnxt_disable_napi(bp);
6415
6416 open_err_irq:
6417 bnxt_del_napi(bp);
6418
6419 open_err_free_mem:
6420 bnxt_free_skbs(bp);
6421 bnxt_free_irq(bp);
6422 bnxt_free_mem(bp, true);
6423 return rc;
6424 }
6425
6426 /* rtnl_lock held */
6427 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
6428 {
6429 int rc = 0;
6430
6431 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
6432 if (rc) {
6433 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
6434 dev_close(bp->dev);
6435 }
6436 return rc;
6437 }
6438
6439 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
6440 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline
6441 * self tests.
6442 */
6443 int bnxt_half_open_nic(struct bnxt *bp)
6444 {
6445 int rc = 0;
6446
6447 rc = bnxt_alloc_mem(bp, false);
6448 if (rc) {
6449 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
6450 goto half_open_err;
6451 }
6452 rc = bnxt_init_nic(bp, false);
6453 if (rc) {
6454 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
6455 goto half_open_err;
6456 }
6457 return 0;
6458
6459 half_open_err:
6460 bnxt_free_skbs(bp);
6461 bnxt_free_mem(bp, false);
6462 dev_close(bp->dev);
6463 return rc;
6464 }
6465
6466 /* rtnl_lock held, this call can only be made after a previous successful
6467 * call to bnxt_half_open_nic().
6468 */
6469 void bnxt_half_close_nic(struct bnxt *bp)
6470 {
6471 bnxt_hwrm_resource_free(bp, false, false);
6472 bnxt_free_skbs(bp);
6473 bnxt_free_mem(bp, false);
6474 }
6475
6476 static int bnxt_open(struct net_device *dev)
6477 {
6478 struct bnxt *bp = netdev_priv(dev);
6479
6480 return __bnxt_open_nic(bp, true, true);
6481 }
6482
6483 static bool bnxt_drv_busy(struct bnxt *bp)
6484 {
6485 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
6486 test_bit(BNXT_STATE_READ_STATS, &bp->state));
6487 }
6488
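/* Core close path: wait for any SR-IOV configuration and in-flight
 * slow-path work or stats readers to finish, stop TX, shut down the rings
 * through firmware, then free SKBs, IRQs, NAPI contexts and ring memory as
 * requested.
 */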
6489 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
6490 {
6491 int rc = 0;
6492
6493 #ifdef CONFIG_BNXT_SRIOV
6494 if (bp->sriov_cfg) {
6495 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
6496 !bp->sriov_cfg,
6497 BNXT_SRIOV_CFG_WAIT_TMO);
6498 if (rc)
6499 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
6500 }
6501
6502 /* Close the VF-reps before closing PF */
6503 if (BNXT_PF(bp))
6504 bnxt_vf_reps_close(bp);
6505 #endif
6506 /* Change device state to avoid TX queue wake-ups */
6507 bnxt_tx_disable(bp);
6508
6509 clear_bit(BNXT_STATE_OPEN, &bp->state);
6510 smp_mb__after_atomic();
6511 while (bnxt_drv_busy(bp))
6512 msleep(20);
6513
6514 /* Flush rings and disable interrupts */
6515 bnxt_shutdown_nic(bp, irq_re_init);
6516
6517 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
6518
6519 bnxt_disable_napi(bp);
6520 del_timer_sync(&bp->timer);
6521 bnxt_free_skbs(bp);
6522
6523 if (irq_re_init) {
6524 bnxt_free_irq(bp);
6525 bnxt_del_napi(bp);
6526 }
6527 bnxt_free_mem(bp, irq_re_init);
6528 return rc;
6529 }
6530
6531 static int bnxt_close(struct net_device *dev)
6532 {
6533 struct bnxt *bp = netdev_priv(dev);
6534
6535 bnxt_close_nic(bp, true, true);
6536 bnxt_hwrm_shutdown_link(bp);
6537 return 0;
6538 }
6539
6540 /* rtnl_lock held */
6541 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6542 {
6543 switch (cmd) {
6544 case SIOCGMIIPHY:
6545 /* fallthru */
6546 case SIOCGMIIREG: {
6547 if (!netif_running(dev))
6548 return -EAGAIN;
6549
6550 return 0;
6551 }
6552
6553 case SIOCSMIIREG:
6554 if (!netif_running(dev))
6555 return -EAGAIN;
6556
6557 return 0;
6558
6559 default:
6560 /* do nothing */
6561 break;
6562 }
6563 return -EOPNOTSUPP;
6564 }
6565
6566 static void
6567 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6568 {
6569 u32 i;
6570 struct bnxt *bp = netdev_priv(dev);
6571
6572 set_bit(BNXT_STATE_READ_STATS, &bp->state);
6573 /* Make sure bnxt_close_nic() sees that we are reading stats before
6574 * we check the BNXT_STATE_OPEN flag.
6575 */
6576 smp_mb__after_atomic();
6577 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
6578 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
6579 return;
6580 }
6581
6582 /* TODO check if we need to synchronize with bnxt_close path */
6583 for (i = 0; i < bp->cp_nr_rings; i++) {
6584 struct bnxt_napi *bnapi = bp->bnapi[i];
6585 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6586 struct ctx_hw_stats *hw_stats = cpr->hw_stats;
6587
6588 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
6589 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
6590 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
6591
6592 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
6593 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
6594 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
6595
6596 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
6597 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
6598 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
6599
6600 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
6601 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
6602 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
6603
6604 stats->rx_missed_errors +=
6605 le64_to_cpu(hw_stats->rx_discard_pkts);
6606
6607 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
6608
6609 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
6610 }
6611
6612 if (bp->flags & BNXT_FLAG_PORT_STATS) {
6613 struct rx_port_stats *rx = bp->hw_rx_port_stats;
6614 struct tx_port_stats *tx = bp->hw_tx_port_stats;
6615
6616 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
6617 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
6618 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
6619 le64_to_cpu(rx->rx_ovrsz_frames) +
6620 le64_to_cpu(rx->rx_runt_frames);
6621 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
6622 le64_to_cpu(rx->rx_jbr_frames);
6623 stats->collisions = le64_to_cpu(tx->tx_total_collisions);
6624 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
6625 stats->tx_errors = le64_to_cpu(tx->tx_err);
6626 }
6627 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
6628 }
6629
6630 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
6631 {
6632 struct net_device *dev = bp->dev;
6633 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
6634 struct netdev_hw_addr *ha;
6635 u8 *haddr;
6636 int mc_count = 0;
6637 bool update = false;
6638 int off = 0;
6639
6640 netdev_for_each_mc_addr(ha, dev) {
6641 if (mc_count >= BNXT_MAX_MC_ADDRS) {
6642 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
6643 vnic->mc_list_count = 0;
6644 return false;
6645 }
6646 haddr = ha->addr;
6647 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
6648 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
6649 update = true;
6650 }
6651 off += ETH_ALEN;
6652 mc_count++;
6653 }
6654 if (mc_count)
6655 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
6656
6657 if (mc_count != vnic->mc_list_count) {
6658 vnic->mc_list_count = mc_count;
6659 update = true;
6660 }
6661 return update;
6662 }
6663
6664 static bool bnxt_uc_list_updated(struct bnxt *bp)
6665 {
6666 struct net_device *dev = bp->dev;
6667 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
6668 struct netdev_hw_addr *ha;
6669 int off = 0;
6670
6671 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
6672 return true;
6673
6674 netdev_for_each_uc_addr(ha, dev) {
6675 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
6676 return true;
6677
6678 off += ETH_ALEN;
6679 }
6680 return false;
6681 }
6682
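/* ndo_set_rx_mode handler.  Recompute the VNIC RX mask from the device
 * flags and address lists; the HWRM configuration itself is deferred to the
 * slow-path workqueue because this callback cannot sleep.
 */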
6683 static void bnxt_set_rx_mode(struct net_device *dev)
6684 {
6685 struct bnxt *bp = netdev_priv(dev);
6686 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
6687 u32 mask = vnic->rx_mask;
6688 bool mc_update = false;
6689 bool uc_update;
6690
6691 if (!netif_running(dev))
6692 return;
6693
6694 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
6695 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
6696 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
6697
6698 if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
6699 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
6700
6701 uc_update = bnxt_uc_list_updated(bp);
6702
6703 if (dev->flags & IFF_ALLMULTI) {
6704 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
6705 vnic->mc_list_count = 0;
6706 } else {
6707 mc_update = bnxt_mc_list_updated(bp, &mask);
6708 }
6709
6710 if (mask != vnic->rx_mask || uc_update || mc_update) {
6711 vnic->rx_mask = mask;
6712
6713 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
6714 bnxt_queue_sp_work(bp);
6715 }
6716 }
6717
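/* Apply the pending RX mode: re-program the unicast L2 filters if the UC
 * list changed (switching to promiscuous mode when it overflows), then push
 * the multicast/promiscuous mask via HWRM_CFA_L2_SET_RX_MASK, falling back
 * to ALL_MCAST if the MC list cannot be set.
 */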
6718 static int bnxt_cfg_rx_mode(struct bnxt *bp)
6719 {
6720 struct net_device *dev = bp->dev;
6721 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
6722 struct netdev_hw_addr *ha;
6723 int i, off = 0, rc;
6724 bool uc_update;
6725
6726 netif_addr_lock_bh(dev);
6727 uc_update = bnxt_uc_list_updated(bp);
6728 netif_addr_unlock_bh(dev);
6729
6730 if (!uc_update)
6731 goto skip_uc;
6732
6733 mutex_lock(&bp->hwrm_cmd_lock);
6734 for (i = 1; i < vnic->uc_filter_count; i++) {
6735 struct hwrm_cfa_l2_filter_free_input req = {0};
6736
6737 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
6738 -1);
6739
6740 req.l2_filter_id = vnic->fw_l2_filter_id[i];
6741
6742 rc = _hwrm_send_message(bp, &req, sizeof(req),
6743 HWRM_CMD_TIMEOUT);
6744 }
6745 mutex_unlock(&bp->hwrm_cmd_lock);
6746
6747 vnic->uc_filter_count = 1;
6748
6749 netif_addr_lock_bh(dev);
6750 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
6751 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
6752 } else {
6753 netdev_for_each_uc_addr(ha, dev) {
6754 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
6755 off += ETH_ALEN;
6756 vnic->uc_filter_count++;
6757 }
6758 }
6759 netif_addr_unlock_bh(dev);
6760
6761 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
6762 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
6763 if (rc) {
6764 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
6765 rc);
6766 vnic->uc_filter_count = i;
6767 return rc;
6768 }
6769 }
6770
6771 skip_uc:
6772 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
6773 if (rc && vnic->mc_list_count) {
6774 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
6775 rc);
6776 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
6777 vnic->mc_list_count = 0;
6778 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
6779 }
6780 if (rc)
6781 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
6782 rc);
6783
6784 return rc;
6785 }
6786
6787 /* If the chip and firmware support RFS */
6788 static bool bnxt_rfs_supported(struct bnxt *bp)
6789 {
6790 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
6791 return true;
6792 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
6793 return true;
6794 return false;
6795 }
6796
6797 /* If runtime conditions support RFS */
6798 static bool bnxt_rfs_capable(struct bnxt *bp)
6799 {
6800 #ifdef CONFIG_RFS_ACCEL
6801 int vnics, max_vnics, max_rss_ctxs;
6802
6803 if (!(bp->flags & BNXT_FLAG_MSIX_CAP))
6804 return false;
6805
6806 vnics = 1 + bp->rx_nr_rings;
6807 max_vnics = bnxt_get_max_func_vnics(bp);
6808 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
6809
6810 /* RSS contexts not a limiting factor */
6811 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
6812 max_rss_ctxs = max_vnics;
6813 if (vnics > max_vnics || vnics > max_rss_ctxs) {
6814 netdev_warn(bp->dev,
6815 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
6816 min(max_rss_ctxs - 1, max_vnics - 1));
6817 return false;
6818 }
6819
6820 return true;
6821 #else
6822 return false;
6823 #endif
6824 }
6825
6826 static netdev_features_t bnxt_fix_features(struct net_device *dev,
6827 netdev_features_t features)
6828 {
6829 struct bnxt *bp = netdev_priv(dev);
6830
6831 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
6832 features &= ~NETIF_F_NTUPLE;
6833
6834 /* Both CTAG and STAG VLAN acceleration on the RX side have to be
6835 * turned on or off together.
6836 */
6837 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
6838 (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
6839 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
6840 features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
6841 NETIF_F_HW_VLAN_STAG_RX);
6842 else
6843 features |= NETIF_F_HW_VLAN_CTAG_RX |
6844 NETIF_F_HW_VLAN_STAG_RX;
6845 }
6846 #ifdef CONFIG_BNXT_SRIOV
6847 if (BNXT_VF(bp)) {
6848 if (bp->vf.vlan) {
6849 features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
6850 NETIF_F_HW_VLAN_STAG_RX);
6851 }
6852 }
6853 #endif
6854 return features;
6855 }
6856
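/* ndo_set_features handler.  Translate netdev feature bits into driver
 * flags (GRO/LRO/TPA, VLAN stripping, RFS) and either update TPA on the fly
 * or close and reopen the NIC when a full re-init is required.
 */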
6857 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
6858 {
6859 struct bnxt *bp = netdev_priv(dev);
6860 u32 flags = bp->flags;
6861 u32 changes;
6862 int rc = 0;
6863 bool re_init = false;
6864 bool update_tpa = false;
6865
6866 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
6867 if ((features & NETIF_F_GRO) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
6868 flags |= BNXT_FLAG_GRO;
6869 if (features & NETIF_F_LRO)
6870 flags |= BNXT_FLAG_LRO;
6871
6872 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
6873 flags &= ~BNXT_FLAG_TPA;
6874
6875 if (features & NETIF_F_HW_VLAN_CTAG_RX)
6876 flags |= BNXT_FLAG_STRIP_VLAN;
6877
6878 if (features & NETIF_F_NTUPLE)
6879 flags |= BNXT_FLAG_RFS;
6880
6881 changes = flags ^ bp->flags;
6882 if (changes & BNXT_FLAG_TPA) {
6883 update_tpa = true;
6884 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
6885 (flags & BNXT_FLAG_TPA) == 0)
6886 re_init = true;
6887 }
6888
6889 if (changes & ~BNXT_FLAG_TPA)
6890 re_init = true;
6891
6892 if (flags != bp->flags) {
6893 u32 old_flags = bp->flags;
6894
6895 bp->flags = flags;
6896
6897 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
6898 if (update_tpa)
6899 bnxt_set_ring_params(bp);
6900 return rc;
6901 }
6902
6903 if (re_init) {
6904 bnxt_close_nic(bp, false, false);
6905 if (update_tpa)
6906 bnxt_set_ring_params(bp);
6907
6908 return bnxt_open_nic(bp, false, false);
6909 }
6910 if (update_tpa) {
6911 rc = bnxt_set_tpa(bp,
6912 (flags & BNXT_FLAG_TPA) ?
6913 true : false);
6914 if (rc)
6915 bp->flags = old_flags;
6916 }
6917 }
6918 return rc;
6919 }
6920
6921 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
6922 {
6923 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
6924 int i = bnapi->index;
6925
6926 if (!txr)
6927 return;
6928
6929 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
6930 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
6931 txr->tx_cons);
6932 }
6933
6934 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
6935 {
6936 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
6937 int i = bnapi->index;
6938
6939 if (!rxr)
6940 return;
6941
6942 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
6943 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
6944 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
6945 rxr->rx_sw_agg_prod);
6946 }
6947
6948 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
6949 {
6950 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6951 int i = bnapi->index;
6952
6953 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
6954 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
6955 }
6956
6957 static void bnxt_dbg_dump_states(struct bnxt *bp)
6958 {
6959 int i;
6960 struct bnxt_napi *bnapi;
6961
6962 for (i = 0; i < bp->cp_nr_rings; i++) {
6963 bnapi = bp->bnapi[i];
6964 if (netif_msg_drv(bp)) {
6965 bnxt_dump_tx_sw_state(bnapi);
6966 bnxt_dump_rx_sw_state(bnapi);
6967 bnxt_dump_cp_sw_state(bnapi);
6968 }
6969 }
6970 }
6971
6972 static void bnxt_reset_task(struct bnxt *bp, bool silent)
6973 {
6974 if (!silent)
6975 bnxt_dbg_dump_states(bp);
6976 if (netif_running(bp->dev)) {
6977 int rc;
6978
6979 if (!silent)
6980 bnxt_ulp_stop(bp);
6981 bnxt_close_nic(bp, false, false);
6982 rc = bnxt_open_nic(bp, false, false);
6983 if (!silent && !rc)
6984 bnxt_ulp_start(bp);
6985 }
6986 }
6987
6988 static void bnxt_tx_timeout(struct net_device *dev)
6989 {
6990 struct bnxt *bp = netdev_priv(dev);
6991
6992 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
6993 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
6994 bnxt_queue_sp_work(bp);
6995 }
6996
6997 #ifdef CONFIG_NET_POLL_CONTROLLER
6998 static void bnxt_poll_controller(struct net_device *dev)
6999 {
7000 struct bnxt *bp = netdev_priv(dev);
7001 int i;
7002
7003 /* Only process tx rings/combined rings in netpoll mode. */
7004 for (i = 0; i < bp->tx_nr_rings; i++) {
7005 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
7006
7007 napi_schedule(&txr->bnapi->napi);
7008 }
7009 }
7010 #endif
7011
7012 static void bnxt_timer(unsigned long data)
7013 {
7014 struct bnxt *bp = (struct bnxt *)data;
7015 struct net_device *dev = bp->dev;
7016
7017 if (!netif_running(dev))
7018 return;
7019
7020 if (atomic_read(&bp->intr_sem) != 0)
7021 goto bnxt_restart_timer;
7022
7023 if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
7024 bp->stats_coal_ticks) {
7025 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
7026 bnxt_queue_sp_work(bp);
7027 }
7028 bnxt_restart_timer:
7029 mod_timer(&bp->timer, jiffies + bp->current_interval);
7030 }
7031
7032 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
7033 {
7034 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
7035 * set. If the device is being closed, bnxt_close() may be holding
7036 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
7037 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
7038 */
7039 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
7040 rtnl_lock();
7041 }
7042
7043 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
7044 {
7045 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
7046 rtnl_unlock();
7047 }
7048
7049 /* Only called from bnxt_sp_task() */
7050 static void bnxt_reset(struct bnxt *bp, bool silent)
7051 {
7052 bnxt_rtnl_lock_sp(bp);
7053 if (test_bit(BNXT_STATE_OPEN, &bp->state))
7054 bnxt_reset_task(bp, silent);
7055 bnxt_rtnl_unlock_sp(bp);
7056 }
7057
7058 static void bnxt_cfg_ntp_filters(struct bnxt *);
7059
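/* Slow-path work handler.  Processes events queued in bp->sp_event: RX mode
 * and ntuple filter updates, tunnel port add/free, port statistics, link
 * and module status changes, and reset requests.
 */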
7060 static void bnxt_sp_task(struct work_struct *work)
7061 {
7062 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
7063
7064 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
7065 smp_mb__after_atomic();
7066 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
7067 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
7068 return;
7069 }
7070
7071 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
7072 bnxt_cfg_rx_mode(bp);
7073
7074 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
7075 bnxt_cfg_ntp_filters(bp);
7076 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
7077 bnxt_hwrm_exec_fwd_req(bp);
7078 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
7079 bnxt_hwrm_tunnel_dst_port_alloc(
7080 bp, bp->vxlan_port,
7081 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
7082 }
7083 if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
7084 bnxt_hwrm_tunnel_dst_port_free(
7085 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
7086 }
7087 if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
7088 bnxt_hwrm_tunnel_dst_port_alloc(
7089 bp, bp->nge_port,
7090 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
7091 }
7092 if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
7093 bnxt_hwrm_tunnel_dst_port_free(
7094 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
7095 }
7096 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
7097 bnxt_hwrm_port_qstats(bp);
7098
7099 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
7100 int rc;
7101
7102 mutex_lock(&bp->link_lock);
7103 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
7104 &bp->sp_event))
7105 bnxt_hwrm_phy_qcaps(bp);
7106
7107 rc = bnxt_update_link(bp, true);
7108 mutex_unlock(&bp->link_lock);
7109 if (rc)
7110 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
7111 rc);
7112 }
7113 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
7114 mutex_lock(&bp->link_lock);
7115 bnxt_get_port_module_status(bp);
7116 mutex_unlock(&bp->link_lock);
7117 }
7118 /* The functions below will clear BNXT_STATE_IN_SP_TASK. They
7119 * must be the last functions called before exiting.
7120 */
7121 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
7122 bnxt_reset(bp, false);
7123
7124 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
7125 bnxt_reset(bp, true);
7126
7127 smp_mb__before_atomic();
7128 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
7129 }
7130
7131 /* Under rtnl_lock */
7132 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
7133 int tx_xdp)
7134 {
7135 int max_rx, max_tx, tx_sets = 1;
7136 int tx_rings_needed;
7137 int rc;
7138
7139 if (tcs)
7140 tx_sets = tcs;
7141
7142 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
7143 if (rc)
7144 return rc;
7145
7146 if (max_rx < rx)
7147 return -ENOMEM;
7148
7149 tx_rings_needed = tx * tx_sets + tx_xdp;
7150 if (max_tx < tx_rings_needed)
7151 return -ENOMEM;
7152
7153 return bnxt_hwrm_check_tx_rings(bp, tx_rings_needed);
7154 }
7155
7156 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
7157 {
7158 if (bp->bar2) {
7159 pci_iounmap(pdev, bp->bar2);
7160 bp->bar2 = NULL;
7161 }
7162
7163 if (bp->bar1) {
7164 pci_iounmap(pdev, bp->bar1);
7165 bp->bar1 = NULL;
7166 }
7167
7168 if (bp->bar0) {
7169 pci_iounmap(pdev, bp->bar0);
7170 bp->bar0 = NULL;
7171 }
7172 }
7173
7174 static void bnxt_cleanup_pci(struct bnxt *bp)
7175 {
7176 bnxt_unmap_bars(bp, bp->pdev);
7177 pci_release_regions(bp->pdev);
7178 pci_disable_device(bp->pdev);
7179 }
7180
7181 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
7182 {
7183 int rc;
7184 struct bnxt *bp = netdev_priv(dev);
7185
7186 SET_NETDEV_DEV(dev, &pdev->dev);
7187
7188 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7189 rc = pci_enable_device(pdev);
7190 if (rc) {
7191 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
7192 goto init_err;
7193 }
7194
7195 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7196 dev_err(&pdev->dev,
7197 "Cannot find PCI device base address, aborting\n");
7198 rc = -ENODEV;
7199 goto init_err_disable;
7200 }
7201
7202 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7203 if (rc) {
7204 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
7205 goto init_err_disable;
7206 }
7207
7208 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
7209 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
7210 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
rc = -EIO;
7211 goto init_err_disable;
7212 }
7213
7214 pci_set_master(pdev);
7215
7216 bp->dev = dev;
7217 bp->pdev = pdev;
7218
7219 bp->bar0 = pci_ioremap_bar(pdev, 0);
7220 if (!bp->bar0) {
7221 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
7222 rc = -ENOMEM;
7223 goto init_err_release;
7224 }
7225
7226 bp->bar1 = pci_ioremap_bar(pdev, 2);
7227 if (!bp->bar1) {
7228 dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
7229 rc = -ENOMEM;
7230 goto init_err_release;
7231 }
7232
7233 bp->bar2 = pci_ioremap_bar(pdev, 4);
7234 if (!bp->bar2) {
7235 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
7236 rc = -ENOMEM;
7237 goto init_err_release;
7238 }
7239
7240 pci_enable_pcie_error_reporting(pdev);
7241
7242 INIT_WORK(&bp->sp_task, bnxt_sp_task);
7243
7244 spin_lock_init(&bp->ntp_fltr_lock);
7245
7246 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
7247 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
7248
7249 /* tick values in microseconds */
7250 bp->rx_coal_ticks = 12;
7251 bp->rx_coal_bufs = 30;
7252 bp->rx_coal_ticks_irq = 1;
7253 bp->rx_coal_bufs_irq = 2;
7254
7255 bp->tx_coal_ticks = 25;
7256 bp->tx_coal_bufs = 30;
7257 bp->tx_coal_ticks_irq = 2;
7258 bp->tx_coal_bufs_irq = 2;
7259
7260 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
7261
7262 init_timer(&bp->timer);
7263 bp->timer.data = (unsigned long)bp;
7264 bp->timer.function = bnxt_timer;
7265 bp->current_interval = BNXT_TIMER_INTERVAL;
7266
7267 clear_bit(BNXT_STATE_OPEN, &bp->state);
7268 return 0;
7269
7270 init_err_release:
7271 bnxt_unmap_bars(bp, pdev);
7272 pci_release_regions(pdev);
7273
7274 init_err_disable:
7275 pci_disable_device(pdev);
7276
7277 init_err:
7278 return rc;
7279 }
7280
7281 /* rtnl_lock held */
7282 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
7283 {
7284 struct sockaddr *addr = p;
7285 struct bnxt *bp = netdev_priv(dev);
7286 int rc = 0;
7287
7288 if (!is_valid_ether_addr(addr->sa_data))
7289 return -EADDRNOTAVAIL;
7290
7291 rc = bnxt_approve_mac(bp, addr->sa_data);
7292 if (rc)
7293 return rc;
7294
7295 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
7296 return 0;
7297
7298 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7299 if (netif_running(dev)) {
7300 bnxt_close_nic(bp, false, false);
7301 rc = bnxt_open_nic(bp, false, false);
7302 }
7303
7304 return rc;
7305 }
7306
7307 /* rtnl_lock held */
7308 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
7309 {
7310 struct bnxt *bp = netdev_priv(dev);
7311
7312 if (netif_running(dev))
7313 bnxt_close_nic(bp, true, false);
7314
7315 dev->mtu = new_mtu;
7316 bnxt_set_ring_params(bp);
7317
7318 if (netif_running(dev))
7319 return bnxt_open_nic(bp, true, false);
7320
7321 return 0;
7322 }
7323
7324 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
7325 {
7326 struct bnxt *bp = netdev_priv(dev);
7327 bool sh = false;
7328 int rc;
7329
7330 if (tc > bp->max_tc) {
7331 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
7332 tc, bp->max_tc);
7333 return -EINVAL;
7334 }
7335
7336 if (netdev_get_num_tc(dev) == tc)
7337 return 0;
7338
7339 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
7340 sh = true;
7341
7342 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
7343 sh, tc, bp->tx_nr_rings_xdp);
7344 if (rc)
7345 return rc;
7346
7347 	/* Need to close the device and do hw resource re-allocations */
7348 if (netif_running(bp->dev))
7349 bnxt_close_nic(bp, true, false);
7350
7351 if (tc) {
7352 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
7353 netdev_set_num_tc(dev, tc);
7354 } else {
7355 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
7356 netdev_reset_tc(dev);
7357 }
7358 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
7359 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
7360 bp->tx_nr_rings + bp->rx_nr_rings;
7361 bp->num_stat_ctxs = bp->cp_nr_rings;
7362
7363 if (netif_running(bp->dev))
7364 return bnxt_open_nic(bp, true, false);
7365
7366 return 0;
7367 }
7368
7369 static int bnxt_setup_flower(struct net_device *dev,
7370 struct tc_cls_flower_offload *cls_flower)
7371 {
7372 struct bnxt *bp = netdev_priv(dev);
7373
7374 if (BNXT_VF(bp))
7375 return -EOPNOTSUPP;
7376
7377 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, cls_flower);
7378 }
7379
7380 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
7381 void *type_data)
7382 {
7383 switch (type) {
7384 case TC_SETUP_CLSFLOWER:
7385 return bnxt_setup_flower(dev, type_data);
7386 case TC_SETUP_MQPRIO: {
7387 struct tc_mqprio_qopt *mqprio = type_data;
7388
7389 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
7390
7391 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
7392 }
7393 default:
7394 return -EOPNOTSUPP;
7395 }
7396 }
7397
7398 #ifdef CONFIG_RFS_ACCEL
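/* Return true if two ntuple filters match on network/transport protocol,
 * IP addresses, L4 ports, dissector control flags and MAC addresses.
 */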
7399 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
7400 struct bnxt_ntuple_filter *f2)
7401 {
7402 struct flow_keys *keys1 = &f1->fkeys;
7403 struct flow_keys *keys2 = &f2->fkeys;
7404
7405 if (keys1->basic.n_proto != keys2->basic.n_proto ||
7406 keys1->basic.ip_proto != keys2->basic.ip_proto)
7407 return false;
7408
7409 if (keys1->basic.n_proto == htons(ETH_P_IP)) {
7410 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
7411 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
7412 return false;
7413 } else {
7414 if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
7415 sizeof(keys1->addrs.v6addrs.src)) ||
7416 memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
7417 sizeof(keys1->addrs.v6addrs.dst)))
7418 return false;
7419 }
7420
7421 if (keys1->ports.ports == keys2->ports.ports &&
7422 keys1->control.flags == keys2->control.flags &&
7423 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
7424 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
7425 return true;
7426
7427 return false;
7428 }
7429
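/* aRFS .ndo_rx_flow_steer handler: dissect the flow keys from the skb, skip
 * flows already present in the ntuple hash table, allocate a software filter
 * ID and defer the hardware programming to the sp_task
 * (bnxt_cfg_ntp_filters()).
 */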
7430 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
7431 u16 rxq_index, u32 flow_id)
7432 {
7433 struct bnxt *bp = netdev_priv(dev);
7434 struct bnxt_ntuple_filter *fltr, *new_fltr;
7435 struct flow_keys *fkeys;
7436 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
7437 int rc = 0, idx, bit_id, l2_idx = 0;
7438 struct hlist_head *head;
7439
7440 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
7441 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
7442 int off = 0, j;
7443
7444 netif_addr_lock_bh(dev);
7445 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
7446 if (ether_addr_equal(eth->h_dest,
7447 vnic->uc_list + off)) {
7448 l2_idx = j + 1;
7449 break;
7450 }
7451 }
7452 netif_addr_unlock_bh(dev);
7453 if (!l2_idx)
7454 return -EINVAL;
7455 }
7456 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
7457 if (!new_fltr)
7458 return -ENOMEM;
7459
7460 fkeys = &new_fltr->fkeys;
7461 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
7462 rc = -EPROTONOSUPPORT;
7463 goto err_free;
7464 }
7465
7466 if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
7467 fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
7468 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
7469 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
7470 rc = -EPROTONOSUPPORT;
7471 goto err_free;
7472 }
7473 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
7474 bp->hwrm_spec_code < 0x10601) {
7475 rc = -EPROTONOSUPPORT;
7476 goto err_free;
7477 }
7478 if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) &&
7479 bp->hwrm_spec_code < 0x10601) {
7480 rc = -EPROTONOSUPPORT;
7481 goto err_free;
7482 }
7483
7484 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
7485 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
7486
7487 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
7488 head = &bp->ntp_fltr_hash_tbl[idx];
7489 rcu_read_lock();
7490 hlist_for_each_entry_rcu(fltr, head, hash) {
7491 if (bnxt_fltr_match(fltr, new_fltr)) {
7492 rcu_read_unlock();
7493 rc = 0;
7494 goto err_free;
7495 }
7496 }
7497 rcu_read_unlock();
7498
7499 spin_lock_bh(&bp->ntp_fltr_lock);
7500 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
7501 BNXT_NTP_FLTR_MAX_FLTR, 0);
7502 if (bit_id < 0) {
7503 spin_unlock_bh(&bp->ntp_fltr_lock);
7504 rc = -ENOMEM;
7505 goto err_free;
7506 }
7507
7508 new_fltr->sw_id = (u16)bit_id;
7509 new_fltr->flow_id = flow_id;
7510 new_fltr->l2_fltr_idx = l2_idx;
7511 new_fltr->rxq = rxq_index;
7512 hlist_add_head_rcu(&new_fltr->hash, head);
7513 bp->ntp_fltr_count++;
7514 spin_unlock_bh(&bp->ntp_fltr_lock);
7515
7516 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
7517 bnxt_queue_sp_work(bp);
7518
7519 return new_fltr->sw_id;
7520
7521 err_free:
7522 kfree(new_fltr);
7523 return rc;
7524 }
7525
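/* Walk the ntuple filter hash table from the sp_task: free filters that RPS
 * has expired, program newly added ones via HWRM, and drop any filter that
 * could not be programmed.
 */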
7526 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
7527 {
7528 int i;
7529
7530 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
7531 struct hlist_head *head;
7532 struct hlist_node *tmp;
7533 struct bnxt_ntuple_filter *fltr;
7534 int rc;
7535
7536 head = &bp->ntp_fltr_hash_tbl[i];
7537 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
7538 bool del = false;
7539
7540 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
7541 if (rps_may_expire_flow(bp->dev, fltr->rxq,
7542 fltr->flow_id,
7543 fltr->sw_id)) {
7544 bnxt_hwrm_cfa_ntuple_filter_free(bp,
7545 fltr);
7546 del = true;
7547 }
7548 } else {
7549 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
7550 fltr);
7551 if (rc)
7552 del = true;
7553 else
7554 set_bit(BNXT_FLTR_VALID, &fltr->state);
7555 }
7556
7557 if (del) {
7558 spin_lock_bh(&bp->ntp_fltr_lock);
7559 hlist_del_rcu(&fltr->hash);
7560 bp->ntp_fltr_count--;
7561 spin_unlock_bh(&bp->ntp_fltr_lock);
7562 synchronize_rcu();
7563 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
7564 kfree(fltr);
7565 }
7566 }
7567 }
7568 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
7569 		netdev_info(bp->dev, "Received PF driver unload event!\n");
7570 }
7571
7572 #else
7573
7574 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
7575 {
7576 }
7577
7578 #endif /* CONFIG_RFS_ACCEL */
7579
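/* Track VXLAN/GENEVE UDP port usage and schedule an sp_task event to add the
 * tunnel port in firmware when the first user of the port appears.
 */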
7580 static void bnxt_udp_tunnel_add(struct net_device *dev,
7581 struct udp_tunnel_info *ti)
7582 {
7583 struct bnxt *bp = netdev_priv(dev);
7584
7585 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
7586 return;
7587
7588 if (!netif_running(dev))
7589 return;
7590
7591 switch (ti->type) {
7592 case UDP_TUNNEL_TYPE_VXLAN:
7593 if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
7594 return;
7595
7596 bp->vxlan_port_cnt++;
7597 if (bp->vxlan_port_cnt == 1) {
7598 bp->vxlan_port = ti->port;
7599 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
7600 bnxt_queue_sp_work(bp);
7601 }
7602 break;
7603 case UDP_TUNNEL_TYPE_GENEVE:
7604 if (bp->nge_port_cnt && bp->nge_port != ti->port)
7605 return;
7606
7607 bp->nge_port_cnt++;
7608 if (bp->nge_port_cnt == 1) {
7609 bp->nge_port = ti->port;
7610 set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
7611 }
7612 break;
7613 default:
7614 return;
7615 }
7616
7617 bnxt_queue_sp_work(bp);
7618 }
7619
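/* Drop a VXLAN/GENEVE UDP port reference and schedule an sp_task event to
 * delete the tunnel port in firmware when the last user goes away.
 */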
7620 static void bnxt_udp_tunnel_del(struct net_device *dev,
7621 struct udp_tunnel_info *ti)
7622 {
7623 struct bnxt *bp = netdev_priv(dev);
7624
7625 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
7626 return;
7627
7628 if (!netif_running(dev))
7629 return;
7630
7631 switch (ti->type) {
7632 case UDP_TUNNEL_TYPE_VXLAN:
7633 if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
7634 return;
7635 bp->vxlan_port_cnt--;
7636
7637 if (bp->vxlan_port_cnt != 0)
7638 return;
7639
7640 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
7641 break;
7642 case UDP_TUNNEL_TYPE_GENEVE:
7643 if (!bp->nge_port_cnt || bp->nge_port != ti->port)
7644 return;
7645 bp->nge_port_cnt--;
7646
7647 if (bp->nge_port_cnt != 0)
7648 return;
7649
7650 set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
7651 break;
7652 default:
7653 return;
7654 }
7655
7656 bnxt_queue_sp_work(bp);
7657 }
7658
7659 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
7660 struct net_device *dev, u32 filter_mask,
7661 int nlflags)
7662 {
7663 struct bnxt *bp = netdev_priv(dev);
7664
7665 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
7666 nlflags, filter_mask, NULL);
7667 }
7668
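/* Parse the IFLA_BRIDGE_MODE attribute and program the new bridge mode via
 * bnxt_hwrm_set_br_mode(); only supported on single-PF devices with new
 * enough firmware.
 */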
7669 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
7670 u16 flags)
7671 {
7672 struct bnxt *bp = netdev_priv(dev);
7673 struct nlattr *attr, *br_spec;
7674 int rem, rc = 0;
7675
7676 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
7677 return -EOPNOTSUPP;
7678
7679 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
7680 if (!br_spec)
7681 return -EINVAL;
7682
7683 nla_for_each_nested(attr, br_spec, rem) {
7684 u16 mode;
7685
7686 if (nla_type(attr) != IFLA_BRIDGE_MODE)
7687 continue;
7688
7689 if (nla_len(attr) < sizeof(mode))
7690 return -EINVAL;
7691
7692 mode = nla_get_u16(attr);
7693 if (mode == bp->br_mode)
7694 break;
7695
7696 rc = bnxt_hwrm_set_br_mode(bp, mode);
7697 if (!rc)
7698 bp->br_mode = mode;
7699 break;
7700 }
7701 return rc;
7702 }
7703
7704 static int bnxt_get_phys_port_name(struct net_device *dev, char *buf,
7705 size_t len)
7706 {
7707 struct bnxt *bp = netdev_priv(dev);
7708 int rc;
7709
7710 	/* The PF and its VF-reps only support the switchdev framework */
7711 if (!BNXT_PF(bp))
7712 return -EOPNOTSUPP;
7713
7714 rc = snprintf(buf, len, "p%d", bp->pf.port_id);
7715
7716 if (rc >= len)
7717 return -EOPNOTSUPP;
7718 return 0;
7719 }
7720
7721 int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr)
7722 {
7723 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
7724 return -EOPNOTSUPP;
7725
7726 	/* The PF and its VF-reps only support the switchdev framework */
7727 if (!BNXT_PF(bp))
7728 return -EOPNOTSUPP;
7729
7730 switch (attr->id) {
7731 case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
7732 		/* In SRIOV each PF-pool (PF + child VFs) serves as a
7733 		 * switching domain; the PF's perm mac-addr can be used
7734 		 * as the unique parent-id.
7735 */
7736 attr->u.ppid.id_len = ETH_ALEN;
7737 ether_addr_copy(attr->u.ppid.id, bp->pf.mac_addr);
7738 break;
7739 default:
7740 return -EOPNOTSUPP;
7741 }
7742 return 0;
7743 }
7744
7745 static int bnxt_swdev_port_attr_get(struct net_device *dev,
7746 struct switchdev_attr *attr)
7747 {
7748 return bnxt_port_attr_get(netdev_priv(dev), attr);
7749 }
7750
7751 static const struct switchdev_ops bnxt_switchdev_ops = {
7752 .switchdev_port_attr_get = bnxt_swdev_port_attr_get
7753 };
7754
7755 static const struct net_device_ops bnxt_netdev_ops = {
7756 .ndo_open = bnxt_open,
7757 .ndo_start_xmit = bnxt_start_xmit,
7758 .ndo_stop = bnxt_close,
7759 .ndo_get_stats64 = bnxt_get_stats64,
7760 .ndo_set_rx_mode = bnxt_set_rx_mode,
7761 .ndo_do_ioctl = bnxt_ioctl,
7762 .ndo_validate_addr = eth_validate_addr,
7763 .ndo_set_mac_address = bnxt_change_mac_addr,
7764 .ndo_change_mtu = bnxt_change_mtu,
7765 .ndo_fix_features = bnxt_fix_features,
7766 .ndo_set_features = bnxt_set_features,
7767 .ndo_tx_timeout = bnxt_tx_timeout,
7768 #ifdef CONFIG_BNXT_SRIOV
7769 .ndo_get_vf_config = bnxt_get_vf_config,
7770 .ndo_set_vf_mac = bnxt_set_vf_mac,
7771 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
7772 .ndo_set_vf_rate = bnxt_set_vf_bw,
7773 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
7774 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
7775 #endif
7776 #ifdef CONFIG_NET_POLL_CONTROLLER
7777 .ndo_poll_controller = bnxt_poll_controller,
7778 #endif
7779 .ndo_setup_tc = bnxt_setup_tc,
7780 #ifdef CONFIG_RFS_ACCEL
7781 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
7782 #endif
7783 .ndo_udp_tunnel_add = bnxt_udp_tunnel_add,
7784 .ndo_udp_tunnel_del = bnxt_udp_tunnel_del,
7785 .ndo_xdp = bnxt_xdp,
7786 .ndo_bridge_getlink = bnxt_bridge_getlink,
7787 .ndo_bridge_setlink = bnxt_bridge_setlink,
7788 .ndo_get_phys_port_name = bnxt_get_phys_port_name
7789 };
7790
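/* PCI remove handler: disable SR-IOV, unregister devlink and the netdev, stop
 * the sp_task, and release all HWRM, DCB, TC and PCI resources.
 */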
7791 static void bnxt_remove_one(struct pci_dev *pdev)
7792 {
7793 struct net_device *dev = pci_get_drvdata(pdev);
7794 struct bnxt *bp = netdev_priv(dev);
7795
7796 if (BNXT_PF(bp)) {
7797 bnxt_sriov_disable(bp);
7798 bnxt_dl_unregister(bp);
7799 }
7800
7801 pci_disable_pcie_error_reporting(pdev);
7802 unregister_netdev(dev);
7803 bnxt_shutdown_tc(bp);
7804 bnxt_cancel_sp_work(bp);
7805 bp->sp_event = 0;
7806
7807 bnxt_clear_int_mode(bp);
7808 bnxt_hwrm_func_drv_unrgtr(bp);
7809 bnxt_free_hwrm_resources(bp);
7810 bnxt_free_hwrm_short_cmd_req(bp);
7811 bnxt_ethtool_free(bp);
7812 bnxt_dcb_free(bp);
7813 kfree(bp->edev);
7814 bp->edev = NULL;
7815 if (bp->xdp_prog)
7816 bpf_prog_put(bp->xdp_prog);
7817 bnxt_cleanup_pci(bp);
7818 free_netdev(dev);
7819 }
7820
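/* Query PHY capabilities and the current link state, then seed the ethtool
 * link settings (autoneg, advertised speeds, flow control) from the NVM
 * configured defaults.
 */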
7821 static int bnxt_probe_phy(struct bnxt *bp)
7822 {
7823 int rc = 0;
7824 struct bnxt_link_info *link_info = &bp->link_info;
7825
7826 rc = bnxt_hwrm_phy_qcaps(bp);
7827 if (rc) {
7828 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
7829 rc);
7830 return rc;
7831 }
7832 mutex_init(&bp->link_lock);
7833
7834 rc = bnxt_update_link(bp, false);
7835 if (rc) {
7836 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
7837 rc);
7838 return rc;
7839 }
7840
7841 /* Older firmware does not have supported_auto_speeds, so assume
7842 * that all supported speeds can be autonegotiated.
7843 */
7844 if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
7845 link_info->support_auto_speeds = link_info->support_speeds;
7846
7847 	/* initialize the ethtool settings copy with NVM settings */
7848 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
7849 link_info->autoneg = BNXT_AUTONEG_SPEED;
7850 if (bp->hwrm_spec_code >= 0x10201) {
7851 if (link_info->auto_pause_setting &
7852 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
7853 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
7854 } else {
7855 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
7856 }
7857 link_info->advertising = link_info->auto_link_speeds;
7858 } else {
7859 link_info->req_link_speed = link_info->force_link_speed;
7860 link_info->req_duplex = link_info->duplex_setting;
7861 }
7862 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
7863 link_info->req_flow_ctrl =
7864 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
7865 else
7866 link_info->req_flow_ctrl = link_info->force_pause_setting;
7867 return rc;
7868 }
7869
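/* Return the MSI-X table size of the device, or 1 if MSI-X is absent. */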
7870 static int bnxt_get_max_irq(struct pci_dev *pdev)
7871 {
7872 u16 ctrl;
7873
7874 if (!pdev->msix_cap)
7875 return 1;
7876
7877 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
7878 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
7879 }
7880
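/* Compute the maximum RX, TX and completion rings available to this function
 * (PF or VF view), capped by stat contexts, IRQs, hardware ring groups and
 * the aggregation-ring requirement.
 */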
7881 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
7882 int *max_cp)
7883 {
7884 int max_ring_grps = 0;
7885
7886 #ifdef CONFIG_BNXT_SRIOV
7887 if (!BNXT_PF(bp)) {
7888 *max_tx = bp->vf.max_tx_rings;
7889 *max_rx = bp->vf.max_rx_rings;
7890 *max_cp = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
7891 *max_cp = min_t(int, *max_cp, bp->vf.max_stat_ctxs);
7892 max_ring_grps = bp->vf.max_hw_ring_grps;
7893 } else
7894 #endif
7895 {
7896 *max_tx = bp->pf.max_tx_rings;
7897 *max_rx = bp->pf.max_rx_rings;
7898 *max_cp = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
7899 *max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs);
7900 max_ring_grps = bp->pf.max_hw_ring_grps;
7901 }
7902 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
7903 *max_cp -= 1;
7904 *max_rx -= 2;
7905 }
7906 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7907 *max_rx >>= 1;
7908 *max_rx = min_t(int, *max_rx, max_ring_grps);
7909 }
7910
7911 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
7912 {
7913 int rx, tx, cp;
7914
7915 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
7916 *max_rx = rx;
7917 *max_tx = tx;
7918 if (!rx || !tx || !cp)
7919 return -ENOMEM;
7920
7921 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
7922 }
7923
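/* Like bnxt_get_max_rings(), but fall back to disabling aggregation rings
 * (and LRO) when rings are scarce, and set aside minimum completion/stat
 * resources for RoCE when the device supports it.
 */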
7924 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
7925 bool shared)
7926 {
7927 int rc;
7928
7929 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
7930 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
7931 /* Not enough rings, try disabling agg rings. */
7932 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
7933 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
7934 if (rc) {
7935 /* set BNXT_FLAG_AGG_RINGS back for consistency */
7936 bp->flags |= BNXT_FLAG_AGG_RINGS;
7937 return rc;
7938 }
7939 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
7940 bp->dev->hw_features &= ~NETIF_F_LRO;
7941 bp->dev->features &= ~NETIF_F_LRO;
7942 bnxt_set_ring_params(bp);
7943 }
7944
7945 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
7946 int max_cp, max_stat, max_irq;
7947
7948 /* Reserve minimum resources for RoCE */
7949 max_cp = bnxt_get_max_func_cp_rings(bp);
7950 max_stat = bnxt_get_max_func_stat_ctxs(bp);
7951 max_irq = bnxt_get_max_func_irqs(bp);
7952 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
7953 max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
7954 max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
7955 return 0;
7956
7957 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
7958 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
7959 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
7960 max_cp = min_t(int, max_cp, max_irq);
7961 max_cp = min_t(int, max_cp, max_stat);
7962 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
7963 if (rc)
7964 rc = 0;
7965 }
7966 return rc;
7967 }
7968
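/* Pick default RX/TX ring counts from the RSS queue default (reduced on
 * multi-port cards), then reserve the TX rings with firmware.
 */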
7969 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
7970 {
7971 int dflt_rings, max_rx_rings, max_tx_rings, rc;
7972
7973 if (sh)
7974 bp->flags |= BNXT_FLAG_SHARED_RINGS;
7975 dflt_rings = netif_get_num_default_rss_queues();
7976 	/* Reduce default rings on multi-port cards to lower memory usage */
7977 if (bp->port_count > 1)
7978 dflt_rings = min_t(int, dflt_rings, 4);
7979 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
7980 if (rc)
7981 return rc;
7982 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
7983 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
7984
7985 rc = bnxt_hwrm_reserve_tx_rings(bp, &bp->tx_nr_rings_per_tc);
7986 if (rc)
7987 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
7988
7989 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
7990 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
7991 bp->tx_nr_rings + bp->rx_nr_rings;
7992 bp->num_stat_ctxs = bp->cp_nr_rings;
7993 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7994 bp->rx_nr_rings++;
7995 bp->cp_nr_rings++;
7996 }
7997 return rc;
7998 }
7999
8000 void bnxt_restore_pf_fw_resources(struct bnxt *bp)
8001 {
8002 ASSERT_RTNL();
8003 bnxt_hwrm_func_qcaps(bp);
8004 bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP);
8005 }
8006
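/* Set the netdev MAC address: the PF uses its permanent MAC; a VF uses the
 * PF-assigned MAC if valid, otherwise a random address approved by the PF.
 */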
8007 static int bnxt_init_mac_addr(struct bnxt *bp)
8008 {
8009 int rc = 0;
8010
8011 if (BNXT_PF(bp)) {
8012 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
8013 } else {
8014 #ifdef CONFIG_BNXT_SRIOV
8015 struct bnxt_vf_info *vf = &bp->vf;
8016
8017 if (is_valid_ether_addr(vf->mac_addr)) {
8018 			/* overwrite netdev dev_addr with admin VF MAC */
8019 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
8020 } else {
8021 eth_hw_addr_random(bp->dev);
8022 rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
8023 }
8024 #endif
8025 }
8026 return rc;
8027 }
8028
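/* Log the PCIe link speed and width reported for the physical function. */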
8029 static void bnxt_parse_log_pcie_link(struct bnxt *bp)
8030 {
8031 enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
8032 enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
8033
8034 if (pcie_get_minimum_link(pci_physfn(bp->pdev), &speed, &width) ||
8035 speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
8036 netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
8037 else
8038 netdev_info(bp->dev, "PCIe: Speed %s Width x%d\n",
8039 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
8040 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
8041 speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
8042 "Unknown", width);
8043 }
8044
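/* PCI probe handler: map BARs, query firmware capabilities, set up netdev
 * features, default rings and interrupt mode, then register the netdev.
 */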
8045 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8046 {
8047 static int version_printed;
8048 struct net_device *dev;
8049 struct bnxt *bp;
8050 int rc, max_irqs;
8051
8052 if (pci_is_bridge(pdev))
8053 return -ENODEV;
8054
8055 if (version_printed++ == 0)
8056 pr_info("%s", version);
8057
8058 max_irqs = bnxt_get_max_irq(pdev);
8059 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
8060 if (!dev)
8061 return -ENOMEM;
8062
8063 bp = netdev_priv(dev);
8064
8065 if (bnxt_vf_pciid(ent->driver_data))
8066 bp->flags |= BNXT_FLAG_VF;
8067
8068 if (pdev->msix_cap)
8069 bp->flags |= BNXT_FLAG_MSIX_CAP;
8070
8071 rc = bnxt_init_board(pdev, dev);
8072 if (rc < 0)
8073 goto init_err_free;
8074
8075 dev->netdev_ops = &bnxt_netdev_ops;
8076 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
8077 dev->ethtool_ops = &bnxt_ethtool_ops;
8078 SWITCHDEV_SET_OPS(dev, &bnxt_switchdev_ops);
8079 pci_set_drvdata(pdev, dev);
8080
8081 rc = bnxt_alloc_hwrm_resources(bp);
8082 if (rc)
8083 goto init_err_pci_clean;
8084
8085 mutex_init(&bp->hwrm_cmd_lock);
8086 rc = bnxt_hwrm_ver_get(bp);
8087 if (rc)
8088 goto init_err_pci_clean;
8089
8090 if (bp->flags & BNXT_FLAG_SHORT_CMD) {
8091 rc = bnxt_alloc_hwrm_short_cmd_req(bp);
8092 if (rc)
8093 goto init_err_pci_clean;
8094 }
8095
8096 rc = bnxt_hwrm_func_reset(bp);
8097 if (rc)
8098 goto init_err_pci_clean;
8099
8100 bnxt_hwrm_fw_set_time(bp);
8101
8102 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
8103 NETIF_F_TSO | NETIF_F_TSO6 |
8104 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
8105 NETIF_F_GSO_IPXIP4 |
8106 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
8107 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
8108 NETIF_F_RXCSUM | NETIF_F_GRO;
8109
8110 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
8111 dev->hw_features |= NETIF_F_LRO;
8112
8113 dev->hw_enc_features =
8114 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
8115 NETIF_F_TSO | NETIF_F_TSO6 |
8116 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
8117 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
8118 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
8119 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
8120 NETIF_F_GSO_GRE_CSUM;
8121 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
8122 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
8123 NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
8124 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
8125 dev->priv_flags |= IFF_UNICAST_FLT;
8126
8127 /* MTU range: 60 - 9500 */
8128 dev->min_mtu = ETH_ZLEN;
8129 dev->max_mtu = BNXT_MAX_MTU;
8130
8131 #ifdef CONFIG_BNXT_SRIOV
8132 init_waitqueue_head(&bp->sriov_cfg_wait);
8133 mutex_init(&bp->sriov_lock);
8134 #endif
8135 bp->gro_func = bnxt_gro_func_5730x;
8136 if (BNXT_CHIP_P4_PLUS(bp))
8137 bp->gro_func = bnxt_gro_func_5731x;
8138 else
8139 bp->flags |= BNXT_FLAG_DOUBLE_DB;
8140
8141 rc = bnxt_hwrm_func_drv_rgtr(bp);
8142 if (rc)
8143 goto init_err_pci_clean;
8144
8145 rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
8146 if (rc)
8147 goto init_err_pci_clean;
8148
8149 bp->ulp_probe = bnxt_ulp_probe;
8150
8151 /* Get the MAX capabilities for this function */
8152 rc = bnxt_hwrm_func_qcaps(bp);
8153 if (rc) {
8154 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
8155 rc);
8156 rc = -1;
8157 goto init_err_pci_clean;
8158 }
8159 rc = bnxt_init_mac_addr(bp);
8160 if (rc) {
8161 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
8162 rc = -EADDRNOTAVAIL;
8163 goto init_err_pci_clean;
8164 }
8165 rc = bnxt_hwrm_queue_qportcfg(bp);
8166 if (rc) {
8167 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
8168 rc);
8169 rc = -1;
8170 goto init_err_pci_clean;
8171 }
8172
8173 bnxt_hwrm_func_qcfg(bp);
8174 bnxt_hwrm_port_led_qcaps(bp);
8175 bnxt_ethtool_init(bp);
8176 bnxt_dcb_init(bp);
8177
8178 rc = bnxt_probe_phy(bp);
8179 if (rc)
8180 goto init_err_pci_clean;
8181
8182 bnxt_set_rx_skb_mode(bp, false);
8183 bnxt_set_tpa_flags(bp);
8184 bnxt_set_ring_params(bp);
8185 bnxt_set_max_func_irqs(bp, max_irqs);
8186 rc = bnxt_set_dflt_rings(bp, true);
8187 if (rc) {
8188 netdev_err(bp->dev, "Not enough rings available.\n");
8189 rc = -ENOMEM;
8190 goto init_err_pci_clean;
8191 }
8192
8193 /* Default RSS hash cfg. */
8194 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
8195 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
8196 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
8197 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
8198 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
8199 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
8200 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
8201 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
8202 }
8203
8204 bnxt_hwrm_vnic_qcaps(bp);
8205 if (bnxt_rfs_supported(bp)) {
8206 dev->hw_features |= NETIF_F_NTUPLE;
8207 if (bnxt_rfs_capable(bp)) {
8208 bp->flags |= BNXT_FLAG_RFS;
8209 dev->features |= NETIF_F_NTUPLE;
8210 }
8211 }
8212
8213 if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
8214 bp->flags |= BNXT_FLAG_STRIP_VLAN;
8215
8216 rc = bnxt_init_int_mode(bp);
8217 if (rc)
8218 goto init_err_pci_clean;
8219
8220 bnxt_get_wol_settings(bp);
8221 if (bp->flags & BNXT_FLAG_WOL_CAP)
8222 device_set_wakeup_enable(&pdev->dev, bp->wol);
8223 else
8224 device_set_wakeup_capable(&pdev->dev, false);
8225
8226 if (BNXT_PF(bp)) {
8227 if (!bnxt_pf_wq) {
8228 bnxt_pf_wq =
8229 create_singlethread_workqueue("bnxt_pf_wq");
8230 if (!bnxt_pf_wq) {
8231 dev_err(&pdev->dev, "Unable to create workqueue.\n");
8232 goto init_err_pci_clean;
8233 }
8234 }
8235 bnxt_init_tc(bp);
8236 }
8237
8238 rc = register_netdev(dev);
8239 if (rc)
8240 goto init_err_cleanup_tc;
8241
8242 if (BNXT_PF(bp))
8243 bnxt_dl_register(bp);
8244
8245 netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
8246 board_info[ent->driver_data].name,
8247 (long)pci_resource_start(pdev, 0), dev->dev_addr);
8248
8249 bnxt_parse_log_pcie_link(bp);
8250
8251 return 0;
8252
8253 init_err_cleanup_tc:
8254 bnxt_shutdown_tc(bp);
8255 bnxt_clear_int_mode(bp);
8256
8257 init_err_pci_clean:
8258 bnxt_free_hwrm_short_cmd_req(bp);
8259 bnxt_free_hwrm_resources(bp);
8260 bnxt_cleanup_pci(bp);
8261
8262 init_err_free:
8263 free_netdev(dev);
8264 return rc;
8265 }
8266
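/* PCI shutdown handler: close the netdev, quiesce ULPs, and on power-off arm
 * wake-on-LAN and put the device into D3hot.
 */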
8267 static void bnxt_shutdown(struct pci_dev *pdev)
8268 {
8269 struct net_device *dev = pci_get_drvdata(pdev);
8270 struct bnxt *bp;
8271
8272 if (!dev)
8273 return;
8274
8275 rtnl_lock();
8276 bp = netdev_priv(dev);
8277 if (!bp)
8278 goto shutdown_exit;
8279
8280 if (netif_running(dev))
8281 dev_close(dev);
8282
8283 bnxt_ulp_shutdown(bp);
8284
8285 if (system_state == SYSTEM_POWER_OFF) {
8286 bnxt_clear_int_mode(bp);
8287 pci_wake_from_d3(pdev, bp->wol);
8288 pci_set_power_state(pdev, PCI_D3hot);
8289 }
8290
8291 shutdown_exit:
8292 rtnl_unlock();
8293 }
8294
8295 #ifdef CONFIG_PM_SLEEP
8296 static int bnxt_suspend(struct device *device)
8297 {
8298 struct pci_dev *pdev = to_pci_dev(device);
8299 struct net_device *dev = pci_get_drvdata(pdev);
8300 struct bnxt *bp = netdev_priv(dev);
8301 int rc = 0;
8302
8303 rtnl_lock();
8304 if (netif_running(dev)) {
8305 netif_device_detach(dev);
8306 rc = bnxt_close(dev);
8307 }
8308 bnxt_hwrm_func_drv_unrgtr(bp);
8309 rtnl_unlock();
8310 return rc;
8311 }
8312
8313 static int bnxt_resume(struct device *device)
8314 {
8315 struct pci_dev *pdev = to_pci_dev(device);
8316 struct net_device *dev = pci_get_drvdata(pdev);
8317 struct bnxt *bp = netdev_priv(dev);
8318 int rc = 0;
8319
8320 rtnl_lock();
8321 if (bnxt_hwrm_ver_get(bp) || bnxt_hwrm_func_drv_rgtr(bp)) {
8322 rc = -ENODEV;
8323 goto resume_exit;
8324 }
8325 rc = bnxt_hwrm_func_reset(bp);
8326 if (rc) {
8327 rc = -EBUSY;
8328 goto resume_exit;
8329 }
8330 bnxt_get_wol_settings(bp);
8331 if (netif_running(dev)) {
8332 rc = bnxt_open(dev);
8333 if (!rc)
8334 netif_device_attach(dev);
8335 }
8336
8337 resume_exit:
8338 rtnl_unlock();
8339 return rc;
8340 }
8341
8342 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
8343 #define BNXT_PM_OPS (&bnxt_pm_ops)
8344
8345 #else
8346
8347 #define BNXT_PM_OPS NULL
8348
8349 #endif /* CONFIG_PM_SLEEP */
8350
8351 /**
8352 * bnxt_io_error_detected - called when PCI error is detected
8353 * @pdev: Pointer to PCI device
8354 * @state: The current pci connection state
8355 *
8356 * This function is called after a PCI bus error affecting
8357 * this device has been detected.
8358 */
8359 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
8360 pci_channel_state_t state)
8361 {
8362 struct net_device *netdev = pci_get_drvdata(pdev);
8363 struct bnxt *bp = netdev_priv(netdev);
8364
8365 netdev_info(netdev, "PCI I/O error detected\n");
8366
8367 rtnl_lock();
8368 netif_device_detach(netdev);
8369
8370 bnxt_ulp_stop(bp);
8371
8372 if (state == pci_channel_io_perm_failure) {
8373 rtnl_unlock();
8374 return PCI_ERS_RESULT_DISCONNECT;
8375 }
8376
8377 if (netif_running(netdev))
8378 bnxt_close(netdev);
8379
8380 pci_disable_device(pdev);
8381 rtnl_unlock();
8382
8383 	/* Request a slot reset. */
8384 return PCI_ERS_RESULT_NEED_RESET;
8385 }
8386
8387 /**
8388 * bnxt_io_slot_reset - called after the pci bus has been reset.
8389 * @pdev: Pointer to PCI device
8390 *
8391 * Restart the card from scratch, as if from a cold-boot.
8392  * At this point, the card has experienced a hard reset,
8393 * followed by fixups by BIOS, and has its config space
8394 * set up identically to what it was at cold boot.
8395 */
8396 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
8397 {
8398 struct net_device *netdev = pci_get_drvdata(pdev);
8399 struct bnxt *bp = netdev_priv(netdev);
8400 int err = 0;
8401 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
8402
8403 netdev_info(bp->dev, "PCI Slot Reset\n");
8404
8405 rtnl_lock();
8406
8407 if (pci_enable_device(pdev)) {
8408 dev_err(&pdev->dev,
8409 "Cannot re-enable PCI device after reset.\n");
8410 } else {
8411 pci_set_master(pdev);
8412
8413 err = bnxt_hwrm_func_reset(bp);
8414 if (!err && netif_running(netdev))
8415 err = bnxt_open(netdev);
8416
8417 if (!err) {
8418 result = PCI_ERS_RESULT_RECOVERED;
8419 bnxt_ulp_start(bp);
8420 }
8421 }
8422
8423 if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
8424 dev_close(netdev);
8425
8426 rtnl_unlock();
8427
8428 err = pci_cleanup_aer_uncorrect_error_status(pdev);
8429 if (err) {
8430 dev_err(&pdev->dev,
8431 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8432 err); /* non-fatal, continue */
8433 }
8434
8435 return PCI_ERS_RESULT_RECOVERED;
8436 }
8437
8438 /**
8439 * bnxt_io_resume - called when traffic can start flowing again.
8440 * @pdev: Pointer to PCI device
8441 *
8442 * This callback is called when the error recovery driver tells
8443  * us that it's OK to resume normal operation.
8444 */
8445 static void bnxt_io_resume(struct pci_dev *pdev)
8446 {
8447 struct net_device *netdev = pci_get_drvdata(pdev);
8448
8449 rtnl_lock();
8450
8451 netif_device_attach(netdev);
8452
8453 rtnl_unlock();
8454 }
8455
8456 static const struct pci_error_handlers bnxt_err_handler = {
8457 .error_detected = bnxt_io_error_detected,
8458 .slot_reset = bnxt_io_slot_reset,
8459 .resume = bnxt_io_resume
8460 };
8461
8462 static struct pci_driver bnxt_pci_driver = {
8463 .name = DRV_MODULE_NAME,
8464 .id_table = bnxt_pci_tbl,
8465 .probe = bnxt_init_one,
8466 .remove = bnxt_remove_one,
8467 .shutdown = bnxt_shutdown,
8468 .driver.pm = BNXT_PM_OPS,
8469 .err_handler = &bnxt_err_handler,
8470 #if defined(CONFIG_BNXT_SRIOV)
8471 .sriov_configure = bnxt_sriov_configure,
8472 #endif
8473 };
8474
8475 static int __init bnxt_init(void)
8476 {
8477 return pci_register_driver(&bnxt_pci_driver);
8478 }
8479
8480 static void __exit bnxt_exit(void)
8481 {
8482 pci_unregister_driver(&bnxt_pci_driver);
8483 if (bnxt_pf_wq)
8484 destroy_workqueue(bnxt_pf_wq);
8485 }
8486
8487 module_init(bnxt_init);
8488 module_exit(bnxt_exit);
8489