// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/crash_dump.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/version.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/io.h>
#include <linux/netdev_features.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <net/udp_tunnel.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/pkt_sched.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/random.h>
#include <net/ip6_checksum.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/aer.h>
#include "qede.h"
#include "qede_ptp.h"

static char version[] =
	"QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static uint debug;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");
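/* The raw 32-bit 'debug' value is decoded into a dp_level/dp_module pair by
 * qede_config_debug() below: b31 enables NOTICE prints, b30 enables INFO
 * prints, and b29-b0 select per-module VERBOSE prints.
 */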

static const struct qed_eth_ops *qed_ops;

#define CHIP_NUM_57980S_40		0x1634
#define CHIP_NUM_57980S_10		0x1666
#define CHIP_NUM_57980S_MF		0x1636
#define CHIP_NUM_57980S_100		0x1644
#define CHIP_NUM_57980S_50		0x1654
#define CHIP_NUM_57980S_25		0x1656
#define CHIP_NUM_57980S_IOV		0x1664
#define CHIP_NUM_AH			0x8070
#define CHIP_NUM_AH_IOV			0x8090

#ifndef PCI_DEVICE_ID_NX2_57980E
#define PCI_DEVICE_ID_57980S_40		CHIP_NUM_57980S_40
#define PCI_DEVICE_ID_57980S_10		CHIP_NUM_57980S_10
#define PCI_DEVICE_ID_57980S_MF		CHIP_NUM_57980S_MF
#define PCI_DEVICE_ID_57980S_100	CHIP_NUM_57980S_100
#define PCI_DEVICE_ID_57980S_50		CHIP_NUM_57980S_50
#define PCI_DEVICE_ID_57980S_25		CHIP_NUM_57980S_25
#define PCI_DEVICE_ID_57980S_IOV	CHIP_NUM_57980S_IOV
#define PCI_DEVICE_ID_AH		CHIP_NUM_AH
#define PCI_DEVICE_ID_AH_IOV		CHIP_NUM_AH_IOV

#endif

enum qede_pci_private {
	QEDE_PRIVATE_PF,
	QEDE_PRIVATE_VF
};

static const struct pci_device_id qede_pci_tbl[] = {
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
#ifdef CONFIG_QED_SRIOV
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
#endif
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH), QEDE_PRIVATE_PF},
#ifdef CONFIG_QED_SRIOV
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH_IOV), QEDE_PRIVATE_VF},
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, qede_pci_tbl);

static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static pci_ers_result_t
qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state);

#define TX_TIMEOUT		(5 * HZ)

/* Utilize last protocol index for XDP */
#define XDP_PI	11

static void qede_remove(struct pci_dev *pdev);
static void qede_shutdown(struct pci_dev *pdev);
static void qede_link_update(void *dev, struct qed_link_output *link);
static void qede_schedule_recovery_handler(void *dev);
static void qede_recovery_handler(struct qede_dev *edev);
static void qede_schedule_hw_err_handler(void *dev,
					 enum qed_hw_err_type err_type);
static void qede_get_eth_tlv_data(void *edev, void *data);
static void qede_get_generic_tlv_data(void *edev,
				      struct qed_generic_tlvs *data);
static void qede_generic_hw_err_handler(struct qede_dev *edev);
#ifdef CONFIG_QED_SRIOV
static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
			    __be16 vlan_proto)
{
	struct qede_dev *edev = netdev_priv(ndev);

	if (vlan > 4095) {
		DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
		return -EINVAL;
	}

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
		   vlan, vf);

	return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
}

static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
{
	struct qede_dev *edev = netdev_priv(ndev);

	DP_VERBOSE(edev, QED_MSG_IOV, "Setting MAC %pM to VF [%d]\n", mac, vfidx);

	if (!is_valid_ether_addr(mac)) {
		DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
		return -EINVAL;
	}

	return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
}
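/* The two VF callbacks above are typically exercised via iproute2, e.g.
 * (illustrative):
 *   ip link set dev <pf-ifname> vf 0 vlan 100
 *   ip link set dev <pf-ifname> vf 0 mac 02:00:00:00:00:01
 */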

static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
{
	struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
	struct qed_dev_info *qed_info = &edev->dev_info.common;
	struct qed_update_vport_params *vport_params;
	int rc;

	vport_params = vzalloc(sizeof(*vport_params));
	if (!vport_params)
		return -ENOMEM;
	DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);

	rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);

	/* Enable/Disable Tx switching for PF */
	if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
	    !qed_info->b_inter_pf_switch && qed_info->tx_switching) {
		vport_params->vport_id = 0;
		vport_params->update_tx_switching_flg = 1;
		vport_params->tx_switching_flg = num_vfs_param ? 1 : 0;
		edev->ops->vport_update(edev->cdev, vport_params);
	}

	vfree(vport_params);
	return rc;
}
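/* As the PCI core's .sriov_configure hook, this is reached from sysfs, e.g.
 * (illustrative): echo 2 > /sys/class/net/<pf-ifname>/device/sriov_numvfs
 */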
#endif

static const struct pci_error_handlers qede_err_handler = {
	.error_detected = qede_io_error_detected,
};

static struct pci_driver qede_pci_driver = {
	.name = "qede",
	.id_table = qede_pci_tbl,
	.probe = qede_probe,
	.remove = qede_remove,
	.shutdown = qede_shutdown,
#ifdef CONFIG_QED_SRIOV
	.sriov_configure = qede_sriov_configure,
#endif
	.err_handler = &qede_err_handler,
};

static struct qed_eth_cb_ops qede_ll_ops = {
	{
#ifdef CONFIG_RFS_ACCEL
		.arfs_filter_op = qede_arfs_filter_op,
#endif
		.link_update = qede_link_update,
		.schedule_recovery_handler = qede_schedule_recovery_handler,
		.schedule_hw_err_handler = qede_schedule_hw_err_handler,
		.get_generic_tlv_data = qede_get_generic_tlv_data,
		.get_protocol_tlv_data = qede_get_eth_tlv_data,
	},
	.force_mac = qede_force_mac,
	.ports_update = qede_udp_ports_update,
};

static int qede_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct ethtool_drvinfo drvinfo;
	struct qede_dev *edev;

	if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR)
		goto done;

	/* Check whether this is a qede device */
	if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
		goto done;

	memset(&drvinfo, 0, sizeof(drvinfo));
	ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
	if (strcmp(drvinfo.driver, "qede"))
		goto done;
	edev = netdev_priv(ndev);

	switch (event) {
	case NETDEV_CHANGENAME:
		/* Notify qed of the name change */
		if (!edev->ops || !edev->ops->common)
			goto done;
		edev->ops->common->set_name(edev->cdev, edev->ndev->name);
		break;
	case NETDEV_CHANGEADDR:
		edev = netdev_priv(ndev);
		qede_rdma_event_changeaddr(edev);
		break;
	}

done:
	return NOTIFY_DONE;
}

static struct notifier_block qede_netdev_notifier = {
	.notifier_call = qede_netdev_event,
};

static
int __init qede_init(void)
{
	int ret;

	pr_info("qede_init: %s\n", version);

	qede_forced_speed_maps_init();

	qed_ops = qed_get_eth_ops();
	if (!qed_ops) {
		pr_notice("Failed to get qed ethtool operations\n");
		return -EINVAL;
	}

	/* Must register notifier before pci ops, since we might miss
	 * interface rename after pci probe and netdev registration.
	 */
	ret = register_netdevice_notifier(&qede_netdev_notifier);
	if (ret) {
		pr_notice("Failed to register netdevice_notifier\n");
		qed_put_eth_ops();
		return -EINVAL;
	}

	ret = pci_register_driver(&qede_pci_driver);
	if (ret) {
		pr_notice("Failed to register driver\n");
		unregister_netdevice_notifier(&qede_netdev_notifier);
		qed_put_eth_ops();
		return -EINVAL;
	}

	return 0;
}

static void __exit qede_cleanup(void)
{
	if (debug & QED_LOG_INFO_MASK)
		pr_info("qede_cleanup called\n");

	unregister_netdevice_notifier(&qede_netdev_notifier);
	pci_unregister_driver(&qede_pci_driver);
	qed_put_eth_ops();
}

module_init(qede_init);
module_exit(qede_cleanup);

static int qede_open(struct net_device *ndev);
static int qede_close(struct net_device *ndev);

void qede_fill_by_demand_stats(struct qede_dev *edev)
{
	struct qede_stats_common *p_common = &edev->stats.common;
	struct qed_eth_stats stats;

	edev->ops->get_vport_stats(edev->cdev, &stats);

	p_common->no_buff_discards = stats.common.no_buff_discards;
	p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
	p_common->ttl0_discard = stats.common.ttl0_discard;
	p_common->rx_ucast_bytes = stats.common.rx_ucast_bytes;
	p_common->rx_mcast_bytes = stats.common.rx_mcast_bytes;
	p_common->rx_bcast_bytes = stats.common.rx_bcast_bytes;
	p_common->rx_ucast_pkts = stats.common.rx_ucast_pkts;
	p_common->rx_mcast_pkts = stats.common.rx_mcast_pkts;
	p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts;
	p_common->mftag_filter_discards = stats.common.mftag_filter_discards;
	p_common->mac_filter_discards = stats.common.mac_filter_discards;
	p_common->gft_filter_drop = stats.common.gft_filter_drop;

	p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes;
	p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes;
	p_common->tx_bcast_bytes = stats.common.tx_bcast_bytes;
	p_common->tx_ucast_pkts = stats.common.tx_ucast_pkts;
	p_common->tx_mcast_pkts = stats.common.tx_mcast_pkts;
	p_common->tx_bcast_pkts = stats.common.tx_bcast_pkts;
	p_common->tx_err_drop_pkts = stats.common.tx_err_drop_pkts;
	p_common->coalesced_pkts = stats.common.tpa_coalesced_pkts;
	p_common->coalesced_events = stats.common.tpa_coalesced_events;
	p_common->coalesced_aborts_num = stats.common.tpa_aborts_num;
	p_common->non_coalesced_pkts = stats.common.tpa_not_coalesced_pkts;
	p_common->coalesced_bytes = stats.common.tpa_coalesced_bytes;

	p_common->rx_64_byte_packets = stats.common.rx_64_byte_packets;
	p_common->rx_65_to_127_byte_packets =
	    stats.common.rx_65_to_127_byte_packets;
	p_common->rx_128_to_255_byte_packets =
	    stats.common.rx_128_to_255_byte_packets;
	p_common->rx_256_to_511_byte_packets =
	    stats.common.rx_256_to_511_byte_packets;
	p_common->rx_512_to_1023_byte_packets =
	    stats.common.rx_512_to_1023_byte_packets;
	p_common->rx_1024_to_1518_byte_packets =
	    stats.common.rx_1024_to_1518_byte_packets;
	p_common->rx_crc_errors = stats.common.rx_crc_errors;
	p_common->rx_mac_crtl_frames = stats.common.rx_mac_crtl_frames;
	p_common->rx_pause_frames = stats.common.rx_pause_frames;
	p_common->rx_pfc_frames = stats.common.rx_pfc_frames;
	p_common->rx_align_errors = stats.common.rx_align_errors;
	p_common->rx_carrier_errors = stats.common.rx_carrier_errors;
	p_common->rx_oversize_packets = stats.common.rx_oversize_packets;
	p_common->rx_jabbers = stats.common.rx_jabbers;
	p_common->rx_undersize_packets = stats.common.rx_undersize_packets;
	p_common->rx_fragments = stats.common.rx_fragments;
	p_common->tx_64_byte_packets = stats.common.tx_64_byte_packets;
	p_common->tx_65_to_127_byte_packets =
	    stats.common.tx_65_to_127_byte_packets;
	p_common->tx_128_to_255_byte_packets =
	    stats.common.tx_128_to_255_byte_packets;
	p_common->tx_256_to_511_byte_packets =
	    stats.common.tx_256_to_511_byte_packets;
	p_common->tx_512_to_1023_byte_packets =
	    stats.common.tx_512_to_1023_byte_packets;
	p_common->tx_1024_to_1518_byte_packets =
	    stats.common.tx_1024_to_1518_byte_packets;
	p_common->tx_pause_frames = stats.common.tx_pause_frames;
	p_common->tx_pfc_frames = stats.common.tx_pfc_frames;
	p_common->brb_truncates = stats.common.brb_truncates;
	p_common->brb_discards = stats.common.brb_discards;
	p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames;
	p_common->link_change_count = stats.common.link_change_count;
	p_common->ptp_skip_txts = edev->ptp_skip_txts;

	if (QEDE_IS_BB(edev)) {
		struct qede_stats_bb *p_bb = &edev->stats.bb;

		p_bb->rx_1519_to_1522_byte_packets =
		    stats.bb.rx_1519_to_1522_byte_packets;
		p_bb->rx_1519_to_2047_byte_packets =
		    stats.bb.rx_1519_to_2047_byte_packets;
		p_bb->rx_2048_to_4095_byte_packets =
		    stats.bb.rx_2048_to_4095_byte_packets;
		p_bb->rx_4096_to_9216_byte_packets =
		    stats.bb.rx_4096_to_9216_byte_packets;
		p_bb->rx_9217_to_16383_byte_packets =
		    stats.bb.rx_9217_to_16383_byte_packets;
		p_bb->tx_1519_to_2047_byte_packets =
		    stats.bb.tx_1519_to_2047_byte_packets;
		p_bb->tx_2048_to_4095_byte_packets =
		    stats.bb.tx_2048_to_4095_byte_packets;
		p_bb->tx_4096_to_9216_byte_packets =
		    stats.bb.tx_4096_to_9216_byte_packets;
		p_bb->tx_9217_to_16383_byte_packets =
		    stats.bb.tx_9217_to_16383_byte_packets;
		p_bb->tx_lpi_entry_count = stats.bb.tx_lpi_entry_count;
		p_bb->tx_total_collisions = stats.bb.tx_total_collisions;
	} else {
		struct qede_stats_ah *p_ah = &edev->stats.ah;

		p_ah->rx_1519_to_max_byte_packets =
		    stats.ah.rx_1519_to_max_byte_packets;
		p_ah->tx_1519_to_max_byte_packets =
		    stats.ah.tx_1519_to_max_byte_packets;
	}
}

static void qede_get_stats64(struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_stats_common *p_common;

	qede_fill_by_demand_stats(edev);
	p_common = &edev->stats.common;

	stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
			    p_common->rx_bcast_pkts;
	stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
			    p_common->tx_bcast_pkts;

	stats->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
			  p_common->rx_bcast_bytes;
	stats->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
			  p_common->tx_bcast_bytes;

	stats->tx_errors = p_common->tx_err_drop_pkts;
	stats->multicast = p_common->rx_mcast_pkts + p_common->rx_bcast_pkts;

	stats->rx_fifo_errors = p_common->no_buff_discards;

	if (QEDE_IS_BB(edev))
		stats->collisions = edev->stats.bb.tx_total_collisions;
	stats->rx_crc_errors = p_common->rx_crc_errors;
	stats->rx_frame_errors = p_common->rx_align_errors;
}

#ifdef CONFIG_QED_SRIOV
static int qede_get_vf_config(struct net_device *dev, int vfidx,
			      struct ifla_vf_info *ivi)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
}

static int qede_set_vf_rate(struct net_device *dev, int vfidx,
			    int min_tx_rate, int max_tx_rate)
{
	struct qede_dev *edev = netdev_priv(dev);

	return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
					max_tx_rate);
}

static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
}

static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
				  int link_state)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
}

static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_trust(edev->cdev, vfidx, setting);
}
#endif

static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return qede_ptp_hw_ts(edev, ifr);
	default:
		DP_VERBOSE(edev, QED_MSG_DEBUG,
			   "default IOCTL cmd 0x%x\n", cmd);
		return -EOPNOTSUPP;
	}

	return 0;
}

static void qede_tx_log_print(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	DP_NOTICE(edev,
		  "Txq[%d]: FW cons [host] %04x, SW cons %04x, SW prod %04x [Jiffies %lu]\n",
		  txq->index, le16_to_cpu(*txq->hw_cons_ptr),
		  qed_chain_get_cons_idx(&txq->tx_pbl),
		  qed_chain_get_prod_idx(&txq->tx_pbl),
		  jiffies);
}

static void qede_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_tx_queue *txq;
	int cos;

	netif_carrier_off(dev);
	DP_NOTICE(edev, "TX timeout on queue %u!\n", txqueue);

	if (!(edev->fp_array[txqueue].type & QEDE_FASTPATH_TX))
		return;

	for_each_cos_in_txq(edev, cos) {
		txq = &edev->fp_array[txqueue].txq[cos];

		if (qed_chain_get_cons_idx(&txq->tx_pbl) !=
		    qed_chain_get_prod_idx(&txq->tx_pbl))
			qede_tx_log_print(edev, txq);
	}

	if (IS_VF(edev))
		return;

	if (test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
	    edev->state == QEDE_STATE_RECOVERY) {
		DP_INFO(edev,
			"Avoid handling a Tx timeout while another HW error is being handled\n");
		return;
	}

	set_bit(QEDE_ERR_GET_DBG_INFO, &edev->err_flags);
	set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);
}

static int qede_setup_tc(struct net_device *ndev, u8 num_tc)
{
	struct qede_dev *edev = netdev_priv(ndev);
	int cos, count, offset;

	if (num_tc > edev->dev_info.num_tc)
		return -EINVAL;

	netdev_reset_tc(ndev);
	netdev_set_num_tc(ndev, num_tc);

	for_each_cos_in_txq(edev, cos) {
		count = QEDE_TSS_COUNT(edev);
		offset = cos * QEDE_TSS_COUNT(edev);
		netdev_set_tc_queue(ndev, cos, count, offset);
	}

	return 0;
}
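/* Illustrative userspace trigger for the mqprio offload path handled in
 * qede_setup_tc_offload() below (assuming PF interface eth0 and 4 TCs):
 *   tc qdisc add dev eth0 root mqprio num_tc 4 \
 *      map 0 0 0 0 1 1 1 1 2 2 2 2 3 3 3 3 hw 1
 */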

static int
qede_set_flower(struct qede_dev *edev, struct flow_cls_offload *f,
		__be16 proto)
{
	switch (f->command) {
	case FLOW_CLS_REPLACE:
		return qede_add_tc_flower_fltr(edev, proto, f);
	case FLOW_CLS_DESTROY:
		return qede_delete_flow_filter(edev, f->cookie);
	default:
		return -EOPNOTSUPP;
	}
}

static int qede_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	struct flow_cls_offload *f;
	struct qede_dev *edev = cb_priv;

	if (!tc_cls_can_offload_and_chain0(edev->ndev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		f = type_data;
		return qede_set_flower(edev, f, f->common.protocol);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(qede_block_cb_list);

static int
qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type,
		      void *type_data)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct tc_mqprio_qopt *mqprio;

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &qede_block_cb_list,
						  qede_setup_tc_block_cb,
						  edev, edev, true);
	case TC_SETUP_QDISC_MQPRIO:
		mqprio = type_data;

		mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
		return qede_setup_tc(dev, mqprio->num_tc);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops qede_netdev_ops = {
	.ndo_open		= qede_open,
	.ndo_stop		= qede_close,
	.ndo_start_xmit		= qede_start_xmit,
	.ndo_select_queue	= qede_select_queue,
	.ndo_set_rx_mode	= qede_set_rx_mode,
	.ndo_set_mac_address	= qede_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= qede_change_mtu,
	.ndo_do_ioctl		= qede_ioctl,
	.ndo_tx_timeout		= qede_tx_timeout,
#ifdef CONFIG_QED_SRIOV
	.ndo_set_vf_mac		= qede_set_vf_mac,
	.ndo_set_vf_vlan	= qede_set_vf_vlan,
	.ndo_set_vf_trust	= qede_set_vf_trust,
#endif
	.ndo_vlan_rx_add_vid	= qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qede_vlan_rx_kill_vid,
	.ndo_fix_features	= qede_fix_features,
	.ndo_set_features	= qede_set_features,
	.ndo_get_stats64	= qede_get_stats64,
#ifdef CONFIG_QED_SRIOV
	.ndo_set_vf_link_state	= qede_set_vf_link_state,
	.ndo_set_vf_spoofchk	= qede_set_vf_spoofchk,
	.ndo_get_vf_config	= qede_get_vf_config,
	.ndo_set_vf_rate	= qede_set_vf_rate,
#endif
	.ndo_udp_tunnel_add	= udp_tunnel_nic_add_port,
	.ndo_udp_tunnel_del	= udp_tunnel_nic_del_port,
	.ndo_features_check	= qede_features_check,
	.ndo_bpf		= qede_xdp,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= qede_rx_flow_steer,
#endif
	.ndo_xdp_xmit		= qede_xdp_transmit,
	.ndo_setup_tc		= qede_setup_tc_offload,
};

static const struct net_device_ops qede_netdev_vf_ops = {
	.ndo_open		= qede_open,
	.ndo_stop		= qede_close,
	.ndo_start_xmit		= qede_start_xmit,
	.ndo_select_queue	= qede_select_queue,
	.ndo_set_rx_mode	= qede_set_rx_mode,
	.ndo_set_mac_address	= qede_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= qede_change_mtu,
	.ndo_vlan_rx_add_vid	= qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qede_vlan_rx_kill_vid,
	.ndo_fix_features	= qede_fix_features,
	.ndo_set_features	= qede_set_features,
	.ndo_get_stats64	= qede_get_stats64,
	.ndo_udp_tunnel_add	= udp_tunnel_nic_add_port,
	.ndo_udp_tunnel_del	= udp_tunnel_nic_del_port,
	.ndo_features_check	= qede_features_check,
};

static const struct net_device_ops qede_netdev_vf_xdp_ops = {
	.ndo_open		= qede_open,
	.ndo_stop		= qede_close,
	.ndo_start_xmit		= qede_start_xmit,
	.ndo_select_queue	= qede_select_queue,
	.ndo_set_rx_mode	= qede_set_rx_mode,
	.ndo_set_mac_address	= qede_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= qede_change_mtu,
	.ndo_vlan_rx_add_vid	= qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qede_vlan_rx_kill_vid,
	.ndo_fix_features	= qede_fix_features,
	.ndo_set_features	= qede_set_features,
	.ndo_get_stats64	= qede_get_stats64,
	.ndo_udp_tunnel_add	= udp_tunnel_nic_add_port,
	.ndo_udp_tunnel_del	= udp_tunnel_nic_del_port,
	.ndo_features_check	= qede_features_check,
	.ndo_bpf		= qede_xdp,
	.ndo_xdp_xmit		= qede_xdp_transmit,
};

/* -------------------------------------------------------------------------
 * START OF PROBE / REMOVE
 * -------------------------------------------------------------------------
 */

static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
					    struct pci_dev *pdev,
					    struct qed_dev_eth_info *info,
					    u32 dp_module, u8 dp_level)
{
	struct net_device *ndev;
	struct qede_dev *edev;

	ndev = alloc_etherdev_mqs(sizeof(*edev),
				  info->num_queues * info->num_tc,
				  info->num_queues);
	if (!ndev) {
		pr_err("etherdev allocation failed\n");
		return NULL;
	}

	edev = netdev_priv(ndev);
	edev->ndev = ndev;
	edev->cdev = cdev;
	edev->pdev = pdev;
	edev->dp_module = dp_module;
	edev->dp_level = dp_level;
	edev->ops = qed_ops;

	if (is_kdump_kernel()) {
		edev->q_num_rx_buffers = NUM_RX_BDS_KDUMP_MIN;
		edev->q_num_tx_buffers = NUM_TX_BDS_KDUMP_MIN;
	} else {
		edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
		edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
	}

	DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
		info->num_queues, info->num_queues);

	SET_NETDEV_DEV(ndev, &pdev->dev);

	memset(&edev->stats, 0, sizeof(edev->stats));
	memcpy(&edev->dev_info, info, sizeof(*info));
	/* As ethtool doesn't have the ability to show WoL behavior as
	 * 'default', declare it enabled if the device supports it.
	 */
	if (edev->dev_info.common.wol_support)
		edev->wol_enabled = true;

	INIT_LIST_HEAD(&edev->vlan_list);

	return edev;
}

static void qede_init_ndev(struct qede_dev *edev)
{
	struct net_device *ndev = edev->ndev;
	struct pci_dev *pdev = edev->pdev;
	bool udp_tunnel_enable = false;
	netdev_features_t hw_features;

	pci_set_drvdata(pdev, ndev);

	ndev->mem_start = edev->dev_info.common.pci_mem_start;
	ndev->base_addr = ndev->mem_start;
	ndev->mem_end = edev->dev_info.common.pci_mem_end;
	ndev->irq = edev->dev_info.common.pci_irq;

	ndev->watchdog_timeo = TX_TIMEOUT;

	if (IS_VF(edev)) {
		if (edev->dev_info.xdp_supported)
			ndev->netdev_ops = &qede_netdev_vf_xdp_ops;
		else
			ndev->netdev_ops = &qede_netdev_vf_ops;
	} else {
		ndev->netdev_ops = &qede_netdev_ops;
	}

	qede_set_ethtool_ops(ndev);

	ndev->priv_flags |= IFF_UNICAST_FLT;
	/* user-changeable features */
	hw_features = NETIF_F_GRO | NETIF_F_GRO_HW | NETIF_F_SG |
		      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		      NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_TC;

	if (edev->dev_info.common.b_arfs_capable)
		hw_features |= NETIF_F_NTUPLE;

	if (edev->dev_info.common.vxlan_enable ||
	    edev->dev_info.common.geneve_enable)
		udp_tunnel_enable = true;

	if (udp_tunnel_enable || edev->dev_info.common.gre_enable) {
		hw_features |= NETIF_F_TSO_ECN;
		ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
					NETIF_F_SG | NETIF_F_TSO |
					NETIF_F_TSO_ECN | NETIF_F_TSO6 |
					NETIF_F_RXCSUM;
	}

	if (udp_tunnel_enable) {
		hw_features |= (NETIF_F_GSO_UDP_TUNNEL |
				NETIF_F_GSO_UDP_TUNNEL_CSUM);
		ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL |
					  NETIF_F_GSO_UDP_TUNNEL_CSUM);

		qede_set_udp_tunnels(edev);
	}

	if (edev->dev_info.common.gre_enable) {
		hw_features |= (NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM);
		ndev->hw_enc_features |= (NETIF_F_GSO_GRE |
					  NETIF_F_GSO_GRE_CSUM);
	}

	ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
			      NETIF_F_HIGHDMA;
	ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
			 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;

	ndev->hw_features = hw_features;

	/* MTU range: 46 - 9600 */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
	ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;

	/* Set network device HW mac */
	ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);

	ndev->mtu = edev->dev_info.common.mtu;
}

/* This function converts the 32-bit 'debug' module param into separate
 * level and module values.
 * Input 32b decoding:
 * b31 - enable all NOTICE prints. NOTICE prints are for deviations from the
 * 'happy' flow, e.g. a memory allocation failure.
 * b30 - enable all INFO prints. INFO prints are for major steps in the flow
 * and provide important parameters.
 * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
 * module. VERBOSE prints are for tracking a specific flow at a low level.
 *
 * Note that the resulting level is that of the lowest required log type.
 */
void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
{
	*p_dp_level = QED_LEVEL_NOTICE;
	*p_dp_module = 0;

	if (debug & QED_LOG_VERBOSE_MASK) {
		*p_dp_level = QED_LEVEL_VERBOSE;
		*p_dp_module = (debug & 0x3FFFFFFF);
	} else if (debug & QED_LOG_INFO_MASK) {
		*p_dp_level = QED_LEVEL_INFO;
	} else if (debug & QED_LOG_NOTICE_MASK) {
		*p_dp_level = QED_LEVEL_NOTICE;
	}
}
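/* Illustrative decoding: debug=0x80000000 keeps the default NOTICE level,
 * debug=0x40000000 selects INFO, and any b29-b0 bit set (e.g. debug=0x1)
 * selects VERBOSE with the low 30 bits used as the per-module bitmap.
 */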

static void qede_free_fp_array(struct qede_dev *edev)
{
	if (edev->fp_array) {
		struct qede_fastpath *fp;
		int i;

		for_each_queue(i) {
			fp = &edev->fp_array[i];

			kfree(fp->sb_info);
			/* Handle the mem alloc failure case where qede_init_fp
			 * didn't register xdp_rxq_info yet.
			 * This implicitly only covers the
			 * (fp->type & QEDE_FASTPATH_RX) case.
			 */
			if (fp->rxq && xdp_rxq_info_is_reg(&fp->rxq->xdp_rxq))
				xdp_rxq_info_unreg(&fp->rxq->xdp_rxq);
			kfree(fp->rxq);
			kfree(fp->xdp_tx);
			kfree(fp->txq);
		}
		kfree(edev->fp_array);
	}

	edev->num_queues = 0;
	edev->fp_num_tx = 0;
	edev->fp_num_rx = 0;
}

static int qede_alloc_fp_array(struct qede_dev *edev)
{
	u8 fp_combined, fp_rx = edev->fp_num_rx;
	struct qede_fastpath *fp;
	int i;

	edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
				 sizeof(*edev->fp_array), GFP_KERNEL);
	if (!edev->fp_array) {
		DP_NOTICE(edev, "fp array allocation failed\n");
		goto err;
	}

	fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;

	/* Allocate the FP elements for Rx queues followed by combined and then
	 * the Tx. This ordering should be maintained so that the respective
	 * queues (Rx or Tx) will be together in the fastpath array and the
	 * associated ids will be sequential.
	 */
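	/* Illustration (hypothetical counts): with 8 total queues, of which
	 * 2 are Rx-only and 2 are Tx-only, indices 0-1 become Rx, 2-5 become
	 * combined and 6-7 become Tx.
	 */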
	for_each_queue(i) {
		fp = &edev->fp_array[i];

		fp->sb_info = kzalloc(sizeof(*fp->sb_info), GFP_KERNEL);
		if (!fp->sb_info) {
			DP_NOTICE(edev, "sb info struct allocation failed\n");
			goto err;
		}

		if (fp_rx) {
			fp->type = QEDE_FASTPATH_RX;
			fp_rx--;
		} else if (fp_combined) {
			fp->type = QEDE_FASTPATH_COMBINED;
			fp_combined--;
		} else {
			fp->type = QEDE_FASTPATH_TX;
		}

		if (fp->type & QEDE_FASTPATH_TX) {
			fp->txq = kcalloc(edev->dev_info.num_tc,
					  sizeof(*fp->txq), GFP_KERNEL);
			if (!fp->txq)
				goto err;
		}

		if (fp->type & QEDE_FASTPATH_RX) {
			fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL);
			if (!fp->rxq)
				goto err;

			if (edev->xdp_prog) {
				fp->xdp_tx = kzalloc(sizeof(*fp->xdp_tx),
						     GFP_KERNEL);
				if (!fp->xdp_tx)
					goto err;
				fp->type |= QEDE_FASTPATH_XDP;
			}
		}
	}

	return 0;
err:
	qede_free_fp_array(edev);
	return -ENOMEM;
}

/* The qede lock is used to protect driver state change and driver flows that
 * are not reentrant.
 */
void __qede_lock(struct qede_dev *edev)
{
	mutex_lock(&edev->qede_lock);
}

void __qede_unlock(struct qede_dev *edev)
{
	mutex_unlock(&edev->qede_lock);
}

/* This version of the lock should be used when acquiring the RTNL lock is also
 * needed in addition to the internal qede lock.
 */
static void qede_lock(struct qede_dev *edev)
{
	rtnl_lock();
	__qede_lock(edev);
}

static void qede_unlock(struct qede_dev *edev)
{
	__qede_unlock(edev);
	rtnl_unlock();
}

static void qede_sp_task(struct work_struct *work)
{
	struct qede_dev *edev = container_of(work, struct qede_dev,
					     sp_task.work);

	/* Once qede removal is in progress, disable execution of this
	 * deferred work; this stops any future scheduling of sp_task.
	 */
	if (test_bit(QEDE_SP_DISABLE, &edev->sp_flags))
		return;

	/* The locking scheme depends on the specific flag:
	 * In case of QEDE_SP_RECOVERY, acquiring the RTNL lock is required to
	 * ensure that ongoing flows are ended and new ones are not started.
	 * In other cases - only the internal qede lock should be acquired.
	 */

	if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) {
#ifdef CONFIG_QED_SRIOV
		/* SRIOV must be disabled outside the lock to avoid a deadlock.
		 * The recovery of the active VFs is currently not supported.
		 */
		if (pci_num_vf(edev->pdev))
			qede_sriov_configure(edev->pdev, 0);
#endif
		qede_lock(edev);
		qede_recovery_handler(edev);
		qede_unlock(edev);
	}

	__qede_lock(edev);

	if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
		if (edev->state == QEDE_STATE_OPEN)
			qede_config_rx_mode(edev->ndev);

#ifdef CONFIG_RFS_ACCEL
	if (test_and_clear_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags)) {
		if (edev->state == QEDE_STATE_OPEN)
			qede_process_arfs_filters(edev, false);
	}
#endif
	if (test_and_clear_bit(QEDE_SP_HW_ERR, &edev->sp_flags))
		qede_generic_hw_err_handler(edev);
	__qede_unlock(edev);

	if (test_and_clear_bit(QEDE_SP_AER, &edev->sp_flags)) {
#ifdef CONFIG_QED_SRIOV
		/* SRIOV must be disabled outside the lock to avoid a deadlock.
		 * The recovery of the active VFs is currently not supported.
		 */
		if (pci_num_vf(edev->pdev))
			qede_sriov_configure(edev->pdev, 0);
#endif
		edev->ops->common->recovery_process(edev->cdev);
	}
}

static void qede_update_pf_params(struct qed_dev *cdev)
{
	struct qed_pf_params pf_params;
	u16 num_cons;

	/* 64 rx + 64 tx + 64 XDP */
	memset(&pf_params, 0, sizeof(struct qed_pf_params));

	/* 1 rx + 1 xdp + max tx cos */
	num_cons = QED_MIN_L2_CONS;

	pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * num_cons;

	/* Same for VFs - make sure they'll have sufficient connections
	 * to support XDP Tx queues.
	 */
	pf_params.eth_pf_params.num_vf_cons = 48;

	pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
	qed_ops->common->update_pf_params(cdev, &pf_params);
}

#define QEDE_FW_VER_STR_SIZE	80

static void qede_log_probe(struct qede_dev *edev)
{
	struct qed_dev_info *p_dev_info = &edev->dev_info.common;
	u8 buf[QEDE_FW_VER_STR_SIZE];
	size_t left_size;

	snprintf(buf, QEDE_FW_VER_STR_SIZE,
		 "Storm FW %d.%d.%d.%d, Management FW %d.%d.%d.%d",
		 p_dev_info->fw_major, p_dev_info->fw_minor, p_dev_info->fw_rev,
		 p_dev_info->fw_eng,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_3_MASK) >>
		 QED_MFW_VERSION_3_OFFSET,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_2_MASK) >>
		 QED_MFW_VERSION_2_OFFSET,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_1_MASK) >>
		 QED_MFW_VERSION_1_OFFSET,
		 (p_dev_info->mfw_rev & QED_MFW_VERSION_0_MASK) >>
		 QED_MFW_VERSION_0_OFFSET);

	left_size = QEDE_FW_VER_STR_SIZE - strlen(buf);
	if (p_dev_info->mbi_version && left_size)
		snprintf(buf + strlen(buf), left_size,
			 " [MBI %d.%d.%d]",
			 (p_dev_info->mbi_version & QED_MBI_VERSION_2_MASK) >>
			 QED_MBI_VERSION_2_OFFSET,
			 (p_dev_info->mbi_version & QED_MBI_VERSION_1_MASK) >>
			 QED_MBI_VERSION_1_OFFSET,
			 (p_dev_info->mbi_version & QED_MBI_VERSION_0_MASK) >>
			 QED_MBI_VERSION_0_OFFSET);

	pr_info("qede %02x:%02x.%02x: %s [%s]\n", edev->pdev->bus->number,
		PCI_SLOT(edev->pdev->devfn), PCI_FUNC(edev->pdev->devfn),
		buf, edev->ndev->name);
}

enum qede_probe_mode {
	QEDE_PROBE_NORMAL,
	QEDE_PROBE_RECOVERY,
};

static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
			bool is_vf, enum qede_probe_mode mode)
{
	struct qed_probe_params probe_params;
	struct qed_slowpath_params sp_params;
	struct qed_dev_eth_info dev_info;
	struct qede_dev *edev;
	struct qed_dev *cdev;
	int rc;

	if (unlikely(dp_level & QED_LEVEL_INFO))
		pr_notice("Starting qede probe\n");

	memset(&probe_params, 0, sizeof(probe_params));
	probe_params.protocol = QED_PROTOCOL_ETH;
	probe_params.dp_module = dp_module;
	probe_params.dp_level = dp_level;
	probe_params.is_vf = is_vf;
	probe_params.recov_in_prog = (mode == QEDE_PROBE_RECOVERY);
	cdev = qed_ops->common->probe(pdev, &probe_params);
	if (!cdev) {
		rc = -ENODEV;
		goto err0;
	}

	qede_update_pf_params(cdev);

	/* Start the Slowpath-process */
	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.int_mode = QED_INT_MODE_MSIX;
	sp_params.drv_major = QEDE_MAJOR_VERSION;
	sp_params.drv_minor = QEDE_MINOR_VERSION;
	sp_params.drv_rev = QEDE_REVISION_VERSION;
	sp_params.drv_eng = QEDE_ENGINEERING_VERSION;
	strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
	rc = qed_ops->common->slowpath_start(cdev, &sp_params);
	if (rc) {
		pr_notice("Cannot start slowpath\n");
		goto err1;
	}

	/* Learn information crucial for qede to progress */
	rc = qed_ops->fill_dev_info(cdev, &dev_info);
	if (rc)
		goto err2;

	if (mode != QEDE_PROBE_RECOVERY) {
		edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
					   dp_level);
		if (!edev) {
			rc = -ENOMEM;
			goto err2;
		}

		edev->devlink = qed_ops->common->devlink_register(cdev);
		if (IS_ERR(edev->devlink)) {
			DP_NOTICE(edev, "Cannot register devlink\n");
			edev->devlink = NULL;
			/* Go on, we can live without devlink */
		}
	} else {
		struct net_device *ndev = pci_get_drvdata(pdev);

		edev = netdev_priv(ndev);

		if (edev->devlink) {
			struct qed_devlink *qdl = devlink_priv(edev->devlink);

			qdl->cdev = cdev;
		}
		edev->cdev = cdev;
		memset(&edev->stats, 0, sizeof(edev->stats));
		memcpy(&edev->dev_info, &dev_info, sizeof(dev_info));
	}

	if (is_vf)
		set_bit(QEDE_FLAGS_IS_VF, &edev->flags);

	qede_init_ndev(edev);

	rc = qede_rdma_dev_add(edev, (mode == QEDE_PROBE_RECOVERY));
	if (rc)
		goto err3;

	if (mode != QEDE_PROBE_RECOVERY) {
		/* Prepare the lock prior to the registration of the netdev,
		 * as once it's registered we might reach flows requiring it
		 * [it's even possible to reach a flow needing it directly
		 * from there, although it's unlikely].
		 */
		INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
		mutex_init(&edev->qede_lock);

		rc = register_netdev(edev->ndev);
		if (rc) {
			DP_NOTICE(edev, "Cannot register net-device\n");
			goto err4;
		}
	}

	edev->ops->common->set_name(cdev, edev->ndev->name);

	/* PTP not supported on VFs */
	if (!is_vf)
		qede_ptp_enable(edev);

	edev->ops->register_ops(cdev, &qede_ll_ops, edev);

#ifdef CONFIG_DCB
	if (!IS_VF(edev))
		qede_set_dcbnl_ops(edev->ndev);
#endif

	edev->rx_copybreak = QEDE_RX_HDR_SIZE;

	qede_log_probe(edev);
	return 0;

err4:
	qede_rdma_dev_remove(edev, (mode == QEDE_PROBE_RECOVERY));
err3:
	if (mode != QEDE_PROBE_RECOVERY)
		free_netdev(edev->ndev);
	else
		edev->cdev = NULL;
err2:
	qed_ops->common->slowpath_stop(cdev);
err1:
	qed_ops->common->remove(cdev);
err0:
	return rc;
}

static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	bool is_vf = false;
	u32 dp_module = 0;
	u8 dp_level = 0;

	switch ((enum qede_pci_private)id->driver_data) {
	case QEDE_PRIVATE_VF:
		if (debug & QED_LOG_VERBOSE_MASK)
			dev_err(&pdev->dev, "Probing a VF\n");
		is_vf = true;
		break;
	default:
		if (debug & QED_LOG_VERBOSE_MASK)
			dev_err(&pdev->dev, "Probing a PF\n");
	}

	qede_config_debug(debug, &dp_module, &dp_level);

	return __qede_probe(pdev, dp_module, dp_level, is_vf,
			    QEDE_PROBE_NORMAL);
}

enum qede_remove_mode {
	QEDE_REMOVE_NORMAL,
	QEDE_REMOVE_RECOVERY,
};

static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct qede_dev *edev;
	struct qed_dev *cdev;

	if (!ndev) {
		dev_info(&pdev->dev, "Device has already been removed\n");
		return;
	}

	edev = netdev_priv(ndev);
	cdev = edev->cdev;

	DP_INFO(edev, "Starting qede_remove\n");

	qede_rdma_dev_remove(edev, (mode == QEDE_REMOVE_RECOVERY));

	if (mode != QEDE_REMOVE_RECOVERY) {
		set_bit(QEDE_SP_DISABLE, &edev->sp_flags);
		unregister_netdev(ndev);

		cancel_delayed_work_sync(&edev->sp_task);

		edev->ops->common->set_power_state(cdev, PCI_D0);

		pci_set_drvdata(pdev, NULL);
	}

	qede_ptp_disable(edev);

	/* Use global ops since we've freed edev */
	qed_ops->common->slowpath_stop(cdev);
	if (system_state == SYSTEM_POWER_OFF)
		return;

	if (mode != QEDE_REMOVE_RECOVERY && edev->devlink) {
		qed_ops->common->devlink_unregister(edev->devlink);
		edev->devlink = NULL;
	}
	qed_ops->common->remove(cdev);
	edev->cdev = NULL;

	/* Since this can happen out-of-sync with other flows,
	 * don't release the netdevice until after slowpath stop
	 * has been called to guarantee various other contexts
	 * [e.g., QED register callbacks] won't break anything when
	 * accessing the netdevice.
	 */
	if (mode != QEDE_REMOVE_RECOVERY)
		free_netdev(ndev);

	dev_info(&pdev->dev, "Ending qede_remove successfully\n");
}

static void qede_remove(struct pci_dev *pdev)
{
	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
}

static void qede_shutdown(struct pci_dev *pdev)
{
	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
}

/* -------------------------------------------------------------------------
 * START OF LOAD / UNLOAD
 * -------------------------------------------------------------------------
 */

static int qede_set_num_queues(struct qede_dev *edev)
{
	int rc;
	u16 rss_num;

	/* Setup queues according to possible resources */
	if (edev->req_queues)
		rss_num = edev->req_queues;
	else
		rss_num = netif_get_num_default_rss_queues() *
			  edev->dev_info.common.num_hwfns;

	rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);

	rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
	if (rc > 0) {
		/* Managed to request interrupts for our queues */
		edev->num_queues = rc;
		DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
			QEDE_QUEUE_CNT(edev), rss_num);
		rc = 0;
	}

	edev->fp_num_tx = edev->req_num_tx;
	edev->fp_num_rx = edev->req_num_rx;

	return rc;
}

static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
			     u16 sb_id)
{
	if (sb_info->sb_virt) {
		edev->ops->common->sb_release(edev->cdev, sb_info, sb_id,
					      QED_SB_TYPE_L2_QUEUE);
		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
				  (void *)sb_info->sb_virt, sb_info->sb_phys);
		memset(sb_info, 0, sizeof(*sb_info));
	}
}

/* This function allocates fast-path status block memory */
static int qede_alloc_mem_sb(struct qede_dev *edev,
			     struct qed_sb_info *sb_info, u16 sb_id)
{
	struct status_block_e4 *sb_virt;
	dma_addr_t sb_phys;
	int rc;

	sb_virt = dma_alloc_coherent(&edev->pdev->dev,
				     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
	if (!sb_virt) {
		DP_ERR(edev, "Status block allocation failed\n");
		return -ENOMEM;
	}

	rc = edev->ops->common->sb_init(edev->cdev, sb_info,
					sb_virt, sb_phys, sb_id,
					QED_SB_TYPE_L2_QUEUE);
	if (rc) {
		DP_ERR(edev, "Status block initialization failed\n");
		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
				  sb_virt, sb_phys);
		return rc;
	}

	return 0;
}

static void qede_free_rx_buffers(struct qede_dev *edev,
				 struct qede_rx_queue *rxq)
{
	u16 i;

	for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
		struct sw_rx_data *rx_buf;
		struct page *data;

		rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
		data = rx_buf->data;

		dma_unmap_page(&edev->pdev->dev,
			       rx_buf->mapping, PAGE_SIZE, rxq->data_direction);

		rx_buf->data = NULL;
		__free_page(data);
	}
}

static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	/* Free rx buffers */
	qede_free_rx_buffers(edev, rxq);

	/* Free the parallel SW ring */
	kfree(rxq->sw_rx_ring);

	/* Free the real RQ ring used by FW */
	edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
	edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
}

static void qede_set_tpa_param(struct qede_rx_queue *rxq)
{
	int i;

	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];

		tpa_info->state = QEDE_AGG_STATE_NONE;
	}
}

/* This function allocates all memory needed per Rx queue */
static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	struct qed_chain_init_params params = {
		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
		.num_elems	= RX_RING_SIZE,
	};
	struct qed_dev *cdev = edev->cdev;
	int i, rc, size;

	rxq->num_rx_buffers = edev->q_num_rx_buffers;

	rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;

	rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : NET_SKB_PAD;
	size = rxq->rx_headroom +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* Make sure that the headroom and payload fit in a single page */
	if (rxq->rx_buf_size + size > PAGE_SIZE)
		rxq->rx_buf_size = PAGE_SIZE - size;

	/* Segment size to split a page into multiple equal parts,
	 * unless XDP is used, in which case we'd use the entire page.
	 */
	if (!edev->xdp_prog) {
		size = size + rxq->rx_buf_size;
		rxq->rx_buf_seg_size = roundup_pow_of_two(size);
	} else {
		rxq->rx_buf_seg_size = PAGE_SIZE;
		edev->ndev->features &= ~NETIF_F_GRO_HW;
	}
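	/* Illustrative arithmetic (assuming 4K pages, a 1500-byte MTU and the
	 * default NET_SKB_PAD headroom): the per-buffer overhead plus
	 * rx_buf_size rounds up to 2K, so each page is split into two
	 * Rx buffers.
	 */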
1503 
1504 	/* Allocate the parallel driver ring for Rx buffers */
1505 	size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
1506 	rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
1507 	if (!rxq->sw_rx_ring) {
1508 		DP_ERR(edev, "Rx buffers ring allocation failed\n");
1509 		rc = -ENOMEM;
1510 		goto err;
1511 	}
1512 
1513 	/* Allocate FW Rx ring  */
1514 	params.mode = QED_CHAIN_MODE_NEXT_PTR;
1515 	params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
1516 	params.elem_size = sizeof(struct eth_rx_bd);
1517 
1518 	rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_bd_ring, &params);
1519 	if (rc)
1520 		goto err;
1521 
1522 	/* Allocate FW completion ring */
1523 	params.mode = QED_CHAIN_MODE_PBL;
1524 	params.intended_use = QED_CHAIN_USE_TO_CONSUME;
1525 	params.elem_size = sizeof(union eth_rx_cqe);
1526 
1527 	rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_comp_ring, &params);
1528 	if (rc)
1529 		goto err;
1530 
1531 	/* Allocate buffers for the Rx ring */
1532 	rxq->filled_buffers = 0;
1533 	for (i = 0; i < rxq->num_rx_buffers; i++) {
1534 		rc = qede_alloc_rx_buffer(rxq, false);
1535 		if (rc) {
1536 			DP_ERR(edev,
1537 			       "Rx buffers allocation failed at index %d\n", i);
1538 			goto err;
1539 		}
1540 	}
1541 
1542 	edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
1543 	if (!edev->gro_disable)
1544 		qede_set_tpa_param(rxq);
1545 err:
1546 	return rc;
1547 }
1548 
qede_free_mem_txq(struct qede_dev * edev,struct qede_tx_queue * txq)1549 static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
1550 {
1551 	/* Free the parallel SW ring */
1552 	if (txq->is_xdp)
1553 		kfree(txq->sw_tx_ring.xdp);
1554 	else
1555 		kfree(txq->sw_tx_ring.skbs);
1556 
1557 	/* Free the real RQ ring used by FW */
1558 	edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
1559 }
1560 
1561 /* This function allocates all memory needed per Tx queue */
qede_alloc_mem_txq(struct qede_dev * edev,struct qede_tx_queue * txq)1562 static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
1563 {
1564 	struct qed_chain_init_params params = {
1565 		.mode		= QED_CHAIN_MODE_PBL,
1566 		.intended_use	= QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1567 		.cnt_type	= QED_CHAIN_CNT_TYPE_U16,
1568 		.num_elems	= edev->q_num_tx_buffers,
1569 		.elem_size	= sizeof(union eth_tx_bd_types),
1570 	};
1571 	int size, rc;
1572 
1573 	txq->num_tx_buffers = edev->q_num_tx_buffers;
1574 
1575 	/* Allocate the parallel driver ring for Tx buffers */
1576 	if (txq->is_xdp) {
1577 		size = sizeof(*txq->sw_tx_ring.xdp) * txq->num_tx_buffers;
1578 		txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL);
1579 		if (!txq->sw_tx_ring.xdp)
1580 			goto err;
1581 	} else {
1582 		size = sizeof(*txq->sw_tx_ring.skbs) * txq->num_tx_buffers;
1583 		txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL);
1584 		if (!txq->sw_tx_ring.skbs)
1585 			goto err;
1586 	}
1587 
1588 	rc = edev->ops->common->chain_alloc(edev->cdev, &txq->tx_pbl, &params);
1589 	if (rc)
1590 		goto err;
1591 
1592 	return 0;
1593 
1594 err:
1595 	qede_free_mem_txq(edev, txq);
1596 	return -ENOMEM;
1597 }
1598 
1599 /* This function frees all memory of a single fp */
qede_free_mem_fp(struct qede_dev * edev,struct qede_fastpath * fp)1600 static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
1601 {
1602 	qede_free_mem_sb(edev, fp->sb_info, fp->id);
1603 
1604 	if (fp->type & QEDE_FASTPATH_RX)
1605 		qede_free_mem_rxq(edev, fp->rxq);
1606 
1607 	if (fp->type & QEDE_FASTPATH_XDP)
1608 		qede_free_mem_txq(edev, fp->xdp_tx);
1609 
1610 	if (fp->type & QEDE_FASTPATH_TX) {
1611 		int cos;
1612 
1613 		for_each_cos_in_txq(edev, cos)
1614 			qede_free_mem_txq(edev, &fp->txq[cos]);
1615 	}
1616 }
1617 
1618 /* This function allocates all memory needed for a single fp (i.e. an entity
1619  * which contains status block, one rx queue and/or multiple per-TC tx queues.
1620  */
qede_alloc_mem_fp(struct qede_dev * edev,struct qede_fastpath * fp)1621 static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
1622 {
1623 	int rc = 0;
1624 
1625 	rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
1626 	if (rc)
1627 		goto out;
1628 
1629 	if (fp->type & QEDE_FASTPATH_RX) {
1630 		rc = qede_alloc_mem_rxq(edev, fp->rxq);
1631 		if (rc)
1632 			goto out;
1633 	}
1634 
1635 	if (fp->type & QEDE_FASTPATH_XDP) {
1636 		rc = qede_alloc_mem_txq(edev, fp->xdp_tx);
1637 		if (rc)
1638 			goto out;
1639 	}
1640 
1641 	if (fp->type & QEDE_FASTPATH_TX) {
1642 		int cos;
1643 
1644 		for_each_cos_in_txq(edev, cos) {
1645 			rc = qede_alloc_mem_txq(edev, &fp->txq[cos]);
1646 			if (rc)
1647 				goto out;
1648 		}
1649 	}
1650 
1651 out:
1652 	return rc;
1653 }
1654 
static void qede_free_mem_load(struct qede_dev *edev)
{
	int i;

	for_each_queue(i) {
		struct qede_fastpath *fp = &edev->fp_array[i];

		qede_free_mem_fp(edev, fp);
	}
}

/* This function allocates all qede memory at NIC load. */
static int qede_alloc_mem_load(struct qede_dev *edev)
{
	int rc = 0, queue_id;

	for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) {
		struct qede_fastpath *fp = &edev->fp_array[queue_id];

		rc = qede_alloc_mem_fp(edev, fp);
		if (rc) {
			DP_ERR(edev,
			       "Failed to allocate memory for fastpath - rss id = %d\n",
			       queue_id);
			qede_free_mem_load(edev);
			return rc;
		}
	}

	return 0;
}

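/* Walk a Tx queue from consumer to producer and release every pending
 * packet, then report the freed packets/bytes to the stack via BQL.
 */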
static void qede_empty_tx_queue(struct qede_dev *edev,
				struct qede_tx_queue *txq)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct netdev_queue *netdev_txq;
	int rc, len = 0;

	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);

	while (qed_chain_get_cons_idx(&txq->tx_pbl) !=
	       qed_chain_get_prod_idx(&txq->tx_pbl)) {
		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
			   "Freeing a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
			   txq->index, qed_chain_get_cons_idx(&txq->tx_pbl),
			   qed_chain_get_prod_idx(&txq->tx_pbl));

		rc = qede_free_tx_pkt(edev, txq, &len);
		if (rc) {
			DP_NOTICE(edev,
				  "Failed to free a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
				  txq->index,
				  qed_chain_get_cons_idx(&txq->tx_pbl),
				  qed_chain_get_prod_idx(&txq->tx_pbl));
			break;
		}

		bytes_compl += len;
		pkts_compl++;
		txq->sw_tx_cons++;
	}

	netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
}

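/* Empty every Tx queue, on all traffic classes, of the device */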
static void qede_empty_tx_queues(struct qede_dev *edev)
{
	int i;

	for_each_queue(i)
		if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos) {
				struct qede_fastpath *fp;

				fp = &edev->fp_array[i];
				qede_empty_tx_queue(edev,
						    &fp->txq[cos]);
			}
		}
}

/* This function inits fp content and resets the SB, RXQ and TXQ structures */
static void qede_init_fp(struct qede_dev *edev)
{
	int queue_id, rxq_index = 0, txq_index = 0;
	struct qede_fastpath *fp;
	bool init_xdp = false;

	for_each_queue(queue_id) {
		fp = &edev->fp_array[queue_id];

		fp->edev = edev;
		fp->id = queue_id;

		if (fp->type & QEDE_FASTPATH_XDP) {
			fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev,
								rxq_index);
			fp->xdp_tx->is_xdp = 1;

			spin_lock_init(&fp->xdp_tx->xdp_tx_lock);
			init_xdp = true;
		}

		if (fp->type & QEDE_FASTPATH_RX) {
			fp->rxq->rxq_id = rxq_index++;

			/* Determine how to map buffers for this queue */
			if (fp->type & QEDE_FASTPATH_XDP)
				fp->rxq->data_direction = DMA_BIDIRECTIONAL;
			else
				fp->rxq->data_direction = DMA_FROM_DEVICE;
			fp->rxq->dev = &edev->pdev->dev;

			/* Driver has no error path from here */
			WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev,
						 fp->rxq->rxq_id) < 0);

			if (xdp_rxq_info_reg_mem_model(&fp->rxq->xdp_rxq,
						       MEM_TYPE_PAGE_ORDER0,
						       NULL)) {
				DP_NOTICE(edev,
					  "Failed to register XDP memory model\n");
			}
		}

		if (fp->type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos) {
				struct qede_tx_queue *txq = &fp->txq[cos];
				u16 ndev_tx_id;

				txq->cos = cos;
				txq->index = txq_index;
				ndev_tx_id = QEDE_TXQ_TO_NDEV_TXQ_ID(edev, txq);
				txq->ndev_txq_id = ndev_tx_id;

				if (edev->dev_info.is_legacy)
					txq->is_legacy = true;
				txq->dev = &edev->pdev->dev;
			}

			txq_index++;
		}

		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 edev->ndev->name, queue_id);
	}

	if (init_xdp) {
		edev->total_xdp_queues = QEDE_RSS_COUNT(edev);
		DP_INFO(edev, "Total XDP queues: %u\n", edev->total_xdp_queues);
	}
}

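/* Publish the actual Rx/Tx queue counts to the net core; the Tx count
 * accounts for all traffic classes.
 */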
static int qede_set_real_num_queues(struct qede_dev *edev)
{
	int rc = 0;

	rc = netif_set_real_num_tx_queues(edev->ndev,
					  QEDE_TSS_COUNT(edev) *
					  edev->dev_info.num_tc);
	if (rc) {
		DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev));
	if (rc) {
		DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
		return rc;
	}

	return 0;
}

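/* Disable and delete the per-fastpath NAPI contexts */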
static void qede_napi_disable_remove(struct qede_dev *edev)
{
	int i;

	for_each_queue(i) {
		napi_disable(&edev->fp_array[i].napi);

		netif_napi_del(&edev->fp_array[i].napi);
	}
}

static void qede_napi_add_enable(struct qede_dev *edev)
{
	int i;

	/* Add NAPI objects */
	for_each_queue(i) {
		netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
			       qede_poll, NAPI_POLL_WEIGHT);
		napi_enable(&edev->fp_array[i].napi);
	}
}

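/* Quiesce and release the fastpath interrupts - for MSI-X, synchronize and
 * free each vector; otherwise, clean the SIMD handlers in qed.
 */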
static void qede_sync_free_irqs(struct qede_dev *edev)
{
	int i;

	for (i = 0; i < edev->int_info.used_cnt; i++) {
		if (edev->int_info.msix_cnt) {
			synchronize_irq(edev->int_info.msix[i].vector);
			free_irq(edev->int_info.msix[i].vector,
				 &edev->fp_array[i]);
		} else {
			edev->ops->common->simd_handler_clean(edev->cdev, i);
		}
	}

	edev->int_info.used_cnt = 0;
	edev->int_info.msix_cnt = 0;
}

static int qede_req_msix_irqs(struct qede_dev *edev)
{
	int i, rc;

	/* Sanity check - the number of prepared RSS queues must not exceed
	 * the number of available MSI-X vectors.
	 */
	if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) {
		DP_ERR(edev,
		       "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
		       QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt);
		return -EINVAL;
	}

	for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
#ifdef CONFIG_RFS_ACCEL
		struct qede_fastpath *fp = &edev->fp_array[i];

		if (edev->ndev->rx_cpu_rmap && (fp->type & QEDE_FASTPATH_RX)) {
			rc = irq_cpu_rmap_add(edev->ndev->rx_cpu_rmap,
					      edev->int_info.msix[i].vector);
			if (rc) {
				DP_ERR(edev, "Failed to add CPU rmap\n");
				qede_free_arfs(edev);
			}
		}
#endif
		rc = request_irq(edev->int_info.msix[i].vector,
				 qede_msix_fp_int, 0, edev->fp_array[i].name,
				 &edev->fp_array[i]);
		if (rc) {
			DP_ERR(edev, "Request fp %d irq failed\n", i);
			qede_sync_free_irqs(edev);
			return rc;
		}
		DP_VERBOSE(edev, NETIF_MSG_INTR,
			   "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
			   edev->fp_array[i].name, i,
			   &edev->fp_array[i]);
		edev->int_info.used_cnt++;
	}

	return 0;
}

static void qede_simd_fp_handler(void *cookie)
{
	struct qede_fastpath *fp = (struct qede_fastpath *)cookie;

	napi_schedule_irqoff(&fp->napi);
}

static int qede_setup_irqs(struct qede_dev *edev)
{
	int i, rc = 0;

	/* Learn the interrupt configuration */
	rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
	if (rc)
		return rc;

	if (edev->int_info.msix_cnt) {
		rc = qede_req_msix_irqs(edev);
		if (rc)
			return rc;
		edev->ndev->irq = edev->int_info.msix[0].vector;
	} else {
		const struct qed_common_ops *ops;

		/* qed should learn to receive the RSS ids and callbacks */
		ops = edev->ops->common;
		for (i = 0; i < QEDE_QUEUE_CNT(edev); i++)
			ops->simd_handler_config(edev->cdev,
						 &edev->fp_array[i], i,
						 qede_simd_fp_handler);
		edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev);
	}
	return 0;
}

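/* Poll in 1-2ms steps, up to 1000 iterations, until the SW consumer catches
 * up with the producer. If the queue is stuck and draining is allowed,
 * request a drain from the MCP once and retry.
 */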
static int qede_drain_txq(struct qede_dev *edev,
			  struct qede_tx_queue *txq, bool allow_drain)
{
	int rc, cnt = 1000;

	while (txq->sw_tx_cons != txq->sw_tx_prod) {
		if (!cnt) {
			if (allow_drain) {
				DP_NOTICE(edev,
					  "Tx queue[%d] is stuck, requesting MCP to drain\n",
					  txq->index);
				rc = edev->ops->common->drain(edev->cdev);
				if (rc)
					return rc;
				return qede_drain_txq(edev, txq, false);
			}
			DP_NOTICE(edev,
				  "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
				  txq->index, txq->sw_tx_prod,
				  txq->sw_tx_cons);
			return -ENODEV;
		}
		cnt--;
		usleep_range(1000, 2000);
		barrier();
	}

	/* FW finished processing, wait for HW to transmit all tx packets */
	usleep_range(1000, 2000);

	return 0;
}

static int qede_stop_txq(struct qede_dev *edev,
			 struct qede_tx_queue *txq, int rss_id)
{
	/* delete doorbell from doorbell recovery mechanism */
	edev->ops->common->db_recovery_del(edev->cdev, txq->doorbell_addr,
					   &txq->tx_db);

	return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle);
}

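/* Tear down the datapath - deactivate the vport, drain all Tx queues, stop
 * all queues in reverse order and finally stop the vport itself.
 */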
static int qede_stop_queues(struct qede_dev *edev)
{
	struct qed_update_vport_params *vport_update_params;
	struct qed_dev *cdev = edev->cdev;
	struct qede_fastpath *fp;
	int rc, i;

	/* Disable the vport */
	vport_update_params = vzalloc(sizeof(*vport_update_params));
	if (!vport_update_params)
		return -ENOMEM;

	vport_update_params->vport_id = 0;
	vport_update_params->update_vport_active_flg = 1;
	vport_update_params->vport_active_flg = 0;
	vport_update_params->update_rss_flg = 0;

	rc = edev->ops->vport_update(cdev, vport_update_params);
	vfree(vport_update_params);

	if (rc) {
		DP_ERR(edev, "Failed to update vport\n");
		return rc;
	}

	/* Flush Tx queues. If needed, request drain from MCP */
	for_each_queue(i) {
		fp = &edev->fp_array[i];

		if (fp->type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos) {
				rc = qede_drain_txq(edev, &fp->txq[cos], true);
				if (rc)
					return rc;
			}
		}

		if (fp->type & QEDE_FASTPATH_XDP) {
			rc = qede_drain_txq(edev, fp->xdp_tx, true);
			if (rc)
				return rc;
		}
	}

	/* Stop all Queues in reverse order */
	for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
		fp = &edev->fp_array[i];

		/* Stop the Tx Queue(s) */
		if (fp->type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos) {
				rc = qede_stop_txq(edev, &fp->txq[cos], i);
				if (rc)
					return rc;
			}
		}

		/* Stop the Rx Queue */
		if (fp->type & QEDE_FASTPATH_RX) {
			rc = edev->ops->q_rx_stop(cdev, i, fp->rxq->handle);
			if (rc) {
				DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
				return rc;
			}
		}

		/* Stop the XDP forwarding queue */
		if (fp->type & QEDE_FASTPATH_XDP) {
			rc = qede_stop_txq(edev, fp->xdp_tx, i);
			if (rc)
				return rc;

			bpf_prog_put(fp->rxq->xdp_prog);
		}
	}

	/* Stop the vport */
	rc = edev->ops->vport_stop(cdev, 0);
	if (rc)
		DP_ERR(edev, "Failed to stop VPORT\n");

	return rc;
}

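/* Start a Tx (or XDP forwarding) queue in FW/HW, store the returned
 * doorbell address and queue handle, prepare the doorbell data and
 * register the doorbell with the recovery mechanism.
 */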
static int qede_start_txq(struct qede_dev *edev,
			  struct qede_fastpath *fp,
			  struct qede_tx_queue *txq, u8 rss_id, u16 sb_idx)
{
	dma_addr_t phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
	u32 page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
	struct qed_queue_start_common_params params;
	struct qed_txq_start_ret_params ret_params;
	int rc;

	memset(&params, 0, sizeof(params));
	memset(&ret_params, 0, sizeof(ret_params));

	/* Let the XDP queue share the queue-zone with one of the regular
	 * txqs. We don't really care about its coalescing.
	 */
	if (txq->is_xdp)
		params.queue_id = QEDE_TXQ_XDP_TO_IDX(edev, txq);
	else
		params.queue_id = txq->index;

	params.p_sb = fp->sb_info;
	params.sb_idx = sb_idx;
	params.tc = txq->cos;

	rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
				   page_cnt, &ret_params);
	if (rc) {
		DP_ERR(edev, "Start TXQ #%d failed %d\n", txq->index, rc);
		return rc;
	}

	txq->doorbell_addr = ret_params.p_doorbell;
	txq->handle = ret_params.p_handle;

	/* Determine the FW consumer address associated */
	txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[sb_idx];

	/* Prepare the doorbell parameters */
	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_ETH_TX_BD_PROD_CMD);
	txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;

	/* register doorbell with doorbell recovery mechanism */
	rc = edev->ops->common->db_recovery_add(edev->cdev, txq->doorbell_addr,
						&txq->tx_db, DB_REC_WIDTH_32B,
						DB_REC_KERNEL);

	return rc;
}

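/* Start the vport and all Rx/XDP/Tx queues, then activate the vport with
 * the RSS and Tx-switching configuration.
 */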
static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
{
	int vlan_removal_en = 1;
	struct qed_dev *cdev = edev->cdev;
	struct qed_dev_info *qed_info = &edev->dev_info.common;
	struct qed_update_vport_params *vport_update_params;
	struct qed_queue_start_common_params q_params;
	struct qed_start_vport_params start = {0};
	int rc, i;

	if (!edev->num_queues) {
		DP_ERR(edev,
		       "Cannot update V-VPORT as active as there are no Rx queues\n");
		return -EINVAL;
	}

	vport_update_params = vzalloc(sizeof(*vport_update_params));
	if (!vport_update_params)
		return -ENOMEM;

	start.handle_ptp_pkts = !!(edev->ptp);
	start.gro_enable = !edev->gro_disable;
	start.mtu = edev->ndev->mtu;
	start.vport_id = 0;
	start.drop_ttl0 = true;
	start.remove_inner_vlan = vlan_removal_en;
	start.clear_stats = clear_stats;

	rc = edev->ops->vport_start(cdev, &start);

	if (rc) {
		DP_ERR(edev, "Start V-PORT failed %d\n", rc);
		goto out;
	}

	DP_VERBOSE(edev, NETIF_MSG_IFUP,
		   "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
		   start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);

	for_each_queue(i) {
		struct qede_fastpath *fp = &edev->fp_array[i];
		dma_addr_t p_phys_table;
		u32 page_cnt;

		if (fp->type & QEDE_FASTPATH_RX) {
			struct qed_rxq_start_ret_params ret_params;
			struct qede_rx_queue *rxq = fp->rxq;
			__le16 *val;

			memset(&ret_params, 0, sizeof(ret_params));
			memset(&q_params, 0, sizeof(q_params));
			q_params.queue_id = rxq->rxq_id;
			q_params.vport_id = 0;
			q_params.p_sb = fp->sb_info;
			q_params.sb_idx = RX_PI;

			p_phys_table =
			    qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
			page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);

			rc = edev->ops->q_rx_start(cdev, i, &q_params,
						   rxq->rx_buf_size,
						   rxq->rx_bd_ring.p_phys_addr,
						   p_phys_table,
						   page_cnt, &ret_params);
			if (rc) {
				DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
				       rc);
				goto out;
			}

			/* Use the return parameters */
			rxq->hw_rxq_prod_addr = ret_params.p_prod;
			rxq->handle = ret_params.p_handle;

			val = &fp->sb_info->sb_virt->pi_array[RX_PI];
			rxq->hw_cons_ptr = val;

			qede_update_rx_prod(edev, rxq);
		}

		if (fp->type & QEDE_FASTPATH_XDP) {
			rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI);
			if (rc)
				goto out;

			bpf_prog_add(edev->xdp_prog, 1);
			fp->rxq->xdp_prog = edev->xdp_prog;
		}

		if (fp->type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos) {
				rc = qede_start_txq(edev, fp, &fp->txq[cos], i,
						    TX_PI(cos));
				if (rc)
					goto out;
			}
		}
	}

	/* Prepare and send the vport enable */
	vport_update_params->vport_id = start.vport_id;
	vport_update_params->update_vport_active_flg = 1;
	vport_update_params->vport_active_flg = 1;

	if ((qed_info->b_inter_pf_switch || pci_num_vf(edev->pdev)) &&
	    qed_info->tx_switching) {
		vport_update_params->update_tx_switching_flg = 1;
		vport_update_params->tx_switching_flg = 1;
	}

	qede_fill_rss_params(edev, &vport_update_params->rss_params,
			     &vport_update_params->update_rss_flg);

	rc = edev->ops->vport_update(cdev, vport_update_params);
	if (rc)
		DP_ERR(edev, "Update V-PORT failed %d\n", rc);

out:
	vfree(vport_update_params);
	return rc;
}

enum qede_unload_mode {
	QEDE_UNLOAD_NORMAL,
	QEDE_UNLOAD_RECOVERY,
};

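/* Bring the interface down - stop the datapath, release the interrupts and
 * NAPI contexts, and free all fastpath memory. In recovery mode, the Tx
 * queues are emptied in SW instead of being stopped through FW.
 */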
static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
			bool is_locked)
{
	struct qed_link_params link_params;
	int rc;

	DP_INFO(edev, "Starting qede unload\n");

	if (!is_locked)
		__qede_lock(edev);

	clear_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);

	if (mode != QEDE_UNLOAD_RECOVERY)
		edev->state = QEDE_STATE_CLOSED;

	qede_rdma_dev_event_close(edev);

	/* Close OS Tx */
	netif_tx_disable(edev->ndev);
	netif_carrier_off(edev->ndev);

	if (mode != QEDE_UNLOAD_RECOVERY) {
		/* Reset the link */
		memset(&link_params, 0, sizeof(link_params));
		link_params.link_up = false;
		edev->ops->common->set_link(edev->cdev, &link_params);

		rc = qede_stop_queues(edev);
		if (rc) {
			qede_sync_free_irqs(edev);
			goto out;
		}

		DP_INFO(edev, "Stopped Queues\n");
	}

	qede_vlan_mark_nonconfigured(edev);
	edev->ops->fastpath_stop(edev->cdev);

	if (edev->dev_info.common.b_arfs_capable) {
		qede_poll_for_freeing_arfs_filters(edev);
		qede_free_arfs(edev);
	}

	/* Release the interrupts */
	qede_sync_free_irqs(edev);
	edev->ops->common->set_fp_int(edev->cdev, 0);

	qede_napi_disable_remove(edev);

	if (mode == QEDE_UNLOAD_RECOVERY)
		qede_empty_tx_queues(edev);

	qede_free_mem_load(edev);
	qede_free_fp_array(edev);

out:
	if (!is_locked)
		__qede_unlock(edev);

	if (mode != QEDE_UNLOAD_RECOVERY)
		DP_NOTICE(edev, "Link is down\n");

	edev->ptp_skip_txts = 0;

	DP_INFO(edev, "Ending qede unload\n");
}

enum qede_load_mode {
	QEDE_LOAD_NORMAL,
	QEDE_LOAD_RELOAD,
	QEDE_LOAD_RECOVERY,
};

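/* Bring the interface up - allocate and init all fastpath resources, set up
 * IRQs and NAPI, start the queues and request link-up.
 */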
static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
		     bool is_locked)
{
	struct qed_link_params link_params;
	u8 num_tc;
	int rc;

	DP_INFO(edev, "Starting qede load\n");

	if (!is_locked)
		__qede_lock(edev);

	rc = qede_set_num_queues(edev);
	if (rc)
		goto out;

	rc = qede_alloc_fp_array(edev);
	if (rc)
		goto out;

	qede_init_fp(edev);

	rc = qede_alloc_mem_load(edev);
	if (rc)
		goto err1;
	DP_INFO(edev, "Allocated %d Rx, %d Tx queues\n",
		QEDE_RSS_COUNT(edev), QEDE_TSS_COUNT(edev));

	rc = qede_set_real_num_queues(edev);
	if (rc)
		goto err2;

	if (qede_alloc_arfs(edev)) {
		edev->ndev->features &= ~NETIF_F_NTUPLE;
		edev->dev_info.common.b_arfs_capable = false;
	}

	qede_napi_add_enable(edev);
	DP_INFO(edev, "Napi added and enabled\n");

	rc = qede_setup_irqs(edev);
	if (rc)
		goto err3;
	DP_INFO(edev, "Setup IRQs succeeded\n");

	rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
	if (rc)
		goto err4;
	DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");

	num_tc = netdev_get_num_tc(edev->ndev);
	num_tc = num_tc ? num_tc : edev->dev_info.num_tc;
	qede_setup_tc(edev->ndev, num_tc);

	/* Program un-configured VLANs */
	qede_configure_vlan_filters(edev);

	set_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);

	/* Ask for link-up using current configuration */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = true;
	edev->ops->common->set_link(edev->cdev, &link_params);

	edev->state = QEDE_STATE_OPEN;

	DP_INFO(edev, "Ending qede load successfully\n");

	goto out;
err4:
	qede_sync_free_irqs(edev);
err3:
	qede_napi_disable_remove(edev);
err2:
	qede_free_mem_load(edev);
err1:
	edev->ops->common->set_fp_int(edev->cdev, 0);
	qede_free_fp_array(edev);
	edev->num_queues = 0;
	edev->fp_num_tx = 0;
	edev->fp_num_rx = 0;
out:
	if (!is_locked)
		__qede_unlock(edev);

	return rc;
}

/* 'func' should be able to run between unload and reload assuming the
 * interface is actually running, or afterwards in case it's currently DOWN.
 */
void qede_reload(struct qede_dev *edev,
		 struct qede_reload_args *args, bool is_locked)
{
	if (!is_locked)
		__qede_lock(edev);

	/* Since qede_lock is held, the internal state wouldn't change even
	 * if the netdev state were to start transitioning. Check whether the
	 * current internal configuration indicates the device is up, then
	 * reload.
	 */
	if (edev->state == QEDE_STATE_OPEN) {
		qede_unload(edev, QEDE_UNLOAD_NORMAL, true);
		if (args)
			args->func(edev, args);
		qede_load(edev, QEDE_LOAD_RELOAD, true);

		/* Since no one is going to do it for us, re-configure */
		qede_config_rx_mode(edev->ndev);
	} else if (args) {
		args->func(edev, args);
	}

	if (!is_locked)
		__qede_unlock(edev);
}

/* called with rtnl_lock */
static int qede_open(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);
	int rc;

	netif_carrier_off(ndev);

	edev->ops->common->set_power_state(edev->cdev, PCI_D0);

	rc = qede_load(edev, QEDE_LOAD_NORMAL, false);
	if (rc)
		return rc;

	udp_tunnel_nic_reset_ntf(ndev);

	edev->ops->common->update_drv_state(edev->cdev, true);

	return 0;
}

static int qede_close(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	qede_unload(edev, QEDE_UNLOAD_NORMAL, false);

	if (edev->cdev)
		edev->ops->common->update_drv_state(edev->cdev, false);

	return 0;
}

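/* Handle a link state change notification - propagate the carrier and Tx
 * queue state to the net core and notify the RDMA driver.
 */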
static void qede_link_update(void *dev, struct qed_link_output *link)
{
	struct qede_dev *edev = dev;

	if (!test_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags)) {
		DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not ready\n");
		return;
	}

	if (link->link_up) {
		if (!netif_carrier_ok(edev->ndev)) {
			DP_NOTICE(edev, "Link is up\n");
			netif_tx_start_all_queues(edev->ndev);
			netif_carrier_on(edev->ndev);
			qede_rdma_dev_event_open(edev);
		}
	} else {
		if (netif_carrier_ok(edev->ndev)) {
			DP_NOTICE(edev, "Link is down\n");
			netif_tx_disable(edev->ndev);
			netif_carrier_off(edev->ndev);
			qede_rdma_dev_event_close(edev);
		}
	}
}

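/* Schedule the recovery handler to run from the sp_task workqueue, unless a
 * recovery is already in progress.
 */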
static void qede_schedule_recovery_handler(void *dev)
{
	struct qede_dev *edev = dev;

	if (edev->state == QEDE_STATE_RECOVERY) {
		DP_NOTICE(edev,
			  "Avoid scheduling a recovery handler since already in recovery state\n");
		return;
	}

	set_bit(QEDE_SP_RECOVERY, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);

	DP_INFO(edev, "Scheduled a recovery handler\n");
}

static void qede_recovery_failed(struct qede_dev *edev)
{
	netdev_err(edev->ndev, "Recovery handling has failed. Power cycle is needed.\n");

	netif_device_detach(edev->ndev);

	if (edev->cdev)
		edev->ops->common->set_power_state(edev->cdev, PCI_D3hot);
}

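/* Recover the device after a fatal error - unload it if it was open, remove
 * and re-probe it, and then reload it to its previous state.
 */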
static void qede_recovery_handler(struct qede_dev *edev)
{
	u32 curr_state = edev->state;
	int rc;

	DP_NOTICE(edev, "Starting a recovery process\n");

	/* No need to acquire the qede_lock first, since it is taken by
	 * qede_sp_task before calling this function.
	 */
	edev->state = QEDE_STATE_RECOVERY;

	edev->ops->common->recovery_prolog(edev->cdev);

	if (curr_state == QEDE_STATE_OPEN)
		qede_unload(edev, QEDE_UNLOAD_RECOVERY, true);

	__qede_remove(edev->pdev, QEDE_REMOVE_RECOVERY);

	rc = __qede_probe(edev->pdev, edev->dp_module, edev->dp_level,
			  IS_VF(edev), QEDE_PROBE_RECOVERY);
	if (rc) {
		edev->cdev = NULL;
		goto err;
	}

	if (curr_state == QEDE_STATE_OPEN) {
		rc = qede_load(edev, QEDE_LOAD_RECOVERY, true);
		if (rc)
			goto err;

		qede_config_rx_mode(edev->ndev);
		udp_tunnel_nic_reset_ntf(edev->ndev);
	}

	edev->state = curr_state;

	DP_NOTICE(edev, "Recovery handling is done\n");

	return;

err:
	qede_recovery_failed(edev);
}

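/* The non-sleepable part of the HW error handling; safe to run in atomic
 * (e.g. interrupt) context.
 */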
static void qede_atomic_hw_err_handler(struct qede_dev *edev)
{
	struct qed_dev *cdev = edev->cdev;

	DP_NOTICE(edev,
		  "Generic non-sleepable HW error handling started - err_flags 0x%lx\n",
		  edev->err_flags);

	/* Get a call trace of the flow that led to the error */
	WARN_ON(test_bit(QEDE_ERR_WARN, &edev->err_flags));

	/* Prevent HW attentions from being reasserted */
	if (test_bit(QEDE_ERR_ATTN_CLR_EN, &edev->err_flags))
		edev->ops->common->attn_clr_enable(cdev, true);

	DP_NOTICE(edev, "Generic non-sleepable HW error handling is done\n");
}

static void qede_generic_hw_err_handler(struct qede_dev *edev)
{
	DP_NOTICE(edev,
		  "Generic sleepable HW error handling started - err_flags 0x%lx\n",
		  edev->err_flags);

	if (edev->devlink)
		edev->ops->common->report_fatal_error(edev->devlink, edev->last_err_type);

	clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);

	DP_NOTICE(edev, "Generic sleepable HW error handling is done\n");
}

static void qede_set_hw_err_flags(struct qede_dev *edev,
				  enum qed_hw_err_type err_type)
{
	unsigned long err_flags = 0;

	switch (err_type) {
	case QED_HW_ERR_DMAE_FAIL:
		set_bit(QEDE_ERR_WARN, &err_flags);
		fallthrough;
	case QED_HW_ERR_MFW_RESP_FAIL:
	case QED_HW_ERR_HW_ATTN:
	case QED_HW_ERR_RAMROD_FAIL:
	case QED_HW_ERR_FW_ASSERT:
		set_bit(QEDE_ERR_ATTN_CLR_EN, &err_flags);
		set_bit(QEDE_ERR_GET_DBG_INFO, &err_flags);
		break;

	default:
		DP_NOTICE(edev, "Unexpected HW error [%d]\n", err_type);
		break;
	}

	edev->err_flags |= err_flags;
}

static void qede_schedule_hw_err_handler(void *dev,
					 enum qed_hw_err_type err_type)
{
	struct qede_dev *edev = dev;

	/* Fan failure cannot be masked by handling of another HW error or by
	 * a concurrent recovery process.
	 */
	if ((test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
	     edev->state == QEDE_STATE_RECOVERY) &&
	     err_type != QED_HW_ERR_FAN_FAIL) {
		DP_INFO(edev,
			"Avoid scheduling an error handling while another HW error is being handled\n");
		return;
	}

	if (err_type >= QED_HW_ERR_LAST) {
		DP_NOTICE(edev, "Unknown HW error [%d]\n", err_type);
		clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
		return;
	}

	edev->last_err_type = err_type;
	qede_set_hw_err_flags(edev, err_type);
	qede_atomic_hw_err_handler(edev);
	set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);

	DP_INFO(edev, "Scheduled an error handler [err_type %d]\n", err_type);
}

static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	struct netdev_queue *netdev_txq;

	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
	if (netif_xmit_stopped(netdev_txq))
		return true;

	return false;
}

static void qede_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
{
	struct qede_dev *edev = dev;
	struct netdev_hw_addr *ha;
	int i;

	if (edev->ndev->features & NETIF_F_IP_CSUM)
		data->feat_flags |= QED_TLV_IP_CSUM;
	if (edev->ndev->features & NETIF_F_TSO)
		data->feat_flags |= QED_TLV_LSO;

	ether_addr_copy(data->mac[0], edev->ndev->dev_addr);
	eth_zero_addr(data->mac[1]);
	eth_zero_addr(data->mac[2]);
	/* Copy the first two UC macs */
	netif_addr_lock_bh(edev->ndev);
	i = 1;
	netdev_for_each_uc_addr(ha, edev->ndev) {
		ether_addr_copy(data->mac[i++], ha->addr);
		if (i == QED_TLV_MAC_COUNT)
			break;
	}

	netif_addr_unlock_bh(edev->ndev);
}

static void qede_get_eth_tlv_data(void *dev, void *data)
{
	struct qed_mfw_tlv_eth *etlv = data;
	struct qede_dev *edev = dev;
	struct qede_fastpath *fp;
	int i;

	etlv->lso_maxoff_size = 0xFFFF;
	etlv->lso_maxoff_size_set = true;
	etlv->lso_minseg_size = (u16)ETH_TX_LSO_WINDOW_MIN_LEN;
	etlv->lso_minseg_size_set = true;
	etlv->prom_mode = !!(edev->ndev->flags & IFF_PROMISC);
	etlv->prom_mode_set = true;
	etlv->tx_descr_size = QEDE_TSS_COUNT(edev);
	etlv->tx_descr_size_set = true;
	etlv->rx_descr_size = QEDE_RSS_COUNT(edev);
	etlv->rx_descr_size_set = true;
	etlv->iov_offload = QED_MFW_TLV_IOV_OFFLOAD_VEB;
	etlv->iov_offload_set = true;

	/* Fill information regarding queues; should be done under the qede
	 * lock to guarantee those don't change beneath our feet.
	 */
	etlv->txqs_empty = true;
	etlv->rxqs_empty = true;
	etlv->num_txqs_full = 0;
	etlv->num_rxqs_full = 0;

	__qede_lock(edev);
	for_each_queue(i) {
		fp = &edev->fp_array[i];
		if (fp->type & QEDE_FASTPATH_TX) {
			struct qede_tx_queue *txq = QEDE_FP_TC0_TXQ(fp);

			if (txq->sw_tx_cons != txq->sw_tx_prod)
				etlv->txqs_empty = false;
			if (qede_is_txq_full(edev, txq))
				etlv->num_txqs_full++;
		}
		if (fp->type & QEDE_FASTPATH_RX) {
			if (qede_has_rx_work(fp->rxq))
				etlv->rxqs_empty = false;

			/* This one is a bit tricky; the firmware might stop
			 * placing packets if the ring is not yet full.
			 * Give an approximation.
			 */
			if (le16_to_cpu(*fp->rxq->hw_cons_ptr) -
			    qed_chain_get_cons_idx(&fp->rxq->rx_comp_ring) >
			    RX_RING_SIZE - 100)
				etlv->num_rxqs_full++;
		}
	}
	__qede_unlock(edev);

	etlv->txqs_empty_set = true;
	etlv->rxqs_empty_set = true;
	etlv->num_txqs_full_set = true;
	etlv->num_rxqs_full_set = true;
}

/**
 * qede_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t
qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev)
		return PCI_ERS_RESULT_NONE;

	DP_NOTICE(edev, "IO error detected [%d]\n", state);

	__qede_lock(edev);
	if (edev->state == QEDE_STATE_RECOVERY) {
		DP_NOTICE(edev, "Device already in the recovery state\n");
		__qede_unlock(edev);
		return PCI_ERS_RESULT_NONE;
	}

	/* PF handles the recovery of its VFs */
	if (IS_VF(edev)) {
		DP_VERBOSE(edev, QED_MSG_IOV,
			   "VF recovery is handled by its PF\n");
		__qede_unlock(edev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	/* Close OS Tx */
	netif_tx_disable(edev->ndev);
	netif_carrier_off(edev->ndev);

	set_bit(QEDE_SP_AER, &edev->sp_flags);
	schedule_delayed_work(&edev->sp_task, 0);

	__qede_unlock(edev);

	return PCI_ERS_RESULT_CAN_RECOVER;
}