1 /*******************************************************************************
2 
3   Intel PRO/1000 Linux driver
4   Copyright(c) 1999 - 2006 Intel Corporation.
5 
6   This program is free software; you can redistribute it and/or modify it
7   under the terms and conditions of the GNU General Public License,
8   version 2, as published by the Free Software Foundation.
9 
10   This program is distributed in the hope it will be useful, but WITHOUT
11   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13   more details.
14 
15   You should have received a copy of the GNU General Public License along with
16   this program; if not, write to the Free Software Foundation, Inc.,
17   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 
19   The full GNU General Public License is included in this distribution in
20   the file called "COPYING".
21 
22   Contact Information:
23   Linux NICS <linux.nics@intel.com>
24   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 
27 *******************************************************************************/
28 
29 #include "e1000.h"
30 #include <net/ip6_checksum.h>
31 #include <linux/io.h>
32 #include <linux/prefetch.h>
33 #include <linux/bitops.h>
34 #include <linux/if_vlan.h>
35 
36 char e1000_driver_name[] = "e1000";
37 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
38 #define DRV_VERSION "7.3.21-k8-NAPI"
39 const char e1000_driver_version[] = DRV_VERSION;
40 static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
41 
42 /* e1000_pci_tbl - PCI Device ID Table
43  *
44  * Last entry must be all 0s
45  *
46  * Macro expands to...
47  *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
48  */
49 static const struct pci_device_id e1000_pci_tbl[] = {
50 	INTEL_E1000_ETHERNET_DEVICE(0x1000),
51 	INTEL_E1000_ETHERNET_DEVICE(0x1001),
52 	INTEL_E1000_ETHERNET_DEVICE(0x1004),
53 	INTEL_E1000_ETHERNET_DEVICE(0x1008),
54 	INTEL_E1000_ETHERNET_DEVICE(0x1009),
55 	INTEL_E1000_ETHERNET_DEVICE(0x100C),
56 	INTEL_E1000_ETHERNET_DEVICE(0x100D),
57 	INTEL_E1000_ETHERNET_DEVICE(0x100E),
58 	INTEL_E1000_ETHERNET_DEVICE(0x100F),
59 	INTEL_E1000_ETHERNET_DEVICE(0x1010),
60 	INTEL_E1000_ETHERNET_DEVICE(0x1011),
61 	INTEL_E1000_ETHERNET_DEVICE(0x1012),
62 	INTEL_E1000_ETHERNET_DEVICE(0x1013),
63 	INTEL_E1000_ETHERNET_DEVICE(0x1014),
64 	INTEL_E1000_ETHERNET_DEVICE(0x1015),
65 	INTEL_E1000_ETHERNET_DEVICE(0x1016),
66 	INTEL_E1000_ETHERNET_DEVICE(0x1017),
67 	INTEL_E1000_ETHERNET_DEVICE(0x1018),
68 	INTEL_E1000_ETHERNET_DEVICE(0x1019),
69 	INTEL_E1000_ETHERNET_DEVICE(0x101A),
70 	INTEL_E1000_ETHERNET_DEVICE(0x101D),
71 	INTEL_E1000_ETHERNET_DEVICE(0x101E),
72 	INTEL_E1000_ETHERNET_DEVICE(0x1026),
73 	INTEL_E1000_ETHERNET_DEVICE(0x1027),
74 	INTEL_E1000_ETHERNET_DEVICE(0x1028),
75 	INTEL_E1000_ETHERNET_DEVICE(0x1075),
76 	INTEL_E1000_ETHERNET_DEVICE(0x1076),
77 	INTEL_E1000_ETHERNET_DEVICE(0x1077),
78 	INTEL_E1000_ETHERNET_DEVICE(0x1078),
79 	INTEL_E1000_ETHERNET_DEVICE(0x1079),
80 	INTEL_E1000_ETHERNET_DEVICE(0x107A),
81 	INTEL_E1000_ETHERNET_DEVICE(0x107B),
82 	INTEL_E1000_ETHERNET_DEVICE(0x107C),
83 	INTEL_E1000_ETHERNET_DEVICE(0x108A),
84 	INTEL_E1000_ETHERNET_DEVICE(0x1099),
85 	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
86 	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
87 	/* required last entry */
88 	{0,}
89 };
90 
91 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
92 
93 int e1000_up(struct e1000_adapter *adapter);
94 void e1000_down(struct e1000_adapter *adapter);
95 void e1000_reinit_locked(struct e1000_adapter *adapter);
96 void e1000_reset(struct e1000_adapter *adapter);
97 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
98 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
99 void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
100 void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
101 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
102                              struct e1000_tx_ring *txdr);
103 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
104                              struct e1000_rx_ring *rxdr);
105 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
106                              struct e1000_tx_ring *tx_ring);
107 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
108                              struct e1000_rx_ring *rx_ring);
109 void e1000_update_stats(struct e1000_adapter *adapter);
110 
111 static int e1000_init_module(void);
112 static void e1000_exit_module(void);
113 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
114 static void e1000_remove(struct pci_dev *pdev);
115 static int e1000_alloc_queues(struct e1000_adapter *adapter);
116 static int e1000_sw_init(struct e1000_adapter *adapter);
117 static int e1000_open(struct net_device *netdev);
118 static int e1000_close(struct net_device *netdev);
119 static void e1000_configure_tx(struct e1000_adapter *adapter);
120 static void e1000_configure_rx(struct e1000_adapter *adapter);
121 static void e1000_setup_rctl(struct e1000_adapter *adapter);
122 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
123 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
124 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
125                                 struct e1000_tx_ring *tx_ring);
126 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
127                                 struct e1000_rx_ring *rx_ring);
128 static void e1000_set_rx_mode(struct net_device *netdev);
129 static void e1000_update_phy_info_task(struct work_struct *work);
130 static void e1000_watchdog(struct work_struct *work);
131 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
132 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
133 				    struct net_device *netdev);
134 static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
135 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
136 static int e1000_set_mac(struct net_device *netdev, void *p);
137 static irqreturn_t e1000_intr(int irq, void *data);
138 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
139 			       struct e1000_tx_ring *tx_ring);
140 static int e1000_clean(struct napi_struct *napi, int budget);
141 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
142 			       struct e1000_rx_ring *rx_ring,
143 			       int *work_done, int work_to_do);
144 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
145 				     struct e1000_rx_ring *rx_ring,
146 				     int *work_done, int work_to_do);
147 static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
148 					 struct e1000_rx_ring *rx_ring,
149 					 int cleaned_count)
150 {
151 }
152 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
153 				   struct e1000_rx_ring *rx_ring,
154 				   int cleaned_count);
155 static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
156 					 struct e1000_rx_ring *rx_ring,
157 					 int cleaned_count);
158 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
159 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
160 			   int cmd);
161 static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
162 static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
163 static void e1000_tx_timeout(struct net_device *dev);
164 static void e1000_reset_task(struct work_struct *work);
165 static void e1000_smartspeed(struct e1000_adapter *adapter);
166 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
167                                        struct sk_buff *skb);
168 
169 static bool e1000_vlan_used(struct e1000_adapter *adapter);
170 static void e1000_vlan_mode(struct net_device *netdev,
171 			    netdev_features_t features);
172 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
173 				     bool filter_on);
174 static int e1000_vlan_rx_add_vid(struct net_device *netdev,
175 				 __be16 proto, u16 vid);
176 static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
177 				  __be16 proto, u16 vid);
178 static void e1000_restore_vlan(struct e1000_adapter *adapter);
179 
180 #ifdef CONFIG_PM
181 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
182 static int e1000_resume(struct pci_dev *pdev);
183 #endif
184 static void e1000_shutdown(struct pci_dev *pdev);
185 
186 #ifdef CONFIG_NET_POLL_CONTROLLER
187 /* for netdump / net console */
188 static void e1000_netpoll(struct net_device *netdev);
189 #endif
190 
191 #define COPYBREAK_DEFAULT 256
192 static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
193 module_param(copybreak, uint, 0644);
194 MODULE_PARM_DESC(copybreak,
195 	"Maximum size of packet that is copied to a new buffer on receive");
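/* Usage note (illustrative): copybreak can be set at module load time,
 * e.g. "modprobe e1000 copybreak=128", or changed at runtime via
 * /sys/module/e1000/parameters/copybreak, since the 0644 permission above
 * makes the parameter writable by root. Frames no larger than this value
 * are copied into a new, right-sized buffer on receive.
 */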
196 
197 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
198                      pci_channel_state_t state);
199 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
200 static void e1000_io_resume(struct pci_dev *pdev);
201 
202 static const struct pci_error_handlers e1000_err_handler = {
203 	.error_detected = e1000_io_error_detected,
204 	.slot_reset = e1000_io_slot_reset,
205 	.resume = e1000_io_resume,
206 };
207 
208 static struct pci_driver e1000_driver = {
209 	.name     = e1000_driver_name,
210 	.id_table = e1000_pci_tbl,
211 	.probe    = e1000_probe,
212 	.remove   = e1000_remove,
213 #ifdef CONFIG_PM
214 	/* Power Management Hooks */
215 	.suspend  = e1000_suspend,
216 	.resume   = e1000_resume,
217 #endif
218 	.shutdown = e1000_shutdown,
219 	.err_handler = &e1000_err_handler
220 };
221 
222 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
223 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
224 MODULE_LICENSE("GPL");
225 MODULE_VERSION(DRV_VERSION);
226 
227 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
228 static int debug = -1;
229 module_param(debug, int, 0);
230 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
231 
232 /**
233  * e1000_get_hw_dev - return the net_device for this adapter
234  * @hw: board hardware structure; used by the hardware layer when
235  *      printing debugging information
236  **/
237 struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
238 {
239 	struct e1000_adapter *adapter = hw->back;
240 	return adapter->netdev;
241 }
242 
243 /**
244  * e1000_init_module - Driver Registration Routine
245  *
246  * e1000_init_module is the first routine called when the driver is
247  * loaded. All it does is register with the PCI subsystem.
248  **/
249 static int __init e1000_init_module(void)
250 {
251 	int ret;
252 	pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
253 
254 	pr_info("%s\n", e1000_copyright);
255 
256 	ret = pci_register_driver(&e1000_driver);
257 	if (copybreak != COPYBREAK_DEFAULT) {
258 		if (copybreak == 0)
259 			pr_info("copybreak disabled\n");
260 		else
261 			pr_info("copybreak enabled for "
262 				   "packets <= %u bytes\n", copybreak);
263 	}
264 	return ret;
265 }
266 
267 module_init(e1000_init_module);
268 
269 /**
270  * e1000_exit_module - Driver Exit Cleanup Routine
271  *
272  * e1000_exit_module is called just before the driver is removed
273  * from memory.
274  **/
275 static void __exit e1000_exit_module(void)
276 {
277 	pci_unregister_driver(&e1000_driver);
278 }
279 
280 module_exit(e1000_exit_module);
281 
282 static int e1000_request_irq(struct e1000_adapter *adapter)
283 {
284 	struct net_device *netdev = adapter->netdev;
285 	irq_handler_t handler = e1000_intr;
286 	int irq_flags = IRQF_SHARED;
287 	int err;
288 
289 	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
290 	                  netdev);
291 	if (err) {
292 		e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
293 	}
294 
295 	return err;
296 }
297 
298 static void e1000_free_irq(struct e1000_adapter *adapter)
299 {
300 	struct net_device *netdev = adapter->netdev;
301 
302 	free_irq(adapter->pdev->irq, netdev);
303 }
304 
305 /**
306  * e1000_irq_disable - Mask off interrupt generation on the NIC
307  * @adapter: board private structure
308  **/
309 static void e1000_irq_disable(struct e1000_adapter *adapter)
310 {
311 	struct e1000_hw *hw = &adapter->hw;
312 
313 	ew32(IMC, ~0);
314 	E1000_WRITE_FLUSH();
315 	synchronize_irq(adapter->pdev->irq);
316 }
317 
318 /**
319  * e1000_irq_enable - Enable default interrupt generation settings
320  * @adapter: board private structure
321  **/
322 static void e1000_irq_enable(struct e1000_adapter *adapter)
323 {
324 	struct e1000_hw *hw = &adapter->hw;
325 
326 	ew32(IMS, IMS_ENABLE_MASK);
327 	E1000_WRITE_FLUSH();
328 }
329 
330 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
331 {
332 	struct e1000_hw *hw = &adapter->hw;
333 	struct net_device *netdev = adapter->netdev;
334 	u16 vid = hw->mng_cookie.vlan_id;
335 	u16 old_vid = adapter->mng_vlan_id;
336 
337 	if (!e1000_vlan_used(adapter))
338 		return;
339 
340 	if (!test_bit(vid, adapter->active_vlans)) {
341 		if (hw->mng_cookie.status &
342 		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
343 			e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
344 			adapter->mng_vlan_id = vid;
345 		} else {
346 			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
347 		}
348 		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
349 		    (vid != old_vid) &&
350 		    !test_bit(old_vid, adapter->active_vlans))
351 			e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
352 					       old_vid);
353 	} else {
354 		adapter->mng_vlan_id = vid;
355 	}
356 }
357 
358 static void e1000_init_manageability(struct e1000_adapter *adapter)
359 {
360 	struct e1000_hw *hw = &adapter->hw;
361 
362 	if (adapter->en_mng_pt) {
363 		u32 manc = er32(MANC);
364 
365 		/* disable hardware interception of ARP */
366 		manc &= ~(E1000_MANC_ARP_EN);
367 
368 		ew32(MANC, manc);
369 	}
370 }
371 
372 static void e1000_release_manageability(struct e1000_adapter *adapter)
373 {
374 	struct e1000_hw *hw = &adapter->hw;
375 
376 	if (adapter->en_mng_pt) {
377 		u32 manc = er32(MANC);
378 
379 		/* re-enable hardware interception of ARP */
380 		manc |= E1000_MANC_ARP_EN;
381 
382 		ew32(MANC, manc);
383 	}
384 }
385 
386 /**
387  * e1000_configure - configure the hardware for RX and TX
388  * @adapter: board private structure
389  **/
390 static void e1000_configure(struct e1000_adapter *adapter)
391 {
392 	struct net_device *netdev = adapter->netdev;
393 	int i;
394 
395 	e1000_set_rx_mode(netdev);
396 
397 	e1000_restore_vlan(adapter);
398 	e1000_init_manageability(adapter);
399 
400 	e1000_configure_tx(adapter);
401 	e1000_setup_rctl(adapter);
402 	e1000_configure_rx(adapter);
403 	/* call E1000_DESC_UNUSED which always leaves
404 	 * at least 1 descriptor unused to make sure
405 	 * next_to_use != next_to_clean
406 	 */
407 	for (i = 0; i < adapter->num_rx_queues; i++) {
408 		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
409 		adapter->alloc_rx_buf(adapter, ring,
410 				      E1000_DESC_UNUSED(ring));
411 	}
412 }
413 
414 int e1000_up(struct e1000_adapter *adapter)
415 {
416 	struct e1000_hw *hw = &adapter->hw;
417 
418 	/* hardware has been reset, we need to reload some things */
419 	e1000_configure(adapter);
420 
421 	clear_bit(__E1000_DOWN, &adapter->flags);
422 
423 	napi_enable(&adapter->napi);
424 
425 	e1000_irq_enable(adapter);
426 
427 	netif_wake_queue(adapter->netdev);
428 
429 	/* fire a link change interrupt to start the watchdog */
430 	ew32(ICS, E1000_ICS_LSC);
431 	return 0;
432 }
433 
434 /**
435  * e1000_power_up_phy - restore link in case the phy was powered down
436  * @adapter: address of board private structure
437  *
438  * The phy may be powered down to save power and turn off link when the
439  * driver is unloaded and wake on lan is not enabled (among others)
440  * *** this routine MUST be followed by a call to e1000_reset ***
441  **/
442 void e1000_power_up_phy(struct e1000_adapter *adapter)
443 {
444 	struct e1000_hw *hw = &adapter->hw;
445 	u16 mii_reg = 0;
446 
447 	/* Just clear the power down bit to wake the phy back up */
448 	if (hw->media_type == e1000_media_type_copper) {
449 		/* according to the manual, the phy will retain its
450 		 * settings across a power-down/up cycle
451 		 */
452 		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
453 		mii_reg &= ~MII_CR_POWER_DOWN;
454 		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
455 	}
456 }
457 
458 static void e1000_power_down_phy(struct e1000_adapter *adapter)
459 {
460 	struct e1000_hw *hw = &adapter->hw;
461 
462 	/* Power down the PHY so no link is implied when interface is down.
463 	 * The PHY cannot be powered down if any of the following is true:
464 	 * (a) WoL is enabled
465 	 * (b) AMT is active
466 	 * (c) SoL/IDER session is active
467 	 */
468 	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
469 	   hw->media_type == e1000_media_type_copper) {
470 		u16 mii_reg = 0;
471 
472 		switch (hw->mac_type) {
473 		case e1000_82540:
474 		case e1000_82545:
475 		case e1000_82545_rev_3:
476 		case e1000_82546:
477 		case e1000_ce4100:
478 		case e1000_82546_rev_3:
479 		case e1000_82541:
480 		case e1000_82541_rev_2:
481 		case e1000_82547:
482 		case e1000_82547_rev_2:
483 			if (er32(MANC) & E1000_MANC_SMBUS_EN)
484 				goto out;
485 			break;
486 		default:
487 			goto out;
488 		}
489 		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
490 		mii_reg |= MII_CR_POWER_DOWN;
491 		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
492 		msleep(1);
493 	}
494 out:
495 	return;
496 }
497 
498 static void e1000_down_and_stop(struct e1000_adapter *adapter)
499 {
500 	set_bit(__E1000_DOWN, &adapter->flags);
501 
502 	cancel_delayed_work_sync(&adapter->watchdog_task);
503 
504 	/*
505 	 * Since the watchdog task can reschedule other tasks, we should cancel
506 	 * it first, otherwise we can run into the situation when a work is
507 	 * still running after the adapter has been turned down.
508 	 */
509 
510 	cancel_delayed_work_sync(&adapter->phy_info_task);
511 	cancel_delayed_work_sync(&adapter->fifo_stall_task);
512 
513 	/* Only kill reset task if adapter is not resetting */
514 	if (!test_bit(__E1000_RESETTING, &adapter->flags))
515 		cancel_work_sync(&adapter->reset_task);
516 }
517 
518 void e1000_down(struct e1000_adapter *adapter)
519 {
520 	struct e1000_hw *hw = &adapter->hw;
521 	struct net_device *netdev = adapter->netdev;
522 	u32 rctl, tctl;
523 
524 	/* disable receives in the hardware */
525 	rctl = er32(RCTL);
526 	ew32(RCTL, rctl & ~E1000_RCTL_EN);
527 	/* flush and sleep below */
528 
529 	netif_tx_disable(netdev);
530 
531 	/* disable transmits in the hardware */
532 	tctl = er32(TCTL);
533 	tctl &= ~E1000_TCTL_EN;
534 	ew32(TCTL, tctl);
535 	/* flush both disables and wait for them to finish */
536 	E1000_WRITE_FLUSH();
537 	msleep(10);
538 
539 	/* Set the carrier off after transmits have been disabled in the
540 	 * hardware, to avoid race conditions with e1000_watchdog() (which
541 	 * may be running concurrently to us, checking for the carrier
542 	 * bit to decide whether it should enable transmits again). Such
543 	 * a race condition would result into transmission being disabled
544 	 * a race condition would result in transmission being disabled
545 	 */
546 	netif_carrier_off(netdev);
547 
548 	napi_disable(&adapter->napi);
549 
550 	e1000_irq_disable(adapter);
551 
552 	/* Setting DOWN must be after irq_disable to prevent
553 	 * a screaming interrupt.  Setting DOWN also prevents
554 	 * tasks from rescheduling.
555 	 */
556 	e1000_down_and_stop(adapter);
557 
558 	adapter->link_speed = 0;
559 	adapter->link_duplex = 0;
560 
561 	e1000_reset(adapter);
562 	e1000_clean_all_tx_rings(adapter);
563 	e1000_clean_all_rx_rings(adapter);
564 }
565 
566 void e1000_reinit_locked(struct e1000_adapter *adapter)
567 {
568 	WARN_ON(in_interrupt());
569 	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
570 		msleep(1);
571 
572 	/* only run the task if not already down */
573 	if (!test_bit(__E1000_DOWN, &adapter->flags)) {
574 		e1000_down(adapter);
575 		e1000_up(adapter);
576 	}
577 
578 	clear_bit(__E1000_RESETTING, &adapter->flags);
579 }
580 
581 void e1000_reset(struct e1000_adapter *adapter)
582 {
583 	struct e1000_hw *hw = &adapter->hw;
584 	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
585 	bool legacy_pba_adjust = false;
586 	u16 hwm;
587 
588 	/* Repartition Pba for greater than 9k mtu
589 	 * To take effect CTRL.RST is required.
590 	 */
591 
592 	switch (hw->mac_type) {
593 	case e1000_82542_rev2_0:
594 	case e1000_82542_rev2_1:
595 	case e1000_82543:
596 	case e1000_82544:
597 	case e1000_82540:
598 	case e1000_82541:
599 	case e1000_82541_rev_2:
600 		legacy_pba_adjust = true;
601 		pba = E1000_PBA_48K;
602 		break;
603 	case e1000_82545:
604 	case e1000_82545_rev_3:
605 	case e1000_82546:
606 	case e1000_ce4100:
607 	case e1000_82546_rev_3:
608 		pba = E1000_PBA_48K;
609 		break;
610 	case e1000_82547:
611 	case e1000_82547_rev_2:
612 		legacy_pba_adjust = true;
613 		pba = E1000_PBA_30K;
614 		break;
615 	case e1000_undefined:
616 	case e1000_num_macs:
617 		break;
618 	}
619 
620 	if (legacy_pba_adjust) {
621 		if (hw->max_frame_size > E1000_RXBUFFER_8192)
622 			pba -= 8; /* allocate more FIFO for Tx */
623 
624 		if (hw->mac_type == e1000_82547) {
625 			adapter->tx_fifo_head = 0;
626 			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
627 			adapter->tx_fifo_size =
628 				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
629 			atomic_set(&adapter->tx_fifo_stall, 0);
630 		}
631 	} else if (hw->max_frame_size >  ETH_FRAME_LEN + ETH_FCS_LEN) {
632 		/* adjust PBA for jumbo frames */
633 		ew32(PBA, pba);
634 
635 		/* To maintain wire speed transmits, the Tx FIFO should be
636 		 * large enough to accommodate two full transmit packets,
637 		 * rounded up to the next 1KB and expressed in KB.  Likewise,
638 		 * the Rx FIFO should be large enough to accommodate at least
639 		 * one full receive packet and is similarly rounded up and
640 		 * expressed in KB.
641 		 */
642 		pba = er32(PBA);
643 		/* upper 16 bits has Tx packet buffer allocation size in KB */
644 		tx_space = pba >> 16;
645 		/* lower 16 bits has Rx packet buffer allocation size in KB */
646 		pba &= 0xffff;
647 		/* the Tx fifo also stores 16 bytes of information about the Tx
648 		 * but don't include ethernet FCS because hardware appends it
649 		 */
650 		min_tx_space = (hw->max_frame_size +
651 		                sizeof(struct e1000_tx_desc) -
652 		                ETH_FCS_LEN) * 2;
653 		min_tx_space = ALIGN(min_tx_space, 1024);
654 		min_tx_space >>= 10;
655 		/* software strips receive CRC, so leave room for it */
656 		min_rx_space = hw->max_frame_size;
657 		min_rx_space = ALIGN(min_rx_space, 1024);
658 		min_rx_space >>= 10;
659 
660 		/* If current Tx allocation is less than the min Tx FIFO size,
661 		 * and the min Tx FIFO size is less than the current Rx FIFO
662 		 * allocation, take space away from current Rx allocation
663 		 */
664 		if (tx_space < min_tx_space &&
665 		    ((min_tx_space - tx_space) < pba)) {
666 			pba = pba - (min_tx_space - tx_space);
667 
668 			/* PCI/PCIx hardware has PBA alignment constraints */
669 			switch (hw->mac_type) {
670 			case e1000_82545 ... e1000_82546_rev_3:
671 				pba &= ~(E1000_PBA_8K - 1);
672 				break;
673 			default:
674 				break;
675 			}
676 
677 			/* if short on Rx space, Rx wins and must trump Tx
678 			 * adjustment or use Early Receive if available
679 			 */
680 			if (pba < min_rx_space)
681 				pba = min_rx_space;
682 		}
683 	}
684 
685 	ew32(PBA, pba);
686 
687 	/* flow control settings:
688 	 * The high water mark must be low enough to fit one full frame
689 	 * (or the size used for early receive) above it in the Rx FIFO.
690 	 * Set it to the lower of:
691 	 * - 90% of the Rx FIFO size, and
692 	 * - the full Rx FIFO size minus the early receive size (for parts
693 	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
694 	 * - the full Rx FIFO size minus one full frame
695 	 */
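	/* Worked example with assumed values: for pba = 48 (KB) and
	 * max_frame_size = 1522 bytes,
	 *   90% of the Rx FIFO      : 48 * 1024 * 9 / 10 = 44236
	 *   Rx FIFO minus one frame : 48 * 1024 - 1522   = 47630
	 * so hwm = 44236, and the 0xFFF8 mask below rounds it down to the
	 * 8-byte-aligned value 44232 used for fc_high_water.
	 */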
696 	hwm = min(((pba << 10) * 9 / 10),
697 		  ((pba << 10) - hw->max_frame_size));
698 
699 	hw->fc_high_water = hwm & 0xFFF8;	/* 8-byte granularity */
700 	hw->fc_low_water = hw->fc_high_water - 8;
701 	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
702 	hw->fc_send_xon = 1;
703 	hw->fc = hw->original_fc;
704 
705 	/* Allow time for pending master requests to run */
706 	e1000_reset_hw(hw);
707 	if (hw->mac_type >= e1000_82544)
708 		ew32(WUC, 0);
709 
710 	if (e1000_init_hw(hw))
711 		e_dev_err("Hardware Error\n");
712 	e1000_update_mng_vlan(adapter);
713 
714 	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
715 	if (hw->mac_type >= e1000_82544 &&
716 	    hw->autoneg == 1 &&
717 	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
718 		u32 ctrl = er32(CTRL);
719 		/* clear phy power management bit if we are in gig only mode,
720 		 * which if enabled will attempt negotiation to 100Mb, which
721 		 * can cause a loss of link at power off or driver unload
722 		 */
723 		ctrl &= ~E1000_CTRL_SWDPIN3;
724 		ew32(CTRL, ctrl);
725 	}
726 
727 	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
728 	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
729 
730 	e1000_reset_adaptive(hw);
731 	e1000_phy_get_info(hw, &adapter->phy_info);
732 
733 	e1000_release_manageability(adapter);
734 }
735 
736 /* Dump the eeprom for users having checksum issues */
737 static void e1000_dump_eeprom(struct e1000_adapter *adapter)
738 {
739 	struct net_device *netdev = adapter->netdev;
740 	struct ethtool_eeprom eeprom;
741 	const struct ethtool_ops *ops = netdev->ethtool_ops;
742 	u8 *data;
743 	int i;
744 	u16 csum_old, csum_new = 0;
745 
746 	eeprom.len = ops->get_eeprom_len(netdev);
747 	eeprom.offset = 0;
748 
749 	data = kmalloc(eeprom.len, GFP_KERNEL);
750 	if (!data)
751 		return;
752 
753 	ops->get_eeprom(netdev, &eeprom, data);
754 
755 	csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
756 		   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
757 	for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
758 		csum_new += data[i] + (data[i + 1] << 8);
759 	csum_new = EEPROM_SUM - csum_new;
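	/* The loop sums the 16-bit little-endian words stored before
	 * EEPROM_CHECKSUM_REG, so csum_new is the checksum word value that
	 * would make the image sum to EEPROM_SUM, i.e. what csum_old should
	 * equal if the EEPROM contents were intact.
	 */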
760 
761 	pr_err("/*********************/\n");
762 	pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
763 	pr_err("Calculated              : 0x%04x\n", csum_new);
764 
765 	pr_err("Offset    Values\n");
766 	pr_err("========  ======\n");
767 	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
768 
769 	pr_err("Include this output when contacting your support provider.\n");
770 	pr_err("This is not a software error! Something bad happened to\n");
771 	pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
772 	pr_err("result in further problems, possibly loss of data,\n");
773 	pr_err("corruption or system hangs!\n");
774 	pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
775 	pr_err("which is invalid and requires you to set the proper MAC\n");
776 	pr_err("address manually before continuing to enable this network\n");
777 	pr_err("device. Please inspect the EEPROM dump and report the\n");
778 	pr_err("issue to your hardware vendor or Intel Customer Support.\n");
779 	pr_err("/*********************/\n");
780 
781 	kfree(data);
782 }
783 
784 /**
785  * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
786  * @pdev: PCI device information struct
787  *
788  * Return true if an adapter needs ioport resources
789  **/
790 static int e1000_is_need_ioport(struct pci_dev *pdev)
791 {
792 	switch (pdev->device) {
793 	case E1000_DEV_ID_82540EM:
794 	case E1000_DEV_ID_82540EM_LOM:
795 	case E1000_DEV_ID_82540EP:
796 	case E1000_DEV_ID_82540EP_LOM:
797 	case E1000_DEV_ID_82540EP_LP:
798 	case E1000_DEV_ID_82541EI:
799 	case E1000_DEV_ID_82541EI_MOBILE:
800 	case E1000_DEV_ID_82541ER:
801 	case E1000_DEV_ID_82541ER_LOM:
802 	case E1000_DEV_ID_82541GI:
803 	case E1000_DEV_ID_82541GI_LF:
804 	case E1000_DEV_ID_82541GI_MOBILE:
805 	case E1000_DEV_ID_82544EI_COPPER:
806 	case E1000_DEV_ID_82544EI_FIBER:
807 	case E1000_DEV_ID_82544GC_COPPER:
808 	case E1000_DEV_ID_82544GC_LOM:
809 	case E1000_DEV_ID_82545EM_COPPER:
810 	case E1000_DEV_ID_82545EM_FIBER:
811 	case E1000_DEV_ID_82546EB_COPPER:
812 	case E1000_DEV_ID_82546EB_FIBER:
813 	case E1000_DEV_ID_82546EB_QUAD_COPPER:
814 		return true;
815 	default:
816 		return false;
817 	}
818 }
819 
820 static netdev_features_t e1000_fix_features(struct net_device *netdev,
821 	netdev_features_t features)
822 {
823 	/* Since there is no support for separate Rx/Tx vlan accel
824 	 * enable/disable make sure Tx flag is always in same state as Rx.
825 	 */
826 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
827 		features |= NETIF_F_HW_VLAN_CTAG_TX;
828 	else
829 		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
830 
831 	return features;
832 }
833 
834 static int e1000_set_features(struct net_device *netdev,
835 	netdev_features_t features)
836 {
837 	struct e1000_adapter *adapter = netdev_priv(netdev);
838 	netdev_features_t changed = features ^ netdev->features;
839 
840 	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
841 		e1000_vlan_mode(netdev, features);
842 
843 	if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
844 		return 0;
845 
846 	netdev->features = features;
847 	adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
848 
849 	if (netif_running(netdev))
850 		e1000_reinit_locked(adapter);
851 	else
852 		e1000_reset(adapter);
853 
854 	return 0;
855 }
856 
857 static const struct net_device_ops e1000_netdev_ops = {
858 	.ndo_open		= e1000_open,
859 	.ndo_stop		= e1000_close,
860 	.ndo_start_xmit		= e1000_xmit_frame,
861 	.ndo_get_stats		= e1000_get_stats,
862 	.ndo_set_rx_mode	= e1000_set_rx_mode,
863 	.ndo_set_mac_address	= e1000_set_mac,
864 	.ndo_tx_timeout		= e1000_tx_timeout,
865 	.ndo_change_mtu		= e1000_change_mtu,
866 	.ndo_do_ioctl		= e1000_ioctl,
867 	.ndo_validate_addr	= eth_validate_addr,
868 	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
869 	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
870 #ifdef CONFIG_NET_POLL_CONTROLLER
871 	.ndo_poll_controller	= e1000_netpoll,
872 #endif
873 	.ndo_fix_features	= e1000_fix_features,
874 	.ndo_set_features	= e1000_set_features,
875 };
876 
877 /**
878  * e1000_init_hw_struct - initialize members of hw struct
879  * @adapter: board private struct
880  * @hw: structure used by e1000_hw.c
881  *
882  * Factors out initialization of the e1000_hw struct to its own function
883  * that can be called very early at init (just after struct allocation).
884  * Fields are initialized based on PCI device information and
885  * OS network device settings (MTU size).
886  * Returns negative error codes if MAC type setup fails.
887  */
888 static int e1000_init_hw_struct(struct e1000_adapter *adapter,
889 				struct e1000_hw *hw)
890 {
891 	struct pci_dev *pdev = adapter->pdev;
892 
893 	/* PCI config space info */
894 	hw->vendor_id = pdev->vendor;
895 	hw->device_id = pdev->device;
896 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
897 	hw->subsystem_id = pdev->subsystem_device;
898 	hw->revision_id = pdev->revision;
899 
900 	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
901 
902 	hw->max_frame_size = adapter->netdev->mtu +
903 			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
904 	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
905 
906 	/* identify the MAC */
907 	if (e1000_set_mac_type(hw)) {
908 		e_err(probe, "Unknown MAC Type\n");
909 		return -EIO;
910 	}
911 
912 	switch (hw->mac_type) {
913 	default:
914 		break;
915 	case e1000_82541:
916 	case e1000_82547:
917 	case e1000_82541_rev_2:
918 	case e1000_82547_rev_2:
919 		hw->phy_init_script = 1;
920 		break;
921 	}
922 
923 	e1000_set_media_type(hw);
924 	e1000_get_bus_info(hw);
925 
926 	hw->wait_autoneg_complete = false;
927 	hw->tbi_compatibility_en = true;
928 	hw->adaptive_ifs = true;
929 
930 	/* Copper options */
931 
932 	if (hw->media_type == e1000_media_type_copper) {
933 		hw->mdix = AUTO_ALL_MODES;
934 		hw->disable_polarity_correction = false;
935 		hw->master_slave = E1000_MASTER_SLAVE;
936 	}
937 
938 	return 0;
939 }
940 
941 /**
942  * e1000_probe - Device Initialization Routine
943  * @pdev: PCI device information struct
944  * @ent: entry in e1000_pci_tbl
945  *
946  * Returns 0 on success, negative on failure
947  *
948  * e1000_probe initializes an adapter identified by a pci_dev structure.
949  * The OS initialization, configuring of the adapter private structure,
950  * and a hardware reset occur.
951  **/
952 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
953 {
954 	struct net_device *netdev;
955 	struct e1000_adapter *adapter = NULL;
956 	struct e1000_hw *hw;
957 
958 	static int cards_found = 0;
959 	static int global_quad_port_a = 0; /* global ksp3 port a indication */
960 	int i, err, pci_using_dac;
961 	u16 eeprom_data = 0;
962 	u16 tmp = 0;
963 	u16 eeprom_apme_mask = E1000_EEPROM_APME;
964 	int bars, need_ioport;
965 	bool disable_dev = false;
966 
967 	/* do not allocate ioport bars when not needed */
968 	need_ioport = e1000_is_need_ioport(pdev);
969 	if (need_ioport) {
970 		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
971 		err = pci_enable_device(pdev);
972 	} else {
973 		bars = pci_select_bars(pdev, IORESOURCE_MEM);
974 		err = pci_enable_device_mem(pdev);
975 	}
976 	if (err)
977 		return err;
978 
979 	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
980 	if (err)
981 		goto err_pci_reg;
982 
983 	pci_set_master(pdev);
984 	err = pci_save_state(pdev);
985 	if (err)
986 		goto err_alloc_etherdev;
987 
988 	err = -ENOMEM;
989 	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
990 	if (!netdev)
991 		goto err_alloc_etherdev;
992 
993 	SET_NETDEV_DEV(netdev, &pdev->dev);
994 
995 	pci_set_drvdata(pdev, netdev);
996 	adapter = netdev_priv(netdev);
997 	adapter->netdev = netdev;
998 	adapter->pdev = pdev;
999 	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
1000 	adapter->bars = bars;
1001 	adapter->need_ioport = need_ioport;
1002 
1003 	hw = &adapter->hw;
1004 	hw->back = adapter;
1005 
1006 	err = -EIO;
1007 	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
1008 	if (!hw->hw_addr)
1009 		goto err_ioremap;
1010 
1011 	if (adapter->need_ioport) {
1012 		for (i = BAR_1; i <= BAR_5; i++) {
1013 			if (pci_resource_len(pdev, i) == 0)
1014 				continue;
1015 			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
1016 				hw->io_base = pci_resource_start(pdev, i);
1017 				break;
1018 			}
1019 		}
1020 	}
1021 
1022 	/* make ready for any if (hw->...) below */
1023 	err = e1000_init_hw_struct(adapter, hw);
1024 	if (err)
1025 		goto err_sw_init;
1026 
1027 	/* there is a workaround being applied below that limits
1028 	 * 64-bit DMA addresses to 64-bit hardware.  There are some
1029 	 * 32-bit adapters that hang on Tx when given 64-bit DMA addresses
1030 	 */
1031 	pci_using_dac = 0;
1032 	if ((hw->bus_type == e1000_bus_type_pcix) &&
1033 	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
1034 		pci_using_dac = 1;
1035 	} else {
1036 		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1037 		if (err) {
1038 			pr_err("No usable DMA config, aborting\n");
1039 			goto err_dma;
1040 		}
1041 	}
1042 
1043 	netdev->netdev_ops = &e1000_netdev_ops;
1044 	e1000_set_ethtool_ops(netdev);
1045 	netdev->watchdog_timeo = 5 * HZ;
1046 	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
1047 
1048 	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1049 
1050 	adapter->bd_number = cards_found;
1051 
1052 	/* setup the private structure */
1053 
1054 	err = e1000_sw_init(adapter);
1055 	if (err)
1056 		goto err_sw_init;
1057 
1058 	err = -EIO;
1059 	if (hw->mac_type == e1000_ce4100) {
1060 		hw->ce4100_gbe_mdio_base_virt =
1061 					ioremap(pci_resource_start(pdev, BAR_1),
1062 		                                pci_resource_len(pdev, BAR_1));
1063 
1064 		if (!hw->ce4100_gbe_mdio_base_virt)
1065 			goto err_mdio_ioremap;
1066 	}
1067 
1068 	if (hw->mac_type >= e1000_82543) {
1069 		netdev->hw_features = NETIF_F_SG |
1070 				   NETIF_F_HW_CSUM |
1071 				   NETIF_F_HW_VLAN_CTAG_RX;
1072 		netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
1073 				   NETIF_F_HW_VLAN_CTAG_FILTER;
1074 	}
1075 
1076 	if ((hw->mac_type >= e1000_82544) &&
1077 	   (hw->mac_type != e1000_82547))
1078 		netdev->hw_features |= NETIF_F_TSO;
1079 
1080 	netdev->priv_flags |= IFF_SUPP_NOFCS;
1081 
1082 	netdev->features |= netdev->hw_features;
1083 	netdev->hw_features |= (NETIF_F_RXCSUM |
1084 				NETIF_F_RXALL |
1085 				NETIF_F_RXFCS);
1086 
1087 	if (pci_using_dac) {
1088 		netdev->features |= NETIF_F_HIGHDMA;
1089 		netdev->vlan_features |= NETIF_F_HIGHDMA;
1090 	}
1091 
1092 	netdev->vlan_features |= (NETIF_F_TSO |
1093 				  NETIF_F_HW_CSUM |
1094 				  NETIF_F_SG);
1095 
1096 	/* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */
1097 	if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
1098 	    hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
1099 		netdev->priv_flags |= IFF_UNICAST_FLT;
1100 
1101 	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
1102 
1103 	/* initialize eeprom parameters */
1104 	if (e1000_init_eeprom_params(hw)) {
1105 		e_err(probe, "EEPROM initialization failed\n");
1106 		goto err_eeprom;
1107 	}
1108 
1109 	/* before reading the EEPROM, reset the controller to
1110 	 * put the device in a known good starting state
1111 	 */
1112 
1113 	e1000_reset_hw(hw);
1114 
1115 	/* make sure the EEPROM is good */
1116 	if (e1000_validate_eeprom_checksum(hw) < 0) {
1117 		e_err(probe, "The EEPROM Checksum Is Not Valid\n");
1118 		e1000_dump_eeprom(adapter);
1119 		/* set MAC address to all zeroes to invalidate and temporarily
1120 		 * disable this device for the user. This blocks regular
1121 		 * traffic while still permitting ethtool ioctls from reaching
1122 		 * the hardware as well as allowing the user to run the
1123 		 * interface after manually setting a hw addr using
1124 		 * `ip link set address`
1125 		 */
1126 		memset(hw->mac_addr, 0, netdev->addr_len);
1127 	} else {
1128 		/* copy the MAC address out of the EEPROM */
1129 		if (e1000_read_mac_addr(hw))
1130 			e_err(probe, "EEPROM Read Error\n");
1131 	}
1132 	/* don't block initialization here due to bad MAC address */
1133 	memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
1134 
1135 	if (!is_valid_ether_addr(netdev->dev_addr))
1136 		e_err(probe, "Invalid MAC Address\n");
1137 
1138 
1139 	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
1140 	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
1141 			  e1000_82547_tx_fifo_stall_task);
1142 	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
1143 	INIT_WORK(&adapter->reset_task, e1000_reset_task);
1144 
1145 	e1000_check_options(adapter);
1146 
1147 	/* Initial Wake on LAN setting
1148 	 * If APM wake is enabled in the EEPROM,
1149 	 * enable the ACPI Magic Packet filter
1150 	 */
1151 
1152 	switch (hw->mac_type) {
1153 	case e1000_82542_rev2_0:
1154 	case e1000_82542_rev2_1:
1155 	case e1000_82543:
1156 		break;
1157 	case e1000_82544:
1158 		e1000_read_eeprom(hw,
1159 			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1160 		eeprom_apme_mask = E1000_EEPROM_82544_APM;
1161 		break;
1162 	case e1000_82546:
1163 	case e1000_82546_rev_3:
1164 		if (er32(STATUS) & E1000_STATUS_FUNC_1){
1165 			e1000_read_eeprom(hw,
1166 				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1167 			break;
1168 		}
1169 		/* Fall Through */
1170 	default:
1171 		e1000_read_eeprom(hw,
1172 			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1173 		break;
1174 	}
1175 	if (eeprom_data & eeprom_apme_mask)
1176 		adapter->eeprom_wol |= E1000_WUFC_MAG;
1177 
1178 	/* now that we have the eeprom settings, apply the special cases
1179 	 * where the eeprom may be wrong or the board simply won't support
1180 	 * wake on lan on a particular port
1181 	 */
1182 	switch (pdev->device) {
1183 	case E1000_DEV_ID_82546GB_PCIE:
1184 		adapter->eeprom_wol = 0;
1185 		break;
1186 	case E1000_DEV_ID_82546EB_FIBER:
1187 	case E1000_DEV_ID_82546GB_FIBER:
1188 		/* Wake events only supported on port A for dual fiber
1189 		 * regardless of eeprom setting
1190 		 */
1191 		if (er32(STATUS) & E1000_STATUS_FUNC_1)
1192 			adapter->eeprom_wol = 0;
1193 		break;
1194 	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1195 		/* if quad port adapter, disable WoL on all but port A */
1196 		if (global_quad_port_a != 0)
1197 			adapter->eeprom_wol = 0;
1198 		else
1199 			adapter->quad_port_a = true;
1200 		/* Reset for multiple quad port adapters */
1201 		if (++global_quad_port_a == 4)
1202 			global_quad_port_a = 0;
1203 		break;
1204 	}
1205 
1206 	/* initialize the wol settings based on the eeprom settings */
1207 	adapter->wol = adapter->eeprom_wol;
1208 	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1209 
1210 	/* Auto detect PHY address */
1211 	if (hw->mac_type == e1000_ce4100) {
1212 		for (i = 0; i < 32; i++) {
1213 			hw->phy_addr = i;
1214 			e1000_read_phy_reg(hw, PHY_ID2, &tmp);
1215 			if (tmp == 0 || tmp == 0xFF) {
1216 				if (i == 31)
1217 					goto err_eeprom;
1218 				continue;
1219 			} else
1220 				break;
1221 		}
1222 	}
1223 
1224 	/* reset the hardware with the new settings */
1225 	e1000_reset(adapter);
1226 
1227 	strcpy(netdev->name, "eth%d");
1228 	err = register_netdev(netdev);
1229 	if (err)
1230 		goto err_register;
1231 
1232 	e1000_vlan_filter_on_off(adapter, false);
1233 
1234 	/* print bus type/speed/width info */
1235 	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
1236 	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1237 	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
1238 		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
1239 		(hw->bus_speed == e1000_bus_speed_100) ? 100 :
1240 		(hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
1241 	       ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
1242 	       netdev->dev_addr);
1243 
1244 	/* carrier off reporting is important to ethtool even BEFORE open */
1245 	netif_carrier_off(netdev);
1246 
1247 	e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
1248 
1249 	cards_found++;
1250 	return 0;
1251 
1252 err_register:
1253 err_eeprom:
1254 	e1000_phy_hw_reset(hw);
1255 
1256 	if (hw->flash_address)
1257 		iounmap(hw->flash_address);
1258 	kfree(adapter->tx_ring);
1259 	kfree(adapter->rx_ring);
1260 err_dma:
1261 err_sw_init:
1262 err_mdio_ioremap:
1263 	iounmap(hw->ce4100_gbe_mdio_base_virt);
1264 	iounmap(hw->hw_addr);
1265 err_ioremap:
1266 	disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1267 	free_netdev(netdev);
1268 err_alloc_etherdev:
1269 	pci_release_selected_regions(pdev, bars);
1270 err_pci_reg:
1271 	if (!adapter || disable_dev)
1272 		pci_disable_device(pdev);
1273 	return err;
1274 }
1275 
1276 /**
1277  * e1000_remove - Device Removal Routine
1278  * @pdev: PCI device information struct
1279  *
1280  * e1000_remove is called by the PCI subsystem to alert the driver
1281  * that it should release a PCI device.  This could be caused by a
1282  * Hot-Plug event, or because the driver is going to be removed from
1283  * memory.
1284  **/
1285 static void e1000_remove(struct pci_dev *pdev)
1286 {
1287 	struct net_device *netdev = pci_get_drvdata(pdev);
1288 	struct e1000_adapter *adapter = netdev_priv(netdev);
1289 	struct e1000_hw *hw = &adapter->hw;
1290 	bool disable_dev;
1291 
1292 	e1000_down_and_stop(adapter);
1293 	e1000_release_manageability(adapter);
1294 
1295 	unregister_netdev(netdev);
1296 
1297 	e1000_phy_hw_reset(hw);
1298 
1299 	kfree(adapter->tx_ring);
1300 	kfree(adapter->rx_ring);
1301 
1302 	if (hw->mac_type == e1000_ce4100)
1303 		iounmap(hw->ce4100_gbe_mdio_base_virt);
1304 	iounmap(hw->hw_addr);
1305 	if (hw->flash_address)
1306 		iounmap(hw->flash_address);
1307 	pci_release_selected_regions(pdev, adapter->bars);
1308 
1309 	disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1310 	free_netdev(netdev);
1311 
1312 	if (disable_dev)
1313 		pci_disable_device(pdev);
1314 }
1315 
1316 /**
1317  * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
1318  * @adapter: board private structure to initialize
1319  *
1320  * e1000_sw_init initializes the Adapter private data structure.
1321  * e1000_init_hw_struct MUST be called before this function
1322  **/
1323 static int e1000_sw_init(struct e1000_adapter *adapter)
1324 {
1325 	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1326 
1327 	adapter->num_tx_queues = 1;
1328 	adapter->num_rx_queues = 1;
1329 
1330 	if (e1000_alloc_queues(adapter)) {
1331 		e_err(probe, "Unable to allocate memory for queues\n");
1332 		return -ENOMEM;
1333 	}
1334 
1335 	/* Explicitly disable IRQ since the NIC can be in any state. */
1336 	e1000_irq_disable(adapter);
1337 
1338 	spin_lock_init(&adapter->stats_lock);
1339 
1340 	set_bit(__E1000_DOWN, &adapter->flags);
1341 
1342 	return 0;
1343 }
1344 
1345 /**
1346  * e1000_alloc_queues - Allocate memory for all rings
1347  * @adapter: board private structure to initialize
1348  *
1349  * We allocate one ring per queue at run-time since we don't know the
1350  * number of queues at compile-time.
1351  **/
1352 static int e1000_alloc_queues(struct e1000_adapter *adapter)
1353 {
1354 	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1355 	                           sizeof(struct e1000_tx_ring), GFP_KERNEL);
1356 	if (!adapter->tx_ring)
1357 		return -ENOMEM;
1358 
1359 	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1360 	                           sizeof(struct e1000_rx_ring), GFP_KERNEL);
1361 	if (!adapter->rx_ring) {
1362 		kfree(adapter->tx_ring);
1363 		return -ENOMEM;
1364 	}
1365 
1366 	return E1000_SUCCESS;
1367 }
1368 
1369 /**
1370  * e1000_open - Called when a network interface is made active
1371  * @netdev: network interface device structure
1372  *
1373  * Returns 0 on success, negative value on failure
1374  *
1375  * The open entry point is called when a network interface is made
1376  * active by the system (IFF_UP).  At this point all resources needed
1377  * for transmit and receive operations are allocated, the interrupt
1378  * handler is registered with the OS, the watchdog task is started,
1379  * and the stack is notified that the interface is ready.
1380  **/
1381 static int e1000_open(struct net_device *netdev)
1382 {
1383 	struct e1000_adapter *adapter = netdev_priv(netdev);
1384 	struct e1000_hw *hw = &adapter->hw;
1385 	int err;
1386 
1387 	/* disallow open during test */
1388 	if (test_bit(__E1000_TESTING, &adapter->flags))
1389 		return -EBUSY;
1390 
1391 	netif_carrier_off(netdev);
1392 
1393 	/* allocate transmit descriptors */
1394 	err = e1000_setup_all_tx_resources(adapter);
1395 	if (err)
1396 		goto err_setup_tx;
1397 
1398 	/* allocate receive descriptors */
1399 	err = e1000_setup_all_rx_resources(adapter);
1400 	if (err)
1401 		goto err_setup_rx;
1402 
1403 	e1000_power_up_phy(adapter);
1404 
1405 	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1406 	if ((hw->mng_cookie.status &
1407 			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1408 		e1000_update_mng_vlan(adapter);
1409 	}
1410 
1411 	/* before we allocate an interrupt, we must be ready to handle it.
1412 	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1413 	 * as soon as we call pci_request_irq, so we have to setup our
1414 	 * clean_rx handler before we do so.
1415 	 */
1416 	e1000_configure(adapter);
1417 
1418 	err = e1000_request_irq(adapter);
1419 	if (err)
1420 		goto err_req_irq;
1421 
1422 	/* From here on the code is the same as e1000_up() */
1423 	clear_bit(__E1000_DOWN, &adapter->flags);
1424 
1425 	napi_enable(&adapter->napi);
1426 
1427 	e1000_irq_enable(adapter);
1428 
1429 	netif_start_queue(netdev);
1430 
1431 	/* fire a link status change interrupt to start the watchdog */
1432 	ew32(ICS, E1000_ICS_LSC);
1433 
1434 	return E1000_SUCCESS;
1435 
1436 err_req_irq:
1437 	e1000_power_down_phy(adapter);
1438 	e1000_free_all_rx_resources(adapter);
1439 err_setup_rx:
1440 	e1000_free_all_tx_resources(adapter);
1441 err_setup_tx:
1442 	e1000_reset(adapter);
1443 
1444 	return err;
1445 }
1446 
1447 /**
1448  * e1000_close - Disables a network interface
1449  * @netdev: network interface device structure
1450  *
1451  * Returns 0, this is not allowed to fail
1452  *
1453  * The close entry point is called when an interface is de-activated
1454  * by the OS.  The hardware is still under the drivers control, but
1455  * needs to be disabled.  A global MAC reset is issued to stop the
1456  * hardware, and all transmit and receive resources are freed.
1457  **/
1458 static int e1000_close(struct net_device *netdev)
1459 {
1460 	struct e1000_adapter *adapter = netdev_priv(netdev);
1461 	struct e1000_hw *hw = &adapter->hw;
1462 	int count = E1000_CHECK_RESET_COUNT;
1463 
1464 	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--)
1465 		usleep_range(10000, 20000);
1466 
1467 	WARN_ON(count < 0);
1468 
1469 	/* signal that we're down so that the reset task will no longer run */
1470 	set_bit(__E1000_DOWN, &adapter->flags);
1471 	clear_bit(__E1000_RESETTING, &adapter->flags);
1472 
1473 	e1000_down(adapter);
1474 	e1000_power_down_phy(adapter);
1475 	e1000_free_irq(adapter);
1476 
1477 	e1000_free_all_tx_resources(adapter);
1478 	e1000_free_all_rx_resources(adapter);
1479 
1480 	/* kill manageability vlan ID if supported, but not if a vlan with
1481 	 * the same ID is registered on the host OS (let 8021q kill it)
1482 	 */
1483 	if ((hw->mng_cookie.status &
1484 	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1485 	    !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
1486 		e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
1487 				       adapter->mng_vlan_id);
1488 	}
1489 
1490 	return 0;
1491 }
1492 
1493 /**
1494  * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1495  * @adapter: address of board private structure
1496  * @start: address of beginning of memory
1497  * @len: length of memory
1498  **/
1499 static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1500 				  unsigned long len)
1501 {
1502 	struct e1000_hw *hw = &adapter->hw;
1503 	unsigned long begin = (unsigned long)start;
1504 	unsigned long end = begin + len;
1505 
1506 	/* First rev 82545 and 82546 need to not allow any memory
1507 	 * write location to cross 64k boundary due to errata 23
1508 	 */
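	/* Illustrative example (assumed addresses): for start = 0xFFF0 and
	 * len = 0x20, begin ^ (end - 1) = 0xFFF0 ^ 0x1000F = 0x1FFFF; the
	 * upper 16 bits differ, so the buffer crosses a 64 KB boundary and
	 * the check below returns false.
	 */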
1509 	if (hw->mac_type == e1000_82545 ||
1510 	    hw->mac_type == e1000_ce4100 ||
1511 	    hw->mac_type == e1000_82546) {
1512 		return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
1513 	}
1514 
1515 	return true;
1516 }
1517 
1518 /**
1519  * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
1520  * @adapter: board private structure
1521  * @txdr:    tx descriptor ring (for a specific queue) to setup
1522  *
1523  * Return 0 on success, negative on failure
1524  **/
1525 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1526 				    struct e1000_tx_ring *txdr)
1527 {
1528 	struct pci_dev *pdev = adapter->pdev;
1529 	int size;
1530 
1531 	size = sizeof(struct e1000_tx_buffer) * txdr->count;
1532 	txdr->buffer_info = vzalloc(size);
1533 	if (!txdr->buffer_info)
1534 		return -ENOMEM;
1535 
1536 	/* round up to nearest 4K */
1537 
1538 	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1539 	txdr->size = ALIGN(txdr->size, 4096);
1540 
1541 	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1542 					GFP_KERNEL);
1543 	if (!txdr->desc) {
1544 setup_tx_desc_die:
1545 		vfree(txdr->buffer_info);
1546 		return -ENOMEM;
1547 	}
1548 
1549 	/* Fix for errata 23, can't cross 64kB boundary */
1550 	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1551 		void *olddesc = txdr->desc;
1552 		dma_addr_t olddma = txdr->dma;
1553 		e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
1554 		      txdr->size, txdr->desc);
1555 		/* Try again, without freeing the previous */
1556 		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
1557 						&txdr->dma, GFP_KERNEL);
1558 		/* Failed allocation, critical failure */
1559 		if (!txdr->desc) {
1560 			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1561 					  olddma);
1562 			goto setup_tx_desc_die;
1563 		}
1564 
1565 		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1566 			/* give up */
1567 			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1568 					  txdr->dma);
1569 			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1570 					  olddma);
1571 			e_err(probe, "Unable to allocate aligned memory "
1572 			      "for the transmit descriptor ring\n");
1573 			vfree(txdr->buffer_info);
1574 			return -ENOMEM;
1575 		} else {
1576 			/* Free old allocation, new allocation was successful */
1577 			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1578 					  olddma);
1579 		}
1580 	}
1581 	memset(txdr->desc, 0, txdr->size);
1582 
1583 	txdr->next_to_use = 0;
1584 	txdr->next_to_clean = 0;
1585 
1586 	return 0;
1587 }
1588 
1589 /**
1590  * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1591  * 				  (Descriptors) for all queues
1592  * @adapter: board private structure
1593  *
1594  * Return 0 on success, negative on failure
1595  **/
1596 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1597 {
1598 	int i, err = 0;
1599 
1600 	for (i = 0; i < adapter->num_tx_queues; i++) {
1601 		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1602 		if (err) {
1603 			e_err(probe, "Allocation for Tx Queue %u failed\n", i);
1604 			for (i-- ; i >= 0; i--)
1605 				e1000_free_tx_resources(adapter,
1606 							&adapter->tx_ring[i]);
1607 			break;
1608 		}
1609 	}
1610 
1611 	return err;
1612 }
1613 
1614 /**
1615  * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1616  * @adapter: board private structure
1617  *
1618  * Configure the Tx unit of the MAC after a reset.
1619  **/
1620 static void e1000_configure_tx(struct e1000_adapter *adapter)
1621 {
1622 	u64 tdba;
1623 	struct e1000_hw *hw = &adapter->hw;
1624 	u32 tdlen, tctl, tipg;
1625 	u32 ipgr1, ipgr2;
1626 
1627 	/* Setup the HW Tx Head and Tail descriptor pointers */
1628 
1629 	switch (adapter->num_tx_queues) {
1630 	case 1:
1631 	default:
1632 		tdba = adapter->tx_ring[0].dma;
1633 		tdlen = adapter->tx_ring[0].count *
1634 			sizeof(struct e1000_tx_desc);
1635 		ew32(TDLEN, tdlen);
1636 		ew32(TDBAH, (tdba >> 32));
1637 		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1638 		ew32(TDT, 0);
1639 		ew32(TDH, 0);
1640 		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
1641 					   E1000_TDH : E1000_82542_TDH);
1642 		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
1643 					   E1000_TDT : E1000_82542_TDT);
1644 		break;
1645 	}
1646 
1647 	/* Set the default values for the Tx Inter Packet Gap timer */
1648 	if ((hw->media_type == e1000_media_type_fiber ||
1649 	     hw->media_type == e1000_media_type_internal_serdes))
1650 		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1651 	else
1652 		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1653 
1654 	switch (hw->mac_type) {
1655 	case e1000_82542_rev2_0:
1656 	case e1000_82542_rev2_1:
1657 		tipg = DEFAULT_82542_TIPG_IPGT;
1658 		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1659 		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1660 		break;
1661 	default:
1662 		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1663 		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1664 		break;
1665 	}
1666 	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1667 	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1668 	ew32(TIPG, tipg);
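	/* Rough sketch of the packing above (assuming the usual e1000 field
	 * positions, IPGT in bits 9:0, IPGR1 at bit 10, IPGR2 at bit 20):
	 *
	 *	tipg = IPGT | (ipgr1 << E1000_TIPG_IPGR1_SHIFT)
	 *		    | (ipgr2 << E1000_TIPG_IPGR2_SHIFT);
	 *
	 * so all three inter-packet gap values end up in the single TIPG
	 * register written here.
	 */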
1669 
1670 	/* Set the Tx Interrupt Delay register */
1671 
1672 	ew32(TIDV, adapter->tx_int_delay);
1673 	if (hw->mac_type >= e1000_82540)
1674 		ew32(TADV, adapter->tx_abs_int_delay);
1675 
1676 	/* Program the Transmit Control Register */
1677 
1678 	tctl = er32(TCTL);
1679 	tctl &= ~E1000_TCTL_CT;
1680 	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1681 		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1682 
1683 	e1000_config_collision_dist(hw);
1684 
1685 	/* Setup Transmit Descriptor Settings for eop descriptor */
1686 	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1687 
1688 	/* only set IDE if we are delaying interrupts using the timers */
1689 	if (adapter->tx_int_delay)
1690 		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1691 
1692 	if (hw->mac_type < e1000_82543)
1693 		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1694 	else
1695 		adapter->txd_cmd |= E1000_TXD_CMD_RS;
1696 
1697 	/* Cache if we're 82544 running in PCI-X because we'll
1698 	 * need this to apply a workaround later in the send path.
1699 	 */
1700 	if (hw->mac_type == e1000_82544 &&
1701 	    hw->bus_type == e1000_bus_type_pcix)
1702 		adapter->pcix_82544 = true;
1703 
1704 	ew32(TCTL, tctl);
1705 
1706 }
1707 
1708 /**
1709  * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1710  * @adapter: board private structure
1711  * @rxdr:    rx descriptor ring (for a specific queue) to setup
1712  *
1713  * Returns 0 on success, negative on failure
1714  **/
1715 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1716 				    struct e1000_rx_ring *rxdr)
1717 {
1718 	struct pci_dev *pdev = adapter->pdev;
1719 	int size, desc_len;
1720 
1721 	size = sizeof(struct e1000_rx_buffer) * rxdr->count;
1722 	rxdr->buffer_info = vzalloc(size);
1723 	if (!rxdr->buffer_info)
1724 		return -ENOMEM;
1725 
1726 	desc_len = sizeof(struct e1000_rx_desc);
1727 
1728 	/* Round up to nearest 4K */
1729 
1730 	rxdr->size = rxdr->count * desc_len;
1731 	rxdr->size = ALIGN(rxdr->size, 4096);
1732 
1733 	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1734 					GFP_KERNEL);
1735 	if (!rxdr->desc) {
1736 setup_rx_desc_die:
1737 		vfree(rxdr->buffer_info);
1738 		return -ENOMEM;
1739 	}
1740 
1741 	/* Fix for errata 23, can't cross 64kB boundary */
1742 	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1743 		void *olddesc = rxdr->desc;
1744 		dma_addr_t olddma = rxdr->dma;
1745 		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
1746 		      rxdr->size, rxdr->desc);
1747 		/* Try again, without freeing the previous */
1748 		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1749 						&rxdr->dma, GFP_KERNEL);
1750 		/* Failed allocation, critical failure */
1751 		if (!rxdr->desc) {
1752 			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1753 					  olddma);
1754 			goto setup_rx_desc_die;
1755 		}
1756 
1757 		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1758 			/* give up */
1759 			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1760 					  rxdr->dma);
1761 			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1762 					  olddma);
1763 			e_err(probe, "Unable to allocate aligned memory for "
1764 			      "the Rx descriptor ring\n");
1765 			goto setup_rx_desc_die;
1766 		} else {
1767 			/* Free old allocation, new allocation was successful */
1768 			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1769 					  olddma);
1770 		}
1771 	}
1772 	memset(rxdr->desc, 0, rxdr->size);
1773 
1774 	rxdr->next_to_clean = 0;
1775 	rxdr->next_to_use = 0;
1776 	rxdr->rx_skb_top = NULL;
1777 
1778 	return 0;
1779 }
1780 
1781 /**
1782  * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1783  * 				  (Descriptors) for all queues
1784  * @adapter: board private structure
1785  *
1786  * Return 0 on success, negative on failure
1787  **/
1788 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1789 {
1790 	int i, err = 0;
1791 
1792 	for (i = 0; i < adapter->num_rx_queues; i++) {
1793 		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1794 		if (err) {
1795 			e_err(probe, "Allocation for Rx Queue %u failed\n", i);
1796 			for (i-- ; i >= 0; i--)
1797 				e1000_free_rx_resources(adapter,
1798 							&adapter->rx_ring[i]);
1799 			break;
1800 		}
1801 	}
1802 
1803 	return err;
1804 }
1805 
1806 /**
1807  * e1000_setup_rctl - configure the receive control registers
1808  * @adapter: Board private structure
1809  **/
1810 static void e1000_setup_rctl(struct e1000_adapter *adapter)
1811 {
1812 	struct e1000_hw *hw = &adapter->hw;
1813 	u32 rctl;
1814 
1815 	rctl = er32(RCTL);
1816 
1817 	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1818 
1819 	rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1820 		E1000_RCTL_RDMTS_HALF |
1821 		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1822 
1823 	if (hw->tbi_compatibility_on == 1)
1824 		rctl |= E1000_RCTL_SBP;
1825 	else
1826 		rctl &= ~E1000_RCTL_SBP;
1827 
1828 	if (adapter->netdev->mtu <= ETH_DATA_LEN)
1829 		rctl &= ~E1000_RCTL_LPE;
1830 	else
1831 		rctl |= E1000_RCTL_LPE;
1832 
1833 	/* Setup buffer sizes */
1834 	rctl &= ~E1000_RCTL_SZ_4096;
1835 	rctl |= E1000_RCTL_BSEX;
1836 	switch (adapter->rx_buffer_len) {
1837 	case E1000_RXBUFFER_2048:
1838 	default:
1839 		rctl |= E1000_RCTL_SZ_2048;
1840 		rctl &= ~E1000_RCTL_BSEX;
1841 		break;
1842 	case E1000_RXBUFFER_4096:
1843 		rctl |= E1000_RCTL_SZ_4096;
1844 		break;
1845 	case E1000_RXBUFFER_8192:
1846 		rctl |= E1000_RCTL_SZ_8192;
1847 		break;
1848 	case E1000_RXBUFFER_16384:
1849 		rctl |= E1000_RCTL_SZ_16384;
1850 		break;
1851 	}
1852 
1853 	/* This is useful for sniffing bad packets. */
1854 	if (adapter->netdev->features & NETIF_F_RXALL) {
1855 		/* UPE and MPE will be handled by normal PROMISC logic
1856 		 * in e1000_set_rx_mode
1857 		 */
1858 		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
1859 			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
1860 			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
1861 
1862 		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
1863 			  E1000_RCTL_DPF | /* Allow filtered pause */
1864 			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
1865 		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
1866 		 * and that breaks VLANs.
1867 		 */
1868 	}
1869 
1870 	ew32(RCTL, rctl);
1871 }
1872 
1873 /**
1874  * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1875  * @adapter: board private structure
1876  *
1877  * Configure the Rx unit of the MAC after a reset.
1878  **/
1879 static void e1000_configure_rx(struct e1000_adapter *adapter)
1880 {
1881 	u64 rdba;
1882 	struct e1000_hw *hw = &adapter->hw;
1883 	u32 rdlen, rctl, rxcsum;
1884 
1885 	if (adapter->netdev->mtu > ETH_DATA_LEN) {
1886 		rdlen = adapter->rx_ring[0].count *
1887 		        sizeof(struct e1000_rx_desc);
1888 		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
1889 		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1890 	} else {
1891 		rdlen = adapter->rx_ring[0].count *
1892 		        sizeof(struct e1000_rx_desc);
1893 		adapter->clean_rx = e1000_clean_rx_irq;
1894 		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1895 	}
1896 
1897 	/* disable receives while setting up the descriptors */
1898 	rctl = er32(RCTL);
1899 	ew32(RCTL, rctl & ~E1000_RCTL_EN);
1900 
1901 	/* set the Receive Delay Timer Register */
1902 	ew32(RDTR, adapter->rx_int_delay);
1903 
1904 	if (hw->mac_type >= e1000_82540) {
1905 		ew32(RADV, adapter->rx_abs_int_delay);
1906 		if (adapter->itr_setting != 0)
1907 			ew32(ITR, 1000000000 / (adapter->itr * 256));
1908 	}
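	/* The ITR register counts in 256 ns units, so programming
	 * 1000000000 / (itr * 256) yields roughly "itr" interrupts per
	 * second.  Illustrative values: itr = 8000 -> ~488 (about 125 us
	 * between interrupts); itr = 20000 -> ~195 (about 50 us).
	 */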
1909 
1910 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
1911 	 * the Base and Length of the Rx Descriptor Ring
1912 	 */
1913 	switch (adapter->num_rx_queues) {
1914 	case 1:
1915 	default:
1916 		rdba = adapter->rx_ring[0].dma;
1917 		ew32(RDLEN, rdlen);
1918 		ew32(RDBAH, (rdba >> 32));
1919 		ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1920 		ew32(RDT, 0);
1921 		ew32(RDH, 0);
1922 		adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
1923 					   E1000_RDH : E1000_82542_RDH);
1924 		adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
1925 					   E1000_RDT : E1000_82542_RDT);
1926 		break;
1927 	}
1928 
1929 	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
1930 	if (hw->mac_type >= e1000_82543) {
1931 		rxcsum = er32(RXCSUM);
1932 		if (adapter->rx_csum)
1933 			rxcsum |= E1000_RXCSUM_TUOFL;
1934 		else
1935 			/* don't need to clear IPPCSE as it defaults to 0 */
1936 			rxcsum &= ~E1000_RXCSUM_TUOFL;
1937 		ew32(RXCSUM, rxcsum);
1938 	}
1939 
1940 	/* Enable Receives */
1941 	ew32(RCTL, rctl | E1000_RCTL_EN);
1942 }
1943 
1944 /**
1945  * e1000_free_tx_resources - Free Tx Resources per Queue
1946  * @adapter: board private structure
1947  * @tx_ring: Tx descriptor ring for a specific queue
1948  *
1949  * Free all transmit software resources
1950  **/
1951 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1952 				    struct e1000_tx_ring *tx_ring)
1953 {
1954 	struct pci_dev *pdev = adapter->pdev;
1955 
1956 	e1000_clean_tx_ring(adapter, tx_ring);
1957 
1958 	vfree(tx_ring->buffer_info);
1959 	tx_ring->buffer_info = NULL;
1960 
1961 	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1962 			  tx_ring->dma);
1963 
1964 	tx_ring->desc = NULL;
1965 }
1966 
1967 /**
1968  * e1000_free_all_tx_resources - Free Tx Resources for All Queues
1969  * @adapter: board private structure
1970  *
1971  * Free all transmit software resources
1972  **/
1973 void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1974 {
1975 	int i;
1976 
1977 	for (i = 0; i < adapter->num_tx_queues; i++)
1978 		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1979 }
1980 
1981 static void
1982 e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1983 				 struct e1000_tx_buffer *buffer_info)
1984 {
1985 	if (buffer_info->dma) {
1986 		if (buffer_info->mapped_as_page)
1987 			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1988 				       buffer_info->length, DMA_TO_DEVICE);
1989 		else
1990 			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1991 					 buffer_info->length,
1992 					 DMA_TO_DEVICE);
1993 		buffer_info->dma = 0;
1994 	}
1995 	if (buffer_info->skb) {
1996 		dev_kfree_skb_any(buffer_info->skb);
1997 		buffer_info->skb = NULL;
1998 	}
1999 	buffer_info->time_stamp = 0;
2000 	/* buffer_info must be completely set up in the transmit path */
2001 }
2002 
2003 /**
2004  * e1000_clean_tx_ring - Free Tx Buffers
2005  * @adapter: board private structure
2006  * @tx_ring: ring to be cleaned
2007  **/
2008 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
2009 				struct e1000_tx_ring *tx_ring)
2010 {
2011 	struct e1000_hw *hw = &adapter->hw;
2012 	struct e1000_tx_buffer *buffer_info;
2013 	unsigned long size;
2014 	unsigned int i;
2015 
2016 	/* Free all the Tx ring sk_buffs */
2017 
2018 	for (i = 0; i < tx_ring->count; i++) {
2019 		buffer_info = &tx_ring->buffer_info[i];
2020 		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2021 	}
2022 
2023 	netdev_reset_queue(adapter->netdev);
2024 	size = sizeof(struct e1000_tx_buffer) * tx_ring->count;
2025 	memset(tx_ring->buffer_info, 0, size);
2026 
2027 	/* Zero out the descriptor ring */
2028 
2029 	memset(tx_ring->desc, 0, tx_ring->size);
2030 
2031 	tx_ring->next_to_use = 0;
2032 	tx_ring->next_to_clean = 0;
2033 	tx_ring->last_tx_tso = false;
2034 
2035 	writel(0, hw->hw_addr + tx_ring->tdh);
2036 	writel(0, hw->hw_addr + tx_ring->tdt);
2037 }
2038 
2039 /**
2040  * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2041  * @adapter: board private structure
2042  **/
2043 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2044 {
2045 	int i;
2046 
2047 	for (i = 0; i < adapter->num_tx_queues; i++)
2048 		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2049 }
2050 
2051 /**
2052  * e1000_free_rx_resources - Free Rx Resources
2053  * @adapter: board private structure
2054  * @rx_ring: ring to clean the resources from
2055  *
2056  * Free all receive software resources
2057  **/
2058 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2059 				    struct e1000_rx_ring *rx_ring)
2060 {
2061 	struct pci_dev *pdev = adapter->pdev;
2062 
2063 	e1000_clean_rx_ring(adapter, rx_ring);
2064 
2065 	vfree(rx_ring->buffer_info);
2066 	rx_ring->buffer_info = NULL;
2067 
2068 	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2069 			  rx_ring->dma);
2070 
2071 	rx_ring->desc = NULL;
2072 }
2073 
2074 /**
2075  * e1000_free_all_rx_resources - Free Rx Resources for All Queues
2076  * @adapter: board private structure
2077  *
2078  * Free all receive software resources
2079  **/
2080 void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2081 {
2082 	int i;
2083 
2084 	for (i = 0; i < adapter->num_rx_queues; i++)
2085 		e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2086 }
2087 
2088 #define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
2089 static unsigned int e1000_frag_len(const struct e1000_adapter *a)
2090 {
2091 	return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) +
2092 		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2093 }
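/* Sketch of the buffer layout this implies (assuming the standard
 * build_skb() contract used by the Rx cleanup path):
 *
 *	[ E1000_HEADROOM | rx_buffer_len bytes of packet data | shared info ]
 *
 * i.e. the fragment must leave room for the headroom in front of the data
 * and for struct skb_shared_info behind it.
 */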
2094 
2095 static void *e1000_alloc_frag(const struct e1000_adapter *a)
2096 {
2097 	unsigned int len = e1000_frag_len(a);
2098 	u8 *data = netdev_alloc_frag(len);
2099 
2100 	if (likely(data))
2101 		data += E1000_HEADROOM;
2102 	return data;
2103 }
2104 
2105 /**
2106  * e1000_clean_rx_ring - Free Rx Buffers per Queue
2107  * @adapter: board private structure
2108  * @rx_ring: ring to free buffers from
2109  **/
2110 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2111 				struct e1000_rx_ring *rx_ring)
2112 {
2113 	struct e1000_hw *hw = &adapter->hw;
2114 	struct e1000_rx_buffer *buffer_info;
2115 	struct pci_dev *pdev = adapter->pdev;
2116 	unsigned long size;
2117 	unsigned int i;
2118 
2119 	/* Free all the Rx netfrags */
2120 	for (i = 0; i < rx_ring->count; i++) {
2121 		buffer_info = &rx_ring->buffer_info[i];
2122 		if (adapter->clean_rx == e1000_clean_rx_irq) {
2123 			if (buffer_info->dma)
2124 				dma_unmap_single(&pdev->dev, buffer_info->dma,
2125 						 adapter->rx_buffer_len,
2126 						 DMA_FROM_DEVICE);
2127 			if (buffer_info->rxbuf.data) {
2128 				skb_free_frag(buffer_info->rxbuf.data);
2129 				buffer_info->rxbuf.data = NULL;
2130 			}
2131 		} else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
2132 			if (buffer_info->dma)
2133 				dma_unmap_page(&pdev->dev, buffer_info->dma,
2134 					       adapter->rx_buffer_len,
2135 					       DMA_FROM_DEVICE);
2136 			if (buffer_info->rxbuf.page) {
2137 				put_page(buffer_info->rxbuf.page);
2138 				buffer_info->rxbuf.page = NULL;
2139 			}
2140 		}
2141 
2142 		buffer_info->dma = 0;
2143 	}
2144 
2145 	/* there also may be some cached data from a chained receive */
2146 	napi_free_frags(&adapter->napi);
2147 	rx_ring->rx_skb_top = NULL;
2148 
2149 	size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
2150 	memset(rx_ring->buffer_info, 0, size);
2151 
2152 	/* Zero out the descriptor ring */
2153 	memset(rx_ring->desc, 0, rx_ring->size);
2154 
2155 	rx_ring->next_to_clean = 0;
2156 	rx_ring->next_to_use = 0;
2157 
2158 	writel(0, hw->hw_addr + rx_ring->rdh);
2159 	writel(0, hw->hw_addr + rx_ring->rdt);
2160 }
2161 
2162 /**
2163  * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2164  * @adapter: board private structure
2165  **/
2166 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2167 {
2168 	int i;
2169 
2170 	for (i = 0; i < adapter->num_rx_queues; i++)
2171 		e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2172 }
2173 
2174 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2175  * and memory write and invalidate disabled for certain operations
2176  */
2177 static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2178 {
2179 	struct e1000_hw *hw = &adapter->hw;
2180 	struct net_device *netdev = adapter->netdev;
2181 	u32 rctl;
2182 
2183 	e1000_pci_clear_mwi(hw);
2184 
2185 	rctl = er32(RCTL);
2186 	rctl |= E1000_RCTL_RST;
2187 	ew32(RCTL, rctl);
2188 	E1000_WRITE_FLUSH();
2189 	mdelay(5);
2190 
2191 	if (netif_running(netdev))
2192 		e1000_clean_all_rx_rings(adapter);
2193 }
2194 
2195 static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2196 {
2197 	struct e1000_hw *hw = &adapter->hw;
2198 	struct net_device *netdev = adapter->netdev;
2199 	u32 rctl;
2200 
2201 	rctl = er32(RCTL);
2202 	rctl &= ~E1000_RCTL_RST;
2203 	ew32(RCTL, rctl);
2204 	E1000_WRITE_FLUSH();
2205 	mdelay(5);
2206 
2207 	if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2208 		e1000_pci_set_mwi(hw);
2209 
2210 	if (netif_running(netdev)) {
2211 		/* No need to loop, because 82542 supports only 1 queue */
2212 		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2213 		e1000_configure_rx(adapter);
2214 		adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2215 	}
2216 }
2217 
2218 /**
2219  * e1000_set_mac - Change the Ethernet Address of the NIC
2220  * @netdev: network interface device structure
2221  * @p: pointer to an address structure
2222  *
2223  * Returns 0 on success, negative on failure
2224  **/
2225 static int e1000_set_mac(struct net_device *netdev, void *p)
2226 {
2227 	struct e1000_adapter *adapter = netdev_priv(netdev);
2228 	struct e1000_hw *hw = &adapter->hw;
2229 	struct sockaddr *addr = p;
2230 
2231 	if (!is_valid_ether_addr(addr->sa_data))
2232 		return -EADDRNOTAVAIL;
2233 
2234 	/* 82542 2.0 needs to be in reset to write receive address registers */
2235 
2236 	if (hw->mac_type == e1000_82542_rev2_0)
2237 		e1000_enter_82542_rst(adapter);
2238 
2239 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2240 	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2241 
2242 	e1000_rar_set(hw, hw->mac_addr, 0);
2243 
2244 	if (hw->mac_type == e1000_82542_rev2_0)
2245 		e1000_leave_82542_rst(adapter);
2246 
2247 	return 0;
2248 }
2249 
2250 /**
2251  * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2252  * @netdev: network interface device structure
2253  *
2254  * The set_rx_mode entry point is called whenever the unicast or multicast
2255  * address lists or the network interface flags are updated. This routine is
2256  * responsible for configuring the hardware for proper unicast, multicast,
2257  * promiscuous mode, and all-multi behavior.
2258  **/
2259 static void e1000_set_rx_mode(struct net_device *netdev)
2260 {
2261 	struct e1000_adapter *adapter = netdev_priv(netdev);
2262 	struct e1000_hw *hw = &adapter->hw;
2263 	struct netdev_hw_addr *ha;
2264 	bool use_uc = false;
2265 	u32 rctl;
2266 	u32 hash_value;
2267 	int i, rar_entries = E1000_RAR_ENTRIES;
2268 	int mta_reg_count = E1000_NUM_MTA_REGISTERS;
2269 	u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2270 
2271 	if (!mcarray)
2272 		return;
2273 
2274 	/* Check for Promiscuous and All Multicast modes */
2275 
2276 	rctl = er32(RCTL);
2277 
2278 	if (netdev->flags & IFF_PROMISC) {
2279 		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2280 		rctl &= ~E1000_RCTL_VFE;
2281 	} else {
2282 		if (netdev->flags & IFF_ALLMULTI)
2283 			rctl |= E1000_RCTL_MPE;
2284 		else
2285 			rctl &= ~E1000_RCTL_MPE;
2286 		/* Enable VLAN filter if there is a VLAN */
2287 		if (e1000_vlan_used(adapter))
2288 			rctl |= E1000_RCTL_VFE;
2289 	}
2290 
2291 	if (netdev_uc_count(netdev) > rar_entries - 1) {
2292 		rctl |= E1000_RCTL_UPE;
2293 	} else if (!(netdev->flags & IFF_PROMISC)) {
2294 		rctl &= ~E1000_RCTL_UPE;
2295 		use_uc = true;
2296 	}
2297 
2298 	ew32(RCTL, rctl);
2299 
2300 	/* 82542 2.0 needs to be in reset to write receive address registers */
2301 
2302 	if (hw->mac_type == e1000_82542_rev2_0)
2303 		e1000_enter_82542_rst(adapter);
2304 
2305 	/* Load the first 14 addresses into the exact filters 1-14. Unicast
2306 	 * addresses take precedence to avoid disabling unicast filtering
2307 	 * when possible.
2308 	 *
2309 	 * RAR 0 is used for the station MAC address; if there are fewer
2310 	 * addresses than filters, go ahead and clear the remaining ones.
2311 	 */
2312 	i = 1;
2313 	if (use_uc)
2314 		netdev_for_each_uc_addr(ha, netdev) {
2315 			if (i == rar_entries)
2316 				break;
2317 			e1000_rar_set(hw, ha->addr, i++);
2318 		}
2319 
2320 	netdev_for_each_mc_addr(ha, netdev) {
2321 		if (i == rar_entries) {
2322 			/* load any remaining addresses into the hash table */
2323 			u32 hash_reg, hash_bit, mta;
2324 			hash_value = e1000_hash_mc_addr(hw, ha->addr);
2325 			hash_reg = (hash_value >> 5) & 0x7F;
2326 			hash_bit = hash_value & 0x1F;
2327 			mta = (1 << hash_bit);
2328 			mcarray[hash_reg] |= mta;
2329 		} else {
2330 			e1000_rar_set(hw, ha->addr, i++);
2331 		}
2332 	}
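	/* Worked example (illustrative): if e1000_hash_mc_addr() returned
	 * hash_value = 0x0ABC, then hash_reg = (0x0ABC >> 5) & 0x7F = 0x55
	 * and hash_bit = 0x0ABC & 0x1F = 28, so bit 28 of MTA register 0x55
	 * is set in mcarray[] and written out below.
	 */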
2333 
2334 	for (; i < rar_entries; i++) {
2335 		E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2336 		E1000_WRITE_FLUSH();
2337 		E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2338 		E1000_WRITE_FLUSH();
2339 	}
2340 
2341 	/* write the hash table completely, write from bottom to avoid
2342 	 * both stupid write combining chipsets, and flushing each write
2343 	 */
2344 	for (i = mta_reg_count - 1; i >= 0 ; i--) {
2345 		/* The 82544 has an erratum where writing an odd MTA offset
2346 		 * overwrites the previous even offset; writing backwards
2347 		 * over the range works around it by always writing the odd
2348 		 * offset first.
2349 		 */
2350 		E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2351 	}
2352 	E1000_WRITE_FLUSH();
2353 
2354 	if (hw->mac_type == e1000_82542_rev2_0)
2355 		e1000_leave_82542_rst(adapter);
2356 
2357 	kfree(mcarray);
2358 }
2359 
2360 /**
2361  * e1000_update_phy_info_task - get phy info
2362  * @work: work struct contained inside adapter struct
2363  *
2364  * Need to wait a few seconds after link up to get diagnostic information from
2365  * the phy
2366  */
2367 static void e1000_update_phy_info_task(struct work_struct *work)
2368 {
2369 	struct e1000_adapter *adapter = container_of(work,
2370 						     struct e1000_adapter,
2371 						     phy_info_task.work);
2372 
2373 	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2374 }
2375 
2376 /**
2377  * e1000_82547_tx_fifo_stall_task - task to complete work
2378  * @work: work struct contained inside adapter struct
2379  **/
2380 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2381 {
2382 	struct e1000_adapter *adapter = container_of(work,
2383 						     struct e1000_adapter,
2384 						     fifo_stall_task.work);
2385 	struct e1000_hw *hw = &adapter->hw;
2386 	struct net_device *netdev = adapter->netdev;
2387 	u32 tctl;
2388 
2389 	if (atomic_read(&adapter->tx_fifo_stall)) {
2390 		if ((er32(TDT) == er32(TDH)) &&
2391 		   (er32(TDFT) == er32(TDFH)) &&
2392 		   (er32(TDFTS) == er32(TDFHS))) {
2393 			tctl = er32(TCTL);
2394 			ew32(TCTL, tctl & ~E1000_TCTL_EN);
2395 			ew32(TDFT, adapter->tx_head_addr);
2396 			ew32(TDFH, adapter->tx_head_addr);
2397 			ew32(TDFTS, adapter->tx_head_addr);
2398 			ew32(TDFHS, adapter->tx_head_addr);
2399 			ew32(TCTL, tctl);
2400 			E1000_WRITE_FLUSH();
2401 
2402 			adapter->tx_fifo_head = 0;
2403 			atomic_set(&adapter->tx_fifo_stall, 0);
2404 			netif_wake_queue(netdev);
2405 		} else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
2406 			schedule_delayed_work(&adapter->fifo_stall_task, 1);
2407 		}
2408 	}
2409 }
2410 
2411 bool e1000_has_link(struct e1000_adapter *adapter)
2412 {
2413 	struct e1000_hw *hw = &adapter->hw;
2414 	bool link_active = false;
2415 
2416 	/* get_link_status is set on LSC (link status) interrupt or rx
2417 	 * sequence error interrupt (except on intel ce4100).
2418 	 * get_link_status then stays set until
2419 	 * e1000_check_for_link establishes link; this applies to copper
2420 	 * adapters ONLY
2421 	 */
2422 	switch (hw->media_type) {
2423 	case e1000_media_type_copper:
2424 		if (hw->mac_type == e1000_ce4100)
2425 			hw->get_link_status = 1;
2426 		if (hw->get_link_status) {
2427 			e1000_check_for_link(hw);
2428 			link_active = !hw->get_link_status;
2429 		} else {
2430 			link_active = true;
2431 		}
2432 		break;
2433 	case e1000_media_type_fiber:
2434 		e1000_check_for_link(hw);
2435 		link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2436 		break;
2437 	case e1000_media_type_internal_serdes:
2438 		e1000_check_for_link(hw);
2439 		link_active = hw->serdes_has_link;
2440 		break;
2441 	default:
2442 		break;
2443 	}
2444 
2445 	return link_active;
2446 }
2447 
2448 /**
2449  * e1000_watchdog - work function
2450  * @work: work struct contained inside adapter struct
2451  **/
2452 static void e1000_watchdog(struct work_struct *work)
2453 {
2454 	struct e1000_adapter *adapter = container_of(work,
2455 						     struct e1000_adapter,
2456 						     watchdog_task.work);
2457 	struct e1000_hw *hw = &adapter->hw;
2458 	struct net_device *netdev = adapter->netdev;
2459 	struct e1000_tx_ring *txdr = adapter->tx_ring;
2460 	u32 link, tctl;
2461 
2462 	link = e1000_has_link(adapter);
2463 	if ((netif_carrier_ok(netdev)) && link)
2464 		goto link_up;
2465 
2466 	if (link) {
2467 		if (!netif_carrier_ok(netdev)) {
2468 			u32 ctrl;
2469 			bool txb2b = true;
2470 			/* update snapshot of PHY registers on LSC */
2471 			e1000_get_speed_and_duplex(hw,
2472 						   &adapter->link_speed,
2473 						   &adapter->link_duplex);
2474 
2475 			ctrl = er32(CTRL);
2476 			pr_info("%s NIC Link is Up %d Mbps %s, "
2477 				"Flow Control: %s\n",
2478 				netdev->name,
2479 				adapter->link_speed,
2480 				adapter->link_duplex == FULL_DUPLEX ?
2481 				"Full Duplex" : "Half Duplex",
2482 				((ctrl & E1000_CTRL_TFCE) && (ctrl &
2483 				E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2484 				E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2485 				E1000_CTRL_TFCE) ? "TX" : "None")));
2486 
2487 			/* adjust timeout factor according to speed/duplex */
2488 			adapter->tx_timeout_factor = 1;
2489 			switch (adapter->link_speed) {
2490 			case SPEED_10:
2491 				txb2b = false;
2492 				adapter->tx_timeout_factor = 16;
2493 				break;
2494 			case SPEED_100:
2495 				txb2b = false;
2496 				/* maybe add some timeout factor ? */
2497 				break;
2498 			}
2499 
2500 			/* enable transmits in the hardware */
2501 			tctl = er32(TCTL);
2502 			tctl |= E1000_TCTL_EN;
2503 			ew32(TCTL, tctl);
2504 
2505 			netif_carrier_on(netdev);
2506 			if (!test_bit(__E1000_DOWN, &adapter->flags))
2507 				schedule_delayed_work(&adapter->phy_info_task,
2508 						      2 * HZ);
2509 			adapter->smartspeed = 0;
2510 		}
2511 	} else {
2512 		if (netif_carrier_ok(netdev)) {
2513 			adapter->link_speed = 0;
2514 			adapter->link_duplex = 0;
2515 			pr_info("%s NIC Link is Down\n",
2516 				netdev->name);
2517 			netif_carrier_off(netdev);
2518 
2519 			if (!test_bit(__E1000_DOWN, &adapter->flags))
2520 				schedule_delayed_work(&adapter->phy_info_task,
2521 						      2 * HZ);
2522 		}
2523 
2524 		e1000_smartspeed(adapter);
2525 	}
2526 
2527 link_up:
2528 	e1000_update_stats(adapter);
2529 
2530 	hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2531 	adapter->tpt_old = adapter->stats.tpt;
2532 	hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2533 	adapter->colc_old = adapter->stats.colc;
2534 
2535 	adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2536 	adapter->gorcl_old = adapter->stats.gorcl;
2537 	adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2538 	adapter->gotcl_old = adapter->stats.gotcl;
2539 
2540 	e1000_update_adaptive(hw);
2541 
2542 	if (!netif_carrier_ok(netdev)) {
2543 		if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2544 			/* We've lost link, so the controller stops DMA,
2545 			 * but we've got queued Tx work that's never going
2546 			 * to get done, so reset controller to flush Tx.
2547 			 * (Do the reset outside of interrupt context).
2548 			 */
2549 			adapter->tx_timeout_count++;
2550 			schedule_work(&adapter->reset_task);
2551 			/* exit immediately since reset is imminent */
2552 			return;
2553 		}
2554 	}
2555 
2556 	/* Simple mode for Interrupt Throttle Rate (ITR) */
2557 	if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2558 		/* Symmetric Tx/Rx gets a reduced ITR=2000;
2559 		 * Total asymmetrical Tx or Rx gets ITR=8000;
2560 		 * everyone else is between 2000-8000.
2561 		 */
2562 		u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2563 		u32 dif = (adapter->gotcl > adapter->gorcl ?
2564 			    adapter->gotcl - adapter->gorcl :
2565 			    adapter->gorcl - adapter->gotcl) / 10000;
2566 		u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2567 
2568 		ew32(ITR, 1000000000 / (itr * 256));
2569 	}
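	/* Illustrative endpoints of the formula above: perfectly symmetric
	 * traffic (gotcl == gorcl) gives dif = 0 and therefore itr = 2000,
	 * completely one-sided traffic gives dif == goc and itr = 8000, and
	 * mixed loads land in between.
	 */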
2570 
2571 	/* Cause software interrupt to ensure rx ring is cleaned */
2572 	ew32(ICS, E1000_ICS_RXDMT0);
2573 
2574 	/* Force detection of hung controller every watchdog period */
2575 	adapter->detect_tx_hung = true;
2576 
2577 	/* Reschedule the task */
2578 	if (!test_bit(__E1000_DOWN, &adapter->flags))
2579 		schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
2580 }
2581 
2582 enum latency_range {
2583 	lowest_latency = 0,
2584 	low_latency = 1,
2585 	bulk_latency = 2,
2586 	latency_invalid = 255
2587 };
2588 
2589 /**
2590  * e1000_update_itr - update the dynamic ITR value based on statistics
2591  * @adapter: pointer to adapter
2592  * @itr_setting: current adapter->itr
2593  * @packets: the number of packets during this measurement interval
2594  * @bytes: the number of bytes during this measurement interval
2595  *
2596  *      Stores a new ITR value based on packets and byte
2597  *      counts during the last interrupt.  The advantage of per interrupt
2598  *      computation is faster updates and more accurate ITR for the current
2599  *      traffic pattern.  Constants in this function were computed
2600  *      based on theoretical maximum wire speed and thresholds were set based
2601  *      on testing data as well as attempting to minimize response time
2602  *      while increasing bulk throughput.
2603  *      This functionality is controlled by the InterruptThrottleRate module
2604  *      parameter (see e1000_param.c).
2605  **/
2606 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2607 				     u16 itr_setting, int packets, int bytes)
2608 {
2609 	unsigned int retval = itr_setting;
2610 	struct e1000_hw *hw = &adapter->hw;
2611 
2612 	if (unlikely(hw->mac_type < e1000_82540))
2613 		goto update_itr_done;
2614 
2615 	if (packets == 0)
2616 		goto update_itr_done;
2617 
2618 	switch (itr_setting) {
2619 	case lowest_latency:
2620 		/* jumbo frames get bulk treatment */
2621 		if (bytes/packets > 8000)
2622 			retval = bulk_latency;
2623 		else if ((packets < 5) && (bytes > 512))
2624 			retval = low_latency;
2625 		break;
2626 	case low_latency:  /* 50 usec aka 20000 ints/s */
2627 		if (bytes > 10000) {
2628 			/* jumbo frames need bulk latency setting */
2629 			if (bytes/packets > 8000)
2630 				retval = bulk_latency;
2631 			else if ((packets < 10) || ((bytes/packets) > 1200))
2632 				retval = bulk_latency;
2633 			else if ((packets > 35))
2634 				retval = lowest_latency;
2635 		} else if (bytes/packets > 2000)
2636 			retval = bulk_latency;
2637 		else if (packets <= 2 && bytes < 512)
2638 			retval = lowest_latency;
2639 		break;
2640 	case bulk_latency: /* 250 usec aka 4000 ints/s */
2641 		if (bytes > 25000) {
2642 			if (packets > 35)
2643 				retval = low_latency;
2644 		} else if (bytes < 6000) {
2645 			retval = low_latency;
2646 		}
2647 		break;
2648 	}
2649 
2650 update_itr_done:
2651 	return retval;
2652 }
2653 
2654 static void e1000_set_itr(struct e1000_adapter *adapter)
2655 {
2656 	struct e1000_hw *hw = &adapter->hw;
2657 	u16 current_itr;
2658 	u32 new_itr = adapter->itr;
2659 
2660 	if (unlikely(hw->mac_type < e1000_82540))
2661 		return;
2662 
2663 	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2664 	if (unlikely(adapter->link_speed != SPEED_1000)) {
2665 		current_itr = 0;
2666 		new_itr = 4000;
2667 		goto set_itr_now;
2668 	}
2669 
2670 	adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
2671 					   adapter->total_tx_packets,
2672 					   adapter->total_tx_bytes);
2673 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
2674 	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2675 		adapter->tx_itr = low_latency;
2676 
2677 	adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
2678 					   adapter->total_rx_packets,
2679 					   adapter->total_rx_bytes);
2680 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
2681 	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2682 		adapter->rx_itr = low_latency;
2683 
2684 	current_itr = max(adapter->rx_itr, adapter->tx_itr);
2685 
2686 	switch (current_itr) {
2687 	/* counts and packets in update_itr are dependent on these numbers */
2688 	case lowest_latency:
2689 		new_itr = 70000;
2690 		break;
2691 	case low_latency:
2692 		new_itr = 20000; /* aka hwitr = ~200 */
2693 		break;
2694 	case bulk_latency:
2695 		new_itr = 4000;
2696 		break;
2697 	default:
2698 		break;
2699 	}
2700 
2701 set_itr_now:
2702 	if (new_itr != adapter->itr) {
2703 		/* this attempts to bias the interrupt rate towards Bulk
2704 		 * by adding intermediate steps when interrupt rate is
2705 		 * increasing
2706 		 */
2707 		new_itr = new_itr > adapter->itr ?
2708 			  min(adapter->itr + (new_itr >> 2), new_itr) :
2709 			  new_itr;
2710 		adapter->itr = new_itr;
2711 		ew32(ITR, 1000000000 / (new_itr * 256));
2712 	}
2713 }
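/* Note on the ramp in e1000_set_itr() (numbers illustrative): when the
 * target rate rises, adapter->itr only climbs by a quarter of the new
 * target per invocation, e.g. itr = 4000 with new_itr = 20000 becomes
 * min(4000 + (20000 >> 2), 20000) = 9000, biasing the rate toward bulk
 * behaviour while it converges.
 */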
2714 
2715 #define E1000_TX_FLAGS_CSUM		0x00000001
2716 #define E1000_TX_FLAGS_VLAN		0x00000002
2717 #define E1000_TX_FLAGS_TSO		0x00000004
2718 #define E1000_TX_FLAGS_IPV4		0x00000008
2719 #define E1000_TX_FLAGS_NO_FCS		0x00000010
2720 #define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
2721 #define E1000_TX_FLAGS_VLAN_SHIFT	16
2722 
2723 static int e1000_tso(struct e1000_adapter *adapter,
2724 		     struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2725 		     __be16 protocol)
2726 {
2727 	struct e1000_context_desc *context_desc;
2728 	struct e1000_tx_buffer *buffer_info;
2729 	unsigned int i;
2730 	u32 cmd_length = 0;
2731 	u16 ipcse = 0, tucse, mss;
2732 	u8 ipcss, ipcso, tucss, tucso, hdr_len;
2733 
2734 	if (skb_is_gso(skb)) {
2735 		int err;
2736 
2737 		err = skb_cow_head(skb, 0);
2738 		if (err < 0)
2739 			return err;
2740 
2741 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2742 		mss = skb_shinfo(skb)->gso_size;
2743 		if (protocol == htons(ETH_P_IP)) {
2744 			struct iphdr *iph = ip_hdr(skb);
2745 			iph->tot_len = 0;
2746 			iph->check = 0;
2747 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2748 								 iph->daddr, 0,
2749 								 IPPROTO_TCP,
2750 								 0);
2751 			cmd_length = E1000_TXD_CMD_IP;
2752 			ipcse = skb_transport_offset(skb) - 1;
2753 		} else if (skb_is_gso_v6(skb)) {
2754 			ipv6_hdr(skb)->payload_len = 0;
2755 			tcp_hdr(skb)->check =
2756 				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2757 						 &ipv6_hdr(skb)->daddr,
2758 						 0, IPPROTO_TCP, 0);
2759 			ipcse = 0;
2760 		}
2761 		ipcss = skb_network_offset(skb);
2762 		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2763 		tucss = skb_transport_offset(skb);
2764 		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2765 		tucse = 0;
2766 
2767 		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2768 			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2769 
2770 		i = tx_ring->next_to_use;
2771 		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2772 		buffer_info = &tx_ring->buffer_info[i];
2773 
2774 		context_desc->lower_setup.ip_fields.ipcss  = ipcss;
2775 		context_desc->lower_setup.ip_fields.ipcso  = ipcso;
2776 		context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
2777 		context_desc->upper_setup.tcp_fields.tucss = tucss;
2778 		context_desc->upper_setup.tcp_fields.tucso = tucso;
2779 		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2780 		context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
2781 		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2782 		context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2783 
2784 		buffer_info->time_stamp = jiffies;
2785 		buffer_info->next_to_watch = i;
2786 
2787 		if (++i == tx_ring->count) i = 0;
2788 		tx_ring->next_to_use = i;
2789 
2790 		return true;
2791 	}
2792 	return false;
2793 }
2794 
2795 static bool e1000_tx_csum(struct e1000_adapter *adapter,
2796 			  struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2797 			  __be16 protocol)
2798 {
2799 	struct e1000_context_desc *context_desc;
2800 	struct e1000_tx_buffer *buffer_info;
2801 	unsigned int i;
2802 	u8 css;
2803 	u32 cmd_len = E1000_TXD_CMD_DEXT;
2804 
2805 	if (skb->ip_summed != CHECKSUM_PARTIAL)
2806 		return false;
2807 
2808 	switch (protocol) {
2809 	case cpu_to_be16(ETH_P_IP):
2810 		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2811 			cmd_len |= E1000_TXD_CMD_TCP;
2812 		break;
2813 	case cpu_to_be16(ETH_P_IPV6):
2814 		/* XXX not handling all IPV6 headers */
2815 		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2816 			cmd_len |= E1000_TXD_CMD_TCP;
2817 		break;
2818 	default:
2819 		if (unlikely(net_ratelimit()))
2820 			e_warn(drv, "checksum_partial proto=%x!\n",
2821 			       skb->protocol);
2822 		break;
2823 	}
2824 
2825 	css = skb_checksum_start_offset(skb);
2826 
2827 	i = tx_ring->next_to_use;
2828 	buffer_info = &tx_ring->buffer_info[i];
2829 	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2830 
2831 	context_desc->lower_setup.ip_config = 0;
2832 	context_desc->upper_setup.tcp_fields.tucss = css;
2833 	context_desc->upper_setup.tcp_fields.tucso =
2834 		css + skb->csum_offset;
2835 	context_desc->upper_setup.tcp_fields.tucse = 0;
2836 	context_desc->tcp_seg_setup.data = 0;
2837 	context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2838 
2839 	buffer_info->time_stamp = jiffies;
2840 	buffer_info->next_to_watch = i;
2841 
2842 	if (unlikely(++i == tx_ring->count)) i = 0;
2843 	tx_ring->next_to_use = i;
2844 
2845 	return true;
2846 }
2847 
2848 #define E1000_MAX_TXD_PWR	12
2849 #define E1000_MAX_DATA_PER_TXD	(1<<E1000_MAX_TXD_PWR)
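/* i.e. a single data descriptor carries at most 1 << 12 = 4096 bytes;
 * larger buffers are split across several descriptors by e1000_tx_map()
 * using TXD_USE_COUNT() below.
 */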
2850 
2851 static int e1000_tx_map(struct e1000_adapter *adapter,
2852 			struct e1000_tx_ring *tx_ring,
2853 			struct sk_buff *skb, unsigned int first,
2854 			unsigned int max_per_txd, unsigned int nr_frags,
2855 			unsigned int mss)
2856 {
2857 	struct e1000_hw *hw = &adapter->hw;
2858 	struct pci_dev *pdev = adapter->pdev;
2859 	struct e1000_tx_buffer *buffer_info;
2860 	unsigned int len = skb_headlen(skb);
2861 	unsigned int offset = 0, size, count = 0, i;
2862 	unsigned int f, bytecount, segs;
2863 
2864 	i = tx_ring->next_to_use;
2865 
2866 	while (len) {
2867 		buffer_info = &tx_ring->buffer_info[i];
2868 		size = min(len, max_per_txd);
2869 		/* Workaround for a controller erratum:
2870 		 * the descriptor for a non-TSO packet in a linear skb that
2871 		 * follows a TSO packet gets written back prematurely, before
2872 		 * the data is fully DMA'd to the controller.
2873 		 */
2874 		if (!skb->data_len && tx_ring->last_tx_tso &&
2875 		    !skb_is_gso(skb)) {
2876 			tx_ring->last_tx_tso = false;
2877 			size -= 4;
2878 		}
2879 
2880 		/* Workaround for premature desc write-backs
2881 		 * in TSO mode.  Append 4-byte sentinel desc
2882 		 */
2883 		if (unlikely(mss && !nr_frags && size == len && size > 8))
2884 			size -= 4;
2885 		/* Workaround for erratum 10, which applies to all
2886 		 * controllers in PCI-X mode.
2887 		 * The fix is to make sure that the first descriptor of a
2888 		 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes.
2889 		 */
2890 		if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
2891 			     (size > 2015) && count == 0))
2892 			size = 2015;
2893 
2894 		/* Workaround for potential 82544 hang in PCI-X.  Avoid
2895 		 * terminating buffers within evenly-aligned dwords.
2896 		 */
2897 		if (unlikely(adapter->pcix_82544 &&
2898 		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2899 		   size > 4))
2900 			size -= 4;
2901 
2902 		buffer_info->length = size;
2903 		/* set time_stamp *before* dma to help avoid a possible race */
2904 		buffer_info->time_stamp = jiffies;
2905 		buffer_info->mapped_as_page = false;
2906 		buffer_info->dma = dma_map_single(&pdev->dev,
2907 						  skb->data + offset,
2908 						  size, DMA_TO_DEVICE);
2909 		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2910 			goto dma_error;
2911 		buffer_info->next_to_watch = i;
2912 
2913 		len -= size;
2914 		offset += size;
2915 		count++;
2916 		if (len) {
2917 			i++;
2918 			if (unlikely(i == tx_ring->count))
2919 				i = 0;
2920 		}
2921 	}
2922 
2923 	for (f = 0; f < nr_frags; f++) {
2924 		const struct skb_frag_struct *frag;
2925 
2926 		frag = &skb_shinfo(skb)->frags[f];
2927 		len = skb_frag_size(frag);
2928 		offset = 0;
2929 
2930 		while (len) {
2931 			unsigned long bufend;
2932 			i++;
2933 			if (unlikely(i == tx_ring->count))
2934 				i = 0;
2935 
2936 			buffer_info = &tx_ring->buffer_info[i];
2937 			size = min(len, max_per_txd);
2938 			/* Workaround for premature desc write-backs
2939 			 * in TSO mode.  Append 4-byte sentinel desc
2940 			 */
2941 			if (unlikely(mss && f == (nr_frags-1) &&
2942 			    size == len && size > 8))
2943 				size -= 4;
2944 			/* Workaround for potential 82544 hang in PCI-X.
2945 			 * Avoid terminating buffers within evenly-aligned
2946 			 * dwords.
2947 			 */
2948 			bufend = (unsigned long)
2949 				page_to_phys(skb_frag_page(frag));
2950 			bufend += offset + size - 1;
2951 			if (unlikely(adapter->pcix_82544 &&
2952 				     !(bufend & 4) &&
2953 				     size > 4))
2954 				size -= 4;
2955 
2956 			buffer_info->length = size;
2957 			buffer_info->time_stamp = jiffies;
2958 			buffer_info->mapped_as_page = true;
2959 			buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
2960 						offset, size, DMA_TO_DEVICE);
2961 			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2962 				goto dma_error;
2963 			buffer_info->next_to_watch = i;
2964 
2965 			len -= size;
2966 			offset += size;
2967 			count++;
2968 		}
2969 	}
2970 
2971 	segs = skb_shinfo(skb)->gso_segs ?: 1;
2972 	/* multiply data chunks by size of headers */
2973 	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
2974 
2975 	tx_ring->buffer_info[i].skb = skb;
2976 	tx_ring->buffer_info[i].segs = segs;
2977 	tx_ring->buffer_info[i].bytecount = bytecount;
2978 	tx_ring->buffer_info[first].next_to_watch = i;
2979 
2980 	return count;
2981 
2982 dma_error:
2983 	dev_err(&pdev->dev, "TX DMA map failed\n");
2984 	buffer_info->dma = 0;
2985 	if (count)
2986 		count--;
2987 
2988 	while (count--) {
2989 		if (i == 0)
2990 			i += tx_ring->count;
2991 		i--;
2992 		buffer_info = &tx_ring->buffer_info[i];
2993 		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2994 	}
2995 
2996 	return 0;
2997 }
2998 
2999 static void e1000_tx_queue(struct e1000_adapter *adapter,
3000 			   struct e1000_tx_ring *tx_ring, int tx_flags,
3001 			   int count)
3002 {
3003 	struct e1000_tx_desc *tx_desc = NULL;
3004 	struct e1000_tx_buffer *buffer_info;
3005 	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
3006 	unsigned int i;
3007 
3008 	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
3009 		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
3010 			     E1000_TXD_CMD_TSE;
3011 		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3012 
3013 		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
3014 			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
3015 	}
3016 
3017 	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
3018 		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3019 		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3020 	}
3021 
3022 	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
3023 		txd_lower |= E1000_TXD_CMD_VLE;
3024 		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
3025 	}
3026 
3027 	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3028 		txd_lower &= ~(E1000_TXD_CMD_IFCS);
3029 
3030 	i = tx_ring->next_to_use;
3031 
3032 	while (count--) {
3033 		buffer_info = &tx_ring->buffer_info[i];
3034 		tx_desc = E1000_TX_DESC(*tx_ring, i);
3035 		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3036 		tx_desc->lower.data =
3037 			cpu_to_le32(txd_lower | buffer_info->length);
3038 		tx_desc->upper.data = cpu_to_le32(txd_upper);
3039 		if (unlikely(++i == tx_ring->count)) i = 0;
3040 	}
3041 
3042 	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3043 
3044 	/* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
3045 	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3046 		tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
3047 
3048 	/* Force memory writes to complete before letting h/w
3049 	 * know there are new descriptors to fetch.  (Only
3050 	 * applicable for weak-ordered memory model archs,
3051 	 * such as IA-64).
3052 	 */
3053 	wmb();
3054 
3055 	tx_ring->next_to_use = i;
3056 }
3057 
3058 /* 82547 workaround to avoid controller hang in half-duplex environment.
3059  * The workaround is to avoid queuing a large packet that would span
3060  * the internal Tx FIFO ring boundary by notifying the stack to resend
3061  * the packet at a later time.  This gives the Tx FIFO an opportunity to
3062  * flush all packets.  When that occurs, we reset the Tx FIFO pointers
3063  * to the beginning of the Tx FIFO.
3064  */
3065 
3066 #define E1000_FIFO_HDR			0x10
3067 #define E1000_82547_PAD_LEN		0x3E0
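/* Numbers for the check below (illustrative): a packet occupies
 * ALIGN(skb->len + E1000_FIFO_HDR, E1000_FIFO_HDR) bytes of FIFO, i.e. its
 * length plus a 16-byte header rounded up to a 16-byte multiple.  When that
 * length reaches E1000_82547_PAD_LEN (0x3E0 = 992) bytes beyond the space
 * left before the FIFO head wraps, the packet is deferred and the FIFO is
 * drained by e1000_82547_tx_fifo_stall_task() before the pointers reset.
 */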
3068 
3069 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3070 				       struct sk_buff *skb)
3071 {
3072 	u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3073 	u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3074 
3075 	skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3076 
3077 	if (adapter->link_duplex != HALF_DUPLEX)
3078 		goto no_fifo_stall_required;
3079 
3080 	if (atomic_read(&adapter->tx_fifo_stall))
3081 		return 1;
3082 
3083 	if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3084 		atomic_set(&adapter->tx_fifo_stall, 1);
3085 		return 1;
3086 	}
3087 
3088 no_fifo_stall_required:
3089 	adapter->tx_fifo_head += skb_fifo_len;
3090 	if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3091 		adapter->tx_fifo_head -= adapter->tx_fifo_size;
3092 	return 0;
3093 }
3094 
3095 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3096 {
3097 	struct e1000_adapter *adapter = netdev_priv(netdev);
3098 	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3099 
3100 	netif_stop_queue(netdev);
3101 	/* Herbert's original patch had:
3102 	 *  smp_mb__after_netif_stop_queue();
3103 	 * but since that doesn't exist yet, just open code it.
3104 	 */
3105 	smp_mb();
3106 
3107 	/* We need to check again in a case another CPU has just
3108 	 * made room available.
3109 	 */
3110 	if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3111 		return -EBUSY;
3112 
3113 	/* A reprieve! */
3114 	netif_start_queue(netdev);
3115 	++adapter->restart_queue;
3116 	return 0;
3117 }
3118 
3119 static int e1000_maybe_stop_tx(struct net_device *netdev,
3120 			       struct e1000_tx_ring *tx_ring, int size)
3121 {
3122 	if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3123 		return 0;
3124 	return __e1000_maybe_stop_tx(netdev, size);
3125 }
3126 
3127 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
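/* Worked example (illustrative): with X = E1000_MAX_TXD_PWR = 12, a
 * 6000-byte buffer needs TXD_USE_COUNT(6000, 12) = (6000 >> 12) + 1 = 2
 * descriptors.  The macro over-counts by one when the length is an exact
 * multiple of the per-descriptor maximum, which is harmless (it only
 * reserves a spare descriptor).
 */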
3128 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3129 				    struct net_device *netdev)
3130 {
3131 	struct e1000_adapter *adapter = netdev_priv(netdev);
3132 	struct e1000_hw *hw = &adapter->hw;
3133 	struct e1000_tx_ring *tx_ring;
3134 	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3135 	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3136 	unsigned int tx_flags = 0;
3137 	unsigned int len = skb_headlen(skb);
3138 	unsigned int nr_frags;
3139 	unsigned int mss;
3140 	int count = 0;
3141 	int tso;
3142 	unsigned int f;
3143 	__be16 protocol = vlan_get_protocol(skb);
3144 
3145 	/* This goes back to the question of how to logically map a Tx queue
3146 	 * to a flow.  Right now, performance is impacted slightly negatively
3147 	 * if using multiple Tx queues.  If the stack breaks away from a
3148 	 * single qdisc implementation, we can look at this again.
3149 	 */
3150 	tx_ring = adapter->tx_ring;
3151 
3152 	/* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
3153 	 * packets may get corrupted during padding by HW.
3154 	 * To work around this issue, pad all small packets manually.
3155 	 */
3156 	if (eth_skb_pad(skb))
3157 		return NETDEV_TX_OK;
3158 
3159 	mss = skb_shinfo(skb)->gso_size;
3160 	/* The controller does a simple calculation to make sure
3161 	 * there is enough room in the FIFO before initiating the
3162 	 * DMA for each buffer.  It assumes each buffer spans at
3163 	 * most 4 MSS-sized chunks (ceil(buffer len / mss) <= 4),
3164 	 * so to make sure we don't overrun the FIFO, cap the max
3165 	 * buffer length when mss drops.
3166 	 */
3167 	if (mss) {
3168 		u8 hdr_len;
3169 		max_per_txd = min(mss << 2, max_per_txd);
3170 		max_txd_pwr = fls(max_per_txd) - 1;
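		/* Illustrative numbers, assuming a standard 1500-byte MTU:
		 * mss = 1460, so max_per_txd = min(1460 << 2, 4096) = 4096
		 * and max_txd_pwr = fls(4096) - 1 = 12; only for smaller mss
		 * values (below 1024) does the mss << 2 term become the
		 * limiting factor.
		 */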
3171 
3172 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3173 		if (skb->data_len && hdr_len == len) {
3174 			switch (hw->mac_type) {
3175 			case e1000_82544: {
3176 				unsigned int pull_size;
3177 
3178 				/* Make sure we have room to chop off 4 bytes,
3179 				 * and that the end alignment will work out to
3180 				 * this hardware's requirements.
3181 				 * NOTE: this is a TSO-only workaround; if the
3182 				 * end byte alignment is not correct, move us
3183 				 * into the next dword.
3184 				 */
3185 				if ((unsigned long)(skb_tail_pointer(skb) - 1)
3186 				    & 4)
3187 					break;
3188 				/* otherwise, pull 4 bytes into the linear data area */
3189 				pull_size = min((unsigned int)4, skb->data_len);
3190 				if (!__pskb_pull_tail(skb, pull_size)) {
3191 					e_err(drv, "__pskb_pull_tail "
3192 					      "failed.\n");
3193 					dev_kfree_skb_any(skb);
3194 					return NETDEV_TX_OK;
3195 				}
3196 				len = skb_headlen(skb);
3197 				break;
3198 			}
3199 			default:
3200 				/* do nothing */
3201 				break;
3202 			}
3203 		}
3204 	}
3205 
3206 	/* reserve a descriptor for the offload context */
3207 	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3208 		count++;
3209 	count++;
3210 
3211 	/* Controller Erratum workaround */
3212 	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3213 		count++;
3214 
3215 	count += TXD_USE_COUNT(len, max_txd_pwr);
3216 
3217 	if (adapter->pcix_82544)
3218 		count++;
3219 
3220 	/* Workaround for erratum 10, which applies to all controllers
3221 	 * in PCI-X mode, so add one more descriptor to the count
3222 	 */
3223 	if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3224 			(len > 2015)))
3225 		count++;
3226 
3227 	nr_frags = skb_shinfo(skb)->nr_frags;
3228 	for (f = 0; f < nr_frags; f++)
3229 		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
3230 				       max_txd_pwr);
3231 	if (adapter->pcix_82544)
3232 		count += nr_frags;
3233 
3234 	/* need: count + 2 desc gap to keep tail from touching
3235 	 * head, otherwise try next time
3236 	 */
3237 	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3238 		return NETDEV_TX_BUSY;
3239 
3240 	if (unlikely((hw->mac_type == e1000_82547) &&
3241 		     (e1000_82547_fifo_workaround(adapter, skb)))) {
3242 		netif_stop_queue(netdev);
3243 		if (!test_bit(__E1000_DOWN, &adapter->flags))
3244 			schedule_delayed_work(&adapter->fifo_stall_task, 1);
3245 		return NETDEV_TX_BUSY;
3246 	}
3247 
3248 	if (skb_vlan_tag_present(skb)) {
3249 		tx_flags |= E1000_TX_FLAGS_VLAN;
3250 		tx_flags |= (skb_vlan_tag_get(skb) <<
3251 			     E1000_TX_FLAGS_VLAN_SHIFT);
3252 	}
3253 
3254 	first = tx_ring->next_to_use;
3255 
3256 	tso = e1000_tso(adapter, tx_ring, skb, protocol);
3257 	if (tso < 0) {
3258 		dev_kfree_skb_any(skb);
3259 		return NETDEV_TX_OK;
3260 	}
3261 
3262 	if (likely(tso)) {
3263 		if (likely(hw->mac_type != e1000_82544))
3264 			tx_ring->last_tx_tso = true;
3265 		tx_flags |= E1000_TX_FLAGS_TSO;
3266 	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol)))
3267 		tx_flags |= E1000_TX_FLAGS_CSUM;
3268 
3269 	if (protocol == htons(ETH_P_IP))
3270 		tx_flags |= E1000_TX_FLAGS_IPV4;
3271 
3272 	if (unlikely(skb->no_fcs))
3273 		tx_flags |= E1000_TX_FLAGS_NO_FCS;
3274 
3275 	count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3276 			     nr_frags, mss);
3277 
3278 	if (count) {
3279 		netdev_sent_queue(netdev, skb->len);
3280 		skb_tx_timestamp(skb);
3281 
3282 		e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3283 		/* Make sure there is space in the ring for the next send. */
3284 		e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
3285 
3286 		if (!skb->xmit_more ||
3287 		    netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
3288 			writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
3289 			/* we need this if more than one processor can write to
3290 			 * our tail at a time, it synchronizes IO on IA64/Altix
3291 			 * systems
3292 			 */
3293 			mmiowb();
3294 		}
3295 	} else {
3296 		dev_kfree_skb_any(skb);
3297 		tx_ring->buffer_info[first].time_stamp = 0;
3298 		tx_ring->next_to_use = first;
3299 	}
3300 
3301 	return NETDEV_TX_OK;
3302 }
3303 
3304 #define NUM_REGS 38 /* 1 based count */
3305 static void e1000_regdump(struct e1000_adapter *adapter)
3306 {
3307 	struct e1000_hw *hw = &adapter->hw;
3308 	u32 regs[NUM_REGS];
3309 	u32 *regs_buff = regs;
3310 	int i = 0;
3311 
3312 	static const char * const reg_name[] = {
3313 		"CTRL",  "STATUS",
3314 		"RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3315 		"TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3316 		"TIDV", "TXDCTL", "TADV", "TARC0",
3317 		"TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3318 		"TXDCTL1", "TARC1",
3319 		"CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3320 		"TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3321 		"RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
3322 	};
3323 
3324 	regs_buff[0]  = er32(CTRL);
3325 	regs_buff[1]  = er32(STATUS);
3326 
3327 	regs_buff[2]  = er32(RCTL);
3328 	regs_buff[3]  = er32(RDLEN);
3329 	regs_buff[4]  = er32(RDH);
3330 	regs_buff[5]  = er32(RDT);
3331 	regs_buff[6]  = er32(RDTR);
3332 
3333 	regs_buff[7]  = er32(TCTL);
3334 	regs_buff[8]  = er32(TDBAL);
3335 	regs_buff[9]  = er32(TDBAH);
3336 	regs_buff[10] = er32(TDLEN);
3337 	regs_buff[11] = er32(TDH);
3338 	regs_buff[12] = er32(TDT);
3339 	regs_buff[13] = er32(TIDV);
3340 	regs_buff[14] = er32(TXDCTL);
3341 	regs_buff[15] = er32(TADV);
3342 	regs_buff[16] = er32(TARC0);
3343 
3344 	regs_buff[17] = er32(TDBAL1);
3345 	regs_buff[18] = er32(TDBAH1);
3346 	regs_buff[19] = er32(TDLEN1);
3347 	regs_buff[20] = er32(TDH1);
3348 	regs_buff[21] = er32(TDT1);
3349 	regs_buff[22] = er32(TXDCTL1);
3350 	regs_buff[23] = er32(TARC1);
3351 	regs_buff[24] = er32(CTRL_EXT);
3352 	regs_buff[25] = er32(ERT);
3353 	regs_buff[26] = er32(RDBAL0);
3354 	regs_buff[27] = er32(RDBAH0);
3355 	regs_buff[28] = er32(TDFH);
3356 	regs_buff[29] = er32(TDFT);
3357 	regs_buff[30] = er32(TDFHS);
3358 	regs_buff[31] = er32(TDFTS);
3359 	regs_buff[32] = er32(TDFPC);
3360 	regs_buff[33] = er32(RDFH);
3361 	regs_buff[34] = er32(RDFT);
3362 	regs_buff[35] = er32(RDFHS);
3363 	regs_buff[36] = er32(RDFTS);
3364 	regs_buff[37] = er32(RDFPC);
3365 
3366 	pr_info("Register dump\n");
3367 	for (i = 0; i < NUM_REGS; i++)
3368 		pr_info("%-15s  %08x\n", reg_name[i], regs_buff[i]);
3369 }
3370 
3371 /**
3372  * e1000_dump - Print registers, Tx ring and Rx ring
3373  */
3374 static void e1000_dump(struct e1000_adapter *adapter)
3375 {
3376 	/* this code doesn't handle multiple rings */
3377 	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3378 	struct e1000_rx_ring *rx_ring = adapter->rx_ring;
3379 	int i;
3380 
3381 	if (!netif_msg_hw(adapter))
3382 		return;
3383 
3384 	/* Print Registers */
3385 	e1000_regdump(adapter);
3386 
3387 	/* transmit dump */
3388 	pr_info("TX Desc ring0 dump\n");
3389 
3390 	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
3391 	 *
3392 	 * Legacy Transmit Descriptor
3393 	 *   +--------------------------------------------------------------+
3394 	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
3395 	 *   +--------------------------------------------------------------+
3396 	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
3397 	 *   +--------------------------------------------------------------+
3398 	 *   63       48 47        36 35    32 31     24 23    16 15        0
3399 	 *
3400 	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
3401 	 *   63      48 47    40 39       32 31             16 15    8 7      0
3402 	 *   +----------------------------------------------------------------+
3403 	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
3404 	 *   +----------------------------------------------------------------+
3405 	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
3406 	 *   +----------------------------------------------------------------+
3407 	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
3408 	 *
3409 	 * Extended Data Descriptor (DTYP=0x1)
3410 	 *   +----------------------------------------------------------------+
3411 	 * 0 |                     Buffer Address [63:0]                      |
3412 	 *   +----------------------------------------------------------------+
3413 	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
3414 	 *   +----------------------------------------------------------------+
3415 	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
3416 	 */
3417 	pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3418 	pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3419 
3420 	if (!netif_msg_tx_done(adapter))
3421 		goto rx_ring_summary;
3422 
3423 	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3424 		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3425 		struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i];
3426 		struct my_u { __le64 a; __le64 b; };
3427 		struct my_u *u = (struct my_u *)tx_desc;
3428 		const char *type;
3429 
3430 		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
3431 			type = "NTC/U";
3432 		else if (i == tx_ring->next_to_use)
3433 			type = "NTU";
3434 		else if (i == tx_ring->next_to_clean)
3435 			type = "NTC";
3436 		else
3437 			type = "";
3438 
3439 		pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p %s\n",
3440 			((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
3441 			le64_to_cpu(u->a), le64_to_cpu(u->b),
3442 			(u64)buffer_info->dma, buffer_info->length,
3443 			buffer_info->next_to_watch,
3444 			(u64)buffer_info->time_stamp, buffer_info->skb, type);
3445 	}
3446 
3447 rx_ring_summary:
3448 	/* receive dump */
3449 	pr_info("\nRX Desc ring dump\n");
3450 
3451 	/* Legacy Receive Descriptor Format
3452 	 *
3453 	 * +-----------------------------------------------------+
3454 	 * |                Buffer Address [63:0]                |
3455 	 * +-----------------------------------------------------+
3456 	 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
3457 	 * +-----------------------------------------------------+
3458 	 * 63       48 47    40 39      32 31         16 15      0
3459 	 */
3460 	pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");
3461 
3462 	if (!netif_msg_rx_status(adapter))
3463 		goto exit;
3464 
3465 	for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3466 		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3467 		struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i];
3468 		struct my_u { __le64 a; __le64 b; };
3469 		struct my_u *u = (struct my_u *)rx_desc;
3470 		const char *type;
3471 
3472 		if (i == rx_ring->next_to_use)
3473 			type = "NTU";
3474 		else if (i == rx_ring->next_to_clean)
3475 			type = "NTC";
3476 		else
3477 			type = "";
3478 
3479 		pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
3480 			i, le64_to_cpu(u->a), le64_to_cpu(u->b),
3481 			(u64)buffer_info->dma, buffer_info->rxbuf.data, type);
3482 	} /* for */
3483 
3484 	/* dump the descriptor caches */
3485 	/* rx */
3486 	pr_info("Rx descriptor cache in 64bit format\n");
3487 	for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
3488 		pr_info("R%04X: %08X|%08X %08X|%08X\n",
3489 			i,
3490 			readl(adapter->hw.hw_addr + i+4),
3491 			readl(adapter->hw.hw_addr + i),
3492 			readl(adapter->hw.hw_addr + i+12),
3493 			readl(adapter->hw.hw_addr + i+8));
3494 	}
3495 	/* tx */
3496 	pr_info("Tx descriptor cache in 64bit format\n");
3497 	for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
3498 		pr_info("T%04X: %08X|%08X %08X|%08X\n",
3499 			i,
3500 			readl(adapter->hw.hw_addr + i+4),
3501 			readl(adapter->hw.hw_addr + i),
3502 			readl(adapter->hw.hw_addr + i+12),
3503 			readl(adapter->hw.hw_addr + i+8));
3504 	}
3505 exit:
3506 	return;
3507 }
3508 
3509 /**
3510  * e1000_tx_timeout - Respond to a Tx Hang
3511  * @netdev: network interface device structure
3512  **/
3513 static void e1000_tx_timeout(struct net_device *netdev)
3514 {
3515 	struct e1000_adapter *adapter = netdev_priv(netdev);
3516 
3517 	/* Do the reset outside of interrupt context */
3518 	adapter->tx_timeout_count++;
3519 	schedule_work(&adapter->reset_task);
3520 }
3521 
3522 static void e1000_reset_task(struct work_struct *work)
3523 {
3524 	struct e1000_adapter *adapter =
3525 		container_of(work, struct e1000_adapter, reset_task);
3526 
3527 	e_err(drv, "Reset adapter\n");
3528 	e1000_reinit_locked(adapter);
3529 }
3530 
3531 /**
3532  * e1000_get_stats - Get System Network Statistics
3533  * @netdev: network interface device structure
3534  *
3535  * Returns the address of the device statistics structure.
3536  * The statistics are actually updated from the watchdog.
3537  **/
3538 static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
3539 {
3540 	/* only return the current stats */
3541 	return &netdev->stats;
3542 }
3543 
3544 /**
3545  * e1000_change_mtu - Change the Maximum Transfer Unit
3546  * @netdev: network interface device structure
3547  * @new_mtu: new value for maximum frame size
3548  *
3549  * Returns 0 on success, negative on failure
3550  **/
3551 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3552 {
3553 	struct e1000_adapter *adapter = netdev_priv(netdev);
3554 	struct e1000_hw *hw = &adapter->hw;
3555 	int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
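	/* max_frame is the on-wire size: the MTU plus the 14-byte Ethernet
	 * header and the 4-byte FCS.
	 */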
3556 
3557 	if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3558 	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3559 		e_err(probe, "Invalid MTU setting\n");
3560 		return -EINVAL;
3561 	}
3562 
3563 	/* Adapter-specific max frame size limits. */
3564 	switch (hw->mac_type) {
3565 	case e1000_undefined ... e1000_82542_rev2_1:
3566 		if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3567 			e_err(probe, "Jumbo Frames not supported.\n");
3568 			return -EINVAL;
3569 		}
3570 		break;
3571 	default:
3572 		/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3573 		break;
3574 	}
3575 
3576 	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3577 		msleep(1);
3578 	/* e1000_down has a dependency on max_frame_size */
3579 	hw->max_frame_size = max_frame;
3580 	if (netif_running(netdev)) {
3581 		/* prevent buffers from being reallocated */
3582 		adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
3583 		e1000_down(adapter);
3584 	}
3585 
3586 	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3587 	 * means we reserve 2 more; this pushes us to allocate from the next
3588 	 * larger slab size.
3589 	 * i.e. RXBUFFER_2048 --> size-4096 slab
3590 	 * However, with the new *_jumbo_rx* routines, jumbo receives will use
3591 	 * fragmented skbs.
3592 	 */
3593 
3594 	if (max_frame <= E1000_RXBUFFER_2048)
3595 		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3596 	else
3597 #if (PAGE_SIZE >= E1000_RXBUFFER_16384)
3598 		adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3599 #elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3600 		adapter->rx_buffer_len = PAGE_SIZE;
3601 #endif
3602 
3603 	/* adjust allocation if LPE protects us, and we aren't using SBP */
3604 	if (!hw->tbi_compatibility_on &&
3605 	    ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
3606 	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3607 		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3608 
3609 	pr_info("%s changing MTU from %d to %d\n",
3610 		netdev->name, netdev->mtu, new_mtu);
3611 	netdev->mtu = new_mtu;
3612 
3613 	if (netif_running(netdev))
3614 		e1000_up(adapter);
3615 	else
3616 		e1000_reset(adapter);
3617 
3618 	clear_bit(__E1000_RESETTING, &adapter->flags);
3619 
3620 	return 0;
3621 }
3622 
3623 /**
3624  * e1000_update_stats - Update the board statistics counters
3625  * @adapter: board private structure
3626  **/
3627 void e1000_update_stats(struct e1000_adapter *adapter)
3628 {
3629 	struct net_device *netdev = adapter->netdev;
3630 	struct e1000_hw *hw = &adapter->hw;
3631 	struct pci_dev *pdev = adapter->pdev;
3632 	unsigned long flags;
3633 	u16 phy_tmp;
3634 
3635 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3636 
3637 	/* Prevent stats update while adapter is being reset, or if the pci
3638 	 * connection is down.
3639 	 */
3640 	if (adapter->link_speed == 0)
3641 		return;
3642 	if (pci_channel_offline(pdev))
3643 		return;
3644 
3645 	spin_lock_irqsave(&adapter->stats_lock, flags);
3646 
3647 	/* these counters are modified from e1000_tbi_adjust_stats,
3648 	 * called from the interrupt context, so they must only
3649 	 * be written while holding adapter->stats_lock
3650 	 */
3651 
3652 	adapter->stats.crcerrs += er32(CRCERRS);
3653 	adapter->stats.gprc += er32(GPRC);
3654 	adapter->stats.gorcl += er32(GORCL);
3655 	adapter->stats.gorch += er32(GORCH);
3656 	adapter->stats.bprc += er32(BPRC);
3657 	adapter->stats.mprc += er32(MPRC);
3658 	adapter->stats.roc += er32(ROC);
3659 
3660 	adapter->stats.prc64 += er32(PRC64);
3661 	adapter->stats.prc127 += er32(PRC127);
3662 	adapter->stats.prc255 += er32(PRC255);
3663 	adapter->stats.prc511 += er32(PRC511);
3664 	adapter->stats.prc1023 += er32(PRC1023);
3665 	adapter->stats.prc1522 += er32(PRC1522);
3666 
3667 	adapter->stats.symerrs += er32(SYMERRS);
3668 	adapter->stats.mpc += er32(MPC);
3669 	adapter->stats.scc += er32(SCC);
3670 	adapter->stats.ecol += er32(ECOL);
3671 	adapter->stats.mcc += er32(MCC);
3672 	adapter->stats.latecol += er32(LATECOL);
3673 	adapter->stats.dc += er32(DC);
3674 	adapter->stats.sec += er32(SEC);
3675 	adapter->stats.rlec += er32(RLEC);
3676 	adapter->stats.xonrxc += er32(XONRXC);
3677 	adapter->stats.xontxc += er32(XONTXC);
3678 	adapter->stats.xoffrxc += er32(XOFFRXC);
3679 	adapter->stats.xofftxc += er32(XOFFTXC);
3680 	adapter->stats.fcruc += er32(FCRUC);
3681 	adapter->stats.gptc += er32(GPTC);
3682 	adapter->stats.gotcl += er32(GOTCL);
3683 	adapter->stats.gotch += er32(GOTCH);
3684 	adapter->stats.rnbc += er32(RNBC);
3685 	adapter->stats.ruc += er32(RUC);
3686 	adapter->stats.rfc += er32(RFC);
3687 	adapter->stats.rjc += er32(RJC);
3688 	adapter->stats.torl += er32(TORL);
3689 	adapter->stats.torh += er32(TORH);
3690 	adapter->stats.totl += er32(TOTL);
3691 	adapter->stats.toth += er32(TOTH);
3692 	adapter->stats.tpr += er32(TPR);
3693 
3694 	adapter->stats.ptc64 += er32(PTC64);
3695 	adapter->stats.ptc127 += er32(PTC127);
3696 	adapter->stats.ptc255 += er32(PTC255);
3697 	adapter->stats.ptc511 += er32(PTC511);
3698 	adapter->stats.ptc1023 += er32(PTC1023);
3699 	adapter->stats.ptc1522 += er32(PTC1522);
3700 
3701 	adapter->stats.mptc += er32(MPTC);
3702 	adapter->stats.bptc += er32(BPTC);
3703 
3704 	/* used for adaptive IFS */
3705 
3706 	hw->tx_packet_delta = er32(TPT);
3707 	adapter->stats.tpt += hw->tx_packet_delta;
3708 	hw->collision_delta = er32(COLC);
3709 	adapter->stats.colc += hw->collision_delta;
3710 
3711 	if (hw->mac_type >= e1000_82543) {
3712 		adapter->stats.algnerrc += er32(ALGNERRC);
3713 		adapter->stats.rxerrc += er32(RXERRC);
3714 		adapter->stats.tncrs += er32(TNCRS);
3715 		adapter->stats.cexterr += er32(CEXTERR);
3716 		adapter->stats.tsctc += er32(TSCTC);
3717 		adapter->stats.tsctfc += er32(TSCTFC);
3718 	}
3719 
3720 	/* Fill out the OS statistics structure */
3721 	netdev->stats.multicast = adapter->stats.mprc;
3722 	netdev->stats.collisions = adapter->stats.colc;
3723 
3724 	/* Rx Errors */
3725 
3726 	/* RLEC on some newer hardware can be incorrect so build
3727 	 * our own version based on RUC and ROC
3728 	 */
3729 	netdev->stats.rx_errors = adapter->stats.rxerrc +
3730 		adapter->stats.crcerrs + adapter->stats.algnerrc +
3731 		adapter->stats.ruc + adapter->stats.roc +
3732 		adapter->stats.cexterr;
3733 	adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3734 	netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3735 	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3736 	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3737 	netdev->stats.rx_missed_errors = adapter->stats.mpc;
3738 
3739 	/* Tx Errors */
3740 	adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3741 	netdev->stats.tx_errors = adapter->stats.txerrc;
3742 	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3743 	netdev->stats.tx_window_errors = adapter->stats.latecol;
3744 	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3745 	if (hw->bad_tx_carr_stats_fd &&
3746 	    adapter->link_duplex == FULL_DUPLEX) {
3747 		netdev->stats.tx_carrier_errors = 0;
3748 		adapter->stats.tncrs = 0;
3749 	}
3750 
3751 	/* Tx Dropped needs to be maintained elsewhere */
3752 
3753 	/* Phy Stats */
3754 	if (hw->media_type == e1000_media_type_copper) {
3755 		if ((adapter->link_speed == SPEED_1000) &&
3756 		   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3757 			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3758 			adapter->phy_stats.idle_errors += phy_tmp;
3759 		}
3760 
3761 		if ((hw->mac_type <= e1000_82546) &&
3762 		   (hw->phy_type == e1000_phy_m88) &&
3763 		   !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3764 			adapter->phy_stats.receive_errors += phy_tmp;
3765 	}
3766 
3767 	/* Management Stats */
3768 	if (hw->has_smbus) {
3769 		adapter->stats.mgptc += er32(MGTPTC);
3770 		adapter->stats.mgprc += er32(MGTPRC);
3771 		adapter->stats.mgpdc += er32(MGTPDC);
3772 	}
3773 
3774 	spin_unlock_irqrestore(&adapter->stats_lock, flags);
3775 }
3776 
3777 /**
3778  * e1000_intr - Interrupt Handler
3779  * @irq: interrupt number
3780  * @data: pointer to a network interface device structure
3781  **/
3782 static irqreturn_t e1000_intr(int irq, void *data)
3783 {
3784 	struct net_device *netdev = data;
3785 	struct e1000_adapter *adapter = netdev_priv(netdev);
3786 	struct e1000_hw *hw = &adapter->hw;
3787 	u32 icr = er32(ICR);
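	/* Reading ICR acknowledges the interrupt; the pending cause bits
	 * are cleared by the read itself.
	 */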
3788 
3789 	if (unlikely((!icr)))
3790 		return IRQ_NONE;  /* Not our interrupt */
3791 
3792 	/* we might have caused the interrupt, but the above
3793 	 * read cleared it; just in case the driver is
3794 	 * down, there is nothing to do, so return handled
3795 	 */
3796 	if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3797 		return IRQ_HANDLED;
3798 
3799 	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3800 		hw->get_link_status = 1;
3801 		/* guard against interrupt when we're going down */
3802 		if (!test_bit(__E1000_DOWN, &adapter->flags))
3803 			schedule_delayed_work(&adapter->watchdog_task, 1);
3804 	}
3805 
3806 	/* disable interrupts, without the synchronize_irq bit */
3807 	ew32(IMC, ~0);
3808 	E1000_WRITE_FLUSH();
3809 
3810 	if (likely(napi_schedule_prep(&adapter->napi))) {
3811 		adapter->total_tx_bytes = 0;
3812 		adapter->total_tx_packets = 0;
3813 		adapter->total_rx_bytes = 0;
3814 		adapter->total_rx_packets = 0;
3815 		__napi_schedule(&adapter->napi);
3816 	} else {
3817 		/* this really should not happen! if it does it is basically a
3818 		 * bug, but not a hard error, so enable ints and continue
3819 		 */
3820 		if (!test_bit(__E1000_DOWN, &adapter->flags))
3821 			e1000_irq_enable(adapter);
3822 	}
3823 
3824 	return IRQ_HANDLED;
3825 }
3826 
3827 /**
3828  * e1000_clean - NAPI Rx polling callback
3829  * @napi: napi struct embedded in the adapter private structure
3830  **/
3831 static int e1000_clean(struct napi_struct *napi, int budget)
3832 {
3833 	struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
3834 						     napi);
3835 	int tx_clean_complete = 0, work_done = 0;
3836 
3837 	tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3838 
3839 	adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3840 
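	/* If Tx cleanup did not finish, claim the full budget so NAPI polls
	 * again instead of re-enabling interrupts below.
	 */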
3841 	if (!tx_clean_complete)
3842 		work_done = budget;
3843 
3844 	/* If budget not fully consumed, exit the polling mode */
3845 	if (work_done < budget) {
3846 		if (likely(adapter->itr_setting & 3))
3847 			e1000_set_itr(adapter);
3848 		napi_complete_done(napi, work_done);
3849 		if (!test_bit(__E1000_DOWN, &adapter->flags))
3850 			e1000_irq_enable(adapter);
3851 	}
3852 
3853 	return work_done;
3854 }
3855 
3856 /**
3857  * e1000_clean_tx_irq - Reclaim resources after transmit completes
3858  * @adapter: board private structure
3859  **/
3860 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3861 			       struct e1000_tx_ring *tx_ring)
3862 {
3863 	struct e1000_hw *hw = &adapter->hw;
3864 	struct net_device *netdev = adapter->netdev;
3865 	struct e1000_tx_desc *tx_desc, *eop_desc;
3866 	struct e1000_tx_buffer *buffer_info;
3867 	unsigned int i, eop;
3868 	unsigned int count = 0;
3869 	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
3870 	unsigned int bytes_compl = 0, pkts_compl = 0;
3871 
3872 	i = tx_ring->next_to_clean;
3873 	eop = tx_ring->buffer_info[i].next_to_watch;
3874 	eop_desc = E1000_TX_DESC(*tx_ring, eop);
3875 
3876 	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3877 	       (count < tx_ring->count)) {
3878 		bool cleaned = false;
3879 		dma_rmb();	/* read buffer_info after eop_desc */
3880 		for ( ; !cleaned; count++) {
3881 			tx_desc = E1000_TX_DESC(*tx_ring, i);
3882 			buffer_info = &tx_ring->buffer_info[i];
3883 			cleaned = (i == eop);
3884 
3885 			if (cleaned) {
3886 				total_tx_packets += buffer_info->segs;
3887 				total_tx_bytes += buffer_info->bytecount;
3888 				if (buffer_info->skb) {
3889 					bytes_compl += buffer_info->skb->len;
3890 					pkts_compl++;
3891 				}
3892 
3893 			}
3894 			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3895 			tx_desc->upper.data = 0;
3896 
3897 			if (unlikely(++i == tx_ring->count)) i = 0;
3898 		}
3899 
3900 		eop = tx_ring->buffer_info[i].next_to_watch;
3901 		eop_desc = E1000_TX_DESC(*tx_ring, eop);
3902 	}
3903 
3904 	/* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame,
3905 	 * which will reuse the cleaned buffers.
3906 	 */
3907 	smp_store_release(&tx_ring->next_to_clean, i);
3908 
3909 	netdev_completed_queue(netdev, pkts_compl, bytes_compl);
3910 
3911 #define TX_WAKE_THRESHOLD 32
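	/* Only restart the queue once a reasonable number of descriptors is
	 * free again; waking it earlier would just stop it again on the
	 * next frame.
	 */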
3912 	if (unlikely(count && netif_carrier_ok(netdev) &&
3913 		     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3914 		/* Make sure that anybody stopping the queue after this
3915 		 * sees the new next_to_clean.
3916 		 */
3917 		smp_mb();
3918 
3919 		if (netif_queue_stopped(netdev) &&
3920 		    !(test_bit(__E1000_DOWN, &adapter->flags))) {
3921 			netif_wake_queue(netdev);
3922 			++adapter->restart_queue;
3923 		}
3924 	}
3925 
3926 	if (adapter->detect_tx_hung) {
3927 		/* Detect a transmit hang in hardware; this serializes the
3928 		 * check with the clearing of time_stamp and movement of i
3929 		 */
3930 		adapter->detect_tx_hung = false;
3931 		if (tx_ring->buffer_info[eop].time_stamp &&
3932 		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3933 			       (adapter->tx_timeout_factor * HZ)) &&
3934 		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3935 
3936 			/* detected Tx unit hang */
3937 			e_err(drv, "Detected Tx Unit Hang\n"
3938 			      "  Tx Queue             <%lu>\n"
3939 			      "  TDH                  <%x>\n"
3940 			      "  TDT                  <%x>\n"
3941 			      "  next_to_use          <%x>\n"
3942 			      "  next_to_clean        <%x>\n"
3943 			      "buffer_info[next_to_clean]\n"
3944 			      "  time_stamp           <%lx>\n"
3945 			      "  next_to_watch        <%x>\n"
3946 			      "  jiffies              <%lx>\n"
3947 			      "  next_to_watch.status <%x>\n",
3948 				(unsigned long)(tx_ring - adapter->tx_ring),
3949 				readl(hw->hw_addr + tx_ring->tdh),
3950 				readl(hw->hw_addr + tx_ring->tdt),
3951 				tx_ring->next_to_use,
3952 				tx_ring->next_to_clean,
3953 				tx_ring->buffer_info[eop].time_stamp,
3954 				eop,
3955 				jiffies,
3956 				eop_desc->upper.fields.status);
3957 			e1000_dump(adapter);
3958 			netif_stop_queue(netdev);
3959 		}
3960 	}
3961 	adapter->total_tx_bytes += total_tx_bytes;
3962 	adapter->total_tx_packets += total_tx_packets;
3963 	netdev->stats.tx_bytes += total_tx_bytes;
3964 	netdev->stats.tx_packets += total_tx_packets;
3965 	return count < tx_ring->count;
3966 }
3967 
3968 /**
3969  * e1000_rx_checksum - Receive Checksum Offload for 82543
3970  * @adapter:     board private structure
3971  * @status_err:  receive descriptor status and error fields
3972  * @csum:        receive descriptor csum field
3973  * @skb:         socket buffer with received data
3974  **/
3975 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3976 			      u32 csum, struct sk_buff *skb)
3977 {
3978 	struct e1000_hw *hw = &adapter->hw;
3979 	u16 status = (u16)status_err;
3980 	u8 errors = (u8)(status_err >> 24);
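	/* Callers pack the descriptor status byte into bits 7:0 and the
	 * error byte into bits 31:24 of status_err (see the Rx clean paths).
	 */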
3981 
3982 	skb_checksum_none_assert(skb);
3983 
3984 	/* 82543 or newer only */
3985 	if (unlikely(hw->mac_type < e1000_82543)) return;
3986 	/* Ignore Checksum bit is set */
3987 	if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
3988 	/* TCP/UDP checksum error bit is set */
3989 	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3990 		/* let the stack verify checksum errors */
3991 		adapter->hw_csum_err++;
3992 		return;
3993 	}
3994 	/* TCP/UDP Checksum has not been calculated */
3995 	if (!(status & E1000_RXD_STAT_TCPCS))
3996 		return;
3997 
3998 	/* It must be a TCP or UDP packet with a valid checksum */
3999 	if (likely(status & E1000_RXD_STAT_TCPCS)) {
4000 		/* TCP checksum is good */
4001 		skb->ip_summed = CHECKSUM_UNNECESSARY;
4002 	}
4003 	adapter->hw_csum_good++;
4004 }
4005 
4006 /**
4007  * e1000_consume_page - helper function for jumbo Rx path
4008  **/
4009 static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb,
4010 			       u16 length)
4011 {
4012 	bi->rxbuf.page = NULL;
4013 	skb->len += length;
4014 	skb->data_len += length;
4015 	skb->truesize += PAGE_SIZE;
4016 }
4017 
4018 /**
4019  * e1000_receive_skb - helper function to handle rx indications
4020  * @adapter: board private structure
4021  * @status: descriptor status field as written by hardware
4022  * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
4023  * @skb: pointer to sk_buff to be indicated to stack
4024  */
4025 static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
4026 			      __le16 vlan, struct sk_buff *skb)
4027 {
4028 	skb->protocol = eth_type_trans(skb, adapter->netdev);
4029 
4030 	if (status & E1000_RXD_STAT_VP) {
4031 		u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4032 
4033 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4034 	}
4035 	napi_gro_receive(&adapter->napi, skb);
4036 }
4037 
4038 /**
4039  * e1000_tbi_adjust_stats
4040  * @hw: Struct containing variables accessed by shared code
4041  * @frame_len: The length of the frame in question
4042  * @mac_addr: The Ethernet destination address of the frame in question
4043  *
4044  * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
4045  */
4046 static void e1000_tbi_adjust_stats(struct e1000_hw *hw,
4047 				   struct e1000_hw_stats *stats,
4048 				   u32 frame_len, const u8 *mac_addr)
4049 {
4050 	u64 carry_bit;
4051 
4052 	/* First adjust the frame length. */
4053 	frame_len--;
4054 	/* We need to adjust the statistics counters, since the hardware
4055 	 * counters overcount this packet as a CRC error and undercount
4056 	 * the packet as a good packet
4057 	 */
4058 	/* This packet should not be counted as a CRC error. */
4059 	stats->crcerrs--;
4060 	/* This packet does count as a Good Packet Received. */
4061 	stats->gprc++;
4062 
4063 	/* Adjust the Good Octets received counters */
4064 	carry_bit = 0x80000000 & stats->gorcl;
4065 	stats->gorcl += frame_len;
4066 	/* If the high bit of Gorcl (the low 32 bits of the Good Octets
4067 	 * Received Count) was one before the addition,
4068 	 * AND it is zero after, then we lost the carry out,
4069 	 * need to add one to Gorch (Good Octets Received Count High).
4070 	 * This could be simplified if all environments supported
4071 	 * 64-bit integers.
4072 	 */
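	/* Example (32-bit view): gorcl = 0xFFFFFFF0 plus a 0x40-byte frame
	 * leaves 0x30 with bit 31 clear, so the lost carry must be added
	 * to gorch below.
	 */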
4073 	if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
4074 		stats->gorch++;
4075 	/* Is this a broadcast or multicast?  Check broadcast first,
4076 	 * since the test for a multicast frame will test positive on
4077 	 * a broadcast frame.
4078 	 */
4079 	if (is_broadcast_ether_addr(mac_addr))
4080 		stats->bprc++;
4081 	else if (is_multicast_ether_addr(mac_addr))
4082 		stats->mprc++;
4083 
4084 	if (frame_len == hw->max_frame_size) {
4085 		/* In this case, the hardware has overcounted the number of
4086 		 * oversize frames.
4087 		 */
4088 		if (stats->roc > 0)
4089 			stats->roc--;
4090 	}
4091 
4092 	/* Adjust the bin counters when the extra byte put the frame in the
4093 	 * wrong bin. Remember that the frame_len was adjusted above.
4094 	 */
4095 	if (frame_len == 64) {
4096 		stats->prc64++;
4097 		stats->prc127--;
4098 	} else if (frame_len == 127) {
4099 		stats->prc127++;
4100 		stats->prc255--;
4101 	} else if (frame_len == 255) {
4102 		stats->prc255++;
4103 		stats->prc511--;
4104 	} else if (frame_len == 511) {
4105 		stats->prc511++;
4106 		stats->prc1023--;
4107 	} else if (frame_len == 1023) {
4108 		stats->prc1023++;
4109 		stats->prc1522--;
4110 	} else if (frame_len == 1522) {
4111 		stats->prc1522++;
4112 	}
4113 }
4114 
4115 static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
4116 				    u8 status, u8 errors,
4117 				    u32 length, const u8 *data)
4118 {
4119 	struct e1000_hw *hw = &adapter->hw;
4120 	u8 last_byte = *(data + length - 1);
4121 
4122 	if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
4123 		unsigned long irq_flags;
4124 
4125 		spin_lock_irqsave(&adapter->stats_lock, irq_flags);
4126 		e1000_tbi_adjust_stats(hw, &adapter->stats, length, data);
4127 		spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
4128 
4129 		return true;
4130 	}
4131 
4132 	return false;
4133 }
4134 
4135 static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
4136 					  unsigned int bufsz)
4137 {
4138 	struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz);
4139 
4140 	if (unlikely(!skb))
4141 		adapter->alloc_rx_buff_failed++;
4142 	return skb;
4143 }
4144 
4145 /**
4146  * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
4147  * @adapter: board private structure
4148  * @rx_ring: ring to clean
4149  * @work_done: amount of napi work completed this call
4150  * @work_to_do: max amount of work allowed for this call to do
4151  *
4152  * the return value indicates whether actual cleaning was done, there
4153  * is no guarantee that everything was cleaned
4154  */
4155 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4156 				     struct e1000_rx_ring *rx_ring,
4157 				     int *work_done, int work_to_do)
4158 {
4159 	struct net_device *netdev = adapter->netdev;
4160 	struct pci_dev *pdev = adapter->pdev;
4161 	struct e1000_rx_desc *rx_desc, *next_rxd;
4162 	struct e1000_rx_buffer *buffer_info, *next_buffer;
4163 	u32 length;
4164 	unsigned int i;
4165 	int cleaned_count = 0;
4166 	bool cleaned = false;
4167 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4168 
4169 	i = rx_ring->next_to_clean;
4170 	rx_desc = E1000_RX_DESC(*rx_ring, i);
4171 	buffer_info = &rx_ring->buffer_info[i];
4172 
4173 	while (rx_desc->status & E1000_RXD_STAT_DD) {
4174 		struct sk_buff *skb;
4175 		u8 status;
4176 
4177 		if (*work_done >= work_to_do)
4178 			break;
4179 		(*work_done)++;
4180 		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4181 
4182 		status = rx_desc->status;
4183 
4184 		if (++i == rx_ring->count) i = 0;
4185 		next_rxd = E1000_RX_DESC(*rx_ring, i);
4186 		prefetch(next_rxd);
4187 
4188 		next_buffer = &rx_ring->buffer_info[i];
4189 
4190 		cleaned = true;
4191 		cleaned_count++;
4192 		dma_unmap_page(&pdev->dev, buffer_info->dma,
4193 			       adapter->rx_buffer_len, DMA_FROM_DEVICE);
4194 		buffer_info->dma = 0;
4195 
4196 		length = le16_to_cpu(rx_desc->length);
4197 
4198 		/* errors is only valid for DD + EOP descriptors */
4199 		if (unlikely((status & E1000_RXD_STAT_EOP) &&
4200 		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
4201 			u8 *mapped = page_address(buffer_info->rxbuf.page);
4202 
4203 			if (e1000_tbi_should_accept(adapter, status,
4204 						    rx_desc->errors,
4205 						    length, mapped)) {
4206 				length--;
4207 			} else if (netdev->features & NETIF_F_RXALL) {
4208 				goto process_skb;
4209 			} else {
4210 				/* an error means any chain goes out the window
4211 				 * too
4212 				 */
4213 				if (rx_ring->rx_skb_top)
4214 					dev_kfree_skb(rx_ring->rx_skb_top);
4215 				rx_ring->rx_skb_top = NULL;
4216 				goto next_desc;
4217 			}
4218 		}
4219 
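/* Jumbo frames spanning several descriptors are collected as page
 * fragments on rx_ring->rx_skb_top until the EOP descriptor arrives.
 */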
4220 #define rxtop rx_ring->rx_skb_top
4221 process_skb:
4222 		if (!(status & E1000_RXD_STAT_EOP)) {
4223 			/* this descriptor is only the beginning (or middle) */
4224 			if (!rxtop) {
4225 				/* this is the beginning of a chain */
4226 				rxtop = napi_get_frags(&adapter->napi);
4227 				if (!rxtop)
4228 					break;
4229 
4230 				skb_fill_page_desc(rxtop, 0,
4231 						   buffer_info->rxbuf.page,
4232 						   0, length);
4233 			} else {
4234 				/* this is the middle of a chain */
4235 				skb_fill_page_desc(rxtop,
4236 				    skb_shinfo(rxtop)->nr_frags,
4237 				    buffer_info->rxbuf.page, 0, length);
4238 			}
4239 			e1000_consume_page(buffer_info, rxtop, length);
4240 			goto next_desc;
4241 		} else {
4242 			if (rxtop) {
4243 				/* end of the chain */
4244 				skb_fill_page_desc(rxtop,
4245 				    skb_shinfo(rxtop)->nr_frags,
4246 				    buffer_info->rxbuf.page, 0, length);
4247 				skb = rxtop;
4248 				rxtop = NULL;
4249 				e1000_consume_page(buffer_info, skb, length);
4250 			} else {
4251 				struct page *p;
4252 				/* no chain, got EOP: this buf is the packet;
4253 				 * copybreak to save the put_page/alloc_page
4254 				 */
4255 				p = buffer_info->rxbuf.page;
4256 				if (length <= copybreak) {
4257 					u8 *vaddr;
4258 
4259 					if (likely(!(netdev->features & NETIF_F_RXFCS)))
4260 						length -= 4;
4261 					skb = e1000_alloc_rx_skb(adapter,
4262 								 length);
4263 					if (!skb)
4264 						break;
4265 
4266 					vaddr = kmap_atomic(p);
4267 					memcpy(skb_tail_pointer(skb), vaddr,
4268 					       length);
4269 					kunmap_atomic(vaddr);
4270 					/* re-use the page, so don't erase
4271 					 * buffer_info->rxbuf.page
4272 					 */
4273 					skb_put(skb, length);
4274 					e1000_rx_checksum(adapter,
4275 							  status | rx_desc->errors << 24,
4276 							  le16_to_cpu(rx_desc->csum), skb);
4277 
4278 					total_rx_bytes += skb->len;
4279 					total_rx_packets++;
4280 
4281 					e1000_receive_skb(adapter, status,
4282 							  rx_desc->special, skb);
4283 					goto next_desc;
4284 				} else {
4285 					skb = napi_get_frags(&adapter->napi);
4286 					if (!skb) {
4287 						adapter->alloc_rx_buff_failed++;
4288 						break;
4289 					}
4290 					skb_fill_page_desc(skb, 0, p, 0,
4291 							   length);
4292 					e1000_consume_page(buffer_info, skb,
4293 							   length);
4294 				}
4295 			}
4296 		}
4297 
4298 		/* Receive Checksum Offload XXX recompute due to CRC strip? */
4299 		e1000_rx_checksum(adapter,
4300 				  (u32)(status) |
4301 				  ((u32)(rx_desc->errors) << 24),
4302 				  le16_to_cpu(rx_desc->csum), skb);
4303 
4304 		total_rx_bytes += (skb->len - 4); /* don't count FCS */
4305 		if (likely(!(netdev->features & NETIF_F_RXFCS)))
4306 			pskb_trim(skb, skb->len - 4);
4307 		total_rx_packets++;
4308 
4309 		if (status & E1000_RXD_STAT_VP) {
4310 			__le16 vlan = rx_desc->special;
4311 			u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4312 
4313 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4314 		}
4315 
4316 		napi_gro_frags(&adapter->napi);
4317 
4318 next_desc:
4319 		rx_desc->status = 0;
4320 
4321 		/* return some buffers to hardware, one at a time is too slow */
4322 		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4323 			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4324 			cleaned_count = 0;
4325 		}
4326 
4327 		/* use prefetched values */
4328 		rx_desc = next_rxd;
4329 		buffer_info = next_buffer;
4330 	}
4331 	rx_ring->next_to_clean = i;
4332 
4333 	cleaned_count = E1000_DESC_UNUSED(rx_ring);
4334 	if (cleaned_count)
4335 		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4336 
4337 	adapter->total_rx_packets += total_rx_packets;
4338 	adapter->total_rx_bytes += total_rx_bytes;
4339 	netdev->stats.rx_bytes += total_rx_bytes;
4340 	netdev->stats.rx_packets += total_rx_packets;
4341 	return cleaned;
4342 }
4343 
4344 /* this should improve performance for small packets with large amounts
4345  * of reassembly being done in the stack
4346  */
4347 static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter,
4348 				       struct e1000_rx_buffer *buffer_info,
4349 				       u32 length, const void *data)
4350 {
4351 	struct sk_buff *skb;
4352 
4353 	if (length > copybreak)
4354 		return NULL;
4355 
4356 	skb = e1000_alloc_rx_skb(adapter, length);
4357 	if (!skb)
4358 		return NULL;
4359 
4360 	dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma,
4361 				length, DMA_FROM_DEVICE);
4362 
4363 	memcpy(skb_put(skb, length), data, length);
4364 
4365 	return skb;
4366 }
4367 
4368 /**
4369  * e1000_clean_rx_irq - Send received data up the network stack; legacy
4370  * @adapter: board private structure
4371  * @rx_ring: ring to clean
4372  * @work_done: amount of napi work completed this call
4373  * @work_to_do: max amount of work allowed for this call to do
4374  */
4375 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4376 			       struct e1000_rx_ring *rx_ring,
4377 			       int *work_done, int work_to_do)
4378 {
4379 	struct net_device *netdev = adapter->netdev;
4380 	struct pci_dev *pdev = adapter->pdev;
4381 	struct e1000_rx_desc *rx_desc, *next_rxd;
4382 	struct e1000_rx_buffer *buffer_info, *next_buffer;
4383 	u32 length;
4384 	unsigned int i;
4385 	int cleaned_count = 0;
4386 	bool cleaned = false;
4387 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4388 
4389 	i = rx_ring->next_to_clean;
4390 	rx_desc = E1000_RX_DESC(*rx_ring, i);
4391 	buffer_info = &rx_ring->buffer_info[i];
4392 
4393 	while (rx_desc->status & E1000_RXD_STAT_DD) {
4394 		struct sk_buff *skb;
4395 		u8 *data;
4396 		u8 status;
4397 
4398 		if (*work_done >= work_to_do)
4399 			break;
4400 		(*work_done)++;
4401 		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4402 
4403 		status = rx_desc->status;
4404 		length = le16_to_cpu(rx_desc->length);
4405 
4406 		data = buffer_info->rxbuf.data;
4407 		prefetch(data);
4408 		skb = e1000_copybreak(adapter, buffer_info, length, data);
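		/* No copybreak copy was made: wrap the existing receive
		 * buffer with build_skb() and hand the buffer itself to the
		 * stack instead of copying the data out of it.
		 */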
4409 		if (!skb) {
4410 			unsigned int frag_len = e1000_frag_len(adapter);
4411 
4412 			skb = build_skb(data - E1000_HEADROOM, frag_len);
4413 			if (!skb) {
4414 				adapter->alloc_rx_buff_failed++;
4415 				break;
4416 			}
4417 
4418 			skb_reserve(skb, E1000_HEADROOM);
4419 			dma_unmap_single(&pdev->dev, buffer_info->dma,
4420 					 adapter->rx_buffer_len,
4421 					 DMA_FROM_DEVICE);
4422 			buffer_info->dma = 0;
4423 			buffer_info->rxbuf.data = NULL;
4424 		}
4425 
4426 		if (++i == rx_ring->count) i = 0;
4427 		next_rxd = E1000_RX_DESC(*rx_ring, i);
4428 		prefetch(next_rxd);
4429 
4430 		next_buffer = &rx_ring->buffer_info[i];
4431 
4432 		cleaned = true;
4433 		cleaned_count++;
4434 
4435 		/* !EOP means multiple descriptors were used to store a single
4436 		 * packet, if that's the case we need to toss it.  In fact, we
4437 		 * need to toss every packet with the EOP bit clear and the next
4438 		 * frame that _does_ have the EOP bit set, as it is by
4439 		 * definition only a frame fragment
4440 		 */
4441 		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
4442 			adapter->discarding = true;
4443 
4444 		if (adapter->discarding) {
4445 			/* All receives must fit into a single buffer */
4446 			netdev_dbg(netdev, "Receive packet consumed multiple buffers\n");
4447 			dev_kfree_skb(skb);
4448 			if (status & E1000_RXD_STAT_EOP)
4449 				adapter->discarding = false;
4450 			goto next_desc;
4451 		}
4452 
4453 		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4454 			if (e1000_tbi_should_accept(adapter, status,
4455 						    rx_desc->errors,
4456 						    length, data)) {
4457 				length--;
4458 			} else if (netdev->features & NETIF_F_RXALL) {
4459 				goto process_skb;
4460 			} else {
4461 				dev_kfree_skb(skb);
4462 				goto next_desc;
4463 			}
4464 		}
4465 
4466 process_skb:
4467 		total_rx_bytes += (length - 4); /* don't count FCS */
4468 		total_rx_packets++;
4469 
4470 		if (likely(!(netdev->features & NETIF_F_RXFCS)))
4471 			/* adjust length to remove Ethernet CRC; this must be
4472 			 * done after the TBI_ACCEPT workaround above
4473 			 */
4474 			length -= 4;
4475 
4476 		if (buffer_info->rxbuf.data == NULL)
4477 			skb_put(skb, length);
4478 		else /* copybreak skb */
4479 			skb_trim(skb, length);
4480 
4481 		/* Receive Checksum Offload */
4482 		e1000_rx_checksum(adapter,
4483 				  (u32)(status) |
4484 				  ((u32)(rx_desc->errors) << 24),
4485 				  le16_to_cpu(rx_desc->csum), skb);
4486 
4487 		e1000_receive_skb(adapter, status, rx_desc->special, skb);
4488 
4489 next_desc:
4490 		rx_desc->status = 0;
4491 
4492 		/* return some buffers to hardware, one at a time is too slow */
4493 		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4494 			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4495 			cleaned_count = 0;
4496 		}
4497 
4498 		/* use prefetched values */
4499 		rx_desc = next_rxd;
4500 		buffer_info = next_buffer;
4501 	}
4502 	rx_ring->next_to_clean = i;
4503 
4504 	cleaned_count = E1000_DESC_UNUSED(rx_ring);
4505 	if (cleaned_count)
4506 		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4507 
4508 	adapter->total_rx_packets += total_rx_packets;
4509 	adapter->total_rx_bytes += total_rx_bytes;
4510 	netdev->stats.rx_bytes += total_rx_bytes;
4511 	netdev->stats.rx_packets += total_rx_packets;
4512 	return cleaned;
4513 }
4514 
4515 /**
4516  * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
4517  * @adapter: address of board private structure
4518  * @rx_ring: pointer to receive ring structure
4519  * @cleaned_count: number of buffers to allocate this pass
4520  **/
4521 static void
4522 e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4523 			     struct e1000_rx_ring *rx_ring, int cleaned_count)
4524 {
4525 	struct pci_dev *pdev = adapter->pdev;
4526 	struct e1000_rx_desc *rx_desc;
4527 	struct e1000_rx_buffer *buffer_info;
4528 	unsigned int i;
4529 
4530 	i = rx_ring->next_to_use;
4531 	buffer_info = &rx_ring->buffer_info[i];
4532 
4533 	while (cleaned_count--) {
4534 		/* allocate a new page if necessary */
4535 		if (!buffer_info->rxbuf.page) {
4536 			buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC);
4537 			if (unlikely(!buffer_info->rxbuf.page)) {
4538 				adapter->alloc_rx_buff_failed++;
4539 				break;
4540 			}
4541 		}
4542 
4543 		if (!buffer_info->dma) {
4544 			buffer_info->dma = dma_map_page(&pdev->dev,
4545 							buffer_info->rxbuf.page, 0,
4546 							adapter->rx_buffer_len,
4547 							DMA_FROM_DEVICE);
4548 			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4549 				put_page(buffer_info->rxbuf.page);
4550 				buffer_info->rxbuf.page = NULL;
4551 				buffer_info->dma = 0;
4552 				adapter->alloc_rx_buff_failed++;
4553 				break;
4554 			}
4555 		}
4556 
4557 		rx_desc = E1000_RX_DESC(*rx_ring, i);
4558 		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4559 
4560 		if (unlikely(++i == rx_ring->count))
4561 			i = 0;
4562 		buffer_info = &rx_ring->buffer_info[i];
4563 	}
4564 
4565 	if (likely(rx_ring->next_to_use != i)) {
4566 		rx_ring->next_to_use = i;
4567 		if (unlikely(i-- == 0))
4568 			i = (rx_ring->count - 1);
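		/* The tail register is written one entry behind next_to_use,
		 * presumably so head == tail keeps meaning "empty" and the
		 * ring never looks completely full to the hardware.
		 */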
4569 
4570 		/* Force memory writes to complete before letting h/w
4571 		 * know there are new descriptors to fetch.  (Only
4572 		 * applicable for weak-ordered memory model archs,
4573 		 * such as IA-64).
4574 		 */
4575 		wmb();
4576 		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4577 	}
4578 }
4579 
4580 /**
4581  * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
4582  * @adapter: address of board private structure
4583  **/
4584 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4585 				   struct e1000_rx_ring *rx_ring,
4586 				   int cleaned_count)
4587 {
4588 	struct e1000_hw *hw = &adapter->hw;
4589 	struct pci_dev *pdev = adapter->pdev;
4590 	struct e1000_rx_desc *rx_desc;
4591 	struct e1000_rx_buffer *buffer_info;
4592 	unsigned int i;
4593 	unsigned int bufsz = adapter->rx_buffer_len;
4594 
4595 	i = rx_ring->next_to_use;
4596 	buffer_info = &rx_ring->buffer_info[i];
4597 
4598 	while (cleaned_count--) {
4599 		void *data;
4600 
4601 		if (buffer_info->rxbuf.data)
4602 			goto skip;
4603 
4604 		data = e1000_alloc_frag(adapter);
4605 		if (!data) {
4606 			/* Better luck next round */
4607 			adapter->alloc_rx_buff_failed++;
4608 			break;
4609 		}
4610 
4611 		/* Fix for errata 23, can't cross 64kB boundary */
4612 		if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4613 			void *olddata = data;
4614 			e_err(rx_err, "skb align check failed: %u bytes at "
4615 			      "%p\n", bufsz, data);
4616 			/* Try again, without freeing the previous */
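			/* (Keeping the old allocation pinned presumably stops
			 * the allocator from handing the same misaligned
			 * buffer straight back.)
			 */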
4617 			data = e1000_alloc_frag(adapter);
4618 			/* Failed allocation, critical failure */
4619 			if (!data) {
4620 				skb_free_frag(olddata);
4621 				adapter->alloc_rx_buff_failed++;
4622 				break;
4623 			}
4624 
4625 			if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4626 				/* give up */
4627 				skb_free_frag(data);
4628 				skb_free_frag(olddata);
4629 				adapter->alloc_rx_buff_failed++;
4630 				break;
4631 			}
4632 
4633 			/* Use new allocation */
4634 			skb_free_frag(olddata);
4635 		}
4636 		buffer_info->dma = dma_map_single(&pdev->dev,
4637 						  data,
4638 						  adapter->rx_buffer_len,
4639 						  DMA_FROM_DEVICE);
4640 		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4641 			skb_free_frag(data);
4642 			buffer_info->dma = 0;
4643 			adapter->alloc_rx_buff_failed++;
4644 			break;
4645 		}
4646 
4647 		/* XXX if it was allocated cleanly it will never map to a
4648 		 * boundary crossing
4649 		 */
4650 
4651 		/* Fix for errata 23, can't cross 64kB boundary */
4652 		if (!e1000_check_64k_bound(adapter,
4653 					(void *)(unsigned long)buffer_info->dma,
4654 					adapter->rx_buffer_len)) {
4655 			e_err(rx_err, "dma align check failed: %u bytes at "
4656 			      "%p\n", adapter->rx_buffer_len,
4657 			      (void *)(unsigned long)buffer_info->dma);
4658 
4659 			dma_unmap_single(&pdev->dev, buffer_info->dma,
4660 					 adapter->rx_buffer_len,
4661 					 DMA_FROM_DEVICE);
4662 
4663 			skb_free_frag(data);
4664 			buffer_info->rxbuf.data = NULL;
4665 			buffer_info->dma = 0;
4666 
4667 			adapter->alloc_rx_buff_failed++;
4668 			break;
4669 		}
4670 		buffer_info->rxbuf.data = data;
4671  skip:
4672 		rx_desc = E1000_RX_DESC(*rx_ring, i);
4673 		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4674 
4675 		if (unlikely(++i == rx_ring->count))
4676 			i = 0;
4677 		buffer_info = &rx_ring->buffer_info[i];
4678 	}
4679 
4680 	if (likely(rx_ring->next_to_use != i)) {
4681 		rx_ring->next_to_use = i;
4682 		if (unlikely(i-- == 0))
4683 			i = (rx_ring->count - 1);
4684 
4685 		/* Force memory writes to complete before letting h/w
4686 		 * know there are new descriptors to fetch.  (Only
4687 		 * applicable for weak-ordered memory model archs,
4688 		 * such as IA-64).
4689 		 */
4690 		wmb();
4691 		writel(i, hw->hw_addr + rx_ring->rdt);
4692 	}
4693 }
4694 
4695 /**
4696  * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
4697  * @adapter: board private structure
4698  **/
4699 static void e1000_smartspeed(struct e1000_adapter *adapter)
4700 {
4701 	struct e1000_hw *hw = &adapter->hw;
4702 	u16 phy_status;
4703 	u16 phy_ctrl;
4704 
4705 	if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4706 	   !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
4707 		return;
4708 
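	/* SmartSpeed work-around: if 1000BASE-T master/slave negotiation
	 * keeps faulting, drop the manual master/slave setting and restart
	 * autonegotiation so the link has a chance to come up.
	 */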
4709 	if (adapter->smartspeed == 0) {
4710 		/* If Master/Slave config fault is asserted twice,
4711 		 * we assume back-to-back
4712 		 */
4713 		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4714 		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
4715 		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4716 		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
4717 		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4718 		if (phy_ctrl & CR_1000T_MS_ENABLE) {
4719 			phy_ctrl &= ~CR_1000T_MS_ENABLE;
4720 			e1000_write_phy_reg(hw, PHY_1000T_CTRL,
4721 					    phy_ctrl);
4722 			adapter->smartspeed++;
4723 			if (!e1000_phy_setup_autoneg(hw) &&
4724 			   !e1000_read_phy_reg(hw, PHY_CTRL,
4725 					       &phy_ctrl)) {
4726 				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4727 					     MII_CR_RESTART_AUTO_NEG);
4728 				e1000_write_phy_reg(hw, PHY_CTRL,
4729 						    phy_ctrl);
4730 			}
4731 		}
4732 		return;
4733 	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4734 		/* If still no link, perhaps using 2/3 pair cable */
4735 		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4736 		phy_ctrl |= CR_1000T_MS_ENABLE;
4737 		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4738 		if (!e1000_phy_setup_autoneg(hw) &&
4739 		   !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
4740 			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4741 				     MII_CR_RESTART_AUTO_NEG);
4742 			e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
4743 		}
4744 	}
4745 	/* Restart process after E1000_SMARTSPEED_MAX iterations */
4746 	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4747 		adapter->smartspeed = 0;
4748 }
4749 
4750 /**
4751  * e1000_ioctl - handle ioctl calls
4752  * @netdev: network interface device structure
4753  * @ifr: pointer to interface request structure
4754  * @cmd: ioctl command to execute
4755  **/
4756 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4757 {
4758 	switch (cmd) {
4759 	case SIOCGMIIPHY:
4760 	case SIOCGMIIREG:
4761 	case SIOCSMIIREG:
4762 		return e1000_mii_ioctl(netdev, ifr, cmd);
4763 	default:
4764 		return -EOPNOTSUPP;
4765 	}
4766 }
4767 
4768 /**
4769  * e1000_mii_ioctl - handle MII ioctl calls
4770  * @netdev: network interface device structure
4771  * @ifr: pointer to interface request structure
4772  * @cmd: ioctl command to execute
4773  **/
4774 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4775 			   int cmd)
4776 {
4777 	struct e1000_adapter *adapter = netdev_priv(netdev);
4778 	struct e1000_hw *hw = &adapter->hw;
4779 	struct mii_ioctl_data *data = if_mii(ifr);
4780 	int retval;
4781 	u16 mii_reg;
4782 	unsigned long flags;
4783 
4784 	if (hw->media_type != e1000_media_type_copper)
4785 		return -EOPNOTSUPP;
4786 
4787 	switch (cmd) {
4788 	case SIOCGMIIPHY:
4789 		data->phy_id = hw->phy_addr;
4790 		break;
4791 	case SIOCGMIIREG:
4792 		spin_lock_irqsave(&adapter->stats_lock, flags);
4793 		if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
4794 				   &data->val_out)) {
4795 			spin_unlock_irqrestore(&adapter->stats_lock, flags);
4796 			return -EIO;
4797 		}
4798 		spin_unlock_irqrestore(&adapter->stats_lock, flags);
4799 		break;
4800 	case SIOCSMIIREG:
4801 		if (data->reg_num & ~(0x1F))
4802 			return -EFAULT;
4803 		mii_reg = data->val_in;
4804 		spin_lock_irqsave(&adapter->stats_lock, flags);
4805 		if (e1000_write_phy_reg(hw, data->reg_num,
4806 					mii_reg)) {
4807 			spin_unlock_irqrestore(&adapter->stats_lock, flags);
4808 			return -EIO;
4809 		}
4810 		spin_unlock_irqrestore(&adapter->stats_lock, flags);
4811 		if (hw->media_type == e1000_media_type_copper) {
4812 			switch (data->reg_num) {
4813 			case PHY_CTRL:
4814 				if (mii_reg & MII_CR_POWER_DOWN)
4815 					break;
4816 				if (mii_reg & MII_CR_AUTO_NEG_EN) {
4817 					hw->autoneg = 1;
4818 					hw->autoneg_advertised = 0x2F;
4819 				} else {
4820 					u32 speed;
4821 					if (mii_reg & 0x40)
4822 						speed = SPEED_1000;
4823 					else if (mii_reg & 0x2000)
4824 						speed = SPEED_100;
4825 					else
4826 						speed = SPEED_10;
4827 					retval = e1000_set_spd_dplx(
4828 						adapter, speed,
4829 						((mii_reg & 0x100)
4830 						 ? DUPLEX_FULL :
4831 						 DUPLEX_HALF));
4832 					if (retval)
4833 						return retval;
4834 				}
4835 				if (netif_running(adapter->netdev))
4836 					e1000_reinit_locked(adapter);
4837 				else
4838 					e1000_reset(adapter);
4839 				break;
4840 			case M88E1000_PHY_SPEC_CTRL:
4841 			case M88E1000_EXT_PHY_SPEC_CTRL:
4842 				if (e1000_phy_reset(hw))
4843 					return -EIO;
4844 				break;
4845 			}
4846 		} else {
4847 			switch (data->reg_num) {
4848 			case PHY_CTRL:
4849 				if (mii_reg & MII_CR_POWER_DOWN)
4850 					break;
4851 				if (netif_running(adapter->netdev))
4852 					e1000_reinit_locked(adapter);
4853 				else
4854 					e1000_reset(adapter);
4855 				break;
4856 			}
4857 		}
4858 		break;
4859 	default:
4860 		return -EOPNOTSUPP;
4861 	}
4862 	return E1000_SUCCESS;
4863 }
4864 
4865 void e1000_pci_set_mwi(struct e1000_hw *hw)
4866 {
4867 	struct e1000_adapter *adapter = hw->back;
4868 	int ret_val = pci_set_mwi(adapter->pdev);
4869 
4870 	if (ret_val)
4871 		e_err(probe, "Error in setting MWI\n");
4872 }
4873 
4874 void e1000_pci_clear_mwi(struct e1000_hw *hw)
4875 {
4876 	struct e1000_adapter *adapter = hw->back;
4877 
4878 	pci_clear_mwi(adapter->pdev);
4879 }
4880 
4881 int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
4882 {
4883 	struct e1000_adapter *adapter = hw->back;
4884 	return pcix_get_mmrbc(adapter->pdev);
4885 }
4886 
4887 void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4888 {
4889 	struct e1000_adapter *adapter = hw->back;
4890 	pcix_set_mmrbc(adapter->pdev, mmrbc);
4891 }
4892 
4893 void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
4894 {
4895 	outl(value, port);
4896 }
4897 
4898 static bool e1000_vlan_used(struct e1000_adapter *adapter)
4899 {
4900 	u16 vid;
4901 
4902 	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4903 		return true;
4904 	return false;
4905 }
4906 
4907 static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4908 			      netdev_features_t features)
4909 {
4910 	struct e1000_hw *hw = &adapter->hw;
4911 	u32 ctrl;
4912 
4913 	ctrl = er32(CTRL);
4914 	if (features & NETIF_F_HW_VLAN_CTAG_RX) {
4915 		/* enable VLAN tag insert/strip */
4916 		ctrl |= E1000_CTRL_VME;
4917 	} else {
4918 		/* disable VLAN tag insert/strip */
4919 		ctrl &= ~E1000_CTRL_VME;
4920 	}
4921 	ew32(CTRL, ctrl);
4922 }
4923 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4924 				     bool filter_on)
4925 {
4926 	struct e1000_hw *hw = &adapter->hw;
4927 	u32 rctl;
4928 
4929 	if (!test_bit(__E1000_DOWN, &adapter->flags))
4930 		e1000_irq_disable(adapter);
4931 
4932 	__e1000_vlan_mode(adapter, adapter->netdev->features);
4933 	if (filter_on) {
4934 		/* enable VLAN receive filtering */
4935 		rctl = er32(RCTL);
4936 		rctl &= ~E1000_RCTL_CFIEN;
4937 		if (!(adapter->netdev->flags & IFF_PROMISC))
4938 			rctl |= E1000_RCTL_VFE;
4939 		ew32(RCTL, rctl);
4940 		e1000_update_mng_vlan(adapter);
4941 	} else {
4942 		/* disable VLAN receive filtering */
4943 		rctl = er32(RCTL);
4944 		rctl &= ~E1000_RCTL_VFE;
4945 		ew32(RCTL, rctl);
4946 	}
4947 
4948 	if (!test_bit(__E1000_DOWN, &adapter->flags))
4949 		e1000_irq_enable(adapter);
4950 }
4951 
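/* e1000_vlan_mode - apply a new VLAN offload setting
 *
 * Re-programs tag stripping for the requested feature set with interrupts
 * disabled around the register update.
 */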
static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);

	__e1000_vlan_mode(adapter, features);

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);
}

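/* e1000_vlan_rx_add_vid - add a VLAN ID to the hardware filter table
 *
 * The VLAN Filter Table Array (VFTA) is a 128-entry array of 32-bit
 * registers, one bit per VID: bits [11:5] of the VID select the register
 * and bits [4:0] select the bit within it.  For example, vid 100 lands in
 * register index 3 (100 >> 5), bit 4 (100 & 0x1F).  The management VLAN is
 * left alone, filtering is switched on when the first VID is added, and
 * the VID is recorded in adapter->active_vlans.
 */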
static int e1000_vlan_rx_add_vid(struct net_device *netdev,
				 __be16 proto, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    (vid == adapter->mng_vlan_id))
		return 0;

	if (!e1000_vlan_used(adapter))
		e1000_vlan_filter_on_off(adapter, true);

	/* add VID to filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);

	set_bit(vid, adapter->active_vlans);

	return 0;
}

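/* e1000_vlan_rx_kill_vid - remove a VLAN ID from the hardware filter table
 *
 * Clears the VID's bit in the VFTA and in adapter->active_vlans; once the
 * last VID is gone, VLAN receive filtering is switched off again.
 */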
static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
				  __be16 proto, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);

	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);

	clear_bit(vid, adapter->active_vlans);

	if (!e1000_vlan_used(adapter))
		e1000_vlan_filter_on_off(adapter, false);

	return 0;
}

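/* e1000_restore_vlan - re-program the VLAN filter table after a reset
 *
 * Walks adapter->active_vlans and re-adds every VID so the hardware filter
 * matches the software state again.
 */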
static void e1000_restore_vlan(struct e1000_adapter *adapter)
{
	u16 vid;

	if (!e1000_vlan_used(adapter))
		return;

	e1000_vlan_filter_on_off(adapter, true);
	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}

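/* e1000_set_spd_dplx - force link speed and duplex
 *
 * With the standard ethtool encodings (SPEED_10 == 10, SPEED_100 == 100,
 * SPEED_1000 == 1000, DUPLEX_HALF == 0, DUPLEX_FULL == 1) every valid
 * speed/duplex pair produces a distinct sum, so a single switch on
 * spd + dplx can select the forced mode; 1000/full is handled by
 * advertising only 1000FULL through autonegotiation instead of forcing.
 */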
int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
{
	struct e1000_hw *hw = &adapter->hw;

	hw->autoneg = 0;

	/* Make sure dplx is at most one bit wide and the lsb of speed is
	 * not set, so that the switch() below can distinguish every case
	 */
	if ((spd & 1) || (dplx & ~1))
		goto err_inval;

	/* Fiber NICs only allow 1000 Mbps full duplex */
	if ((hw->media_type == e1000_media_type_fiber) &&
	    spd != SPEED_1000 &&
	    dplx != DUPLEX_FULL)
		goto err_inval;

	switch (spd + dplx) {
	case SPEED_10 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_10_half;
		break;
	case SPEED_10 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_10_full;
		break;
	case SPEED_100 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_100_half;
		break;
	case SPEED_100 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_100_full;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		hw->autoneg = 1;
		hw->autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		goto err_inval;
	}

	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
	hw->mdix = AUTO_ALL_MODES;

	return 0;

err_inval:
	e_err(probe, "Unsupported Speed/Duplex configuration\n");
	return -EINVAL;
}

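/* __e1000_shutdown - common suspend/shutdown path
 *
 * Detaches and stops the interface, saves PCI state when CONFIG_PM is
 * enabled, programs the wake-up filters (WUFC/WUC) if Wake-on-LAN is
 * configured, and reports through *enable_wake whether PME should be armed
 * before the device is powered down.
 */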
static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, ctrl_ext, rctl, status;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		int count = E1000_CHECK_RESET_COUNT;

		while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
			usleep_range(10000, 20000);

		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
		e1000_down(adapter);
	}

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = er32(STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		e1000_setup_rctl(adapter);
		e1000_set_rx_mode(netdev);

		rctl = er32(RCTL);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC)
			rctl |= E1000_RCTL_MPE;

		/* enable receives in the hardware */
		ew32(RCTL, rctl | E1000_RCTL_EN);

		if (hw->mac_type >= e1000_82540) {
			ctrl = er32(CTRL);
			/* advertise wake from D3Cold */
			#define E1000_CTRL_ADVD3WUC 0x00100000
			/* phy power management enable */
			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
			ctrl |= E1000_CTRL_ADVD3WUC |
				E1000_CTRL_EN_PHY_PWR_MGMT;
			ew32(CTRL, ctrl);
		}

		if (hw->media_type == e1000_media_type_fiber ||
		    hw->media_type == e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = er32(CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
			ew32(CTRL_EXT, ctrl_ext);
		}

		ew32(WUC, E1000_WUC_PME_EN);
		ew32(WUFC, wufc);
	} else {
		ew32(WUC, 0);
		ew32(WUFC, 0);
	}

	e1000_release_manageability(adapter);

	*enable_wake = !!wufc;

	/* make sure adapter isn't asleep if manageability is enabled */
	if (adapter->en_mng_pt)
		*enable_wake = true;

	if (netif_running(netdev))
		e1000_free_irq(adapter);

	if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
		pci_disable_device(pdev);

	return 0;
}

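/* Legacy PCI power-management entry points, only built when CONFIG_PM is
 * enabled.
 */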
#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __e1000_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

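/* e1000_resume - bring the device back up after a system suspend
 *
 * Restores PCI state, re-enables the device and bus mastering, disarms
 * wake-up, re-requests the IRQ if the interface was running, resets the
 * hardware, and finally restarts the interface and re-attaches the netdev.
 */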
static int e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (adapter->need_ioport)
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot enable PCI device from suspend\n");
		return err;
	}

	/* flush memory to make sure state is correct */
	smp_mb__before_atomic();
	clear_bit(__E1000_DISABLED, &adapter->flags);
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(netdev)) {
		err = e1000_request_irq(adapter);
		if (err)
			return err;
	}

	e1000_power_up_phy(adapter);
	e1000_reset(adapter);
	ew32(WUS, ~0);

	e1000_init_manageability(adapter);

	if (netif_running(netdev))
		e1000_up(adapter);

	netif_device_attach(netdev);

	return 0;
}
#endif

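/* e1000_shutdown - PCI shutdown hook (reboot/poweroff)
 *
 * Runs the common shutdown path; only when the system is actually powering
 * off does it arm (or disarm) wake from D3 and drop the device to D3hot.
 */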
static void e1000_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__e1000_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	e1000_intr(adapter->pdev->irq, netdev);
	enable_irq(adapter->pdev->irq);
}
#endif

/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e1000_down(adapter);

	if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
		pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * e1000_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first half of the e1000_resume routine.
 */
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	if (adapter->need_ioport)
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* flush memory to make sure state is correct */
	smp_mb__before_atomic();
	clear_bit(__E1000_DISABLED, &adapter->flags);
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	e1000_reset(adapter);
	ew32(WUS, ~0);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * e1000_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second half of the e1000_resume routine.
 */
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000_init_manageability(adapter);

	if (netif_running(netdev)) {
		if (e1000_up(adapter)) {
			pr_info("can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}

/* e1000_main.c */