1 /*******************************************************************************
2
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2006 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27 *******************************************************************************/
28
29 #include "e1000.h"
30 #include <net/ip6_checksum.h>
31 #include <linux/io.h>
32 #include <linux/prefetch.h>
33 #include <linux/bitops.h>
34 #include <linux/if_vlan.h>
35
36 char e1000_driver_name[] = "e1000";
37 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
38 #define DRV_VERSION "7.3.21-k8-NAPI"
39 const char e1000_driver_version[] = DRV_VERSION;
40 static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
41
42 /* e1000_pci_tbl - PCI Device ID Table
43 *
44 * Last entry must be all 0s
45 *
46 * Macro expands to...
47 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
48 */
49 static const struct pci_device_id e1000_pci_tbl[] = {
50 INTEL_E1000_ETHERNET_DEVICE(0x1000),
51 INTEL_E1000_ETHERNET_DEVICE(0x1001),
52 INTEL_E1000_ETHERNET_DEVICE(0x1004),
53 INTEL_E1000_ETHERNET_DEVICE(0x1008),
54 INTEL_E1000_ETHERNET_DEVICE(0x1009),
55 INTEL_E1000_ETHERNET_DEVICE(0x100C),
56 INTEL_E1000_ETHERNET_DEVICE(0x100D),
57 INTEL_E1000_ETHERNET_DEVICE(0x100E),
58 INTEL_E1000_ETHERNET_DEVICE(0x100F),
59 INTEL_E1000_ETHERNET_DEVICE(0x1010),
60 INTEL_E1000_ETHERNET_DEVICE(0x1011),
61 INTEL_E1000_ETHERNET_DEVICE(0x1012),
62 INTEL_E1000_ETHERNET_DEVICE(0x1013),
63 INTEL_E1000_ETHERNET_DEVICE(0x1014),
64 INTEL_E1000_ETHERNET_DEVICE(0x1015),
65 INTEL_E1000_ETHERNET_DEVICE(0x1016),
66 INTEL_E1000_ETHERNET_DEVICE(0x1017),
67 INTEL_E1000_ETHERNET_DEVICE(0x1018),
68 INTEL_E1000_ETHERNET_DEVICE(0x1019),
69 INTEL_E1000_ETHERNET_DEVICE(0x101A),
70 INTEL_E1000_ETHERNET_DEVICE(0x101D),
71 INTEL_E1000_ETHERNET_DEVICE(0x101E),
72 INTEL_E1000_ETHERNET_DEVICE(0x1026),
73 INTEL_E1000_ETHERNET_DEVICE(0x1027),
74 INTEL_E1000_ETHERNET_DEVICE(0x1028),
75 INTEL_E1000_ETHERNET_DEVICE(0x1075),
76 INTEL_E1000_ETHERNET_DEVICE(0x1076),
77 INTEL_E1000_ETHERNET_DEVICE(0x1077),
78 INTEL_E1000_ETHERNET_DEVICE(0x1078),
79 INTEL_E1000_ETHERNET_DEVICE(0x1079),
80 INTEL_E1000_ETHERNET_DEVICE(0x107A),
81 INTEL_E1000_ETHERNET_DEVICE(0x107B),
82 INTEL_E1000_ETHERNET_DEVICE(0x107C),
83 INTEL_E1000_ETHERNET_DEVICE(0x108A),
84 INTEL_E1000_ETHERNET_DEVICE(0x1099),
85 INTEL_E1000_ETHERNET_DEVICE(0x10B5),
86 INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
87 /* required last entry */
88 {0,}
89 };
90
91 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
92
93 int e1000_up(struct e1000_adapter *adapter);
94 void e1000_down(struct e1000_adapter *adapter);
95 void e1000_reinit_locked(struct e1000_adapter *adapter);
96 void e1000_reset(struct e1000_adapter *adapter);
97 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
98 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
99 void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
100 void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
101 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
102 struct e1000_tx_ring *txdr);
103 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
104 struct e1000_rx_ring *rxdr);
105 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
106 struct e1000_tx_ring *tx_ring);
107 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
108 struct e1000_rx_ring *rx_ring);
109 void e1000_update_stats(struct e1000_adapter *adapter);
110
111 static int e1000_init_module(void);
112 static void e1000_exit_module(void);
113 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
114 static void e1000_remove(struct pci_dev *pdev);
115 static int e1000_alloc_queues(struct e1000_adapter *adapter);
116 static int e1000_sw_init(struct e1000_adapter *adapter);
117 int e1000_open(struct net_device *netdev);
118 int e1000_close(struct net_device *netdev);
119 static void e1000_configure_tx(struct e1000_adapter *adapter);
120 static void e1000_configure_rx(struct e1000_adapter *adapter);
121 static void e1000_setup_rctl(struct e1000_adapter *adapter);
122 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
123 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
124 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
125 struct e1000_tx_ring *tx_ring);
126 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
127 struct e1000_rx_ring *rx_ring);
128 static void e1000_set_rx_mode(struct net_device *netdev);
129 static void e1000_update_phy_info_task(struct work_struct *work);
130 static void e1000_watchdog(struct work_struct *work);
131 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
132 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
133 struct net_device *netdev);
134 static struct net_device_stats *e1000_get_stats(struct net_device *netdev);
135 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
136 static int e1000_set_mac(struct net_device *netdev, void *p);
137 static irqreturn_t e1000_intr(int irq, void *data);
138 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
139 struct e1000_tx_ring *tx_ring);
140 static int e1000_clean(struct napi_struct *napi, int budget);
141 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
142 struct e1000_rx_ring *rx_ring,
143 int *work_done, int work_to_do);
144 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
145 struct e1000_rx_ring *rx_ring,
146 int *work_done, int work_to_do);
147 static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
148 struct e1000_rx_ring *rx_ring,
149 int cleaned_count)
150 {
151 }
152 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
153 struct e1000_rx_ring *rx_ring,
154 int cleaned_count);
155 static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
156 struct e1000_rx_ring *rx_ring,
157 int cleaned_count);
158 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
159 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
160 int cmd);
161 static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
162 static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
163 static void e1000_tx_timeout(struct net_device *dev);
164 static void e1000_reset_task(struct work_struct *work);
165 static void e1000_smartspeed(struct e1000_adapter *adapter);
166 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
167 struct sk_buff *skb);
168
169 static bool e1000_vlan_used(struct e1000_adapter *adapter);
170 static void e1000_vlan_mode(struct net_device *netdev,
171 netdev_features_t features);
172 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
173 bool filter_on);
174 static int e1000_vlan_rx_add_vid(struct net_device *netdev,
175 __be16 proto, u16 vid);
176 static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
177 __be16 proto, u16 vid);
178 static void e1000_restore_vlan(struct e1000_adapter *adapter);
179
180 #ifdef CONFIG_PM
181 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
182 static int e1000_resume(struct pci_dev *pdev);
183 #endif
184 static void e1000_shutdown(struct pci_dev *pdev);
185
186 #ifdef CONFIG_NET_POLL_CONTROLLER
187 /* for netdump / net console */
188 static void e1000_netpoll(struct net_device *netdev);
189 #endif
190
191 #define COPYBREAK_DEFAULT 256
192 static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
193 module_param(copybreak, uint, 0644);
194 MODULE_PARM_DESC(copybreak,
195 "Maximum size of packet that is copied to a new buffer on receive");
196
197 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
198 pci_channel_state_t state);
199 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
200 static void e1000_io_resume(struct pci_dev *pdev);
201
202 static const struct pci_error_handlers e1000_err_handler = {
203 .error_detected = e1000_io_error_detected,
204 .slot_reset = e1000_io_slot_reset,
205 .resume = e1000_io_resume,
206 };
207
208 static struct pci_driver e1000_driver = {
209 .name = e1000_driver_name,
210 .id_table = e1000_pci_tbl,
211 .probe = e1000_probe,
212 .remove = e1000_remove,
213 #ifdef CONFIG_PM
214 /* Power Management Hooks */
215 .suspend = e1000_suspend,
216 .resume = e1000_resume,
217 #endif
218 .shutdown = e1000_shutdown,
219 .err_handler = &e1000_err_handler
220 };
221
222 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
223 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
224 MODULE_LICENSE("GPL");
225 MODULE_VERSION(DRV_VERSION);
226
227 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
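/* debug < 0 (the default) makes netif_msg_init() fall back to DEFAULT_MSG_ENABLE */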
228 static int debug = -1;
229 module_param(debug, int, 0);
230 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
231
232 /**
233 * e1000_get_hw_dev - return the net_device backing a hw struct
234 * @hw: pointer to the hardware structure
235 * Used by the hardware layer to print debugging information.
236 **/
237 struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
238 {
239 struct e1000_adapter *adapter = hw->back;
240 return adapter->netdev;
241 }
242
243 /**
244 * e1000_init_module - Driver Registration Routine
245 *
246 * e1000_init_module is the first routine called when the driver is
247 * loaded. All it does is register with the PCI subsystem.
248 **/
249 static int __init e1000_init_module(void)
250 {
251 int ret;
252 pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
253
254 pr_info("%s\n", e1000_copyright);
255
256 ret = pci_register_driver(&e1000_driver);
257 if (copybreak != COPYBREAK_DEFAULT) {
258 if (copybreak == 0)
259 pr_info("copybreak disabled\n");
260 else
261 pr_info("copybreak enabled for "
262 "packets <= %u bytes\n", copybreak);
263 }
264 return ret;
265 }
266
267 module_init(e1000_init_module);
268
269 /**
270 * e1000_exit_module - Driver Exit Cleanup Routine
271 *
272 * e1000_exit_module is called just before the driver is removed
273 * from memory.
274 **/
275 static void __exit e1000_exit_module(void)
276 {
277 pci_unregister_driver(&e1000_driver);
278 }
279
280 module_exit(e1000_exit_module);
281
282 static int e1000_request_irq(struct e1000_adapter *adapter)
283 {
284 struct net_device *netdev = adapter->netdev;
285 irq_handler_t handler = e1000_intr;
286 int irq_flags = IRQF_SHARED;
287 int err;
288
289 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
290 netdev);
291 if (err) {
292 e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
293 }
294
295 return err;
296 }
297
298 static void e1000_free_irq(struct e1000_adapter *adapter)
299 {
300 struct net_device *netdev = adapter->netdev;
301
302 free_irq(adapter->pdev->irq, netdev);
303 }
304
305 /**
306 * e1000_irq_disable - Mask off interrupt generation on the NIC
307 * @adapter: board private structure
308 **/
309 static void e1000_irq_disable(struct e1000_adapter *adapter)
310 {
311 struct e1000_hw *hw = &adapter->hw;
312
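/* writing all 1s to the Interrupt Mask Clear register masks every interrupt cause */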
313 ew32(IMC, ~0);
314 E1000_WRITE_FLUSH();
315 synchronize_irq(adapter->pdev->irq);
316 }
317
318 /**
319 * e1000_irq_enable - Enable default interrupt generation settings
320 * @adapter: board private structure
321 **/
322 static void e1000_irq_enable(struct e1000_adapter *adapter)
323 {
324 struct e1000_hw *hw = &adapter->hw;
325
326 ew32(IMS, IMS_ENABLE_MASK);
327 E1000_WRITE_FLUSH();
328 }
329
330 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
331 {
332 struct e1000_hw *hw = &adapter->hw;
333 struct net_device *netdev = adapter->netdev;
334 u16 vid = hw->mng_cookie.vlan_id;
335 u16 old_vid = adapter->mng_vlan_id;
336
337 if (!e1000_vlan_used(adapter))
338 return;
339
340 if (!test_bit(vid, adapter->active_vlans)) {
341 if (hw->mng_cookie.status &
342 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
343 e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
344 adapter->mng_vlan_id = vid;
345 } else {
346 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
347 }
348 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
349 (vid != old_vid) &&
350 !test_bit(old_vid, adapter->active_vlans))
351 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
352 old_vid);
353 } else {
354 adapter->mng_vlan_id = vid;
355 }
356 }
357
358 static void e1000_init_manageability(struct e1000_adapter *adapter)
359 {
360 struct e1000_hw *hw = &adapter->hw;
361
362 if (adapter->en_mng_pt) {
363 u32 manc = er32(MANC);
364
365 /* disable hardware interception of ARP */
366 manc &= ~(E1000_MANC_ARP_EN);
367
368 ew32(MANC, manc);
369 }
370 }
371
372 static void e1000_release_manageability(struct e1000_adapter *adapter)
373 {
374 struct e1000_hw *hw = &adapter->hw;
375
376 if (adapter->en_mng_pt) {
377 u32 manc = er32(MANC);
378
379 /* re-enable hardware interception of ARP */
380 manc |= E1000_MANC_ARP_EN;
381
382 ew32(MANC, manc);
383 }
384 }
385
386 /**
387 * e1000_configure - configure the hardware for RX and TX
388 * @adapter: board private structure
389 **/
390 static void e1000_configure(struct e1000_adapter *adapter)
391 {
392 struct net_device *netdev = adapter->netdev;
393 int i;
394
395 e1000_set_rx_mode(netdev);
396
397 e1000_restore_vlan(adapter);
398 e1000_init_manageability(adapter);
399
400 e1000_configure_tx(adapter);
401 e1000_setup_rctl(adapter);
402 e1000_configure_rx(adapter);
403 /* call E1000_DESC_UNUSED which always leaves
404 * at least 1 descriptor unused to make sure
405 * next_to_use != next_to_clean
406 */
407 for (i = 0; i < adapter->num_rx_queues; i++) {
408 struct e1000_rx_ring *ring = &adapter->rx_ring[i];
409 adapter->alloc_rx_buf(adapter, ring,
410 E1000_DESC_UNUSED(ring));
411 }
412 }
413
414 int e1000_up(struct e1000_adapter *adapter)
415 {
416 struct e1000_hw *hw = &adapter->hw;
417
418 /* hardware has been reset, we need to reload some things */
419 e1000_configure(adapter);
420
421 clear_bit(__E1000_DOWN, &adapter->flags);
422
423 napi_enable(&adapter->napi);
424
425 e1000_irq_enable(adapter);
426
427 netif_wake_queue(adapter->netdev);
428
429 /* fire a link change interrupt to start the watchdog */
430 ew32(ICS, E1000_ICS_LSC);
431 return 0;
432 }
433
434 /**
435 * e1000_power_up_phy - restore link in case the phy was powered down
436 * @adapter: address of board private structure
437 *
438 * The phy may be powered down to save power and turn off link when the
439 * driver is unloaded and wake on lan is not enabled (among others)
440 * *** this routine MUST be followed by a call to e1000_reset ***
441 **/
442 void e1000_power_up_phy(struct e1000_adapter *adapter)
443 {
444 struct e1000_hw *hw = &adapter->hw;
445 u16 mii_reg = 0;
446
447 /* Just clear the power down bit to wake the phy back up */
448 if (hw->media_type == e1000_media_type_copper) {
449 /* according to the manual, the phy will retain its
450 * settings across a power-down/up cycle
451 */
452 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
453 mii_reg &= ~MII_CR_POWER_DOWN;
454 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
455 }
456 }
457
458 static void e1000_power_down_phy(struct e1000_adapter *adapter)
459 {
460 struct e1000_hw *hw = &adapter->hw;
461
462 /* Power down the PHY so no link is implied when interface is down *
463 * The PHY cannot be powered down if any of the following is true *
464 * (a) WoL is enabled
465 * (b) AMT is active
466 * (c) SoL/IDER session is active
467 */
468 if (!adapter->wol && hw->mac_type >= e1000_82540 &&
469 hw->media_type == e1000_media_type_copper) {
470 u16 mii_reg = 0;
471
472 switch (hw->mac_type) {
473 case e1000_82540:
474 case e1000_82545:
475 case e1000_82545_rev_3:
476 case e1000_82546:
477 case e1000_ce4100:
478 case e1000_82546_rev_3:
479 case e1000_82541:
480 case e1000_82541_rev_2:
481 case e1000_82547:
482 case e1000_82547_rev_2:
483 if (er32(MANC) & E1000_MANC_SMBUS_EN)
484 goto out;
485 break;
486 default:
487 goto out;
488 }
489 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
490 mii_reg |= MII_CR_POWER_DOWN;
491 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
492 msleep(1);
493 }
494 out:
495 return;
496 }
497
498 static void e1000_down_and_stop(struct e1000_adapter *adapter)
499 {
500 set_bit(__E1000_DOWN, &adapter->flags);
501
502 cancel_delayed_work_sync(&adapter->watchdog_task);
503
504 /*
505 * Since the watchdog task can reschedule other tasks, we should cancel
506 * it first; otherwise we can run into a situation where a work item
507 * is still running after the adapter has been turned down.
508 */
509
510 cancel_delayed_work_sync(&adapter->phy_info_task);
511 cancel_delayed_work_sync(&adapter->fifo_stall_task);
512
513 /* Only kill reset task if adapter is not resetting */
514 if (!test_bit(__E1000_RESETTING, &adapter->flags))
515 cancel_work_sync(&adapter->reset_task);
516 }
517
518 void e1000_down(struct e1000_adapter *adapter)
519 {
520 struct e1000_hw *hw = &adapter->hw;
521 struct net_device *netdev = adapter->netdev;
522 u32 rctl, tctl;
523
524 netif_carrier_off(netdev);
525
526 /* disable receives in the hardware */
527 rctl = er32(RCTL);
528 ew32(RCTL, rctl & ~E1000_RCTL_EN);
529 /* flush and sleep below */
530
531 netif_tx_disable(netdev);
532
533 /* disable transmits in the hardware */
534 tctl = er32(TCTL);
535 tctl &= ~E1000_TCTL_EN;
536 ew32(TCTL, tctl);
537 /* flush both disables and wait for them to finish */
538 E1000_WRITE_FLUSH();
539 msleep(10);
540
541 napi_disable(&adapter->napi);
542
543 e1000_irq_disable(adapter);
544
545 /* Setting DOWN must be after irq_disable to prevent
546 * a screaming interrupt. Setting DOWN also prevents
547 * tasks from rescheduling.
548 */
549 e1000_down_and_stop(adapter);
550
551 adapter->link_speed = 0;
552 adapter->link_duplex = 0;
553
554 e1000_reset(adapter);
555 e1000_clean_all_tx_rings(adapter);
556 e1000_clean_all_rx_rings(adapter);
557 }
558
559 void e1000_reinit_locked(struct e1000_adapter *adapter)
560 {
561 WARN_ON(in_interrupt());
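/* wait for any reset already in progress to finish before starting ours */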
562 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
563 msleep(1);
564 e1000_down(adapter);
565 e1000_up(adapter);
566 clear_bit(__E1000_RESETTING, &adapter->flags);
567 }
568
569 void e1000_reset(struct e1000_adapter *adapter)
570 {
571 struct e1000_hw *hw = &adapter->hw;
572 u32 pba = 0, tx_space, min_tx_space, min_rx_space;
573 bool legacy_pba_adjust = false;
574 u16 hwm;
575
576 /* Repartition Pba for greater than 9k mtu
577 * To take effect CTRL.RST is required.
578 */
579
580 switch (hw->mac_type) {
581 case e1000_82542_rev2_0:
582 case e1000_82542_rev2_1:
583 case e1000_82543:
584 case e1000_82544:
585 case e1000_82540:
586 case e1000_82541:
587 case e1000_82541_rev_2:
588 legacy_pba_adjust = true;
589 pba = E1000_PBA_48K;
590 break;
591 case e1000_82545:
592 case e1000_82545_rev_3:
593 case e1000_82546:
594 case e1000_ce4100:
595 case e1000_82546_rev_3:
596 pba = E1000_PBA_48K;
597 break;
598 case e1000_82547:
599 case e1000_82547_rev_2:
600 legacy_pba_adjust = true;
601 pba = E1000_PBA_30K;
602 break;
603 case e1000_undefined:
604 case e1000_num_macs:
605 break;
606 }
607
608 if (legacy_pba_adjust) {
609 if (hw->max_frame_size > E1000_RXBUFFER_8192)
610 pba -= 8; /* allocate more FIFO for Tx */
611
612 if (hw->mac_type == e1000_82547) {
613 adapter->tx_fifo_head = 0;
614 adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
615 adapter->tx_fifo_size =
616 (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
617 atomic_set(&adapter->tx_fifo_stall, 0);
618 }
619 } else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
620 /* adjust PBA for jumbo frames */
621 ew32(PBA, pba);
622
623 /* To maintain wire speed transmits, the Tx FIFO should be
624 * large enough to accommodate two full transmit packets,
625 * rounded up to the next 1KB and expressed in KB. Likewise,
626 * the Rx FIFO should be large enough to accommodate at least
627 * one full receive packet and is similarly rounded up and
628 * expressed in KB.
629 */
630 pba = er32(PBA);
631 /* upper 16 bits has Tx packet buffer allocation size in KB */
632 tx_space = pba >> 16;
633 /* lower 16 bits has Rx packet buffer allocation size in KB */
634 pba &= 0xffff;
635 /* the Tx fifo also stores 16 bytes of information about the Tx
636 * but don't include ethernet FCS because hardware appends it
637 */
638 min_tx_space = (hw->max_frame_size +
639 sizeof(struct e1000_tx_desc) -
640 ETH_FCS_LEN) * 2;
641 min_tx_space = ALIGN(min_tx_space, 1024);
642 min_tx_space >>= 10;
643 /* software strips receive CRC, so leave room for it */
644 min_rx_space = hw->max_frame_size;
645 min_rx_space = ALIGN(min_rx_space, 1024);
646 min_rx_space >>= 10;
647
648 /* If current Tx allocation is less than the min Tx FIFO size,
649 * and the min Tx FIFO size is less than the current Rx FIFO
650 * allocation, take space away from current Rx allocation
651 */
652 if (tx_space < min_tx_space &&
653 ((min_tx_space - tx_space) < pba)) {
654 pba = pba - (min_tx_space - tx_space);
655
656 /* PCI/PCIx hardware has PBA alignment constraints */
657 switch (hw->mac_type) {
658 case e1000_82545 ... e1000_82546_rev_3:
659 pba &= ~(E1000_PBA_8K - 1);
660 break;
661 default:
662 break;
663 }
664
665 /* if short on Rx space, Rx wins and must trump Tx
666 * adjustment or use Early Receive if available
667 */
668 if (pba < min_rx_space)
669 pba = min_rx_space;
670 }
671 }
672
673 ew32(PBA, pba);
674
675 /* flow control settings:
676 * The high water mark must be low enough to fit one full frame
677 * (or the size used for early receive) above it in the Rx FIFO.
678 * Set it to the lower of:
679 * - 90% of the Rx FIFO size, and
680 * - the full Rx FIFO size minus the early receive size (for parts
681 * with ERT support assuming ERT set to E1000_ERT_2048), or
682 * - the full Rx FIFO size minus one full frame
683 */
684 hwm = min(((pba << 10) * 9 / 10),
685 ((pba << 10) - hw->max_frame_size));
686
687 hw->fc_high_water = hwm & 0xFFF8; /* 8-byte granularity */
688 hw->fc_low_water = hw->fc_high_water - 8;
689 hw->fc_pause_time = E1000_FC_PAUSE_TIME;
690 hw->fc_send_xon = 1;
691 hw->fc = hw->original_fc;
692
693 /* Allow time for pending master requests to run */
694 e1000_reset_hw(hw);
695 if (hw->mac_type >= e1000_82544)
696 ew32(WUC, 0);
697
698 if (e1000_init_hw(hw))
699 e_dev_err("Hardware Error\n");
700 e1000_update_mng_vlan(adapter);
701
702 /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
703 if (hw->mac_type >= e1000_82544 &&
704 hw->autoneg == 1 &&
705 hw->autoneg_advertised == ADVERTISE_1000_FULL) {
706 u32 ctrl = er32(CTRL);
707 /* clear phy power management bit if we are in gig only mode,
708 * which if enabled will attempt negotiation to 100Mb, which
709 * can cause a loss of link at power off or driver unload
710 */
711 ctrl &= ~E1000_CTRL_SWDPIN3;
712 ew32(CTRL, ctrl);
713 }
714
715 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
716 ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
717
718 e1000_reset_adaptive(hw);
719 e1000_phy_get_info(hw, &adapter->phy_info);
720
721 e1000_release_manageability(adapter);
722 }
723
724 /* Dump the eeprom for users having checksum issues */
725 static void e1000_dump_eeprom(struct e1000_adapter *adapter)
726 {
727 struct net_device *netdev = adapter->netdev;
728 struct ethtool_eeprom eeprom;
729 const struct ethtool_ops *ops = netdev->ethtool_ops;
730 u8 *data;
731 int i;
732 u16 csum_old, csum_new = 0;
733
734 eeprom.len = ops->get_eeprom_len(netdev);
735 eeprom.offset = 0;
736
737 data = kmalloc(eeprom.len, GFP_KERNEL);
738 if (!data)
739 return;
740
741 ops->get_eeprom(netdev, &eeprom, data);
742
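/* EEPROM words are little-endian; a valid image has words 0x00 through EEPROM_CHECKSUM_REG summing to EEPROM_SUM */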
743 csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
744 (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
745 for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
746 csum_new += data[i] + (data[i + 1] << 8);
747 csum_new = EEPROM_SUM - csum_new;
748
749 pr_err("/*********************/\n");
750 pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
751 pr_err("Calculated : 0x%04x\n", csum_new);
752
753 pr_err("Offset Values\n");
754 pr_err("======== ======\n");
755 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
756
757 pr_err("Include this output when contacting your support provider.\n");
758 pr_err("This is not a software error! Something bad happened to\n");
759 pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
760 pr_err("result in further problems, possibly loss of data,\n");
761 pr_err("corruption or system hangs!\n");
762 pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
763 pr_err("which is invalid and requires you to set the proper MAC\n");
764 pr_err("address manually before continuing to enable this network\n");
765 pr_err("device. Please inspect the EEPROM dump and report the\n");
766 pr_err("issue to your hardware vendor or Intel Customer Support.\n");
767 pr_err("/*********************/\n");
768
769 kfree(data);
770 }
771
772 /**
773 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
774 * @pdev: PCI device information struct
775 *
776 * Return true if an adapter needs ioport resources
777 **/
778 static int e1000_is_need_ioport(struct pci_dev *pdev)
779 {
780 switch (pdev->device) {
781 case E1000_DEV_ID_82540EM:
782 case E1000_DEV_ID_82540EM_LOM:
783 case E1000_DEV_ID_82540EP:
784 case E1000_DEV_ID_82540EP_LOM:
785 case E1000_DEV_ID_82540EP_LP:
786 case E1000_DEV_ID_82541EI:
787 case E1000_DEV_ID_82541EI_MOBILE:
788 case E1000_DEV_ID_82541ER:
789 case E1000_DEV_ID_82541ER_LOM:
790 case E1000_DEV_ID_82541GI:
791 case E1000_DEV_ID_82541GI_LF:
792 case E1000_DEV_ID_82541GI_MOBILE:
793 case E1000_DEV_ID_82544EI_COPPER:
794 case E1000_DEV_ID_82544EI_FIBER:
795 case E1000_DEV_ID_82544GC_COPPER:
796 case E1000_DEV_ID_82544GC_LOM:
797 case E1000_DEV_ID_82545EM_COPPER:
798 case E1000_DEV_ID_82545EM_FIBER:
799 case E1000_DEV_ID_82546EB_COPPER:
800 case E1000_DEV_ID_82546EB_FIBER:
801 case E1000_DEV_ID_82546EB_QUAD_COPPER:
802 return true;
803 default:
804 return false;
805 }
806 }
807
808 static netdev_features_t e1000_fix_features(struct net_device *netdev,
809 netdev_features_t features)
810 {
811 /* Since there is no support for separate Rx/Tx vlan accel
812 * enable/disable make sure Tx flag is always in same state as Rx.
813 */
814 if (features & NETIF_F_HW_VLAN_CTAG_RX)
815 features |= NETIF_F_HW_VLAN_CTAG_TX;
816 else
817 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
818
819 return features;
820 }
821
822 static int e1000_set_features(struct net_device *netdev,
823 netdev_features_t features)
824 {
825 struct e1000_adapter *adapter = netdev_priv(netdev);
826 netdev_features_t changed = features ^ netdev->features;
827
828 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
829 e1000_vlan_mode(netdev, features);
830
831 if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
832 return 0;
833
834 netdev->features = features;
835 adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
836
837 if (netif_running(netdev))
838 e1000_reinit_locked(adapter);
839 else
840 e1000_reset(adapter);
841
842 return 0;
843 }
844
845 static const struct net_device_ops e1000_netdev_ops = {
846 .ndo_open = e1000_open,
847 .ndo_stop = e1000_close,
848 .ndo_start_xmit = e1000_xmit_frame,
849 .ndo_get_stats = e1000_get_stats,
850 .ndo_set_rx_mode = e1000_set_rx_mode,
851 .ndo_set_mac_address = e1000_set_mac,
852 .ndo_tx_timeout = e1000_tx_timeout,
853 .ndo_change_mtu = e1000_change_mtu,
854 .ndo_do_ioctl = e1000_ioctl,
855 .ndo_validate_addr = eth_validate_addr,
856 .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
857 .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
858 #ifdef CONFIG_NET_POLL_CONTROLLER
859 .ndo_poll_controller = e1000_netpoll,
860 #endif
861 .ndo_fix_features = e1000_fix_features,
862 .ndo_set_features = e1000_set_features,
863 };
864
865 /**
866 * e1000_init_hw_struct - initialize members of hw struct
867 * @adapter: board private struct
868 * @hw: structure used by e1000_hw.c
869 *
870 * Factors out initialization of the e1000_hw struct to its own function
871 * that can be called very early at init (just after struct allocation).
872 * Fields are initialized based on PCI device information and
873 * OS network device settings (MTU size).
874 * Returns negative error codes if MAC type setup fails.
875 */
876 static int e1000_init_hw_struct(struct e1000_adapter *adapter,
877 struct e1000_hw *hw)
878 {
879 struct pci_dev *pdev = adapter->pdev;
880
881 /* PCI config space info */
882 hw->vendor_id = pdev->vendor;
883 hw->device_id = pdev->device;
884 hw->subsystem_vendor_id = pdev->subsystem_vendor;
885 hw->subsystem_id = pdev->subsystem_device;
886 hw->revision_id = pdev->revision;
887
888 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
889
890 hw->max_frame_size = adapter->netdev->mtu +
891 ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
892 hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
893
894 /* identify the MAC */
895 if (e1000_set_mac_type(hw)) {
896 e_err(probe, "Unknown MAC Type\n");
897 return -EIO;
898 }
899
900 switch (hw->mac_type) {
901 default:
902 break;
903 case e1000_82541:
904 case e1000_82547:
905 case e1000_82541_rev_2:
906 case e1000_82547_rev_2:
907 hw->phy_init_script = 1;
908 break;
909 }
910
911 e1000_set_media_type(hw);
912 e1000_get_bus_info(hw);
913
914 hw->wait_autoneg_complete = false;
915 hw->tbi_compatibility_en = true;
916 hw->adaptive_ifs = true;
917
918 /* Copper options */
919
920 if (hw->media_type == e1000_media_type_copper) {
921 hw->mdix = AUTO_ALL_MODES;
922 hw->disable_polarity_correction = false;
923 hw->master_slave = E1000_MASTER_SLAVE;
924 }
925
926 return 0;
927 }
928
929 /**
930 * e1000_probe - Device Initialization Routine
931 * @pdev: PCI device information struct
932 * @ent: entry in e1000_pci_tbl
933 *
934 * Returns 0 on success, negative on failure
935 *
936 * e1000_probe initializes an adapter identified by a pci_dev structure.
937 * The OS initialization, configuring of the adapter private structure,
938 * and a hardware reset occur.
939 **/
940 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
941 {
942 struct net_device *netdev;
943 struct e1000_adapter *adapter = NULL;
944 struct e1000_hw *hw;
945
946 static int cards_found;
947 static int global_quad_port_a; /* global ksp3 port a indication */
948 int i, err, pci_using_dac;
949 u16 eeprom_data = 0;
950 u16 tmp = 0;
951 u16 eeprom_apme_mask = E1000_EEPROM_APME;
952 int bars, need_ioport;
953 bool disable_dev = false;
954
955 /* do not allocate ioport bars when not needed */
956 need_ioport = e1000_is_need_ioport(pdev);
957 if (need_ioport) {
958 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
959 err = pci_enable_device(pdev);
960 } else {
961 bars = pci_select_bars(pdev, IORESOURCE_MEM);
962 err = pci_enable_device_mem(pdev);
963 }
964 if (err)
965 return err;
966
967 err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
968 if (err)
969 goto err_pci_reg;
970
971 pci_set_master(pdev);
972 err = pci_save_state(pdev);
973 if (err)
974 goto err_alloc_etherdev;
975
976 err = -ENOMEM;
977 netdev = alloc_etherdev(sizeof(struct e1000_adapter));
978 if (!netdev)
979 goto err_alloc_etherdev;
980
981 SET_NETDEV_DEV(netdev, &pdev->dev);
982
983 pci_set_drvdata(pdev, netdev);
984 adapter = netdev_priv(netdev);
985 adapter->netdev = netdev;
986 adapter->pdev = pdev;
987 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
988 adapter->bars = bars;
989 adapter->need_ioport = need_ioport;
990
991 hw = &adapter->hw;
992 hw->back = adapter;
993
994 err = -EIO;
995 hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
996 if (!hw->hw_addr)
997 goto err_ioremap;
998
999 if (adapter->need_ioport) {
1000 for (i = BAR_1; i <= BAR_5; i++) {
1001 if (pci_resource_len(pdev, i) == 0)
1002 continue;
1003 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
1004 hw->io_base = pci_resource_start(pdev, i);
1005 break;
1006 }
1007 }
1008 }
1009
1010 /* make ready for any if (hw->...) below */
1011 err = e1000_init_hw_struct(adapter, hw);
1012 if (err)
1013 goto err_sw_init;
1014
1015 /* there is a workaround being applied below that limits
1016 * 64-bit DMA addresses to 64-bit hardware. There are some
1017 * 32-bit adapters that hang on Tx when given 64-bit DMA addresses
1018 */
1019 pci_using_dac = 0;
1020 if ((hw->bus_type == e1000_bus_type_pcix) &&
1021 !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
1022 pci_using_dac = 1;
1023 } else {
1024 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1025 if (err) {
1026 pr_err("No usable DMA config, aborting\n");
1027 goto err_dma;
1028 }
1029 }
1030
1031 netdev->netdev_ops = &e1000_netdev_ops;
1032 e1000_set_ethtool_ops(netdev);
1033 netdev->watchdog_timeo = 5 * HZ;
1034 netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
1035
1036 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1037
1038 adapter->bd_number = cards_found;
1039
1040 /* setup the private structure */
1041
1042 err = e1000_sw_init(adapter);
1043 if (err)
1044 goto err_sw_init;
1045
1046 err = -EIO;
1047 if (hw->mac_type == e1000_ce4100) {
1048 hw->ce4100_gbe_mdio_base_virt =
1049 ioremap(pci_resource_start(pdev, BAR_1),
1050 pci_resource_len(pdev, BAR_1));
1051
1052 if (!hw->ce4100_gbe_mdio_base_virt)
1053 goto err_mdio_ioremap;
1054 }
1055
1056 if (hw->mac_type >= e1000_82543) {
1057 netdev->hw_features = NETIF_F_SG |
1058 NETIF_F_HW_CSUM |
1059 NETIF_F_HW_VLAN_CTAG_RX;
1060 netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
1061 NETIF_F_HW_VLAN_CTAG_FILTER;
1062 }
1063
1064 if ((hw->mac_type >= e1000_82544) &&
1065 (hw->mac_type != e1000_82547))
1066 netdev->hw_features |= NETIF_F_TSO;
1067
1068 netdev->priv_flags |= IFF_SUPP_NOFCS;
1069
1070 netdev->features |= netdev->hw_features;
1071 netdev->hw_features |= (NETIF_F_RXCSUM |
1072 NETIF_F_RXALL |
1073 NETIF_F_RXFCS);
1074
1075 if (pci_using_dac) {
1076 netdev->features |= NETIF_F_HIGHDMA;
1077 netdev->vlan_features |= NETIF_F_HIGHDMA;
1078 }
1079
1080 netdev->vlan_features |= (NETIF_F_TSO |
1081 NETIF_F_HW_CSUM |
1082 NETIF_F_SG);
1083
1084 /* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */
1085 if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
1086 hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
1087 netdev->priv_flags |= IFF_UNICAST_FLT;
1088
1089 adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
1090
1091 /* initialize eeprom parameters */
1092 if (e1000_init_eeprom_params(hw)) {
1093 e_err(probe, "EEPROM initialization failed\n");
1094 goto err_eeprom;
1095 }
1096
1097 /* before reading the EEPROM, reset the controller to
1098 * put the device in a known good starting state
1099 */
1100
1101 e1000_reset_hw(hw);
1102
1103 /* make sure the EEPROM is good */
1104 if (e1000_validate_eeprom_checksum(hw) < 0) {
1105 e_err(probe, "The EEPROM Checksum Is Not Valid\n");
1106 e1000_dump_eeprom(adapter);
1107 /* set MAC address to all zeroes to invalidate and temporarily
1108 * disable this device for the user. This blocks regular
1109 * traffic while still permitting ethtool ioctls from reaching
1110 * the hardware as well as allowing the user to run the
1111 * interface after manually setting a hw addr using
1112 * `ip link set address`
1113 */
1114 memset(hw->mac_addr, 0, netdev->addr_len);
1115 } else {
1116 /* copy the MAC address out of the EEPROM */
1117 if (e1000_read_mac_addr(hw))
1118 e_err(probe, "EEPROM Read Error\n");
1119 }
1120 /* don't block initialization here due to bad MAC address */
1121 memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
1122
1123 if (!is_valid_ether_addr(netdev->dev_addr))
1124 e_err(probe, "Invalid MAC Address\n");
1125
1126
1127 INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
1128 INIT_DELAYED_WORK(&adapter->fifo_stall_task,
1129 e1000_82547_tx_fifo_stall_task);
1130 INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
1131 INIT_WORK(&adapter->reset_task, e1000_reset_task);
1132
1133 e1000_check_options(adapter);
1134
1135 /* Initial Wake on LAN setting
1136 * If APM wake is enabled in the EEPROM,
1137 * enable the ACPI Magic Packet filter
1138 */
1139
1140 switch (hw->mac_type) {
1141 case e1000_82542_rev2_0:
1142 case e1000_82542_rev2_1:
1143 case e1000_82543:
1144 break;
1145 case e1000_82544:
1146 e1000_read_eeprom(hw,
1147 EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1148 eeprom_apme_mask = E1000_EEPROM_82544_APM;
1149 break;
1150 case e1000_82546:
1151 case e1000_82546_rev_3:
1152 if (er32(STATUS) & E1000_STATUS_FUNC_1) {
1153 e1000_read_eeprom(hw,
1154 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1155 break;
1156 }
1157 /* Fall Through */
1158 default:
1159 e1000_read_eeprom(hw,
1160 EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1161 break;
1162 }
1163 if (eeprom_data & eeprom_apme_mask)
1164 adapter->eeprom_wol |= E1000_WUFC_MAG;
1165
1166 /* now that we have the eeprom settings, apply the special cases
1167 * where the eeprom may be wrong or the board simply won't support
1168 * wake on lan on a particular port
1169 */
1170 switch (pdev->device) {
1171 case E1000_DEV_ID_82546GB_PCIE:
1172 adapter->eeprom_wol = 0;
1173 break;
1174 case E1000_DEV_ID_82546EB_FIBER:
1175 case E1000_DEV_ID_82546GB_FIBER:
1176 /* Wake events only supported on port A for dual fiber
1177 * regardless of eeprom setting
1178 */
1179 if (er32(STATUS) & E1000_STATUS_FUNC_1)
1180 adapter->eeprom_wol = 0;
1181 break;
1182 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1183 /* if quad port adapter, disable WoL on all but port A */
1184 if (global_quad_port_a != 0)
1185 adapter->eeprom_wol = 0;
1186 else
1187 adapter->quad_port_a = true;
1188 /* Reset for multiple quad port adapters */
1189 if (++global_quad_port_a == 4)
1190 global_quad_port_a = 0;
1191 break;
1192 }
1193
1194 /* initialize the wol settings based on the eeprom settings */
1195 adapter->wol = adapter->eeprom_wol;
1196 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1197
1198 /* Auto detect PHY address */
1199 if (hw->mac_type == e1000_ce4100) {
1200 for (i = 0; i < 32; i++) {
1201 hw->phy_addr = i;
1202 e1000_read_phy_reg(hw, PHY_ID2, &tmp);
1203
1204 if (tmp != 0 && tmp != 0xFF)
1205 break;
1206 }
1207
1208 if (i >= 32)
1209 goto err_eeprom;
1210 }
1211
1212 /* reset the hardware with the new settings */
1213 e1000_reset(adapter);
1214
1215 strcpy(netdev->name, "eth%d");
1216 err = register_netdev(netdev);
1217 if (err)
1218 goto err_register;
1219
1220 e1000_vlan_filter_on_off(adapter, false);
1221
1222 /* print bus type/speed/width info */
1223 e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
1224 ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1225 ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
1226 (hw->bus_speed == e1000_bus_speed_120) ? 120 :
1227 (hw->bus_speed == e1000_bus_speed_100) ? 100 :
1228 (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
1229 ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
1230 netdev->dev_addr);
1231
1232 /* carrier off reporting is important to ethtool even BEFORE open */
1233 netif_carrier_off(netdev);
1234
1235 e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
1236
1237 cards_found++;
1238 return 0;
1239
1240 err_register:
1241 err_eeprom:
1242 e1000_phy_hw_reset(hw);
1243
1244 if (hw->flash_address)
1245 iounmap(hw->flash_address);
1246 kfree(adapter->tx_ring);
1247 kfree(adapter->rx_ring);
1248 err_dma:
1249 err_sw_init:
1250 err_mdio_ioremap:
1251 iounmap(hw->ce4100_gbe_mdio_base_virt);
1252 iounmap(hw->hw_addr);
1253 err_ioremap:
1254 disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1255 free_netdev(netdev);
1256 err_alloc_etherdev:
1257 pci_release_selected_regions(pdev, bars);
1258 err_pci_reg:
1259 if (!adapter || disable_dev)
1260 pci_disable_device(pdev);
1261 return err;
1262 }
1263
1264 /**
1265 * e1000_remove - Device Removal Routine
1266 * @pdev: PCI device information struct
1267 *
1268 * e1000_remove is called by the PCI subsystem to alert the driver
1269 * that it should release a PCI device. That could be caused by a
1270 * Hot-Plug event, or because the driver is going to be removed from
1271 * memory.
1272 **/
1273 static void e1000_remove(struct pci_dev *pdev)
1274 {
1275 struct net_device *netdev = pci_get_drvdata(pdev);
1276 struct e1000_adapter *adapter = netdev_priv(netdev);
1277 struct e1000_hw *hw = &adapter->hw;
1278 bool disable_dev;
1279
1280 e1000_down_and_stop(adapter);
1281 e1000_release_manageability(adapter);
1282
1283 unregister_netdev(netdev);
1284
1285 e1000_phy_hw_reset(hw);
1286
1287 kfree(adapter->tx_ring);
1288 kfree(adapter->rx_ring);
1289
1290 if (hw->mac_type == e1000_ce4100)
1291 iounmap(hw->ce4100_gbe_mdio_base_virt);
1292 iounmap(hw->hw_addr);
1293 if (hw->flash_address)
1294 iounmap(hw->flash_address);
1295 pci_release_selected_regions(pdev, adapter->bars);
1296
1297 disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1298 free_netdev(netdev);
1299
1300 if (disable_dev)
1301 pci_disable_device(pdev);
1302 }
1303
1304 /**
1305 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
1306 * @adapter: board private structure to initialize
1307 *
1308 * e1000_sw_init initializes the Adapter private data structure.
1309 * e1000_init_hw_struct MUST be called before this function
1310 **/
1311 static int e1000_sw_init(struct e1000_adapter *adapter)
1312 {
1313 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1314
1315 adapter->num_tx_queues = 1;
1316 adapter->num_rx_queues = 1;
1317
1318 if (e1000_alloc_queues(adapter)) {
1319 e_err(probe, "Unable to allocate memory for queues\n");
1320 return -ENOMEM;
1321 }
1322
1323 /* Explicitly disable IRQ since the NIC can be in any state. */
1324 e1000_irq_disable(adapter);
1325
1326 spin_lock_init(&adapter->stats_lock);
1327
1328 set_bit(__E1000_DOWN, &adapter->flags);
1329
1330 return 0;
1331 }
1332
1333 /**
1334 * e1000_alloc_queues - Allocate memory for all rings
1335 * @adapter: board private structure to initialize
1336 *
1337 * We allocate one ring per queue at run-time since we don't know the
1338 * number of queues at compile-time.
1339 **/
1340 static int e1000_alloc_queues(struct e1000_adapter *adapter)
1341 {
1342 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1343 sizeof(struct e1000_tx_ring), GFP_KERNEL);
1344 if (!adapter->tx_ring)
1345 return -ENOMEM;
1346
1347 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1348 sizeof(struct e1000_rx_ring), GFP_KERNEL);
1349 if (!adapter->rx_ring) {
1350 kfree(adapter->tx_ring);
1351 return -ENOMEM;
1352 }
1353
1354 return E1000_SUCCESS;
1355 }
1356
1357 /**
1358 * e1000_open - Called when a network interface is made active
1359 * @netdev: network interface device structure
1360 *
1361 * Returns 0 on success, negative value on failure
1362 *
1363 * The open entry point is called when a network interface is made
1364 * active by the system (IFF_UP). At this point all resources needed
1365 * for transmit and receive operations are allocated, the interrupt
1366 * handler is registered with the OS, the watchdog task is started,
1367 * and the stack is notified that the interface is ready.
1368 **/
1369 int e1000_open(struct net_device *netdev)
1370 {
1371 struct e1000_adapter *adapter = netdev_priv(netdev);
1372 struct e1000_hw *hw = &adapter->hw;
1373 int err;
1374
1375 /* disallow open during test */
1376 if (test_bit(__E1000_TESTING, &adapter->flags))
1377 return -EBUSY;
1378
1379 netif_carrier_off(netdev);
1380
1381 /* allocate transmit descriptors */
1382 err = e1000_setup_all_tx_resources(adapter);
1383 if (err)
1384 goto err_setup_tx;
1385
1386 /* allocate receive descriptors */
1387 err = e1000_setup_all_rx_resources(adapter);
1388 if (err)
1389 goto err_setup_rx;
1390
1391 e1000_power_up_phy(adapter);
1392
1393 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1394 if ((hw->mng_cookie.status &
1395 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1396 e1000_update_mng_vlan(adapter);
1397 }
1398
1399 /* before we allocate an interrupt, we must be ready to handle it.
1400 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1401 * as soon as we call request_irq, so we have to set up our
1402 * clean_rx handler before we do so.
1403 */
1404 e1000_configure(adapter);
1405
1406 err = e1000_request_irq(adapter);
1407 if (err)
1408 goto err_req_irq;
1409
1410 /* From here on the code is the same as e1000_up() */
1411 clear_bit(__E1000_DOWN, &adapter->flags);
1412
1413 napi_enable(&adapter->napi);
1414
1415 e1000_irq_enable(adapter);
1416
1417 netif_start_queue(netdev);
1418
1419 /* fire a link status change interrupt to start the watchdog */
1420 ew32(ICS, E1000_ICS_LSC);
1421
1422 return E1000_SUCCESS;
1423
1424 err_req_irq:
1425 e1000_power_down_phy(adapter);
1426 e1000_free_all_rx_resources(adapter);
1427 err_setup_rx:
1428 e1000_free_all_tx_resources(adapter);
1429 err_setup_tx:
1430 e1000_reset(adapter);
1431
1432 return err;
1433 }
1434
1435 /**
1436 * e1000_close - Disables a network interface
1437 * @netdev: network interface device structure
1438 *
1439 * Returns 0, this is not allowed to fail
1440 *
1441 * The close entry point is called when an interface is de-activated
1442 * by the OS. The hardware is still under the drivers control, but
1443 * needs to be disabled. A global MAC reset is issued to stop the
1444 * hardware, and all transmit and receive resources are freed.
1445 **/
1446 int e1000_close(struct net_device *netdev)
1447 {
1448 struct e1000_adapter *adapter = netdev_priv(netdev);
1449 struct e1000_hw *hw = &adapter->hw;
1450 int count = E1000_CHECK_RESET_COUNT;
1451
1452 while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
1453 usleep_range(10000, 20000);
1454
1455 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
1456 e1000_down(adapter);
1457 e1000_power_down_phy(adapter);
1458 e1000_free_irq(adapter);
1459
1460 e1000_free_all_tx_resources(adapter);
1461 e1000_free_all_rx_resources(adapter);
1462
1463 /* kill manageability vlan ID if supported, but not if a vlan with
1464 * the same ID is registered on the host OS (let 8021q kill it)
1465 */
1466 if ((hw->mng_cookie.status &
1467 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1468 !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
1469 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
1470 adapter->mng_vlan_id);
1471 }
1472
1473 return 0;
1474 }
1475
1476 /**
1477 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1478 * @adapter: address of board private structure
1479 * @start: address of beginning of memory
1480 * @len: length of memory
1481 **/
1482 static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1483 unsigned long len)
1484 {
1485 struct e1000_hw *hw = &adapter->hw;
1486 unsigned long begin = (unsigned long)start;
1487 unsigned long end = begin + len;
1488
1489 /* First rev 82545 and 82546 need to not allow any memory
1490 * write location to cross 64k boundary due to errata 23
1491 */
1492 if (hw->mac_type == e1000_82545 ||
1493 hw->mac_type == e1000_ce4100 ||
1494 hw->mac_type == e1000_82546) {
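/* the buffer crosses a 64 kB boundary iff begin and end - 1 differ in bits 16 and up */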
1495 return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
1496 }
1497
1498 return true;
1499 }
1500
1501 /**
1502 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
1503 * @adapter: board private structure
1504 * @txdr: tx descriptor ring (for a specific queue) to setup
1505 *
1506 * Return 0 on success, negative on failure
1507 **/
1508 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1509 struct e1000_tx_ring *txdr)
1510 {
1511 struct pci_dev *pdev = adapter->pdev;
1512 int size;
1513
1514 size = sizeof(struct e1000_tx_buffer) * txdr->count;
1515 txdr->buffer_info = vzalloc(size);
1516 if (!txdr->buffer_info)
1517 return -ENOMEM;
1518
1519 /* round up to nearest 4K */
1520
1521 txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1522 txdr->size = ALIGN(txdr->size, 4096);
1523
1524 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1525 GFP_KERNEL);
1526 if (!txdr->desc) {
1527 setup_tx_desc_die:
1528 vfree(txdr->buffer_info);
1529 return -ENOMEM;
1530 }
1531
1532 /* Fix for errata 23, can't cross 64kB boundary */
1533 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1534 void *olddesc = txdr->desc;
1535 dma_addr_t olddma = txdr->dma;
1536 e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
1537 txdr->size, txdr->desc);
1538 /* Try again, without freeing the previous */
1539 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
1540 &txdr->dma, GFP_KERNEL);
1541 /* Failed allocation, critical failure */
1542 if (!txdr->desc) {
1543 dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1544 olddma);
1545 goto setup_tx_desc_die;
1546 }
1547
1548 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1549 /* give up */
1550 dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1551 txdr->dma);
1552 dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1553 olddma);
1554 e_err(probe, "Unable to allocate aligned memory "
1555 "for the transmit descriptor ring\n");
1556 vfree(txdr->buffer_info);
1557 return -ENOMEM;
1558 } else {
1559 /* Free old allocation, new allocation was successful */
1560 dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1561 olddma);
1562 }
1563 }
1564 memset(txdr->desc, 0, txdr->size);
1565
1566 txdr->next_to_use = 0;
1567 txdr->next_to_clean = 0;
1568
1569 return 0;
1570 }
1571
1572 /**
1573 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1574 * (Descriptors) for all queues
1575 * @adapter: board private structure
1576 *
1577 * Return 0 on success, negative on failure
1578 **/
1579 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1580 {
1581 int i, err = 0;
1582
1583 for (i = 0; i < adapter->num_tx_queues; i++) {
1584 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1585 if (err) {
1586 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
1587 for (i-- ; i >= 0; i--)
1588 e1000_free_tx_resources(adapter,
1589 &adapter->tx_ring[i]);
1590 break;
1591 }
1592 }
1593
1594 return err;
1595 }
1596
1597 /**
1598 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1599 * @adapter: board private structure
1600 *
1601 * Configure the Tx unit of the MAC after a reset.
1602 **/
1603 static void e1000_configure_tx(struct e1000_adapter *adapter)
1604 {
1605 u64 tdba;
1606 struct e1000_hw *hw = &adapter->hw;
1607 u32 tdlen, tctl, tipg;
1608 u32 ipgr1, ipgr2;
1609
1610 /* Setup the HW Tx Head and Tail descriptor pointers */
1611
1612 switch (adapter->num_tx_queues) {
1613 case 1:
1614 default:
1615 tdba = adapter->tx_ring[0].dma;
1616 tdlen = adapter->tx_ring[0].count *
1617 sizeof(struct e1000_tx_desc);
1618 ew32(TDLEN, tdlen);
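/* the 64-bit descriptor ring base address is programmed as separate high/low halves */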
1619 ew32(TDBAH, (tdba >> 32));
1620 ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1621 ew32(TDT, 0);
1622 ew32(TDH, 0);
1623 adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
1624 E1000_TDH : E1000_82542_TDH);
1625 adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
1626 E1000_TDT : E1000_82542_TDT);
1627 break;
1628 }
1629
1630 /* Set the default values for the Tx Inter Packet Gap timer */
1631 if ((hw->media_type == e1000_media_type_fiber ||
1632 hw->media_type == e1000_media_type_internal_serdes))
1633 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1634 else
1635 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1636
1637 switch (hw->mac_type) {
1638 case e1000_82542_rev2_0:
1639 case e1000_82542_rev2_1:
1640 tipg = DEFAULT_82542_TIPG_IPGT;
1641 ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1642 ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1643 break;
1644 default:
1645 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1646 ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1647 break;
1648 }
1649 tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1650 tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1651 ew32(TIPG, tipg);
1652
1653 /* Set the Tx Interrupt Delay register */
1654
1655 ew32(TIDV, adapter->tx_int_delay);
1656 if (hw->mac_type >= e1000_82540)
1657 ew32(TADV, adapter->tx_abs_int_delay);
1658
1659 /* Program the Transmit Control Register */
1660
1661 tctl = er32(TCTL);
1662 tctl &= ~E1000_TCTL_CT;
1663 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1664 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1665
1666 e1000_config_collision_dist(hw);
1667
1668 /* Setup Transmit Descriptor Settings for eop descriptor */
1669 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1670
1671 /* only set IDE if we are delaying interrupts using the timers */
1672 if (adapter->tx_int_delay)
1673 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1674
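/* pre-82543 MACs only support Report Packet Sent; later MACs use Report Status */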
1675 if (hw->mac_type < e1000_82543)
1676 adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1677 else
1678 adapter->txd_cmd |= E1000_TXD_CMD_RS;
1679
1680 /* Cache if we're 82544 running in PCI-X because we'll
1681 * need this to apply a workaround later in the send path.
1682 */
1683 if (hw->mac_type == e1000_82544 &&
1684 hw->bus_type == e1000_bus_type_pcix)
1685 adapter->pcix_82544 = true;
1686
1687 ew32(TCTL, tctl);
1688
1689 }
1690
1691 /**
1692 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1693 * @adapter: board private structure
1694 * @rxdr: rx descriptor ring (for a specific queue) to setup
1695 *
1696 * Returns 0 on success, negative on failure
1697 **/
1698 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1699 struct e1000_rx_ring *rxdr)
1700 {
1701 struct pci_dev *pdev = adapter->pdev;
1702 int size, desc_len;
1703
1704 size = sizeof(struct e1000_rx_buffer) * rxdr->count;
1705 rxdr->buffer_info = vzalloc(size);
1706 if (!rxdr->buffer_info)
1707 return -ENOMEM;
1708
1709 desc_len = sizeof(struct e1000_rx_desc);
1710
1711 /* Round up to nearest 4K */
1712
1713 rxdr->size = rxdr->count * desc_len;
1714 rxdr->size = ALIGN(rxdr->size, 4096);
1715
1716 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1717 GFP_KERNEL);
1718 if (!rxdr->desc) {
1719 setup_rx_desc_die:
1720 vfree(rxdr->buffer_info);
1721 return -ENOMEM;
1722 }
1723
1724 /* Fix for errata 23, can't cross 64kB boundary */
1725 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1726 void *olddesc = rxdr->desc;
1727 dma_addr_t olddma = rxdr->dma;
1728 e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
1729 rxdr->size, rxdr->desc);
1730 /* Try again, without freeing the previous */
1731 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1732 &rxdr->dma, GFP_KERNEL);
1733 /* Failed allocation, critical failure */
1734 if (!rxdr->desc) {
1735 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1736 olddma);
1737 goto setup_rx_desc_die;
1738 }
1739
1740 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1741 /* give up */
1742 dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1743 rxdr->dma);
1744 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1745 olddma);
1746 e_err(probe, "Unable to allocate aligned memory for "
1747 "the Rx descriptor ring\n");
1748 goto setup_rx_desc_die;
1749 } else {
1750 /* Free old allocation, new allocation was successful */
1751 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1752 olddma);
1753 }
1754 }
1755 memset(rxdr->desc, 0, rxdr->size);
1756
1757 rxdr->next_to_clean = 0;
1758 rxdr->next_to_use = 0;
1759 rxdr->rx_skb_top = NULL;
1760
1761 return 0;
1762 }
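/* Illustrative sketch (editor's addition, not part of the driver): the kind
 * of check that e1000_check_64k_bound presumably performs for erratum 23 --
 * a ring is acceptable only if its first and last bytes fall within the same
 * 64 KiB region. Guarded out so it is never compiled; the helper name is
 * hypothetical.
 */
#if 0
static bool example_within_64k(void *start, unsigned long len)
{
	unsigned long begin = (unsigned long)start;
	unsigned long end = begin + len - 1;

	/* same 64 KiB "page" means the buffer does not cross a boundary */
	return (begin >> 16) == (end >> 16);
}
#endif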
1763
1764 /**
1765 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1766 * (Descriptors) for all queues
1767 * @adapter: board private structure
1768 *
1769 * Return 0 on success, negative on failure
1770 **/
1771 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1772 {
1773 int i, err = 0;
1774
1775 for (i = 0; i < adapter->num_rx_queues; i++) {
1776 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1777 if (err) {
1778 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
1779 for (i-- ; i >= 0; i--)
1780 e1000_free_rx_resources(adapter,
1781 &adapter->rx_ring[i]);
1782 break;
1783 }
1784 }
1785
1786 return err;
1787 }
1788
1789 /**
1790 * e1000_setup_rctl - configure the receive control registers
1791 * @adapter: Board private structure
1792 **/
1793 static void e1000_setup_rctl(struct e1000_adapter *adapter)
1794 {
1795 struct e1000_hw *hw = &adapter->hw;
1796 u32 rctl;
1797
1798 rctl = er32(RCTL);
1799
1800 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1801
1802 rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1803 E1000_RCTL_RDMTS_HALF |
1804 (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1805
1806 if (hw->tbi_compatibility_on == 1)
1807 rctl |= E1000_RCTL_SBP;
1808 else
1809 rctl &= ~E1000_RCTL_SBP;
1810
1811 if (adapter->netdev->mtu <= ETH_DATA_LEN)
1812 rctl &= ~E1000_RCTL_LPE;
1813 else
1814 rctl |= E1000_RCTL_LPE;
1815
1816 /* Setup buffer sizes */
1817 rctl &= ~E1000_RCTL_SZ_4096;
1818 rctl |= E1000_RCTL_BSEX;
1819 switch (adapter->rx_buffer_len) {
1820 case E1000_RXBUFFER_2048:
1821 default:
1822 rctl |= E1000_RCTL_SZ_2048;
1823 rctl &= ~E1000_RCTL_BSEX;
1824 break;
1825 case E1000_RXBUFFER_4096:
1826 rctl |= E1000_RCTL_SZ_4096;
1827 break;
1828 case E1000_RXBUFFER_8192:
1829 rctl |= E1000_RCTL_SZ_8192;
1830 break;
1831 case E1000_RXBUFFER_16384:
1832 rctl |= E1000_RCTL_SZ_16384;
1833 break;
1834 }
1835
1836 /* This is useful for sniffing bad packets. */
1837 if (adapter->netdev->features & NETIF_F_RXALL) {
1838 /* UPE and MPE will be handled by normal PROMISC logic
1839 * in e1000_set_rx_mode
1840 */
1841 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
1842 E1000_RCTL_BAM | /* RX All Bcast Pkts */
1843 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
1844
1845 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
1846 E1000_RCTL_DPF | /* Allow filtered pause */
1847 E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
1848 /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
1849 * and that breaks VLANs.
1850 */
1851 }
1852
1853 ew32(RCTL, rctl);
1854 }
1855
1856 /**
1857 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1858 * @adapter: board private structure
1859 *
1860 * Configure the Rx unit of the MAC after a reset.
1861 **/
1862 static void e1000_configure_rx(struct e1000_adapter *adapter)
1863 {
1864 u64 rdba;
1865 struct e1000_hw *hw = &adapter->hw;
1866 u32 rdlen, rctl, rxcsum;
1867
1868 if (adapter->netdev->mtu > ETH_DATA_LEN) {
1869 rdlen = adapter->rx_ring[0].count *
1870 sizeof(struct e1000_rx_desc);
1871 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
1872 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1873 } else {
1874 rdlen = adapter->rx_ring[0].count *
1875 sizeof(struct e1000_rx_desc);
1876 adapter->clean_rx = e1000_clean_rx_irq;
1877 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1878 }
1879
1880 /* disable receives while setting up the descriptors */
1881 rctl = er32(RCTL);
1882 ew32(RCTL, rctl & ~E1000_RCTL_EN);
1883
1884 /* set the Receive Delay Timer Register */
1885 ew32(RDTR, adapter->rx_int_delay);
1886
1887 if (hw->mac_type >= e1000_82540) {
1888 ew32(RADV, adapter->rx_abs_int_delay);
1889 if (adapter->itr_setting != 0)
1890 ew32(ITR, 1000000000 / (adapter->itr * 256));
1891 }
1892
1893 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1894 * the Base and Length of the Rx Descriptor Ring
1895 */
1896 switch (adapter->num_rx_queues) {
1897 case 1:
1898 default:
1899 rdba = adapter->rx_ring[0].dma;
1900 ew32(RDLEN, rdlen);
1901 ew32(RDBAH, (rdba >> 32));
1902 ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1903 ew32(RDT, 0);
1904 ew32(RDH, 0);
1905 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
1906 E1000_RDH : E1000_82542_RDH);
1907 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
1908 E1000_RDT : E1000_82542_RDT);
1909 break;
1910 }
1911
1912 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
1913 if (hw->mac_type >= e1000_82543) {
1914 rxcsum = er32(RXCSUM);
1915 if (adapter->rx_csum)
1916 rxcsum |= E1000_RXCSUM_TUOFL;
1917 else
1918 /* don't need to clear IPPCSE as it defaults to 0 */
1919 rxcsum &= ~E1000_RXCSUM_TUOFL;
1920 ew32(RXCSUM, rxcsum);
1921 }
1922
1923 /* Enable Receives */
1924 ew32(RCTL, rctl | E1000_RCTL_EN);
1925 }
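/* Worked example (editor's illustration): the 64-bit descriptor base address
 * is split across two 32-bit registers as above. For a ring at DMA address
 * 0x0000000123456000, RDBAH is written with 0x00000001 (rdba >> 32) and
 * RDBAL with 0x23456000 (rdba & 0xffffffff).
 */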
1926
1927 /**
1928 * e1000_free_tx_resources - Free Tx Resources per Queue
1929 * @adapter: board private structure
1930 * @tx_ring: Tx descriptor ring for a specific queue
1931 *
1932 * Free all transmit software resources
1933 **/
1934 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1935 struct e1000_tx_ring *tx_ring)
1936 {
1937 struct pci_dev *pdev = adapter->pdev;
1938
1939 e1000_clean_tx_ring(adapter, tx_ring);
1940
1941 vfree(tx_ring->buffer_info);
1942 tx_ring->buffer_info = NULL;
1943
1944 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1945 tx_ring->dma);
1946
1947 tx_ring->desc = NULL;
1948 }
1949
1950 /**
1951 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
1952 * @adapter: board private structure
1953 *
1954 * Free all transmit software resources
1955 **/
1956 void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1957 {
1958 int i;
1959
1960 for (i = 0; i < adapter->num_tx_queues; i++)
1961 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1962 }
1963
1964 static void
1965 e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1966 struct e1000_tx_buffer *buffer_info)
1967 {
1968 if (buffer_info->dma) {
1969 if (buffer_info->mapped_as_page)
1970 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1971 buffer_info->length, DMA_TO_DEVICE);
1972 else
1973 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1974 buffer_info->length,
1975 DMA_TO_DEVICE);
1976 buffer_info->dma = 0;
1977 }
1978 if (buffer_info->skb) {
1979 dev_kfree_skb_any(buffer_info->skb);
1980 buffer_info->skb = NULL;
1981 }
1982 buffer_info->time_stamp = 0;
1983 /* buffer_info must be completely set up in the transmit path */
1984 }
1985
1986 /**
1987 * e1000_clean_tx_ring - Free Tx Buffers
1988 * @adapter: board private structure
1989 * @tx_ring: ring to be cleaned
1990 **/
1991 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
1992 struct e1000_tx_ring *tx_ring)
1993 {
1994 struct e1000_hw *hw = &adapter->hw;
1995 struct e1000_tx_buffer *buffer_info;
1996 unsigned long size;
1997 unsigned int i;
1998
1999 /* Free all the Tx ring sk_buffs */
2000
2001 for (i = 0; i < tx_ring->count; i++) {
2002 buffer_info = &tx_ring->buffer_info[i];
2003 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2004 }
2005
2006 netdev_reset_queue(adapter->netdev);
2007 size = sizeof(struct e1000_tx_buffer) * tx_ring->count;
2008 memset(tx_ring->buffer_info, 0, size);
2009
2010 /* Zero out the descriptor ring */
2011
2012 memset(tx_ring->desc, 0, tx_ring->size);
2013
2014 tx_ring->next_to_use = 0;
2015 tx_ring->next_to_clean = 0;
2016 tx_ring->last_tx_tso = false;
2017
2018 writel(0, hw->hw_addr + tx_ring->tdh);
2019 writel(0, hw->hw_addr + tx_ring->tdt);
2020 }
2021
2022 /**
2023 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2024 * @adapter: board private structure
2025 **/
2026 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2027 {
2028 int i;
2029
2030 for (i = 0; i < adapter->num_tx_queues; i++)
2031 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2032 }
2033
2034 /**
2035 * e1000_free_rx_resources - Free Rx Resources
2036 * @adapter: board private structure
2037 * @rx_ring: ring to clean the resources from
2038 *
2039 * Free all receive software resources
2040 **/
2041 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2042 struct e1000_rx_ring *rx_ring)
2043 {
2044 struct pci_dev *pdev = adapter->pdev;
2045
2046 e1000_clean_rx_ring(adapter, rx_ring);
2047
2048 vfree(rx_ring->buffer_info);
2049 rx_ring->buffer_info = NULL;
2050
2051 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2052 rx_ring->dma);
2053
2054 rx_ring->desc = NULL;
2055 }
2056
2057 /**
2058 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
2059 * @adapter: board private structure
2060 *
2061 * Free all receive software resources
2062 **/
2063 void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2064 {
2065 int i;
2066
2067 for (i = 0; i < adapter->num_rx_queues; i++)
2068 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2069 }
2070
2071 #define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
2072 static unsigned int e1000_frag_len(const struct e1000_adapter *a)
2073 {
2074 return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) +
2075 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2076 }
2077
2078 static void *e1000_alloc_frag(const struct e1000_adapter *a)
2079 {
2080 unsigned int len = e1000_frag_len(a);
2081 u8 *data = netdev_alloc_frag(len);
2082
2083 if (likely(data))
2084 data += E1000_HEADROOM;
2085 return data;
2086 }
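/* Worked example (editor's illustration, assuming a hypothetical build with
 * NET_SKB_PAD = 32, NET_IP_ALIGN = 2, 64-byte cache lines and a 320-byte
 * struct skb_shared_info): for rx_buffer_len = 2048,
 *   SKB_DATA_ALIGN(2048 + 34) = 2112 and SKB_DATA_ALIGN(320) = 320,
 * so e1000_frag_len() returns 2432 -- the receive data, the headroom and the
 * shared info all fit in one fragment, and e1000_alloc_frag() hands back a
 * pointer already advanced past the headroom.
 */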
2087
2088 /**
2089 * e1000_clean_rx_ring - Free Rx Buffers per Queue
2090 * @adapter: board private structure
2091 * @rx_ring: ring to free buffers from
2092 **/
2093 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2094 struct e1000_rx_ring *rx_ring)
2095 {
2096 struct e1000_hw *hw = &adapter->hw;
2097 struct e1000_rx_buffer *buffer_info;
2098 struct pci_dev *pdev = adapter->pdev;
2099 unsigned long size;
2100 unsigned int i;
2101
2102 /* Free all the Rx netfrags */
2103 for (i = 0; i < rx_ring->count; i++) {
2104 buffer_info = &rx_ring->buffer_info[i];
2105 if (adapter->clean_rx == e1000_clean_rx_irq) {
2106 if (buffer_info->dma)
2107 dma_unmap_single(&pdev->dev, buffer_info->dma,
2108 adapter->rx_buffer_len,
2109 DMA_FROM_DEVICE);
2110 if (buffer_info->rxbuf.data) {
2111 skb_free_frag(buffer_info->rxbuf.data);
2112 buffer_info->rxbuf.data = NULL;
2113 }
2114 } else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
2115 if (buffer_info->dma)
2116 dma_unmap_page(&pdev->dev, buffer_info->dma,
2117 adapter->rx_buffer_len,
2118 DMA_FROM_DEVICE);
2119 if (buffer_info->rxbuf.page) {
2120 put_page(buffer_info->rxbuf.page);
2121 buffer_info->rxbuf.page = NULL;
2122 }
2123 }
2124
2125 buffer_info->dma = 0;
2126 }
2127
2128 /* there also may be some cached data from a chained receive */
2129 napi_free_frags(&adapter->napi);
2130 rx_ring->rx_skb_top = NULL;
2131
2132 size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
2133 memset(rx_ring->buffer_info, 0, size);
2134
2135 /* Zero out the descriptor ring */
2136 memset(rx_ring->desc, 0, rx_ring->size);
2137
2138 rx_ring->next_to_clean = 0;
2139 rx_ring->next_to_use = 0;
2140
2141 writel(0, hw->hw_addr + rx_ring->rdh);
2142 writel(0, hw->hw_addr + rx_ring->rdt);
2143 }
2144
2145 /**
2146 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2147 * @adapter: board private structure
2148 **/
2149 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2150 {
2151 int i;
2152
2153 for (i = 0; i < adapter->num_rx_queues; i++)
2154 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2155 }
2156
2157 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2158 * and memory write and invalidate disabled for certain operations
2159 */
2160 static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2161 {
2162 struct e1000_hw *hw = &adapter->hw;
2163 struct net_device *netdev = adapter->netdev;
2164 u32 rctl;
2165
2166 e1000_pci_clear_mwi(hw);
2167
2168 rctl = er32(RCTL);
2169 rctl |= E1000_RCTL_RST;
2170 ew32(RCTL, rctl);
2171 E1000_WRITE_FLUSH();
2172 mdelay(5);
2173
2174 if (netif_running(netdev))
2175 e1000_clean_all_rx_rings(adapter);
2176 }
2177
2178 static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2179 {
2180 struct e1000_hw *hw = &adapter->hw;
2181 struct net_device *netdev = adapter->netdev;
2182 u32 rctl;
2183
2184 rctl = er32(RCTL);
2185 rctl &= ~E1000_RCTL_RST;
2186 ew32(RCTL, rctl);
2187 E1000_WRITE_FLUSH();
2188 mdelay(5);
2189
2190 if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2191 e1000_pci_set_mwi(hw);
2192
2193 if (netif_running(netdev)) {
2194 /* No need to loop, because 82542 supports only 1 queue */
2195 struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2196 e1000_configure_rx(adapter);
2197 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2198 }
2199 }
2200
2201 /**
2202 * e1000_set_mac - Change the Ethernet Address of the NIC
2203 * @netdev: network interface device structure
2204 * @p: pointer to an address structure
2205 *
2206 * Returns 0 on success, negative on failure
2207 **/
2208 static int e1000_set_mac(struct net_device *netdev, void *p)
2209 {
2210 struct e1000_adapter *adapter = netdev_priv(netdev);
2211 struct e1000_hw *hw = &adapter->hw;
2212 struct sockaddr *addr = p;
2213
2214 if (!is_valid_ether_addr(addr->sa_data))
2215 return -EADDRNOTAVAIL;
2216
2217 /* 82542 2.0 needs to be in reset to write receive address registers */
2218
2219 if (hw->mac_type == e1000_82542_rev2_0)
2220 e1000_enter_82542_rst(adapter);
2221
2222 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2223 memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2224
2225 e1000_rar_set(hw, hw->mac_addr, 0);
2226
2227 if (hw->mac_type == e1000_82542_rev2_0)
2228 e1000_leave_82542_rst(adapter);
2229
2230 return 0;
2231 }
2232
2233 /**
2234 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2235 * @netdev: network interface device structure
2236 *
2237 * The set_rx_mode entry point is called whenever the unicast or multicast
2238 * address lists or the network interface flags are updated. This routine is
2239 * responsible for configuring the hardware for proper unicast, multicast,
2240 * promiscuous mode, and all-multi behavior.
2241 **/
2242 static void e1000_set_rx_mode(struct net_device *netdev)
2243 {
2244 struct e1000_adapter *adapter = netdev_priv(netdev);
2245 struct e1000_hw *hw = &adapter->hw;
2246 struct netdev_hw_addr *ha;
2247 bool use_uc = false;
2248 u32 rctl;
2249 u32 hash_value;
2250 int i, rar_entries = E1000_RAR_ENTRIES;
2251 int mta_reg_count = E1000_NUM_MTA_REGISTERS;
2252 u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2253
2254 if (!mcarray)
2255 return;
2256
2257 /* Check for Promiscuous and All Multicast modes */
2258
2259 rctl = er32(RCTL);
2260
2261 if (netdev->flags & IFF_PROMISC) {
2262 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2263 rctl &= ~E1000_RCTL_VFE;
2264 } else {
2265 if (netdev->flags & IFF_ALLMULTI)
2266 rctl |= E1000_RCTL_MPE;
2267 else
2268 rctl &= ~E1000_RCTL_MPE;
2269 /* Enable VLAN filter if there is a VLAN */
2270 if (e1000_vlan_used(adapter))
2271 rctl |= E1000_RCTL_VFE;
2272 }
2273
2274 if (netdev_uc_count(netdev) > rar_entries - 1) {
2275 rctl |= E1000_RCTL_UPE;
2276 } else if (!(netdev->flags & IFF_PROMISC)) {
2277 rctl &= ~E1000_RCTL_UPE;
2278 use_uc = true;
2279 }
2280
2281 ew32(RCTL, rctl);
2282
2283 /* 82542 2.0 needs to be in reset to write receive address registers */
2284
2285 if (hw->mac_type == e1000_82542_rev2_0)
2286 e1000_enter_82542_rst(adapter);
2287
2288 /* load the first 14 addresses into the exact filters 1-14. Unicast
2289 * addresses take precedence to avoid disabling unicast filtering
2290 * when possible.
2291 *
2292 * RAR 0 is used for the station MAC address
2293 * if there are fewer than 14 addresses, go ahead and clear the remaining filters
2294 */
2295 i = 1;
2296 if (use_uc)
2297 netdev_for_each_uc_addr(ha, netdev) {
2298 if (i == rar_entries)
2299 break;
2300 e1000_rar_set(hw, ha->addr, i++);
2301 }
2302
2303 netdev_for_each_mc_addr(ha, netdev) {
2304 if (i == rar_entries) {
2305 /* load any remaining addresses into the hash table */
2306 u32 hash_reg, hash_bit, mta;
2307 hash_value = e1000_hash_mc_addr(hw, ha->addr);
2308 hash_reg = (hash_value >> 5) & 0x7F;
2309 hash_bit = hash_value & 0x1F;
2310 mta = (1 << hash_bit);
2311 mcarray[hash_reg] |= mta;
2312 } else {
2313 e1000_rar_set(hw, ha->addr, i++);
2314 }
2315 }
2316
2317 for (; i < rar_entries; i++) {
2318 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2319 E1000_WRITE_FLUSH();
2320 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2321 E1000_WRITE_FLUSH();
2322 }
2323
2324 /* Write the hash table completely. Writing from the bottom up avoids
2325 * problems with write-combining chipsets and the need to flush each write.
2326 */
2327 for (i = mta_reg_count - 1; i >= 0 ; i--) {
2328 /* The 82544 has an erratum where writing an odd
2329 * offset overwrites the previous even offset; writing
2330 * backwards over the range works around it by always
2331 * writing the odd offset first.
2332 */
2333 E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2334 }
2335 E1000_WRITE_FLUSH();
2336
2337 if (hw->mac_type == e1000_82542_rev2_0)
2338 e1000_leave_82542_rst(adapter);
2339
2340 kfree(mcarray);
2341 }
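/* Worked example (editor's illustration) of the multicast hash bucket
 * selection used above: for a hash_value of 0x563,
 *   hash_reg = (0x563 >> 5) & 0x7F = 0x2B  (MTA register 43 of 128)
 *   hash_bit = 0x563 & 0x1F        = 0x03
 * so bit 3 of mcarray[43] is set before the table is written back to the MTA.
 */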
2342
2343 /**
2344 * e1000_update_phy_info_task - get phy info
2345 * @work: work struct contained inside adapter struct
2346 *
2347 * Need to wait a few seconds after link up to get diagnostic information from
2348 * the phy
2349 */
2350 static void e1000_update_phy_info_task(struct work_struct *work)
2351 {
2352 struct e1000_adapter *adapter = container_of(work,
2353 struct e1000_adapter,
2354 phy_info_task.work);
2355
2356 e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2357 }
2358
2359 /**
2360 * e1000_82547_tx_fifo_stall_task - task to complete work
2361 * @work: work struct contained inside adapter struct
2362 **/
2363 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2364 {
2365 struct e1000_adapter *adapter = container_of(work,
2366 struct e1000_adapter,
2367 fifo_stall_task.work);
2368 struct e1000_hw *hw = &adapter->hw;
2369 struct net_device *netdev = adapter->netdev;
2370 u32 tctl;
2371
2372 if (atomic_read(&adapter->tx_fifo_stall)) {
2373 if ((er32(TDT) == er32(TDH)) &&
2374 (er32(TDFT) == er32(TDFH)) &&
2375 (er32(TDFTS) == er32(TDFHS))) {
2376 tctl = er32(TCTL);
2377 ew32(TCTL, tctl & ~E1000_TCTL_EN);
2378 ew32(TDFT, adapter->tx_head_addr);
2379 ew32(TDFH, adapter->tx_head_addr);
2380 ew32(TDFTS, adapter->tx_head_addr);
2381 ew32(TDFHS, adapter->tx_head_addr);
2382 ew32(TCTL, tctl);
2383 E1000_WRITE_FLUSH();
2384
2385 adapter->tx_fifo_head = 0;
2386 atomic_set(&adapter->tx_fifo_stall, 0);
2387 netif_wake_queue(netdev);
2388 } else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
2389 schedule_delayed_work(&adapter->fifo_stall_task, 1);
2390 }
2391 }
2392 }
2393
2394 bool e1000_has_link(struct e1000_adapter *adapter)
2395 {
2396 struct e1000_hw *hw = &adapter->hw;
2397 bool link_active = false;
2398
2399 /* get_link_status is set on an LSC (link status change) interrupt
2400 * or an rx sequence error interrupt (except on the Intel CE4100).
2401 * It stays set until e1000_check_for_link establishes link,
2402 * and that applies to copper adapters
2403 * ONLY
2404 */
2405 switch (hw->media_type) {
2406 case e1000_media_type_copper:
2407 if (hw->mac_type == e1000_ce4100)
2408 hw->get_link_status = 1;
2409 if (hw->get_link_status) {
2410 e1000_check_for_link(hw);
2411 link_active = !hw->get_link_status;
2412 } else {
2413 link_active = true;
2414 }
2415 break;
2416 case e1000_media_type_fiber:
2417 e1000_check_for_link(hw);
2418 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2419 break;
2420 case e1000_media_type_internal_serdes:
2421 e1000_check_for_link(hw);
2422 link_active = hw->serdes_has_link;
2423 break;
2424 default:
2425 break;
2426 }
2427
2428 return link_active;
2429 }
2430
2431 /**
2432 * e1000_watchdog - work function
2433 * @work: work struct contained inside adapter struct
2434 **/
2435 static void e1000_watchdog(struct work_struct *work)
2436 {
2437 struct e1000_adapter *adapter = container_of(work,
2438 struct e1000_adapter,
2439 watchdog_task.work);
2440 struct e1000_hw *hw = &adapter->hw;
2441 struct net_device *netdev = adapter->netdev;
2442 struct e1000_tx_ring *txdr = adapter->tx_ring;
2443 u32 link, tctl;
2444
2445 link = e1000_has_link(adapter);
2446 if ((netif_carrier_ok(netdev)) && link)
2447 goto link_up;
2448
2449 if (link) {
2450 if (!netif_carrier_ok(netdev)) {
2451 u32 ctrl;
2452 bool txb2b = true;
2453 /* update snapshot of PHY registers on LSC */
2454 e1000_get_speed_and_duplex(hw,
2455 &adapter->link_speed,
2456 &adapter->link_duplex);
2457
2458 ctrl = er32(CTRL);
2459 pr_info("%s NIC Link is Up %d Mbps %s, "
2460 "Flow Control: %s\n",
2461 netdev->name,
2462 adapter->link_speed,
2463 adapter->link_duplex == FULL_DUPLEX ?
2464 "Full Duplex" : "Half Duplex",
2465 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2466 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2467 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2468 E1000_CTRL_TFCE) ? "TX" : "None")));
2469
2470 /* adjust timeout factor according to speed/duplex */
2471 adapter->tx_timeout_factor = 1;
2472 switch (adapter->link_speed) {
2473 case SPEED_10:
2474 txb2b = false;
2475 adapter->tx_timeout_factor = 16;
2476 break;
2477 case SPEED_100:
2478 txb2b = false;
2479 /* maybe add some timeout factor ? */
2480 break;
2481 }
2482
2483 /* enable transmits in the hardware */
2484 tctl = er32(TCTL);
2485 tctl |= E1000_TCTL_EN;
2486 ew32(TCTL, tctl);
2487
2488 netif_carrier_on(netdev);
2489 if (!test_bit(__E1000_DOWN, &adapter->flags))
2490 schedule_delayed_work(&adapter->phy_info_task,
2491 2 * HZ);
2492 adapter->smartspeed = 0;
2493 }
2494 } else {
2495 if (netif_carrier_ok(netdev)) {
2496 adapter->link_speed = 0;
2497 adapter->link_duplex = 0;
2498 pr_info("%s NIC Link is Down\n",
2499 netdev->name);
2500 netif_carrier_off(netdev);
2501
2502 if (!test_bit(__E1000_DOWN, &adapter->flags))
2503 schedule_delayed_work(&adapter->phy_info_task,
2504 2 * HZ);
2505 }
2506
2507 e1000_smartspeed(adapter);
2508 }
2509
2510 link_up:
2511 e1000_update_stats(adapter);
2512
2513 hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2514 adapter->tpt_old = adapter->stats.tpt;
2515 hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2516 adapter->colc_old = adapter->stats.colc;
2517
2518 adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2519 adapter->gorcl_old = adapter->stats.gorcl;
2520 adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2521 adapter->gotcl_old = adapter->stats.gotcl;
2522
2523 e1000_update_adaptive(hw);
2524
2525 if (!netif_carrier_ok(netdev)) {
2526 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2527 /* We've lost link, so the controller stops DMA,
2528 * but we've got queued Tx work that's never going
2529 * to get done, so reset controller to flush Tx.
2530 * (Do the reset outside of interrupt context).
2531 */
2532 adapter->tx_timeout_count++;
2533 schedule_work(&adapter->reset_task);
2534 /* exit immediately since reset is imminent */
2535 return;
2536 }
2537 }
2538
2539 /* Simple mode for Interrupt Throttle Rate (ITR) */
2540 if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2541 /* Symmetric Tx/Rx gets a reduced ITR=2000;
2542 * Total asymmetrical Tx or Rx gets ITR=8000;
2543 * everyone else is between 2000-8000.
2544 */
2545 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2546 u32 dif = (adapter->gotcl > adapter->gorcl ?
2547 adapter->gotcl - adapter->gorcl :
2548 adapter->gorcl - adapter->gotcl) / 10000;
2549 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2550
2551 ew32(ITR, 1000000000 / (itr * 256));
2552 }
2553
2554 /* Cause software interrupt to ensure rx ring is cleaned */
2555 ew32(ICS, E1000_ICS_RXDMT0);
2556
2557 /* Force detection of hung controller every watchdog period */
2558 adapter->detect_tx_hung = true;
2559
2560 /* Reschedule the task */
2561 if (!test_bit(__E1000_DOWN, &adapter->flags))
2562 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
2563 }
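/* Worked example (editor's illustration) of the "simple mode" ITR math above,
 * using hypothetical byte counts for one watchdog interval: gotcl = 1,000,000
 * and gorcl = 200,000 give goc = 120 and dif = 80, so
 * itr = 80 * 6000 / 120 + 2000 = 6000 interrupts/s -- between the 2000 floor
 * for symmetric traffic and the 8000 ceiling for fully asymmetric traffic.
 * The register value written is then 1000000000 / (6000 * 256) ~= 651.
 */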
2564
2565 enum latency_range {
2566 lowest_latency = 0,
2567 low_latency = 1,
2568 bulk_latency = 2,
2569 latency_invalid = 255
2570 };
2571
2572 /**
2573 * e1000_update_itr - update the dynamic ITR value based on statistics
2574 * @adapter: pointer to adapter
2575 * @itr_setting: current adapter->itr
2576 * @packets: the number of packets during this measurement interval
2577 * @bytes: the number of bytes during this measurement interval
2578 *
2579 * Stores a new ITR value based on packets and byte
2580 * counts during the last interrupt. The advantage of per interrupt
2581 * computation is faster updates and more accurate ITR for the current
2582 * traffic pattern. Constants in this function were computed
2583 * based on theoretical maximum wire speed and thresholds were set based
2584 * on testing data as well as attempting to minimize response time
2585 * while increasing bulk throughput.
2586 * This functionality is controlled by the InterruptThrottleRate module
2587 * parameter (see e1000_param.c)
2588 **/
2589 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2590 u16 itr_setting, int packets, int bytes)
2591 {
2592 unsigned int retval = itr_setting;
2593 struct e1000_hw *hw = &adapter->hw;
2594
2595 if (unlikely(hw->mac_type < e1000_82540))
2596 goto update_itr_done;
2597
2598 if (packets == 0)
2599 goto update_itr_done;
2600
2601 switch (itr_setting) {
2602 case lowest_latency:
2603 /* jumbo frames get bulk treatment */
2604 if (bytes/packets > 8000)
2605 retval = bulk_latency;
2606 else if ((packets < 5) && (bytes > 512))
2607 retval = low_latency;
2608 break;
2609 case low_latency: /* 50 usec aka 20000 ints/s */
2610 if (bytes > 10000) {
2611 /* jumbo frames need bulk latency setting */
2612 if (bytes/packets > 8000)
2613 retval = bulk_latency;
2614 else if ((packets < 10) || ((bytes/packets) > 1200))
2615 retval = bulk_latency;
2616 else if ((packets > 35))
2617 retval = lowest_latency;
2618 } else if (bytes/packets > 2000)
2619 retval = bulk_latency;
2620 else if (packets <= 2 && bytes < 512)
2621 retval = lowest_latency;
2622 break;
2623 case bulk_latency: /* 250 usec aka 4000 ints/s */
2624 if (bytes > 25000) {
2625 if (packets > 35)
2626 retval = low_latency;
2627 } else if (bytes < 6000) {
2628 retval = low_latency;
2629 }
2630 break;
2631 }
2632
2633 update_itr_done:
2634 return retval;
2635 }
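/* Worked examples (editor's illustration) of the classification above,
 * starting from low_latency with hypothetical per-interval counts:
 *   40 packets / 50,000 bytes: bytes > 10000 and bytes/packets = 1250 > 1200,
 *     so the ring moves to bulk_latency;
 *   40 packets / 20,000 bytes: bytes/packets = 500 and packets > 35,
 *     so the ring moves to lowest_latency.
 */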
2636
2637 static void e1000_set_itr(struct e1000_adapter *adapter)
2638 {
2639 struct e1000_hw *hw = &adapter->hw;
2640 u16 current_itr;
2641 u32 new_itr = adapter->itr;
2642
2643 if (unlikely(hw->mac_type < e1000_82540))
2644 return;
2645
2646 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2647 if (unlikely(adapter->link_speed != SPEED_1000)) {
2648 current_itr = 0;
2649 new_itr = 4000;
2650 goto set_itr_now;
2651 }
2652
2653 adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
2654 adapter->total_tx_packets,
2655 adapter->total_tx_bytes);
2656 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2657 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2658 adapter->tx_itr = low_latency;
2659
2660 adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
2661 adapter->total_rx_packets,
2662 adapter->total_rx_bytes);
2663 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2664 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2665 adapter->rx_itr = low_latency;
2666
2667 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2668
2669 switch (current_itr) {
2670 /* counts and packets in update_itr are dependent on these numbers */
2671 case lowest_latency:
2672 new_itr = 70000;
2673 break;
2674 case low_latency:
2675 new_itr = 20000; /* aka hwitr = ~200 */
2676 break;
2677 case bulk_latency:
2678 new_itr = 4000;
2679 break;
2680 default:
2681 break;
2682 }
2683
2684 set_itr_now:
2685 if (new_itr != adapter->itr) {
2686 /* this attempts to bias the interrupt rate towards Bulk
2687 * by adding intermediate steps when interrupt rate is
2688 * increasing
2689 */
2690 new_itr = new_itr > adapter->itr ?
2691 min(adapter->itr + (new_itr >> 2), new_itr) :
2692 new_itr;
2693 adapter->itr = new_itr;
2694 ew32(ITR, 1000000000 / (new_itr * 256));
2695 }
2696 }
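/* Worked example (editor's illustration): the conversion above treats the
 * ITR register as the minimum inter-interrupt interval in 256 ns units, so a
 * target rate in interrupts/s becomes 1000000000 / (rate * 256). For the
 * values chosen above: 70000 -> 55 (~14 us), 20000 -> 195 (~50 us, matching
 * the "50 usec aka 20000 ints/s" note), and 4000 -> 976 (~250 us).
 */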
2697
2698 #define E1000_TX_FLAGS_CSUM 0x00000001
2699 #define E1000_TX_FLAGS_VLAN 0x00000002
2700 #define E1000_TX_FLAGS_TSO 0x00000004
2701 #define E1000_TX_FLAGS_IPV4 0x00000008
2702 #define E1000_TX_FLAGS_NO_FCS 0x00000010
2703 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
2704 #define E1000_TX_FLAGS_VLAN_SHIFT 16
2705
2706 static int e1000_tso(struct e1000_adapter *adapter,
2707 struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2708 __be16 protocol)
2709 {
2710 struct e1000_context_desc *context_desc;
2711 struct e1000_tx_buffer *buffer_info;
2712 unsigned int i;
2713 u32 cmd_length = 0;
2714 u16 ipcse = 0, tucse, mss;
2715 u8 ipcss, ipcso, tucss, tucso, hdr_len;
2716
2717 if (skb_is_gso(skb)) {
2718 int err;
2719
2720 err = skb_cow_head(skb, 0);
2721 if (err < 0)
2722 return err;
2723
2724 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2725 mss = skb_shinfo(skb)->gso_size;
2726 if (protocol == htons(ETH_P_IP)) {
2727 struct iphdr *iph = ip_hdr(skb);
2728 iph->tot_len = 0;
2729 iph->check = 0;
2730 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2731 iph->daddr, 0,
2732 IPPROTO_TCP,
2733 0);
2734 cmd_length = E1000_TXD_CMD_IP;
2735 ipcse = skb_transport_offset(skb) - 1;
2736 } else if (skb_is_gso_v6(skb)) {
2737 ipv6_hdr(skb)->payload_len = 0;
2738 tcp_hdr(skb)->check =
2739 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2740 &ipv6_hdr(skb)->daddr,
2741 0, IPPROTO_TCP, 0);
2742 ipcse = 0;
2743 }
2744 ipcss = skb_network_offset(skb);
2745 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2746 tucss = skb_transport_offset(skb);
2747 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2748 tucse = 0;
2749
2750 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2751 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2752
2753 i = tx_ring->next_to_use;
2754 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2755 buffer_info = &tx_ring->buffer_info[i];
2756
2757 context_desc->lower_setup.ip_fields.ipcss = ipcss;
2758 context_desc->lower_setup.ip_fields.ipcso = ipcso;
2759 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
2760 context_desc->upper_setup.tcp_fields.tucss = tucss;
2761 context_desc->upper_setup.tcp_fields.tucso = tucso;
2762 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2763 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
2764 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2765 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2766
2767 buffer_info->time_stamp = jiffies;
2768 buffer_info->next_to_watch = i;
2769
2770 if (++i == tx_ring->count)
2771 i = 0;
2772
2773 tx_ring->next_to_use = i;
2774
2775 return true;
2776 }
2777 return false;
2778 }
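/* Worked example (editor's illustration) of the context descriptor fields
 * above for a plain Ethernet + IPv4 + TCP frame with no options or VLAN tag:
 *   ipcss = 14 (start of the IP header), ipcso = 14 + 10 = 24 (IP checksum),
 *   ipcse = 33 (last byte of the IP header),
 *   tucss = 34 (start of the TCP header), tucso = 34 + 16 = 50 (TCP checksum),
 *   hdr_len = 34 + 20 = 54, and the payload length folded into
 *   cmd_and_length is skb->len - 54.
 */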
2779
2780 static bool e1000_tx_csum(struct e1000_adapter *adapter,
2781 struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2782 __be16 protocol)
2783 {
2784 struct e1000_context_desc *context_desc;
2785 struct e1000_tx_buffer *buffer_info;
2786 unsigned int i;
2787 u8 css;
2788 u32 cmd_len = E1000_TXD_CMD_DEXT;
2789
2790 if (skb->ip_summed != CHECKSUM_PARTIAL)
2791 return false;
2792
2793 switch (protocol) {
2794 case cpu_to_be16(ETH_P_IP):
2795 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2796 cmd_len |= E1000_TXD_CMD_TCP;
2797 break;
2798 case cpu_to_be16(ETH_P_IPV6):
2799 /* XXX not handling all IPV6 headers */
2800 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2801 cmd_len |= E1000_TXD_CMD_TCP;
2802 break;
2803 default:
2804 if (unlikely(net_ratelimit()))
2805 e_warn(drv, "checksum_partial proto=%x!\n",
2806 skb->protocol);
2807 break;
2808 }
2809
2810 css = skb_checksum_start_offset(skb);
2811
2812 i = tx_ring->next_to_use;
2813 buffer_info = &tx_ring->buffer_info[i];
2814 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2815
2816 context_desc->lower_setup.ip_config = 0;
2817 context_desc->upper_setup.tcp_fields.tucss = css;
2818 context_desc->upper_setup.tcp_fields.tucso =
2819 css + skb->csum_offset;
2820 context_desc->upper_setup.tcp_fields.tucse = 0;
2821 context_desc->tcp_seg_setup.data = 0;
2822 context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2823
2824 buffer_info->time_stamp = jiffies;
2825 buffer_info->next_to_watch = i;
2826
2827 if (unlikely(++i == tx_ring->count))
2828 i = 0;
2829
2830 tx_ring->next_to_use = i;
2831
2832 return true;
2833 }
2834
2835 #define E1000_MAX_TXD_PWR 12
2836 #define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
2837
2838 static int e1000_tx_map(struct e1000_adapter *adapter,
2839 struct e1000_tx_ring *tx_ring,
2840 struct sk_buff *skb, unsigned int first,
2841 unsigned int max_per_txd, unsigned int nr_frags,
2842 unsigned int mss)
2843 {
2844 struct e1000_hw *hw = &adapter->hw;
2845 struct pci_dev *pdev = adapter->pdev;
2846 struct e1000_tx_buffer *buffer_info;
2847 unsigned int len = skb_headlen(skb);
2848 unsigned int offset = 0, size, count = 0, i;
2849 unsigned int f, bytecount, segs;
2850
2851 i = tx_ring->next_to_use;
2852
2853 while (len) {
2854 buffer_info = &tx_ring->buffer_info[i];
2855 size = min(len, max_per_txd);
2856 /* Workaround for a controller erratum --
2857 * the descriptor for a non-TSO packet in a linear skb that follows
2858 * a TSO packet gets written back prematurely, before the data is
2859 * fully DMA'd to the controller.
2860 */
2861 if (!skb->data_len && tx_ring->last_tx_tso &&
2862 !skb_is_gso(skb)) {
2863 tx_ring->last_tx_tso = false;
2864 size -= 4;
2865 }
2866
2867 /* Workaround for premature desc write-backs
2868 * in TSO mode. Append 4-byte sentinel desc
2869 */
2870 if (unlikely(mss && !nr_frags && size == len && size > 8))
2871 size -= 4;
2872 /* Workaround for erratum 10, which applies
2873 * to all controllers in PCI-X mode.
2874 * The fix is to make sure that the first descriptor of a
2875 * packet is smaller than 2048 - 16 - 16 (i.e. 2016) bytes.
2876 */
2877 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
2878 (size > 2015) && count == 0))
2879 size = 2015;
2880
2881 /* Workaround for potential 82544 hang in PCI-X. Avoid
2882 * terminating buffers within evenly-aligned dwords.
2883 */
2884 if (unlikely(adapter->pcix_82544 &&
2885 !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2886 size > 4))
2887 size -= 4;
2888
2889 buffer_info->length = size;
2890 /* set time_stamp *before* dma to help avoid a possible race */
2891 buffer_info->time_stamp = jiffies;
2892 buffer_info->mapped_as_page = false;
2893 buffer_info->dma = dma_map_single(&pdev->dev,
2894 skb->data + offset,
2895 size, DMA_TO_DEVICE);
2896 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2897 goto dma_error;
2898 buffer_info->next_to_watch = i;
2899
2900 len -= size;
2901 offset += size;
2902 count++;
2903 if (len) {
2904 i++;
2905 if (unlikely(i == tx_ring->count))
2906 i = 0;
2907 }
2908 }
2909
2910 for (f = 0; f < nr_frags; f++) {
2911 const struct skb_frag_struct *frag;
2912
2913 frag = &skb_shinfo(skb)->frags[f];
2914 len = skb_frag_size(frag);
2915 offset = 0;
2916
2917 while (len) {
2918 unsigned long bufend;
2919 i++;
2920 if (unlikely(i == tx_ring->count))
2921 i = 0;
2922
2923 buffer_info = &tx_ring->buffer_info[i];
2924 size = min(len, max_per_txd);
2925 /* Workaround for premature desc write-backs
2926 * in TSO mode. Append 4-byte sentinel desc
2927 */
2928 if (unlikely(mss && f == (nr_frags-1) &&
2929 size == len && size > 8))
2930 size -= 4;
2931 /* Workaround for potential 82544 hang in PCI-X.
2932 * Avoid terminating buffers within evenly-aligned
2933 * dwords.
2934 */
2935 bufend = (unsigned long)
2936 page_to_phys(skb_frag_page(frag));
2937 bufend += offset + size - 1;
2938 if (unlikely(adapter->pcix_82544 &&
2939 !(bufend & 4) &&
2940 size > 4))
2941 size -= 4;
2942
2943 buffer_info->length = size;
2944 buffer_info->time_stamp = jiffies;
2945 buffer_info->mapped_as_page = true;
2946 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
2947 offset, size, DMA_TO_DEVICE);
2948 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2949 goto dma_error;
2950 buffer_info->next_to_watch = i;
2951
2952 len -= size;
2953 offset += size;
2954 count++;
2955 }
2956 }
2957
2958 segs = skb_shinfo(skb)->gso_segs ?: 1;
2959 /* multiply data chunks by size of headers */
2960 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
2961
2962 tx_ring->buffer_info[i].skb = skb;
2963 tx_ring->buffer_info[i].segs = segs;
2964 tx_ring->buffer_info[i].bytecount = bytecount;
2965 tx_ring->buffer_info[first].next_to_watch = i;
2966
2967 return count;
2968
2969 dma_error:
2970 dev_err(&pdev->dev, "TX DMA map failed\n");
2971 buffer_info->dma = 0;
2972 if (count)
2973 count--;
2974
2975 while (count--) {
2976 if (i == 0)
2977 i += tx_ring->count;
2978 i--;
2979 buffer_info = &tx_ring->buffer_info[i];
2980 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2981 }
2982
2983 return 0;
2984 }
2985
2986 static void e1000_tx_queue(struct e1000_adapter *adapter,
2987 struct e1000_tx_ring *tx_ring, int tx_flags,
2988 int count)
2989 {
2990 struct e1000_tx_desc *tx_desc = NULL;
2991 struct e1000_tx_buffer *buffer_info;
2992 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
2993 unsigned int i;
2994
2995 if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
2996 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2997 E1000_TXD_CMD_TSE;
2998 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2999
3000 if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
3001 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
3002 }
3003
3004 if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
3005 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3006 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3007 }
3008
3009 if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
3010 txd_lower |= E1000_TXD_CMD_VLE;
3011 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
3012 }
3013
3014 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3015 txd_lower &= ~(E1000_TXD_CMD_IFCS);
3016
3017 i = tx_ring->next_to_use;
3018
3019 while (count--) {
3020 buffer_info = &tx_ring->buffer_info[i];
3021 tx_desc = E1000_TX_DESC(*tx_ring, i);
3022 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3023 tx_desc->lower.data =
3024 cpu_to_le32(txd_lower | buffer_info->length);
3025 tx_desc->upper.data = cpu_to_le32(txd_upper);
3026 if (unlikely(++i == tx_ring->count))
3027 i = 0;
3028 }
3029
3030 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3031
3032 /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
3033 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3034 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
3035
3036 /* Force memory writes to complete before letting h/w
3037 * know there are new descriptors to fetch. (Only
3038 * applicable for weak-ordered memory model archs,
3039 * such as IA-64).
3040 */
3041 wmb();
3042
3043 tx_ring->next_to_use = i;
3044 }
3045
3046 /* 82547 workaround to avoid controller hang in half-duplex environment.
3047 * The workaround is to avoid queuing a large packet that would span
3048 * the internal Tx FIFO ring boundary by notifying the stack to resend
3049 * the packet at a later time. This gives the Tx FIFO an opportunity to
3050 * flush all packets. When that occurs, we reset the Tx FIFO pointers
3051 * to the beginning of the Tx FIFO.
3052 */
3053
3054 #define E1000_FIFO_HDR 0x10
3055 #define E1000_82547_PAD_LEN 0x3E0
3056
3057 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3058 struct sk_buff *skb)
3059 {
3060 u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3061 u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3062
3063 skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3064
3065 if (adapter->link_duplex != HALF_DUPLEX)
3066 goto no_fifo_stall_required;
3067
3068 if (atomic_read(&adapter->tx_fifo_stall))
3069 return 1;
3070
3071 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3072 atomic_set(&adapter->tx_fifo_stall, 1);
3073 return 1;
3074 }
3075
3076 no_fifo_stall_required:
3077 adapter->tx_fifo_head += skb_fifo_len;
3078 if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3079 adapter->tx_fifo_head -= adapter->tx_fifo_size;
3080 return 0;
3081 }
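/* Worked example (editor's illustration): for a 1514-byte frame,
 * skb_fifo_len = ALIGN(1514 + 16, 16) = 1536. With E1000_82547_PAD_LEN of
 * 0x3E0 (992 bytes), the queue is stalled whenever 1536 >= 992 + fifo_space,
 * i.e. whenever fewer than 545 bytes remain before the FIFO wrap point;
 * otherwise tx_fifo_head simply advances by 1536 (modulo tx_fifo_size).
 */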
3082
3083 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3084 {
3085 struct e1000_adapter *adapter = netdev_priv(netdev);
3086 struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3087
3088 netif_stop_queue(netdev);
3089 /* Herbert's original patch had:
3090 * smp_mb__after_netif_stop_queue();
3091 * but since that doesn't exist yet, just open code it.
3092 */
3093 smp_mb();
3094
3095 /* We need to check again in a case another CPU has just
3096 * made room available.
3097 */
3098 if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3099 return -EBUSY;
3100
3101 /* A reprieve! */
3102 netif_start_queue(netdev);
3103 ++adapter->restart_queue;
3104 return 0;
3105 }
3106
3107 static int e1000_maybe_stop_tx(struct net_device *netdev,
3108 struct e1000_tx_ring *tx_ring, int size)
3109 {
3110 if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3111 return 0;
3112 return __e1000_maybe_stop_tx(netdev, size);
3113 }
3114
3115 #define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X))
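/* Worked example (editor's illustration): TXD_USE_COUNT(S, X) computes
 * ceil(S / 2^X). With E1000_MAX_TXD_PWR = 12 (4096 bytes per descriptor),
 * a 1514-byte linear buffer needs TXD_USE_COUNT(1514, 12) = 1 descriptor
 * and a 9018-byte jumbo buffer needs TXD_USE_COUNT(9018, 12) = 3.
 */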
3116 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3117 struct net_device *netdev)
3118 {
3119 struct e1000_adapter *adapter = netdev_priv(netdev);
3120 struct e1000_hw *hw = &adapter->hw;
3121 struct e1000_tx_ring *tx_ring;
3122 unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3123 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3124 unsigned int tx_flags = 0;
3125 unsigned int len = skb_headlen(skb);
3126 unsigned int nr_frags;
3127 unsigned int mss;
3128 int count = 0;
3129 int tso;
3130 unsigned int f;
3131 __be16 protocol = vlan_get_protocol(skb);
3132
3133 /* This goes back to the question of how to logically map a Tx queue
3134 * to a flow. Right now, performance is impacted slightly negatively
3135 * if using multiple Tx queues. If the stack breaks away from a
3136 * single qdisc implementation, we can look at this again.
3137 */
3138 tx_ring = adapter->tx_ring;
3139
3140 /* On PCI/PCI-X HW, if the packet size is less than ETH_ZLEN,
3141 * packets may get corrupted during padding by the HW.
3142 * To work around this issue, pad all small packets manually.
3143 */
3144 if (eth_skb_pad(skb))
3145 return NETDEV_TX_OK;
3146
3147 mss = skb_shinfo(skb)->gso_size;
3148 /* The controller does a simple calculation to
3149 * make sure there is enough room in the FIFO before
3150 * initiating the DMA for each buffer. The calculation
3151 * assumes 4 = ceil(buffer len/mss). To make sure we don't
3152 * overrun the FIFO, adjust the max buffer len if the mss
3153 * drops.
3154 */
3155 if (mss) {
3156 u8 hdr_len;
3157 max_per_txd = min(mss << 2, max_per_txd);
3158 max_txd_pwr = fls(max_per_txd) - 1;
3159
3160 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3161 if (skb->data_len && hdr_len == len) {
3162 switch (hw->mac_type) {
3163 unsigned int pull_size;
3164 case e1000_82544:
3165 /* Make sure we have room to chop off 4 bytes,
3166 * and that the end alignment will work out to
3167 * this hardware's requirements.
3168 * NOTE: this is a TSO-only workaround;
3169 * if the end byte alignment is not correct, it moves us
3170 * into the next dword.
3171 */
3172 if ((unsigned long)(skb_tail_pointer(skb) - 1)
3173 & 4)
3174 break;
3175 /* fall through */
3176 pull_size = min((unsigned int)4, skb->data_len);
3177 if (!__pskb_pull_tail(skb, pull_size)) {
3178 e_err(drv, "__pskb_pull_tail "
3179 "failed.\n");
3180 dev_kfree_skb_any(skb);
3181 return NETDEV_TX_OK;
3182 }
3183 len = skb_headlen(skb);
3184 break;
3185 default:
3186 /* do nothing */
3187 break;
3188 }
3189 }
3190 }
3191
3192 /* reserve a descriptor for the offload context */
3193 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3194 count++;
3195 count++;
3196
3197 /* Controller Erratum workaround */
3198 if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3199 count++;
3200
3201 count += TXD_USE_COUNT(len, max_txd_pwr);
3202
3203 if (adapter->pcix_82544)
3204 count++;
3205
3206 /* Workaround for erratum 10, which applies to all controllers
3207 * in PCI-X mode, so add one more descriptor to the count.
3208 */
3209 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3210 (len > 2015)))
3211 count++;
3212
3213 nr_frags = skb_shinfo(skb)->nr_frags;
3214 for (f = 0; f < nr_frags; f++)
3215 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
3216 max_txd_pwr);
3217 if (adapter->pcix_82544)
3218 count += nr_frags;
3219
3220 /* need: count + 2 desc gap to keep tail from touching
3221 * head, otherwise try next time
3222 */
3223 if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3224 return NETDEV_TX_BUSY;
3225
3226 if (unlikely((hw->mac_type == e1000_82547) &&
3227 (e1000_82547_fifo_workaround(adapter, skb)))) {
3228 netif_stop_queue(netdev);
3229 if (!test_bit(__E1000_DOWN, &adapter->flags))
3230 schedule_delayed_work(&adapter->fifo_stall_task, 1);
3231 return NETDEV_TX_BUSY;
3232 }
3233
3234 if (skb_vlan_tag_present(skb)) {
3235 tx_flags |= E1000_TX_FLAGS_VLAN;
3236 tx_flags |= (skb_vlan_tag_get(skb) <<
3237 E1000_TX_FLAGS_VLAN_SHIFT);
3238 }
3239
3240 first = tx_ring->next_to_use;
3241
3242 tso = e1000_tso(adapter, tx_ring, skb, protocol);
3243 if (tso < 0) {
3244 dev_kfree_skb_any(skb);
3245 return NETDEV_TX_OK;
3246 }
3247
3248 if (likely(tso)) {
3249 if (likely(hw->mac_type != e1000_82544))
3250 tx_ring->last_tx_tso = true;
3251 tx_flags |= E1000_TX_FLAGS_TSO;
3252 } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol)))
3253 tx_flags |= E1000_TX_FLAGS_CSUM;
3254
3255 if (protocol == htons(ETH_P_IP))
3256 tx_flags |= E1000_TX_FLAGS_IPV4;
3257
3258 if (unlikely(skb->no_fcs))
3259 tx_flags |= E1000_TX_FLAGS_NO_FCS;
3260
3261 count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3262 nr_frags, mss);
3263
3264 if (count) {
3265 /* The number of descriptors needed is higher than in other Intel
3266 * drivers due to a number of workarounds. The breakdown is below:
3267 * Data descriptors: MAX_SKB_FRAGS + 1
3268 * Context Descriptor: 1
3269 * Keep head from touching tail: 2
3270 * Workarounds: 3
3271 */
3272 int desc_needed = MAX_SKB_FRAGS + 7;
3273
3274 netdev_sent_queue(netdev, skb->len);
3275 skb_tx_timestamp(skb);
3276
3277 e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3278
3279 /* 82544 potentially requires twice as many data descriptors
3280 * in order to guarantee buffers don't end on evenly-aligned
3281 * dwords
3282 */
3283 if (adapter->pcix_82544)
3284 desc_needed += MAX_SKB_FRAGS + 1;
3285
3286 /* Make sure there is space in the ring for the next send. */
3287 e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);
3288
3289 if (!skb->xmit_more ||
3290 netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
3291 writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
3292 /* we need this if more than one processor can write to
3293 * our tail at a time, it synchronizes IO on IA64/Altix
3294 * systems
3295 */
3296 mmiowb();
3297 }
3298 } else {
3299 dev_kfree_skb_any(skb);
3300 tx_ring->buffer_info[first].time_stamp = 0;
3301 tx_ring->next_to_use = first;
3302 }
3303
3304 return NETDEV_TX_OK;
3305 }
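/* Worked arithmetic (editor's illustration) for the desc_needed budget
 * above: MAX_SKB_FRAGS + 1 data descriptors, 1 context descriptor, 2 of
 * head/tail gap and 3 for workarounds add up to MAX_SKB_FRAGS + 7; on a
 * typical build with MAX_SKB_FRAGS = 17 that is 24 descriptors. Another
 * MAX_SKB_FRAGS + 1 are reserved for the 82544 PCI-X case, which can need
 * twice as many data descriptors.
 */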
3306
3307 #define NUM_REGS 38 /* 1 based count */
3308 static void e1000_regdump(struct e1000_adapter *adapter)
3309 {
3310 struct e1000_hw *hw = &adapter->hw;
3311 u32 regs[NUM_REGS];
3312 u32 *regs_buff = regs;
3313 int i = 0;
3314
3315 static const char * const reg_name[] = {
3316 "CTRL", "STATUS",
3317 "RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3318 "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3319 "TIDV", "TXDCTL", "TADV", "TARC0",
3320 "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3321 "TXDCTL1", "TARC1",
3322 "CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3323 "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3324 "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
3325 };
3326
3327 regs_buff[0] = er32(CTRL);
3328 regs_buff[1] = er32(STATUS);
3329
3330 regs_buff[2] = er32(RCTL);
3331 regs_buff[3] = er32(RDLEN);
3332 regs_buff[4] = er32(RDH);
3333 regs_buff[5] = er32(RDT);
3334 regs_buff[6] = er32(RDTR);
3335
3336 regs_buff[7] = er32(TCTL);
3337 regs_buff[8] = er32(TDBAL);
3338 regs_buff[9] = er32(TDBAH);
3339 regs_buff[10] = er32(TDLEN);
3340 regs_buff[11] = er32(TDH);
3341 regs_buff[12] = er32(TDT);
3342 regs_buff[13] = er32(TIDV);
3343 regs_buff[14] = er32(TXDCTL);
3344 regs_buff[15] = er32(TADV);
3345 regs_buff[16] = er32(TARC0);
3346
3347 regs_buff[17] = er32(TDBAL1);
3348 regs_buff[18] = er32(TDBAH1);
3349 regs_buff[19] = er32(TDLEN1);
3350 regs_buff[20] = er32(TDH1);
3351 regs_buff[21] = er32(TDT1);
3352 regs_buff[22] = er32(TXDCTL1);
3353 regs_buff[23] = er32(TARC1);
3354 regs_buff[24] = er32(CTRL_EXT);
3355 regs_buff[25] = er32(ERT);
3356 regs_buff[26] = er32(RDBAL0);
3357 regs_buff[27] = er32(RDBAH0);
3358 regs_buff[28] = er32(TDFH);
3359 regs_buff[29] = er32(TDFT);
3360 regs_buff[30] = er32(TDFHS);
3361 regs_buff[31] = er32(TDFTS);
3362 regs_buff[32] = er32(TDFPC);
3363 regs_buff[33] = er32(RDFH);
3364 regs_buff[34] = er32(RDFT);
3365 regs_buff[35] = er32(RDFHS);
3366 regs_buff[36] = er32(RDFTS);
3367 regs_buff[37] = er32(RDFPC);
3368
3369 pr_info("Register dump\n");
3370 for (i = 0; i < NUM_REGS; i++)
3371 pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]);
3372 }
3373
3374 /*
3375 * e1000_dump: Print registers, tx ring and rx ring
3376 */
3377 static void e1000_dump(struct e1000_adapter *adapter)
3378 {
3379 /* this code doesn't handle multiple rings */
3380 struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3381 struct e1000_rx_ring *rx_ring = adapter->rx_ring;
3382 int i;
3383
3384 if (!netif_msg_hw(adapter))
3385 return;
3386
3387 /* Print Registers */
3388 e1000_regdump(adapter);
3389
3390 /* transmit dump */
3391 pr_info("TX Desc ring0 dump\n");
3392
3393 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
3394 *
3395 * Legacy Transmit Descriptor
3396 * +--------------------------------------------------------------+
3397 * 0 | Buffer Address [63:0] (Reserved on Write Back) |
3398 * +--------------------------------------------------------------+
3399 * 8 | Special | CSS | Status | CMD | CSO | Length |
3400 * +--------------------------------------------------------------+
3401 * 63 48 47 36 35 32 31 24 23 16 15 0
3402 *
3403 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
3404 * 63 48 47 40 39 32 31 16 15 8 7 0
3405 * +----------------------------------------------------------------+
3406 * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS |
3407 * +----------------------------------------------------------------+
3408 * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN |
3409 * +----------------------------------------------------------------+
3410 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
3411 *
3412 * Extended Data Descriptor (DTYP=0x1)
3413 * +----------------------------------------------------------------+
3414 * 0 | Buffer Address [63:0] |
3415 * +----------------------------------------------------------------+
3416 * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN |
3417 * +----------------------------------------------------------------+
3418 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
3419 */
3420 pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestmp bi->skb\n");
3421 pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestmp bi->skb\n");
3422
3423 if (!netif_msg_tx_done(adapter))
3424 goto rx_ring_summary;
3425
3426 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3427 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3428 struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i];
3429 struct my_u { __le64 a; __le64 b; };
3430 struct my_u *u = (struct my_u *)tx_desc;
3431 const char *type;
3432
3433 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
3434 type = "NTC/U";
3435 else if (i == tx_ring->next_to_use)
3436 type = "NTU";
3437 else if (i == tx_ring->next_to_clean)
3438 type = "NTC";
3439 else
3440 type = "";
3441
3442 pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p %s\n",
3443 ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
3444 le64_to_cpu(u->a), le64_to_cpu(u->b),
3445 (u64)buffer_info->dma, buffer_info->length,
3446 buffer_info->next_to_watch,
3447 (u64)buffer_info->time_stamp, buffer_info->skb, type);
3448 }
3449
3450 rx_ring_summary:
3451 /* receive dump */
3452 pr_info("\nRX Desc ring dump\n");
3453
3454 /* Legacy Receive Descriptor Format
3455 *
3456 * +-----------------------------------------------------+
3457 * | Buffer Address [63:0] |
3458 * +-----------------------------------------------------+
3459 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
3460 * +-----------------------------------------------------+
3461 * 63 48 47 40 39 32 31 16 15 0
3462 */
3463 pr_info("R[desc] [address 63:0 ] [vl er S cks ln] [bi->dma ] [bi->skb]\n");
3464
3465 if (!netif_msg_rx_status(adapter))
3466 goto exit;
3467
3468 for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3469 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3470 struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i];
3471 struct my_u { __le64 a; __le64 b; };
3472 struct my_u *u = (struct my_u *)rx_desc;
3473 const char *type;
3474
3475 if (i == rx_ring->next_to_use)
3476 type = "NTU";
3477 else if (i == rx_ring->next_to_clean)
3478 type = "NTC";
3479 else
3480 type = "";
3481
3482 pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n",
3483 i, le64_to_cpu(u->a), le64_to_cpu(u->b),
3484 (u64)buffer_info->dma, buffer_info->rxbuf.data, type);
3485 } /* for */
3486
3487 /* dump the descriptor caches */
3488 /* rx */
3489 pr_info("Rx descriptor cache in 64bit format\n");
3490 for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
3491 pr_info("R%04X: %08X|%08X %08X|%08X\n",
3492 i,
3493 readl(adapter->hw.hw_addr + i+4),
3494 readl(adapter->hw.hw_addr + i),
3495 readl(adapter->hw.hw_addr + i+12),
3496 readl(adapter->hw.hw_addr + i+8));
3497 }
3498 /* tx */
3499 pr_info("Tx descriptor cache in 64bit format\n");
3500 for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
3501 pr_info("T%04X: %08X|%08X %08X|%08X\n",
3502 i,
3503 readl(adapter->hw.hw_addr + i+4),
3504 readl(adapter->hw.hw_addr + i),
3505 readl(adapter->hw.hw_addr + i+12),
3506 readl(adapter->hw.hw_addr + i+8));
3507 }
3508 exit:
3509 return;
3510 }
3511
3512 /**
3513 * e1000_tx_timeout - Respond to a Tx Hang
3514 * @netdev: network interface device structure
3515 **/
3516 static void e1000_tx_timeout(struct net_device *netdev)
3517 {
3518 struct e1000_adapter *adapter = netdev_priv(netdev);
3519
3520 /* Do the reset outside of interrupt context */
3521 adapter->tx_timeout_count++;
3522 schedule_work(&adapter->reset_task);
3523 }
3524
3525 static void e1000_reset_task(struct work_struct *work)
3526 {
3527 struct e1000_adapter *adapter =
3528 container_of(work, struct e1000_adapter, reset_task);
3529
3530 e_err(drv, "Reset adapter\n");
3531 e1000_reinit_locked(adapter);
3532 }
3533
3534 /**
3535 * e1000_get_stats - Get System Network Statistics
3536 * @netdev: network interface device structure
3537 *
3538 * Returns the address of the device statistics structure.
3539 * The statistics are actually updated from the watchdog.
3540 **/
3541 static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
3542 {
3543 /* only return the current stats */
3544 return &netdev->stats;
3545 }
3546
3547 /**
3548 * e1000_change_mtu - Change the Maximum Transfer Unit
3549 * @netdev: network interface device structure
3550 * @new_mtu: new value for maximum frame size
3551 *
3552 * Returns 0 on success, negative on failure
3553 **/
3554 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3555 {
3556 struct e1000_adapter *adapter = netdev_priv(netdev);
3557 struct e1000_hw *hw = &adapter->hw;
3558 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
3559
3560 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3561 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3562 e_err(probe, "Invalid MTU setting\n");
3563 return -EINVAL;
3564 }
3565
3566 /* Adapter-specific max frame size limits. */
3567 switch (hw->mac_type) {
3568 case e1000_undefined ... e1000_82542_rev2_1:
3569 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3570 e_err(probe, "Jumbo Frames not supported.\n");
3571 return -EINVAL;
3572 }
3573 break;
3574 default:
3575 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3576 break;
3577 }
3578
3579 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3580 msleep(1);
3581 /* e1000_down has a dependency on max_frame_size */
3582 hw->max_frame_size = max_frame;
3583 if (netif_running(netdev)) {
3584 /* prevent buffers from being reallocated */
3585 adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
3586 e1000_down(adapter);
3587 }
3588
3589 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3590 * means we reserve 2 more; this pushes us to allocate from the next
3591 * larger slab size.
3592 * i.e. RXBUFFER_2048 --> size-4096 slab
3593 * however with the new *_jumbo_rx* routines, jumbo receives will use
3594 * fragmented skbs
3595 */
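/* Illustrative arithmetic for the note above: a nominal 2048-byte
 * receive buffer really costs 2048 + 16 (reserved) + 2 (NET_IP_ALIGN)
 * = 2066 bytes, which no longer fits the 2048-byte slab, so the
 * allocation is served from the next larger (4096-byte) slab.
 */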
3596
3597 if (max_frame <= E1000_RXBUFFER_2048)
3598 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3599 else
3600 #if (PAGE_SIZE >= E1000_RXBUFFER_16384)
3601 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3602 #elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3603 adapter->rx_buffer_len = PAGE_SIZE;
3604 #endif
3605
3606 /* adjust allocation if LPE protects us, and we aren't using SBP */
3607 if (!hw->tbi_compatibility_on &&
3608 ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
3609 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3610 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3611
3612 pr_info("%s changing MTU from %d to %d\n",
3613 netdev->name, netdev->mtu, new_mtu);
3614 netdev->mtu = new_mtu;
3615
3616 if (netif_running(netdev))
3617 e1000_up(adapter);
3618 else
3619 e1000_reset(adapter);
3620
3621 clear_bit(__E1000_RESETTING, &adapter->flags);
3622
3623 return 0;
3624 }
3625
3626 /**
3627 * e1000_update_stats - Update the board statistics counters
3628 * @adapter: board private structure
3629 **/
3630 void e1000_update_stats(struct e1000_adapter *adapter)
3631 {
3632 struct net_device *netdev = adapter->netdev;
3633 struct e1000_hw *hw = &adapter->hw;
3634 struct pci_dev *pdev = adapter->pdev;
3635 unsigned long flags;
3636 u16 phy_tmp;
3637
3638 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3639
3640 /* Prevent stats update while adapter is being reset, or if the pci
3641 * connection is down.
3642 */
3643 if (adapter->link_speed == 0)
3644 return;
3645 if (pci_channel_offline(pdev))
3646 return;
3647
3648 spin_lock_irqsave(&adapter->stats_lock, flags);
3649
3650 /* these counters are modified from e1000_tbi_adjust_stats,
3651 * called from the interrupt context, so they must only
3652 * be written while holding adapter->stats_lock
3653 */
3654
3655 adapter->stats.crcerrs += er32(CRCERRS);
3656 adapter->stats.gprc += er32(GPRC);
3657 adapter->stats.gorcl += er32(GORCL);
3658 adapter->stats.gorch += er32(GORCH);
3659 adapter->stats.bprc += er32(BPRC);
3660 adapter->stats.mprc += er32(MPRC);
3661 adapter->stats.roc += er32(ROC);
3662
3663 adapter->stats.prc64 += er32(PRC64);
3664 adapter->stats.prc127 += er32(PRC127);
3665 adapter->stats.prc255 += er32(PRC255);
3666 adapter->stats.prc511 += er32(PRC511);
3667 adapter->stats.prc1023 += er32(PRC1023);
3668 adapter->stats.prc1522 += er32(PRC1522);
3669
3670 adapter->stats.symerrs += er32(SYMERRS);
3671 adapter->stats.mpc += er32(MPC);
3672 adapter->stats.scc += er32(SCC);
3673 adapter->stats.ecol += er32(ECOL);
3674 adapter->stats.mcc += er32(MCC);
3675 adapter->stats.latecol += er32(LATECOL);
3676 adapter->stats.dc += er32(DC);
3677 adapter->stats.sec += er32(SEC);
3678 adapter->stats.rlec += er32(RLEC);
3679 adapter->stats.xonrxc += er32(XONRXC);
3680 adapter->stats.xontxc += er32(XONTXC);
3681 adapter->stats.xoffrxc += er32(XOFFRXC);
3682 adapter->stats.xofftxc += er32(XOFFTXC);
3683 adapter->stats.fcruc += er32(FCRUC);
3684 adapter->stats.gptc += er32(GPTC);
3685 adapter->stats.gotcl += er32(GOTCL);
3686 adapter->stats.gotch += er32(GOTCH);
3687 adapter->stats.rnbc += er32(RNBC);
3688 adapter->stats.ruc += er32(RUC);
3689 adapter->stats.rfc += er32(RFC);
3690 adapter->stats.rjc += er32(RJC);
3691 adapter->stats.torl += er32(TORL);
3692 adapter->stats.torh += er32(TORH);
3693 adapter->stats.totl += er32(TOTL);
3694 adapter->stats.toth += er32(TOTH);
3695 adapter->stats.tpr += er32(TPR);
3696
3697 adapter->stats.ptc64 += er32(PTC64);
3698 adapter->stats.ptc127 += er32(PTC127);
3699 adapter->stats.ptc255 += er32(PTC255);
3700 adapter->stats.ptc511 += er32(PTC511);
3701 adapter->stats.ptc1023 += er32(PTC1023);
3702 adapter->stats.ptc1522 += er32(PTC1522);
3703
3704 adapter->stats.mptc += er32(MPTC);
3705 adapter->stats.bptc += er32(BPTC);
3706
3707 /* used for adaptive IFS */
3708
3709 hw->tx_packet_delta = er32(TPT);
3710 adapter->stats.tpt += hw->tx_packet_delta;
3711 hw->collision_delta = er32(COLC);
3712 adapter->stats.colc += hw->collision_delta;
3713
3714 if (hw->mac_type >= e1000_82543) {
3715 adapter->stats.algnerrc += er32(ALGNERRC);
3716 adapter->stats.rxerrc += er32(RXERRC);
3717 adapter->stats.tncrs += er32(TNCRS);
3718 adapter->stats.cexterr += er32(CEXTERR);
3719 adapter->stats.tsctc += er32(TSCTC);
3720 adapter->stats.tsctfc += er32(TSCTFC);
3721 }
3722
3723 /* Fill out the OS statistics structure */
3724 netdev->stats.multicast = adapter->stats.mprc;
3725 netdev->stats.collisions = adapter->stats.colc;
3726
3727 /* Rx Errors */
3728
3729 /* RLEC on some newer hardware can be incorrect so build
3730 * our own version based on RUC and ROC
3731 */
3732 netdev->stats.rx_errors = adapter->stats.rxerrc +
3733 adapter->stats.crcerrs + adapter->stats.algnerrc +
3734 adapter->stats.ruc + adapter->stats.roc +
3735 adapter->stats.cexterr;
3736 adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3737 netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3738 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3739 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3740 netdev->stats.rx_missed_errors = adapter->stats.mpc;
3741
3742 /* Tx Errors */
3743 adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3744 netdev->stats.tx_errors = adapter->stats.txerrc;
3745 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3746 netdev->stats.tx_window_errors = adapter->stats.latecol;
3747 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3748 if (hw->bad_tx_carr_stats_fd &&
3749 adapter->link_duplex == FULL_DUPLEX) {
3750 netdev->stats.tx_carrier_errors = 0;
3751 adapter->stats.tncrs = 0;
3752 }
3753
3754 /* Tx Dropped needs to be maintained elsewhere */
3755
3756 /* Phy Stats */
3757 if (hw->media_type == e1000_media_type_copper) {
3758 if ((adapter->link_speed == SPEED_1000) &&
3759 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3760 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3761 adapter->phy_stats.idle_errors += phy_tmp;
3762 }
3763
3764 if ((hw->mac_type <= e1000_82546) &&
3765 (hw->phy_type == e1000_phy_m88) &&
3766 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3767 adapter->phy_stats.receive_errors += phy_tmp;
3768 }
3769
3770 /* Management Stats */
3771 if (hw->has_smbus) {
3772 adapter->stats.mgptc += er32(MGTPTC);
3773 adapter->stats.mgprc += er32(MGTPRC);
3774 adapter->stats.mgpdc += er32(MGTPDC);
3775 }
3776
3777 spin_unlock_irqrestore(&adapter->stats_lock, flags);
3778 }
3779
3780 /**
3781 * e1000_intr - Interrupt Handler
3782 * @irq: interrupt number
3783 * @data: pointer to a network interface device structure
3784 **/
3785 static irqreturn_t e1000_intr(int irq, void *data)
3786 {
3787 struct net_device *netdev = data;
3788 struct e1000_adapter *adapter = netdev_priv(netdev);
3789 struct e1000_hw *hw = &adapter->hw;
3790 u32 icr = er32(ICR);
3791
3792 if (unlikely((!icr)))
3793 return IRQ_NONE; /* Not our interrupt */
3794
3795 /* we might have caused the interrupt, but the above
3796 * read cleared it, and just in case the driver is
3797 * down, there is nothing to do, so return handled
3798 */
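/* Note (illustrative): ICR is a read-to-clear register on this
 * hardware, so the single er32(ICR) above both identifies the
 * interrupt cause and acknowledges it; no separate clear is needed.
 */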
3799 if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3800 return IRQ_HANDLED;
3801
3802 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3803 hw->get_link_status = 1;
3804 /* guard against interrupt when we're going down */
3805 if (!test_bit(__E1000_DOWN, &adapter->flags))
3806 schedule_delayed_work(&adapter->watchdog_task, 1);
3807 }
3808
3809 /* disable interrupts, without the synchronize_irq bit */
3810 ew32(IMC, ~0);
3811 E1000_WRITE_FLUSH();
3812
3813 if (likely(napi_schedule_prep(&adapter->napi))) {
3814 adapter->total_tx_bytes = 0;
3815 adapter->total_tx_packets = 0;
3816 adapter->total_rx_bytes = 0;
3817 adapter->total_rx_packets = 0;
3818 __napi_schedule(&adapter->napi);
3819 } else {
3820 /* this really should not happen! if it does it is basically a
3821 * bug, but not a hard error, so enable ints and continue
3822 */
3823 if (!test_bit(__E1000_DOWN, &adapter->flags))
3824 e1000_irq_enable(adapter);
3825 }
3826
3827 return IRQ_HANDLED;
3828 }
3829
3830 /**
3831 * e1000_clean - NAPI Rx polling callback
3832 * @napi: napi struct embedded in the board private structure
 * @budget: maximum number of packets this poll may clean
3833 **/
3834 static int e1000_clean(struct napi_struct *napi, int budget)
3835 {
3836 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
3837 napi);
3838 int tx_clean_complete = 0, work_done = 0;
3839
3840 tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3841
3842 adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3843
3844 if (!tx_clean_complete)
3845 work_done = budget;
3846
3847 /* If budget not fully consumed, exit the polling mode */
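/* Per the NAPI contract, consuming less than the full budget tells
 * the core we are done polling: napi_complete_done() below removes
 * us from the poll list and interrupts are then re-enabled.
 */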
3848 if (work_done < budget) {
3849 if (likely(adapter->itr_setting & 3))
3850 e1000_set_itr(adapter);
3851 napi_complete_done(napi, work_done);
3852 if (!test_bit(__E1000_DOWN, &adapter->flags))
3853 e1000_irq_enable(adapter);
3854 }
3855
3856 return work_done;
3857 }
3858
3859 /**
3860 * e1000_clean_tx_irq - Reclaim resources after transmit completes
3861 * @adapter: board private structure
 * @tx_ring: tx ring to clean
3862 **/
3863 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3864 struct e1000_tx_ring *tx_ring)
3865 {
3866 struct e1000_hw *hw = &adapter->hw;
3867 struct net_device *netdev = adapter->netdev;
3868 struct e1000_tx_desc *tx_desc, *eop_desc;
3869 struct e1000_tx_buffer *buffer_info;
3870 unsigned int i, eop;
3871 unsigned int count = 0;
3872 unsigned int total_tx_bytes = 0, total_tx_packets = 0;
3873 unsigned int bytes_compl = 0, pkts_compl = 0;
3874
3875 i = tx_ring->next_to_clean;
3876 eop = tx_ring->buffer_info[i].next_to_watch;
3877 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3878
3879 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3880 (count < tx_ring->count)) {
3881 bool cleaned = false;
3882 dma_rmb(); /* read buffer_info after eop_desc */
3883 for ( ; !cleaned; count++) {
3884 tx_desc = E1000_TX_DESC(*tx_ring, i);
3885 buffer_info = &tx_ring->buffer_info[i];
3886 cleaned = (i == eop);
3887
3888 if (cleaned) {
3889 total_tx_packets += buffer_info->segs;
3890 total_tx_bytes += buffer_info->bytecount;
3891 if (buffer_info->skb) {
3892 bytes_compl += buffer_info->skb->len;
3893 pkts_compl++;
3894 }
3895
3896 }
3897 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3898 tx_desc->upper.data = 0;
3899
3900 if (unlikely(++i == tx_ring->count))
3901 i = 0;
3902 }
3903
3904 eop = tx_ring->buffer_info[i].next_to_watch;
3905 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3906 }
3907
3908 /* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame,
3909 * which will reuse the cleaned buffers.
3910 */
3911 smp_store_release(&tx_ring->next_to_clean, i);
3912
3913 netdev_completed_queue(netdev, pkts_compl, bytes_compl);
3914
3915 #define TX_WAKE_THRESHOLD 32
3916 if (unlikely(count && netif_carrier_ok(netdev) &&
3917 E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3918 /* Make sure that anybody stopping the queue after this
3919 * sees the new next_to_clean.
3920 */
3921 smp_mb();
3922
3923 if (netif_queue_stopped(netdev) &&
3924 !(test_bit(__E1000_DOWN, &adapter->flags))) {
3925 netif_wake_queue(netdev);
3926 ++adapter->restart_queue;
3927 }
3928 }
3929
3930 if (adapter->detect_tx_hung) {
3931 /* Detect a transmit hang in hardware; this serializes the
3932 * check with the clearing of time_stamp and movement of i
3933 */
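/* Heuristic used below: declare a hang if the descriptor at
 * next_to_watch has not been cleaned (its time_stamp is still set)
 * after more than tx_timeout_factor * HZ jiffies, and the
 * transmitter is not merely paused by flow control (TXOFF clear).
 */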
3934 adapter->detect_tx_hung = false;
3935 if (tx_ring->buffer_info[eop].time_stamp &&
3936 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3937 (adapter->tx_timeout_factor * HZ)) &&
3938 !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3939
3940 /* detected Tx unit hang */
3941 e_err(drv, "Detected Tx Unit Hang\n"
3942 " Tx Queue <%lu>\n"
3943 " TDH <%x>\n"
3944 " TDT <%x>\n"
3945 " next_to_use <%x>\n"
3946 " next_to_clean <%x>\n"
3947 "buffer_info[next_to_clean]\n"
3948 " time_stamp <%lx>\n"
3949 " next_to_watch <%x>\n"
3950 " jiffies <%lx>\n"
3951 " next_to_watch.status <%x>\n",
3952 (unsigned long)(tx_ring - adapter->tx_ring),
3953 readl(hw->hw_addr + tx_ring->tdh),
3954 readl(hw->hw_addr + tx_ring->tdt),
3955 tx_ring->next_to_use,
3956 tx_ring->next_to_clean,
3957 tx_ring->buffer_info[eop].time_stamp,
3958 eop,
3959 jiffies,
3960 eop_desc->upper.fields.status);
3961 e1000_dump(adapter);
3962 netif_stop_queue(netdev);
3963 }
3964 }
3965 adapter->total_tx_bytes += total_tx_bytes;
3966 adapter->total_tx_packets += total_tx_packets;
3967 netdev->stats.tx_bytes += total_tx_bytes;
3968 netdev->stats.tx_packets += total_tx_packets;
3969 return count < tx_ring->count;
3970 }
3971
3972 /**
3973 * e1000_rx_checksum - Receive Checksum Offload for 82543
3974 * @adapter: board private structure
3975 * @status_err: receive descriptor status and error fields
3976 * @csum: receive descriptor csum field
3977 * @skb: socket buffer with received data
3978 **/
3979 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3980 u32 csum, struct sk_buff *skb)
3981 {
3982 struct e1000_hw *hw = &adapter->hw;
3983 u16 status = (u16)status_err;
3984 u8 errors = (u8)(status_err >> 24);
3985
3986 skb_checksum_none_assert(skb);
3987
3988 /* 82543 or newer only */
3989 if (unlikely(hw->mac_type < e1000_82543))
3990 return;
3991 /* Ignore Checksum bit is set */
3992 if (unlikely(status & E1000_RXD_STAT_IXSM))
3993 return;
3994 /* TCP/UDP checksum error bit is set */
3995 if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3996 /* let the stack verify checksum errors */
3997 adapter->hw_csum_err++;
3998 return;
3999 }
4000 /* TCP/UDP Checksum has not been calculated */
4001 if (!(status & E1000_RXD_STAT_TCPCS))
4002 return;
4003
4004 /* It must be a TCP or UDP packet with a valid checksum */
4005 if (likely(status & E1000_RXD_STAT_TCPCS)) {
4006 /* TCP checksum is good */
4007 skb->ip_summed = CHECKSUM_UNNECESSARY;
4008 }
4009 adapter->hw_csum_good++;
4010 }
4011
4012 /**
4013 * e1000_consume_page - helper function for jumbo Rx path
 * @bi: receive buffer that owns the page being consumed
 * @skb: socket buffer the page is attached to
 * @length: number of bytes placed in this buffer
4014 **/
4015 static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb,
4016 u16 length)
4017 {
4018 bi->rxbuf.page = NULL;
4019 skb->len += length;
4020 skb->data_len += length;
4021 skb->truesize += PAGE_SIZE;
4022 }
4023
4024 /**
4025 * e1000_receive_skb - helper function to handle rx indications
4026 * @adapter: board private structure
4027 * @status: descriptor status field as written by hardware
4028 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
4029 * @skb: pointer to sk_buff to be indicated to stack
4030 */
4031 static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
4032 __le16 vlan, struct sk_buff *skb)
4033 {
4034 skb->protocol = eth_type_trans(skb, adapter->netdev);
4035
4036 if (status & E1000_RXD_STAT_VP) {
4037 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4038
4039 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4040 }
4041 napi_gro_receive(&adapter->napi, skb);
4042 }
4043
4044 /**
4045 * e1000_tbi_adjust_stats
4046 * @hw: Struct containing variables accessed by shared code
 * @stats: statistics counters to be adjusted
4047 * @frame_len: The length of the frame in question
4048 * @mac_addr: The Ethernet destination address of the frame in question
4049 *
4050 * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
4051 */
4052 static void e1000_tbi_adjust_stats(struct e1000_hw *hw,
4053 struct e1000_hw_stats *stats,
4054 u32 frame_len, const u8 *mac_addr)
4055 {
4056 u64 carry_bit;
4057
4058 /* First adjust the frame length. */
4059 frame_len--;
4060 /* We need to adjust the statistics counters, since the hardware
4061 * counters overcount this packet as a CRC error and undercount
4062 * the packet as a good packet
4063 */
4064 /* This packet should not be counted as a CRC error. */
4065 stats->crcerrs--;
4066 /* This packet does count as a Good Packet Received. */
4067 stats->gprc++;
4068
4069 /* Adjust the Good Octets received counters */
4070 carry_bit = 0x80000000 & stats->gorcl;
4071 stats->gorcl += frame_len;
4072 /* If the high bit of Gorcl (the low 32 bits of the Good Octets
4073 * Received Count) was one before the addition,
4074 * AND it is zero after, then we lost the carry out,
4075 * need to add one to Gorch (Good Octets Received Count High).
4076 * This could be simplified if all environments supported
4077 * 64-bit integers.
4078 */
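/* Example (illustrative): gorcl = 0xFFFFFFF0 with frame_len = 0x20
 * wraps to 0x00000010; the high bit was set before the addition and
 * clear after it, so the lost carry is added to gorch below.
 */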
4079 if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
4080 stats->gorch++;
4081 /* Is this a broadcast or multicast? Check broadcast first,
4082 * since the test for a multicast frame will test positive on
4083 * a broadcast frame.
4084 */
4085 if (is_broadcast_ether_addr(mac_addr))
4086 stats->bprc++;
4087 else if (is_multicast_ether_addr(mac_addr))
4088 stats->mprc++;
4089
4090 if (frame_len == hw->max_frame_size) {
4091 /* In this case, the hardware has overcounted the number of
4092 * oversize frames.
4093 */
4094 if (stats->roc > 0)
4095 stats->roc--;
4096 }
4097
4098 /* Adjust the bin counters when the extra byte put the frame in the
4099 * wrong bin. Remember that the frame_len was adjusted above.
4100 */
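/* e.g. an adjusted frame_len of 64 means the hardware saw 65 bytes
 * and counted the frame in the 65-127 bin (prc127); it is moved
 * back into the 64-byte bin (prc64) here.
 */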
4101 if (frame_len == 64) {
4102 stats->prc64++;
4103 stats->prc127--;
4104 } else if (frame_len == 127) {
4105 stats->prc127++;
4106 stats->prc255--;
4107 } else if (frame_len == 255) {
4108 stats->prc255++;
4109 stats->prc511--;
4110 } else if (frame_len == 511) {
4111 stats->prc511++;
4112 stats->prc1023--;
4113 } else if (frame_len == 1023) {
4114 stats->prc1023++;
4115 stats->prc1522--;
4116 } else if (frame_len == 1522) {
4117 stats->prc1522++;
4118 }
4119 }
4120
4121 static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
4122 u8 status, u8 errors,
4123 u32 length, const u8 *data)
4124 {
4125 struct e1000_hw *hw = &adapter->hw;
4126 u8 last_byte = *(data + length - 1);
4127
4128 if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
4129 unsigned long irq_flags;
4130
4131 spin_lock_irqsave(&adapter->stats_lock, irq_flags);
4132 e1000_tbi_adjust_stats(hw, &adapter->stats, length, data);
4133 spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
4134
4135 return true;
4136 }
4137
4138 return false;
4139 }
4140
4141 static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
4142 unsigned int bufsz)
4143 {
4144 struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz);
4145
4146 if (unlikely(!skb))
4147 adapter->alloc_rx_buff_failed++;
4148 return skb;
4149 }
4150
4151 /**
4152 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
4153 * @adapter: board private structure
4154 * @rx_ring: ring to clean
4155 * @work_done: amount of napi work completed this call
4156 * @work_to_do: max amount of work allowed for this call to do
4157 *
4158 * the return value indicates whether actual cleaning was done; there
4159 * is no guarantee that everything was cleaned
4160 */
4161 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4162 struct e1000_rx_ring *rx_ring,
4163 int *work_done, int work_to_do)
4164 {
4165 struct net_device *netdev = adapter->netdev;
4166 struct pci_dev *pdev = adapter->pdev;
4167 struct e1000_rx_desc *rx_desc, *next_rxd;
4168 struct e1000_rx_buffer *buffer_info, *next_buffer;
4169 u32 length;
4170 unsigned int i;
4171 int cleaned_count = 0;
4172 bool cleaned = false;
4173 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4174
4175 i = rx_ring->next_to_clean;
4176 rx_desc = E1000_RX_DESC(*rx_ring, i);
4177 buffer_info = &rx_ring->buffer_info[i];
4178
4179 while (rx_desc->status & E1000_RXD_STAT_DD) {
4180 struct sk_buff *skb;
4181 u8 status;
4182
4183 if (*work_done >= work_to_do)
4184 break;
4185 (*work_done)++;
4186 dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4187
4188 status = rx_desc->status;
4189
4190 if (++i == rx_ring->count)
4191 i = 0;
4192
4193 next_rxd = E1000_RX_DESC(*rx_ring, i);
4194 prefetch(next_rxd);
4195
4196 next_buffer = &rx_ring->buffer_info[i];
4197
4198 cleaned = true;
4199 cleaned_count++;
4200 dma_unmap_page(&pdev->dev, buffer_info->dma,
4201 adapter->rx_buffer_len, DMA_FROM_DEVICE);
4202 buffer_info->dma = 0;
4203
4204 length = le16_to_cpu(rx_desc->length);
4205
4206 /* errors is only valid for DD + EOP descriptors */
4207 if (unlikely((status & E1000_RXD_STAT_EOP) &&
4208 (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
4209 u8 *mapped = page_address(buffer_info->rxbuf.page);
4210
4211 if (e1000_tbi_should_accept(adapter, status,
4212 rx_desc->errors,
4213 length, mapped)) {
4214 length--;
4215 } else if (netdev->features & NETIF_F_RXALL) {
4216 goto process_skb;
4217 } else {
4218 /* an error means any chain goes out the window
4219 * too
4220 */
4221 if (rx_ring->rx_skb_top)
4222 dev_kfree_skb(rx_ring->rx_skb_top);
4223 rx_ring->rx_skb_top = NULL;
4224 goto next_desc;
4225 }
4226 }
4227
4228 #define rxtop rx_ring->rx_skb_top
4229 process_skb:
4230 if (!(status & E1000_RXD_STAT_EOP)) {
4231 /* this descriptor is only the beginning (or middle) */
4232 if (!rxtop) {
4233 /* this is the beginning of a chain */
4234 rxtop = napi_get_frags(&adapter->napi);
4235 if (!rxtop)
4236 break;
4237
4238 skb_fill_page_desc(rxtop, 0,
4239 buffer_info->rxbuf.page,
4240 0, length);
4241 } else {
4242 /* this is the middle of a chain */
4243 skb_fill_page_desc(rxtop,
4244 skb_shinfo(rxtop)->nr_frags,
4245 buffer_info->rxbuf.page, 0, length);
4246 }
4247 e1000_consume_page(buffer_info, rxtop, length);
4248 goto next_desc;
4249 } else {
4250 if (rxtop) {
4251 /* end of the chain */
4252 skb_fill_page_desc(rxtop,
4253 skb_shinfo(rxtop)->nr_frags,
4254 buffer_info->rxbuf.page, 0, length);
4255 skb = rxtop;
4256 rxtop = NULL;
4257 e1000_consume_page(buffer_info, skb, length);
4258 } else {
4259 struct page *p;
4260 /* no chain, got EOP: this buf is the whole packet;
4261 * use copybreak to save the put_page/alloc_page
4262 */
4263 p = buffer_info->rxbuf.page;
4264 if (length <= copybreak) {
4265 u8 *vaddr;
4266
4267 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4268 length -= 4;
4269 skb = e1000_alloc_rx_skb(adapter,
4270 length);
4271 if (!skb)
4272 break;
4273
4274 vaddr = kmap_atomic(p);
4275 memcpy(skb_tail_pointer(skb), vaddr,
4276 length);
4277 kunmap_atomic(vaddr);
4278 /* re-use the page, so don't erase
4279 * buffer_info->rxbuf.page
4280 */
4281 skb_put(skb, length);
4282 e1000_rx_checksum(adapter,
4283 status | rx_desc->errors << 24,
4284 le16_to_cpu(rx_desc->csum), skb);
4285
4286 total_rx_bytes += skb->len;
4287 total_rx_packets++;
4288
4289 e1000_receive_skb(adapter, status,
4290 rx_desc->special, skb);
4291 goto next_desc;
4292 } else {
4293 skb = napi_get_frags(&adapter->napi);
4294 if (!skb) {
4295 adapter->alloc_rx_buff_failed++;
4296 break;
4297 }
4298 skb_fill_page_desc(skb, 0, p, 0,
4299 length);
4300 e1000_consume_page(buffer_info, skb,
4301 length);
4302 }
4303 }
4304 }
4305
4306 /* Receive Checksum Offload XXX recompute due to CRC strip? */
4307 e1000_rx_checksum(adapter,
4308 (u32)(status) |
4309 ((u32)(rx_desc->errors) << 24),
4310 le16_to_cpu(rx_desc->csum), skb);
4311
4312 total_rx_bytes += (skb->len - 4); /* don't count FCS */
4313 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4314 pskb_trim(skb, skb->len - 4);
4315 total_rx_packets++;
4316
4317 if (status & E1000_RXD_STAT_VP) {
4318 __le16 vlan = rx_desc->special;
4319 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4320
4321 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4322 }
4323
4324 napi_gro_frags(&adapter->napi);
4325
4326 next_desc:
4327 rx_desc->status = 0;
4328
4329 /* return some buffers to hardware, one at a time is too slow */
4330 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4331 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4332 cleaned_count = 0;
4333 }
4334
4335 /* use prefetched values */
4336 rx_desc = next_rxd;
4337 buffer_info = next_buffer;
4338 }
4339 rx_ring->next_to_clean = i;
4340
4341 cleaned_count = E1000_DESC_UNUSED(rx_ring);
4342 if (cleaned_count)
4343 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4344
4345 adapter->total_rx_packets += total_rx_packets;
4346 adapter->total_rx_bytes += total_rx_bytes;
4347 netdev->stats.rx_bytes += total_rx_bytes;
4348 netdev->stats.rx_packets += total_rx_packets;
4349 return cleaned;
4350 }
4351
4352 /* this should improve performance for small packets with large amounts
4353 * of reassembly being done in the stack
4354 */
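/* copybreak is a byte-count threshold: frames no longer than it are
 * copied into a small freshly allocated skb so the original DMA
 * buffer stays mapped and can be handed straight back to the ring.
 */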
4355 static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter,
4356 struct e1000_rx_buffer *buffer_info,
4357 u32 length, const void *data)
4358 {
4359 struct sk_buff *skb;
4360
4361 if (length > copybreak)
4362 return NULL;
4363
4364 skb = e1000_alloc_rx_skb(adapter, length);
4365 if (!skb)
4366 return NULL;
4367
4368 dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma,
4369 length, DMA_FROM_DEVICE);
4370
4371 memcpy(skb_put(skb, length), data, length);
4372
4373 return skb;
4374 }
4375
4376 /**
4377 * e1000_clean_rx_irq - Send received data up the network stack; legacy
4378 * @adapter: board private structure
4379 * @rx_ring: ring to clean
4380 * @work_done: amount of napi work completed this call
4381 * @work_to_do: max amount of work allowed for this call to do
4382 */
4383 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4384 struct e1000_rx_ring *rx_ring,
4385 int *work_done, int work_to_do)
4386 {
4387 struct net_device *netdev = adapter->netdev;
4388 struct pci_dev *pdev = adapter->pdev;
4389 struct e1000_rx_desc *rx_desc, *next_rxd;
4390 struct e1000_rx_buffer *buffer_info, *next_buffer;
4391 u32 length;
4392 unsigned int i;
4393 int cleaned_count = 0;
4394 bool cleaned = false;
4395 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4396
4397 i = rx_ring->next_to_clean;
4398 rx_desc = E1000_RX_DESC(*rx_ring, i);
4399 buffer_info = &rx_ring->buffer_info[i];
4400
4401 while (rx_desc->status & E1000_RXD_STAT_DD) {
4402 struct sk_buff *skb;
4403 u8 *data;
4404 u8 status;
4405
4406 if (*work_done >= work_to_do)
4407 break;
4408 (*work_done)++;
4409 dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4410
4411 status = rx_desc->status;
4412 length = le16_to_cpu(rx_desc->length);
4413
4414 data = buffer_info->rxbuf.data;
4415 prefetch(data);
4416 skb = e1000_copybreak(adapter, buffer_info, length, data);
4417 if (!skb) {
4418 unsigned int frag_len = e1000_frag_len(adapter);
4419
4420 skb = build_skb(data - E1000_HEADROOM, frag_len);
4421 if (!skb) {
4422 adapter->alloc_rx_buff_failed++;
4423 break;
4424 }
4425
4426 skb_reserve(skb, E1000_HEADROOM);
4427 dma_unmap_single(&pdev->dev, buffer_info->dma,
4428 adapter->rx_buffer_len,
4429 DMA_FROM_DEVICE);
4430 buffer_info->dma = 0;
4431 buffer_info->rxbuf.data = NULL;
4432 }
4433
4434 if (++i == rx_ring->count)
4435 i = 0;
4436
4437 next_rxd = E1000_RX_DESC(*rx_ring, i);
4438 prefetch(next_rxd);
4439
4440 next_buffer = &rx_ring->buffer_info[i];
4441
4442 cleaned = true;
4443 cleaned_count++;
4444
4445 /* !EOP means multiple descriptors were used to store a single
4446 * packet; if that's the case we need to toss it. In fact, we
4447 * need to toss every packet with the EOP bit clear and the
4448 * next frame that _does_ have the EOP bit set, as it is by
4449 * definition only a frame fragment
4450 */
4451 if (unlikely(!(status & E1000_RXD_STAT_EOP)))
4452 adapter->discarding = true;
4453
4454 if (adapter->discarding) {
4455 /* All receives must fit into a single buffer */
4456 netdev_dbg(netdev, "Receive packet consumed multiple buffers\n");
4457 dev_kfree_skb(skb);
4458 if (status & E1000_RXD_STAT_EOP)
4459 adapter->discarding = false;
4460 goto next_desc;
4461 }
4462
4463 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4464 if (e1000_tbi_should_accept(adapter, status,
4465 rx_desc->errors,
4466 length, data)) {
4467 length--;
4468 } else if (netdev->features & NETIF_F_RXALL) {
4469 goto process_skb;
4470 } else {
4471 dev_kfree_skb(skb);
4472 goto next_desc;
4473 }
4474 }
4475
4476 process_skb:
4477 total_rx_bytes += (length - 4); /* don't count FCS */
4478 total_rx_packets++;
4479
4480 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4481 /* adjust length to remove Ethernet CRC, this must be
4482 * done after the TBI_ACCEPT workaround above
4483 */
4484 length -= 4;
4485
4486 if (buffer_info->rxbuf.data == NULL)
4487 skb_put(skb, length);
4488 else /* copybreak skb */
4489 skb_trim(skb, length);
4490
4491 /* Receive Checksum Offload */
4492 e1000_rx_checksum(adapter,
4493 (u32)(status) |
4494 ((u32)(rx_desc->errors) << 24),
4495 le16_to_cpu(rx_desc->csum), skb);
4496
4497 e1000_receive_skb(adapter, status, rx_desc->special, skb);
4498
4499 next_desc:
4500 rx_desc->status = 0;
4501
4502 /* return some buffers to hardware, one at a time is too slow */
4503 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4504 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4505 cleaned_count = 0;
4506 }
4507
4508 /* use prefetched values */
4509 rx_desc = next_rxd;
4510 buffer_info = next_buffer;
4511 }
4512 rx_ring->next_to_clean = i;
4513
4514 cleaned_count = E1000_DESC_UNUSED(rx_ring);
4515 if (cleaned_count)
4516 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4517
4518 adapter->total_rx_packets += total_rx_packets;
4519 adapter->total_rx_bytes += total_rx_bytes;
4520 netdev->stats.rx_bytes += total_rx_bytes;
4521 netdev->stats.rx_packets += total_rx_packets;
4522 return cleaned;
4523 }
4524
4525 /**
4526 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
4527 * @adapter: address of board private structure
4528 * @rx_ring: pointer to receive ring structure
4529 * @cleaned_count: number of buffers to allocate this pass
4530 **/
4531 static void
4532 e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4533 struct e1000_rx_ring *rx_ring, int cleaned_count)
4534 {
4535 struct pci_dev *pdev = adapter->pdev;
4536 struct e1000_rx_desc *rx_desc;
4537 struct e1000_rx_buffer *buffer_info;
4538 unsigned int i;
4539
4540 i = rx_ring->next_to_use;
4541 buffer_info = &rx_ring->buffer_info[i];
4542
4543 while (cleaned_count--) {
4544 /* allocate a new page if necessary */
4545 if (!buffer_info->rxbuf.page) {
4546 buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC);
4547 if (unlikely(!buffer_info->rxbuf.page)) {
4548 adapter->alloc_rx_buff_failed++;
4549 break;
4550 }
4551 }
4552
4553 if (!buffer_info->dma) {
4554 buffer_info->dma = dma_map_page(&pdev->dev,
4555 buffer_info->rxbuf.page, 0,
4556 adapter->rx_buffer_len,
4557 DMA_FROM_DEVICE);
4558 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4559 put_page(buffer_info->rxbuf.page);
4560 buffer_info->rxbuf.page = NULL;
4561 buffer_info->dma = 0;
4562 adapter->alloc_rx_buff_failed++;
4563 break;
4564 }
4565 }
4566
4567 rx_desc = E1000_RX_DESC(*rx_ring, i);
4568 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4569
4570 if (unlikely(++i == rx_ring->count))
4571 i = 0;
4572 buffer_info = &rx_ring->buffer_info[i];
4573 }
4574
4575 if (likely(rx_ring->next_to_use != i)) {
4576 rx_ring->next_to_use = i;
4577 if (unlikely(i-- == 0))
4578 i = (rx_ring->count - 1);
4579
4580 /* Force memory writes to complete before letting h/w
4581 * know there are new descriptors to fetch. (Only
4582 * applicable for weak-ordered memory model archs,
4583 * such as IA-64).
4584 */
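/* The wmb() below orders the descriptor and buffer writes above
 * against the tail-register write that follows; once RDT is advanced
 * the hardware may immediately DMA into the newly published buffers.
 */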
4585 wmb();
4586 writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4587 }
4588 }
4589
4590 /**
4591 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
4592 * @adapter: address of board private structure
4593 **/
4594 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4595 struct e1000_rx_ring *rx_ring,
4596 int cleaned_count)
4597 {
4598 struct e1000_hw *hw = &adapter->hw;
4599 struct pci_dev *pdev = adapter->pdev;
4600 struct e1000_rx_desc *rx_desc;
4601 struct e1000_rx_buffer *buffer_info;
4602 unsigned int i;
4603 unsigned int bufsz = adapter->rx_buffer_len;
4604
4605 i = rx_ring->next_to_use;
4606 buffer_info = &rx_ring->buffer_info[i];
4607
4608 while (cleaned_count--) {
4609 void *data;
4610
4611 if (buffer_info->rxbuf.data)
4612 goto skip;
4613
4614 data = e1000_alloc_frag(adapter);
4615 if (!data) {
4616 /* Better luck next round */
4617 adapter->alloc_rx_buff_failed++;
4618 break;
4619 }
4620
4621 /* Fix for errata 23, can't cross 64kB boundary */
4622 if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4623 void *olddata = data;
4624 e_err(rx_err, "skb align check failed: %u bytes at "
4625 "%p\n", bufsz, data);
4626 /* Try again, without freeing the previous */
4627 data = e1000_alloc_frag(adapter);
4628 /* Failed allocation, critical failure */
4629 if (!data) {
4630 skb_free_frag(olddata);
4631 adapter->alloc_rx_buff_failed++;
4632 break;
4633 }
4634
4635 if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4636 /* give up */
4637 skb_free_frag(data);
4638 skb_free_frag(olddata);
4639 adapter->alloc_rx_buff_failed++;
4640 break;
4641 }
4642
4643 /* Use new allocation */
4644 skb_free_frag(olddata);
4645 }
4646 buffer_info->dma = dma_map_single(&pdev->dev,
4647 data,
4648 adapter->rx_buffer_len,
4649 DMA_FROM_DEVICE);
4650 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4651 skb_free_frag(data);
4652 buffer_info->dma = 0;
4653 adapter->alloc_rx_buff_failed++;
4654 break;
4655 }
4656
4657 /* XXX if it was allocated cleanly it will never map to a
4658 * boundary crossing
4659 */
4660
4661 /* Fix for errata 23, can't cross 64kB boundary */
4662 if (!e1000_check_64k_bound(adapter,
4663 (void *)(unsigned long)buffer_info->dma,
4664 adapter->rx_buffer_len)) {
4665 e_err(rx_err, "dma align check failed: %u bytes at "
4666 "%p\n", adapter->rx_buffer_len,
4667 (void *)(unsigned long)buffer_info->dma);
4668
4669 dma_unmap_single(&pdev->dev, buffer_info->dma,
4670 adapter->rx_buffer_len,
4671 DMA_FROM_DEVICE);
4672
4673 skb_free_frag(data);
4674 buffer_info->rxbuf.data = NULL;
4675 buffer_info->dma = 0;
4676
4677 adapter->alloc_rx_buff_failed++;
4678 break;
4679 }
4680 buffer_info->rxbuf.data = data;
4681 skip:
4682 rx_desc = E1000_RX_DESC(*rx_ring, i);
4683 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4684
4685 if (unlikely(++i == rx_ring->count))
4686 i = 0;
4687 buffer_info = &rx_ring->buffer_info[i];
4688 }
4689
4690 if (likely(rx_ring->next_to_use != i)) {
4691 rx_ring->next_to_use = i;
4692 if (unlikely(i-- == 0))
4693 i = (rx_ring->count - 1);
4694
4695 /* Force memory writes to complete before letting h/w
4696 * know there are new descriptors to fetch. (Only
4697 * applicable for weak-ordered memory model archs,
4698 * such as IA-64).
4699 */
4700 wmb();
4701 writel(i, hw->hw_addr + rx_ring->rdt);
4702 }
4703 }
4704
4705 /**
4706 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
4707 * @adapter: board private structure
4708 **/
4709 static void e1000_smartspeed(struct e1000_adapter *adapter)
4710 {
4711 struct e1000_hw *hw = &adapter->hw;
4712 u16 phy_status;
4713 u16 phy_ctrl;
4714
4715 if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4716 !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
4717 return;
4718
4719 if (adapter->smartspeed == 0) {
4720 /* If Master/Slave config fault is asserted twice,
4721 * we assume back-to-back
4722 */
4723 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4724 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4725 return;
4726 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4727 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4728 return;
4729 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4730 if (phy_ctrl & CR_1000T_MS_ENABLE) {
4731 phy_ctrl &= ~CR_1000T_MS_ENABLE;
4732 e1000_write_phy_reg(hw, PHY_1000T_CTRL,
4733 phy_ctrl);
4734 adapter->smartspeed++;
4735 if (!e1000_phy_setup_autoneg(hw) &&
4736 !e1000_read_phy_reg(hw, PHY_CTRL,
4737 &phy_ctrl)) {
4738 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4739 MII_CR_RESTART_AUTO_NEG);
4740 e1000_write_phy_reg(hw, PHY_CTRL,
4741 phy_ctrl);
4742 }
4743 }
4744 return;
4745 } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4746 /* If still no link, perhaps using 2/3 pair cable */
4747 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4748 phy_ctrl |= CR_1000T_MS_ENABLE;
4749 e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4750 if (!e1000_phy_setup_autoneg(hw) &&
4751 !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
4752 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4753 MII_CR_RESTART_AUTO_NEG);
4754 e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
4755 }
4756 }
4757 /* Restart process after E1000_SMARTSPEED_MAX iterations */
4758 if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4759 adapter->smartspeed = 0;
4760 }
4761
4762 /**
4763 * e1000_ioctl - handle device-specific ioctl requests
4764 * @netdev: network interface device structure
4765 * @ifr: pointer to the ioctl request data
4766 * @cmd: ioctl command number
4767 **/
4768 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4769 {
4770 switch (cmd) {
4771 case SIOCGMIIPHY:
4772 case SIOCGMIIREG:
4773 case SIOCSMIIREG:
4774 return e1000_mii_ioctl(netdev, ifr, cmd);
4775 default:
4776 return -EOPNOTSUPP;
4777 }
4778 }
4779
4780 /**
4781 * e1000_mii_ioctl - handle MII-register ioctl requests
4782 * @netdev: network interface device structure
4783 * @ifr: pointer to the ioctl request data
4784 * @cmd: ioctl command number
4785 **/
4786 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4787 int cmd)
4788 {
4789 struct e1000_adapter *adapter = netdev_priv(netdev);
4790 struct e1000_hw *hw = &adapter->hw;
4791 struct mii_ioctl_data *data = if_mii(ifr);
4792 int retval;
4793 u16 mii_reg;
4794 unsigned long flags;
4795
4796 if (hw->media_type != e1000_media_type_copper)
4797 return -EOPNOTSUPP;
4798
4799 switch (cmd) {
4800 case SIOCGMIIPHY:
4801 data->phy_id = hw->phy_addr;
4802 break;
4803 case SIOCGMIIREG:
4804 spin_lock_irqsave(&adapter->stats_lock, flags);
4805 if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
4806 &data->val_out)) {
4807 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4808 return -EIO;
4809 }
4810 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4811 break;
4812 case SIOCSMIIREG:
4813 if (data->reg_num & ~(0x1F))
4814 return -EFAULT;
4815 mii_reg = data->val_in;
4816 spin_lock_irqsave(&adapter->stats_lock, flags);
4817 if (e1000_write_phy_reg(hw, data->reg_num,
4818 mii_reg)) {
4819 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4820 return -EIO;
4821 }
4822 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4823 if (hw->media_type == e1000_media_type_copper) {
4824 switch (data->reg_num) {
4825 case PHY_CTRL:
4826 if (mii_reg & MII_CR_POWER_DOWN)
4827 break;
4828 if (mii_reg & MII_CR_AUTO_NEG_EN) {
4829 hw->autoneg = 1;
4830 hw->autoneg_advertised = 0x2F;
4831 } else {
4832 u32 speed;
4833 if (mii_reg & 0x40)
4834 speed = SPEED_1000;
4835 else if (mii_reg & 0x2000)
4836 speed = SPEED_100;
4837 else
4838 speed = SPEED_10;
4839 retval = e1000_set_spd_dplx(
4840 adapter, speed,
4841 ((mii_reg & 0x100)
4842 ? DUPLEX_FULL :
4843 DUPLEX_HALF));
4844 if (retval)
4845 return retval;
4846 }
4847 if (netif_running(adapter->netdev))
4848 e1000_reinit_locked(adapter);
4849 else
4850 e1000_reset(adapter);
4851 break;
4852 case M88E1000_PHY_SPEC_CTRL:
4853 case M88E1000_EXT_PHY_SPEC_CTRL:
4854 if (e1000_phy_reset(hw))
4855 return -EIO;
4856 break;
4857 }
4858 } else {
4859 switch (data->reg_num) {
4860 case PHY_CTRL:
4861 if (mii_reg & MII_CR_POWER_DOWN)
4862 break;
4863 if (netif_running(adapter->netdev))
4864 e1000_reinit_locked(adapter);
4865 else
4866 e1000_reset(adapter);
4867 break;
4868 }
4869 }
4870 break;
4871 default:
4872 return -EOPNOTSUPP;
4873 }
4874 return E1000_SUCCESS;
4875 }
4876
4877 void e1000_pci_set_mwi(struct e1000_hw *hw)
4878 {
4879 struct e1000_adapter *adapter = hw->back;
4880 int ret_val = pci_set_mwi(adapter->pdev);
4881
4882 if (ret_val)
4883 e_err(probe, "Error in setting MWI\n");
4884 }
4885
4886 void e1000_pci_clear_mwi(struct e1000_hw *hw)
4887 {
4888 struct e1000_adapter *adapter = hw->back;
4889
4890 pci_clear_mwi(adapter->pdev);
4891 }
4892
4893 int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
4894 {
4895 struct e1000_adapter *adapter = hw->back;
4896 return pcix_get_mmrbc(adapter->pdev);
4897 }
4898
4899 void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4900 {
4901 struct e1000_adapter *adapter = hw->back;
4902 pcix_set_mmrbc(adapter->pdev, mmrbc);
4903 }
4904
4905 void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
4906 {
4907 outl(value, port);
4908 }
4909
4910 static bool e1000_vlan_used(struct e1000_adapter *adapter)
4911 {
4912 u16 vid;
4913
4914 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4915 return true;
4916 return false;
4917 }
4918
4919 static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4920 netdev_features_t features)
4921 {
4922 struct e1000_hw *hw = &adapter->hw;
4923 u32 ctrl;
4924
4925 ctrl = er32(CTRL);
4926 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
4927 /* enable VLAN tag insert/strip */
4928 ctrl |= E1000_CTRL_VME;
4929 } else {
4930 /* disable VLAN tag insert/strip */
4931 ctrl &= ~E1000_CTRL_VME;
4932 }
4933 ew32(CTRL, ctrl);
4934 }
4935 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4936 bool filter_on)
4937 {
4938 struct e1000_hw *hw = &adapter->hw;
4939 u32 rctl;
4940
4941 if (!test_bit(__E1000_DOWN, &adapter->flags))
4942 e1000_irq_disable(adapter);
4943
4944 __e1000_vlan_mode(adapter, adapter->netdev->features);
4945 if (filter_on) {
4946 /* enable VLAN receive filtering */
4947 rctl = er32(RCTL);
4948 rctl &= ~E1000_RCTL_CFIEN;
4949 if (!(adapter->netdev->flags & IFF_PROMISC))
4950 rctl |= E1000_RCTL_VFE;
4951 ew32(RCTL, rctl);
4952 e1000_update_mng_vlan(adapter);
4953 } else {
4954 /* disable VLAN receive filtering */
4955 rctl = er32(RCTL);
4956 rctl &= ~E1000_RCTL_VFE;
4957 ew32(RCTL, rctl);
4958 }
4959
4960 if (!test_bit(__E1000_DOWN, &adapter->flags))
4961 e1000_irq_enable(adapter);
4962 }
4963
4964 static void e1000_vlan_mode(struct net_device *netdev,
4965 netdev_features_t features)
4966 {
4967 struct e1000_adapter *adapter = netdev_priv(netdev);
4968
4969 if (!test_bit(__E1000_DOWN, &adapter->flags))
4970 e1000_irq_disable(adapter);
4971
4972 __e1000_vlan_mode(adapter, features);
4973
4974 if (!test_bit(__E1000_DOWN, &adapter->flags))
4975 e1000_irq_enable(adapter);
4976 }
4977
4978 static int e1000_vlan_rx_add_vid(struct net_device *netdev,
4979 __be16 proto, u16 vid)
4980 {
4981 struct e1000_adapter *adapter = netdev_priv(netdev);
4982 struct e1000_hw *hw = &adapter->hw;
4983 u32 vfta, index;
4984
4985 if ((hw->mng_cookie.status &
4986 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4987 (vid == adapter->mng_vlan_id))
4988 return 0;
4989
4990 if (!e1000_vlan_used(adapter))
4991 e1000_vlan_filter_on_off(adapter, true);
4992
4993 /* add VID to filter table */
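/* The VLAN filter table (VFTA) is an array of 128 32-bit registers
 * covering all 4096 VLAN IDs: vid >> 5 selects the register and
 * vid & 0x1F the bit within it, e.g. vid 100 -> register 3, bit 4.
 */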
4994 index = (vid >> 5) & 0x7F;
4995 vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4996 vfta |= (1 << (vid & 0x1F));
4997 e1000_write_vfta(hw, index, vfta);
4998
4999 set_bit(vid, adapter->active_vlans);
5000
5001 return 0;
5002 }
5003
5004 static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
5005 __be16 proto, u16 vid)
5006 {
5007 struct e1000_adapter *adapter = netdev_priv(netdev);
5008 struct e1000_hw *hw = &adapter->hw;
5009 u32 vfta, index;
5010
5011 if (!test_bit(__E1000_DOWN, &adapter->flags))
5012 e1000_irq_disable(adapter);
5013 if (!test_bit(__E1000_DOWN, &adapter->flags))
5014 e1000_irq_enable(adapter);
5015
5016 /* remove VID from filter table */
5017 index = (vid >> 5) & 0x7F;
5018 vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
5019 vfta &= ~(1 << (vid & 0x1F));
5020 e1000_write_vfta(hw, index, vfta);
5021
5022 clear_bit(vid, adapter->active_vlans);
5023
5024 if (!e1000_vlan_used(adapter))
5025 e1000_vlan_filter_on_off(adapter, false);
5026
5027 return 0;
5028 }
5029
5030 static void e1000_restore_vlan(struct e1000_adapter *adapter)
5031 {
5032 u16 vid;
5033
5034 if (!e1000_vlan_used(adapter))
5035 return;
5036
5037 e1000_vlan_filter_on_off(adapter, true);
5038 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
5039 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
5040 }
5041
5042 int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
5043 {
5044 struct e1000_hw *hw = &adapter->hw;
5045
5046 hw->autoneg = 0;
5047
5048 /* Make sure dplx is at most 1 bit and lsb of speed is not set
5049 * for the switch() below to work
5050 */
5051 if ((spd & 1) || (dplx & ~1))
5052 goto err_inval;
5053
5054 /* Fiber NICs only allow 1000 Mbps Full duplex */
5055 if ((hw->media_type == e1000_media_type_fiber) &&
5056 spd != SPEED_1000 &&
5057 dplx != DUPLEX_FULL)
5058 goto err_inval;
5059
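/* With the standard ethtool constants (SPEED_10/100/1000 = 10/100/1000,
 * DUPLEX_HALF/FULL = 0/1) the sum spd + dplx is unique per combination,
 * e.g. 100 + DUPLEX_FULL = 101 can only mean 100 Mbps full duplex.
 */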
5060 switch (spd + dplx) {
5061 case SPEED_10 + DUPLEX_HALF:
5062 hw->forced_speed_duplex = e1000_10_half;
5063 break;
5064 case SPEED_10 + DUPLEX_FULL:
5065 hw->forced_speed_duplex = e1000_10_full;
5066 break;
5067 case SPEED_100 + DUPLEX_HALF:
5068 hw->forced_speed_duplex = e1000_100_half;
5069 break;
5070 case SPEED_100 + DUPLEX_FULL:
5071 hw->forced_speed_duplex = e1000_100_full;
5072 break;
5073 case SPEED_1000 + DUPLEX_FULL:
5074 hw->autoneg = 1;
5075 hw->autoneg_advertised = ADVERTISE_1000_FULL;
5076 break;
5077 case SPEED_1000 + DUPLEX_HALF: /* not supported */
5078 default:
5079 goto err_inval;
5080 }
5081
5082 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
5083 hw->mdix = AUTO_ALL_MODES;
5084
5085 return 0;
5086
5087 err_inval:
5088 e_err(probe, "Unsupported Speed/Duplex configuration\n");
5089 return -EINVAL;
5090 }
5091
5092 static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
5093 {
5094 struct net_device *netdev = pci_get_drvdata(pdev);
5095 struct e1000_adapter *adapter = netdev_priv(netdev);
5096 struct e1000_hw *hw = &adapter->hw;
5097 u32 ctrl, ctrl_ext, rctl, status;
5098 u32 wufc = adapter->wol;
5099 #ifdef CONFIG_PM
5100 int retval = 0;
5101 #endif
5102
5103 netif_device_detach(netdev);
5104
5105 if (netif_running(netdev)) {
5106 int count = E1000_CHECK_RESET_COUNT;
5107
5108 while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
5109 usleep_range(10000, 20000);
5110
5111 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
5112 e1000_down(adapter);
5113 }
5114
5115 #ifdef CONFIG_PM
5116 retval = pci_save_state(pdev);
5117 if (retval)
5118 return retval;
5119 #endif
5120
5121 status = er32(STATUS);
5122 if (status & E1000_STATUS_LU)
5123 wufc &= ~E1000_WUFC_LNKC;
5124
5125 if (wufc) {
5126 e1000_setup_rctl(adapter);
5127 e1000_set_rx_mode(netdev);
5128
5129 rctl = er32(RCTL);
5130
5131 /* turn on all-multi mode if wake on multicast is enabled */
5132 if (wufc & E1000_WUFC_MC)
5133 rctl |= E1000_RCTL_MPE;
5134
5135 /* enable receives in the hardware */
5136 ew32(RCTL, rctl | E1000_RCTL_EN);
5137
5138 if (hw->mac_type >= e1000_82540) {
5139 ctrl = er32(CTRL);
5140 /* advertise wake from D3Cold */
5141 #define E1000_CTRL_ADVD3WUC 0x00100000
5142 /* phy power management enable */
5143 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5144 ctrl |= E1000_CTRL_ADVD3WUC |
5145 E1000_CTRL_EN_PHY_PWR_MGMT;
5146 ew32(CTRL, ctrl);
5147 }
5148
5149 if (hw->media_type == e1000_media_type_fiber ||
5150 hw->media_type == e1000_media_type_internal_serdes) {
5151 /* keep the laser running in D3 */
5152 ctrl_ext = er32(CTRL_EXT);
5153 ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
5154 ew32(CTRL_EXT, ctrl_ext);
5155 }
5156
5157 ew32(WUC, E1000_WUC_PME_EN);
5158 ew32(WUFC, wufc);
5159 } else {
5160 ew32(WUC, 0);
5161 ew32(WUFC, 0);
5162 }
5163
5164 e1000_release_manageability(adapter);
5165
5166 *enable_wake = !!wufc;
5167
5168 /* make sure adapter isn't asleep if manageability is enabled */
5169 if (adapter->en_mng_pt)
5170 *enable_wake = true;
5171
5172 if (netif_running(netdev))
5173 e1000_free_irq(adapter);
5174
5175 if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5176 pci_disable_device(pdev);
5177
5178 return 0;
5179 }
5180
5181 #ifdef CONFIG_PM
5182 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
5183 {
5184 int retval;
5185 bool wake;
5186
5187 retval = __e1000_shutdown(pdev, &wake);
5188 if (retval)
5189 return retval;
5190
5191 if (wake) {
5192 pci_prepare_to_sleep(pdev);
5193 } else {
5194 pci_wake_from_d3(pdev, false);
5195 pci_set_power_state(pdev, PCI_D3hot);
5196 }
5197
5198 return 0;
5199 }
5200
5201 static int e1000_resume(struct pci_dev *pdev)
5202 {
5203 struct net_device *netdev = pci_get_drvdata(pdev);
5204 struct e1000_adapter *adapter = netdev_priv(netdev);
5205 struct e1000_hw *hw = &adapter->hw;
5206 u32 err;
5207
5208 pci_set_power_state(pdev, PCI_D0);
5209 pci_restore_state(pdev);
5210 pci_save_state(pdev);
5211
5212 if (adapter->need_ioport)
5213 err = pci_enable_device(pdev);
5214 else
5215 err = pci_enable_device_mem(pdev);
5216 if (err) {
5217 pr_err("Cannot enable PCI device from suspend\n");
5218 return err;
5219 }
5220
5221 /* flush memory to make sure state is correct */
5222 smp_mb__before_atomic();
5223 clear_bit(__E1000_DISABLED, &adapter->flags);
5224 pci_set_master(pdev);
5225
5226 pci_enable_wake(pdev, PCI_D3hot, 0);
5227 pci_enable_wake(pdev, PCI_D3cold, 0);
5228
5229 if (netif_running(netdev)) {
5230 err = e1000_request_irq(adapter);
5231 if (err)
5232 return err;
5233 }
5234
5235 e1000_power_up_phy(adapter);
5236 e1000_reset(adapter);
5237 ew32(WUS, ~0);
5238
5239 e1000_init_manageability(adapter);
5240
5241 if (netif_running(netdev))
5242 e1000_up(adapter);
5243
5244 netif_device_attach(netdev);
5245
5246 return 0;
5247 }
5248 #endif
5249
5250 static void e1000_shutdown(struct pci_dev *pdev)
5251 {
5252 bool wake;
5253
5254 __e1000_shutdown(pdev, &wake);
5255
5256 if (system_state == SYSTEM_POWER_OFF) {
5257 pci_wake_from_d3(pdev, wake);
5258 pci_set_power_state(pdev, PCI_D3hot);
5259 }
5260 }
5261
5262 #ifdef CONFIG_NET_POLL_CONTROLLER
5263 /* Polling 'interrupt' - used by things like netconsole to send skbs
5264 * without having to re-enable interrupts. It's not called while
5265 * the interrupt routine is executing.
5266 */
5267 static void e1000_netpoll(struct net_device *netdev)
5268 {
5269 struct e1000_adapter *adapter = netdev_priv(netdev);
5270
5271 disable_irq(adapter->pdev->irq);
5272 e1000_intr(adapter->pdev->irq, netdev);
5273 enable_irq(adapter->pdev->irq);
5274 }
5275 #endif
5276
5277 /**
5278 * e1000_io_error_detected - called when PCI error is detected
5279 * @pdev: Pointer to PCI device
5280 * @state: The current pci connection state
5281 *
5282 * This function is called after a PCI bus error affecting
5283 * this device has been detected.
5284 */
5285 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5286 pci_channel_state_t state)
5287 {
5288 struct net_device *netdev = pci_get_drvdata(pdev);
5289 struct e1000_adapter *adapter = netdev_priv(netdev);
5290
5291 netif_device_detach(netdev);
5292
5293 if (state == pci_channel_io_perm_failure)
5294 return PCI_ERS_RESULT_DISCONNECT;
5295
5296 if (netif_running(netdev))
5297 e1000_down(adapter);
5298
5299 if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5300 pci_disable_device(pdev);
5301
5302 /* Request a slot reset. */
5303 return PCI_ERS_RESULT_NEED_RESET;
5304 }
5305
5306 /**
5307 * e1000_io_slot_reset - called after the pci bus has been reset.
5308 * @pdev: Pointer to PCI device
5309 *
5310 * Restart the card from scratch, as if from a cold-boot. Implementation
5311 * resembles the first-half of the e1000_resume routine.
5312 */
5313 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5314 {
5315 struct net_device *netdev = pci_get_drvdata(pdev);
5316 struct e1000_adapter *adapter = netdev_priv(netdev);
5317 struct e1000_hw *hw = &adapter->hw;
5318 int err;
5319
5320 if (adapter->need_ioport)
5321 err = pci_enable_device(pdev);
5322 else
5323 err = pci_enable_device_mem(pdev);
5324 if (err) {
5325 pr_err("Cannot re-enable PCI device after reset.\n");
5326 return PCI_ERS_RESULT_DISCONNECT;
5327 }
5328
5329 /* flush memory to make sure state is correct */
5330 smp_mb__before_atomic();
5331 clear_bit(__E1000_DISABLED, &adapter->flags);
5332 pci_set_master(pdev);
5333
5334 pci_enable_wake(pdev, PCI_D3hot, 0);
5335 pci_enable_wake(pdev, PCI_D3cold, 0);
5336
5337 e1000_reset(adapter);
5338 ew32(WUS, ~0);
5339
5340 return PCI_ERS_RESULT_RECOVERED;
5341 }
5342
5343 /**
5344 * e1000_io_resume - called when traffic can start flowing again.
5345 * @pdev: Pointer to PCI device
5346 *
5347 * This callback is called when the error recovery driver tells us that
5348 * it's OK to resume normal operation. Implementation resembles the
5349 * second-half of the e1000_resume routine.
5350 */
5351 static void e1000_io_resume(struct pci_dev *pdev)
5352 {
5353 struct net_device *netdev = pci_get_drvdata(pdev);
5354 struct e1000_adapter *adapter = netdev_priv(netdev);
5355
5356 e1000_init_manageability(adapter);
5357
5358 if (netif_running(netdev)) {
5359 if (e1000_up(adapter)) {
5360 pr_info("can't bring device back up after reset\n");
5361 return;
5362 }
5363 }
5364
5365 netif_device_attach(netdev);
5366 }
5367
5368 /* e1000_main.c */
5369