1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 1999 - 2006 Intel Corporation. */
3
4 #include "e1000.h"
5 #include <net/ip6_checksum.h>
6 #include <linux/io.h>
7 #include <linux/prefetch.h>
8 #include <linux/bitops.h>
9 #include <linux/if_vlan.h>
10
11 char e1000_driver_name[] = "e1000";
12 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
13 #define DRV_VERSION "7.3.21-k8-NAPI"
14 const char e1000_driver_version[] = DRV_VERSION;
15 static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
16
17 /* e1000_pci_tbl - PCI Device ID Table
18 *
19 * Last entry must be all 0s
20 *
21 * Macro expands to...
22 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
23 */
24 static const struct pci_device_id e1000_pci_tbl[] = {
25 INTEL_E1000_ETHERNET_DEVICE(0x1000),
26 INTEL_E1000_ETHERNET_DEVICE(0x1001),
27 INTEL_E1000_ETHERNET_DEVICE(0x1004),
28 INTEL_E1000_ETHERNET_DEVICE(0x1008),
29 INTEL_E1000_ETHERNET_DEVICE(0x1009),
30 INTEL_E1000_ETHERNET_DEVICE(0x100C),
31 INTEL_E1000_ETHERNET_DEVICE(0x100D),
32 INTEL_E1000_ETHERNET_DEVICE(0x100E),
33 INTEL_E1000_ETHERNET_DEVICE(0x100F),
34 INTEL_E1000_ETHERNET_DEVICE(0x1010),
35 INTEL_E1000_ETHERNET_DEVICE(0x1011),
36 INTEL_E1000_ETHERNET_DEVICE(0x1012),
37 INTEL_E1000_ETHERNET_DEVICE(0x1013),
38 INTEL_E1000_ETHERNET_DEVICE(0x1014),
39 INTEL_E1000_ETHERNET_DEVICE(0x1015),
40 INTEL_E1000_ETHERNET_DEVICE(0x1016),
41 INTEL_E1000_ETHERNET_DEVICE(0x1017),
42 INTEL_E1000_ETHERNET_DEVICE(0x1018),
43 INTEL_E1000_ETHERNET_DEVICE(0x1019),
44 INTEL_E1000_ETHERNET_DEVICE(0x101A),
45 INTEL_E1000_ETHERNET_DEVICE(0x101D),
46 INTEL_E1000_ETHERNET_DEVICE(0x101E),
47 INTEL_E1000_ETHERNET_DEVICE(0x1026),
48 INTEL_E1000_ETHERNET_DEVICE(0x1027),
49 INTEL_E1000_ETHERNET_DEVICE(0x1028),
50 INTEL_E1000_ETHERNET_DEVICE(0x1075),
51 INTEL_E1000_ETHERNET_DEVICE(0x1076),
52 INTEL_E1000_ETHERNET_DEVICE(0x1077),
53 INTEL_E1000_ETHERNET_DEVICE(0x1078),
54 INTEL_E1000_ETHERNET_DEVICE(0x1079),
55 INTEL_E1000_ETHERNET_DEVICE(0x107A),
56 INTEL_E1000_ETHERNET_DEVICE(0x107B),
57 INTEL_E1000_ETHERNET_DEVICE(0x107C),
58 INTEL_E1000_ETHERNET_DEVICE(0x108A),
59 INTEL_E1000_ETHERNET_DEVICE(0x1099),
60 INTEL_E1000_ETHERNET_DEVICE(0x10B5),
61 INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
62 /* required last entry */
63 {0,}
64 };
65
66 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
67
68 int e1000_up(struct e1000_adapter *adapter);
69 void e1000_down(struct e1000_adapter *adapter);
70 void e1000_reinit_locked(struct e1000_adapter *adapter);
71 void e1000_reset(struct e1000_adapter *adapter);
72 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
73 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
74 void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
75 void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
76 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
77 struct e1000_tx_ring *txdr);
78 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
79 struct e1000_rx_ring *rxdr);
80 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
81 struct e1000_tx_ring *tx_ring);
82 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
83 struct e1000_rx_ring *rx_ring);
84 void e1000_update_stats(struct e1000_adapter *adapter);
85
86 static int e1000_init_module(void);
87 static void e1000_exit_module(void);
88 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
89 static void e1000_remove(struct pci_dev *pdev);
90 static int e1000_alloc_queues(struct e1000_adapter *adapter);
91 static int e1000_sw_init(struct e1000_adapter *adapter);
92 int e1000_open(struct net_device *netdev);
93 int e1000_close(struct net_device *netdev);
94 static void e1000_configure_tx(struct e1000_adapter *adapter);
95 static void e1000_configure_rx(struct e1000_adapter *adapter);
96 static void e1000_setup_rctl(struct e1000_adapter *adapter);
97 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
98 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
99 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
100 struct e1000_tx_ring *tx_ring);
101 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
102 struct e1000_rx_ring *rx_ring);
103 static void e1000_set_rx_mode(struct net_device *netdev);
104 static void e1000_update_phy_info_task(struct work_struct *work);
105 static void e1000_watchdog(struct work_struct *work);
106 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
107 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
108 struct net_device *netdev);
109 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
110 static int e1000_set_mac(struct net_device *netdev, void *p);
111 static irqreturn_t e1000_intr(int irq, void *data);
112 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
113 struct e1000_tx_ring *tx_ring);
114 static int e1000_clean(struct napi_struct *napi, int budget);
115 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
116 struct e1000_rx_ring *rx_ring,
117 int *work_done, int work_to_do);
118 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
119 struct e1000_rx_ring *rx_ring,
120 int *work_done, int work_to_do);
121 static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
122 struct e1000_rx_ring *rx_ring,
123 int cleaned_count)
124 {
125 }
126 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
127 struct e1000_rx_ring *rx_ring,
128 int cleaned_count);
129 static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
130 struct e1000_rx_ring *rx_ring,
131 int cleaned_count);
132 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
133 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
134 int cmd);
135 static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
136 static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
137 static void e1000_tx_timeout(struct net_device *dev);
138 static void e1000_reset_task(struct work_struct *work);
139 static void e1000_smartspeed(struct e1000_adapter *adapter);
140 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
141 struct sk_buff *skb);
142
143 static bool e1000_vlan_used(struct e1000_adapter *adapter);
144 static void e1000_vlan_mode(struct net_device *netdev,
145 netdev_features_t features);
146 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
147 bool filter_on);
148 static int e1000_vlan_rx_add_vid(struct net_device *netdev,
149 __be16 proto, u16 vid);
150 static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
151 __be16 proto, u16 vid);
152 static void e1000_restore_vlan(struct e1000_adapter *adapter);
153
154 #ifdef CONFIG_PM
155 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
156 static int e1000_resume(struct pci_dev *pdev);
157 #endif
158 static void e1000_shutdown(struct pci_dev *pdev);
159
160 #ifdef CONFIG_NET_POLL_CONTROLLER
161 /* for netdump / net console */
162 static void e1000_netpoll (struct net_device *netdev);
163 #endif
164
165 #define COPYBREAK_DEFAULT 256
166 static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
167 module_param(copybreak, uint, 0644);
168 MODULE_PARM_DESC(copybreak,
169 "Maximum size of packet that is copied to a new buffer on receive");
170
171 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
172 pci_channel_state_t state);
173 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
174 static void e1000_io_resume(struct pci_dev *pdev);
175
176 static const struct pci_error_handlers e1000_err_handler = {
177 .error_detected = e1000_io_error_detected,
178 .slot_reset = e1000_io_slot_reset,
179 .resume = e1000_io_resume,
180 };
181
182 static struct pci_driver e1000_driver = {
183 .name = e1000_driver_name,
184 .id_table = e1000_pci_tbl,
185 .probe = e1000_probe,
186 .remove = e1000_remove,
187 #ifdef CONFIG_PM
188 /* Power Management Hooks */
189 .suspend = e1000_suspend,
190 .resume = e1000_resume,
191 #endif
192 .shutdown = e1000_shutdown,
193 .err_handler = &e1000_err_handler
194 };
195
196 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
197 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
198 MODULE_LICENSE("GPL");
199 MODULE_VERSION(DRV_VERSION);
200
201 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
202 static int debug = -1;
203 module_param(debug, int, 0);
204 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
205
206 /**
207  * e1000_get_hw_dev - return the net_device for a hw struct
208  * @hw: hardware structure, used by the hardware layer to print debugging information
209 *
210 **/
211 struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
212 {
213 struct e1000_adapter *adapter = hw->back;
214 return adapter->netdev;
215 }
216
217 /**
218 * e1000_init_module - Driver Registration Routine
219 *
220 * e1000_init_module is the first routine called when the driver is
221 * loaded. All it does is register with the PCI subsystem.
222 **/
223 static int __init e1000_init_module(void)
224 {
225 int ret;
226 pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
227
228 pr_info("%s\n", e1000_copyright);
229
230 ret = pci_register_driver(&e1000_driver);
231 if (copybreak != COPYBREAK_DEFAULT) {
232 if (copybreak == 0)
233 pr_info("copybreak disabled\n");
234 else
235 pr_info("copybreak enabled for "
236 "packets <= %u bytes\n", copybreak);
237 }
238 return ret;
239 }
240
241 module_init(e1000_init_module);
242
243 /**
244 * e1000_exit_module - Driver Exit Cleanup Routine
245 *
246 * e1000_exit_module is called just before the driver is removed
247 * from memory.
248 **/
249 static void __exit e1000_exit_module(void)
250 {
251 pci_unregister_driver(&e1000_driver);
252 }
253
254 module_exit(e1000_exit_module);
255
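/**
 * e1000_request_irq - install the interrupt handler
 * @adapter: board private structure
 *
 * Requests the (shared) legacy interrupt line of the PCI device and
 * installs e1000_intr as its handler.
 **/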
256 static int e1000_request_irq(struct e1000_adapter *adapter)
257 {
258 struct net_device *netdev = adapter->netdev;
259 irq_handler_t handler = e1000_intr;
260 int irq_flags = IRQF_SHARED;
261 int err;
262
263 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
264 netdev);
265 if (err) {
266 e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
267 }
268
269 return err;
270 }
271
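/**
 * e1000_free_irq - release the interrupt line
 * @adapter: board private structure
 **/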
272 static void e1000_free_irq(struct e1000_adapter *adapter)
273 {
274 struct net_device *netdev = adapter->netdev;
275
276 free_irq(adapter->pdev->irq, netdev);
277 }
278
279 /**
280 * e1000_irq_disable - Mask off interrupt generation on the NIC
281 * @adapter: board private structure
282 **/
283 static void e1000_irq_disable(struct e1000_adapter *adapter)
284 {
285 struct e1000_hw *hw = &adapter->hw;
286
287 ew32(IMC, ~0);
288 E1000_WRITE_FLUSH();
289 synchronize_irq(adapter->pdev->irq);
290 }
291
292 /**
293 * e1000_irq_enable - Enable default interrupt generation settings
294 * @adapter: board private structure
295 **/
296 static void e1000_irq_enable(struct e1000_adapter *adapter)
297 {
298 struct e1000_hw *hw = &adapter->hw;
299
300 ew32(IMS, IMS_ENABLE_MASK);
301 E1000_WRITE_FLUSH();
302 }
303
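/**
 * e1000_update_mng_vlan - track the manageability VLAN id
 * @adapter: board private structure
 *
 * Keeps adapter->mng_vlan_id in sync with the VLAN id from the manageability
 * cookie, registering the new id and dropping a stale one when needed.
 **/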
304 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
305 {
306 struct e1000_hw *hw = &adapter->hw;
307 struct net_device *netdev = adapter->netdev;
308 u16 vid = hw->mng_cookie.vlan_id;
309 u16 old_vid = adapter->mng_vlan_id;
310
311 if (!e1000_vlan_used(adapter))
312 return;
313
314 if (!test_bit(vid, adapter->active_vlans)) {
315 if (hw->mng_cookie.status &
316 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
317 e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
318 adapter->mng_vlan_id = vid;
319 } else {
320 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
321 }
322 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
323 (vid != old_vid) &&
324 !test_bit(old_vid, adapter->active_vlans))
325 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
326 old_vid);
327 } else {
328 adapter->mng_vlan_id = vid;
329 }
330 }
331
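/**
 * e1000_init_manageability - let the driver receive ARP frames
 * @adapter: board private structure
 *
 * If management pass-through is enabled, disable the hardware's ARP
 * interception so ARP frames reach the operating system.
 **/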
332 static void e1000_init_manageability(struct e1000_adapter *adapter)
333 {
334 struct e1000_hw *hw = &adapter->hw;
335
336 if (adapter->en_mng_pt) {
337 u32 manc = er32(MANC);
338
339 /* disable hardware interception of ARP */
340 manc &= ~(E1000_MANC_ARP_EN);
341
342 ew32(MANC, manc);
343 }
344 }
345
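/**
 * e1000_release_manageability - hand ARP frames back to the firmware
 * @adapter: board private structure
 *
 * Re-enables hardware interception of ARP when management pass-through is
 * enabled; the inverse of e1000_init_manageability().
 **/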
346 static void e1000_release_manageability(struct e1000_adapter *adapter)
347 {
348 struct e1000_hw *hw = &adapter->hw;
349
350 if (adapter->en_mng_pt) {
351 u32 manc = er32(MANC);
352
353 /* re-enable hardware interception of ARP */
354 manc |= E1000_MANC_ARP_EN;
355
356 ew32(MANC, manc);
357 }
358 }
359
360 /**
361 * e1000_configure - configure the hardware for RX and TX
362  * @adapter: board private structure
363 **/
364 static void e1000_configure(struct e1000_adapter *adapter)
365 {
366 struct net_device *netdev = adapter->netdev;
367 int i;
368
369 e1000_set_rx_mode(netdev);
370
371 e1000_restore_vlan(adapter);
372 e1000_init_manageability(adapter);
373
374 e1000_configure_tx(adapter);
375 e1000_setup_rctl(adapter);
376 e1000_configure_rx(adapter);
377 /* call E1000_DESC_UNUSED which always leaves
378 * at least 1 descriptor unused to make sure
379 * next_to_use != next_to_clean
380 */
381 for (i = 0; i < adapter->num_rx_queues; i++) {
382 struct e1000_rx_ring *ring = &adapter->rx_ring[i];
383 adapter->alloc_rx_buf(adapter, ring,
384 E1000_DESC_UNUSED(ring));
385 }
386 }
387
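/**
 * e1000_up - bring the interface back up after a reset
 * @adapter: board private structure
 *
 * Reconfigures the rings and Rx/Tx units, re-enables NAPI and interrupts,
 * wakes the transmit queue, and fires a link-status-change interrupt so the
 * watchdog starts running.
 **/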
388 int e1000_up(struct e1000_adapter *adapter)
389 {
390 struct e1000_hw *hw = &adapter->hw;
391
392 /* hardware has been reset, we need to reload some things */
393 e1000_configure(adapter);
394
395 clear_bit(__E1000_DOWN, &adapter->flags);
396
397 napi_enable(&adapter->napi);
398
399 e1000_irq_enable(adapter);
400
401 netif_wake_queue(adapter->netdev);
402
403 /* fire a link change interrupt to start the watchdog */
404 ew32(ICS, E1000_ICS_LSC);
405 return 0;
406 }
407
408 /**
409 * e1000_power_up_phy - restore link in case the phy was powered down
410 * @adapter: address of board private structure
411 *
412 * The phy may be powered down to save power and turn off link when the
413 * driver is unloaded and wake on lan is not enabled (among others)
414 * *** this routine MUST be followed by a call to e1000_reset ***
415 **/
416 void e1000_power_up_phy(struct e1000_adapter *adapter)
417 {
418 struct e1000_hw *hw = &adapter->hw;
419 u16 mii_reg = 0;
420
421 /* Just clear the power down bit to wake the phy back up */
422 if (hw->media_type == e1000_media_type_copper) {
423 /* according to the manual, the phy will retain its
424 * settings across a power-down/up cycle
425 */
426 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
427 mii_reg &= ~MII_CR_POWER_DOWN;
428 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
429 }
430 }
431
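/**
 * e1000_power_down_phy - power down the PHY when the interface goes down
 * @adapter: board private structure
 *
 * Sets the PHY power-down bit on copper parts when it is safe to do so,
 * i.e. WoL is disabled and the firmware is not using the PHY over SMBus
 * (see the checks below).
 **/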
432 static void e1000_power_down_phy(struct e1000_adapter *adapter)
433 {
434 struct e1000_hw *hw = &adapter->hw;
435
436 /* Power down the PHY so no link is implied when interface is down *
437 * The PHY cannot be powered down if any of the following is true *
438 * (a) WoL is enabled
439 * (b) AMT is active
440 * (c) SoL/IDER session is active
441 */
442 if (!adapter->wol && hw->mac_type >= e1000_82540 &&
443 hw->media_type == e1000_media_type_copper) {
444 u16 mii_reg = 0;
445
446 switch (hw->mac_type) {
447 case e1000_82540:
448 case e1000_82545:
449 case e1000_82545_rev_3:
450 case e1000_82546:
451 case e1000_ce4100:
452 case e1000_82546_rev_3:
453 case e1000_82541:
454 case e1000_82541_rev_2:
455 case e1000_82547:
456 case e1000_82547_rev_2:
457 if (er32(MANC) & E1000_MANC_SMBUS_EN)
458 goto out;
459 break;
460 default:
461 goto out;
462 }
463 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
464 mii_reg |= MII_CR_POWER_DOWN;
465 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
466 msleep(1);
467 }
468 out:
469 return;
470 }
471
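/**
 * e1000_down_and_stop - mark the adapter down and cancel deferred work
 * @adapter: board private structure
 *
 * Sets __E1000_DOWN, then cancels the watchdog, PHY info and FIFO stall
 * tasks, and the reset task unless a reset is already in progress.
 **/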
472 static void e1000_down_and_stop(struct e1000_adapter *adapter)
473 {
474 set_bit(__E1000_DOWN, &adapter->flags);
475
476 cancel_delayed_work_sync(&adapter->watchdog_task);
477
478 /*
479 * Since the watchdog task can reschedule other tasks, we should cancel
480 * it first, otherwise we can run into the situation when a work is
481 * still running after the adapter has been turned down.
482 */
483
484 cancel_delayed_work_sync(&adapter->phy_info_task);
485 cancel_delayed_work_sync(&adapter->fifo_stall_task);
486
487 /* Only kill reset task if adapter is not resetting */
488 if (!test_bit(__E1000_RESETTING, &adapter->flags))
489 cancel_work_sync(&adapter->reset_task);
490 }
491
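/**
 * e1000_down - quiesce the device
 * @adapter: board private structure
 *
 * Disables receives and transmits in hardware, stops NAPI and interrupts,
 * cancels deferred work, then resets the MAC and cleans all rings.
 **/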
492 void e1000_down(struct e1000_adapter *adapter)
493 {
494 struct e1000_hw *hw = &adapter->hw;
495 struct net_device *netdev = adapter->netdev;
496 u32 rctl, tctl;
497
498 /* disable receives in the hardware */
499 rctl = er32(RCTL);
500 ew32(RCTL, rctl & ~E1000_RCTL_EN);
501 /* flush and sleep below */
502
503 netif_tx_disable(netdev);
504
505 /* disable transmits in the hardware */
506 tctl = er32(TCTL);
507 tctl &= ~E1000_TCTL_EN;
508 ew32(TCTL, tctl);
509 /* flush both disables and wait for them to finish */
510 E1000_WRITE_FLUSH();
511 msleep(10);
512
513 /* Set the carrier off after transmits have been disabled in the
514 * hardware, to avoid race conditions with e1000_watchdog() (which
515 * may be running concurrently to us, checking for the carrier
516 * bit to decide whether it should enable transmits again). Such
517 * a race condition would result into transmission being disabled
518 * in the hardware until the next IFF_DOWN+IFF_UP cycle.
519 */
520 netif_carrier_off(netdev);
521
522 napi_disable(&adapter->napi);
523
524 e1000_irq_disable(adapter);
525
526 /* Setting DOWN must be after irq_disable to prevent
527 * a screaming interrupt. Setting DOWN also prevents
528 * tasks from rescheduling.
529 */
530 e1000_down_and_stop(adapter);
531
532 adapter->link_speed = 0;
533 adapter->link_duplex = 0;
534
535 e1000_reset(adapter);
536 e1000_clean_all_tx_rings(adapter);
537 e1000_clean_all_rx_rings(adapter);
538 }
539
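/**
 * e1000_reinit_locked - bounce the interface under the RESETTING bit
 * @adapter: board private structure
 *
 * Takes __E1000_RESETTING, performs a down/up cycle if the interface is not
 * already down, then releases the bit.
 **/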
540 void e1000_reinit_locked(struct e1000_adapter *adapter)
541 {
542 WARN_ON(in_interrupt());
543 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
544 msleep(1);
545
546 /* only run the task if not already down */
547 if (!test_bit(__E1000_DOWN, &adapter->flags)) {
548 e1000_down(adapter);
549 e1000_up(adapter);
550 }
551
552 clear_bit(__E1000_RESETTING, &adapter->flags);
553 }
554
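/**
 * e1000_reset - bring the hardware into a known good state
 * @adapter: board private structure
 *
 * Repartitions the packet buffer between Rx and Tx, programs the flow
 * control watermarks, resets and re-initializes the MAC, and restores the
 * VLAN and manageability related settings.
 **/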
555 void e1000_reset(struct e1000_adapter *adapter)
556 {
557 struct e1000_hw *hw = &adapter->hw;
558 u32 pba = 0, tx_space, min_tx_space, min_rx_space;
559 bool legacy_pba_adjust = false;
560 u16 hwm;
561
562 /* Repartition Pba for greater than 9k mtu
563 * To take effect CTRL.RST is required.
564 */
565
566 switch (hw->mac_type) {
567 case e1000_82542_rev2_0:
568 case e1000_82542_rev2_1:
569 case e1000_82543:
570 case e1000_82544:
571 case e1000_82540:
572 case e1000_82541:
573 case e1000_82541_rev_2:
574 legacy_pba_adjust = true;
575 pba = E1000_PBA_48K;
576 break;
577 case e1000_82545:
578 case e1000_82545_rev_3:
579 case e1000_82546:
580 case e1000_ce4100:
581 case e1000_82546_rev_3:
582 pba = E1000_PBA_48K;
583 break;
584 case e1000_82547:
585 case e1000_82547_rev_2:
586 legacy_pba_adjust = true;
587 pba = E1000_PBA_30K;
588 break;
589 case e1000_undefined:
590 case e1000_num_macs:
591 break;
592 }
593
594 if (legacy_pba_adjust) {
595 if (hw->max_frame_size > E1000_RXBUFFER_8192)
596 pba -= 8; /* allocate more FIFO for Tx */
597
598 if (hw->mac_type == e1000_82547) {
599 adapter->tx_fifo_head = 0;
600 adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
601 adapter->tx_fifo_size =
602 (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
603 atomic_set(&adapter->tx_fifo_stall, 0);
604 }
605 } else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
606 /* adjust PBA for jumbo frames */
607 ew32(PBA, pba);
608
609 /* To maintain wire speed transmits, the Tx FIFO should be
610 * large enough to accommodate two full transmit packets,
611 * rounded up to the next 1KB and expressed in KB. Likewise,
612 * the Rx FIFO should be large enough to accommodate at least
613 * one full receive packet and is similarly rounded up and
614 * expressed in KB.
615 */
616 pba = er32(PBA);
617 /* upper 16 bits has Tx packet buffer allocation size in KB */
618 tx_space = pba >> 16;
619 /* lower 16 bits has Rx packet buffer allocation size in KB */
620 pba &= 0xffff;
621 /* the Tx fifo also stores 16 bytes of information about the Tx
622 * but don't include ethernet FCS because hardware appends it
623 */
624 min_tx_space = (hw->max_frame_size +
625 sizeof(struct e1000_tx_desc) -
626 ETH_FCS_LEN) * 2;
627 min_tx_space = ALIGN(min_tx_space, 1024);
628 min_tx_space >>= 10;
629 /* software strips receive CRC, so leave room for it */
630 min_rx_space = hw->max_frame_size;
631 min_rx_space = ALIGN(min_rx_space, 1024);
632 min_rx_space >>= 10;
633
634 /* If current Tx allocation is less than the min Tx FIFO size,
635 * and the min Tx FIFO size is less than the current Rx FIFO
636 * allocation, take space away from current Rx allocation
637 */
638 if (tx_space < min_tx_space &&
639 ((min_tx_space - tx_space) < pba)) {
640 pba = pba - (min_tx_space - tx_space);
641
642 /* PCI/PCIx hardware has PBA alignment constraints */
643 switch (hw->mac_type) {
644 case e1000_82545 ... e1000_82546_rev_3:
645 pba &= ~(E1000_PBA_8K - 1);
646 break;
647 default:
648 break;
649 }
650
651 /* if short on Rx space, Rx wins and must trump Tx
652 * adjustment or use Early Receive if available
653 */
654 if (pba < min_rx_space)
655 pba = min_rx_space;
656 }
657 }
658
659 ew32(PBA, pba);
660
661 /* flow control settings:
662 * The high water mark must be low enough to fit one full frame
663 * (or the size used for early receive) above it in the Rx FIFO.
664 * Set it to the lower of:
665 * - 90% of the Rx FIFO size, and
666 * - the full Rx FIFO size minus the early receive size (for parts
667 * with ERT support assuming ERT set to E1000_ERT_2048), or
668 * - the full Rx FIFO size minus one full frame
669 */
670 hwm = min(((pba << 10) * 9 / 10),
671 ((pba << 10) - hw->max_frame_size));
672
673 hw->fc_high_water = hwm & 0xFFF8; /* 8-byte granularity */
674 hw->fc_low_water = hw->fc_high_water - 8;
675 hw->fc_pause_time = E1000_FC_PAUSE_TIME;
676 hw->fc_send_xon = 1;
677 hw->fc = hw->original_fc;
678
679 /* Allow time for pending master requests to run */
680 e1000_reset_hw(hw);
681 if (hw->mac_type >= e1000_82544)
682 ew32(WUC, 0);
683
684 if (e1000_init_hw(hw))
685 e_dev_err("Hardware Error\n");
686 e1000_update_mng_vlan(adapter);
687
688 /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
689 if (hw->mac_type >= e1000_82544 &&
690 hw->autoneg == 1 &&
691 hw->autoneg_advertised == ADVERTISE_1000_FULL) {
692 u32 ctrl = er32(CTRL);
693 /* clear phy power management bit if we are in gig only mode,
694 * which if enabled will attempt negotiation to 100Mb, which
695 * can cause a loss of link at power off or driver unload
696 */
697 ctrl &= ~E1000_CTRL_SWDPIN3;
698 ew32(CTRL, ctrl);
699 }
700
701 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
702 ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
703
704 e1000_reset_adaptive(hw);
705 e1000_phy_get_info(hw, &adapter->phy_info);
706
707 e1000_release_manageability(adapter);
708 }
709
710 /* Dump the eeprom for users having checksum issues */
711 static void e1000_dump_eeprom(struct e1000_adapter *adapter)
712 {
713 struct net_device *netdev = adapter->netdev;
714 struct ethtool_eeprom eeprom;
715 const struct ethtool_ops *ops = netdev->ethtool_ops;
716 u8 *data;
717 int i;
718 u16 csum_old, csum_new = 0;
719
720 eeprom.len = ops->get_eeprom_len(netdev);
721 eeprom.offset = 0;
722
723 data = kmalloc(eeprom.len, GFP_KERNEL);
724 if (!data)
725 return;
726
727 ops->get_eeprom(netdev, &eeprom, data);
728
729 csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
730 (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
731 for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
732 csum_new += data[i] + (data[i + 1] << 8);
733 csum_new = EEPROM_SUM - csum_new;
734
735 pr_err("/*********************/\n");
736 pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
737 pr_err("Calculated : 0x%04x\n", csum_new);
738
739 pr_err("Offset Values\n");
740 pr_err("======== ======\n");
741 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
742
743 pr_err("Include this output when contacting your support provider.\n");
744 pr_err("This is not a software error! Something bad happened to\n");
745 pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
746 pr_err("result in further problems, possibly loss of data,\n");
747 pr_err("corruption or system hangs!\n");
748 pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
749 pr_err("which is invalid and requires you to set the proper MAC\n");
750 pr_err("address manually before continuing to enable this network\n");
751 pr_err("device. Please inspect the EEPROM dump and report the\n");
752 pr_err("issue to your hardware vendor or Intel Customer Support.\n");
753 pr_err("/*********************/\n");
754
755 kfree(data);
756 }
757
758 /**
759 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
760 * @pdev: PCI device information struct
761 *
762 * Return true if an adapter needs ioport resources
763 **/
764 static int e1000_is_need_ioport(struct pci_dev *pdev)
765 {
766 switch (pdev->device) {
767 case E1000_DEV_ID_82540EM:
768 case E1000_DEV_ID_82540EM_LOM:
769 case E1000_DEV_ID_82540EP:
770 case E1000_DEV_ID_82540EP_LOM:
771 case E1000_DEV_ID_82540EP_LP:
772 case E1000_DEV_ID_82541EI:
773 case E1000_DEV_ID_82541EI_MOBILE:
774 case E1000_DEV_ID_82541ER:
775 case E1000_DEV_ID_82541ER_LOM:
776 case E1000_DEV_ID_82541GI:
777 case E1000_DEV_ID_82541GI_LF:
778 case E1000_DEV_ID_82541GI_MOBILE:
779 case E1000_DEV_ID_82544EI_COPPER:
780 case E1000_DEV_ID_82544EI_FIBER:
781 case E1000_DEV_ID_82544GC_COPPER:
782 case E1000_DEV_ID_82544GC_LOM:
783 case E1000_DEV_ID_82545EM_COPPER:
784 case E1000_DEV_ID_82545EM_FIBER:
785 case E1000_DEV_ID_82546EB_COPPER:
786 case E1000_DEV_ID_82546EB_FIBER:
787 case E1000_DEV_ID_82546EB_QUAD_COPPER:
788 return true;
789 default:
790 return false;
791 }
792 }
793
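/**
 * e1000_fix_features - keep Rx and Tx VLAN acceleration in lock-step
 * @netdev: network interface device structure
 * @features: features being requested
 *
 * There is no support for enabling Rx and Tx VLAN tag offload separately,
 * so mirror the Rx flag onto the Tx flag.
 **/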
794 static netdev_features_t e1000_fix_features(struct net_device *netdev,
795 netdev_features_t features)
796 {
797 /* Since there is no support for separate Rx/Tx vlan accel
798 * enable/disable make sure Tx flag is always in same state as Rx.
799 */
800 if (features & NETIF_F_HW_VLAN_CTAG_RX)
801 features |= NETIF_F_HW_VLAN_CTAG_TX;
802 else
803 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
804
805 return features;
806 }
807
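/**
 * e1000_set_features - apply changed netdev features
 * @netdev: network interface device structure
 * @features: features to apply
 *
 * Updates VLAN tag stripping when the Rx VLAN flag changed; for Rx checksum
 * or RXALL changes it records the new setting and reinitializes (or resets)
 * the adapter so the hardware picks it up.
 **/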
808 static int e1000_set_features(struct net_device *netdev,
809 netdev_features_t features)
810 {
811 struct e1000_adapter *adapter = netdev_priv(netdev);
812 netdev_features_t changed = features ^ netdev->features;
813
814 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
815 e1000_vlan_mode(netdev, features);
816
817 if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
818 return 0;
819
820 netdev->features = features;
821 adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
822
823 if (netif_running(netdev))
824 e1000_reinit_locked(adapter);
825 else
826 e1000_reset(adapter);
827
828 return 0;
829 }
830
831 static const struct net_device_ops e1000_netdev_ops = {
832 .ndo_open = e1000_open,
833 .ndo_stop = e1000_close,
834 .ndo_start_xmit = e1000_xmit_frame,
835 .ndo_set_rx_mode = e1000_set_rx_mode,
836 .ndo_set_mac_address = e1000_set_mac,
837 .ndo_tx_timeout = e1000_tx_timeout,
838 .ndo_change_mtu = e1000_change_mtu,
839 .ndo_do_ioctl = e1000_ioctl,
840 .ndo_validate_addr = eth_validate_addr,
841 .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
842 .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
843 #ifdef CONFIG_NET_POLL_CONTROLLER
844 .ndo_poll_controller = e1000_netpoll,
845 #endif
846 .ndo_fix_features = e1000_fix_features,
847 .ndo_set_features = e1000_set_features,
848 };
849
850 /**
851 * e1000_init_hw_struct - initialize members of hw struct
852 * @adapter: board private struct
853 * @hw: structure used by e1000_hw.c
854 *
855 * Factors out initialization of the e1000_hw struct to its own function
856 * that can be called very early at init (just after struct allocation).
857 * Fields are initialized based on PCI device information and
858 * OS network device settings (MTU size).
859 * Returns negative error codes if MAC type setup fails.
860 */
861 static int e1000_init_hw_struct(struct e1000_adapter *adapter,
862 struct e1000_hw *hw)
863 {
864 struct pci_dev *pdev = adapter->pdev;
865
866 /* PCI config space info */
867 hw->vendor_id = pdev->vendor;
868 hw->device_id = pdev->device;
869 hw->subsystem_vendor_id = pdev->subsystem_vendor;
870 hw->subsystem_id = pdev->subsystem_device;
871 hw->revision_id = pdev->revision;
872
873 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
874
875 hw->max_frame_size = adapter->netdev->mtu +
876 ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
877 hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
878
879 /* identify the MAC */
880 if (e1000_set_mac_type(hw)) {
881 e_err(probe, "Unknown MAC Type\n");
882 return -EIO;
883 }
884
885 switch (hw->mac_type) {
886 default:
887 break;
888 case e1000_82541:
889 case e1000_82547:
890 case e1000_82541_rev_2:
891 case e1000_82547_rev_2:
892 hw->phy_init_script = 1;
893 break;
894 }
895
896 e1000_set_media_type(hw);
897 e1000_get_bus_info(hw);
898
899 hw->wait_autoneg_complete = false;
900 hw->tbi_compatibility_en = true;
901 hw->adaptive_ifs = true;
902
903 /* Copper options */
904
905 if (hw->media_type == e1000_media_type_copper) {
906 hw->mdix = AUTO_ALL_MODES;
907 hw->disable_polarity_correction = false;
908 hw->master_slave = E1000_MASTER_SLAVE;
909 }
910
911 return 0;
912 }
913
914 /**
915 * e1000_probe - Device Initialization Routine
916 * @pdev: PCI device information struct
917 * @ent: entry in e1000_pci_tbl
918 *
919 * Returns 0 on success, negative on failure
920 *
921 * e1000_probe initializes an adapter identified by a pci_dev structure.
922 * The OS initialization, configuring of the adapter private structure,
923 * and a hardware reset occur.
924 **/
925 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
926 {
927 struct net_device *netdev;
928 struct e1000_adapter *adapter = NULL;
929 struct e1000_hw *hw;
930
931 static int cards_found;
932 static int global_quad_port_a; /* global ksp3 port a indication */
933 int i, err, pci_using_dac;
934 u16 eeprom_data = 0;
935 u16 tmp = 0;
936 u16 eeprom_apme_mask = E1000_EEPROM_APME;
937 int bars, need_ioport;
938 bool disable_dev = false;
939
940 /* do not allocate ioport bars when not needed */
941 need_ioport = e1000_is_need_ioport(pdev);
942 if (need_ioport) {
943 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
944 err = pci_enable_device(pdev);
945 } else {
946 bars = pci_select_bars(pdev, IORESOURCE_MEM);
947 err = pci_enable_device_mem(pdev);
948 }
949 if (err)
950 return err;
951
952 err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
953 if (err)
954 goto err_pci_reg;
955
956 pci_set_master(pdev);
957 err = pci_save_state(pdev);
958 if (err)
959 goto err_alloc_etherdev;
960
961 err = -ENOMEM;
962 netdev = alloc_etherdev(sizeof(struct e1000_adapter));
963 if (!netdev)
964 goto err_alloc_etherdev;
965
966 SET_NETDEV_DEV(netdev, &pdev->dev);
967
968 pci_set_drvdata(pdev, netdev);
969 adapter = netdev_priv(netdev);
970 adapter->netdev = netdev;
971 adapter->pdev = pdev;
972 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
973 adapter->bars = bars;
974 adapter->need_ioport = need_ioport;
975
976 hw = &adapter->hw;
977 hw->back = adapter;
978
979 err = -EIO;
980 hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
981 if (!hw->hw_addr)
982 goto err_ioremap;
983
984 if (adapter->need_ioport) {
985 for (i = BAR_1; i <= BAR_5; i++) {
986 if (pci_resource_len(pdev, i) == 0)
987 continue;
988 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
989 hw->io_base = pci_resource_start(pdev, i);
990 break;
991 }
992 }
993 }
994
995 /* make ready for any if (hw->...) below */
996 err = e1000_init_hw_struct(adapter, hw);
997 if (err)
998 goto err_sw_init;
999
1000 /* there is a workaround being applied below that limits
1001 * 64-bit DMA addresses to 64-bit hardware. There are some
1002 * 32-bit adapters that hang on Tx when given 64-bit DMA addresses
1003 */
1004 pci_using_dac = 0;
1005 if ((hw->bus_type == e1000_bus_type_pcix) &&
1006 !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
1007 pci_using_dac = 1;
1008 } else {
1009 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1010 if (err) {
1011 pr_err("No usable DMA config, aborting\n");
1012 goto err_dma;
1013 }
1014 }
1015
1016 netdev->netdev_ops = &e1000_netdev_ops;
1017 e1000_set_ethtool_ops(netdev);
1018 netdev->watchdog_timeo = 5 * HZ;
1019 netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
1020
1021 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1022
1023 adapter->bd_number = cards_found;
1024
1025 /* setup the private structure */
1026
1027 err = e1000_sw_init(adapter);
1028 if (err)
1029 goto err_sw_init;
1030
1031 err = -EIO;
1032 if (hw->mac_type == e1000_ce4100) {
1033 hw->ce4100_gbe_mdio_base_virt =
1034 ioremap(pci_resource_start(pdev, BAR_1),
1035 pci_resource_len(pdev, BAR_1));
1036
1037 if (!hw->ce4100_gbe_mdio_base_virt)
1038 goto err_mdio_ioremap;
1039 }
1040
1041 if (hw->mac_type >= e1000_82543) {
1042 netdev->hw_features = NETIF_F_SG |
1043 NETIF_F_HW_CSUM |
1044 NETIF_F_HW_VLAN_CTAG_RX;
1045 netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
1046 NETIF_F_HW_VLAN_CTAG_FILTER;
1047 }
1048
1049 if ((hw->mac_type >= e1000_82544) &&
1050 (hw->mac_type != e1000_82547))
1051 netdev->hw_features |= NETIF_F_TSO;
1052
1053 netdev->priv_flags |= IFF_SUPP_NOFCS;
1054
1055 netdev->features |= netdev->hw_features;
1056 netdev->hw_features |= (NETIF_F_RXCSUM |
1057 NETIF_F_RXALL |
1058 NETIF_F_RXFCS);
1059
1060 if (pci_using_dac) {
1061 netdev->features |= NETIF_F_HIGHDMA;
1062 netdev->vlan_features |= NETIF_F_HIGHDMA;
1063 }
1064
1065 netdev->vlan_features |= (NETIF_F_TSO |
1066 NETIF_F_HW_CSUM |
1067 NETIF_F_SG);
1068
1069 /* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */
1070 if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
1071 hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
1072 netdev->priv_flags |= IFF_UNICAST_FLT;
1073
1074 /* MTU range: 46 - 16110 */
1075 netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
1076 netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
1077
1078 adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
1079
1080 /* initialize eeprom parameters */
1081 if (e1000_init_eeprom_params(hw)) {
1082 e_err(probe, "EEPROM initialization failed\n");
1083 goto err_eeprom;
1084 }
1085
1086 /* before reading the EEPROM, reset the controller to
1087 * put the device in a known good starting state
1088 */
1089
1090 e1000_reset_hw(hw);
1091
1092 /* make sure the EEPROM is good */
1093 if (e1000_validate_eeprom_checksum(hw) < 0) {
1094 e_err(probe, "The EEPROM Checksum Is Not Valid\n");
1095 e1000_dump_eeprom(adapter);
1096 /* set MAC address to all zeroes to invalidate and temporarily
1097 * disable this device for the user. This blocks regular
1098 * traffic while still permitting ethtool ioctls from reaching
1099 * the hardware as well as allowing the user to run the
1100 * interface after manually setting a hw addr using
1101 * `ip set address`
1102 */
1103 memset(hw->mac_addr, 0, netdev->addr_len);
1104 } else {
1105 /* copy the MAC address out of the EEPROM */
1106 if (e1000_read_mac_addr(hw))
1107 e_err(probe, "EEPROM Read Error\n");
1108 }
1109 /* don't block initialization here due to bad MAC address */
1110 memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
1111
1112 if (!is_valid_ether_addr(netdev->dev_addr))
1113 e_err(probe, "Invalid MAC Address\n");
1114
1115
1116 INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
1117 INIT_DELAYED_WORK(&adapter->fifo_stall_task,
1118 e1000_82547_tx_fifo_stall_task);
1119 INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
1120 INIT_WORK(&adapter->reset_task, e1000_reset_task);
1121
1122 e1000_check_options(adapter);
1123
1124 /* Initial Wake on LAN setting
1125 * If APM wake is enabled in the EEPROM,
1126 * enable the ACPI Magic Packet filter
1127 */
1128
1129 switch (hw->mac_type) {
1130 case e1000_82542_rev2_0:
1131 case e1000_82542_rev2_1:
1132 case e1000_82543:
1133 break;
1134 case e1000_82544:
1135 e1000_read_eeprom(hw,
1136 EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1137 eeprom_apme_mask = E1000_EEPROM_82544_APM;
1138 break;
1139 case e1000_82546:
1140 case e1000_82546_rev_3:
1141 if (er32(STATUS) & E1000_STATUS_FUNC_1) {
1142 e1000_read_eeprom(hw,
1143 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1144 break;
1145 }
1146 /* Fall Through */
1147 default:
1148 e1000_read_eeprom(hw,
1149 EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1150 break;
1151 }
1152 if (eeprom_data & eeprom_apme_mask)
1153 adapter->eeprom_wol |= E1000_WUFC_MAG;
1154
1155 /* now that we have the eeprom settings, apply the special cases
1156 * where the eeprom may be wrong or the board simply won't support
1157 * wake on lan on a particular port
1158 */
1159 switch (pdev->device) {
1160 case E1000_DEV_ID_82546GB_PCIE:
1161 adapter->eeprom_wol = 0;
1162 break;
1163 case E1000_DEV_ID_82546EB_FIBER:
1164 case E1000_DEV_ID_82546GB_FIBER:
1165 /* Wake events only supported on port A for dual fiber
1166 * regardless of eeprom setting
1167 */
1168 if (er32(STATUS) & E1000_STATUS_FUNC_1)
1169 adapter->eeprom_wol = 0;
1170 break;
1171 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1172 /* if quad port adapter, disable WoL on all but port A */
1173 if (global_quad_port_a != 0)
1174 adapter->eeprom_wol = 0;
1175 else
1176 adapter->quad_port_a = true;
1177 /* Reset for multiple quad port adapters */
1178 if (++global_quad_port_a == 4)
1179 global_quad_port_a = 0;
1180 break;
1181 }
1182
1183 /* initialize the wol settings based on the eeprom settings */
1184 adapter->wol = adapter->eeprom_wol;
1185 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1186
1187 /* Auto detect PHY address */
1188 if (hw->mac_type == e1000_ce4100) {
1189 for (i = 0; i < 32; i++) {
1190 hw->phy_addr = i;
1191 e1000_read_phy_reg(hw, PHY_ID2, &tmp);
1192
1193 if (tmp != 0 && tmp != 0xFF)
1194 break;
1195 }
1196
1197 if (i >= 32)
1198 goto err_eeprom;
1199 }
1200
1201 /* reset the hardware with the new settings */
1202 e1000_reset(adapter);
1203
1204 strcpy(netdev->name, "eth%d");
1205 err = register_netdev(netdev);
1206 if (err)
1207 goto err_register;
1208
1209 e1000_vlan_filter_on_off(adapter, false);
1210
1211 /* print bus type/speed/width info */
1212 e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
1213 ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1214 ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
1215 (hw->bus_speed == e1000_bus_speed_120) ? 120 :
1216 (hw->bus_speed == e1000_bus_speed_100) ? 100 :
1217 (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
1218 ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
1219 netdev->dev_addr);
1220
1221 /* carrier off reporting is important to ethtool even BEFORE open */
1222 netif_carrier_off(netdev);
1223
1224 e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
1225
1226 cards_found++;
1227 return 0;
1228
1229 err_register:
1230 err_eeprom:
1231 e1000_phy_hw_reset(hw);
1232
1233 if (hw->flash_address)
1234 iounmap(hw->flash_address);
1235 kfree(adapter->tx_ring);
1236 kfree(adapter->rx_ring);
1237 err_dma:
1238 err_sw_init:
1239 err_mdio_ioremap:
1240 iounmap(hw->ce4100_gbe_mdio_base_virt);
1241 iounmap(hw->hw_addr);
1242 err_ioremap:
1243 disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1244 free_netdev(netdev);
1245 err_alloc_etherdev:
1246 pci_release_selected_regions(pdev, bars);
1247 err_pci_reg:
1248 if (!adapter || disable_dev)
1249 pci_disable_device(pdev);
1250 return err;
1251 }
1252
1253 /**
1254 * e1000_remove - Device Removal Routine
1255 * @pdev: PCI device information struct
1256 *
1257 * e1000_remove is called by the PCI subsystem to alert the driver
1258 * that it should release a PCI device. That could be caused by a
1259 * Hot-Plug event, or because the driver is going to be removed from
1260 * memory.
1261 **/
1262 static void e1000_remove(struct pci_dev *pdev)
1263 {
1264 struct net_device *netdev = pci_get_drvdata(pdev);
1265 struct e1000_adapter *adapter = netdev_priv(netdev);
1266 struct e1000_hw *hw = &adapter->hw;
1267 bool disable_dev;
1268
1269 e1000_down_and_stop(adapter);
1270 e1000_release_manageability(adapter);
1271
1272 unregister_netdev(netdev);
1273
1274 e1000_phy_hw_reset(hw);
1275
1276 kfree(adapter->tx_ring);
1277 kfree(adapter->rx_ring);
1278
1279 if (hw->mac_type == e1000_ce4100)
1280 iounmap(hw->ce4100_gbe_mdio_base_virt);
1281 iounmap(hw->hw_addr);
1282 if (hw->flash_address)
1283 iounmap(hw->flash_address);
1284 pci_release_selected_regions(pdev, adapter->bars);
1285
1286 disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1287 free_netdev(netdev);
1288
1289 if (disable_dev)
1290 pci_disable_device(pdev);
1291 }
1292
1293 /**
1294 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
1295 * @adapter: board private structure to initialize
1296 *
1297 * e1000_sw_init initializes the Adapter private data structure.
1298 * e1000_init_hw_struct MUST be called before this function
1299 **/
1300 static int e1000_sw_init(struct e1000_adapter *adapter)
1301 {
1302 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1303
1304 adapter->num_tx_queues = 1;
1305 adapter->num_rx_queues = 1;
1306
1307 if (e1000_alloc_queues(adapter)) {
1308 e_err(probe, "Unable to allocate memory for queues\n");
1309 return -ENOMEM;
1310 }
1311
1312 /* Explicitly disable IRQ since the NIC can be in any state. */
1313 e1000_irq_disable(adapter);
1314
1315 spin_lock_init(&adapter->stats_lock);
1316
1317 set_bit(__E1000_DOWN, &adapter->flags);
1318
1319 return 0;
1320 }
1321
1322 /**
1323 * e1000_alloc_queues - Allocate memory for all rings
1324 * @adapter: board private structure to initialize
1325 *
1326 * We allocate one ring per queue at run-time since we don't know the
1327 * number of queues at compile-time.
1328 **/
1329 static int e1000_alloc_queues(struct e1000_adapter *adapter)
1330 {
1331 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1332 sizeof(struct e1000_tx_ring), GFP_KERNEL);
1333 if (!adapter->tx_ring)
1334 return -ENOMEM;
1335
1336 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1337 sizeof(struct e1000_rx_ring), GFP_KERNEL);
1338 if (!adapter->rx_ring) {
1339 kfree(adapter->tx_ring);
1340 return -ENOMEM;
1341 }
1342
1343 return E1000_SUCCESS;
1344 }
1345
1346 /**
1347 * e1000_open - Called when a network interface is made active
1348 * @netdev: network interface device structure
1349 *
1350 * Returns 0 on success, negative value on failure
1351 *
1352 * The open entry point is called when a network interface is made
1353 * active by the system (IFF_UP). At this point all resources needed
1354 * for transmit and receive operations are allocated, the interrupt
1355 * handler is registered with the OS, the watchdog task is started,
1356 * and the stack is notified that the interface is ready.
1357 **/
1358 int e1000_open(struct net_device *netdev)
1359 {
1360 struct e1000_adapter *adapter = netdev_priv(netdev);
1361 struct e1000_hw *hw = &adapter->hw;
1362 int err;
1363
1364 /* disallow open during test */
1365 if (test_bit(__E1000_TESTING, &adapter->flags))
1366 return -EBUSY;
1367
1368 netif_carrier_off(netdev);
1369
1370 /* allocate transmit descriptors */
1371 err = e1000_setup_all_tx_resources(adapter);
1372 if (err)
1373 goto err_setup_tx;
1374
1375 /* allocate receive descriptors */
1376 err = e1000_setup_all_rx_resources(adapter);
1377 if (err)
1378 goto err_setup_rx;
1379
1380 e1000_power_up_phy(adapter);
1381
1382 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1383 if ((hw->mng_cookie.status &
1384 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1385 e1000_update_mng_vlan(adapter);
1386 }
1387
1388 /* before we allocate an interrupt, we must be ready to handle it.
1389 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1390 * as soon as we call pci_request_irq, so we have to setup our
1391 * clean_rx handler before we do so.
1392 */
1393 e1000_configure(adapter);
1394
1395 err = e1000_request_irq(adapter);
1396 if (err)
1397 goto err_req_irq;
1398
1399 /* From here on the code is the same as e1000_up() */
1400 clear_bit(__E1000_DOWN, &adapter->flags);
1401
1402 napi_enable(&adapter->napi);
1403
1404 e1000_irq_enable(adapter);
1405
1406 netif_start_queue(netdev);
1407
1408 /* fire a link status change interrupt to start the watchdog */
1409 ew32(ICS, E1000_ICS_LSC);
1410
1411 return E1000_SUCCESS;
1412
1413 err_req_irq:
1414 e1000_power_down_phy(adapter);
1415 e1000_free_all_rx_resources(adapter);
1416 err_setup_rx:
1417 e1000_free_all_tx_resources(adapter);
1418 err_setup_tx:
1419 e1000_reset(adapter);
1420
1421 return err;
1422 }
1423
1424 /**
1425 * e1000_close - Disables a network interface
1426 * @netdev: network interface device structure
1427 *
1428 * Returns 0, this is not allowed to fail
1429 *
1430 * The close entry point is called when an interface is de-activated
1431 * by the OS. The hardware is still under the drivers control, but
1432 * needs to be disabled. A global MAC reset is issued to stop the
1433 * hardware, and all transmit and receive resources are freed.
1434 **/
1435 int e1000_close(struct net_device *netdev)
1436 {
1437 struct e1000_adapter *adapter = netdev_priv(netdev);
1438 struct e1000_hw *hw = &adapter->hw;
1439 int count = E1000_CHECK_RESET_COUNT;
1440
1441 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--)
1442 usleep_range(10000, 20000);
1443
1444 WARN_ON(count < 0);
1445
1446 /* signal that we're down so that the reset task will no longer run */
1447 set_bit(__E1000_DOWN, &adapter->flags);
1448 clear_bit(__E1000_RESETTING, &adapter->flags);
1449
1450 e1000_down(adapter);
1451 e1000_power_down_phy(adapter);
1452 e1000_free_irq(adapter);
1453
1454 e1000_free_all_tx_resources(adapter);
1455 e1000_free_all_rx_resources(adapter);
1456
1457 /* kill manageability vlan ID if supported, but not if a vlan with
1458 * the same ID is registered on the host OS (let 8021q kill it)
1459 */
1460 if ((hw->mng_cookie.status &
1461 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1462 !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
1463 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
1464 adapter->mng_vlan_id);
1465 }
1466
1467 return 0;
1468 }
1469
1470 /**
1471 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1472 * @adapter: address of board private structure
1473 * @start: address of beginning of memory
1474 * @len: length of memory
1475 **/
1476 static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1477 unsigned long len)
1478 {
1479 struct e1000_hw *hw = &adapter->hw;
1480 unsigned long begin = (unsigned long)start;
1481 unsigned long end = begin + len;
1482
1483 /* First rev 82545 and 82546 need to not allow any memory
1484 * write location to cross 64k boundary due to errata 23
1485 */
1486 if (hw->mac_type == e1000_82545 ||
1487 hw->mac_type == e1000_ce4100 ||
1488 hw->mac_type == e1000_82546) {
1489 return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
1490 }
1491
1492 return true;
1493 }
1494
1495 /**
1496 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
1497 * @adapter: board private structure
1498 * @txdr: tx descriptor ring (for a specific queue) to setup
1499 *
1500 * Return 0 on success, negative on failure
1501 **/
1502 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1503 struct e1000_tx_ring *txdr)
1504 {
1505 struct pci_dev *pdev = adapter->pdev;
1506 int size;
1507
1508 size = sizeof(struct e1000_tx_buffer) * txdr->count;
1509 txdr->buffer_info = vzalloc(size);
1510 if (!txdr->buffer_info)
1511 return -ENOMEM;
1512
1513 /* round up to nearest 4K */
1514
1515 txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1516 txdr->size = ALIGN(txdr->size, 4096);
1517
1518 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1519 GFP_KERNEL);
1520 if (!txdr->desc) {
1521 setup_tx_desc_die:
1522 vfree(txdr->buffer_info);
1523 return -ENOMEM;
1524 }
1525
1526 /* Fix for errata 23, can't cross 64kB boundary */
1527 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1528 void *olddesc = txdr->desc;
1529 dma_addr_t olddma = txdr->dma;
1530 e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
1531 txdr->size, txdr->desc);
1532 /* Try again, without freeing the previous */
1533 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
1534 &txdr->dma, GFP_KERNEL);
1535 /* Failed allocation, critical failure */
1536 if (!txdr->desc) {
1537 dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1538 olddma);
1539 goto setup_tx_desc_die;
1540 }
1541
1542 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1543 /* give up */
1544 dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1545 txdr->dma);
1546 dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1547 olddma);
1548 e_err(probe, "Unable to allocate aligned memory "
1549 "for the transmit descriptor ring\n");
1550 vfree(txdr->buffer_info);
1551 return -ENOMEM;
1552 } else {
1553 /* Free old allocation, new allocation was successful */
1554 dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1555 olddma);
1556 }
1557 }
1558 memset(txdr->desc, 0, txdr->size);
1559
1560 txdr->next_to_use = 0;
1561 txdr->next_to_clean = 0;
1562
1563 return 0;
1564 }
1565
1566 /**
1567 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1568 * (Descriptors) for all queues
1569 * @adapter: board private structure
1570 *
1571 * Return 0 on success, negative on failure
1572 **/
1573 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1574 {
1575 int i, err = 0;
1576
1577 for (i = 0; i < adapter->num_tx_queues; i++) {
1578 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1579 if (err) {
1580 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
1581 for (i-- ; i >= 0; i--)
1582 e1000_free_tx_resources(adapter,
1583 &adapter->tx_ring[i]);
1584 break;
1585 }
1586 }
1587
1588 return err;
1589 }
1590
1591 /**
1592 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1593 * @adapter: board private structure
1594 *
1595 * Configure the Tx unit of the MAC after a reset.
1596 **/
1597 static void e1000_configure_tx(struct e1000_adapter *adapter)
1598 {
1599 u64 tdba;
1600 struct e1000_hw *hw = &adapter->hw;
1601 u32 tdlen, tctl, tipg;
1602 u32 ipgr1, ipgr2;
1603
1604 /* Setup the HW Tx Head and Tail descriptor pointers */
1605
1606 switch (adapter->num_tx_queues) {
1607 case 1:
1608 default:
1609 tdba = adapter->tx_ring[0].dma;
1610 tdlen = adapter->tx_ring[0].count *
1611 sizeof(struct e1000_tx_desc);
1612 ew32(TDLEN, tdlen);
1613 ew32(TDBAH, (tdba >> 32));
1614 ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1615 ew32(TDT, 0);
1616 ew32(TDH, 0);
1617 adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
1618 E1000_TDH : E1000_82542_TDH);
1619 adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
1620 E1000_TDT : E1000_82542_TDT);
1621 break;
1622 }
1623
1624 /* Set the default values for the Tx Inter Packet Gap timer */
1625 if ((hw->media_type == e1000_media_type_fiber ||
1626 hw->media_type == e1000_media_type_internal_serdes))
1627 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1628 else
1629 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1630
1631 switch (hw->mac_type) {
1632 case e1000_82542_rev2_0:
1633 case e1000_82542_rev2_1:
1634 tipg = DEFAULT_82542_TIPG_IPGT;
1635 ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1636 ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1637 break;
1638 default:
1639 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1640 ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1641 break;
1642 }
1643 tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1644 tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1645 ew32(TIPG, tipg);
1646
1647 /* Set the Tx Interrupt Delay register */
1648
1649 ew32(TIDV, adapter->tx_int_delay);
1650 if (hw->mac_type >= e1000_82540)
1651 ew32(TADV, adapter->tx_abs_int_delay);
1652
1653 /* Program the Transmit Control Register */
1654
1655 tctl = er32(TCTL);
1656 tctl &= ~E1000_TCTL_CT;
1657 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1658 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1659
1660 e1000_config_collision_dist(hw);
1661
1662 /* Setup Transmit Descriptor Settings for eop descriptor */
1663 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1664
1665 /* only set IDE if we are delaying interrupts using the timers */
1666 if (adapter->tx_int_delay)
1667 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1668
1669 if (hw->mac_type < e1000_82543)
1670 adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1671 else
1672 adapter->txd_cmd |= E1000_TXD_CMD_RS;
1673
1674 /* Cache if we're 82544 running in PCI-X because we'll
1675 * need this to apply a workaround later in the send path.
1676 */
1677 if (hw->mac_type == e1000_82544 &&
1678 hw->bus_type == e1000_bus_type_pcix)
1679 adapter->pcix_82544 = true;
1680
1681 ew32(TCTL, tctl);
1682
1683 }
1684
1685 /**
1686 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1687 * @adapter: board private structure
1688 * @rxdr: rx descriptor ring (for a specific queue) to setup
1689 *
1690 * Returns 0 on success, negative on failure
1691 **/
1692 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1693 struct e1000_rx_ring *rxdr)
1694 {
1695 struct pci_dev *pdev = adapter->pdev;
1696 int size, desc_len;
1697
1698 size = sizeof(struct e1000_rx_buffer) * rxdr->count;
1699 rxdr->buffer_info = vzalloc(size);
1700 if (!rxdr->buffer_info)
1701 return -ENOMEM;
1702
1703 desc_len = sizeof(struct e1000_rx_desc);
1704
1705 /* Round up to nearest 4K */
1706
1707 rxdr->size = rxdr->count * desc_len;
1708 rxdr->size = ALIGN(rxdr->size, 4096);
1709
1710 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1711 GFP_KERNEL);
1712 if (!rxdr->desc) {
1713 setup_rx_desc_die:
1714 vfree(rxdr->buffer_info);
1715 return -ENOMEM;
1716 }
1717
1718 /* Fix for errata 23, can't cross 64kB boundary */
1719 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1720 void *olddesc = rxdr->desc;
1721 dma_addr_t olddma = rxdr->dma;
1722 e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
1723 rxdr->size, rxdr->desc);
1724 /* Try again, without freeing the previous */
1725 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1726 &rxdr->dma, GFP_KERNEL);
1727 /* Failed allocation, critical failure */
1728 if (!rxdr->desc) {
1729 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1730 olddma);
1731 goto setup_rx_desc_die;
1732 }
1733
1734 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1735 /* give up */
1736 dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1737 rxdr->dma);
1738 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1739 olddma);
1740 e_err(probe, "Unable to allocate aligned memory for "
1741 "the Rx descriptor ring\n");
1742 goto setup_rx_desc_die;
1743 } else {
1744 /* Free old allocation, new allocation was successful */
1745 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1746 olddma);
1747 }
1748 }
1749 memset(rxdr->desc, 0, rxdr->size);
1750
1751 rxdr->next_to_clean = 0;
1752 rxdr->next_to_use = 0;
1753 rxdr->rx_skb_top = NULL;
1754
1755 return 0;
1756 }
1757
1758 /**
1759 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1760 * (Descriptors) for all queues
1761 * @adapter: board private structure
1762 *
1763 * Return 0 on success, negative on failure
1764 **/
1765 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1766 {
1767 int i, err = 0;
1768
1769 for (i = 0; i < adapter->num_rx_queues; i++) {
1770 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1771 if (err) {
1772 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
1773 for (i-- ; i >= 0; i--)
1774 e1000_free_rx_resources(adapter,
1775 &adapter->rx_ring[i]);
1776 break;
1777 }
1778 }
1779
1780 return err;
1781 }
1782
1783 /**
1784 * e1000_setup_rctl - configure the receive control registers
1785 * @adapter: Board private structure
1786 **/
1787 static void e1000_setup_rctl(struct e1000_adapter *adapter)
1788 {
1789 struct e1000_hw *hw = &adapter->hw;
1790 u32 rctl;
1791
1792 rctl = er32(RCTL);
1793
1794 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1795
1796 rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1797 E1000_RCTL_RDMTS_HALF |
1798 (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1799
1800 if (hw->tbi_compatibility_on == 1)
1801 rctl |= E1000_RCTL_SBP;
1802 else
1803 rctl &= ~E1000_RCTL_SBP;
1804
1805 if (adapter->netdev->mtu <= ETH_DATA_LEN)
1806 rctl &= ~E1000_RCTL_LPE;
1807 else
1808 rctl |= E1000_RCTL_LPE;
1809
1810 /* Setup buffer sizes */
1811 rctl &= ~E1000_RCTL_SZ_4096;
1812 rctl |= E1000_RCTL_BSEX;
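	/* BSEX extends the SZ encoding so that 4096/8192/16384-byte buffers can
	 * be selected; it is cleared again below for the default 2048-byte case
	 */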
1813 switch (adapter->rx_buffer_len) {
1814 case E1000_RXBUFFER_2048:
1815 default:
1816 rctl |= E1000_RCTL_SZ_2048;
1817 rctl &= ~E1000_RCTL_BSEX;
1818 break;
1819 case E1000_RXBUFFER_4096:
1820 rctl |= E1000_RCTL_SZ_4096;
1821 break;
1822 case E1000_RXBUFFER_8192:
1823 rctl |= E1000_RCTL_SZ_8192;
1824 break;
1825 case E1000_RXBUFFER_16384:
1826 rctl |= E1000_RCTL_SZ_16384;
1827 break;
1828 }
1829
1830 /* This is useful for sniffing bad packets. */
1831 if (adapter->netdev->features & NETIF_F_RXALL) {
1832 /* UPE and MPE will be handled by normal PROMISC logic
1833 * in e1000_set_rx_mode
1834 */
1835 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
1836 E1000_RCTL_BAM | /* RX All Bcast Pkts */
1837 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
1838
1839 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
1840 E1000_RCTL_DPF | /* Allow filtered pause */
1841 E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
1842 /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
1843 * and that breaks VLANs.
1844 */
1845 }
1846
1847 ew32(RCTL, rctl);
1848 }
1849
1850 /**
1851 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1852 * @adapter: board private structure
1853 *
1854 * Configure the Rx unit of the MAC after a reset.
1855 **/
1856 static void e1000_configure_rx(struct e1000_adapter *adapter)
1857 {
1858 u64 rdba;
1859 struct e1000_hw *hw = &adapter->hw;
1860 u32 rdlen, rctl, rxcsum;
1861
1862 if (adapter->netdev->mtu > ETH_DATA_LEN) {
1863 rdlen = adapter->rx_ring[0].count *
1864 sizeof(struct e1000_rx_desc);
1865 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
1866 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1867 } else {
1868 rdlen = adapter->rx_ring[0].count *
1869 sizeof(struct e1000_rx_desc);
1870 adapter->clean_rx = e1000_clean_rx_irq;
1871 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1872 }
1873
1874 /* disable receives while setting up the descriptors */
1875 rctl = er32(RCTL);
1876 ew32(RCTL, rctl & ~E1000_RCTL_EN);
1877
1878 /* set the Receive Delay Timer Register */
1879 ew32(RDTR, adapter->rx_int_delay);
1880
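	/* ITR is programmed in 256 ns units, so an interrupts-per-second target
	 * converts to a register value of 10^9 / (target * 256)
	 */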
1881 if (hw->mac_type >= e1000_82540) {
1882 ew32(RADV, adapter->rx_abs_int_delay);
1883 if (adapter->itr_setting != 0)
1884 ew32(ITR, 1000000000 / (adapter->itr * 256));
1885 }
1886
1887 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1888 * the Base and Length of the Rx Descriptor Ring
1889 */
1890 switch (adapter->num_rx_queues) {
1891 case 1:
1892 default:
1893 rdba = adapter->rx_ring[0].dma;
1894 ew32(RDLEN, rdlen);
1895 ew32(RDBAH, (rdba >> 32));
1896 ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1897 ew32(RDT, 0);
1898 ew32(RDH, 0);
1899 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
1900 E1000_RDH : E1000_82542_RDH);
1901 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
1902 E1000_RDT : E1000_82542_RDT);
1903 break;
1904 }
1905
1906 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
1907 if (hw->mac_type >= e1000_82543) {
1908 rxcsum = er32(RXCSUM);
1909 if (adapter->rx_csum)
1910 rxcsum |= E1000_RXCSUM_TUOFL;
1911 else
1912 /* don't need to clear IPPCSE as it defaults to 0 */
1913 rxcsum &= ~E1000_RXCSUM_TUOFL;
1914 ew32(RXCSUM, rxcsum);
1915 }
1916
1917 /* Enable Receives */
1918 ew32(RCTL, rctl | E1000_RCTL_EN);
1919 }
1920
1921 /**
1922 * e1000_free_tx_resources - Free Tx Resources per Queue
1923 * @adapter: board private structure
1924 * @tx_ring: Tx descriptor ring for a specific queue
1925 *
1926 * Free all transmit software resources
1927 **/
1928 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1929 struct e1000_tx_ring *tx_ring)
1930 {
1931 struct pci_dev *pdev = adapter->pdev;
1932
1933 e1000_clean_tx_ring(adapter, tx_ring);
1934
1935 vfree(tx_ring->buffer_info);
1936 tx_ring->buffer_info = NULL;
1937
1938 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1939 tx_ring->dma);
1940
1941 tx_ring->desc = NULL;
1942 }
1943
1944 /**
1945 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
1946 * @adapter: board private structure
1947 *
1948 * Free all transmit software resources
1949 **/
e1000_free_all_tx_resources(struct e1000_adapter * adapter)1950 void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1951 {
1952 int i;
1953
1954 for (i = 0; i < adapter->num_tx_queues; i++)
1955 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1956 }
1957
1958 static void
1959 e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1960 struct e1000_tx_buffer *buffer_info)
1961 {
1962 if (buffer_info->dma) {
1963 if (buffer_info->mapped_as_page)
1964 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1965 buffer_info->length, DMA_TO_DEVICE);
1966 else
1967 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1968 buffer_info->length,
1969 DMA_TO_DEVICE);
1970 buffer_info->dma = 0;
1971 }
1972 if (buffer_info->skb) {
1973 dev_kfree_skb_any(buffer_info->skb);
1974 buffer_info->skb = NULL;
1975 }
1976 buffer_info->time_stamp = 0;
1977 /* buffer_info must be completely set up in the transmit path */
1978 }
1979
1980 /**
1981 * e1000_clean_tx_ring - Free Tx Buffers
1982 * @adapter: board private structure
1983 * @tx_ring: ring to be cleaned
1984 **/
1985 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
1986 struct e1000_tx_ring *tx_ring)
1987 {
1988 struct e1000_hw *hw = &adapter->hw;
1989 struct e1000_tx_buffer *buffer_info;
1990 unsigned long size;
1991 unsigned int i;
1992
1993 /* Free all the Tx ring sk_buffs */
1994
1995 for (i = 0; i < tx_ring->count; i++) {
1996 buffer_info = &tx_ring->buffer_info[i];
1997 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
1998 }
1999
2000 netdev_reset_queue(adapter->netdev);
2001 size = sizeof(struct e1000_tx_buffer) * tx_ring->count;
2002 memset(tx_ring->buffer_info, 0, size);
2003
2004 /* Zero out the descriptor ring */
2005
2006 memset(tx_ring->desc, 0, tx_ring->size);
2007
2008 tx_ring->next_to_use = 0;
2009 tx_ring->next_to_clean = 0;
2010 tx_ring->last_tx_tso = false;
2011
2012 writel(0, hw->hw_addr + tx_ring->tdh);
2013 writel(0, hw->hw_addr + tx_ring->tdt);
2014 }
2015
2016 /**
2017 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2018 * @adapter: board private structure
2019 **/
2020 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2021 {
2022 int i;
2023
2024 for (i = 0; i < adapter->num_tx_queues; i++)
2025 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2026 }
2027
2028 /**
2029 * e1000_free_rx_resources - Free Rx Resources
2030 * @adapter: board private structure
2031 * @rx_ring: ring to clean the resources from
2032 *
2033 * Free all receive software resources
2034 **/
2035 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2036 struct e1000_rx_ring *rx_ring)
2037 {
2038 struct pci_dev *pdev = adapter->pdev;
2039
2040 e1000_clean_rx_ring(adapter, rx_ring);
2041
2042 vfree(rx_ring->buffer_info);
2043 rx_ring->buffer_info = NULL;
2044
2045 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2046 rx_ring->dma);
2047
2048 rx_ring->desc = NULL;
2049 }
2050
2051 /**
2052 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
2053 * @adapter: board private structure
2054 *
2055 * Free all receive software resources
2056 **/
2057 void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2058 {
2059 int i;
2060
2061 for (i = 0; i < adapter->num_rx_queues; i++)
2062 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2063 }
2064
2065 #define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
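/* total size of the page fragment backing one Rx buffer: reserved headroom
 * plus the DMA buffer, plus room for the skb_shared_info needed when the
 * fragment is later wrapped in an skb without copying
 */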
2066 static unsigned int e1000_frag_len(const struct e1000_adapter *a)
2067 {
2068 return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) +
2069 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2070 }
2071
2072 static void *e1000_alloc_frag(const struct e1000_adapter *a)
2073 {
2074 unsigned int len = e1000_frag_len(a);
2075 u8 *data = netdev_alloc_frag(len);
2076
2077 if (likely(data))
2078 data += E1000_HEADROOM;
2079 return data;
2080 }
2081
2082 /**
2083 * e1000_clean_rx_ring - Free Rx Buffers per Queue
2084 * @adapter: board private structure
2085 * @rx_ring: ring to free buffers from
2086 **/
2087 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2088 struct e1000_rx_ring *rx_ring)
2089 {
2090 struct e1000_hw *hw = &adapter->hw;
2091 struct e1000_rx_buffer *buffer_info;
2092 struct pci_dev *pdev = adapter->pdev;
2093 unsigned long size;
2094 unsigned int i;
2095
2096 /* Free all the Rx netfrags */
2097 for (i = 0; i < rx_ring->count; i++) {
2098 buffer_info = &rx_ring->buffer_info[i];
2099 if (adapter->clean_rx == e1000_clean_rx_irq) {
2100 if (buffer_info->dma)
2101 dma_unmap_single(&pdev->dev, buffer_info->dma,
2102 adapter->rx_buffer_len,
2103 DMA_FROM_DEVICE);
2104 if (buffer_info->rxbuf.data) {
2105 skb_free_frag(buffer_info->rxbuf.data);
2106 buffer_info->rxbuf.data = NULL;
2107 }
2108 } else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
2109 if (buffer_info->dma)
2110 dma_unmap_page(&pdev->dev, buffer_info->dma,
2111 adapter->rx_buffer_len,
2112 DMA_FROM_DEVICE);
2113 if (buffer_info->rxbuf.page) {
2114 put_page(buffer_info->rxbuf.page);
2115 buffer_info->rxbuf.page = NULL;
2116 }
2117 }
2118
2119 buffer_info->dma = 0;
2120 }
2121
2122 /* there also may be some cached data from a chained receive */
2123 napi_free_frags(&adapter->napi);
2124 rx_ring->rx_skb_top = NULL;
2125
2126 size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
2127 memset(rx_ring->buffer_info, 0, size);
2128
2129 /* Zero out the descriptor ring */
2130 memset(rx_ring->desc, 0, rx_ring->size);
2131
2132 rx_ring->next_to_clean = 0;
2133 rx_ring->next_to_use = 0;
2134
2135 writel(0, hw->hw_addr + rx_ring->rdh);
2136 writel(0, hw->hw_addr + rx_ring->rdt);
2137 }
2138
2139 /**
2140 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2141 * @adapter: board private structure
2142 **/
2143 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2144 {
2145 int i;
2146
2147 for (i = 0; i < adapter->num_rx_queues; i++)
2148 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2149 }
2150
2151 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2152 * and memory write and invalidate disabled for certain operations
2153 */
2154 static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2155 {
2156 struct e1000_hw *hw = &adapter->hw;
2157 struct net_device *netdev = adapter->netdev;
2158 u32 rctl;
2159
2160 e1000_pci_clear_mwi(hw);
2161
2162 rctl = er32(RCTL);
2163 rctl |= E1000_RCTL_RST;
2164 ew32(RCTL, rctl);
2165 E1000_WRITE_FLUSH();
2166 mdelay(5);
2167
2168 if (netif_running(netdev))
2169 e1000_clean_all_rx_rings(adapter);
2170 }
2171
2172 static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2173 {
2174 struct e1000_hw *hw = &adapter->hw;
2175 struct net_device *netdev = adapter->netdev;
2176 u32 rctl;
2177
2178 rctl = er32(RCTL);
2179 rctl &= ~E1000_RCTL_RST;
2180 ew32(RCTL, rctl);
2181 E1000_WRITE_FLUSH();
2182 mdelay(5);
2183
2184 if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2185 e1000_pci_set_mwi(hw);
2186
2187 if (netif_running(netdev)) {
2188 /* No need to loop, because 82542 supports only 1 queue */
2189 struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2190 e1000_configure_rx(adapter);
2191 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2192 }
2193 }
2194
2195 /**
2196 * e1000_set_mac - Change the Ethernet Address of the NIC
2197 * @netdev: network interface device structure
2198 * @p: pointer to an address structure
2199 *
2200 * Returns 0 on success, negative on failure
2201 **/
2202 static int e1000_set_mac(struct net_device *netdev, void *p)
2203 {
2204 struct e1000_adapter *adapter = netdev_priv(netdev);
2205 struct e1000_hw *hw = &adapter->hw;
2206 struct sockaddr *addr = p;
2207
2208 if (!is_valid_ether_addr(addr->sa_data))
2209 return -EADDRNOTAVAIL;
2210
2211 /* 82542 2.0 needs to be in reset to write receive address registers */
2212
2213 if (hw->mac_type == e1000_82542_rev2_0)
2214 e1000_enter_82542_rst(adapter);
2215
2216 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2217 memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2218
2219 e1000_rar_set(hw, hw->mac_addr, 0);
2220
2221 if (hw->mac_type == e1000_82542_rev2_0)
2222 e1000_leave_82542_rst(adapter);
2223
2224 return 0;
2225 }
2226
2227 /**
2228 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2229 * @netdev: network interface device structure
2230 *
2231 * The set_rx_mode entry point is called whenever the unicast or multicast
2232 * address lists or the network interface flags are updated. This routine is
2233 * responsible for configuring the hardware for proper unicast, multicast,
2234 * promiscuous mode, and all-multi behavior.
2235 **/
2236 static void e1000_set_rx_mode(struct net_device *netdev)
2237 {
2238 struct e1000_adapter *adapter = netdev_priv(netdev);
2239 struct e1000_hw *hw = &adapter->hw;
2240 struct netdev_hw_addr *ha;
2241 bool use_uc = false;
2242 u32 rctl;
2243 u32 hash_value;
2244 int i, rar_entries = E1000_RAR_ENTRIES;
2245 int mta_reg_count = E1000_NUM_MTA_REGISTERS;
2246 u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2247
2248 if (!mcarray)
2249 return;
2250
2251 /* Check for Promiscuous and All Multicast modes */
2252
2253 rctl = er32(RCTL);
2254
2255 if (netdev->flags & IFF_PROMISC) {
2256 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2257 rctl &= ~E1000_RCTL_VFE;
2258 } else {
2259 if (netdev->flags & IFF_ALLMULTI)
2260 rctl |= E1000_RCTL_MPE;
2261 else
2262 rctl &= ~E1000_RCTL_MPE;
2263 /* Enable VLAN filter if there is a VLAN */
2264 if (e1000_vlan_used(adapter))
2265 rctl |= E1000_RCTL_VFE;
2266 }
2267
2268 if (netdev_uc_count(netdev) > rar_entries - 1) {
2269 rctl |= E1000_RCTL_UPE;
2270 } else if (!(netdev->flags & IFF_PROMISC)) {
2271 rctl &= ~E1000_RCTL_UPE;
2272 use_uc = true;
2273 }
2274
2275 ew32(RCTL, rctl);
2276
2277 /* 82542 2.0 needs to be in reset to write receive address registers */
2278
2279 if (hw->mac_type == e1000_82542_rev2_0)
2280 e1000_enter_82542_rst(adapter);
2281
2282 /* Load the first 14 addresses into the exact filters 1-14. Unicast
2283 * addresses take precedence to avoid disabling unicast filtering
2284 * when possible.
2285 *
2286 * RAR 0 is used for the station MAC address; any exact filters
2287 * left unused after this loop are cleared below.
2288 */
2289 i = 1;
2290 if (use_uc)
2291 netdev_for_each_uc_addr(ha, netdev) {
2292 if (i == rar_entries)
2293 break;
2294 e1000_rar_set(hw, ha->addr, i++);
2295 }
2296
2297 netdev_for_each_mc_addr(ha, netdev) {
2298 if (i == rar_entries) {
2299 /* load any remaining addresses into the hash table */
2300 u32 hash_reg, hash_bit, mta;
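			/* the upper bits of the 12-bit hash select one of the
			 * 128 32-bit MTA registers; the low 5 bits select the
			 * bit within that register
			 */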
2301 hash_value = e1000_hash_mc_addr(hw, ha->addr);
2302 hash_reg = (hash_value >> 5) & 0x7F;
2303 hash_bit = hash_value & 0x1F;
2304 mta = (1 << hash_bit);
2305 mcarray[hash_reg] |= mta;
2306 } else {
2307 e1000_rar_set(hw, ha->addr, i++);
2308 }
2309 }
2310
2311 for (; i < rar_entries; i++) {
2312 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2313 E1000_WRITE_FLUSH();
2314 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2315 E1000_WRITE_FLUSH();
2316 }
2317
2318 /* write the whole hash table; writing the registers in reverse order
2319 * avoids problems with write-combining chipsets without flushing each write
2320 */
2321 for (i = mta_reg_count - 1; i >= 0 ; i--) {
2322 /* The 82544 has an erratum where writing odd
2323 * offsets overwrites the previous even offset, but writing
2324 * backwards over the range avoids the issue by always
2325 * writing the odd offset first
2326 */
2327 E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2328 }
2329 E1000_WRITE_FLUSH();
2330
2331 if (hw->mac_type == e1000_82542_rev2_0)
2332 e1000_leave_82542_rst(adapter);
2333
2334 kfree(mcarray);
2335 }
2336
2337 /**
2338 * e1000_update_phy_info_task - get phy info
2339 * @work: work struct contained inside adapter struct
2340 *
2341 * Need to wait a few seconds after link up to get diagnostic information from
2342 * the phy
2343 */
2344 static void e1000_update_phy_info_task(struct work_struct *work)
2345 {
2346 struct e1000_adapter *adapter = container_of(work,
2347 struct e1000_adapter,
2348 phy_info_task.work);
2349
2350 e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2351 }
2352
2353 /**
2354 * e1000_82547_tx_fifo_stall_task - task to complete work
2355 * @work: work struct contained inside adapter struct
2356 **/
2357 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2358 {
2359 struct e1000_adapter *adapter = container_of(work,
2360 struct e1000_adapter,
2361 fifo_stall_task.work);
2362 struct e1000_hw *hw = &adapter->hw;
2363 struct net_device *netdev = adapter->netdev;
2364 u32 tctl;
2365
2366 if (atomic_read(&adapter->tx_fifo_stall)) {
2367 if ((er32(TDT) == er32(TDH)) &&
2368 (er32(TDFT) == er32(TDFH)) &&
2369 (er32(TDFTS) == er32(TDFHS))) {
2370 tctl = er32(TCTL);
2371 ew32(TCTL, tctl & ~E1000_TCTL_EN);
2372 ew32(TDFT, adapter->tx_head_addr);
2373 ew32(TDFH, adapter->tx_head_addr);
2374 ew32(TDFTS, adapter->tx_head_addr);
2375 ew32(TDFHS, adapter->tx_head_addr);
2376 ew32(TCTL, tctl);
2377 E1000_WRITE_FLUSH();
2378
2379 adapter->tx_fifo_head = 0;
2380 atomic_set(&adapter->tx_fifo_stall, 0);
2381 netif_wake_queue(netdev);
2382 } else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
2383 schedule_delayed_work(&adapter->fifo_stall_task, 1);
2384 }
2385 }
2386 }
2387
2388 bool e1000_has_link(struct e1000_adapter *adapter)
2389 {
2390 struct e1000_hw *hw = &adapter->hw;
2391 bool link_active = false;
2392
2393 /* get_link_status is set on LSC (link status) interrupt or rx
2394 * sequence error interrupt (except on intel ce4100).
2395 * get_link_status will stay false until the
2396 * e1000_check_for_link establishes link for copper adapters
2397 * ONLY
2398 */
2399 switch (hw->media_type) {
2400 case e1000_media_type_copper:
2401 if (hw->mac_type == e1000_ce4100)
2402 hw->get_link_status = 1;
2403 if (hw->get_link_status) {
2404 e1000_check_for_link(hw);
2405 link_active = !hw->get_link_status;
2406 } else {
2407 link_active = true;
2408 }
2409 break;
2410 case e1000_media_type_fiber:
2411 e1000_check_for_link(hw);
2412 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2413 break;
2414 case e1000_media_type_internal_serdes:
2415 e1000_check_for_link(hw);
2416 link_active = hw->serdes_has_link;
2417 break;
2418 default:
2419 break;
2420 }
2421
2422 return link_active;
2423 }
2424
2425 /**
2426 * e1000_watchdog - work function
2427 * @work: work struct contained inside adapter struct
2428 **/
2429 static void e1000_watchdog(struct work_struct *work)
2430 {
2431 struct e1000_adapter *adapter = container_of(work,
2432 struct e1000_adapter,
2433 watchdog_task.work);
2434 struct e1000_hw *hw = &adapter->hw;
2435 struct net_device *netdev = adapter->netdev;
2436 struct e1000_tx_ring *txdr = adapter->tx_ring;
2437 u32 link, tctl;
2438
2439 link = e1000_has_link(adapter);
2440 if ((netif_carrier_ok(netdev)) && link)
2441 goto link_up;
2442
2443 if (link) {
2444 if (!netif_carrier_ok(netdev)) {
2445 u32 ctrl;
2446 bool txb2b = true;
2447 /* update snapshot of PHY registers on LSC */
2448 e1000_get_speed_and_duplex(hw,
2449 &adapter->link_speed,
2450 &adapter->link_duplex);
2451
2452 ctrl = er32(CTRL);
2453 pr_info("%s NIC Link is Up %d Mbps %s, "
2454 "Flow Control: %s\n",
2455 netdev->name,
2456 adapter->link_speed,
2457 adapter->link_duplex == FULL_DUPLEX ?
2458 "Full Duplex" : "Half Duplex",
2459 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2460 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2461 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2462 E1000_CTRL_TFCE) ? "TX" : "None")));
2463
2464 /* adjust timeout factor according to speed/duplex */
2465 adapter->tx_timeout_factor = 1;
2466 switch (adapter->link_speed) {
2467 case SPEED_10:
2468 txb2b = false;
2469 adapter->tx_timeout_factor = 16;
2470 break;
2471 case SPEED_100:
2472 txb2b = false;
2473 /* maybe add some timeout factor ? */
2474 break;
2475 }
2476
2477 /* enable transmits in the hardware */
2478 tctl = er32(TCTL);
2479 tctl |= E1000_TCTL_EN;
2480 ew32(TCTL, tctl);
2481
2482 netif_carrier_on(netdev);
2483 if (!test_bit(__E1000_DOWN, &adapter->flags))
2484 schedule_delayed_work(&adapter->phy_info_task,
2485 2 * HZ);
2486 adapter->smartspeed = 0;
2487 }
2488 } else {
2489 if (netif_carrier_ok(netdev)) {
2490 adapter->link_speed = 0;
2491 adapter->link_duplex = 0;
2492 pr_info("%s NIC Link is Down\n",
2493 netdev->name);
2494 netif_carrier_off(netdev);
2495
2496 if (!test_bit(__E1000_DOWN, &adapter->flags))
2497 schedule_delayed_work(&adapter->phy_info_task,
2498 2 * HZ);
2499 }
2500
2501 e1000_smartspeed(adapter);
2502 }
2503
2504 link_up:
2505 e1000_update_stats(adapter);
2506
2507 hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2508 adapter->tpt_old = adapter->stats.tpt;
2509 hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2510 adapter->colc_old = adapter->stats.colc;
2511
2512 adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2513 adapter->gorcl_old = adapter->stats.gorcl;
2514 adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2515 adapter->gotcl_old = adapter->stats.gotcl;
2516
2517 e1000_update_adaptive(hw);
2518
2519 if (!netif_carrier_ok(netdev)) {
2520 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2521 /* We've lost link, so the controller stops DMA,
2522 * but we've got queued Tx work that's never going
2523 * to get done, so reset controller to flush Tx.
2524 * (Do the reset outside of interrupt context).
2525 */
2526 adapter->tx_timeout_count++;
2527 schedule_work(&adapter->reset_task);
2528 /* exit immediately since reset is imminent */
2529 return;
2530 }
2531 }
2532
2533 /* Simple mode for Interrupt Throttle Rate (ITR) */
2534 if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2535 /* Symmetric Tx/Rx gets a reduced ITR=2000;
2536 * Total asymmetrical Tx or Rx gets ITR=8000;
2537 * everyone else is between 2000-8000.
2538 */
2539 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2540 u32 dif = (adapter->gotcl > adapter->gorcl ?
2541 adapter->gotcl - adapter->gorcl :
2542 adapter->gorcl - adapter->gotcl) / 10000;
2543 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
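		/* dif/goc runs from 0 (fully symmetric) to 1 (fully asymmetric),
		 * so itr scales linearly between 2000 and 8000 ints/s
		 */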
2544
2545 ew32(ITR, 1000000000 / (itr * 256));
2546 }
2547
2548 /* Cause software interrupt to ensure rx ring is cleaned */
2549 ew32(ICS, E1000_ICS_RXDMT0);
2550
2551 /* Force detection of hung controller every watchdog period */
2552 adapter->detect_tx_hung = true;
2553
2554 /* Reschedule the task */
2555 if (!test_bit(__E1000_DOWN, &adapter->flags))
2556 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
2557 }
2558
2559 enum latency_range {
2560 lowest_latency = 0,
2561 low_latency = 1,
2562 bulk_latency = 2,
2563 latency_invalid = 255
2564 };
2565
2566 /**
2567 * e1000_update_itr - update the dynamic ITR value based on statistics
2568 * @adapter: pointer to adapter
2569 * @itr_setting: current adapter->itr
2570 * @packets: the number of packets during this measurement interval
2571 * @bytes: the number of bytes during this measurement interval
2572 *
2573 * Stores a new ITR value based on packets and byte
2574 * counts during the last interrupt. The advantage of per interrupt
2575 * computation is faster updates and more accurate ITR for the current
2576 * traffic pattern. Constants in this function were computed
2577 * based on theoretical maximum wire speed and thresholds were set based
2578 * on testing data as well as attempting to minimize response time
2579 * while increasing bulk throughput.
2580 * this functionality is controlled by the InterruptThrottleRate module
2581 * parameter (see e1000_param.c)
2582 **/
2583 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2584 u16 itr_setting, int packets, int bytes)
2585 {
2586 unsigned int retval = itr_setting;
2587 struct e1000_hw *hw = &adapter->hw;
2588
2589 if (unlikely(hw->mac_type < e1000_82540))
2590 goto update_itr_done;
2591
2592 if (packets == 0)
2593 goto update_itr_done;
2594
2595 switch (itr_setting) {
2596 case lowest_latency:
2597 /* jumbo frames get bulk treatment */
2598 if (bytes/packets > 8000)
2599 retval = bulk_latency;
2600 else if ((packets < 5) && (bytes > 512))
2601 retval = low_latency;
2602 break;
2603 case low_latency: /* 50 usec aka 20000 ints/s */
2604 if (bytes > 10000) {
2605 /* jumbo frames need bulk latency setting */
2606 if (bytes/packets > 8000)
2607 retval = bulk_latency;
2608 else if ((packets < 10) || ((bytes/packets) > 1200))
2609 retval = bulk_latency;
2610 else if ((packets > 35))
2611 retval = lowest_latency;
2612 } else if (bytes/packets > 2000)
2613 retval = bulk_latency;
2614 else if (packets <= 2 && bytes < 512)
2615 retval = lowest_latency;
2616 break;
2617 case bulk_latency: /* 250 usec aka 4000 ints/s */
2618 if (bytes > 25000) {
2619 if (packets > 35)
2620 retval = low_latency;
2621 } else if (bytes < 6000) {
2622 retval = low_latency;
2623 }
2624 break;
2625 }
2626
2627 update_itr_done:
2628 return retval;
2629 }
2630
2631 static void e1000_set_itr(struct e1000_adapter *adapter)
2632 {
2633 struct e1000_hw *hw = &adapter->hw;
2634 u16 current_itr;
2635 u32 new_itr = adapter->itr;
2636
2637 if (unlikely(hw->mac_type < e1000_82540))
2638 return;
2639
2640 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2641 if (unlikely(adapter->link_speed != SPEED_1000)) {
2642 current_itr = 0;
2643 new_itr = 4000;
2644 goto set_itr_now;
2645 }
2646
2647 adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
2648 adapter->total_tx_packets,
2649 adapter->total_tx_bytes);
2650 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2651 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2652 adapter->tx_itr = low_latency;
2653
2654 adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
2655 adapter->total_rx_packets,
2656 adapter->total_rx_bytes);
2657 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2658 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2659 adapter->rx_itr = low_latency;
2660
2661 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2662
2663 switch (current_itr) {
2664 /* counts and packets in update_itr are dependent on these numbers */
2665 case lowest_latency:
2666 new_itr = 70000;
2667 break;
2668 case low_latency:
2669 new_itr = 20000; /* aka hwitr = ~200 */
2670 break;
2671 case bulk_latency:
2672 new_itr = 4000;
2673 break;
2674 default:
2675 break;
2676 }
2677
2678 set_itr_now:
2679 if (new_itr != adapter->itr) {
2680 /* this attempts to bias the interrupt rate towards Bulk
2681 * by adding intermediate steps when interrupt rate is
2682 * increasing
2683 */
2684 new_itr = new_itr > adapter->itr ?
2685 min(adapter->itr + (new_itr >> 2), new_itr) :
2686 new_itr;
2687 adapter->itr = new_itr;
2688 ew32(ITR, 1000000000 / (new_itr * 256));
2689 }
2690 }
2691
2692 #define E1000_TX_FLAGS_CSUM 0x00000001
2693 #define E1000_TX_FLAGS_VLAN 0x00000002
2694 #define E1000_TX_FLAGS_TSO 0x00000004
2695 #define E1000_TX_FLAGS_IPV4 0x00000008
2696 #define E1000_TX_FLAGS_NO_FCS 0x00000010
2697 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
2698 #define E1000_TX_FLAGS_VLAN_SHIFT 16
2699
2700 static int e1000_tso(struct e1000_adapter *adapter,
2701 struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2702 __be16 protocol)
2703 {
2704 struct e1000_context_desc *context_desc;
2705 struct e1000_tx_buffer *buffer_info;
2706 unsigned int i;
2707 u32 cmd_length = 0;
2708 u16 ipcse = 0, tucse, mss;
2709 u8 ipcss, ipcso, tucss, tucso, hdr_len;
2710
2711 if (skb_is_gso(skb)) {
2712 int err;
2713
2714 err = skb_cow_head(skb, 0);
2715 if (err < 0)
2716 return err;
2717
2718 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2719 mss = skb_shinfo(skb)->gso_size;
2720 if (protocol == htons(ETH_P_IP)) {
2721 struct iphdr *iph = ip_hdr(skb);
2722 iph->tot_len = 0;
2723 iph->check = 0;
2724 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2725 iph->daddr, 0,
2726 IPPROTO_TCP,
2727 0);
2728 cmd_length = E1000_TXD_CMD_IP;
2729 ipcse = skb_transport_offset(skb) - 1;
2730 } else if (skb_is_gso_v6(skb)) {
2731 ipv6_hdr(skb)->payload_len = 0;
2732 tcp_hdr(skb)->check =
2733 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2734 &ipv6_hdr(skb)->daddr,
2735 0, IPPROTO_TCP, 0);
2736 ipcse = 0;
2737 }
2738 ipcss = skb_network_offset(skb);
2739 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2740 tucss = skb_transport_offset(skb);
2741 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2742 tucse = 0;
2743
2744 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2745 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2746
2747 i = tx_ring->next_to_use;
2748 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2749 buffer_info = &tx_ring->buffer_info[i];
2750
2751 context_desc->lower_setup.ip_fields.ipcss = ipcss;
2752 context_desc->lower_setup.ip_fields.ipcso = ipcso;
2753 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
2754 context_desc->upper_setup.tcp_fields.tucss = tucss;
2755 context_desc->upper_setup.tcp_fields.tucso = tucso;
2756 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2757 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
2758 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2759 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2760
2761 buffer_info->time_stamp = jiffies;
2762 buffer_info->next_to_watch = i;
2763
2764 if (++i == tx_ring->count)
2765 i = 0;
2766
2767 tx_ring->next_to_use = i;
2768
2769 return true;
2770 }
2771 return false;
2772 }
2773
2774 static bool e1000_tx_csum(struct e1000_adapter *adapter,
2775 struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2776 __be16 protocol)
2777 {
2778 struct e1000_context_desc *context_desc;
2779 struct e1000_tx_buffer *buffer_info;
2780 unsigned int i;
2781 u8 css;
2782 u32 cmd_len = E1000_TXD_CMD_DEXT;
2783
2784 if (skb->ip_summed != CHECKSUM_PARTIAL)
2785 return false;
2786
2787 switch (protocol) {
2788 case cpu_to_be16(ETH_P_IP):
2789 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2790 cmd_len |= E1000_TXD_CMD_TCP;
2791 break;
2792 case cpu_to_be16(ETH_P_IPV6):
2793 /* XXX not handling all IPV6 headers */
2794 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2795 cmd_len |= E1000_TXD_CMD_TCP;
2796 break;
2797 default:
2798 if (unlikely(net_ratelimit()))
2799 e_warn(drv, "checksum_partial proto=%x!\n",
2800 skb->protocol);
2801 break;
2802 }
2803
2804 css = skb_checksum_start_offset(skb);
2805
2806 i = tx_ring->next_to_use;
2807 buffer_info = &tx_ring->buffer_info[i];
2808 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2809
2810 context_desc->lower_setup.ip_config = 0;
2811 context_desc->upper_setup.tcp_fields.tucss = css;
2812 context_desc->upper_setup.tcp_fields.tucso =
2813 css + skb->csum_offset;
2814 context_desc->upper_setup.tcp_fields.tucse = 0;
2815 context_desc->tcp_seg_setup.data = 0;
2816 context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2817
2818 buffer_info->time_stamp = jiffies;
2819 buffer_info->next_to_watch = i;
2820
2821 if (unlikely(++i == tx_ring->count))
2822 i = 0;
2823
2824 tx_ring->next_to_use = i;
2825
2826 return true;
2827 }
2828
2829 #define E1000_MAX_TXD_PWR 12
2830 #define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
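/* each data descriptor can carry at most 2^12 = 4096 bytes */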
2831
2832 static int e1000_tx_map(struct e1000_adapter *adapter,
2833 struct e1000_tx_ring *tx_ring,
2834 struct sk_buff *skb, unsigned int first,
2835 unsigned int max_per_txd, unsigned int nr_frags,
2836 unsigned int mss)
2837 {
2838 struct e1000_hw *hw = &adapter->hw;
2839 struct pci_dev *pdev = adapter->pdev;
2840 struct e1000_tx_buffer *buffer_info;
2841 unsigned int len = skb_headlen(skb);
2842 unsigned int offset = 0, size, count = 0, i;
2843 unsigned int f, bytecount, segs;
2844
2845 i = tx_ring->next_to_use;
2846
2847 while (len) {
2848 buffer_info = &tx_ring->buffer_info[i];
2849 size = min(len, max_per_txd);
2850 /* Workaround for controller erratum --
2851 * the descriptor for a non-TSO packet in a linear skb that
2852 * follows a TSO packet gets written back prematurely, before
2853 * the data is fully DMA'd to the controller
2854 */
2855 if (!skb->data_len && tx_ring->last_tx_tso &&
2856 !skb_is_gso(skb)) {
2857 tx_ring->last_tx_tso = false;
2858 size -= 4;
2859 }
2860
2861 /* Workaround for premature desc write-backs
2862 * in TSO mode. Append 4-byte sentinel desc
2863 */
2864 if (unlikely(mss && !nr_frags && size == len && size > 8))
2865 size -= 4;
2866 /* work-around for errata 10 and it applies
2867 * to all controllers in PCI-X mode
2868 * The fix is to make sure that the first descriptor of a
2869 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
2870 */
2871 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
2872 (size > 2015) && count == 0))
2873 size = 2015;
2874
2875 /* Workaround for potential 82544 hang in PCI-X. Avoid
2876 * terminating buffers within evenly-aligned dwords.
2877 */
2878 if (unlikely(adapter->pcix_82544 &&
2879 !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2880 size > 4))
2881 size -= 4;
2882
2883 buffer_info->length = size;
2884 /* set time_stamp *before* dma to help avoid a possible race */
2885 buffer_info->time_stamp = jiffies;
2886 buffer_info->mapped_as_page = false;
2887 buffer_info->dma = dma_map_single(&pdev->dev,
2888 skb->data + offset,
2889 size, DMA_TO_DEVICE);
2890 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2891 goto dma_error;
2892 buffer_info->next_to_watch = i;
2893
2894 len -= size;
2895 offset += size;
2896 count++;
2897 if (len) {
2898 i++;
2899 if (unlikely(i == tx_ring->count))
2900 i = 0;
2901 }
2902 }
2903
2904 for (f = 0; f < nr_frags; f++) {
2905 const struct skb_frag_struct *frag;
2906
2907 frag = &skb_shinfo(skb)->frags[f];
2908 len = skb_frag_size(frag);
2909 offset = 0;
2910
2911 while (len) {
2912 unsigned long bufend;
2913 i++;
2914 if (unlikely(i == tx_ring->count))
2915 i = 0;
2916
2917 buffer_info = &tx_ring->buffer_info[i];
2918 size = min(len, max_per_txd);
2919 /* Workaround for premature desc write-backs
2920 * in TSO mode. Append 4-byte sentinel desc
2921 */
2922 if (unlikely(mss && f == (nr_frags-1) &&
2923 size == len && size > 8))
2924 size -= 4;
2925 /* Workaround for potential 82544 hang in PCI-X.
2926 * Avoid terminating buffers within evenly-aligned
2927 * dwords.
2928 */
2929 bufend = (unsigned long)
2930 page_to_phys(skb_frag_page(frag));
2931 bufend += offset + size - 1;
2932 if (unlikely(adapter->pcix_82544 &&
2933 !(bufend & 4) &&
2934 size > 4))
2935 size -= 4;
2936
2937 buffer_info->length = size;
2938 buffer_info->time_stamp = jiffies;
2939 buffer_info->mapped_as_page = true;
2940 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
2941 offset, size, DMA_TO_DEVICE);
2942 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2943 goto dma_error;
2944 buffer_info->next_to_watch = i;
2945
2946 len -= size;
2947 offset += size;
2948 count++;
2949 }
2950 }
2951
2952 segs = skb_shinfo(skb)->gso_segs ?: 1;
2953 /* multiply data chunks by size of headers */
2954 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
2955
2956 tx_ring->buffer_info[i].skb = skb;
2957 tx_ring->buffer_info[i].segs = segs;
2958 tx_ring->buffer_info[i].bytecount = bytecount;
2959 tx_ring->buffer_info[first].next_to_watch = i;
2960
2961 return count;
2962
2963 dma_error:
2964 dev_err(&pdev->dev, "TX DMA map failed\n");
2965 buffer_info->dma = 0;
2966 if (count)
2967 count--;
2968
2969 while (count--) {
2970 if (i == 0)
2971 i += tx_ring->count;
2972 i--;
2973 buffer_info = &tx_ring->buffer_info[i];
2974 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2975 }
2976
2977 return 0;
2978 }
2979
2980 static void e1000_tx_queue(struct e1000_adapter *adapter,
2981 struct e1000_tx_ring *tx_ring, int tx_flags,
2982 int count)
2983 {
2984 struct e1000_tx_desc *tx_desc = NULL;
2985 struct e1000_tx_buffer *buffer_info;
2986 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
2987 unsigned int i;
2988
2989 if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
2990 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2991 E1000_TXD_CMD_TSE;
2992 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2993
2994 if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
2995 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2996 }
2997
2998 if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
2999 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3000 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3001 }
3002
3003 if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
3004 txd_lower |= E1000_TXD_CMD_VLE;
3005 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
3006 }
3007
3008 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3009 txd_lower &= ~(E1000_TXD_CMD_IFCS);
3010
3011 i = tx_ring->next_to_use;
3012
3013 while (count--) {
3014 buffer_info = &tx_ring->buffer_info[i];
3015 tx_desc = E1000_TX_DESC(*tx_ring, i);
3016 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3017 tx_desc->lower.data =
3018 cpu_to_le32(txd_lower | buffer_info->length);
3019 tx_desc->upper.data = cpu_to_le32(txd_upper);
3020 if (unlikely(++i == tx_ring->count))
3021 i = 0;
3022 }
3023
3024 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3025
3026 /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
3027 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3028 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
3029
3030 /* Force memory writes to complete before letting h/w
3031 * know there are new descriptors to fetch. (Only
3032 * applicable for weak-ordered memory model archs,
3033 * such as IA-64).
3034 */
3035 wmb();
3036
3037 tx_ring->next_to_use = i;
3038 }
3039
3040 /* 82547 workaround to avoid controller hang in half-duplex environment.
3041 * The workaround is to avoid queuing a large packet that would span
3042 * the internal Tx FIFO ring boundary by notifying the stack to resend
3043 * the packet at a later time. This gives the Tx FIFO an opportunity to
3044 * flush all packets. When that occurs, we reset the Tx FIFO pointers
3045 * to the beginning of the Tx FIFO.
3046 */
3047
3048 #define E1000_FIFO_HDR 0x10
3049 #define E1000_82547_PAD_LEN 0x3E0
3050
3051 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3052 struct sk_buff *skb)
3053 {
3054 u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3055 u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3056
3057 skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
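	/* skb_fifo_len now accounts for the per-packet FIFO header and is
	 * rounded up to the FIFO's 16-byte granularity
	 */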
3058
3059 if (adapter->link_duplex != HALF_DUPLEX)
3060 goto no_fifo_stall_required;
3061
3062 if (atomic_read(&adapter->tx_fifo_stall))
3063 return 1;
3064
3065 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3066 atomic_set(&adapter->tx_fifo_stall, 1);
3067 return 1;
3068 }
3069
3070 no_fifo_stall_required:
3071 adapter->tx_fifo_head += skb_fifo_len;
3072 if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3073 adapter->tx_fifo_head -= adapter->tx_fifo_size;
3074 return 0;
3075 }
3076
3077 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3078 {
3079 struct e1000_adapter *adapter = netdev_priv(netdev);
3080 struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3081
3082 netif_stop_queue(netdev);
3083 /* Herbert's original patch had:
3084 * smp_mb__after_netif_stop_queue();
3085 * but since that doesn't exist yet, just open code it.
3086 */
3087 smp_mb();
3088
3089 /* We need to check again in a case another CPU has just
3090 * made room available.
3091 */
3092 if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3093 return -EBUSY;
3094
3095 /* A reprieve! */
3096 netif_start_queue(netdev);
3097 ++adapter->restart_queue;
3098 return 0;
3099 }
3100
3101 static int e1000_maybe_stop_tx(struct net_device *netdev,
3102 struct e1000_tx_ring *tx_ring, int size)
3103 {
3104 if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3105 return 0;
3106 return __e1000_maybe_stop_tx(netdev, size);
3107 }
3108
3109 #define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X))
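/* number of descriptors needed to carry S bytes at 2^X bytes per descriptor,
 * i.e. DIV_ROUND_UP(S, 1 << X)
 */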
3110 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3111 struct net_device *netdev)
3112 {
3113 struct e1000_adapter *adapter = netdev_priv(netdev);
3114 struct e1000_hw *hw = &adapter->hw;
3115 struct e1000_tx_ring *tx_ring;
3116 unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3117 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3118 unsigned int tx_flags = 0;
3119 unsigned int len = skb_headlen(skb);
3120 unsigned int nr_frags;
3121 unsigned int mss;
3122 int count = 0;
3123 int tso;
3124 unsigned int f;
3125 __be16 protocol = vlan_get_protocol(skb);
3126
3127 /* This goes back to the question of how to logically map a Tx queue
3128 * to a flow. Right now, performance is impacted slightly negatively
3129 * if using multiple Tx queues. If the stack breaks away from a
3130 * single qdisc implementation, we can look at this again.
3131 */
3132 tx_ring = adapter->tx_ring;
3133
3134 /* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
3135 * packets may get corrupted during padding by HW.
3136 * To work around this issue, pad all small packets manually.
3137 */
3138 if (eth_skb_pad(skb))
3139 return NETDEV_TX_OK;
3140
3141 mss = skb_shinfo(skb)->gso_size;
3142 /* The controller does a simple calculation to
3143 * make sure there is enough room in the FIFO before
3144 * initiating the DMA for each buffer. It effectively
3145 * requires ceil(buffer len/mss) <= 4, so to make sure we
3146 * don't overrun the FIFO, cap the max buffer len at 4 * mss
3147 * when mss drops.
3148 */
3149 if (mss) {
3150 u8 hdr_len;
3151 max_per_txd = min(mss << 2, max_per_txd);
3152 max_txd_pwr = fls(max_per_txd) - 1;
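		/* fls() returns the position of the highest set bit, so this is
		 * floor(log2(max_per_txd))
		 */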
3153
3154 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3155 if (skb->data_len && hdr_len == len) {
3156 switch (hw->mac_type) {
3157 case e1000_82544: {
3158 unsigned int pull_size;
3159
3160 /* Make sure we have room to chop off 4 bytes,
3161 * and that the end alignment will work out to
3162 * this hardware's requirements.
3163 * NOTE: this is a TSO-only workaround;
3164 * if the end byte alignment is not correct we
3165 * move into the next dword.
3166 */
3167 if ((unsigned long)(skb_tail_pointer(skb) - 1)
3168 & 4)
3169 break;
3170 /* fall through */
3171 pull_size = min((unsigned int)4, skb->data_len);
3172 if (!__pskb_pull_tail(skb, pull_size)) {
3173 e_err(drv, "__pskb_pull_tail "
3174 "failed.\n");
3175 dev_kfree_skb_any(skb);
3176 return NETDEV_TX_OK;
3177 }
3178 len = skb_headlen(skb);
3179 break;
3180 }
3181 default:
3182 /* do nothing */
3183 break;
3184 }
3185 }
3186 }
3187
3188 /* reserve a descriptor for the offload context */
3189 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3190 count++;
3191 count++;
3192
3193 /* Controller Erratum workaround */
3194 if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3195 count++;
3196
3197 count += TXD_USE_COUNT(len, max_txd_pwr);
3198
3199 if (adapter->pcix_82544)
3200 count++;
3201
3202 /* work-around for errata 10 and it applies to all controllers
3203 * in PCI-X mode, so add one more descriptor to the count
3204 */
3205 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3206 (len > 2015)))
3207 count++;
3208
3209 nr_frags = skb_shinfo(skb)->nr_frags;
3210 for (f = 0; f < nr_frags; f++)
3211 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
3212 max_txd_pwr);
3213 if (adapter->pcix_82544)
3214 count += nr_frags;
3215
3216 /* need: count + 2 desc gap to keep tail from touching
3217 * head, otherwise try next time
3218 */
3219 if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3220 return NETDEV_TX_BUSY;
3221
3222 if (unlikely((hw->mac_type == e1000_82547) &&
3223 (e1000_82547_fifo_workaround(adapter, skb)))) {
3224 netif_stop_queue(netdev);
3225 if (!test_bit(__E1000_DOWN, &adapter->flags))
3226 schedule_delayed_work(&adapter->fifo_stall_task, 1);
3227 return NETDEV_TX_BUSY;
3228 }
3229
3230 if (skb_vlan_tag_present(skb)) {
3231 tx_flags |= E1000_TX_FLAGS_VLAN;
3232 tx_flags |= (skb_vlan_tag_get(skb) <<
3233 E1000_TX_FLAGS_VLAN_SHIFT);
3234 }
3235
3236 first = tx_ring->next_to_use;
3237
3238 tso = e1000_tso(adapter, tx_ring, skb, protocol);
3239 if (tso < 0) {
3240 dev_kfree_skb_any(skb);
3241 return NETDEV_TX_OK;
3242 }
3243
3244 if (likely(tso)) {
3245 if (likely(hw->mac_type != e1000_82544))
3246 tx_ring->last_tx_tso = true;
3247 tx_flags |= E1000_TX_FLAGS_TSO;
3248 } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol)))
3249 tx_flags |= E1000_TX_FLAGS_CSUM;
3250
3251 if (protocol == htons(ETH_P_IP))
3252 tx_flags |= E1000_TX_FLAGS_IPV4;
3253
3254 if (unlikely(skb->no_fcs))
3255 tx_flags |= E1000_TX_FLAGS_NO_FCS;
3256
3257 count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3258 nr_frags, mss);
3259
3260 if (count) {
3261 /* The number of descriptors needed is higher than in other Intel
3262 * drivers due to a number of workarounds. The breakdown is below:
3263 * Data descriptors: MAX_SKB_FRAGS + 1
3264 * Context Descriptor: 1
3265 * Keep head from touching tail: 2
3266 * Workarounds: 3
3267 */
3268 int desc_needed = MAX_SKB_FRAGS + 7;
3269
3270 netdev_sent_queue(netdev, skb->len);
3271 skb_tx_timestamp(skb);
3272
3273 e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3274
3275 /* 82544 potentially requires twice as many data descriptors
3276 * in order to guarantee buffers don't end on evenly-aligned
3277 * dwords
3278 */
3279 if (adapter->pcix_82544)
3280 desc_needed += MAX_SKB_FRAGS + 1;
3281
3282 /* Make sure there is space in the ring for the next send. */
3283 e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);
3284
3285 if (!skb->xmit_more ||
3286 netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
3287 writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
3288 /* we need this if more than one processor can write to
3289 * our tail at a time, it synchronizes IO on IA64/Altix
3290 * systems
3291 */
3292 mmiowb();
3293 }
3294 } else {
3295 dev_kfree_skb_any(skb);
3296 tx_ring->buffer_info[first].time_stamp = 0;
3297 tx_ring->next_to_use = first;
3298 }
3299
3300 return NETDEV_TX_OK;
3301 }
3302
3303 #define NUM_REGS 38 /* 1 based count */
3304 static void e1000_regdump(struct e1000_adapter *adapter)
3305 {
3306 struct e1000_hw *hw = &adapter->hw;
3307 u32 regs[NUM_REGS];
3308 u32 *regs_buff = regs;
3309 int i = 0;
3310
3311 static const char * const reg_name[] = {
3312 "CTRL", "STATUS",
3313 "RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3314 "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3315 "TIDV", "TXDCTL", "TADV", "TARC0",
3316 "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3317 "TXDCTL1", "TARC1",
3318 "CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3319 "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3320 "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
3321 };
3322
3323 regs_buff[0] = er32(CTRL);
3324 regs_buff[1] = er32(STATUS);
3325
3326 regs_buff[2] = er32(RCTL);
3327 regs_buff[3] = er32(RDLEN);
3328 regs_buff[4] = er32(RDH);
3329 regs_buff[5] = er32(RDT);
3330 regs_buff[6] = er32(RDTR);
3331
3332 regs_buff[7] = er32(TCTL);
3333 regs_buff[8] = er32(TDBAL);
3334 regs_buff[9] = er32(TDBAH);
3335 regs_buff[10] = er32(TDLEN);
3336 regs_buff[11] = er32(TDH);
3337 regs_buff[12] = er32(TDT);
3338 regs_buff[13] = er32(TIDV);
3339 regs_buff[14] = er32(TXDCTL);
3340 regs_buff[15] = er32(TADV);
3341 regs_buff[16] = er32(TARC0);
3342
3343 regs_buff[17] = er32(TDBAL1);
3344 regs_buff[18] = er32(TDBAH1);
3345 regs_buff[19] = er32(TDLEN1);
3346 regs_buff[20] = er32(TDH1);
3347 regs_buff[21] = er32(TDT1);
3348 regs_buff[22] = er32(TXDCTL1);
3349 regs_buff[23] = er32(TARC1);
3350 regs_buff[24] = er32(CTRL_EXT);
3351 regs_buff[25] = er32(ERT);
3352 regs_buff[26] = er32(RDBAL0);
3353 regs_buff[27] = er32(RDBAH0);
3354 regs_buff[28] = er32(TDFH);
3355 regs_buff[29] = er32(TDFT);
3356 regs_buff[30] = er32(TDFHS);
3357 regs_buff[31] = er32(TDFTS);
3358 regs_buff[32] = er32(TDFPC);
3359 regs_buff[33] = er32(RDFH);
3360 regs_buff[34] = er32(RDFT);
3361 regs_buff[35] = er32(RDFHS);
3362 regs_buff[36] = er32(RDFTS);
3363 regs_buff[37] = er32(RDFPC);
3364
3365 pr_info("Register dump\n");
3366 for (i = 0; i < NUM_REGS; i++)
3367 pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]);
3368 }
3369
3370 /*
3371 * e1000_dump: Print registers, tx ring and rx ring
3372 */
3373 static void e1000_dump(struct e1000_adapter *adapter)
3374 {
3375 /* this code doesn't handle multiple rings */
3376 struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3377 struct e1000_rx_ring *rx_ring = adapter->rx_ring;
3378 int i;
3379
3380 if (!netif_msg_hw(adapter))
3381 return;
3382
3383 /* Print Registers */
3384 e1000_regdump(adapter);
3385
3386 /* transmit dump */
3387 pr_info("TX Desc ring0 dump\n");
3388
3389 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
3390 *
3391 * Legacy Transmit Descriptor
3392 * +--------------------------------------------------------------+
3393 * 0 | Buffer Address [63:0] (Reserved on Write Back) |
3394 * +--------------------------------------------------------------+
3395 * 8 | Special | CSS | Status | CMD | CSO | Length |
3396 * +--------------------------------------------------------------+
3397 * 63 48 47 36 35 32 31 24 23 16 15 0
3398 *
3399 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
3400 * 63 48 47 40 39 32 31 16 15 8 7 0
3401 * +----------------------------------------------------------------+
3402 * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS |
3403 * +----------------------------------------------------------------+
3404 * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN |
3405 * +----------------------------------------------------------------+
3406 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
3407 *
3408 * Extended Data Descriptor (DTYP=0x1)
3409 * +----------------------------------------------------------------+
3410 * 0 | Buffer Address [63:0] |
3411 * +----------------------------------------------------------------+
3412 * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN |
3413 * +----------------------------------------------------------------+
3414 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
3415 */
3416 pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestmp bi->skb\n");
3417 pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestmp bi->skb\n");
3418
3419 if (!netif_msg_tx_done(adapter))
3420 goto rx_ring_summary;
3421
3422 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3423 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3424 struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i];
3425 struct my_u { __le64 a; __le64 b; };
3426 struct my_u *u = (struct my_u *)tx_desc;
3427 const char *type;
3428
3429 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
3430 type = "NTC/U";
3431 else if (i == tx_ring->next_to_use)
3432 type = "NTU";
3433 else if (i == tx_ring->next_to_clean)
3434 type = "NTC";
3435 else
3436 type = "";
3437
3438 pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p %s\n",
3439 ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
3440 le64_to_cpu(u->a), le64_to_cpu(u->b),
3441 (u64)buffer_info->dma, buffer_info->length,
3442 buffer_info->next_to_watch,
3443 (u64)buffer_info->time_stamp, buffer_info->skb, type);
3444 }
3445
3446 rx_ring_summary:
3447 /* receive dump */
3448 pr_info("\nRX Desc ring dump\n");
3449
3450 /* Legacy Receive Descriptor Format
3451 *
3452 * +-----------------------------------------------------+
3453 * | Buffer Address [63:0] |
3454 * +-----------------------------------------------------+
3455 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
3456 * +-----------------------------------------------------+
3457 * 63 48 47 40 39 32 31 16 15 0
3458 */
3459 pr_info("R[desc] [address 63:0 ] [vl er S cks ln] [bi->dma ] [bi->skb]\n");
3460
3461 if (!netif_msg_rx_status(adapter))
3462 goto exit;
3463
3464 for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3465 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3466 struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i];
3467 struct my_u { __le64 a; __le64 b; };
3468 struct my_u *u = (struct my_u *)rx_desc;
3469 const char *type;
3470
3471 if (i == rx_ring->next_to_use)
3472 type = "NTU";
3473 else if (i == rx_ring->next_to_clean)
3474 type = "NTC";
3475 else
3476 type = "";
3477
3478 pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n",
3479 i, le64_to_cpu(u->a), le64_to_cpu(u->b),
3480 (u64)buffer_info->dma, buffer_info->rxbuf.data, type);
3481 } /* for */
3482
3483 /* dump the descriptor caches */
3484 /* rx */
3485 pr_info("Rx descriptor cache in 64bit format\n");
3486 for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
3487 pr_info("R%04X: %08X|%08X %08X|%08X\n",
3488 i,
3489 readl(adapter->hw.hw_addr + i+4),
3490 readl(adapter->hw.hw_addr + i),
3491 readl(adapter->hw.hw_addr + i+12),
3492 readl(adapter->hw.hw_addr + i+8));
3493 }
3494 /* tx */
3495 pr_info("Tx descriptor cache in 64bit format\n");
3496 for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
3497 pr_info("T%04X: %08X|%08X %08X|%08X\n",
3498 i,
3499 readl(adapter->hw.hw_addr + i+4),
3500 readl(adapter->hw.hw_addr + i),
3501 readl(adapter->hw.hw_addr + i+12),
3502 readl(adapter->hw.hw_addr + i+8));
3503 }
3504 exit:
3505 return;
3506 }
3507
3508 /**
3509 * e1000_tx_timeout - Respond to a Tx Hang
3510 * @netdev: network interface device structure
3511 **/
3512 static void e1000_tx_timeout(struct net_device *netdev)
3513 {
3514 struct e1000_adapter *adapter = netdev_priv(netdev);
3515
3516 /* Do the reset outside of interrupt context */
3517 adapter->tx_timeout_count++;
3518 schedule_work(&adapter->reset_task);
3519 }
3520
3521 static void e1000_reset_task(struct work_struct *work)
3522 {
3523 struct e1000_adapter *adapter =
3524 container_of(work, struct e1000_adapter, reset_task);
3525
3526 e_err(drv, "Reset adapter\n");
3527 e1000_reinit_locked(adapter);
3528 }
3529
3530 /**
3531 * e1000_change_mtu - Change the Maximum Transfer Unit
3532 * @netdev: network interface device structure
3533 * @new_mtu: new value for maximum frame size
3534 *
3535 * Returns 0 on success, negative on failure
3536 **/
3537 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3538 {
3539 struct e1000_adapter *adapter = netdev_priv(netdev);
3540 struct e1000_hw *hw = &adapter->hw;
3541 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
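	/* For illustration: a standard 1500-byte MTU yields
	 * max_frame = 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) = 1518 bytes.
	 */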
3542
3543 /* Adapter-specific max frame size limits. */
3544 switch (hw->mac_type) {
3545 case e1000_undefined ... e1000_82542_rev2_1:
3546 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3547 e_err(probe, "Jumbo Frames not supported.\n");
3548 return -EINVAL;
3549 }
3550 break;
3551 default:
3552 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3553 break;
3554 }
3555
3556 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3557 msleep(1);
3558 /* e1000_down has a dependency on max_frame_size */
3559 hw->max_frame_size = max_frame;
3560 if (netif_running(netdev)) {
3561 /* prevent buffers from being reallocated */
3562 adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
3563 e1000_down(adapter);
3564 }
3565
3566 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3567 * means we reserve 2 more, this pushes us to allocate from the next
3568 * larger slab size.
3569 * i.e. RXBUFFER_2048 --> size-4096 slab
3570 * however with the new *_jumbo_rx* routines, jumbo receives will use
3571 * fragmented skbs
3572 */
3573
3574 if (max_frame <= E1000_RXBUFFER_2048)
3575 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3576 else
3577 #if (PAGE_SIZE >= E1000_RXBUFFER_16384)
3578 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3579 #elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3580 adapter->rx_buffer_len = PAGE_SIZE;
3581 #endif
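	/* Example (assuming 4 KiB pages): a 9000-byte MTU gives a max_frame of
	 * 9018, so rx_buffer_len falls back to PAGE_SIZE (4096) and jumbo
	 * receives are assembled from page fragments, as noted above.
	 */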
3582
3583 /* adjust allocation if LPE protects us, and we aren't using SBP */
3584 if (!hw->tbi_compatibility_on &&
3585 ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
3586 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3587 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3588
3589 pr_info("%s changing MTU from %d to %d\n",
3590 netdev->name, netdev->mtu, new_mtu);
3591 netdev->mtu = new_mtu;
3592
3593 if (netif_running(netdev))
3594 e1000_up(adapter);
3595 else
3596 e1000_reset(adapter);
3597
3598 clear_bit(__E1000_RESETTING, &adapter->flags);
3599
3600 return 0;
3601 }
3602
3603 /**
3604 * e1000_update_stats - Update the board statistics counters
3605 * @adapter: board private structure
3606 **/
3607 void e1000_update_stats(struct e1000_adapter *adapter)
3608 {
3609 struct net_device *netdev = adapter->netdev;
3610 struct e1000_hw *hw = &adapter->hw;
3611 struct pci_dev *pdev = adapter->pdev;
3612 unsigned long flags;
3613 u16 phy_tmp;
3614
3615 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3616
3617 /* Prevent stats update while adapter is being reset, or if the pci
3618 * connection is down.
3619 */
3620 if (adapter->link_speed == 0)
3621 return;
3622 if (pci_channel_offline(pdev))
3623 return;
3624
3625 spin_lock_irqsave(&adapter->stats_lock, flags);
3626
3627 /* these counters are modified from e1000_tbi_adjust_stats,
3628 * called from the interrupt context, so they must only
3629 * be written while holding adapter->stats_lock
3630 */
3631
3632 adapter->stats.crcerrs += er32(CRCERRS);
3633 adapter->stats.gprc += er32(GPRC);
3634 adapter->stats.gorcl += er32(GORCL);
3635 adapter->stats.gorch += er32(GORCH);
3636 adapter->stats.bprc += er32(BPRC);
3637 adapter->stats.mprc += er32(MPRC);
3638 adapter->stats.roc += er32(ROC);
3639
3640 adapter->stats.prc64 += er32(PRC64);
3641 adapter->stats.prc127 += er32(PRC127);
3642 adapter->stats.prc255 += er32(PRC255);
3643 adapter->stats.prc511 += er32(PRC511);
3644 adapter->stats.prc1023 += er32(PRC1023);
3645 adapter->stats.prc1522 += er32(PRC1522);
3646
3647 adapter->stats.symerrs += er32(SYMERRS);
3648 adapter->stats.mpc += er32(MPC);
3649 adapter->stats.scc += er32(SCC);
3650 adapter->stats.ecol += er32(ECOL);
3651 adapter->stats.mcc += er32(MCC);
3652 adapter->stats.latecol += er32(LATECOL);
3653 adapter->stats.dc += er32(DC);
3654 adapter->stats.sec += er32(SEC);
3655 adapter->stats.rlec += er32(RLEC);
3656 adapter->stats.xonrxc += er32(XONRXC);
3657 adapter->stats.xontxc += er32(XONTXC);
3658 adapter->stats.xoffrxc += er32(XOFFRXC);
3659 adapter->stats.xofftxc += er32(XOFFTXC);
3660 adapter->stats.fcruc += er32(FCRUC);
3661 adapter->stats.gptc += er32(GPTC);
3662 adapter->stats.gotcl += er32(GOTCL);
3663 adapter->stats.gotch += er32(GOTCH);
3664 adapter->stats.rnbc += er32(RNBC);
3665 adapter->stats.ruc += er32(RUC);
3666 adapter->stats.rfc += er32(RFC);
3667 adapter->stats.rjc += er32(RJC);
3668 adapter->stats.torl += er32(TORL);
3669 adapter->stats.torh += er32(TORH);
3670 adapter->stats.totl += er32(TOTL);
3671 adapter->stats.toth += er32(TOTH);
3672 adapter->stats.tpr += er32(TPR);
3673
3674 adapter->stats.ptc64 += er32(PTC64);
3675 adapter->stats.ptc127 += er32(PTC127);
3676 adapter->stats.ptc255 += er32(PTC255);
3677 adapter->stats.ptc511 += er32(PTC511);
3678 adapter->stats.ptc1023 += er32(PTC1023);
3679 adapter->stats.ptc1522 += er32(PTC1522);
3680
3681 adapter->stats.mptc += er32(MPTC);
3682 adapter->stats.bptc += er32(BPTC);
3683
3684 /* used for adaptive IFS */
3685
3686 hw->tx_packet_delta = er32(TPT);
3687 adapter->stats.tpt += hw->tx_packet_delta;
3688 hw->collision_delta = er32(COLC);
3689 adapter->stats.colc += hw->collision_delta;
3690
3691 if (hw->mac_type >= e1000_82543) {
3692 adapter->stats.algnerrc += er32(ALGNERRC);
3693 adapter->stats.rxerrc += er32(RXERRC);
3694 adapter->stats.tncrs += er32(TNCRS);
3695 adapter->stats.cexterr += er32(CEXTERR);
3696 adapter->stats.tsctc += er32(TSCTC);
3697 adapter->stats.tsctfc += er32(TSCTFC);
3698 }
3699
3700 /* Fill out the OS statistics structure */
3701 netdev->stats.multicast = adapter->stats.mprc;
3702 netdev->stats.collisions = adapter->stats.colc;
3703
3704 /* Rx Errors */
3705
3706 /* RLEC on some newer hardware can be incorrect so build
3707 * our own version based on RUC and ROC
3708 */
3709 netdev->stats.rx_errors = adapter->stats.rxerrc +
3710 adapter->stats.crcerrs + adapter->stats.algnerrc +
3711 adapter->stats.ruc + adapter->stats.roc +
3712 adapter->stats.cexterr;
3713 adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3714 netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3715 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3716 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3717 netdev->stats.rx_missed_errors = adapter->stats.mpc;
3718
3719 /* Tx Errors */
3720 adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3721 netdev->stats.tx_errors = adapter->stats.txerrc;
3722 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3723 netdev->stats.tx_window_errors = adapter->stats.latecol;
3724 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3725 if (hw->bad_tx_carr_stats_fd &&
3726 adapter->link_duplex == FULL_DUPLEX) {
3727 netdev->stats.tx_carrier_errors = 0;
3728 adapter->stats.tncrs = 0;
3729 }
3730
3731 /* Tx Dropped needs to be maintained elsewhere */
3732
3733 /* Phy Stats */
3734 if (hw->media_type == e1000_media_type_copper) {
3735 if ((adapter->link_speed == SPEED_1000) &&
3736 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3737 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3738 adapter->phy_stats.idle_errors += phy_tmp;
3739 }
3740
3741 if ((hw->mac_type <= e1000_82546) &&
3742 (hw->phy_type == e1000_phy_m88) &&
3743 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3744 adapter->phy_stats.receive_errors += phy_tmp;
3745 }
3746
3747 /* Management Stats */
3748 if (hw->has_smbus) {
3749 adapter->stats.mgptc += er32(MGTPTC);
3750 adapter->stats.mgprc += er32(MGTPRC);
3751 adapter->stats.mgpdc += er32(MGTPDC);
3752 }
3753
3754 spin_unlock_irqrestore(&adapter->stats_lock, flags);
3755 }
3756
3757 /**
3758 * e1000_intr - Interrupt Handler
3759 * @irq: interrupt number
3760 * @data: pointer to a network interface device structure
3761 **/
3762 static irqreturn_t e1000_intr(int irq, void *data)
3763 {
3764 struct net_device *netdev = data;
3765 struct e1000_adapter *adapter = netdev_priv(netdev);
3766 struct e1000_hw *hw = &adapter->hw;
3767 u32 icr = er32(ICR);
3768
3769 if (unlikely((!icr)))
3770 return IRQ_NONE; /* Not our interrupt */
3771
3772 /* we might have caused the interrupt, but the above
3773 * read cleared it, and just in case the driver is
3774 * down there is nothing to do so return handled
3775 */
3776 if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3777 return IRQ_HANDLED;
3778
3779 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3780 hw->get_link_status = 1;
3781 /* guard against interrupt when we're going down */
3782 if (!test_bit(__E1000_DOWN, &adapter->flags))
3783 schedule_delayed_work(&adapter->watchdog_task, 1);
3784 }
3785
3786 /* disable interrupts, without the synchronize_irq bit */
3787 ew32(IMC, ~0);
3788 E1000_WRITE_FLUSH();
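	/* Writing ~0 to IMC masks every interrupt cause; interrupts stay off
	 * until e1000_irq_enable() turns them back on, normally from the NAPI
	 * poll in e1000_clean.
	 */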
3789
3790 if (likely(napi_schedule_prep(&adapter->napi))) {
3791 adapter->total_tx_bytes = 0;
3792 adapter->total_tx_packets = 0;
3793 adapter->total_rx_bytes = 0;
3794 adapter->total_rx_packets = 0;
3795 __napi_schedule(&adapter->napi);
3796 } else {
3797 /* this really should not happen! if it does it is basically a
3798 * bug, but not a hard error, so enable ints and continue
3799 */
3800 if (!test_bit(__E1000_DOWN, &adapter->flags))
3801 e1000_irq_enable(adapter);
3802 }
3803
3804 return IRQ_HANDLED;
3805 }
3806
3807 /**
3808 * e1000_clean - NAPI Rx polling callback
3809  * @napi: napi struct containing this adapter's napi context
 * @budget: amount of packet processing work allowed this poll
3810 **/
3811 static int e1000_clean(struct napi_struct *napi, int budget)
3812 {
3813 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
3814 napi);
3815 int tx_clean_complete = 0, work_done = 0;
3816
3817 tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3818
3819 adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3820
3821 if (!tx_clean_complete)
3822 work_done = budget;
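	/* Reporting work_done == budget keeps this poll scheduled, so any
	 * unfinished Tx cleanup is retried on the next NAPI pass.
	 */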
3823
3824 /* If budget not fully consumed, exit the polling mode */
3825 if (work_done < budget) {
3826 if (likely(adapter->itr_setting & 3))
3827 e1000_set_itr(adapter);
3828 napi_complete_done(napi, work_done);
3829 if (!test_bit(__E1000_DOWN, &adapter->flags))
3830 e1000_irq_enable(adapter);
3831 }
3832
3833 return work_done;
3834 }
3835
3836 /**
3837 * e1000_clean_tx_irq - Reclaim resources after transmit completes
3838 * @adapter: board private structure
3839 **/
3840 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3841 struct e1000_tx_ring *tx_ring)
3842 {
3843 struct e1000_hw *hw = &adapter->hw;
3844 struct net_device *netdev = adapter->netdev;
3845 struct e1000_tx_desc *tx_desc, *eop_desc;
3846 struct e1000_tx_buffer *buffer_info;
3847 unsigned int i, eop;
3848 unsigned int count = 0;
3849 unsigned int total_tx_bytes = 0, total_tx_packets = 0;
3850 unsigned int bytes_compl = 0, pkts_compl = 0;
3851
3852 i = tx_ring->next_to_clean;
3853 eop = tx_ring->buffer_info[i].next_to_watch;
3854 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3855
3856 while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3857 (count < tx_ring->count)) {
3858 bool cleaned = false;
3859 dma_rmb(); /* read buffer_info after eop_desc */
3860 for ( ; !cleaned; count++) {
3861 tx_desc = E1000_TX_DESC(*tx_ring, i);
3862 buffer_info = &tx_ring->buffer_info[i];
3863 cleaned = (i == eop);
3864
3865 if (cleaned) {
3866 total_tx_packets += buffer_info->segs;
3867 total_tx_bytes += buffer_info->bytecount;
3868 if (buffer_info->skb) {
3869 bytes_compl += buffer_info->skb->len;
3870 pkts_compl++;
3871 }
3872
3873 }
3874 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3875 tx_desc->upper.data = 0;
3876
3877 if (unlikely(++i == tx_ring->count))
3878 i = 0;
3879 }
3880
3881 eop = tx_ring->buffer_info[i].next_to_watch;
3882 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3883 }
3884
3885 /* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame,
3886 * which will reuse the cleaned buffers.
3887 */
3888 smp_store_release(&tx_ring->next_to_clean, i);
3889
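	/* Report completed packets/bytes to the stack's byte queue limit (BQL)
	 * accounting for this device.
	 */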
3890 netdev_completed_queue(netdev, pkts_compl, bytes_compl);
3891
3892 #define TX_WAKE_THRESHOLD 32
3893 if (unlikely(count && netif_carrier_ok(netdev) &&
3894 E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3895 /* Make sure that anybody stopping the queue after this
3896 * sees the new next_to_clean.
3897 */
3898 smp_mb();
3899
3900 if (netif_queue_stopped(netdev) &&
3901 !(test_bit(__E1000_DOWN, &adapter->flags))) {
3902 netif_wake_queue(netdev);
3903 ++adapter->restart_queue;
3904 }
3905 }
3906
3907 if (adapter->detect_tx_hung) {
3908 /* Detect a transmit hang in hardware, this serializes the
3909 * check with the clearing of time_stamp and movement of i
3910 */
3911 adapter->detect_tx_hung = false;
3912 if (tx_ring->buffer_info[eop].time_stamp &&
3913 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3914 (adapter->tx_timeout_factor * HZ)) &&
3915 !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3916
3917 /* detected Tx unit hang */
3918 e_err(drv, "Detected Tx Unit Hang\n"
3919 " Tx Queue <%lu>\n"
3920 " TDH <%x>\n"
3921 " TDT <%x>\n"
3922 " next_to_use <%x>\n"
3923 " next_to_clean <%x>\n"
3924 "buffer_info[next_to_clean]\n"
3925 " time_stamp <%lx>\n"
3926 " next_to_watch <%x>\n"
3927 " jiffies <%lx>\n"
3928 " next_to_watch.status <%x>\n",
3929 (unsigned long)(tx_ring - adapter->tx_ring),
3930 readl(hw->hw_addr + tx_ring->tdh),
3931 readl(hw->hw_addr + tx_ring->tdt),
3932 tx_ring->next_to_use,
3933 tx_ring->next_to_clean,
3934 tx_ring->buffer_info[eop].time_stamp,
3935 eop,
3936 jiffies,
3937 eop_desc->upper.fields.status);
3938 e1000_dump(adapter);
3939 netif_stop_queue(netdev);
3940 }
3941 }
3942 adapter->total_tx_bytes += total_tx_bytes;
3943 adapter->total_tx_packets += total_tx_packets;
3944 netdev->stats.tx_bytes += total_tx_bytes;
3945 netdev->stats.tx_packets += total_tx_packets;
3946 return count < tx_ring->count;
3947 }
3948
3949 /**
3950 * e1000_rx_checksum - Receive Checksum Offload for 82543
3951 * @adapter: board private structure
3952 * @status_err: receive descriptor status and error fields
3953 * @csum: receive descriptor csum field
3954  * @skb: socket buffer with received data
3955 **/
3956 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3957 u32 csum, struct sk_buff *skb)
3958 {
3959 struct e1000_hw *hw = &adapter->hw;
3960 u16 status = (u16)status_err;
3961 u8 errors = (u8)(status_err >> 24);
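	/* status_err packs the descriptor status field in bits 15:0 and the
	 * error bits in 31:24, matching how callers build it:
	 * (u32)status | ((u32)rx_desc->errors << 24).
	 */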
3962
3963 skb_checksum_none_assert(skb);
3964
3965 /* 82543 or newer only */
3966 if (unlikely(hw->mac_type < e1000_82543))
3967 return;
3968 /* Ignore Checksum bit is set */
3969 if (unlikely(status & E1000_RXD_STAT_IXSM))
3970 return;
3971 /* TCP/UDP checksum error bit is set */
3972 if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3973 /* let the stack verify checksum errors */
3974 adapter->hw_csum_err++;
3975 return;
3976 }
3977 /* TCP/UDP Checksum has not been calculated */
3978 if (!(status & E1000_RXD_STAT_TCPCS))
3979 return;
3980
3981 /* It must be a TCP or UDP packet with a valid checksum */
3982 if (likely(status & E1000_RXD_STAT_TCPCS)) {
3983 /* TCP checksum is good */
3984 skb->ip_summed = CHECKSUM_UNNECESSARY;
3985 }
3986 adapter->hw_csum_good++;
3987 }
3988
3989 /**
3990 * e1000_consume_page - helper function for jumbo Rx path
3991 **/
3992 static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb,
3993 u16 length)
3994 {
3995 bi->rxbuf.page = NULL;
3996 skb->len += length;
3997 skb->data_len += length;
3998 skb->truesize += PAGE_SIZE;
3999 }
4000
4001 /**
4002 * e1000_receive_skb - helper function to handle rx indications
4003 * @adapter: board private structure
4004 * @status: descriptor status field as written by hardware
4005 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
4006 * @skb: pointer to sk_buff to be indicated to stack
4007 */
4008 static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
4009 __le16 vlan, struct sk_buff *skb)
4010 {
4011 skb->protocol = eth_type_trans(skb, adapter->netdev);
4012
4013 if (status & E1000_RXD_STAT_VP) {
4014 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4015
4016 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4017 }
4018 napi_gro_receive(&adapter->napi, skb);
4019 }
4020
4021 /**
4022 * e1000_tbi_adjust_stats
4023 * @hw: Struct containing variables accessed by shared code
 * @stats: statistics counters to be adjusted
4024 * @frame_len: The length of the frame in question
4025 * @mac_addr: The Ethernet destination address of the frame in question
4026 *
4027 * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
4028 */
4029 static void e1000_tbi_adjust_stats(struct e1000_hw *hw,
4030 struct e1000_hw_stats *stats,
4031 u32 frame_len, const u8 *mac_addr)
4032 {
4033 u64 carry_bit;
4034
4035 /* First adjust the frame length. */
4036 frame_len--;
4037 /* We need to adjust the statistics counters, since the hardware
4038 * counters overcount this packet as a CRC error and undercount
4039 * the packet as a good packet
4040 */
4041 /* This packet should not be counted as a CRC error. */
4042 stats->crcerrs--;
4043 /* This packet does count as a Good Packet Received. */
4044 stats->gprc++;
4045
4046 /* Adjust the Good Octets received counters */
4047 carry_bit = 0x80000000 & stats->gorcl;
4048 stats->gorcl += frame_len;
4049 /* If the high bit of Gorcl (the low 32 bits of the Good Octets
4050 * Received Count) was one before the addition,
4051 * AND it is zero after, then we lost the carry out,
4052 * need to add one to Gorch (Good Octets Received Count High).
4053 * This could be simplified if all environments supported
4054 * 64-bit integers.
4055 */
4056 if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
4057 stats->gorch++;
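	/* Conceptually, if the low 32 bits were 0xFFFFFF00 and frame_len is
	 * 0x200, the 32-bit counter would wrap to 0x00000100; bit 31 flipping
	 * from 1 to 0 is what signals the carry folded into gorch.
	 */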
4058 /* Is this a broadcast or multicast? Check broadcast first,
4059 * since the test for a multicast frame will test positive on
4060 * a broadcast frame.
4061 */
4062 if (is_broadcast_ether_addr(mac_addr))
4063 stats->bprc++;
4064 else if (is_multicast_ether_addr(mac_addr))
4065 stats->mprc++;
4066
4067 if (frame_len == hw->max_frame_size) {
4068 /* In this case, the hardware has overcounted the number of
4069 * oversize frames.
4070 */
4071 if (stats->roc > 0)
4072 stats->roc--;
4073 }
4074
4075 /* Adjust the bin counters when the extra byte put the frame in the
4076 * wrong bin. Remember that the frame_len was adjusted above.
4077 */
4078 if (frame_len == 64) {
4079 stats->prc64++;
4080 stats->prc127--;
4081 } else if (frame_len == 127) {
4082 stats->prc127++;
4083 stats->prc255--;
4084 } else if (frame_len == 255) {
4085 stats->prc255++;
4086 stats->prc511--;
4087 } else if (frame_len == 511) {
4088 stats->prc511++;
4089 stats->prc1023--;
4090 } else if (frame_len == 1023) {
4091 stats->prc1023++;
4092 stats->prc1522--;
4093 } else if (frame_len == 1522) {
4094 stats->prc1522++;
4095 }
4096 }
4097
4098 static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
4099 u8 status, u8 errors,
4100 u32 length, const u8 *data)
4101 {
4102 struct e1000_hw *hw = &adapter->hw;
4103 u8 last_byte = *(data + length - 1);
4104
4105 if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
4106 unsigned long irq_flags;
4107
4108 spin_lock_irqsave(&adapter->stats_lock, irq_flags);
4109 e1000_tbi_adjust_stats(hw, &adapter->stats, length, data);
4110 spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
4111
4112 return true;
4113 }
4114
4115 return false;
4116 }
4117
4118 static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
4119 unsigned int bufsz)
4120 {
4121 struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz);
4122
4123 if (unlikely(!skb))
4124 adapter->alloc_rx_buff_failed++;
4125 return skb;
4126 }
4127
4128 /**
4129 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
4130 * @adapter: board private structure
4131 * @rx_ring: ring to clean
4132 * @work_done: amount of napi work completed this call
4133 * @work_to_do: max amount of work allowed for this call to do
4134 *
4135  * the return value indicates whether actual cleaning was done; there
4136  * is no guarantee that everything was cleaned
4137 */
4138 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4139 struct e1000_rx_ring *rx_ring,
4140 int *work_done, int work_to_do)
4141 {
4142 struct net_device *netdev = adapter->netdev;
4143 struct pci_dev *pdev = adapter->pdev;
4144 struct e1000_rx_desc *rx_desc, *next_rxd;
4145 struct e1000_rx_buffer *buffer_info, *next_buffer;
4146 u32 length;
4147 unsigned int i;
4148 int cleaned_count = 0;
4149 bool cleaned = false;
4150 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4151
4152 i = rx_ring->next_to_clean;
4153 rx_desc = E1000_RX_DESC(*rx_ring, i);
4154 buffer_info = &rx_ring->buffer_info[i];
4155
4156 while (rx_desc->status & E1000_RXD_STAT_DD) {
4157 struct sk_buff *skb;
4158 u8 status;
4159
4160 if (*work_done >= work_to_do)
4161 break;
4162 (*work_done)++;
4163 dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4164
4165 status = rx_desc->status;
4166
4167 if (++i == rx_ring->count)
4168 i = 0;
4169
4170 next_rxd = E1000_RX_DESC(*rx_ring, i);
4171 prefetch(next_rxd);
4172
4173 next_buffer = &rx_ring->buffer_info[i];
4174
4175 cleaned = true;
4176 cleaned_count++;
4177 dma_unmap_page(&pdev->dev, buffer_info->dma,
4178 adapter->rx_buffer_len, DMA_FROM_DEVICE);
4179 buffer_info->dma = 0;
4180
4181 length = le16_to_cpu(rx_desc->length);
4182
4183 /* errors is only valid for DD + EOP descriptors */
4184 if (unlikely((status & E1000_RXD_STAT_EOP) &&
4185 (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
4186 u8 *mapped = page_address(buffer_info->rxbuf.page);
4187
4188 if (e1000_tbi_should_accept(adapter, status,
4189 rx_desc->errors,
4190 length, mapped)) {
4191 length--;
4192 } else if (netdev->features & NETIF_F_RXALL) {
4193 goto process_skb;
4194 } else {
4195 /* an error means any chain goes out the window
4196 * too
4197 */
4198 if (rx_ring->rx_skb_top)
4199 dev_kfree_skb(rx_ring->rx_skb_top);
4200 rx_ring->rx_skb_top = NULL;
4201 goto next_desc;
4202 }
4203 }
4204
4205 #define rxtop rx_ring->rx_skb_top
4206 process_skb:
4207 if (!(status & E1000_RXD_STAT_EOP)) {
4208 /* this descriptor is only the beginning (or middle) */
4209 if (!rxtop) {
4210 /* this is the beginning of a chain */
4211 rxtop = napi_get_frags(&adapter->napi);
4212 if (!rxtop)
4213 break;
4214
4215 skb_fill_page_desc(rxtop, 0,
4216 buffer_info->rxbuf.page,
4217 0, length);
4218 } else {
4219 /* this is the middle of a chain */
4220 skb_fill_page_desc(rxtop,
4221 skb_shinfo(rxtop)->nr_frags,
4222 buffer_info->rxbuf.page, 0, length);
4223 }
4224 e1000_consume_page(buffer_info, rxtop, length);
4225 goto next_desc;
4226 } else {
4227 if (rxtop) {
4228 /* end of the chain */
4229 skb_fill_page_desc(rxtop,
4230 skb_shinfo(rxtop)->nr_frags,
4231 buffer_info->rxbuf.page, 0, length);
4232 skb = rxtop;
4233 rxtop = NULL;
4234 e1000_consume_page(buffer_info, skb, length);
4235 } else {
4236 struct page *p;
4237 /* no chain, got EOP, this buf is the packet
4238 * copybreak to save the put_page/alloc_page
4239 */
4240 p = buffer_info->rxbuf.page;
4241 if (length <= copybreak) {
4242 u8 *vaddr;
4243
4244 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4245 length -= 4;
4246 skb = e1000_alloc_rx_skb(adapter,
4247 length);
4248 if (!skb)
4249 break;
4250
4251 vaddr = kmap_atomic(p);
4252 memcpy(skb_tail_pointer(skb), vaddr,
4253 length);
4254 kunmap_atomic(vaddr);
4255 /* re-use the page, so don't erase
4256 * buffer_info->rxbuf.page
4257 */
4258 skb_put(skb, length);
4259 e1000_rx_checksum(adapter,
4260 status | rx_desc->errors << 24,
4261 le16_to_cpu(rx_desc->csum), skb);
4262
4263 total_rx_bytes += skb->len;
4264 total_rx_packets++;
4265
4266 e1000_receive_skb(adapter, status,
4267 rx_desc->special, skb);
4268 goto next_desc;
4269 } else {
4270 skb = napi_get_frags(&adapter->napi);
4271 if (!skb) {
4272 adapter->alloc_rx_buff_failed++;
4273 break;
4274 }
4275 skb_fill_page_desc(skb, 0, p, 0,
4276 length);
4277 e1000_consume_page(buffer_info, skb,
4278 length);
4279 }
4280 }
4281 }
4282
4283 /* Receive Checksum Offload XXX recompute due to CRC strip? */
4284 e1000_rx_checksum(adapter,
4285 (u32)(status) |
4286 ((u32)(rx_desc->errors) << 24),
4287 le16_to_cpu(rx_desc->csum), skb);
4288
4289 total_rx_bytes += (skb->len - 4); /* don't count FCS */
4290 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4291 pskb_trim(skb, skb->len - 4);
4292 total_rx_packets++;
4293
4294 if (status & E1000_RXD_STAT_VP) {
4295 __le16 vlan = rx_desc->special;
4296 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4297
4298 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4299 }
4300
4301 napi_gro_frags(&adapter->napi);
4302
4303 next_desc:
4304 rx_desc->status = 0;
4305
4306 /* return some buffers to hardware, one at a time is too slow */
4307 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4308 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4309 cleaned_count = 0;
4310 }
4311
4312 /* use prefetched values */
4313 rx_desc = next_rxd;
4314 buffer_info = next_buffer;
4315 }
4316 rx_ring->next_to_clean = i;
4317
4318 cleaned_count = E1000_DESC_UNUSED(rx_ring);
4319 if (cleaned_count)
4320 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4321
4322 adapter->total_rx_packets += total_rx_packets;
4323 adapter->total_rx_bytes += total_rx_bytes;
4324 netdev->stats.rx_bytes += total_rx_bytes;
4325 netdev->stats.rx_packets += total_rx_packets;
4326 return cleaned;
4327 }
4328
4329 /* this should improve performance for small packets with large amounts
4330 * of reassembly being done in the stack
4331 */
4332 static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter,
4333 struct e1000_rx_buffer *buffer_info,
4334 u32 length, const void *data)
4335 {
4336 struct sk_buff *skb;
4337
4338 if (length > copybreak)
4339 return NULL;
4340
4341 skb = e1000_alloc_rx_skb(adapter, length);
4342 if (!skb)
4343 return NULL;
4344
4345 dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma,
4346 length, DMA_FROM_DEVICE);
4347
4348 skb_put_data(skb, data, length);
4349
4350 return skb;
4351 }
4352
4353 /**
4354 * e1000_clean_rx_irq - Send received data up the network stack; legacy
4355 * @adapter: board private structure
4356 * @rx_ring: ring to clean
4357 * @work_done: amount of napi work completed this call
4358 * @work_to_do: max amount of work allowed for this call to do
4359 */
4360 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4361 struct e1000_rx_ring *rx_ring,
4362 int *work_done, int work_to_do)
4363 {
4364 struct net_device *netdev = adapter->netdev;
4365 struct pci_dev *pdev = adapter->pdev;
4366 struct e1000_rx_desc *rx_desc, *next_rxd;
4367 struct e1000_rx_buffer *buffer_info, *next_buffer;
4368 u32 length;
4369 unsigned int i;
4370 int cleaned_count = 0;
4371 bool cleaned = false;
4372 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4373
4374 i = rx_ring->next_to_clean;
4375 rx_desc = E1000_RX_DESC(*rx_ring, i);
4376 buffer_info = &rx_ring->buffer_info[i];
4377
4378 while (rx_desc->status & E1000_RXD_STAT_DD) {
4379 struct sk_buff *skb;
4380 u8 *data;
4381 u8 status;
4382
4383 if (*work_done >= work_to_do)
4384 break;
4385 (*work_done)++;
4386 dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4387
4388 status = rx_desc->status;
4389 length = le16_to_cpu(rx_desc->length);
4390
4391 data = buffer_info->rxbuf.data;
4392 prefetch(data);
4393 skb = e1000_copybreak(adapter, buffer_info, length, data);
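		/* When the packet exceeds copybreak, wrap the existing receive
		 * buffer with build_skb() below instead of copying the data.
		 */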
4394 if (!skb) {
4395 unsigned int frag_len = e1000_frag_len(adapter);
4396
4397 skb = build_skb(data - E1000_HEADROOM, frag_len);
4398 if (!skb) {
4399 adapter->alloc_rx_buff_failed++;
4400 break;
4401 }
4402
4403 skb_reserve(skb, E1000_HEADROOM);
4404 dma_unmap_single(&pdev->dev, buffer_info->dma,
4405 adapter->rx_buffer_len,
4406 DMA_FROM_DEVICE);
4407 buffer_info->dma = 0;
4408 buffer_info->rxbuf.data = NULL;
4409 }
4410
4411 if (++i == rx_ring->count)
4412 i = 0;
4413
4414 next_rxd = E1000_RX_DESC(*rx_ring, i);
4415 prefetch(next_rxd);
4416
4417 next_buffer = &rx_ring->buffer_info[i];
4418
4419 cleaned = true;
4420 cleaned_count++;
4421
4422 /* !EOP means multiple descriptors were used to store a single
4423  * packet, if that's the case we need to toss it. In fact, we need
4424  * to toss every packet with the EOP bit clear and the next
4425 * frame that _does_ have the EOP bit set, as it is by
4426 * definition only a frame fragment
4427 */
4428 if (unlikely(!(status & E1000_RXD_STAT_EOP)))
4429 adapter->discarding = true;
4430
4431 if (adapter->discarding) {
4432 /* All receives must fit into a single buffer */
4433 netdev_dbg(netdev, "Receive packet consumed multiple buffers\n");
4434 dev_kfree_skb(skb);
4435 if (status & E1000_RXD_STAT_EOP)
4436 adapter->discarding = false;
4437 goto next_desc;
4438 }
4439
4440 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4441 if (e1000_tbi_should_accept(adapter, status,
4442 rx_desc->errors,
4443 length, data)) {
4444 length--;
4445 } else if (netdev->features & NETIF_F_RXALL) {
4446 goto process_skb;
4447 } else {
4448 dev_kfree_skb(skb);
4449 goto next_desc;
4450 }
4451 }
4452
4453 process_skb:
4454 total_rx_bytes += (length - 4); /* don't count FCS */
4455 total_rx_packets++;
4456
4457 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4458 /* adjust length to remove Ethernet CRC, this must be
4459 * done after the TBI_ACCEPT workaround above
4460 */
4461 length -= 4;
4462
4463 if (buffer_info->rxbuf.data == NULL)
4464 skb_put(skb, length);
4465 else /* copybreak skb */
4466 skb_trim(skb, length);
4467
4468 /* Receive Checksum Offload */
4469 e1000_rx_checksum(adapter,
4470 (u32)(status) |
4471 ((u32)(rx_desc->errors) << 24),
4472 le16_to_cpu(rx_desc->csum), skb);
4473
4474 e1000_receive_skb(adapter, status, rx_desc->special, skb);
4475
4476 next_desc:
4477 rx_desc->status = 0;
4478
4479 /* return some buffers to hardware, one at a time is too slow */
4480 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4481 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4482 cleaned_count = 0;
4483 }
4484
4485 /* use prefetched values */
4486 rx_desc = next_rxd;
4487 buffer_info = next_buffer;
4488 }
4489 rx_ring->next_to_clean = i;
4490
4491 cleaned_count = E1000_DESC_UNUSED(rx_ring);
4492 if (cleaned_count)
4493 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4494
4495 adapter->total_rx_packets += total_rx_packets;
4496 adapter->total_rx_bytes += total_rx_bytes;
4497 netdev->stats.rx_bytes += total_rx_bytes;
4498 netdev->stats.rx_packets += total_rx_packets;
4499 return cleaned;
4500 }
4501
4502 /**
4503 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
4504 * @adapter: address of board private structure
4505 * @rx_ring: pointer to receive ring structure
4506 * @cleaned_count: number of buffers to allocate this pass
4507 **/
4508 static void
4509 e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4510 struct e1000_rx_ring *rx_ring, int cleaned_count)
4511 {
4512 struct pci_dev *pdev = adapter->pdev;
4513 struct e1000_rx_desc *rx_desc;
4514 struct e1000_rx_buffer *buffer_info;
4515 unsigned int i;
4516
4517 i = rx_ring->next_to_use;
4518 buffer_info = &rx_ring->buffer_info[i];
4519
4520 while (cleaned_count--) {
4521 /* allocate a new page if necessary */
4522 if (!buffer_info->rxbuf.page) {
4523 buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC);
4524 if (unlikely(!buffer_info->rxbuf.page)) {
4525 adapter->alloc_rx_buff_failed++;
4526 break;
4527 }
4528 }
4529
4530 if (!buffer_info->dma) {
4531 buffer_info->dma = dma_map_page(&pdev->dev,
4532 buffer_info->rxbuf.page, 0,
4533 adapter->rx_buffer_len,
4534 DMA_FROM_DEVICE);
4535 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4536 put_page(buffer_info->rxbuf.page);
4537 buffer_info->rxbuf.page = NULL;
4538 buffer_info->dma = 0;
4539 adapter->alloc_rx_buff_failed++;
4540 break;
4541 }
4542 }
4543
4544 rx_desc = E1000_RX_DESC(*rx_ring, i);
4545 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4546
4547 if (unlikely(++i == rx_ring->count))
4548 i = 0;
4549 buffer_info = &rx_ring->buffer_info[i];
4550 }
4551
4552 if (likely(rx_ring->next_to_use != i)) {
4553 rx_ring->next_to_use = i;
4554 if (unlikely(i-- == 0))
4555 i = (rx_ring->count - 1);
4556
4557 /* Force memory writes to complete before letting h/w
4558 * know there are new descriptors to fetch. (Only
4559 * applicable for weak-ordered memory model archs,
4560 * such as IA-64).
4561 */
4562 wmb();
4563 writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4564 }
4565 }
4566
4567 /**
4568 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
4569 * @adapter: address of board private structure
4570 **/
4571 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4572 struct e1000_rx_ring *rx_ring,
4573 int cleaned_count)
4574 {
4575 struct e1000_hw *hw = &adapter->hw;
4576 struct pci_dev *pdev = adapter->pdev;
4577 struct e1000_rx_desc *rx_desc;
4578 struct e1000_rx_buffer *buffer_info;
4579 unsigned int i;
4580 unsigned int bufsz = adapter->rx_buffer_len;
4581
4582 i = rx_ring->next_to_use;
4583 buffer_info = &rx_ring->buffer_info[i];
4584
4585 while (cleaned_count--) {
4586 void *data;
4587
4588 if (buffer_info->rxbuf.data)
4589 goto skip;
4590
4591 data = e1000_alloc_frag(adapter);
4592 if (!data) {
4593 /* Better luck next round */
4594 adapter->alloc_rx_buff_failed++;
4595 break;
4596 }
4597
4598 /* Fix for errata 23, can't cross 64kB boundary */
4599 if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4600 void *olddata = data;
4601 e_err(rx_err, "skb align check failed: %u bytes at "
4602 "%p\n", bufsz, data);
4603 /* Try again, without freeing the previous */
4604 data = e1000_alloc_frag(adapter);
4605 /* Failed allocation, critical failure */
4606 if (!data) {
4607 skb_free_frag(olddata);
4608 adapter->alloc_rx_buff_failed++;
4609 break;
4610 }
4611
4612 if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4613 /* give up */
4614 skb_free_frag(data);
4615 skb_free_frag(olddata);
4616 adapter->alloc_rx_buff_failed++;
4617 break;
4618 }
4619
4620 /* Use new allocation */
4621 skb_free_frag(olddata);
4622 }
4623 buffer_info->dma = dma_map_single(&pdev->dev,
4624 data,
4625 adapter->rx_buffer_len,
4626 DMA_FROM_DEVICE);
4627 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4628 skb_free_frag(data);
4629 buffer_info->dma = 0;
4630 adapter->alloc_rx_buff_failed++;
4631 break;
4632 }
4633
4634 /* XXX if it was allocated cleanly it will never map to a
4635 * boundary crossing
4636 */
4637
4638 /* Fix for errata 23, can't cross 64kB boundary */
4639 if (!e1000_check_64k_bound(adapter,
4640 (void *)(unsigned long)buffer_info->dma,
4641 adapter->rx_buffer_len)) {
4642 e_err(rx_err, "dma align check failed: %u bytes at "
4643 "%p\n", adapter->rx_buffer_len,
4644 (void *)(unsigned long)buffer_info->dma);
4645
4646 dma_unmap_single(&pdev->dev, buffer_info->dma,
4647 adapter->rx_buffer_len,
4648 DMA_FROM_DEVICE);
4649
4650 skb_free_frag(data);
4651 buffer_info->rxbuf.data = NULL;
4652 buffer_info->dma = 0;
4653
4654 adapter->alloc_rx_buff_failed++;
4655 break;
4656 }
4657 buffer_info->rxbuf.data = data;
4658 skip:
4659 rx_desc = E1000_RX_DESC(*rx_ring, i);
4660 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4661
4662 if (unlikely(++i == rx_ring->count))
4663 i = 0;
4664 buffer_info = &rx_ring->buffer_info[i];
4665 }
4666
4667 if (likely(rx_ring->next_to_use != i)) {
4668 rx_ring->next_to_use = i;
4669 if (unlikely(i-- == 0))
4670 i = (rx_ring->count - 1);
4671
4672 /* Force memory writes to complete before letting h/w
4673 * know there are new descriptors to fetch. (Only
4674 * applicable for weak-ordered memory model archs,
4675 * such as IA-64).
4676 */
4677 wmb();
4678 writel(i, hw->hw_addr + rx_ring->rdt);
4679 }
4680 }
4681
4682 /**
4683 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
4684 * @adapter: board private structure
4685 **/
4686 static void e1000_smartspeed(struct e1000_adapter *adapter)
4687 {
4688 struct e1000_hw *hw = &adapter->hw;
4689 u16 phy_status;
4690 u16 phy_ctrl;
4691
4692 if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4693 !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
4694 return;
4695
4696 if (adapter->smartspeed == 0) {
4697 /* If Master/Slave config fault is asserted twice,
4698 * we assume back-to-back
4699 */
4700 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4701 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4702 return;
4703 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4704 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4705 return;
4706 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4707 if (phy_ctrl & CR_1000T_MS_ENABLE) {
4708 phy_ctrl &= ~CR_1000T_MS_ENABLE;
4709 e1000_write_phy_reg(hw, PHY_1000T_CTRL,
4710 phy_ctrl);
4711 adapter->smartspeed++;
4712 if (!e1000_phy_setup_autoneg(hw) &&
4713 !e1000_read_phy_reg(hw, PHY_CTRL,
4714 &phy_ctrl)) {
4715 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4716 MII_CR_RESTART_AUTO_NEG);
4717 e1000_write_phy_reg(hw, PHY_CTRL,
4718 phy_ctrl);
4719 }
4720 }
4721 return;
4722 } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4723 /* If still no link, perhaps using 2/3 pair cable */
4724 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4725 phy_ctrl |= CR_1000T_MS_ENABLE;
4726 e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4727 if (!e1000_phy_setup_autoneg(hw) &&
4728 !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
4729 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4730 MII_CR_RESTART_AUTO_NEG);
4731 e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
4732 }
4733 }
4734 /* Restart process after E1000_SMARTSPEED_MAX iterations */
4735 if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4736 adapter->smartspeed = 0;
4737 }
4738
4739 /**
4740 * e1000_ioctl - handle device ioctl requests
4741 * @netdev: network interface device structure
4742 * @ifr: interface request structure
4743 * @cmd: ioctl command to execute
4744 **/
4745 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4746 {
4747 switch (cmd) {
4748 case SIOCGMIIPHY:
4749 case SIOCGMIIREG:
4750 case SIOCSMIIREG:
4751 return e1000_mii_ioctl(netdev, ifr, cmd);
4752 default:
4753 return -EOPNOTSUPP;
4754 }
4755 }
4756
4757 /**
4758 * e1000_mii_ioctl - handle MII ioctl calls
4759 * @netdev: network interface device structure
4760 * @ifr: interface request structure
4761 * @cmd: ioctl command to execute
4762 **/
4763 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4764 int cmd)
4765 {
4766 struct e1000_adapter *adapter = netdev_priv(netdev);
4767 struct e1000_hw *hw = &adapter->hw;
4768 struct mii_ioctl_data *data = if_mii(ifr);
4769 int retval;
4770 u16 mii_reg;
4771 unsigned long flags;
4772
4773 if (hw->media_type != e1000_media_type_copper)
4774 return -EOPNOTSUPP;
4775
4776 switch (cmd) {
4777 case SIOCGMIIPHY:
4778 data->phy_id = hw->phy_addr;
4779 break;
4780 case SIOCGMIIREG:
4781 spin_lock_irqsave(&adapter->stats_lock, flags);
4782 if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
4783 &data->val_out)) {
4784 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4785 return -EIO;
4786 }
4787 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4788 break;
4789 case SIOCSMIIREG:
4790 if (data->reg_num & ~(0x1F))
4791 return -EFAULT;
4792 mii_reg = data->val_in;
4793 spin_lock_irqsave(&adapter->stats_lock, flags);
4794 if (e1000_write_phy_reg(hw, data->reg_num,
4795 mii_reg)) {
4796 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4797 return -EIO;
4798 }
4799 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4800 if (hw->media_type == e1000_media_type_copper) {
4801 switch (data->reg_num) {
4802 case PHY_CTRL:
4803 if (mii_reg & MII_CR_POWER_DOWN)
4804 break;
4805 if (mii_reg & MII_CR_AUTO_NEG_EN) {
4806 hw->autoneg = 1;
4807 hw->autoneg_advertised = 0x2F;
4808 } else {
4809 u32 speed;
4810 if (mii_reg & 0x40)
4811 speed = SPEED_1000;
4812 else if (mii_reg & 0x2000)
4813 speed = SPEED_100;
4814 else
4815 speed = SPEED_10;
4816 retval = e1000_set_spd_dplx(
4817 adapter, speed,
4818 ((mii_reg & 0x100)
4819 ? DUPLEX_FULL :
4820 DUPLEX_HALF));
4821 if (retval)
4822 return retval;
4823 }
4824 if (netif_running(adapter->netdev))
4825 e1000_reinit_locked(adapter);
4826 else
4827 e1000_reset(adapter);
4828 break;
4829 case M88E1000_PHY_SPEC_CTRL:
4830 case M88E1000_EXT_PHY_SPEC_CTRL:
4831 if (e1000_phy_reset(hw))
4832 return -EIO;
4833 break;
4834 }
4835 } else {
4836 switch (data->reg_num) {
4837 case PHY_CTRL:
4838 if (mii_reg & MII_CR_POWER_DOWN)
4839 break;
4840 if (netif_running(adapter->netdev))
4841 e1000_reinit_locked(adapter);
4842 else
4843 e1000_reset(adapter);
4844 break;
4845 }
4846 }
4847 break;
4848 default:
4849 return -EOPNOTSUPP;
4850 }
4851 return E1000_SUCCESS;
4852 }
4853
4854 void e1000_pci_set_mwi(struct e1000_hw *hw)
4855 {
4856 struct e1000_adapter *adapter = hw->back;
4857 int ret_val = pci_set_mwi(adapter->pdev);
4858
4859 if (ret_val)
4860 e_err(probe, "Error in setting MWI\n");
4861 }
4862
4863 void e1000_pci_clear_mwi(struct e1000_hw *hw)
4864 {
4865 struct e1000_adapter *adapter = hw->back;
4866
4867 pci_clear_mwi(adapter->pdev);
4868 }
4869
4870 int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
4871 {
4872 struct e1000_adapter *adapter = hw->back;
4873 return pcix_get_mmrbc(adapter->pdev);
4874 }
4875
4876 void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4877 {
4878 struct e1000_adapter *adapter = hw->back;
4879 pcix_set_mmrbc(adapter->pdev, mmrbc);
4880 }
4881
4882 void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
4883 {
4884 outl(value, port);
4885 }
4886
4887 static bool e1000_vlan_used(struct e1000_adapter *adapter)
4888 {
4889 u16 vid;
4890
4891 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4892 return true;
4893 return false;
4894 }
4895
4896 static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4897 netdev_features_t features)
4898 {
4899 struct e1000_hw *hw = &adapter->hw;
4900 u32 ctrl;
4901
4902 ctrl = er32(CTRL);
4903 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
4904 /* enable VLAN tag insert/strip */
4905 ctrl |= E1000_CTRL_VME;
4906 } else {
4907 /* disable VLAN tag insert/strip */
4908 ctrl &= ~E1000_CTRL_VME;
4909 }
4910 ew32(CTRL, ctrl);
4911 }

4912 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4913 bool filter_on)
4914 {
4915 struct e1000_hw *hw = &adapter->hw;
4916 u32 rctl;
4917
4918 if (!test_bit(__E1000_DOWN, &adapter->flags))
4919 e1000_irq_disable(adapter);
4920
4921 __e1000_vlan_mode(adapter, adapter->netdev->features);
4922 if (filter_on) {
4923 /* enable VLAN receive filtering */
4924 rctl = er32(RCTL);
4925 rctl &= ~E1000_RCTL_CFIEN;
4926 if (!(adapter->netdev->flags & IFF_PROMISC))
4927 rctl |= E1000_RCTL_VFE;
4928 ew32(RCTL, rctl);
4929 e1000_update_mng_vlan(adapter);
4930 } else {
4931 /* disable VLAN receive filtering */
4932 rctl = er32(RCTL);
4933 rctl &= ~E1000_RCTL_VFE;
4934 ew32(RCTL, rctl);
4935 }
4936
4937 if (!test_bit(__E1000_DOWN, &adapter->flags))
4938 e1000_irq_enable(adapter);
4939 }
4940
4941 static void e1000_vlan_mode(struct net_device *netdev,
4942 netdev_features_t features)
4943 {
4944 struct e1000_adapter *adapter = netdev_priv(netdev);
4945
4946 if (!test_bit(__E1000_DOWN, &adapter->flags))
4947 e1000_irq_disable(adapter);
4948
4949 __e1000_vlan_mode(adapter, features);
4950
4951 if (!test_bit(__E1000_DOWN, &adapter->flags))
4952 e1000_irq_enable(adapter);
4953 }
4954
4955 static int e1000_vlan_rx_add_vid(struct net_device *netdev,
4956 __be16 proto, u16 vid)
4957 {
4958 struct e1000_adapter *adapter = netdev_priv(netdev);
4959 struct e1000_hw *hw = &adapter->hw;
4960 u32 vfta, index;
4961
4962 if ((hw->mng_cookie.status &
4963 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4964 (vid == adapter->mng_vlan_id))
4965 return 0;
4966
4967 if (!e1000_vlan_used(adapter))
4968 e1000_vlan_filter_on_off(adapter, true);
4969
4970 /* add VID to filter table */
4971 index = (vid >> 5) & 0x7F;
4972 vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4973 vfta |= (1 << (vid & 0x1F));
4974 e1000_write_vfta(hw, index, vfta);
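	/* For example, vid 100 selects VFTA word 3 (100 >> 5) and bit 4
	 * (100 & 0x1F) within that word.
	 */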
4975
4976 set_bit(vid, adapter->active_vlans);
4977
4978 return 0;
4979 }
4980
4981 static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
4982 __be16 proto, u16 vid)
4983 {
4984 struct e1000_adapter *adapter = netdev_priv(netdev);
4985 struct e1000_hw *hw = &adapter->hw;
4986 u32 vfta, index;
4987
4988 if (!test_bit(__E1000_DOWN, &adapter->flags))
4989 e1000_irq_disable(adapter);
4990 if (!test_bit(__E1000_DOWN, &adapter->flags))
4991 e1000_irq_enable(adapter);
4992
4993 /* remove VID from filter table */
4994 index = (vid >> 5) & 0x7F;
4995 vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4996 vfta &= ~(1 << (vid & 0x1F));
4997 e1000_write_vfta(hw, index, vfta);
4998
4999 clear_bit(vid, adapter->active_vlans);
5000
5001 if (!e1000_vlan_used(adapter))
5002 e1000_vlan_filter_on_off(adapter, false);
5003
5004 return 0;
5005 }
5006
5007 static void e1000_restore_vlan(struct e1000_adapter *adapter)
5008 {
5009 u16 vid;
5010
5011 if (!e1000_vlan_used(adapter))
5012 return;
5013
5014 e1000_vlan_filter_on_off(adapter, true);
5015 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
5016 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
5017 }
5018
5019 int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
5020 {
5021 struct e1000_hw *hw = &adapter->hw;
5022
5023 hw->autoneg = 0;
5024
5025 /* Make sure dplx is at most 1 bit and lsb of speed is not set
5026 * for the switch() below to work
5027 */
5028 if ((spd & 1) || (dplx & ~1))
5029 goto err_inval;
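	/* With duplex limited to 0/1 and the speed's lsb clear, spd + dplx is
	 * unique per setting, e.g. SPEED_100 + DUPLEX_FULL = 101 in the
	 * switch below.
	 */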
5030
5031 	/* Fiber NICs only allow 1000 Mbps full duplex */
5032 if ((hw->media_type == e1000_media_type_fiber) &&
5033 spd != SPEED_1000 &&
5034 dplx != DUPLEX_FULL)
5035 goto err_inval;
5036
5037 switch (spd + dplx) {
5038 case SPEED_10 + DUPLEX_HALF:
5039 hw->forced_speed_duplex = e1000_10_half;
5040 break;
5041 case SPEED_10 + DUPLEX_FULL:
5042 hw->forced_speed_duplex = e1000_10_full;
5043 break;
5044 case SPEED_100 + DUPLEX_HALF:
5045 hw->forced_speed_duplex = e1000_100_half;
5046 break;
5047 case SPEED_100 + DUPLEX_FULL:
5048 hw->forced_speed_duplex = e1000_100_full;
5049 break;
5050 case SPEED_1000 + DUPLEX_FULL:
5051 hw->autoneg = 1;
5052 hw->autoneg_advertised = ADVERTISE_1000_FULL;
5053 break;
5054 case SPEED_1000 + DUPLEX_HALF: /* not supported */
5055 default:
5056 goto err_inval;
5057 }
5058
5059 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
5060 hw->mdix = AUTO_ALL_MODES;
5061
5062 return 0;
5063
5064 err_inval:
5065 e_err(probe, "Unsupported Speed/Duplex configuration\n");
5066 return -EINVAL;
5067 }
5068
5069 static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
5070 {
5071 struct net_device *netdev = pci_get_drvdata(pdev);
5072 struct e1000_adapter *adapter = netdev_priv(netdev);
5073 struct e1000_hw *hw = &adapter->hw;
5074 u32 ctrl, ctrl_ext, rctl, status;
5075 u32 wufc = adapter->wol;
5076 #ifdef CONFIG_PM
5077 int retval = 0;
5078 #endif
5079
5080 netif_device_detach(netdev);
5081
5082 if (netif_running(netdev)) {
5083 int count = E1000_CHECK_RESET_COUNT;
5084
5085 while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
5086 usleep_range(10000, 20000);
5087
5088 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
5089 e1000_down(adapter);
5090 }
5091
5092 #ifdef CONFIG_PM
5093 retval = pci_save_state(pdev);
5094 if (retval)
5095 return retval;
5096 #endif
5097
5098 status = er32(STATUS);
5099 if (status & E1000_STATUS_LU)
5100 wufc &= ~E1000_WUFC_LNKC;
5101
5102 if (wufc) {
5103 e1000_setup_rctl(adapter);
5104 e1000_set_rx_mode(netdev);
5105
5106 rctl = er32(RCTL);
5107
5108 /* turn on all-multi mode if wake on multicast is enabled */
5109 if (wufc & E1000_WUFC_MC)
5110 rctl |= E1000_RCTL_MPE;
5111
5112 /* enable receives in the hardware */
5113 ew32(RCTL, rctl | E1000_RCTL_EN);
5114
5115 if (hw->mac_type >= e1000_82540) {
5116 ctrl = er32(CTRL);
5117 /* advertise wake from D3Cold */
5118 #define E1000_CTRL_ADVD3WUC 0x00100000
5119 /* phy power management enable */
5120 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5121 ctrl |= E1000_CTRL_ADVD3WUC |
5122 E1000_CTRL_EN_PHY_PWR_MGMT;
5123 ew32(CTRL, ctrl);
5124 }
5125
5126 if (hw->media_type == e1000_media_type_fiber ||
5127 hw->media_type == e1000_media_type_internal_serdes) {
5128 /* keep the laser running in D3 */
5129 ctrl_ext = er32(CTRL_EXT);
5130 ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
5131 ew32(CTRL_EXT, ctrl_ext);
5132 }
5133
5134 ew32(WUC, E1000_WUC_PME_EN);
5135 ew32(WUFC, wufc);
5136 } else {
5137 ew32(WUC, 0);
5138 ew32(WUFC, 0);
5139 }
5140
5141 e1000_release_manageability(adapter);
5142
5143 *enable_wake = !!wufc;
5144
5145 /* make sure adapter isn't asleep if manageability is enabled */
5146 if (adapter->en_mng_pt)
5147 *enable_wake = true;
5148
5149 if (netif_running(netdev))
5150 e1000_free_irq(adapter);
5151
5152 if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5153 pci_disable_device(pdev);
5154
5155 return 0;
5156 }
5157
5158 #ifdef CONFIG_PM
5159 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
5160 {
5161 int retval;
5162 bool wake;
5163
5164 retval = __e1000_shutdown(pdev, &wake);
5165 if (retval)
5166 return retval;
5167
5168 if (wake) {
5169 pci_prepare_to_sleep(pdev);
5170 } else {
5171 pci_wake_from_d3(pdev, false);
5172 pci_set_power_state(pdev, PCI_D3hot);
5173 }
5174
5175 return 0;
5176 }
5177
5178 static int e1000_resume(struct pci_dev *pdev)
5179 {
5180 struct net_device *netdev = pci_get_drvdata(pdev);
5181 struct e1000_adapter *adapter = netdev_priv(netdev);
5182 struct e1000_hw *hw = &adapter->hw;
5183 u32 err;
5184
5185 pci_set_power_state(pdev, PCI_D0);
5186 pci_restore_state(pdev);
5187 pci_save_state(pdev);
5188
5189 if (adapter->need_ioport)
5190 err = pci_enable_device(pdev);
5191 else
5192 err = pci_enable_device_mem(pdev);
5193 if (err) {
5194 pr_err("Cannot enable PCI device from suspend\n");
5195 return err;
5196 }
5197
5198 /* flush memory to make sure state is correct */
5199 smp_mb__before_atomic();
5200 clear_bit(__E1000_DISABLED, &adapter->flags);
5201 pci_set_master(pdev);
5202
5203 pci_enable_wake(pdev, PCI_D3hot, 0);
5204 pci_enable_wake(pdev, PCI_D3cold, 0);
5205
5206 if (netif_running(netdev)) {
5207 err = e1000_request_irq(adapter);
5208 if (err)
5209 return err;
5210 }
5211
5212 e1000_power_up_phy(adapter);
5213 e1000_reset(adapter);
5214 ew32(WUS, ~0);
5215
5216 e1000_init_manageability(adapter);
5217
5218 if (netif_running(netdev))
5219 e1000_up(adapter);
5220
5221 netif_device_attach(netdev);
5222
5223 return 0;
5224 }
5225 #endif
5226
5227 static void e1000_shutdown(struct pci_dev *pdev)
5228 {
5229 bool wake;
5230
5231 __e1000_shutdown(pdev, &wake);
5232
5233 if (system_state == SYSTEM_POWER_OFF) {
5234 pci_wake_from_d3(pdev, wake);
5235 pci_set_power_state(pdev, PCI_D3hot);
5236 }
5237 }
5238
5239 #ifdef CONFIG_NET_POLL_CONTROLLER
5240 /* Polling 'interrupt' - used by things like netconsole to send skbs
5241 * without having to re-enable interrupts. It's not called while
5242 * the interrupt routine is executing.
5243 */
5244 static void e1000_netpoll(struct net_device *netdev)
5245 {
5246 struct e1000_adapter *adapter = netdev_priv(netdev);
5247
5248 if (disable_hardirq(adapter->pdev->irq))
5249 e1000_intr(adapter->pdev->irq, netdev);
5250 enable_irq(adapter->pdev->irq);
5251 }
5252 #endif
5253
5254 /**
5255 * e1000_io_error_detected - called when PCI error is detected
5256 * @pdev: Pointer to PCI device
5257 * @state: The current pci connection state
5258 *
5259 * This function is called after a PCI bus error affecting
5260 * this device has been detected.
5261 */
5262 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5263 pci_channel_state_t state)
5264 {
5265 struct net_device *netdev = pci_get_drvdata(pdev);
5266 struct e1000_adapter *adapter = netdev_priv(netdev);
5267
5268 netif_device_detach(netdev);
5269
5270 if (state == pci_channel_io_perm_failure)
5271 return PCI_ERS_RESULT_DISCONNECT;
5272
5273 if (netif_running(netdev))
5274 e1000_down(adapter);
5275
5276 if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5277 pci_disable_device(pdev);
5278
5279 	/* Request a slot reset. */
5280 return PCI_ERS_RESULT_NEED_RESET;
5281 }
5282
5283 /**
5284 * e1000_io_slot_reset - called after the pci bus has been reset.
5285 * @pdev: Pointer to PCI device
5286 *
5287 * Restart the card from scratch, as if from a cold-boot. Implementation
5288 * resembles the first-half of the e1000_resume routine.
5289 */
5290 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5291 {
5292 struct net_device *netdev = pci_get_drvdata(pdev);
5293 struct e1000_adapter *adapter = netdev_priv(netdev);
5294 struct e1000_hw *hw = &adapter->hw;
5295 int err;
5296
5297 if (adapter->need_ioport)
5298 err = pci_enable_device(pdev);
5299 else
5300 err = pci_enable_device_mem(pdev);
5301 if (err) {
5302 pr_err("Cannot re-enable PCI device after reset.\n");
5303 return PCI_ERS_RESULT_DISCONNECT;
5304 }
5305
5306 /* flush memory to make sure state is correct */
5307 smp_mb__before_atomic();
5308 clear_bit(__E1000_DISABLED, &adapter->flags);
5309 pci_set_master(pdev);
5310
5311 pci_enable_wake(pdev, PCI_D3hot, 0);
5312 pci_enable_wake(pdev, PCI_D3cold, 0);
5313
5314 e1000_reset(adapter);
5315 ew32(WUS, ~0);
5316
5317 return PCI_ERS_RESULT_RECOVERED;
5318 }
5319
5320 /**
5321 * e1000_io_resume - called when traffic can start flowing again.
5322 * @pdev: Pointer to PCI device
5323 *
5324 * This callback is called when the error recovery driver tells us that
5325 * its OK to resume normal operation. Implementation resembles the
5326 * second-half of the e1000_resume routine.
5327 */
5328 static void e1000_io_resume(struct pci_dev *pdev)
5329 {
5330 struct net_device *netdev = pci_get_drvdata(pdev);
5331 struct e1000_adapter *adapter = netdev_priv(netdev);
5332
5333 e1000_init_manageability(adapter);
5334
5335 if (netif_running(netdev)) {
5336 if (e1000_up(adapter)) {
5337 pr_info("can't bring device back up after reset\n");
5338 return;
5339 }
5340 }
5341
5342 netif_device_attach(netdev);
5343 }
5344
5345 /* e1000_main.c */
5346