1 /*******************************************************************************
2
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2012 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27 *******************************************************************************/
28
29 /*
30 * 82562G 10/100 Network Connection
31 * 82562G-2 10/100 Network Connection
32 * 82562GT 10/100 Network Connection
33 * 82562GT-2 10/100 Network Connection
34 * 82562V 10/100 Network Connection
35 * 82562V-2 10/100 Network Connection
36 * 82566DC-2 Gigabit Network Connection
37 * 82566DC Gigabit Network Connection
38 * 82566DM-2 Gigabit Network Connection
39 * 82566DM Gigabit Network Connection
40 * 82566MC Gigabit Network Connection
41 * 82566MM Gigabit Network Connection
42 * 82567LM Gigabit Network Connection
43 * 82567LF Gigabit Network Connection
44 * 82567V Gigabit Network Connection
45 * 82567LM-2 Gigabit Network Connection
46 * 82567LF-2 Gigabit Network Connection
47 * 82567V-2 Gigabit Network Connection
48 * 82567LF-3 Gigabit Network Connection
49 * 82567LM-3 Gigabit Network Connection
50 * 82567LM-4 Gigabit Network Connection
51 * 82577LM Gigabit Network Connection
52 * 82577LC Gigabit Network Connection
53 * 82578DM Gigabit Network Connection
54 * 82578DC Gigabit Network Connection
55 * 82579LM Gigabit Network Connection
56 * 82579V Gigabit Network Connection
57 */
58
59 #include "e1000.h"
60
61 #define ICH_FLASH_GFPREG 0x0000
62 #define ICH_FLASH_HSFSTS 0x0004
63 #define ICH_FLASH_HSFCTL 0x0006
64 #define ICH_FLASH_FADDR 0x0008
65 #define ICH_FLASH_FDATA0 0x0010
66 #define ICH_FLASH_PR0 0x0074
67
68 #define ICH_FLASH_READ_COMMAND_TIMEOUT 500
69 #define ICH_FLASH_WRITE_COMMAND_TIMEOUT 500
70 #define ICH_FLASH_ERASE_COMMAND_TIMEOUT 3000000
71 #define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
72 #define ICH_FLASH_CYCLE_REPEAT_COUNT 10
73
74 #define ICH_CYCLE_READ 0
75 #define ICH_CYCLE_WRITE 2
76 #define ICH_CYCLE_ERASE 3
77
78 #define FLASH_GFPREG_BASE_MASK 0x1FFF
79 #define FLASH_SECTOR_ADDR_SHIFT 12
80
81 #define ICH_FLASH_SEG_SIZE_256 256
82 #define ICH_FLASH_SEG_SIZE_4K 4096
83 #define ICH_FLASH_SEG_SIZE_8K 8192
84 #define ICH_FLASH_SEG_SIZE_64K 65536
85
86
87 #define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */
88 /* FW established a valid mode */
89 #define E1000_ICH_FWSM_FW_VALID 0x00008000
90
91 #define E1000_ICH_MNG_IAMT_MODE 0x2
92
93 #define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \
94 (ID_LED_DEF1_OFF2 << 8) | \
95 (ID_LED_DEF1_ON2 << 4) | \
96 (ID_LED_DEF1_DEF2))
97
98 #define E1000_ICH_NVM_SIG_WORD 0x13
99 #define E1000_ICH_NVM_SIG_MASK 0xC000
100 #define E1000_ICH_NVM_VALID_SIG_MASK 0xC0
101 #define E1000_ICH_NVM_SIG_VALUE 0x80
102
103 #define E1000_ICH8_LAN_INIT_TIMEOUT 1500
104
105 #define E1000_FEXTNVM_SW_CONFIG 1
106 #define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */
107
108 #define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7
109 #define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7
110 #define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3
111
112 #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
113
114 #define E1000_ICH_RAR_ENTRIES 7
115
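/* PHY register helpers: combine a PHY page number and register offset into the
 * single IGP-style address used by the PHY access routines below.
 */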
116 #define PHY_PAGE_SHIFT 5
117 #define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
118 ((reg) & MAX_PHY_REG_ADDRESS))
119 #define IGP3_KMRN_DIAG PHY_REG(770, 19) /* KMRN Diagnostic */
120 #define IGP3_VR_CTRL PHY_REG(776, 18) /* Voltage Regulator Control */
121
122 #define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002
123 #define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300
124 #define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200
125
126 #define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */
127
128 #define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in milliseconds */
129
130 /* SMBus Address Phy Register */
131 #define HV_SMB_ADDR PHY_REG(768, 26)
132 #define HV_SMB_ADDR_MASK 0x007F
133 #define HV_SMB_ADDR_PEC_EN 0x0200
134 #define HV_SMB_ADDR_VALID 0x0080
135
136 /* PHY Power Management Control */
137 #define HV_PM_CTRL PHY_REG(770, 17)
138
139 /* PHY Low Power Idle Control */
140 #define I82579_LPI_CTRL PHY_REG(772, 20)
141 #define I82579_LPI_CTRL_ENABLE_MASK 0x6000
142 #define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT 0x80
143
144 /* EMI Registers */
145 #define I82579_EMI_ADDR 0x10
146 #define I82579_EMI_DATA 0x11
147 #define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */
148 #define I82579_MSE_THRESHOLD 0x084F /* Mean Square Error Threshold */
149 #define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */
150
151 /* Strapping Option Register - RO */
152 #define E1000_STRAP 0x0000C
153 #define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000
154 #define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17
155
156 /* OEM Bits Phy Register */
157 #define HV_OEM_BITS PHY_REG(768, 25)
158 #define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */
159 #define HV_OEM_BITS_GBE_DIS 0x0040 /* Gigabit Disable */
160 #define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */
161
162 #define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */
163 #define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */
164
165 /* KMRN Mode Control */
166 #define HV_KMRN_MODE_CTRL PHY_REG(769, 16)
167 #define HV_KMRN_MDIO_SLOW 0x0400
168
169 /* KMRN FIFO Control and Status */
170 #define HV_KMRN_FIFO_CTRLSTA PHY_REG(770, 16)
171 #define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK 0x7000
172 #define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT 12
173
174 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
175 /* Offset 04h HSFSTS */
176 union ich8_hws_flash_status {
177 struct ich8_hsfsts {
178 u16 flcdone :1; /* bit 0 Flash Cycle Done */
179 u16 flcerr :1; /* bit 1 Flash Cycle Error */
180 u16 dael :1; /* bit 2 Direct Access error Log */
181 u16 berasesz :2; /* bit 4:3 Sector Erase Size */
182 u16 flcinprog :1; /* bit 5 flash cycle in Progress */
183 u16 reserved1 :2; /* bit 7:6 Reserved */
184 u16 reserved2 :6; /* bit 13:8 Reserved */
185 u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */
186 u16 flockdn :1; /* bit 15 Flash Config Lock-Down */
187 } hsf_status;
188 u16 regval;
189 };
190
191 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
192 /* Offset 06h FLCTL */
193 union ich8_hws_flash_ctrl {
194 struct ich8_hsflctl {
195 u16 flcgo :1; /* 0 Flash Cycle Go */
196 u16 flcycle :2; /* 2:1 Flash Cycle */
197 u16 reserved :5; /* 7:3 Reserved */
198 u16 fldbcount :2; /* 9:8 Flash Data Byte Count */
199 u16 flockdn :6; /* 15:10 Reserved */
200 } hsf_ctrl;
201 u16 regval;
202 };
203
204 /* ICH Flash Region Access Permissions */
205 union ich8_hws_flash_regacc {
206 struct ich8_flracc {
207 u32 grra :8; /* 0:7 GbE region Read Access */
208 u32 grwa :8; /* 8:15 GbE region Write Access */
209 u32 gmrag :8; /* 23:16 GbE Master Read Access Grant */
210 u32 gmwag :8; /* 31:24 GbE Master Write Access Grant */
211 } hsf_flregacc;
212 u16 regval;
213 };
214
215 /* ICH Flash Protected Region */
216 union ich8_flash_protected_range {
217 struct ich8_pr {
218 u32 base:13; /* 0:12 Protected Range Base */
219 u32 reserved1:2; /* 13:14 Reserved */
220 u32 rpe:1; /* 15 Read Protection Enable */
221 u32 limit:13; /* 16:28 Protected Range Limit */
222 u32 reserved2:2; /* 29:30 Reserved */
223 u32 wpe:1; /* 31 Write Protection Enable */
224 } range;
225 u32 regval;
226 };
227
228 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
229 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
230 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
231 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
232 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
233 u32 offset, u8 byte);
234 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
235 u8 *data);
236 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
237 u16 *data);
238 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
239 u8 size, u16 *data);
240 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
241 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
242 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
243 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
244 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
245 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
246 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
247 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
248 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
249 static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
250 static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
251 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
252 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
253 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
254 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
255 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
256 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
257 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
258 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
259 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
260
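/* Flash register accessors - the ICH/PCH GbE flash registers are reached
 * through a separately mapped BAR (hw->flash_address), not the main CSR space.
 */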
261 static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
262 {
263 return readw(hw->flash_address + reg);
264 }
265
266 static inline u32 __er32flash(struct e1000_hw *hw, unsigned long reg)
267 {
268 return readl(hw->flash_address + reg);
269 }
270
271 static inline void __ew16flash(struct e1000_hw *hw, unsigned long reg, u16 val)
272 {
273 writew(val, hw->flash_address + reg);
274 }
275
276 static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
277 {
278 writel(val, hw->flash_address + reg);
279 }
280
281 #define er16flash(reg) __er16flash(hw, (reg))
282 #define er32flash(reg) __er32flash(hw, (reg))
283 #define ew16flash(reg, val) __ew16flash(hw, (reg), (val))
284 #define ew32flash(reg, val) __ew32flash(hw, (reg), (val))
285
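/**
 * e1000_toggle_lanphypc_value_ich8lan - toggle the LANPHYPC Value bit
 * @hw: pointer to the HW structure
 *
 * Pulse the LANPHYPC Value bit (with the Override bit set) to force the
 * MAC-PHY interconnect out of SMBus mode; see the caller for details.
 **/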
286 static void e1000_toggle_lanphypc_value_ich8lan(struct e1000_hw *hw)
287 {
288 u32 ctrl;
289
290 ctrl = er32(CTRL);
291 ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
292 ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
293 ew32(CTRL, ctrl);
294 e1e_flush();
295 udelay(10);
296 ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
297 ew32(CTRL, ctrl);
298 }
299
300 /**
301 * e1000_init_phy_params_pchlan - Initialize PHY function pointers
302 * @hw: pointer to the HW structure
303 *
304 * Initialize family-specific PHY parameters and function pointers.
305 **/
306 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
307 {
308 struct e1000_phy_info *phy = &hw->phy;
309 s32 ret_val = 0;
310
311 phy->addr = 1;
312 phy->reset_delay_us = 100;
313
314 phy->ops.set_page = e1000_set_page_igp;
315 phy->ops.read_reg = e1000_read_phy_reg_hv;
316 phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
317 phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
318 phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
319 phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
320 phy->ops.write_reg = e1000_write_phy_reg_hv;
321 phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
322 phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
323 phy->ops.power_up = e1000_power_up_phy_copper;
324 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
325 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
326
327 if (!hw->phy.ops.check_reset_block(hw)) {
328 u32 fwsm = er32(FWSM);
329
330 /*
331 * The MAC-PHY interconnect may still be in SMBus mode after
332 * Sx->S0. If resetting the PHY is not blocked, toggle the
333 * LANPHYPC Value bit to force the interconnect to PCIe mode.
334 */
335 e1000_toggle_lanphypc_value_ich8lan(hw);
336 msleep(50);
337
338 /*
339 * Gate automatic PHY configuration by hardware on
340 * non-managed 82579
341 */
342 if ((hw->mac.type == e1000_pch2lan) &&
343 !(fwsm & E1000_ICH_FWSM_FW_VALID))
344 e1000_gate_hw_phy_config_ich8lan(hw, true);
345
346 /*
347 * Reset the PHY before any access to it. Doing so, ensures
348 * that the PHY is in a known good state before we read/write
349 * PHY registers. The generic reset is sufficient here,
350 * because we haven't determined the PHY type yet.
351 */
352 ret_val = e1000e_phy_hw_reset_generic(hw);
353 if (ret_val)
354 return ret_val;
355
356 /* Ungate automatic PHY configuration on non-managed 82579 */
357 if ((hw->mac.type == e1000_pch2lan) &&
358 !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
359 usleep_range(10000, 20000);
360 e1000_gate_hw_phy_config_ich8lan(hw, false);
361 }
362 }
363
364 phy->id = e1000_phy_unknown;
365 switch (hw->mac.type) {
366 default:
367 ret_val = e1000e_get_phy_id(hw);
368 if (ret_val)
369 return ret_val;
370 if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
371 break;
372 /* fall-through */
373 case e1000_pch2lan:
374 /*
375 * In case the PHY needs to be in mdio slow mode,
376 * set slow mode and try to get the PHY id again.
377 */
378 ret_val = e1000_set_mdio_slow_mode_hv(hw);
379 if (ret_val)
380 return ret_val;
381 ret_val = e1000e_get_phy_id(hw);
382 if (ret_val)
383 return ret_val;
384 break;
385 }
386 phy->type = e1000e_get_phy_type_from_id(phy->id);
387
388 switch (phy->type) {
389 case e1000_phy_82577:
390 case e1000_phy_82579:
391 phy->ops.check_polarity = e1000_check_polarity_82577;
392 phy->ops.force_speed_duplex =
393 e1000_phy_force_speed_duplex_82577;
394 phy->ops.get_cable_length = e1000_get_cable_length_82577;
395 phy->ops.get_info = e1000_get_phy_info_82577;
396 phy->ops.commit = e1000e_phy_sw_reset;
397 break;
398 case e1000_phy_82578:
399 phy->ops.check_polarity = e1000_check_polarity_m88;
400 phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
401 phy->ops.get_cable_length = e1000e_get_cable_length_m88;
402 phy->ops.get_info = e1000e_get_phy_info_m88;
403 break;
404 default:
405 ret_val = -E1000_ERR_PHY;
406 break;
407 }
408
409 return ret_val;
410 }
411
412 /**
413 * e1000_init_phy_params_ich8lan - Initialize PHY function pointers
414 * @hw: pointer to the HW structure
415 *
416 * Initialize family-specific PHY parameters and function pointers.
417 **/
418 static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
419 {
420 struct e1000_phy_info *phy = &hw->phy;
421 s32 ret_val;
422 u16 i = 0;
423
424 phy->addr = 1;
425 phy->reset_delay_us = 100;
426
427 phy->ops.power_up = e1000_power_up_phy_copper;
428 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
429
430 /*
431 * We may need to do this twice - once for IGP and if that fails,
432 * we'll set BM func pointers and try again
433 */
434 ret_val = e1000e_determine_phy_address(hw);
435 if (ret_val) {
436 phy->ops.write_reg = e1000e_write_phy_reg_bm;
437 phy->ops.read_reg = e1000e_read_phy_reg_bm;
438 ret_val = e1000e_determine_phy_address(hw);
439 if (ret_val) {
440 e_dbg("Cannot determine PHY addr. Erroring out\n");
441 return ret_val;
442 }
443 }
444
445 phy->id = 0;
446 while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) &&
447 (i++ < 100)) {
448 usleep_range(1000, 2000);
449 ret_val = e1000e_get_phy_id(hw);
450 if (ret_val)
451 return ret_val;
452 }
453
454 /* Verify phy id */
455 switch (phy->id) {
456 case IGP03E1000_E_PHY_ID:
457 phy->type = e1000_phy_igp_3;
458 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
459 phy->ops.read_reg_locked = e1000e_read_phy_reg_igp_locked;
460 phy->ops.write_reg_locked = e1000e_write_phy_reg_igp_locked;
461 phy->ops.get_info = e1000e_get_phy_info_igp;
462 phy->ops.check_polarity = e1000_check_polarity_igp;
463 phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_igp;
464 break;
465 case IFE_E_PHY_ID:
466 case IFE_PLUS_E_PHY_ID:
467 case IFE_C_E_PHY_ID:
468 phy->type = e1000_phy_ife;
469 phy->autoneg_mask = E1000_ALL_NOT_GIG;
470 phy->ops.get_info = e1000_get_phy_info_ife;
471 phy->ops.check_polarity = e1000_check_polarity_ife;
472 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
473 break;
474 case BME1000_E_PHY_ID:
475 phy->type = e1000_phy_bm;
476 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
477 phy->ops.read_reg = e1000e_read_phy_reg_bm;
478 phy->ops.write_reg = e1000e_write_phy_reg_bm;
479 phy->ops.commit = e1000e_phy_sw_reset;
480 phy->ops.get_info = e1000e_get_phy_info_m88;
481 phy->ops.check_polarity = e1000_check_polarity_m88;
482 phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
483 break;
484 default:
485 return -E1000_ERR_PHY;
486 break;
487 }
488
489 return 0;
490 }
491
492 /**
493 * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
494 * @hw: pointer to the HW structure
495 *
496 * Initialize family-specific NVM parameters and function
497 * pointers.
498 **/
499 static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
500 {
501 struct e1000_nvm_info *nvm = &hw->nvm;
502 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
503 u32 gfpreg, sector_base_addr, sector_end_addr;
504 u16 i;
505
506 /* Can't read flash registers if the register set isn't mapped. */
507 if (!hw->flash_address) {
508 e_dbg("ERROR: Flash registers not mapped\n");
509 return -E1000_ERR_CONFIG;
510 }
511
512 nvm->type = e1000_nvm_flash_sw;
513
514 gfpreg = er32flash(ICH_FLASH_GFPREG);
515
516 /*
517 * sector_X_addr is a "sector"-aligned address (4096 bytes)
518 * Add 1 to sector_end_addr since this sector is included in
519 * the overall size.
520 */
521 sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
522 sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
523
524 /* flash_base_addr is byte-aligned */
525 nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
526
527 /*
528 * find total size of the NVM, then cut in half since the total
529 * size represents two separate NVM banks.
530 */
531 nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
532 << FLASH_SECTOR_ADDR_SHIFT;
533 nvm->flash_bank_size /= 2;
534 /* Adjust to word count */
535 nvm->flash_bank_size /= sizeof(u16);
536
537 nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS;
538
539 /* Clear shadow ram */
540 for (i = 0; i < nvm->word_size; i++) {
541 dev_spec->shadow_ram[i].modified = false;
542 dev_spec->shadow_ram[i].value = 0xFFFF;
543 }
544
545 return 0;
546 }
547
548 /**
549 * e1000_init_mac_params_ich8lan - Initialize MAC function pointers
550 * @hw: pointer to the HW structure
551 *
552 * Initialize family-specific MAC parameters and function
553 * pointers.
554 **/
555 static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
556 {
557 struct e1000_mac_info *mac = &hw->mac;
558
559 /* Set media type function pointer */
560 hw->phy.media_type = e1000_media_type_copper;
561
562 /* Set mta register count */
563 mac->mta_reg_count = 32;
564 /* Set rar entry count */
565 mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
566 if (mac->type == e1000_ich8lan)
567 mac->rar_entry_count--;
568 /* FWSM register */
569 mac->has_fwsm = true;
570 /* ARC subsystem not supported */
571 mac->arc_subsystem_valid = false;
572 /* Adaptive IFS supported */
573 mac->adaptive_ifs = true;
574
575 /* LED operations */
576 switch (mac->type) {
577 case e1000_ich8lan:
578 case e1000_ich9lan:
579 case e1000_ich10lan:
580 /* check management mode */
581 mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
582 /* ID LED init */
583 mac->ops.id_led_init = e1000e_id_led_init_generic;
584 /* blink LED */
585 mac->ops.blink_led = e1000e_blink_led_generic;
586 /* setup LED */
587 mac->ops.setup_led = e1000e_setup_led_generic;
588 /* cleanup LED */
589 mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
590 /* turn on/off LED */
591 mac->ops.led_on = e1000_led_on_ich8lan;
592 mac->ops.led_off = e1000_led_off_ich8lan;
593 break;
594 case e1000_pchlan:
595 case e1000_pch2lan:
596 /* check management mode */
597 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
598 /* ID LED init */
599 mac->ops.id_led_init = e1000_id_led_init_pchlan;
600 /* setup LED */
601 mac->ops.setup_led = e1000_setup_led_pchlan;
602 /* cleanup LED */
603 mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
604 /* turn on/off LED */
605 mac->ops.led_on = e1000_led_on_pchlan;
606 mac->ops.led_off = e1000_led_off_pchlan;
607 break;
608 default:
609 break;
610 }
611
612 /* Enable PCS Lock-loss workaround for ICH8 */
613 if (mac->type == e1000_ich8lan)
614 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
615
616 /* Gate automatic PHY configuration by hardware on managed 82579 */
617 if ((mac->type == e1000_pch2lan) &&
618 (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
619 e1000_gate_hw_phy_config_ich8lan(hw, true);
620
621 return 0;
622 }
623
624 /**
625 * e1000_set_eee_pchlan - Enable/disable EEE support
626 * @hw: pointer to the HW structure
627 *
628 * Enable/disable EEE based on setting in dev_spec structure. The bits in
629 * the LPI Control register will remain set only if/when link is up.
630 **/
631 static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
632 {
633 s32 ret_val = 0;
634 u16 phy_reg;
635
636 if (hw->phy.type != e1000_phy_82579)
637 return 0;
638
639 ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
640 if (ret_val)
641 return ret_val;
642
643 if (hw->dev_spec.ich8lan.eee_disable)
644 phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK;
645 else
646 phy_reg |= I82579_LPI_CTRL_ENABLE_MASK;
647
648 return e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
649 }
650
651 /**
652 * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
653 * @hw: pointer to the HW structure
654 *
655 * Checks to see if the link status of the hardware has changed. If a
656 * change in link status has been detected, then we read the PHY registers
657 * to get the current speed/duplex if link exists.
658 **/
659 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
660 {
661 struct e1000_mac_info *mac = &hw->mac;
662 s32 ret_val;
663 bool link;
664 u16 phy_reg;
665
666 /*
667 * We only want to go out to the PHY registers to see if Auto-Neg
668 * has completed and/or if our link status has changed. The
669 * get_link_status flag is set upon receiving a Link Status
670 * Change or Rx Sequence Error interrupt.
671 */
672 if (!mac->get_link_status)
673 return 0;
674
675 /*
676 * First we want to see if the MII Status Register reports
677 * link. If so, then we want to get the current speed/duplex
678 * of the PHY.
679 */
680 ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
681 if (ret_val)
682 return ret_val;
683
684 if (hw->mac.type == e1000_pchlan) {
685 ret_val = e1000_k1_gig_workaround_hv(hw, link);
686 if (ret_val)
687 return ret_val;
688 }
689
690 if (!link)
691 return 0; /* No link detected */
692
693 mac->get_link_status = false;
694
695 switch (hw->mac.type) {
696 case e1000_pch2lan:
697 ret_val = e1000_k1_workaround_lv(hw);
698 if (ret_val)
699 return ret_val;
700 /* fall-thru */
701 case e1000_pchlan:
702 if (hw->phy.type == e1000_phy_82578) {
703 ret_val = e1000_link_stall_workaround_hv(hw);
704 if (ret_val)
705 return ret_val;
706 }
707
708 /*
709 * Workaround for PCHx parts in half-duplex:
710 * Set the number of preambles removed from the packet
711 * when it is passed from the PHY to the MAC to prevent
712 * the MAC from misinterpreting the packet type.
713 */
714 e1e_rphy(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
715 phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
716
717 if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD)
718 phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
719
720 e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
721 break;
722 default:
723 break;
724 }
725
726 /*
727 * Check if there was DownShift, must be checked
728 * immediately after link-up
729 */
730 e1000e_check_downshift(hw);
731
732 /* Enable/Disable EEE after link up */
733 ret_val = e1000_set_eee_pchlan(hw);
734 if (ret_val)
735 return ret_val;
736
737 /*
738 * If we are forcing speed/duplex, then we simply return since
739 * we have already determined whether we have link or not.
740 */
741 if (!mac->autoneg)
742 return -E1000_ERR_CONFIG;
743
744 /*
745 * Auto-Neg is enabled. Auto Speed Detection takes care
746 * of MAC speed/duplex configuration. So we only need to
747 * configure Collision Distance in the MAC.
748 */
749 mac->ops.config_collision_dist(hw);
750
751 /*
752 * Configure Flow Control now that Auto-Neg has completed.
753 * First, we need to restore the desired flow control
754 * settings because we may have had to re-autoneg with a
755 * different link partner.
756 */
757 ret_val = e1000e_config_fc_after_link_up(hw);
758 if (ret_val)
759 e_dbg("Error configuring flow control\n");
760
761 return ret_val;
762 }
763
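/**
 * e1000_get_variants_ich8lan - initialize MAC/NVM/PHY parameters
 * @adapter: board private structure
 *
 * Calls the family-specific MAC, NVM and PHY init routines, then applies
 * per-part adjustments (jumbo frame support, LED blinking, EEE default).
 **/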
764 static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
765 {
766 struct e1000_hw *hw = &adapter->hw;
767 s32 rc;
768
769 rc = e1000_init_mac_params_ich8lan(hw);
770 if (rc)
771 return rc;
772
773 rc = e1000_init_nvm_params_ich8lan(hw);
774 if (rc)
775 return rc;
776
777 switch (hw->mac.type) {
778 case e1000_ich8lan:
779 case e1000_ich9lan:
780 case e1000_ich10lan:
781 rc = e1000_init_phy_params_ich8lan(hw);
782 break;
783 case e1000_pchlan:
784 case e1000_pch2lan:
785 rc = e1000_init_phy_params_pchlan(hw);
786 break;
787 default:
788 break;
789 }
790 if (rc)
791 return rc;
792
793 /*
794 * Disable Jumbo Frame support on parts with Intel 10/100 PHY or
795 * on parts with MACsec enabled in NVM (reflected in CTRL_EXT).
796 */
797 if ((adapter->hw.phy.type == e1000_phy_ife) ||
798 ((adapter->hw.mac.type >= e1000_pch2lan) &&
799 (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) {
800 adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
801 adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN;
802
803 hw->mac.ops.blink_led = NULL;
804 }
805
806 if ((adapter->hw.mac.type == e1000_ich8lan) &&
807 (adapter->hw.phy.type != e1000_phy_ife))
808 adapter->flags |= FLAG_LSC_GIG_SPEED_DROP;
809
810 /* Enable workaround for 82579 w/ ME enabled */
811 if ((adapter->hw.mac.type == e1000_pch2lan) &&
812 (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
813 adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA;
814
815 /* Disable EEE by default until IEEE802.3az spec is finalized */
816 if (adapter->flags2 & FLAG2_HAS_EEE)
817 adapter->hw.dev_spec.ich8lan.eee_disable = true;
818
819 return 0;
820 }
821
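/* NVM (flash) operations are serialized driver-wide with this mutex */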
822 static DEFINE_MUTEX(nvm_mutex);
823
824 /**
825 * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
826 * @hw: pointer to the HW structure
827 *
828 * Acquires the mutex for performing NVM operations.
829 **/
830 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
831 {
832 mutex_lock(&nvm_mutex);
833
834 return 0;
835 }
836
837 /**
838 * e1000_release_nvm_ich8lan - Release NVM mutex
839 * @hw: pointer to the HW structure
840 *
841 * Releases the mutex used while performing NVM operations.
842 **/
843 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
844 {
845 mutex_unlock(&nvm_mutex);
846 }
847
848 /**
849 * e1000_acquire_swflag_ich8lan - Acquire software control flag
850 * @hw: pointer to the HW structure
851 *
852 * Acquires the software control flag for performing PHY and select
853 * MAC CSR accesses.
854 **/
855 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
856 {
857 u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
858 s32 ret_val = 0;
859
860 if (test_and_set_bit(__E1000_ACCESS_SHARED_RESOURCE,
861 &hw->adapter->state)) {
862 e_dbg("contention for Phy access\n");
863 return -E1000_ERR_PHY;
864 }
865
866 while (timeout) {
867 extcnf_ctrl = er32(EXTCNF_CTRL);
868 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
869 break;
870
871 mdelay(1);
872 timeout--;
873 }
874
875 if (!timeout) {
876 e_dbg("SW has already locked the resource.\n");
877 ret_val = -E1000_ERR_CONFIG;
878 goto out;
879 }
880
881 timeout = SW_FLAG_TIMEOUT;
882
883 extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
884 ew32(EXTCNF_CTRL, extcnf_ctrl);
885
886 while (timeout) {
887 extcnf_ctrl = er32(EXTCNF_CTRL);
888 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
889 break;
890
891 mdelay(1);
892 timeout--;
893 }
894
895 if (!timeout) {
896 e_dbg("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
897 er32(FWSM), extcnf_ctrl);
898 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
899 ew32(EXTCNF_CTRL, extcnf_ctrl);
900 ret_val = -E1000_ERR_CONFIG;
901 goto out;
902 }
903
904 out:
905 if (ret_val)
906 clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
907
908 return ret_val;
909 }
910
911 /**
912 * e1000_release_swflag_ich8lan - Release software control flag
913 * @hw: pointer to the HW structure
914 *
915 * Releases the software control flag for performing PHY and select
916 * MAC CSR accesses.
917 **/
918 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
919 {
920 u32 extcnf_ctrl;
921
922 extcnf_ctrl = er32(EXTCNF_CTRL);
923
924 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
925 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
926 ew32(EXTCNF_CTRL, extcnf_ctrl);
927 } else {
928 e_dbg("Semaphore unexpectedly released by sw/fw/hw\n");
929 }
930
931 clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
932 }
933
934 /**
935 * e1000_check_mng_mode_ich8lan - Checks management mode
936 * @hw: pointer to the HW structure
937 *
938 * This checks if the adapter has any manageability enabled.
939 * This is a function pointer entry point only called by read/write
940 * routines for the PHY and NVM parts.
941 **/
942 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
943 {
944 u32 fwsm;
945
946 fwsm = er32(FWSM);
947 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
948 ((fwsm & E1000_FWSM_MODE_MASK) ==
949 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
950 }
951
952 /**
953 * e1000_check_mng_mode_pchlan - Checks management mode
954 * @hw: pointer to the HW structure
955 *
956 * This checks if the adapter has iAMT enabled.
957 * This is a function pointer entry point only called by read/write
958 * routines for the PHY and NVM parts.
959 **/
960 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
961 {
962 u32 fwsm;
963
964 fwsm = er32(FWSM);
965 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
966 (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
967 }
968
969 /**
970 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
971 * @hw: pointer to the HW structure
972 *
973 * Checks if firmware is blocking the reset of the PHY.
974 * This is a function pointer entry point only called by
975 * reset routines.
976 **/
977 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
978 {
979 u32 fwsm;
980
981 fwsm = er32(FWSM);
982
983 return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? 0 : E1000_BLK_PHY_RESET;
984 }
985
986 /**
987 * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
988 * @hw: pointer to the HW structure
989 *
990 * Assumes semaphore already acquired.
991 *
992 **/
993 static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
994 {
995 u16 phy_data;
996 u32 strap = er32(STRAP);
997 s32 ret_val = 0;
998
999 strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
1000
1001 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
1002 if (ret_val)
1003 return ret_val;
1004
1005 phy_data &= ~HV_SMB_ADDR_MASK;
1006 phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
1007 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
1008
1009 return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
1010 }
1011
1012 /**
1013 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
1014 * @hw: pointer to the HW structure
1015 *
1016 * SW should configure the LCD from the NVM extended configuration region
1017 * as a workaround for certain parts.
1018 **/
1019 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
1020 {
1021 struct e1000_phy_info *phy = &hw->phy;
1022 u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
1023 s32 ret_val = 0;
1024 u16 word_addr, reg_data, reg_addr, phy_page = 0;
1025
1026 /*
1027 * Initialize the PHY from the NVM on ICH platforms. This
1028 * is needed due to an issue where the NVM configuration is
1029 * not properly autoloaded after power transitions.
1030 * Therefore, after each PHY reset, we will load the
1031 * configuration data out of the NVM manually.
1032 */
1033 switch (hw->mac.type) {
1034 case e1000_ich8lan:
1035 if (phy->type != e1000_phy_igp_3)
1036 return ret_val;
1037
1038 if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) ||
1039 (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) {
1040 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
1041 break;
1042 }
1043 /* Fall-thru */
1044 case e1000_pchlan:
1045 case e1000_pch2lan:
1046 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
1047 break;
1048 default:
1049 return ret_val;
1050 }
1051
1052 ret_val = hw->phy.ops.acquire(hw);
1053 if (ret_val)
1054 return ret_val;
1055
1056 data = er32(FEXTNVM);
1057 if (!(data & sw_cfg_mask))
1058 goto release;
1059
1060 /*
1061 * Make sure HW does not configure LCD from PHY
1062 * extended configuration before SW configuration
1063 */
1064 data = er32(EXTCNF_CTRL);
1065 if (!(hw->mac.type == e1000_pch2lan)) {
1066 if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
1067 goto release;
1068 }
1069
1070 cnf_size = er32(EXTCNF_SIZE);
1071 cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
1072 cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
1073 if (!cnf_size)
1074 goto release;
1075
1076 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
1077 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
1078
1079 if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
1080 (hw->mac.type == e1000_pchlan)) ||
1081 (hw->mac.type == e1000_pch2lan)) {
1082 /*
1083 * HW configures the SMBus address and LEDs when the
1084 * OEM and LCD Write Enable bits are set in the NVM.
1085 * When both NVM bits are cleared, SW will configure
1086 * them instead.
1087 */
1088 ret_val = e1000_write_smbus_addr(hw);
1089 if (ret_val)
1090 goto release;
1091
1092 data = er32(LEDCTL);
1093 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
1094 (u16)data);
1095 if (ret_val)
1096 goto release;
1097 }
1098
1099 /* Configure LCD from extended configuration region. */
1100
1101 /* cnf_base_addr is in DWORD */
1102 word_addr = (u16)(cnf_base_addr << 1);
1103
1104 for (i = 0; i < cnf_size; i++) {
1105 ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1,
1106 &reg_data);
1107 if (ret_val)
1108 goto release;
1109
1110 ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1),
1111 1, &reg_addr);
1112 if (ret_val)
1113 goto release;
1114
1115 /* Save off the PHY page for future writes. */
1116 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
1117 phy_page = reg_data;
1118 continue;
1119 }
1120
1121 reg_addr &= PHY_REG_MASK;
1122 reg_addr |= phy_page;
1123
1124 ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
1125 reg_data);
1126 if (ret_val)
1127 goto release;
1128 }
1129
1130 release:
1131 hw->phy.ops.release(hw);
1132 return ret_val;
1133 }
1134
1135 /**
1136 * e1000_k1_gig_workaround_hv - K1 Si workaround
1137 * @hw: pointer to the HW structure
1138 * @link: link up bool flag
1139 *
1140 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
1141 * from a lower speed. This workaround disables K1 whenever link is at 1Gig.
1142 * If link is down, the function will restore the default K1 setting located
1143 * in the NVM.
1144 **/
1145 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
1146 {
1147 s32 ret_val = 0;
1148 u16 status_reg = 0;
1149 bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
1150
1151 if (hw->mac.type != e1000_pchlan)
1152 return 0;
1153
1154 /* Wrap the whole flow with the sw flag */
1155 ret_val = hw->phy.ops.acquire(hw);
1156 if (ret_val)
1157 return ret_val;
1158
1159 /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
1160 if (link) {
1161 if (hw->phy.type == e1000_phy_82578) {
1162 ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
1163 &status_reg);
1164 if (ret_val)
1165 goto release;
1166
1167 status_reg &= BM_CS_STATUS_LINK_UP |
1168 BM_CS_STATUS_RESOLVED |
1169 BM_CS_STATUS_SPEED_MASK;
1170
1171 if (status_reg == (BM_CS_STATUS_LINK_UP |
1172 BM_CS_STATUS_RESOLVED |
1173 BM_CS_STATUS_SPEED_1000))
1174 k1_enable = false;
1175 }
1176
1177 if (hw->phy.type == e1000_phy_82577) {
1178 ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
1179 &status_reg);
1180 if (ret_val)
1181 goto release;
1182
1183 status_reg &= HV_M_STATUS_LINK_UP |
1184 HV_M_STATUS_AUTONEG_COMPLETE |
1185 HV_M_STATUS_SPEED_MASK;
1186
1187 if (status_reg == (HV_M_STATUS_LINK_UP |
1188 HV_M_STATUS_AUTONEG_COMPLETE |
1189 HV_M_STATUS_SPEED_1000))
1190 k1_enable = false;
1191 }
1192
1193 /* Link stall fix for link up */
1194 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
1195 0x0100);
1196 if (ret_val)
1197 goto release;
1198
1199 } else {
1200 /* Link stall fix for link down */
1201 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
1202 0x4100);
1203 if (ret_val)
1204 goto release;
1205 }
1206
1207 ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
1208
1209 release:
1210 hw->phy.ops.release(hw);
1211
1212 return ret_val;
1213 }
1214
1215 /**
1216 * e1000_configure_k1_ich8lan - Configure K1 power state
1217 * @hw: pointer to the HW structure
1218 * @enable: K1 state to configure
1219 *
1220 * Configure the K1 power state based on the provided parameter.
1221 * Assumes semaphore already acquired.
1222 *
1223 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
1224 **/
1225 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
1226 {
1227 s32 ret_val = 0;
1228 u32 ctrl_reg = 0;
1229 u32 ctrl_ext = 0;
1230 u32 reg = 0;
1231 u16 kmrn_reg = 0;
1232
1233 ret_val = e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
1234 &kmrn_reg);
1235 if (ret_val)
1236 return ret_val;
1237
1238 if (k1_enable)
1239 kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
1240 else
1241 kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
1242
1243 ret_val = e1000e_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
1244 kmrn_reg);
1245 if (ret_val)
1246 return ret_val;
1247
1248 udelay(20);
1249 ctrl_ext = er32(CTRL_EXT);
1250 ctrl_reg = er32(CTRL);
1251
1252 reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
1253 reg |= E1000_CTRL_FRCSPD;
1254 ew32(CTRL, reg);
1255
1256 ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
1257 e1e_flush();
1258 udelay(20);
1259 ew32(CTRL, ctrl_reg);
1260 ew32(CTRL_EXT, ctrl_ext);
1261 e1e_flush();
1262 udelay(20);
1263
1264 return 0;
1265 }
1266
1267 /**
1268 * e1000_oem_bits_config_ich8lan - SW-based OEM bits configuration
1269 * @hw: pointer to the HW structure
1270 * @d0_state: boolean if entering d0 or d3 device state
1271 *
1272 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
1273 * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
1274 * in NVM determines whether HW should configure LPLU and Gbe Disable.
1275 **/
1276 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1277 {
1278 s32 ret_val = 0;
1279 u32 mac_reg;
1280 u16 oem_reg;
1281
1282 if ((hw->mac.type != e1000_pch2lan) && (hw->mac.type != e1000_pchlan))
1283 return ret_val;
1284
1285 ret_val = hw->phy.ops.acquire(hw);
1286 if (ret_val)
1287 return ret_val;
1288
1289 if (!(hw->mac.type == e1000_pch2lan)) {
1290 mac_reg = er32(EXTCNF_CTRL);
1291 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
1292 goto release;
1293 }
1294
1295 mac_reg = er32(FEXTNVM);
1296 if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
1297 goto release;
1298
1299 mac_reg = er32(PHY_CTRL);
1300
1301 ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
1302 if (ret_val)
1303 goto release;
1304
1305 oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
1306
1307 if (d0_state) {
1308 if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
1309 oem_reg |= HV_OEM_BITS_GBE_DIS;
1310
1311 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
1312 oem_reg |= HV_OEM_BITS_LPLU;
1313 } else {
1314 if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
1315 E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
1316 oem_reg |= HV_OEM_BITS_GBE_DIS;
1317
1318 if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
1319 E1000_PHY_CTRL_NOND0A_LPLU))
1320 oem_reg |= HV_OEM_BITS_LPLU;
1321 }
1322
1323 /* Set Restart auto-neg to activate the bits */
1324 if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
1325 !hw->phy.ops.check_reset_block(hw))
1326 oem_reg |= HV_OEM_BITS_RESTART_AN;
1327
1328 ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
1329
1330 release:
1331 hw->phy.ops.release(hw);
1332
1333 return ret_val;
1334 }
1335
1336
1337 /**
1338 * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
1339 * @hw: pointer to the HW structure
1340 **/
1341 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
1342 {
1343 s32 ret_val;
1344 u16 data;
1345
1346 ret_val = e1e_rphy(hw, HV_KMRN_MODE_CTRL, &data);
1347 if (ret_val)
1348 return ret_val;
1349
1350 data |= HV_KMRN_MDIO_SLOW;
1351
1352 ret_val = e1e_wphy(hw, HV_KMRN_MODE_CTRL, data);
1353
1354 return ret_val;
1355 }
1356
1357 /**
1358 * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
1359 * done after every PHY reset.
1360 **/
1361 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1362 {
1363 s32 ret_val = 0;
1364 u16 phy_data;
1365
1366 if (hw->mac.type != e1000_pchlan)
1367 return 0;
1368
1369 /* Set MDIO slow mode before any other MDIO access */
1370 if (hw->phy.type == e1000_phy_82577) {
1371 ret_val = e1000_set_mdio_slow_mode_hv(hw);
1372 if (ret_val)
1373 return ret_val;
1374 }
1375
1376 if (((hw->phy.type == e1000_phy_82577) &&
1377 ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
1378 ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
1379 /* Disable generation of early preamble */
1380 ret_val = e1e_wphy(hw, PHY_REG(769, 25), 0x4431);
1381 if (ret_val)
1382 return ret_val;
1383
1384 /* Preamble tuning for SSC */
1385 ret_val = e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204);
1386 if (ret_val)
1387 return ret_val;
1388 }
1389
1390 if (hw->phy.type == e1000_phy_82578) {
1391 /*
1392 * Return registers to default by doing a soft reset then
1393 * writing 0x3140 to the control register.
1394 */
1395 if (hw->phy.revision < 2) {
1396 e1000e_phy_sw_reset(hw);
1397 ret_val = e1e_wphy(hw, PHY_CONTROL, 0x3140);
1398 }
1399 }
1400
1401 /* Select page 0 */
1402 ret_val = hw->phy.ops.acquire(hw);
1403 if (ret_val)
1404 return ret_val;
1405
1406 hw->phy.addr = 1;
1407 ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
1408 hw->phy.ops.release(hw);
1409 if (ret_val)
1410 return ret_val;
1411
1412 /*
1413 * Configure the K1 Si workaround during phy reset assuming there is
1414 * link so that it disables K1 if link is in 1Gbps.
1415 */
1416 ret_val = e1000_k1_gig_workaround_hv(hw, true);
1417 if (ret_val)
1418 return ret_val;
1419
1420 /* Workaround for link disconnects on a busy hub in half duplex */
1421 ret_val = hw->phy.ops.acquire(hw);
1422 if (ret_val)
1423 return ret_val;
1424 ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
1425 if (ret_val)
1426 goto release;
1427 ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
1428 phy_data & 0x00FF);
1429 release:
1430 hw->phy.ops.release(hw);
1431
1432 return ret_val;
1433 }
1434
1435 /**
1436 * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
1437 * @hw: pointer to the HW structure
1438 **/
1439 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
1440 {
1441 u32 mac_reg;
1442 u16 i, phy_reg = 0;
1443 s32 ret_val;
1444
1445 ret_val = hw->phy.ops.acquire(hw);
1446 if (ret_val)
1447 return;
1448 ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1449 if (ret_val)
1450 goto release;
1451
1452 /* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
1453 for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
1454 mac_reg = er32(RAL(i));
1455 hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
1456 (u16)(mac_reg & 0xFFFF));
1457 hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
1458 (u16)((mac_reg >> 16) & 0xFFFF));
1459
1460 mac_reg = er32(RAH(i));
1461 hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
1462 (u16)(mac_reg & 0xFFFF));
1463 hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
1464 (u16)((mac_reg & E1000_RAH_AV)
1465 >> 16));
1466 }
1467
1468 e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
1469
1470 release:
1471 hw->phy.ops.release(hw);
1472 }
1473
1474 /**
1475 * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
1476 * with 82579 PHY
1477 * @hw: pointer to the HW structure
1478 * @enable: flag to enable/disable workaround when enabling/disabling jumbos
1479 **/
1480 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1481 {
1482 s32 ret_val = 0;
1483 u16 phy_reg, data;
1484 u32 mac_reg;
1485 u16 i;
1486
1487 if (hw->mac.type != e1000_pch2lan)
1488 return 0;
1489
1490 /* disable Rx path while enabling/disabling workaround */
1491 e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
1492 ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14));
1493 if (ret_val)
1494 return ret_val;
1495
1496 if (enable) {
1497 /*
1498 * Write Rx addresses (rar_entry_count for RAL/H, +4 for
1499 * SHRAL/H) and initial CRC values to the MAC
1500 */
1501 for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
1502 u8 mac_addr[ETH_ALEN] = {0};
1503 u32 addr_high, addr_low;
1504
1505 addr_high = er32(RAH(i));
1506 if (!(addr_high & E1000_RAH_AV))
1507 continue;
1508 addr_low = er32(RAL(i));
1509 mac_addr[0] = (addr_low & 0xFF);
1510 mac_addr[1] = ((addr_low >> 8) & 0xFF);
1511 mac_addr[2] = ((addr_low >> 16) & 0xFF);
1512 mac_addr[3] = ((addr_low >> 24) & 0xFF);
1513 mac_addr[4] = (addr_high & 0xFF);
1514 mac_addr[5] = ((addr_high >> 8) & 0xFF);
1515
1516 ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr));
1517 }
1518
1519 /* Write Rx addresses to the PHY */
1520 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
1521
1522 /* Enable jumbo frame workaround in the MAC */
1523 mac_reg = er32(FFLT_DBG);
1524 mac_reg &= ~(1 << 14);
1525 mac_reg |= (7 << 15);
1526 ew32(FFLT_DBG, mac_reg);
1527
1528 mac_reg = er32(RCTL);
1529 mac_reg |= E1000_RCTL_SECRC;
1530 ew32(RCTL, mac_reg);
1531
1532 ret_val = e1000e_read_kmrn_reg(hw,
1533 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1534 &data);
1535 if (ret_val)
1536 return ret_val;
1537 ret_val = e1000e_write_kmrn_reg(hw,
1538 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1539 data | (1 << 0));
1540 if (ret_val)
1541 return ret_val;
1542 ret_val = e1000e_read_kmrn_reg(hw,
1543 E1000_KMRNCTRLSTA_HD_CTRL,
1544 &data);
1545 if (ret_val)
1546 return ret_val;
1547 data &= ~(0xF << 8);
1548 data |= (0xB << 8);
1549 ret_val = e1000e_write_kmrn_reg(hw,
1550 E1000_KMRNCTRLSTA_HD_CTRL,
1551 data);
1552 if (ret_val)
1553 return ret_val;
1554
1555 /* Enable jumbo frame workaround in the PHY */
1556 e1e_rphy(hw, PHY_REG(769, 23), &data);
1557 data &= ~(0x7F << 5);
1558 data |= (0x37 << 5);
1559 ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
1560 if (ret_val)
1561 return ret_val;
1562 e1e_rphy(hw, PHY_REG(769, 16), &data);
1563 data &= ~(1 << 13);
1564 ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
1565 if (ret_val)
1566 return ret_val;
1567 e1e_rphy(hw, PHY_REG(776, 20), &data);
1568 data &= ~(0x3FF << 2);
1569 data |= (0x1A << 2);
1570 ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
1571 if (ret_val)
1572 return ret_val;
1573 ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xF100);
1574 if (ret_val)
1575 return ret_val;
1576 e1e_rphy(hw, HV_PM_CTRL, &data);
1577 ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10));
1578 if (ret_val)
1579 return ret_val;
1580 } else {
1581 /* Write MAC register values back to h/w defaults */
1582 mac_reg = er32(FFLT_DBG);
1583 mac_reg &= ~(0xF << 14);
1584 ew32(FFLT_DBG, mac_reg);
1585
1586 mac_reg = er32(RCTL);
1587 mac_reg &= ~E1000_RCTL_SECRC;
1588 ew32(RCTL, mac_reg);
1589
1590 ret_val = e1000e_read_kmrn_reg(hw,
1591 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1592 &data);
1593 if (ret_val)
1594 return ret_val;
1595 ret_val = e1000e_write_kmrn_reg(hw,
1596 E1000_KMRNCTRLSTA_CTRL_OFFSET,
1597 data & ~(1 << 0));
1598 if (ret_val)
1599 return ret_val;
1600 ret_val = e1000e_read_kmrn_reg(hw,
1601 E1000_KMRNCTRLSTA_HD_CTRL,
1602 &data);
1603 if (ret_val)
1604 return ret_val;
1605 data &= ~(0xF << 8);
1606 data |= (0xB << 8);
1607 ret_val = e1000e_write_kmrn_reg(hw,
1608 E1000_KMRNCTRLSTA_HD_CTRL,
1609 data);
1610 if (ret_val)
1611 return ret_val;
1612
1613 /* Write PHY register values back to h/w defaults */
1614 e1e_rphy(hw, PHY_REG(769, 23), &data);
1615 data &= ~(0x7F << 5);
1616 ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
1617 if (ret_val)
1618 return ret_val;
1619 e1e_rphy(hw, PHY_REG(769, 16), &data);
1620 data |= (1 << 13);
1621 ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
1622 if (ret_val)
1623 return ret_val;
1624 e1e_rphy(hw, PHY_REG(776, 20), &data);
1625 data &= ~(0x3FF << 2);
1626 data |= (0x8 << 2);
1627 ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
1628 if (ret_val)
1629 return ret_val;
1630 ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0x7E00);
1631 if (ret_val)
1632 return ret_val;
1633 e1e_rphy(hw, HV_PM_CTRL, &data);
1634 ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10));
1635 if (ret_val)
1636 return ret_val;
1637 }
1638
1639 /* re-enable Rx path after enabling/disabling workaround */
1640 return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14));
1641 }
1642
1643 /**
1644 * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
1645 * done after every PHY reset.
1646 **/
1647 static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1648 {
1649 s32 ret_val = 0;
1650
1651 if (hw->mac.type != e1000_pch2lan)
1652 return 0;
1653
1654 /* Set MDIO slow mode before any other MDIO access */
1655 ret_val = e1000_set_mdio_slow_mode_hv(hw);
1656
1657 ret_val = hw->phy.ops.acquire(hw);
1658 if (ret_val)
1659 return ret_val;
1660 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
1661 I82579_MSE_THRESHOLD);
1662 if (ret_val)
1663 goto release;
1664 /* set MSE higher to enable link to stay up when noise is high */
1665 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, 0x0034);
1666 if (ret_val)
1667 goto release;
1668 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
1669 I82579_MSE_LINK_DOWN);
1670 if (ret_val)
1671 goto release;
1672 /* drop link after 5 times MSE threshold was reached */
1673 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, 0x0005);
1674 release:
1675 hw->phy.ops.release(hw);
1676
1677 return ret_val;
1678 }
1679
1680 /**
1681 * e1000_k1_workaround_lv - K1 Si workaround
1682 * @hw: pointer to the HW structure
1683 *
1684 * Workaround to set the K1 beacon duration for 82579 parts
1685 **/
1686 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
1687 {
1688 s32 ret_val = 0;
1689 u16 status_reg = 0;
1690 u32 mac_reg;
1691 u16 phy_reg;
1692
1693 if (hw->mac.type != e1000_pch2lan)
1694 return 0;
1695
1696 /* Set K1 beacon duration based on 1Gbps speed or otherwise */
1697 ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
1698 if (ret_val)
1699 return ret_val;
1700
1701 if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
1702 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
1703 mac_reg = er32(FEXTNVM4);
1704 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1705
1706 ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
1707 if (ret_val)
1708 return ret_val;
1709
1710 if (status_reg & HV_M_STATUS_SPEED_1000) {
1711 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1712 phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
1713 } else {
1714 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
1715 phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
1716 }
1717 ew32(FEXTNVM4, mac_reg);
1718 ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
1719 }
1720
1721 return ret_val;
1722 }
1723
1724 /**
1725 * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
1726 * @hw: pointer to the HW structure
1727 * @gate: boolean set to true to gate, false to ungate
1728 *
1729 * Gate/ungate the automatic PHY configuration via hardware; perform
1730 * the configuration via software instead.
1731 **/
1732 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
1733 {
1734 u32 extcnf_ctrl;
1735
1736 if (hw->mac.type != e1000_pch2lan)
1737 return;
1738
1739 extcnf_ctrl = er32(EXTCNF_CTRL);
1740
1741 if (gate)
1742 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
1743 else
1744 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
1745
1746 ew32(EXTCNF_CTRL, extcnf_ctrl);
1747 }
1748
1749 /**
1750 * e1000_lan_init_done_ich8lan - Check for PHY config completion
1751 * @hw: pointer to the HW structure
1752 *
1753 * Check the appropriate indication the MAC has finished configuring the
1754 * PHY after a software reset.
1755 **/
1756 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
1757 {
1758 u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
1759
1760 /* Wait for basic configuration to complete before proceeding */
1761 do {
1762 data = er32(STATUS);
1763 data &= E1000_STATUS_LAN_INIT_DONE;
1764 udelay(100);
1765 } while ((!data) && --loop);
1766
1767 /*
1768 * If basic configuration is incomplete before the above loop
1769 * count reaches 0, loading the configuration from NVM will
1770 * leave the PHY in a bad state possibly resulting in no link.
1771 */
1772 if (loop == 0)
1773 e_dbg("LAN_INIT_DONE not set, increase timeout\n");
1774
1775 /* Clear the Init Done bit for the next init event */
1776 data = er32(STATUS);
1777 data &= ~E1000_STATUS_LAN_INIT_DONE;
1778 ew32(STATUS, data);
1779 }
1780
1781 /**
1782 * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
1783 * @hw: pointer to the HW structure
1784 **/
1785 static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
1786 {
1787 s32 ret_val = 0;
1788 u16 reg;
1789
1790 if (hw->phy.ops.check_reset_block(hw))
1791 return 0;
1792
1793 /* Allow time for h/w to get to quiescent state after reset */
1794 usleep_range(10000, 20000);
1795
1796 /* Perform any necessary post-reset workarounds */
1797 switch (hw->mac.type) {
1798 case e1000_pchlan:
1799 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
1800 if (ret_val)
1801 return ret_val;
1802 break;
1803 case e1000_pch2lan:
1804 ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
1805 if (ret_val)
1806 return ret_val;
1807 break;
1808 default:
1809 break;
1810 }
1811
1812 /* Clear the host wakeup bit after lcd reset */
1813 if (hw->mac.type >= e1000_pchlan) {
1814 		e1e_rphy(hw, BM_PORT_GEN_CFG, &reg);
1815 reg &= ~BM_WUC_HOST_WU_BIT;
1816 e1e_wphy(hw, BM_PORT_GEN_CFG, reg);
1817 }
1818
1819 /* Configure the LCD with the extended configuration region in NVM */
1820 ret_val = e1000_sw_lcd_config_ich8lan(hw);
1821 if (ret_val)
1822 return ret_val;
1823
1824 /* Configure the LCD with the OEM bits in NVM */
1825 ret_val = e1000_oem_bits_config_ich8lan(hw, true);
1826
1827 if (hw->mac.type == e1000_pch2lan) {
1828 /* Ungate automatic PHY configuration on non-managed 82579 */
1829 if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
1830 usleep_range(10000, 20000);
1831 e1000_gate_hw_phy_config_ich8lan(hw, false);
1832 }
1833
1834 /* Set EEE LPI Update Timer to 200usec */
1835 ret_val = hw->phy.ops.acquire(hw);
1836 if (ret_val)
1837 return ret_val;
1838 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
1839 I82579_LPI_UPDATE_TIMER);
1840 if (!ret_val)
1841 ret_val = hw->phy.ops.write_reg_locked(hw,
1842 I82579_EMI_DATA,
1843 0x1387);
1844 hw->phy.ops.release(hw);
1845 }
1846
1847 return ret_val;
1848 }
1849
1850 /**
1851 * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
1852 * @hw: pointer to the HW structure
1853 *
1854 * Resets the PHY
1855 * This is a function pointer entry point called by drivers
1856 * or other shared routines.
1857 **/
1858 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
1859 {
1860 s32 ret_val = 0;
1861
1862 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
1863 if ((hw->mac.type == e1000_pch2lan) &&
1864 !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
1865 e1000_gate_hw_phy_config_ich8lan(hw, true);
1866
1867 ret_val = e1000e_phy_hw_reset_generic(hw);
1868 if (ret_val)
1869 return ret_val;
1870
1871 return e1000_post_phy_reset_ich8lan(hw);
1872 }
1873
1874 /**
1875 * e1000_set_lplu_state_pchlan - Set Low Power Link Up state
1876 * @hw: pointer to the HW structure
1877 * @active: true to enable LPLU, false to disable
1878 *
1879  * Sets the LPLU state according to the active flag. For PCH, if the OEM write
1880  * bits are disabled in the NVM, writing the LPLU bits in the MAC will not set
1881 * the phy speed. This function will manually set the LPLU bit and restart
1882 * auto-neg as hw would do. D3 and D0 LPLU will call the same function
1883 * since it configures the same bit.
1884 **/
1885 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
1886 {
1887 s32 ret_val = 0;
1888 u16 oem_reg;
1889
1890 ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg);
1891 if (ret_val)
1892 return ret_val;
1893
1894 if (active)
1895 oem_reg |= HV_OEM_BITS_LPLU;
1896 else
1897 oem_reg &= ~HV_OEM_BITS_LPLU;
1898
1899 if (!hw->phy.ops.check_reset_block(hw))
1900 oem_reg |= HV_OEM_BITS_RESTART_AN;
1901
1902 return e1e_wphy(hw, HV_OEM_BITS, oem_reg);
1903 }
1904
1905 /**
1906 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
1907 * @hw: pointer to the HW structure
1908 * @active: true to enable LPLU, false to disable
1909 *
1910 * Sets the LPLU D0 state according to the active flag. When
1911 * activating LPLU this function also disables smart speed
1912 * and vice versa. LPLU will not be activated unless the
1913 * device autonegotiation advertisement meets standards of
1914 * either 10 or 10/100 or 10/100/1000 at all duplexes.
1915 * This is a function pointer entry point only called by
1916 * PHY setup routines.
1917 **/
1918 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
1919 {
1920 struct e1000_phy_info *phy = &hw->phy;
1921 u32 phy_ctrl;
1922 s32 ret_val = 0;
1923 u16 data;
1924
1925 if (phy->type == e1000_phy_ife)
1926 return 0;
1927
1928 phy_ctrl = er32(PHY_CTRL);
1929
1930 if (active) {
1931 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
1932 ew32(PHY_CTRL, phy_ctrl);
1933
1934 if (phy->type != e1000_phy_igp_3)
1935 return 0;
1936
1937 /*
1938 * Call gig speed drop workaround on LPLU before accessing
1939 * any PHY registers
1940 */
1941 if (hw->mac.type == e1000_ich8lan)
1942 e1000e_gig_downshift_workaround_ich8lan(hw);
1943
1944 /* When LPLU is enabled, we should disable SmartSpeed */
1945 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
1946 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
1947 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
1948 if (ret_val)
1949 return ret_val;
1950 } else {
1951 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
1952 ew32(PHY_CTRL, phy_ctrl);
1953
1954 if (phy->type != e1000_phy_igp_3)
1955 return 0;
1956
1957 /*
1958 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
1959 * during Dx states where the power conservation is most
1960 * important. During driver activity we should enable
1961 * SmartSpeed, so performance is maintained.
1962 */
1963 if (phy->smart_speed == e1000_smart_speed_on) {
1964 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
1965 &data);
1966 if (ret_val)
1967 return ret_val;
1968
1969 data |= IGP01E1000_PSCFR_SMART_SPEED;
1970 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
1971 data);
1972 if (ret_val)
1973 return ret_val;
1974 } else if (phy->smart_speed == e1000_smart_speed_off) {
1975 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
1976 &data);
1977 if (ret_val)
1978 return ret_val;
1979
1980 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
1981 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
1982 data);
1983 if (ret_val)
1984 return ret_val;
1985 }
1986 }
1987
1988 return 0;
1989 }
1990
1991 /**
1992 * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
1993 * @hw: pointer to the HW structure
1994 * @active: true to enable LPLU, false to disable
1995 *
1996 * Sets the LPLU D3 state according to the active flag. When
1997 * activating LPLU this function also disables smart speed
1998 * and vice versa. LPLU will not be activated unless the
1999 * device autonegotiation advertisement meets standards of
2000 * either 10 or 10/100 or 10/100/1000 at all duplexes.
2001 * This is a function pointer entry point only called by
2002 * PHY setup routines.
2003 **/
2004 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
2005 {
2006 struct e1000_phy_info *phy = &hw->phy;
2007 u32 phy_ctrl;
2008 s32 ret_val = 0;
2009 u16 data;
2010
2011 phy_ctrl = er32(PHY_CTRL);
2012
2013 if (!active) {
2014 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
2015 ew32(PHY_CTRL, phy_ctrl);
2016
2017 if (phy->type != e1000_phy_igp_3)
2018 return 0;
2019
2020 /*
2021 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
2022 * during Dx states where the power conservation is most
2023 * important. During driver activity we should enable
2024 * SmartSpeed, so performance is maintained.
2025 */
2026 if (phy->smart_speed == e1000_smart_speed_on) {
2027 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
2028 &data);
2029 if (ret_val)
2030 return ret_val;
2031
2032 data |= IGP01E1000_PSCFR_SMART_SPEED;
2033 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
2034 data);
2035 if (ret_val)
2036 return ret_val;
2037 } else if (phy->smart_speed == e1000_smart_speed_off) {
2038 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
2039 &data);
2040 if (ret_val)
2041 return ret_val;
2042
2043 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2044 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
2045 data);
2046 if (ret_val)
2047 return ret_val;
2048 }
2049 } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
2050 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
2051 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
2052 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
2053 ew32(PHY_CTRL, phy_ctrl);
2054
2055 if (phy->type != e1000_phy_igp_3)
2056 return 0;
2057
2058 /*
2059 * Call gig speed drop workaround on LPLU before accessing
2060 * any PHY registers
2061 */
2062 if (hw->mac.type == e1000_ich8lan)
2063 e1000e_gig_downshift_workaround_ich8lan(hw);
2064
2065 /* When LPLU is enabled, we should disable SmartSpeed */
2066 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
2067 if (ret_val)
2068 return ret_val;
2069
2070 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
2071 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
2072 }
2073
2074 return ret_val;
2075 }
2076
2077 /**
2078 * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
2079 * @hw: pointer to the HW structure
2080 * @bank: pointer to the variable that returns the active bank
2081 *
2082 * Reads signature byte from the NVM using the flash access registers.
2083 * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
2084 **/
2085 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
2086 {
2087 u32 eecd;
2088 struct e1000_nvm_info *nvm = &hw->nvm;
2089 u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
2090 u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
2091 u8 sig_byte = 0;
2092 s32 ret_val;
2093
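	/*
	 * Annotation (not part of the original source): act_offset is
	 * 0x13 * 2 + 1 = 0x27, i.e. the high byte of NVM word 0x13, which
	 * holds the signature bits 15:14.  bank1_offset is the bank size
	 * converted from words to bytes, so act_offset + bank1_offset is
	 * the same signature byte in the second bank.
	 */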
2094 switch (hw->mac.type) {
2095 case e1000_ich8lan:
2096 case e1000_ich9lan:
2097 eecd = er32(EECD);
2098 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
2099 E1000_EECD_SEC1VAL_VALID_MASK) {
2100 if (eecd & E1000_EECD_SEC1VAL)
2101 *bank = 1;
2102 else
2103 *bank = 0;
2104
2105 return 0;
2106 }
2107 e_dbg("Unable to determine valid NVM bank via EEC - reading flash signature\n");
2108 /* fall-thru */
2109 default:
2110 /* set bank to 0 in case flash read fails */
2111 *bank = 0;
2112
2113 /* Check bank 0 */
2114 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
2115 &sig_byte);
2116 if (ret_val)
2117 return ret_val;
2118 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2119 E1000_ICH_NVM_SIG_VALUE) {
2120 *bank = 0;
2121 return 0;
2122 }
2123
2124 /* Check bank 1 */
2125 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
2126 bank1_offset,
2127 &sig_byte);
2128 if (ret_val)
2129 return ret_val;
2130 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
2131 E1000_ICH_NVM_SIG_VALUE) {
2132 *bank = 1;
2133 return 0;
2134 }
2135
2136 e_dbg("ERROR: No valid NVM bank present\n");
2137 return -E1000_ERR_NVM;
2138 }
2139 }
2140
2141 /**
2142 * e1000_read_nvm_ich8lan - Read word(s) from the NVM
2143 * @hw: pointer to the HW structure
2144  * @offset: The offset (in words) of the word(s) to read.
2145 * @words: Size of data to read in words
2146 * @data: Pointer to the word(s) to read at offset.
2147 *
2148 * Reads a word(s) from the NVM using the flash access registers.
2149 **/
2150 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2151 u16 *data)
2152 {
2153 struct e1000_nvm_info *nvm = &hw->nvm;
2154 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2155 u32 act_offset;
2156 s32 ret_val = 0;
2157 u32 bank = 0;
2158 u16 i, word;
2159
2160 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2161 (words == 0)) {
2162 e_dbg("nvm parameter(s) out of bounds\n");
2163 ret_val = -E1000_ERR_NVM;
2164 goto out;
2165 }
2166
2167 nvm->ops.acquire(hw);
2168
2169 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2170 if (ret_val) {
2171 e_dbg("Could not detect valid bank, assuming bank 0\n");
2172 bank = 0;
2173 }
2174
2175 act_offset = (bank) ? nvm->flash_bank_size : 0;
2176 act_offset += offset;
2177
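	/*
	 * Annotation (not part of the original source): words previously
	 * staged via e1000_write_nvm_ich8lan() are returned from the shadow
	 * RAM cache below; everything else is read from the active flash
	 * bank at act_offset (bank 1 starts flash_bank_size words into the
	 * region).
	 */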
2178 ret_val = 0;
2179 for (i = 0; i < words; i++) {
2180 if (dev_spec->shadow_ram[offset+i].modified) {
2181 data[i] = dev_spec->shadow_ram[offset+i].value;
2182 } else {
2183 ret_val = e1000_read_flash_word_ich8lan(hw,
2184 act_offset + i,
2185 &word);
2186 if (ret_val)
2187 break;
2188 data[i] = word;
2189 }
2190 }
2191
2192 nvm->ops.release(hw);
2193
2194 out:
2195 if (ret_val)
2196 e_dbg("NVM read error: %d\n", ret_val);
2197
2198 return ret_val;
2199 }
2200
2201 /**
2202 * e1000_flash_cycle_init_ich8lan - Initialize flash
2203 * @hw: pointer to the HW structure
2204 *
2205 * This function does initial flash setup so that a new read/write/erase cycle
2206 * can be started.
2207 **/
2208 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2209 {
2210 union ich8_hws_flash_status hsfsts;
2211 s32 ret_val = -E1000_ERR_NVM;
2212
2213 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2214
2215 /* Check if the flash descriptor is valid */
2216 if (hsfsts.hsf_status.fldesvalid == 0) {
2217 e_dbg("Flash descriptor invalid. SW Sequencing must be used.\n");
2218 return -E1000_ERR_NVM;
2219 }
2220
2221 /* Clear FCERR and DAEL in hw status by writing 1 */
2222 hsfsts.hsf_status.flcerr = 1;
2223 hsfsts.hsf_status.dael = 1;
2224
2225 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
2226
2227 /*
2228 * Either we should have a hardware SPI cycle in progress
2229 * bit to check against, in order to start a new cycle or
2230 * FDONE bit should be changed in the hardware so that it
2231 * is 1 after hardware reset, which can then be used as an
2232 * indication whether a cycle is in progress or has been
2233 * completed.
2234 */
2235
2236 if (hsfsts.hsf_status.flcinprog == 0) {
2237 /*
2238 * There is no cycle running at present,
2239 * so we can start a cycle.
2240 * Begin by setting Flash Cycle Done.
2241 */
2242 hsfsts.hsf_status.flcdone = 1;
2243 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
2244 ret_val = 0;
2245 } else {
2246 s32 i;
2247
2248 /*
2249 		 * Otherwise poll for some time so the current
2250 * cycle has a chance to end before giving up.
2251 */
2252 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
2253 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2254 if (hsfsts.hsf_status.flcinprog == 0) {
2255 ret_val = 0;
2256 break;
2257 }
2258 udelay(1);
2259 }
2260 if (!ret_val) {
2261 /*
2262 			 * The previous cycle finished within the timeout,
2263 			 * so now set the Flash Cycle Done.
2264 */
2265 hsfsts.hsf_status.flcdone = 1;
2266 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
2267 } else {
2268 e_dbg("Flash controller busy, cannot get access\n");
2269 }
2270 }
2271
2272 return ret_val;
2273 }
2274
2275 /**
2276 * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
2277 * @hw: pointer to the HW structure
2278 * @timeout: maximum time to wait for completion
2279 *
2280 * This function starts a flash cycle and waits for its completion.
2281 **/
2282 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
2283 {
2284 union ich8_hws_flash_ctrl hsflctl;
2285 union ich8_hws_flash_status hsfsts;
2286 u32 i = 0;
2287
2288 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
2289 hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
2290 hsflctl.hsf_ctrl.flcgo = 1;
2291 ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
2292
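	/*
	 * Annotation (not part of the original source): each poll iteration
	 * below delays 1 usec, so @timeout is effectively a budget in
	 * microseconds (e.g. roughly 500 usec for a read cycle using
	 * ICH_FLASH_READ_COMMAND_TIMEOUT).
	 */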
2293 /* wait till FDONE bit is set to 1 */
2294 do {
2295 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2296 if (hsfsts.hsf_status.flcdone == 1)
2297 break;
2298 udelay(1);
2299 } while (i++ < timeout);
2300
2301 if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0)
2302 return 0;
2303
2304 return -E1000_ERR_NVM;
2305 }
2306
2307 /**
2308 * e1000_read_flash_word_ich8lan - Read word from flash
2309 * @hw: pointer to the HW structure
2310 * @offset: offset to data location
2311 * @data: pointer to the location for storing the data
2312 *
2313 * Reads the flash word at offset into data. Offset is converted
2314 * to bytes before read.
2315 **/
2316 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
2317 u16 *data)
2318 {
2319 /* Must convert offset into bytes. */
2320 offset <<= 1;
2321
2322 return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
2323 }
2324
2325 /**
2326 * e1000_read_flash_byte_ich8lan - Read byte from flash
2327 * @hw: pointer to the HW structure
2328 * @offset: The offset of the byte to read.
2329 * @data: Pointer to a byte to store the value read.
2330 *
2331 * Reads a single byte from the NVM using the flash access registers.
2332 **/
2333 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
2334 u8 *data)
2335 {
2336 s32 ret_val;
2337 u16 word = 0;
2338
2339 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
2340 if (ret_val)
2341 return ret_val;
2342
2343 *data = (u8)word;
2344
2345 return 0;
2346 }
2347
2348 /**
2349 * e1000_read_flash_data_ich8lan - Read byte or word from NVM
2350 * @hw: pointer to the HW structure
2351 * @offset: The offset (in bytes) of the byte or word to read.
2352 * @size: Size of data to read, 1=byte 2=word
2353 * @data: Pointer to the word to store the value read.
2354 *
2355 * Reads a byte or word from the NVM using the flash access registers.
2356 **/
2357 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2358 u8 size, u16 *data)
2359 {
2360 union ich8_hws_flash_status hsfsts;
2361 union ich8_hws_flash_ctrl hsflctl;
2362 u32 flash_linear_addr;
2363 u32 flash_data = 0;
2364 s32 ret_val = -E1000_ERR_NVM;
2365 u8 count = 0;
2366
2367 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
2368 return -E1000_ERR_NVM;
2369
2370 flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
2371 hw->nvm.flash_base_addr;
2372
2373 do {
2374 udelay(1);
2375 /* Steps */
2376 ret_val = e1000_flash_cycle_init_ich8lan(hw);
2377 if (ret_val)
2378 break;
2379
2380 hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
2381 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
2382 hsflctl.hsf_ctrl.fldbcount = size - 1;
2383 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
2384 ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
2385
2386 ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
2387
2388 ret_val = e1000_flash_cycle_ich8lan(hw,
2389 ICH_FLASH_READ_COMMAND_TIMEOUT);
2390
2391 /*
2392 		 * Check if FCERR is set; if so, clear it and try the
2393 		 * whole sequence a few more times, else read in (shift
2394 		 * in) the Flash Data0 register, least significant byte
2395 		 * first.
2396 */
2397 if (!ret_val) {
2398 flash_data = er32flash(ICH_FLASH_FDATA0);
2399 if (size == 1)
2400 *data = (u8)(flash_data & 0x000000FF);
2401 else if (size == 2)
2402 *data = (u16)(flash_data & 0x0000FFFF);
2403 break;
2404 } else {
2405 /*
2406 * If we've gotten here, then things are probably
2407 * completely hosed, but if the error condition is
2408 * detected, it won't hurt to give it another try...
2409 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
2410 */
2411 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2412 if (hsfsts.hsf_status.flcerr == 1) {
2413 /* Repeat for some time before giving up. */
2414 continue;
2415 } else if (hsfsts.hsf_status.flcdone == 0) {
2416 e_dbg("Timeout error - flash cycle did not complete.\n");
2417 break;
2418 }
2419 }
2420 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
2421
2422 return ret_val;
2423 }
2424
2425 /**
2426 * e1000_write_nvm_ich8lan - Write word(s) to the NVM
2427 * @hw: pointer to the HW structure
2428  * @offset: The offset (in words) of the word(s) to write.
2429 * @words: Size of data to write in words
2430 * @data: Pointer to the word(s) to write at offset.
2431 *
2432 * Writes a byte or word to the NVM using the flash access registers.
2433 **/
2434 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
2435 u16 *data)
2436 {
2437 struct e1000_nvm_info *nvm = &hw->nvm;
2438 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2439 u16 i;
2440
2441 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
2442 (words == 0)) {
2443 e_dbg("nvm parameter(s) out of bounds\n");
2444 return -E1000_ERR_NVM;
2445 }
2446
2447 nvm->ops.acquire(hw);
2448
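	/*
	 * Annotation (not part of the original source): writes are only
	 * staged in the shadow RAM here; the actual flash commit happens
	 * later in e1000_update_nvm_checksum_ich8lan(), which copies the
	 * whole bank.
	 */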
2449 for (i = 0; i < words; i++) {
2450 dev_spec->shadow_ram[offset+i].modified = true;
2451 dev_spec->shadow_ram[offset+i].value = data[i];
2452 }
2453
2454 nvm->ops.release(hw);
2455
2456 return 0;
2457 }
2458
2459 /**
2460 * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
2461 * @hw: pointer to the HW structure
2462 *
2463 * The NVM checksum is updated by calling the generic update_nvm_checksum,
2464 * which writes the checksum to the shadow ram. The changes in the shadow
2465  * ram are then committed to the EEPROM one bank at a time, checking
2466  * for the modified bit and writing only the pending changes.
2467 * After a successful commit, the shadow ram is cleared and is ready for
2468 * future writes.
2469 **/
2470 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2471 {
2472 struct e1000_nvm_info *nvm = &hw->nvm;
2473 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2474 u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
2475 s32 ret_val;
2476 u16 data;
2477
2478 ret_val = e1000e_update_nvm_checksum_generic(hw);
2479 if (ret_val)
2480 goto out;
2481
2482 if (nvm->type != e1000_nvm_flash_sw)
2483 goto out;
2484
2485 nvm->ops.acquire(hw);
2486
2487 /*
2488 * We're writing to the opposite bank so if we're on bank 1,
2489 * write to bank 0 etc. We also need to erase the segment that
2490 * is going to be written
2491 */
2492 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2493 if (ret_val) {
2494 e_dbg("Could not detect valid bank, assuming bank 0\n");
2495 bank = 0;
2496 }
2497
2498 if (bank == 0) {
2499 new_bank_offset = nvm->flash_bank_size;
2500 old_bank_offset = 0;
2501 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
2502 if (ret_val)
2503 goto release;
2504 } else {
2505 old_bank_offset = nvm->flash_bank_size;
2506 new_bank_offset = 0;
2507 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
2508 if (ret_val)
2509 goto release;
2510 }
2511
2512 for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
2513 /*
2514 * Determine whether to write the value stored
2515 * in the other NVM bank or a modified value stored
2516 * in the shadow RAM
2517 */
2518 if (dev_spec->shadow_ram[i].modified) {
2519 data = dev_spec->shadow_ram[i].value;
2520 } else {
2521 ret_val = e1000_read_flash_word_ich8lan(hw, i +
2522 old_bank_offset,
2523 &data);
2524 if (ret_val)
2525 break;
2526 }
2527
2528 /*
2529 * If the word is 0x13, then make sure the signature bits
2530 * (15:14) are 11b until the commit has completed.
2531 * This will allow us to write 10b which indicates the
2532 * signature is valid. We want to do this after the write
2533 * has completed so that we don't mark the segment valid
2534 * while the write is still in progress
2535 */
2536 if (i == E1000_ICH_NVM_SIG_WORD)
2537 data |= E1000_ICH_NVM_SIG_MASK;
2538
2539 /* Convert offset to bytes. */
2540 act_offset = (i + new_bank_offset) << 1;
2541
2542 udelay(100);
2543 /* Write the bytes to the new bank. */
2544 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2545 act_offset,
2546 (u8)data);
2547 if (ret_val)
2548 break;
2549
2550 udelay(100);
2551 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2552 act_offset + 1,
2553 (u8)(data >> 8));
2554 if (ret_val)
2555 break;
2556 }
2557
2558 /*
2559 * Don't bother writing the segment valid bits if sector
2560 * programming failed.
2561 */
2562 if (ret_val) {
2563 /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
2564 e_dbg("Flash commit failed.\n");
2565 goto release;
2566 }
2567
2568 /*
2569 	 * Finally, validate the new segment by setting bits 15:14 of word
2570 	 * 0x13 to 10b. This can be done without an erase as well, since
2571 	 * these bits are 11b to start with and we only need to change
2572 	 * bit 14 from 1b to 0b.
2573 */
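	/*
	 * Worked example (annotation, not part of the original source): the
	 * high byte of word 0x13 was written as 11xxxxxxb during the copy
	 * above; masking the word with 0xBFFF clears bit 14, so the
	 * signature becomes 10b, which marks the new bank valid.
	 */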
2574 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
2575 ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
2576 if (ret_val)
2577 goto release;
2578
2579 data &= 0xBFFF;
2580 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
2581 act_offset * 2 + 1,
2582 (u8)(data >> 8));
2583 if (ret_val)
2584 goto release;
2585
2586 /*
2587 * And invalidate the previously valid segment by setting
2588 * its signature word (0x13) high_byte to 0b. This can be
2589 * done without an erase because flash erase sets all bits
2590 * to 1's. We can write 1's to 0's without an erase
2591 */
2592 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
2593 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
2594 if (ret_val)
2595 goto release;
2596
2597 /* Great! Everything worked, we can now clear the cached entries. */
2598 for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
2599 dev_spec->shadow_ram[i].modified = false;
2600 dev_spec->shadow_ram[i].value = 0xFFFF;
2601 }
2602
2603 release:
2604 nvm->ops.release(hw);
2605
2606 /*
2607 * Reload the EEPROM, or else modifications will not appear
2608 * until after the next adapter reset.
2609 */
2610 if (!ret_val) {
2611 nvm->ops.reload(hw);
2612 usleep_range(10000, 20000);
2613 }
2614
2615 out:
2616 if (ret_val)
2617 e_dbg("NVM update error: %d\n", ret_val);
2618
2619 return ret_val;
2620 }
2621
2622 /**
2623 * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
2624 * @hw: pointer to the HW structure
2625 *
2626 * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
2627  * If the bit is 0, the EEPROM has been modified, but the checksum was not
2628 * calculated, in which case we need to calculate the checksum and set bit 6.
2629 **/
2630 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
2631 {
2632 s32 ret_val;
2633 u16 data;
2634
2635 /*
2636 * Read 0x19 and check bit 6. If this bit is 0, the checksum
2637 * needs to be fixed. This bit is an indication that the NVM
2638 * was prepared by OEM software and did not calculate the
2639 * checksum...a likely scenario.
2640 */
2641 ret_val = e1000_read_nvm(hw, 0x19, 1, &data);
2642 if (ret_val)
2643 return ret_val;
2644
2645 if ((data & 0x40) == 0) {
2646 data |= 0x40;
2647 ret_val = e1000_write_nvm(hw, 0x19, 1, &data);
2648 if (ret_val)
2649 return ret_val;
2650 ret_val = e1000e_update_nvm_checksum(hw);
2651 if (ret_val)
2652 return ret_val;
2653 }
2654
2655 return e1000e_validate_nvm_checksum_generic(hw);
2656 }
2657
2658 /**
2659 * e1000e_write_protect_nvm_ich8lan - Make the NVM read-only
2660 * @hw: pointer to the HW structure
2661 *
2662 * To prevent malicious write/erase of the NVM, set it to be read-only
2663 * so that the hardware ignores all write/erase cycles of the NVM via
2664 * the flash control registers. The shadow-ram copy of the NVM will
2665 * still be updated, however any updates to this copy will not stick
2666 * across driver reloads.
2667 **/
2668 void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
2669 {
2670 struct e1000_nvm_info *nvm = &hw->nvm;
2671 union ich8_flash_protected_range pr0;
2672 union ich8_hws_flash_status hsfsts;
2673 u32 gfpreg;
2674
2675 nvm->ops.acquire(hw);
2676
2677 gfpreg = er32flash(ICH_FLASH_GFPREG);
2678
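	/*
	 * Annotation (assumption, not part of the original source): GFPREG
	 * appears to hold the GbE flash region base in its low bits and the
	 * region limit starting at bit 16, both in flash-sector units; PR0
	 * below is loaded with that base/limit and WPE set so writes and
	 * erases to the region are ignored.
	 */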
2679 /* Write-protect GbE Sector of NVM */
2680 pr0.regval = er32flash(ICH_FLASH_PR0);
2681 pr0.range.base = gfpreg & FLASH_GFPREG_BASE_MASK;
2682 pr0.range.limit = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK);
2683 pr0.range.wpe = true;
2684 ew32flash(ICH_FLASH_PR0, pr0.regval);
2685
2686 /*
2687 * Lock down a subset of GbE Flash Control Registers, e.g.
2688 * PR0 to prevent the write-protection from being lifted.
2689 * Once FLOCKDN is set, the registers protected by it cannot
2690 * be written until FLOCKDN is cleared by a hardware reset.
2691 */
2692 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2693 hsfsts.hsf_status.flockdn = true;
2694 ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval);
2695
2696 nvm->ops.release(hw);
2697 }
2698
2699 /**
2700 * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
2701 * @hw: pointer to the HW structure
2702  * @offset: The offset (in bytes) of the byte/word to write.
2703  * @size: Size of data to write, 1=byte 2=word
2704 * @data: The byte(s) to write to the NVM.
2705 *
2706 * Writes one/two bytes to the NVM using the flash access registers.
2707 **/
2708 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2709 u8 size, u16 data)
2710 {
2711 union ich8_hws_flash_status hsfsts;
2712 union ich8_hws_flash_ctrl hsflctl;
2713 u32 flash_linear_addr;
2714 u32 flash_data = 0;
2715 s32 ret_val;
2716 u8 count = 0;
2717
2718 if (size < 1 || size > 2 || data > size * 0xff ||
2719 offset > ICH_FLASH_LINEAR_ADDR_MASK)
2720 return -E1000_ERR_NVM;
2721
2722 flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
2723 hw->nvm.flash_base_addr;
2724
2725 do {
2726 udelay(1);
2727 /* Steps */
2728 ret_val = e1000_flash_cycle_init_ich8lan(hw);
2729 if (ret_val)
2730 break;
2731
2732 hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
2733 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
2734 		hsflctl.hsf_ctrl.fldbcount = size - 1;
2735 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
2736 ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
2737
2738 ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
2739
2740 if (size == 1)
2741 flash_data = (u32)data & 0x00FF;
2742 else
2743 flash_data = (u32)data;
2744
2745 ew32flash(ICH_FLASH_FDATA0, flash_data);
2746
2747 /*
2748 		 * Check if FCERR is set; if so, clear it and try the
2749 		 * whole sequence a few more times, else we are done.
2750 */
2751 ret_val = e1000_flash_cycle_ich8lan(hw,
2752 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
2753 if (!ret_val)
2754 break;
2755
2756 /*
2757 * If we're here, then things are most likely
2758 * completely hosed, but if the error condition
2759 * is detected, it won't hurt to give it another
2760 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
2761 */
2762 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2763 if (hsfsts.hsf_status.flcerr == 1)
2764 /* Repeat for some time before giving up. */
2765 continue;
2766 if (hsfsts.hsf_status.flcdone == 0) {
2767 e_dbg("Timeout error - flash cycle did not complete.\n");
2768 break;
2769 }
2770 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
2771
2772 return ret_val;
2773 }
2774
2775 /**
2776 * e1000_write_flash_byte_ich8lan - Write a single byte to NVM
2777 * @hw: pointer to the HW structure
2778  * @offset: The offset of the byte to write.
2779 * @data: The byte to write to the NVM.
2780 *
2781 * Writes a single byte to the NVM using the flash access registers.
2782 **/
2783 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
2784 u8 data)
2785 {
2786 u16 word = (u16)data;
2787
2788 return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
2789 }
2790
2791 /**
2792 * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
2793 * @hw: pointer to the HW structure
2794 * @offset: The offset of the byte to write.
2795 * @byte: The byte to write to the NVM.
2796 *
2797 * Writes a single byte to the NVM using the flash access registers.
2798 * Goes through a retry algorithm before giving up.
2799 **/
2800 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
2801 u32 offset, u8 byte)
2802 {
2803 s32 ret_val;
2804 u16 program_retries;
2805
2806 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
2807 if (!ret_val)
2808 return ret_val;
2809
2810 for (program_retries = 0; program_retries < 100; program_retries++) {
2811 e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset);
2812 udelay(100);
2813 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
2814 if (!ret_val)
2815 break;
2816 }
2817 if (program_retries == 100)
2818 return -E1000_ERR_NVM;
2819
2820 return 0;
2821 }
2822
2823 /**
2824 * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
2825 * @hw: pointer to the HW structure
2826 * @bank: 0 for first bank, 1 for second bank, etc.
2827 *
2828 * Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
2829 * bank N is 4096 * N + flash_reg_addr.
2830 **/
2831 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
2832 {
2833 struct e1000_nvm_info *nvm = &hw->nvm;
2834 union ich8_hws_flash_status hsfsts;
2835 union ich8_hws_flash_ctrl hsflctl;
2836 u32 flash_linear_addr;
2837 /* bank size is in 16bit words - adjust to bytes */
2838 u32 flash_bank_size = nvm->flash_bank_size * 2;
2839 s32 ret_val;
2840 s32 count = 0;
2841 s32 j, iteration, sector_size;
2842
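	/*
	 * Annotation (not part of the original source): flash_bank_size is
	 * kept in 16-bit words, so the byte size is words * 2; e.g. a 4 KB
	 * bank made of 256-byte hardware sectors needs 4096 / 256 = 16 erase
	 * iterations (the berasesz == 0 case below).
	 */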
2843 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2844
2845 /*
2846 * Determine HW Sector size: Read BERASE bits of hw flash status
2847 * register
2848 * 00: The Hw sector is 256 bytes, hence we need to erase 16
2849 * consecutive sectors. The start index for the nth Hw sector
2850 * can be calculated as = bank * 4096 + n * 256
2851 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
2852 * The start index for the nth Hw sector can be calculated
2853 * as = bank * 4096
2854 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
2855 * (ich9 only, otherwise error condition)
2856 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
2857 */
2858 switch (hsfsts.hsf_status.berasesz) {
2859 case 0:
2860 /* Hw sector size 256 */
2861 sector_size = ICH_FLASH_SEG_SIZE_256;
2862 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
2863 break;
2864 case 1:
2865 sector_size = ICH_FLASH_SEG_SIZE_4K;
2866 iteration = 1;
2867 break;
2868 case 2:
2869 sector_size = ICH_FLASH_SEG_SIZE_8K;
2870 iteration = 1;
2871 break;
2872 case 3:
2873 sector_size = ICH_FLASH_SEG_SIZE_64K;
2874 iteration = 1;
2875 break;
2876 default:
2877 return -E1000_ERR_NVM;
2878 }
2879
2880 /* Start with the base address, then add the sector offset. */
2881 flash_linear_addr = hw->nvm.flash_base_addr;
2882 flash_linear_addr += (bank) ? flash_bank_size : 0;
2883
2884 	for (j = 0; j < iteration; j++) {
2885 do {
2886 /* Steps */
2887 ret_val = e1000_flash_cycle_init_ich8lan(hw);
2888 if (ret_val)
2889 return ret_val;
2890
2891 /*
2892 * Write a value 11 (block Erase) in Flash
2893 * Cycle field in hw flash control
2894 */
2895 hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
2896 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
2897 ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
2898
2899 /*
2900 * Write the last 24 bits of an index within the
2901 * block into Flash Linear address field in Flash
2902 * Address.
2903 */
2904 flash_linear_addr += (j * sector_size);
2905 ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
2906
2907 ret_val = e1000_flash_cycle_ich8lan(hw,
2908 ICH_FLASH_ERASE_COMMAND_TIMEOUT);
2909 if (!ret_val)
2910 break;
2911
2912 /*
2913 			 * Check if FCERR is set to 1. If so, clear it and
2914 			 * try the whole sequence a few more times, else we
2915 			 * are done.
2916 */
2917 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2918 if (hsfsts.hsf_status.flcerr == 1)
2919 /* repeat for some time before giving up */
2920 continue;
2921 else if (hsfsts.hsf_status.flcdone == 0)
2922 return ret_val;
2923 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
2924 }
2925
2926 return 0;
2927 }
2928
2929 /**
2930 * e1000_valid_led_default_ich8lan - Set the default LED settings
2931 * @hw: pointer to the HW structure
2932 * @data: Pointer to the LED settings
2933 *
2934 * Reads the LED default settings from the NVM to data. If the NVM LED
2935  * settings are all 0's or F's, set the LED default to a valid LED default
2936 * setting.
2937 **/
2938 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
2939 {
2940 s32 ret_val;
2941
2942 ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
2943 if (ret_val) {
2944 e_dbg("NVM Read Error\n");
2945 return ret_val;
2946 }
2947
2948 if (*data == ID_LED_RESERVED_0000 ||
2949 *data == ID_LED_RESERVED_FFFF)
2950 *data = ID_LED_DEFAULT_ICH8LAN;
2951
2952 return 0;
2953 }
2954
2955 /**
2956 * e1000_id_led_init_pchlan - store LED configurations
2957 * @hw: pointer to the HW structure
2958 *
2959 * PCH does not control LEDs via the LEDCTL register, rather it uses
2960 * the PHY LED configuration register.
2961 *
2962 * PCH also does not have an "always on" or "always off" mode which
2963 * complicates the ID feature. Instead of using the "on" mode to indicate
2964 * in ledctl_mode2 the LEDs to use for ID (see e1000e_id_led_init_generic()),
2965 * use "link_up" mode. The LEDs will still ID on request if there is no
2966 * link based on logic in e1000_led_[on|off]_pchlan().
2967 **/
2968 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
2969 {
2970 struct e1000_mac_info *mac = &hw->mac;
2971 s32 ret_val;
2972 const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
2973 const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
2974 u16 data, i, temp, shift;
2975
2976 /* Get default ID LED modes */
2977 ret_val = hw->nvm.ops.valid_led_default(hw, &data);
2978 if (ret_val)
2979 return ret_val;
2980
2981 mac->ledctl_default = er32(LEDCTL);
2982 mac->ledctl_mode1 = mac->ledctl_default;
2983 mac->ledctl_mode2 = mac->ledctl_default;
2984
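	/*
	 * Annotation (not part of the original source): each of the four
	 * possible LEDs has a 4-bit mode nibble in the NVM word (hence the
	 * data >> (i << 2) extraction below), while the PHY LED
	 * configuration packs each LED into a 5-bit field (hence the i * 5
	 * shift).
	 */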
2985 for (i = 0; i < 4; i++) {
2986 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
2987 shift = (i * 5);
2988 switch (temp) {
2989 case ID_LED_ON1_DEF2:
2990 case ID_LED_ON1_ON2:
2991 case ID_LED_ON1_OFF2:
2992 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
2993 mac->ledctl_mode1 |= (ledctl_on << shift);
2994 break;
2995 case ID_LED_OFF1_DEF2:
2996 case ID_LED_OFF1_ON2:
2997 case ID_LED_OFF1_OFF2:
2998 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
2999 mac->ledctl_mode1 |= (ledctl_off << shift);
3000 break;
3001 default:
3002 /* Do nothing */
3003 break;
3004 }
3005 switch (temp) {
3006 case ID_LED_DEF1_ON2:
3007 case ID_LED_ON1_ON2:
3008 case ID_LED_OFF1_ON2:
3009 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3010 mac->ledctl_mode2 |= (ledctl_on << shift);
3011 break;
3012 case ID_LED_DEF1_OFF2:
3013 case ID_LED_ON1_OFF2:
3014 case ID_LED_OFF1_OFF2:
3015 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
3016 mac->ledctl_mode2 |= (ledctl_off << shift);
3017 break;
3018 default:
3019 /* Do nothing */
3020 break;
3021 }
3022 }
3023
3024 return 0;
3025 }
3026
3027 /**
3028 * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
3029 * @hw: pointer to the HW structure
3030 *
3031  * ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
3032  * register, so the bus width is hard-coded.
3033 **/
3034 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
3035 {
3036 struct e1000_bus_info *bus = &hw->bus;
3037 s32 ret_val;
3038
3039 ret_val = e1000e_get_bus_info_pcie(hw);
3040
3041 /*
3042 * ICH devices are "PCI Express"-ish. They have
3043 * a configuration space, but do not contain
3044 * PCI Express Capability registers, so bus width
3045 * must be hardcoded.
3046 */
3047 if (bus->width == e1000_bus_width_unknown)
3048 bus->width = e1000_bus_width_pcie_x1;
3049
3050 return ret_val;
3051 }
3052
3053 /**
3054 * e1000_reset_hw_ich8lan - Reset the hardware
3055 * @hw: pointer to the HW structure
3056 *
3057 * Does a full reset of the hardware which includes a reset of the PHY and
3058 * MAC.
3059 **/
3060 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3061 {
3062 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3063 u16 reg;
3064 u32 ctrl, kab;
3065 s32 ret_val;
3066
3067 /*
3068 * Prevent the PCI-E bus from sticking if there is no TLP connection
3069 * on the last TLP read/write transaction when MAC is reset.
3070 */
3071 ret_val = e1000e_disable_pcie_master(hw);
3072 if (ret_val)
3073 e_dbg("PCI-E Master disable polling has failed.\n");
3074
3075 e_dbg("Masking off all interrupts\n");
3076 ew32(IMC, 0xffffffff);
3077
3078 /*
3079 * Disable the Transmit and Receive units. Then delay to allow
3080 * any pending transactions to complete before we hit the MAC
3081 * with the global reset.
3082 */
3083 ew32(RCTL, 0);
3084 ew32(TCTL, E1000_TCTL_PSP);
3085 e1e_flush();
3086
3087 usleep_range(10000, 20000);
3088
3089 /* Workaround for ICH8 bit corruption issue in FIFO memory */
3090 if (hw->mac.type == e1000_ich8lan) {
3091 /* Set Tx and Rx buffer allocation to 8k apiece. */
3092 ew32(PBA, E1000_PBA_8K);
3093 /* Set Packet Buffer Size to 16k. */
3094 ew32(PBS, E1000_PBS_16K);
3095 }
3096
3097 if (hw->mac.type == e1000_pchlan) {
3098 		/* Save the NVM K1 bit setting */
3099 		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &reg);
3100 if (ret_val)
3101 return ret_val;
3102
3103 if (reg & E1000_NVM_K1_ENABLE)
3104 dev_spec->nvm_k1_enabled = true;
3105 else
3106 dev_spec->nvm_k1_enabled = false;
3107 }
3108
3109 ctrl = er32(CTRL);
3110
3111 if (!hw->phy.ops.check_reset_block(hw)) {
3112 /*
3113 * Full-chip reset requires MAC and PHY reset at the same
3114 * time to make sure the interface between MAC and the
3115 * external PHY is reset.
3116 */
3117 ctrl |= E1000_CTRL_PHY_RST;
3118
3119 /*
3120 * Gate automatic PHY configuration by hardware on
3121 * non-managed 82579
3122 */
3123 if ((hw->mac.type == e1000_pch2lan) &&
3124 !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
3125 e1000_gate_hw_phy_config_ich8lan(hw, true);
3126 }
3127 ret_val = e1000_acquire_swflag_ich8lan(hw);
3128 e_dbg("Issuing a global reset to ich8lan\n");
3129 ew32(CTRL, (ctrl | E1000_CTRL_RST));
3130 /* cannot issue a flush here because it hangs the hardware */
3131 msleep(20);
3132
3133 if (!ret_val)
3134 clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
3135
3136 if (ctrl & E1000_CTRL_PHY_RST) {
3137 ret_val = hw->phy.ops.get_cfg_done(hw);
3138 if (ret_val)
3139 return ret_val;
3140
3141 ret_val = e1000_post_phy_reset_ich8lan(hw);
3142 if (ret_val)
3143 return ret_val;
3144 }
3145
3146 /*
3147 * For PCH, this write will make sure that any noise
3148 * will be detected as a CRC error and be dropped rather than show up
3149 * as a bad packet to the DMA engine.
3150 */
3151 if (hw->mac.type == e1000_pchlan)
3152 ew32(CRC_OFFSET, 0x65656565);
3153
3154 ew32(IMC, 0xffffffff);
3155 er32(ICR);
3156
3157 kab = er32(KABGTXD);
3158 kab |= E1000_KABGTXD_BGSQLBIAS;
3159 ew32(KABGTXD, kab);
3160
3161 return 0;
3162 }
3163
3164 /**
3165 * e1000_init_hw_ich8lan - Initialize the hardware
3166 * @hw: pointer to the HW structure
3167 *
3168 * Prepares the hardware for transmit and receive by doing the following:
3169 * - initialize hardware bits
3170 * - initialize LED identification
3171 * - setup receive address registers
3172 * - setup flow control
3173 * - setup transmit descriptors
3174 * - clear statistics
3175 **/
3176 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
3177 {
3178 struct e1000_mac_info *mac = &hw->mac;
3179 u32 ctrl_ext, txdctl, snoop;
3180 s32 ret_val;
3181 u16 i;
3182
3183 e1000_initialize_hw_bits_ich8lan(hw);
3184
3185 /* Initialize identification LED */
3186 ret_val = mac->ops.id_led_init(hw);
3187 if (ret_val)
3188 e_dbg("Error initializing identification LED\n");
3189 /* This is not fatal and we should not stop init due to this */
3190
3191 /* Setup the receive address. */
3192 e1000e_init_rx_addrs(hw, mac->rar_entry_count);
3193
3194 /* Zero out the Multicast HASH table */
3195 e_dbg("Zeroing the MTA\n");
3196 for (i = 0; i < mac->mta_reg_count; i++)
3197 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
3198
3199 /*
3200 * The 82578 Rx buffer will stall if wakeup is enabled in host and
3201 * the ME. Disable wakeup by clearing the host wakeup bit.
3202 * Reset the phy after disabling host wakeup to reset the Rx buffer.
3203 */
3204 if (hw->phy.type == e1000_phy_82578) {
3205 e1e_rphy(hw, BM_PORT_GEN_CFG, &i);
3206 i &= ~BM_WUC_HOST_WU_BIT;
3207 e1e_wphy(hw, BM_PORT_GEN_CFG, i);
3208 ret_val = e1000_phy_hw_reset_ich8lan(hw);
3209 if (ret_val)
3210 return ret_val;
3211 }
3212
3213 /* Setup link and flow control */
3214 ret_val = mac->ops.setup_link(hw);
3215
3216 /* Set the transmit descriptor write-back policy for both queues */
3217 txdctl = er32(TXDCTL(0));
3218 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3219 E1000_TXDCTL_FULL_TX_DESC_WB;
3220 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3221 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3222 ew32(TXDCTL(0), txdctl);
3223 txdctl = er32(TXDCTL(1));
3224 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
3225 E1000_TXDCTL_FULL_TX_DESC_WB;
3226 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
3227 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
3228 ew32(TXDCTL(1), txdctl);
3229
3230 /*
3231 * ICH8 has opposite polarity of no_snoop bits.
3232 * By default, we should use snoop behavior.
3233 */
3234 if (mac->type == e1000_ich8lan)
3235 snoop = PCIE_ICH8_SNOOP_ALL;
3236 else
3237 snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
3238 e1000e_set_pcie_no_snoop(hw, snoop);
3239
3240 ctrl_ext = er32(CTRL_EXT);
3241 ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
3242 ew32(CTRL_EXT, ctrl_ext);
3243
3244 /*
3245 * Clear all of the statistics registers (clear on read). It is
3246 * important that we do this after we have tried to establish link
3247 * because the symbol error count will increment wildly if there
3248 * is no link.
3249 */
3250 e1000_clear_hw_cntrs_ich8lan(hw);
3251
3252 return ret_val;
3253 }
3254 /**
3255 * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
3256 * @hw: pointer to the HW structure
3257 *
3258 * Sets/Clears required hardware bits necessary for correctly setting up the
3259 * hardware for transmit and receive.
3260 **/
3261 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
3262 {
3263 u32 reg;
3264
3265 /* Extended Device Control */
3266 reg = er32(CTRL_EXT);
3267 reg |= (1 << 22);
3268 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
3269 if (hw->mac.type >= e1000_pchlan)
3270 reg |= E1000_CTRL_EXT_PHYPDEN;
3271 ew32(CTRL_EXT, reg);
3272
3273 /* Transmit Descriptor Control 0 */
3274 reg = er32(TXDCTL(0));
3275 reg |= (1 << 22);
3276 ew32(TXDCTL(0), reg);
3277
3278 /* Transmit Descriptor Control 1 */
3279 reg = er32(TXDCTL(1));
3280 reg |= (1 << 22);
3281 ew32(TXDCTL(1), reg);
3282
3283 /* Transmit Arbitration Control 0 */
3284 reg = er32(TARC(0));
3285 if (hw->mac.type == e1000_ich8lan)
3286 reg |= (1 << 28) | (1 << 29);
3287 reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
3288 ew32(TARC(0), reg);
3289
3290 /* Transmit Arbitration Control 1 */
3291 reg = er32(TARC(1));
3292 if (er32(TCTL) & E1000_TCTL_MULR)
3293 reg &= ~(1 << 28);
3294 else
3295 reg |= (1 << 28);
3296 reg |= (1 << 24) | (1 << 26) | (1 << 30);
3297 ew32(TARC(1), reg);
3298
3299 /* Device Status */
3300 if (hw->mac.type == e1000_ich8lan) {
3301 reg = er32(STATUS);
3302 reg &= ~(1 << 31);
3303 ew32(STATUS, reg);
3304 }
3305
3306 /*
3307 	 * Work around a descriptor data corruption issue seen with NFSv2 UDP
3308 	 * traffic by simply disabling the NFS filtering capability.
3309 */
3310 reg = er32(RFCTL);
3311 reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
3312 ew32(RFCTL, reg);
3313 }
3314
3315 /**
3316 * e1000_setup_link_ich8lan - Setup flow control and link settings
3317 * @hw: pointer to the HW structure
3318 *
3319 * Determines which flow control settings to use, then configures flow
3320 * control. Calls the appropriate media-specific link configuration
3321 * function. Assuming the adapter has a valid link partner, a valid link
3322 * should be established. Assumes the hardware has previously been reset
3323 * and the transmitter and receiver are not enabled.
3324 **/
3325 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
3326 {
3327 s32 ret_val;
3328
3329 if (hw->phy.ops.check_reset_block(hw))
3330 return 0;
3331
3332 /*
3333 * ICH parts do not have a word in the NVM to determine
3334 * the default flow control setting, so we explicitly
3335 * set it to full.
3336 */
3337 if (hw->fc.requested_mode == e1000_fc_default) {
3338 /* Workaround h/w hang when Tx flow control enabled */
3339 if (hw->mac.type == e1000_pchlan)
3340 hw->fc.requested_mode = e1000_fc_rx_pause;
3341 else
3342 hw->fc.requested_mode = e1000_fc_full;
3343 }
3344
3345 /*
3346 * Save off the requested flow control mode for use later. Depending
3347 * on the link partner's capabilities, we may or may not use this mode.
3348 */
3349 hw->fc.current_mode = hw->fc.requested_mode;
3350
3351 e_dbg("After fix-ups FlowControl is now = %x\n",
3352 hw->fc.current_mode);
3353
3354 /* Continue to configure the copper link. */
3355 ret_val = hw->mac.ops.setup_physical_interface(hw);
3356 if (ret_val)
3357 return ret_val;
3358
3359 ew32(FCTTV, hw->fc.pause_time);
3360 if ((hw->phy.type == e1000_phy_82578) ||
3361 (hw->phy.type == e1000_phy_82579) ||
3362 (hw->phy.type == e1000_phy_82577)) {
3363 ew32(FCRTV_PCH, hw->fc.refresh_time);
3364
3365 ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27),
3366 hw->fc.pause_time);
3367 if (ret_val)
3368 return ret_val;
3369 }
3370
3371 return e1000e_set_fc_watermarks(hw);
3372 }
3373
3374 /**
3375 * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
3376 * @hw: pointer to the HW structure
3377 *
3378 * Configures the kumeran interface to the PHY to wait the appropriate time
3379 * when polling the PHY, then call the generic setup_copper_link to finish
3380 * configuring the copper link.
3381 **/
3382 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
3383 {
3384 u32 ctrl;
3385 s32 ret_val;
3386 u16 reg_data;
3387
3388 ctrl = er32(CTRL);
3389 ctrl |= E1000_CTRL_SLU;
3390 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
3391 ew32(CTRL, ctrl);
3392
3393 /*
3394 * Set the mac to wait the maximum time between each iteration
3395 * and increase the max iterations when polling the phy;
3396 * this fixes erroneous timeouts at 10Mbps.
3397 */
3398 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_TIMEOUTS, 0xFFFF);
3399 if (ret_val)
3400 return ret_val;
3401 ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
3402 				       &reg_data);
3403 if (ret_val)
3404 return ret_val;
3405 reg_data |= 0x3F;
3406 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
3407 reg_data);
3408 if (ret_val)
3409 return ret_val;
3410
3411 switch (hw->phy.type) {
3412 case e1000_phy_igp_3:
3413 ret_val = e1000e_copper_link_setup_igp(hw);
3414 if (ret_val)
3415 return ret_val;
3416 break;
3417 case e1000_phy_bm:
3418 case e1000_phy_82578:
3419 ret_val = e1000e_copper_link_setup_m88(hw);
3420 if (ret_val)
3421 return ret_val;
3422 break;
3423 case e1000_phy_82577:
3424 case e1000_phy_82579:
3425 ret_val = e1000_copper_link_setup_82577(hw);
3426 if (ret_val)
3427 return ret_val;
3428 break;
3429 case e1000_phy_ife:
3430 		ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
3431 if (ret_val)
3432 return ret_val;
3433
3434 reg_data &= ~IFE_PMC_AUTO_MDIX;
3435
3436 switch (hw->phy.mdix) {
3437 case 1:
3438 reg_data &= ~IFE_PMC_FORCE_MDIX;
3439 break;
3440 case 2:
3441 reg_data |= IFE_PMC_FORCE_MDIX;
3442 break;
3443 case 0:
3444 default:
3445 reg_data |= IFE_PMC_AUTO_MDIX;
3446 break;
3447 }
3448 ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data);
3449 if (ret_val)
3450 return ret_val;
3451 break;
3452 default:
3453 break;
3454 }
3455
3456 return e1000e_setup_copper_link(hw);
3457 }
3458
3459 /**
3460 * e1000_get_link_up_info_ich8lan - Get current link speed and duplex
3461 * @hw: pointer to the HW structure
3462 * @speed: pointer to store current link speed
3463 * @duplex: pointer to store the current link duplex
3464 *
3465 * Calls the generic get_speed_and_duplex to retrieve the current link
3466 * information and then calls the Kumeran lock loss workaround for links at
3467 * gigabit speeds.
3468 **/
3469 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
3470 u16 *duplex)
3471 {
3472 s32 ret_val;
3473
3474 ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex);
3475 if (ret_val)
3476 return ret_val;
3477
3478 if ((hw->mac.type == e1000_ich8lan) &&
3479 (hw->phy.type == e1000_phy_igp_3) &&
3480 (*speed == SPEED_1000)) {
3481 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
3482 }
3483
3484 return ret_val;
3485 }
3486
3487 /**
3488 * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
3489 * @hw: pointer to the HW structure
3490 *
3491 * Work-around for 82566 Kumeran PCS lock loss:
3492 * On link status change (i.e. PCI reset, speed change) and link is up and
3493 * speed is gigabit-
3494 * 0) if workaround is optionally disabled do nothing
3495 * 1) wait 1ms for Kumeran link to come up
3496 * 2) check Kumeran Diagnostic register PCS lock loss bit
3497 * 3) if not set the link is locked (all is good), otherwise...
3498 * 4) reset the PHY
3499 * 5) repeat up to 10 times
3500 * Note: this is only called for IGP3 copper when speed is 1gb.
3501 **/
3502 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
3503 {
3504 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3505 u32 phy_ctrl;
3506 s32 ret_val;
3507 u16 i, data;
3508 bool link;
3509
3510 if (!dev_spec->kmrn_lock_loss_workaround_enabled)
3511 return 0;
3512
3513 /*
3514 * Make sure link is up before proceeding. If not just return.
3515 * Attempting this while link is negotiating fouled up link
3516 * stability
3517 */
3518 ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
3519 if (!link)
3520 return 0;
3521
3522 for (i = 0; i < 10; i++) {
3523 /* read once to clear */
3524 ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
3525 if (ret_val)
3526 return ret_val;
3527 /* and again to get new status */
3528 ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
3529 if (ret_val)
3530 return ret_val;
3531
3532 /* check for PCS lock */
3533 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
3534 return 0;
3535
3536 /* Issue PHY reset */
3537 e1000_phy_hw_reset(hw);
3538 mdelay(5);
3539 }
3540 /* Disable GigE link negotiation */
3541 phy_ctrl = er32(PHY_CTRL);
3542 phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
3543 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3544 ew32(PHY_CTRL, phy_ctrl);
3545
3546 /*
3547 * Call gig speed drop workaround on Gig disable before accessing
3548 * any PHY registers
3549 */
3550 e1000e_gig_downshift_workaround_ich8lan(hw);
3551
3552 /* unable to acquire PCS lock */
3553 return -E1000_ERR_PHY;
3554 }
3555
3556 /**
3557 * e1000e_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
3558 * @hw: pointer to the HW structure
3559 * @state: boolean value used to set the current Kumeran workaround state
3560 *
3561 * If ICH8, set the current Kumeran workaround state (enabled - true
3562 * /disabled - false).
3563 **/
3564 void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
3565 bool state)
3566 {
3567 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3568
3569 if (hw->mac.type != e1000_ich8lan) {
3570 e_dbg("Workaround applies to ICH8 only.\n");
3571 return;
3572 }
3573
3574 dev_spec->kmrn_lock_loss_workaround_enabled = state;
3575 }
3576
3577 /**
3578  * e1000e_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
3579 * @hw: pointer to the HW structure
3580 *
3581 * Workaround for 82566 power-down on D3 entry:
3582 * 1) disable gigabit link
3583 * 2) write VR power-down enable
3584 * 3) read it back
3585 * Continue if successful, else issue LCD reset and repeat
3586 **/
3587 void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
3588 {
3589 u32 reg;
3590 u16 data;
3591 u8 retry = 0;
3592
3593 if (hw->phy.type != e1000_phy_igp_3)
3594 return;
3595
3596 /* Try the workaround twice (if needed) */
3597 do {
3598 /* Disable link */
3599 reg = er32(PHY_CTRL);
3600 reg |= (E1000_PHY_CTRL_GBE_DISABLE |
3601 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
3602 ew32(PHY_CTRL, reg);
3603
3604 /*
3605 * Call gig speed drop workaround on Gig disable before
3606 * accessing any PHY registers
3607 */
3608 if (hw->mac.type == e1000_ich8lan)
3609 e1000e_gig_downshift_workaround_ich8lan(hw);
3610
3611 /* Write VR power-down enable */
3612 e1e_rphy(hw, IGP3_VR_CTRL, &data);
3613 data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
3614 e1e_wphy(hw, IGP3_VR_CTRL, data | IGP3_VR_CTRL_MODE_SHUTDOWN);
3615
3616 /* Read it back and test */
3617 e1e_rphy(hw, IGP3_VR_CTRL, &data);
3618 data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
3619 if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
3620 break;
3621
3622 /* Issue PHY reset and repeat at most one more time */
3623 reg = er32(CTRL);
3624 ew32(CTRL, reg | E1000_CTRL_PHY_RST);
3625 retry++;
3626 } while (retry);
3627 }
3628
3629 /**
3630 * e1000e_gig_downshift_workaround_ich8lan - WoL from S5 stops working
3631 * @hw: pointer to the HW structure
3632 *
3633 * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
3634 * LPLU, Gig disable, MDIC PHY reset):
3635 * 1) Set Kumeran Near-end loopback
3636 * 2) Clear Kumeran Near-end loopback
3637 * Should only be called for ICH8[m] devices with any 1G Phy.
3638 **/
e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw * hw)3639 void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
3640 {
3641 s32 ret_val;
3642 u16 reg_data;
3643
3644 if ((hw->mac.type != e1000_ich8lan) || (hw->phy.type == e1000_phy_ife))
3645 return;
3646
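	/*
	 * Toggle Kumeran near-end loopback: set the bit in the KMRNCTRLSTA
	 * diagnostic register, then clear it again.
	 */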
	ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
				       &reg_data);
	if (ret_val)
		return;
	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
					reg_data);
	if (ret_val)
		return;
	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
					reg_data);
}

/**
 *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
 *  @hw: pointer to the HW structure
 *
 *  During S0 to Sx transition, it is possible the link remains at gig
 *  instead of negotiating to a lower speed.  Before going to Sx, set
 *  'Gig Disable' to force link speed negotiation to a lower speed based on
 *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
 *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
 *  needs to be written.
 **/
void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
{
	u32 phy_ctrl;
	s32 ret_val;

	phy_ctrl = er32(PHY_CTRL);
	phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
	ew32(PHY_CTRL, phy_ctrl);

	if (hw->mac.type == e1000_ich8lan)
		e1000e_gig_downshift_workaround_ich8lan(hw);

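	/*
	 * On PCH and newer parts the OEM bits (LED, GbE disable and LPLU
	 * configuration) and the SMBus address must also be pushed to the
	 * PHY before entering Sx.
	 */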
	if (hw->mac.type >= e1000_pchlan) {
		e1000_oem_bits_config_ich8lan(hw, false);

		/* Reset PHY to activate OEM bits on 82577/8 */
		if (hw->mac.type == e1000_pchlan)
			e1000e_phy_hw_reset_generic(hw);

		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return;
		e1000_write_smbus_addr(hw);
		hw->phy.ops.release(hw);
	}
}

/**
 *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
 *  @hw: pointer to the HW structure
 *
 *  During Sx to S0 transitions on non-managed devices or managed devices
 *  on which PHY resets are not blocked, if the PHY registers cannot be
 *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
 *  the PHY.
 **/
void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
{
	u16 phy_id1, phy_id2;
	s32 ret_val;

	if ((hw->mac.type != e1000_pch2lan) ||
	    hw->phy.ops.check_reset_block(hw))
		return;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val) {
		e_dbg("Failed to acquire PHY semaphore in resume\n");
		return;
	}

	/* Test access to the PHY registers by reading the ID regs */
	ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_id1);
	if (ret_val)
		goto release;
	ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_id2);
	if (ret_val)
		goto release;

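	/*
	 * If the ID read back matches the stored PHY ID, register access
	 * is working and no LANPHYPC power cycle is needed.
	 */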
	if (hw->phy.id == ((u32)(phy_id1 << 16) |
			   (u32)(phy_id2 & PHY_REVISION_MASK)))
		goto release;

	e1000_toggle_lanphypc_value_ich8lan(hw);

	hw->phy.ops.release(hw);
	msleep(50);
	e1000_phy_hw_reset(hw);
	msleep(50);
	return;

release:
	hw->phy.ops.release(hw);
}

/**
 *  e1000_cleanup_led_ich8lan - Restore the default LED operation
 *  @hw: pointer to the HW structure
 *
 *  Return the LED back to the default configuration.
 **/
static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
{
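	/*
	 * The IFE (10/100) PHY drives the LEDs itself, so restore its
	 * special control register; otherwise restore the MAC's default
	 * LEDCTL value.
	 */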
	if (hw->phy.type == e1000_phy_ife)
		return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);

	ew32(LEDCTL, hw->mac.ledctl_default);
	return 0;
}

/**
 *  e1000_led_on_ich8lan - Turn LEDs on
 *  @hw: pointer to the HW structure
 *
 *  Turn on the LEDs.
 **/
static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
{
	if (hw->phy.type == e1000_phy_ife)
		return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
				(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));

	ew32(LEDCTL, hw->mac.ledctl_mode2);
	return 0;
}

/**
 *  e1000_led_off_ich8lan - Turn LEDs off
 *  @hw: pointer to the HW structure
 *
 *  Turn off the LEDs.
 **/
static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
{
	if (hw->phy.type == e1000_phy_ife)
		return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
				(IFE_PSCL_PROBE_MODE |
				 IFE_PSCL_PROBE_LEDS_OFF));

	ew32(LEDCTL, hw->mac.ledctl_mode1);
	return 0;
}

/**
 *  e1000_setup_led_pchlan - Configures SW controllable LED
 *  @hw: pointer to the HW structure
 *
 *  This prepares the SW controllable LED for use.
 **/
static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
{
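	/*
	 * On PCH-based parts the LEDs are controlled through the PHY, so
	 * the LED mode is written to HV_LED_CONFIG rather than the MAC's
	 * LEDCTL register.
	 */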
	return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1);
}

/**
 *  e1000_cleanup_led_pchlan - Restore the default LED operation
 *  @hw: pointer to the HW structure
 *
 *  Return the LED back to the default configuration.
 **/
static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
{
	return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default);
}

/**
 *  e1000_led_on_pchlan - Turn LEDs on
 *  @hw: pointer to the HW structure
 *
 *  Turn on the LEDs.
 **/
static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
{
	u16 data = (u16)hw->mac.ledctl_mode2;
	u32 i, led;

	/*
	 * If no link, then turn LED on by setting the invert bit
	 * for each LED whose mode is "link_up" in ledctl_mode2.
	 */
	if (!(er32(STATUS) & E1000_STATUS_LU)) {
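		/*
		 * Each LED's configuration occupies a 5-bit field in
		 * HV_LED_CONFIG (hence the i * 5 shifts); three LEDs are
		 * defined.
		 */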
		for (i = 0; i < 3; i++) {
			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
			if ((led & E1000_PHY_LED0_MODE_MASK) !=
			    E1000_LEDCTL_MODE_LINK_UP)
				continue;
			if (led & E1000_PHY_LED0_IVRT)
				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
			else
				data |= (E1000_PHY_LED0_IVRT << (i * 5));
		}
	}

	return e1e_wphy(hw, HV_LED_CONFIG, data);
}

/**
 *  e1000_led_off_pchlan - Turn LEDs off
 *  @hw: pointer to the HW structure
 *
 *  Turn off the LEDs.
 **/
static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
{
	u16 data = (u16)hw->mac.ledctl_mode1;
	u32 i, led;

	/*
	 * If no link, then turn LED off by clearing the invert bit
	 * for each LED whose mode is "link_up" in ledctl_mode1.
	 */
	if (!(er32(STATUS) & E1000_STATUS_LU)) {
		for (i = 0; i < 3; i++) {
			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
			if ((led & E1000_PHY_LED0_MODE_MASK) !=
			    E1000_LEDCTL_MODE_LINK_UP)
				continue;
			if (led & E1000_PHY_LED0_IVRT)
				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
			else
				data |= (E1000_PHY_LED0_IVRT << (i * 5));
		}
	}

	return e1e_wphy(hw, HV_LED_CONFIG, data);
}

/**
 *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
 *  @hw: pointer to the HW structure
 *
 *  Read appropriate register for the config done bit for completion status
 *  and configure the PHY through s/w for EEPROM-less parts.
 *
 *  NOTE: some silicon which is EEPROM-less will fail trying to read the
 *  config done bit, so only log an error and continue.  If we were to
 *  return with an error, EEPROM-less silicon would not be able to be reset
 *  or change link.
 **/
static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u32 bank = 0;
	u32 status;

	e1000e_get_cfg_done(hw);

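	/*
	 * How "basic configuration done" is detected differs by MAC
	 * generation: ICH10 and newer poll the LAN init-done indication,
	 * while older parts poll the Auto Read Done bit.
	 */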
	/* Wait for indication from h/w that it has completed basic config */
	if (hw->mac.type >= e1000_ich10lan) {
		e1000_lan_init_done_ich8lan(hw);
	} else {
		ret_val = e1000e_get_auto_rd_done(hw);
		if (ret_val) {
			/*
			 * When auto config read does not complete, do not
			 * return with an error.  This can happen in situations
			 * where there is no eeprom and prevents getting link.
			 */
			e_dbg("Auto Read Done did not complete\n");
			ret_val = 0;
		}
	}

	/* Clear PHY Reset Asserted bit */
	status = er32(STATUS);
	if (status & E1000_STATUS_PHYRA)
		ew32(STATUS, status & ~E1000_STATUS_PHYRA);
	else
		e_dbg("PHY Reset Asserted not set - needs delay\n");

	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
	if (hw->mac.type <= e1000_ich9lan) {
		if (((er32(EECD) & E1000_EECD_PRES) == 0) &&
		    (hw->phy.type == e1000_phy_igp_3)) {
			e1000e_phy_init_script_igp3(hw);
		}
	} else {
		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
			/* Maybe we should do a basic PHY config */
			e_dbg("EEPROM not present\n");
			ret_val = -E1000_ERR_CONFIG;
		}
	}

	return ret_val;
}

/**
 *  e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
 *  @hw: pointer to the HW structure
 *
 *  In the case of a PHY power down to save power, or to turn off link during
 *  a driver unload, or when Wake on LAN is not enabled, remove the link.
 **/
static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
{
	/* If the management interface is not enabled, then power down */
	if (!(hw->mac.ops.check_mng_mode(hw) ||
	      hw->phy.ops.check_reset_block(hw)))
		e1000_power_down_phy_copper(hw);
}

/**
 *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
 *  @hw: pointer to the HW structure
 *
 *  Clears hardware counters specific to the silicon family and calls
 *  clear_hw_cntrs_generic to clear all general purpose counters.
 **/
static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
{
	u16 phy_data;
	s32 ret_val;

	e1000e_clear_hw_cntrs_base(hw);

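	/*
	 * These MAC statistics registers are clear-on-read, so reading
	 * them (and discarding the values) is enough to zero them.
	 */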
	er32(ALGNERRC);
	er32(RXERRC);
	er32(TNCRS);
	er32(CEXTERR);
	er32(TSCTC);
	er32(TSCTFC);

	er32(MGTPRC);
	er32(MGTPDC);
	er32(MGTPTC);

	er32(IAC);
	er32(ICRXOC);

	/* Clear PHY statistics registers */
	if ((hw->phy.type == e1000_phy_82578) ||
	    (hw->phy.type == e1000_phy_82579) ||
	    (hw->phy.type == e1000_phy_82577)) {
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return;
		ret_val = hw->phy.ops.set_page(hw,
					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
		if (ret_val)
			goto release;
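		/*
		 * The PHY statistics registers are likewise clear-on-read;
		 * the values read into phy_data are intentionally discarded.
		 */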
		hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
		hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
release:
		hw->phy.ops.release(hw);
	}
}

static const struct e1000_mac_operations ich8_mac_ops = {
	/* check_mng_mode dependent on mac type */
	.check_for_link		= e1000_check_for_copper_link_ich8lan,
	/* cleanup_led dependent on mac type */
	.clear_hw_cntrs		= e1000_clear_hw_cntrs_ich8lan,
	.get_bus_info		= e1000_get_bus_info_ich8lan,
	.set_lan_id		= e1000_set_lan_id_single_port,
	.get_link_up_info	= e1000_get_link_up_info_ich8lan,
	/* led_on dependent on mac type */
	/* led_off dependent on mac type */
	.update_mc_addr_list	= e1000e_update_mc_addr_list_generic,
	.reset_hw		= e1000_reset_hw_ich8lan,
	.init_hw		= e1000_init_hw_ich8lan,
	.setup_link		= e1000_setup_link_ich8lan,
	.setup_physical_interface = e1000_setup_copper_link_ich8lan,
	/* id_led_init dependent on mac type */
	.config_collision_dist	= e1000e_config_collision_dist_generic,
};

static const struct e1000_phy_operations ich8_phy_ops = {
	.acquire		= e1000_acquire_swflag_ich8lan,
	.check_reset_block	= e1000_check_reset_block_ich8lan,
	.commit			= NULL,
	.get_cfg_done		= e1000_get_cfg_done_ich8lan,
	.get_cable_length	= e1000e_get_cable_length_igp_2,
	.read_reg		= e1000e_read_phy_reg_igp,
	.release		= e1000_release_swflag_ich8lan,
	.reset			= e1000_phy_hw_reset_ich8lan,
	.set_d0_lplu_state	= e1000_set_d0_lplu_state_ich8lan,
	.set_d3_lplu_state	= e1000_set_d3_lplu_state_ich8lan,
	.write_reg		= e1000e_write_phy_reg_igp,
};

static const struct e1000_nvm_operations ich8_nvm_ops = {
	.acquire		= e1000_acquire_nvm_ich8lan,
	.read			= e1000_read_nvm_ich8lan,
	.release		= e1000_release_nvm_ich8lan,
	.reload			= e1000e_reload_nvm_generic,
	.update			= e1000_update_nvm_checksum_ich8lan,
	.valid_led_default	= e1000_valid_led_default_ich8lan,
	.validate		= e1000_validate_nvm_checksum_ich8lan,
	.write			= e1000_write_nvm_ich8lan,
};

const struct e1000_info e1000_ich8_info = {
	.mac			= e1000_ich8lan,
	.flags			= FLAG_HAS_WOL
				  | FLAG_IS_ICH
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_APME_IN_WUC,
	.pba			= 8,
	.max_hw_frame_size	= ETH_FRAME_LEN + ETH_FCS_LEN,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};

const struct e1000_info e1000_ich9_info = {
	.mac			= e1000_ich9lan,
	.flags			= FLAG_HAS_JUMBO_FRAMES
				  | FLAG_IS_ICH
				  | FLAG_HAS_WOL
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_APME_IN_WUC,
	.pba			= 18,
	.max_hw_frame_size	= DEFAULT_JUMBO,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};

const struct e1000_info e1000_ich10_info = {
	.mac			= e1000_ich10lan,
	.flags			= FLAG_HAS_JUMBO_FRAMES
				  | FLAG_IS_ICH
				  | FLAG_HAS_WOL
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_APME_IN_WUC,
	.pba			= 18,
	.max_hw_frame_size	= DEFAULT_JUMBO,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};

const struct e1000_info e1000_pch_info = {
	.mac			= e1000_pchlan,
	.flags			= FLAG_IS_ICH
				  | FLAG_HAS_WOL
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_HAS_JUMBO_FRAMES
				  | FLAG_DISABLE_FC_PAUSE_TIME /* errata */
				  | FLAG_APME_IN_WUC,
	.flags2			= FLAG2_HAS_PHY_STATS,
	.pba			= 26,
	.max_hw_frame_size	= 4096,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};

const struct e1000_info e1000_pch2_info = {
	.mac			= e1000_pch2lan,
	.flags			= FLAG_IS_ICH
				  | FLAG_HAS_WOL
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_HAS_JUMBO_FRAMES
				  | FLAG_APME_IN_WUC,
	.flags2			= FLAG2_HAS_PHY_STATS
				  | FLAG2_HAS_EEE,
	.pba			= 26,
	.max_hw_frame_size	= DEFAULT_JUMBO,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};