1 /* Intel(R) Gigabit Ethernet Linux driver
2  * Copyright(c) 2007-2015 Intel Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, see <http://www.gnu.org/licenses/>.
15  *
16  * The full GNU General Public License is included in this distribution in
17  * the file called "COPYING".
18  *
19  * Contact Information:
20  * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22  */
23 
24 /* e1000_82575
25  * e1000_82576
26  */
27 
28 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
29 
30 #include <linux/types.h>
31 #include <linux/if_ether.h>
32 #include <linux/i2c.h>
33 
34 #include "e1000_mac.h"
35 #include "e1000_82575.h"
36 #include "e1000_i210.h"
37 #include "igb.h"
38 
39 static s32  igb_get_invariants_82575(struct e1000_hw *);
40 static s32  igb_acquire_phy_82575(struct e1000_hw *);
41 static void igb_release_phy_82575(struct e1000_hw *);
42 static s32  igb_acquire_nvm_82575(struct e1000_hw *);
43 static void igb_release_nvm_82575(struct e1000_hw *);
44 static s32  igb_check_for_link_82575(struct e1000_hw *);
45 static s32  igb_get_cfg_done_82575(struct e1000_hw *);
46 static s32  igb_init_hw_82575(struct e1000_hw *);
47 static s32  igb_phy_hw_reset_sgmii_82575(struct e1000_hw *);
48 static s32  igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *);
49 static s32  igb_reset_hw_82575(struct e1000_hw *);
50 static s32  igb_reset_hw_82580(struct e1000_hw *);
51 static s32  igb_set_d0_lplu_state_82575(struct e1000_hw *, bool);
52 static s32  igb_set_d0_lplu_state_82580(struct e1000_hw *, bool);
53 static s32  igb_set_d3_lplu_state_82580(struct e1000_hw *, bool);
54 static s32  igb_setup_copper_link_82575(struct e1000_hw *);
55 static s32  igb_setup_serdes_link_82575(struct e1000_hw *);
56 static s32  igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16);
57 static void igb_clear_hw_cntrs_82575(struct e1000_hw *);
58 static s32  igb_acquire_swfw_sync_82575(struct e1000_hw *, u16);
59 static s32  igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *,
60 						 u16 *);
61 static s32  igb_get_phy_id_82575(struct e1000_hw *);
62 static void igb_release_swfw_sync_82575(struct e1000_hw *, u16);
63 static bool igb_sgmii_active_82575(struct e1000_hw *);
64 static s32  igb_reset_init_script_82575(struct e1000_hw *);
65 static s32  igb_read_mac_addr_82575(struct e1000_hw *);
66 static s32  igb_set_pcie_completion_timeout(struct e1000_hw *hw);
67 static s32  igb_reset_mdicnfg_82580(struct e1000_hw *hw);
68 static s32  igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
69 static s32  igb_update_nvm_checksum_82580(struct e1000_hw *hw);
70 static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
71 static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
72 static const u16 e1000_82580_rxpbs_table[] = {
73 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 };
74 
75 /* Due to a hw errata, if the host tries to configure the VFTA register
76  * while performing queries from the BMC or DMA, then the VFTA in some
77  * cases won't be written.
78  */
79 
80 /**
81  *  igb_write_vfta_i350 - Write value to VLAN filter table
82  *  @hw: pointer to the HW structure
83  *  @offset: register offset in VLAN filter table
84  *  @value: register value written to VLAN filter table
85  *
86  *  Writes value at the given offset in the register array which stores
87  *  the VLAN filter table.
88  **/
89 static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
90 {
91 	struct igb_adapter *adapter = hw->back;
92 	int i;
93 
94 	for (i = 10; i--;)
95 		array_wr32(E1000_VFTA, offset, value);
96 
97 	wrfl();
98 	adapter->shadow_vfta[offset] = value;
99 }
100 
101 /**
102  *  igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
103  *  @hw: pointer to the HW structure
104  *
105  *  Called to determine if the I2C pins are being used for I2C or as an
106  *  external MDIO interface since the two options are mutually exclusive.
107  **/
108 static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
109 {
110 	u32 reg = 0;
111 	bool ext_mdio = false;
112 
113 	switch (hw->mac.type) {
114 	case e1000_82575:
115 	case e1000_82576:
116 		reg = rd32(E1000_MDIC);
117 		ext_mdio = !!(reg & E1000_MDIC_DEST);
118 		break;
119 	case e1000_82580:
120 	case e1000_i350:
121 	case e1000_i354:
122 	case e1000_i210:
123 	case e1000_i211:
124 		reg = rd32(E1000_MDICNFG);
125 		ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
126 		break;
127 	default:
128 		break;
129 	}
130 	return ext_mdio;
131 }
132 
133 /**
134  *  igb_check_for_link_media_swap - Check which M88E1112 interface linked
135  *  @hw: pointer to the HW structure
136  *
137  *  Poll the M88E1112 interfaces to see which interface achieved link.
138  */
139 static s32 igb_check_for_link_media_swap(struct e1000_hw *hw)
140 {
141 	struct e1000_phy_info *phy = &hw->phy;
142 	s32 ret_val;
143 	u16 data;
144 	u8 port = 0;
145 
146 	/* Check the copper medium. */
147 	ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
148 	if (ret_val)
149 		return ret_val;
150 
151 	ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
152 	if (ret_val)
153 		return ret_val;
154 
155 	if (data & E1000_M88E1112_STATUS_LINK)
156 		port = E1000_MEDIA_PORT_COPPER;
157 
158 	/* Check the other medium. */
159 	ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1);
160 	if (ret_val)
161 		return ret_val;
162 
163 	ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
164 	if (ret_val)
165 		return ret_val;
166 
167 
168 	if (data & E1000_M88E1112_STATUS_LINK)
169 		port = E1000_MEDIA_PORT_OTHER;
170 
171 	/* Determine if a swap needs to happen. */
172 	if (port && (hw->dev_spec._82575.media_port != port)) {
173 		hw->dev_spec._82575.media_port = port;
174 		hw->dev_spec._82575.media_changed = true;
175 	}
176 
177 	if (port == E1000_MEDIA_PORT_COPPER) {
178 		/* reset page to 0 */
179 		ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
180 		if (ret_val)
181 			return ret_val;
182 		igb_check_for_link_82575(hw);
183 	} else {
184 		igb_check_for_link_82575(hw);
185 		/* reset page to 0 */
186 		ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
187 		if (ret_val)
188 			return ret_val;
189 	}
190 
191 	return 0;
192 }
193 
194 /**
195  *  igb_init_phy_params_82575 - Init PHY func ptrs.
196  *  @hw: pointer to the HW structure
197  **/
198 static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
199 {
200 	struct e1000_phy_info *phy = &hw->phy;
201 	s32 ret_val = 0;
202 	u32 ctrl_ext;
203 
204 	if (hw->phy.media_type != e1000_media_type_copper) {
205 		phy->type = e1000_phy_none;
206 		goto out;
207 	}
208 
209 	phy->autoneg_mask	= AUTONEG_ADVERTISE_SPEED_DEFAULT;
210 	phy->reset_delay_us	= 100;
211 
212 	ctrl_ext = rd32(E1000_CTRL_EXT);
213 
214 	if (igb_sgmii_active_82575(hw)) {
215 		phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
216 		ctrl_ext |= E1000_CTRL_I2C_ENA;
217 	} else {
218 		phy->ops.reset = igb_phy_hw_reset;
219 		ctrl_ext &= ~E1000_CTRL_I2C_ENA;
220 	}
221 
222 	wr32(E1000_CTRL_EXT, ctrl_ext);
223 	igb_reset_mdicnfg_82580(hw);
224 
225 	if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
226 		phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
227 		phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
228 	} else {
229 		switch (hw->mac.type) {
230 		case e1000_82580:
231 		case e1000_i350:
232 		case e1000_i354:
233 		case e1000_i210:
234 		case e1000_i211:
235 			phy->ops.read_reg = igb_read_phy_reg_82580;
236 			phy->ops.write_reg = igb_write_phy_reg_82580;
237 			break;
238 		default:
239 			phy->ops.read_reg = igb_read_phy_reg_igp;
240 			phy->ops.write_reg = igb_write_phy_reg_igp;
241 		}
242 	}
243 
244 	/* set lan id */
245 	hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
246 			E1000_STATUS_FUNC_SHIFT;
247 
248 	/* Make sure the PHY is in a good state. Several people have reported
249 	 * firmware leaving the PHY's page select register set to something
250 	 * other than the default of zero, which causes the PHY ID read to
251 	 * access something other than the intended register.
252 	 */
253 	ret_val = hw->phy.ops.reset(hw);
254 	if (ret_val) {
255 		hw_dbg("Error resetting the PHY.\n");
256 		goto out;
257 	}
258 
259 	/* Set phy->phy_addr and phy->id. */
260 	igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, 0);
261 	ret_val = igb_get_phy_id_82575(hw);
262 	if (ret_val)
263 		return ret_val;
264 
265 	/* Verify phy id and set remaining function pointers */
266 	switch (phy->id) {
267 	case M88E1543_E_PHY_ID:
268 	case M88E1512_E_PHY_ID:
269 	case I347AT4_E_PHY_ID:
270 	case M88E1112_E_PHY_ID:
271 	case M88E1111_I_PHY_ID:
272 		phy->type		= e1000_phy_m88;
273 		phy->ops.check_polarity	= igb_check_polarity_m88;
274 		phy->ops.get_phy_info	= igb_get_phy_info_m88;
275 		if (phy->id != M88E1111_I_PHY_ID)
276 			phy->ops.get_cable_length =
277 					 igb_get_cable_length_m88_gen2;
278 		else
279 			phy->ops.get_cable_length = igb_get_cable_length_m88;
280 		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
281 		/* Check if this PHY is configured for media swap. */
282 		if (phy->id == M88E1112_E_PHY_ID) {
283 			u16 data;
284 
285 			ret_val = phy->ops.write_reg(hw,
286 						     E1000_M88E1112_PAGE_ADDR,
287 						     2);
288 			if (ret_val)
289 				goto out;
290 
291 			ret_val = phy->ops.read_reg(hw,
292 						    E1000_M88E1112_MAC_CTRL_1,
293 						    &data);
294 			if (ret_val)
295 				goto out;
296 
297 			data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >>
298 			       E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT;
299 			if (data == E1000_M88E1112_AUTO_COPPER_SGMII ||
300 			    data == E1000_M88E1112_AUTO_COPPER_BASEX)
301 				hw->mac.ops.check_for_link =
302 						igb_check_for_link_media_swap;
303 		}
304 		if (phy->id == M88E1512_E_PHY_ID) {
305 			ret_val = igb_initialize_M88E1512_phy(hw);
306 			if (ret_val)
307 				goto out;
308 		}
309 		if (phy->id == M88E1543_E_PHY_ID) {
310 			ret_val = igb_initialize_M88E1543_phy(hw);
311 			if (ret_val)
312 				goto out;
313 		}
314 		break;
315 	case IGP03E1000_E_PHY_ID:
316 		phy->type = e1000_phy_igp_3;
317 		phy->ops.get_phy_info = igb_get_phy_info_igp;
318 		phy->ops.get_cable_length = igb_get_cable_length_igp_2;
319 		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
320 		phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
321 		phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
322 		break;
323 	case I82580_I_PHY_ID:
324 	case I350_I_PHY_ID:
325 		phy->type = e1000_phy_82580;
326 		phy->ops.force_speed_duplex =
327 					 igb_phy_force_speed_duplex_82580;
328 		phy->ops.get_cable_length = igb_get_cable_length_82580;
329 		phy->ops.get_phy_info = igb_get_phy_info_82580;
330 		phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
331 		phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
332 		break;
333 	case I210_I_PHY_ID:
334 		phy->type		= e1000_phy_i210;
335 		phy->ops.check_polarity	= igb_check_polarity_m88;
336 		phy->ops.get_cfg_done	= igb_get_cfg_done_i210;
337 		phy->ops.get_phy_info	= igb_get_phy_info_m88;
338 		phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
339 		phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
340 		phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
341 		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
342 		break;
343 	default:
344 		ret_val = -E1000_ERR_PHY;
345 		goto out;
346 	}
347 
348 out:
349 	return ret_val;
350 }
351 
352 /**
353  *  igb_init_nvm_params_82575 - Init NVM func ptrs.
354  *  @hw: pointer to the HW structure
355  **/
356 static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
357 {
358 	struct e1000_nvm_info *nvm = &hw->nvm;
359 	u32 eecd = rd32(E1000_EECD);
360 	u16 size;
361 
362 	size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
363 		     E1000_EECD_SIZE_EX_SHIFT);
364 
365 	/* Added to a constant, "size" becomes the left-shift value
366 	 * for setting word_size.
367 	 */
368 	size += NVM_WORD_SIZE_BASE_SHIFT;
369 
370 	/* Just in case size is out of range, cap it to the largest
371 	 * EEPROM size supported
372 	 */
373 	if (size > 15)
374 		size = 15;
375 
376 	nvm->word_size = BIT(size);
377 	nvm->opcode_bits = 8;
378 	nvm->delay_usec = 1;
379 
380 	switch (nvm->override) {
381 	case e1000_nvm_override_spi_large:
382 		nvm->page_size = 32;
383 		nvm->address_bits = 16;
384 		break;
385 	case e1000_nvm_override_spi_small:
386 		nvm->page_size = 8;
387 		nvm->address_bits = 8;
388 		break;
389 	default:
390 		nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
391 		nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ?
392 				    16 : 8;
393 		break;
394 	}
395 	if (nvm->word_size == BIT(15))
396 		nvm->page_size = 128;
397 
398 	nvm->type = e1000_nvm_eeprom_spi;
399 
400 	/* NVM Function Pointers */
401 	nvm->ops.acquire = igb_acquire_nvm_82575;
402 	nvm->ops.release = igb_release_nvm_82575;
403 	nvm->ops.write = igb_write_nvm_spi;
404 	nvm->ops.validate = igb_validate_nvm_checksum;
405 	nvm->ops.update = igb_update_nvm_checksum;
406 	if (nvm->word_size < BIT(15))
407 		nvm->ops.read = igb_read_nvm_eerd;
408 	else
409 		nvm->ops.read = igb_read_nvm_spi;
410 
411 	/* override generic family function pointers for specific descendants */
412 	switch (hw->mac.type) {
413 	case e1000_82580:
414 		nvm->ops.validate = igb_validate_nvm_checksum_82580;
415 		nvm->ops.update = igb_update_nvm_checksum_82580;
416 		break;
417 	case e1000_i354:
418 	case e1000_i350:
419 		nvm->ops.validate = igb_validate_nvm_checksum_i350;
420 		nvm->ops.update = igb_update_nvm_checksum_i350;
421 		break;
422 	default:
423 		break;
424 	}
425 
426 	return 0;
427 }
428 
429 /**
430  *  igb_init_mac_params_82575 - Init MAC func ptrs.
431  *  @hw: pointer to the HW structure
432  **/
433 static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
434 {
435 	struct e1000_mac_info *mac = &hw->mac;
436 	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
437 
438 	/* Set mta register count */
439 	mac->mta_reg_count = 128;
440 	/* Set uta register count */
441 	mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128;
442 	/* Set rar entry count */
443 	switch (mac->type) {
444 	case e1000_82576:
445 		mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
446 		break;
447 	case e1000_82580:
448 		mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
449 		break;
450 	case e1000_i350:
451 	case e1000_i354:
452 		mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
453 		break;
454 	default:
455 		mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
456 		break;
457 	}
458 	/* reset */
459 	if (mac->type >= e1000_82580)
460 		mac->ops.reset_hw = igb_reset_hw_82580;
461 	else
462 		mac->ops.reset_hw = igb_reset_hw_82575;
463 
464 	if (mac->type >= e1000_i210) {
465 		mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210;
466 		mac->ops.release_swfw_sync = igb_release_swfw_sync_i210;
467 
468 	} else {
469 		mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575;
470 		mac->ops.release_swfw_sync = igb_release_swfw_sync_82575;
471 	}
472 
473 	if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354))
474 		mac->ops.write_vfta = igb_write_vfta_i350;
475 	else
476 		mac->ops.write_vfta = igb_write_vfta;
477 
478 	/* Set if part includes ASF firmware */
479 	mac->asf_firmware_present = true;
480 	/* Set if manageability features are enabled. */
481 	mac->arc_subsystem_valid =
482 		(rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
483 			? true : false;
484 	/* enable EEE on i350 parts and later parts */
485 	if (mac->type >= e1000_i350)
486 		dev_spec->eee_disable = false;
487 	else
488 		dev_spec->eee_disable = true;
489 	/* Allow a single clear of the SW semaphore on I210 and newer */
490 	if (mac->type >= e1000_i210)
491 		dev_spec->clear_semaphore_once = true;
492 	/* physical interface link setup */
493 	mac->ops.setup_physical_interface =
494 		(hw->phy.media_type == e1000_media_type_copper)
495 			? igb_setup_copper_link_82575
496 			: igb_setup_serdes_link_82575;
497 
498 	if (mac->type == e1000_82580) {
499 		switch (hw->device_id) {
500 		/* feature not supported on these id's */
501 		case E1000_DEV_ID_DH89XXCC_SGMII:
502 		case E1000_DEV_ID_DH89XXCC_SERDES:
503 		case E1000_DEV_ID_DH89XXCC_BACKPLANE:
504 		case E1000_DEV_ID_DH89XXCC_SFP:
505 			break;
506 		default:
507 			hw->dev_spec._82575.mas_capable = true;
508 			break;
509 		}
510 	}
511 	return 0;
512 }
513 
514 /**
515  *  igb_set_sfp_media_type_82575 - derives SFP module media type.
516  *  @hw: pointer to the HW structure
517  *
518  *  The media type is chosen based on SFP module
519  *  compatibility flags retrieved from SFP ID EEPROM.
520  **/
521 static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw)
522 {
523 	s32 ret_val = E1000_ERR_CONFIG;
524 	u32 ctrl_ext = 0;
525 	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
526 	struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags;
527 	u8 tranceiver_type = 0;
528 	s32 timeout = 3;
529 
530 	/* Turn I2C interface ON and power on sfp cage */
531 	ctrl_ext = rd32(E1000_CTRL_EXT);
532 	ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
533 	wr32(E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA);
534 
535 	wrfl();
536 
537 	/* Read SFP module data */
538 	while (timeout) {
539 		ret_val = igb_read_sfp_data_byte(hw,
540 			E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET),
541 			&tranceiver_type);
542 		if (ret_val == 0)
543 			break;
544 		msleep(100);
545 		timeout--;
546 	}
547 	if (ret_val != 0)
548 		goto out;
549 
550 	ret_val = igb_read_sfp_data_byte(hw,
551 			E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET),
552 			(u8 *)eth_flags);
553 	if (ret_val != 0)
554 		goto out;
555 
556 	/* Check if there is some SFP module plugged and powered */
557 	if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) ||
558 	    (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) {
559 		dev_spec->module_plugged = true;
560 		if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) {
561 			hw->phy.media_type = e1000_media_type_internal_serdes;
562 		} else if (eth_flags->e100_base_fx) {
563 			dev_spec->sgmii_active = true;
564 			hw->phy.media_type = e1000_media_type_internal_serdes;
565 		} else if (eth_flags->e1000_base_t) {
566 			dev_spec->sgmii_active = true;
567 			hw->phy.media_type = e1000_media_type_copper;
568 		} else {
569 			hw->phy.media_type = e1000_media_type_unknown;
570 			hw_dbg("PHY module has not been recognized\n");
571 			goto out;
572 		}
573 	} else {
574 		hw->phy.media_type = e1000_media_type_unknown;
575 	}
576 	ret_val = 0;
577 out:
578 	/* Restore I2C interface setting */
579 	wr32(E1000_CTRL_EXT, ctrl_ext);
580 	return ret_val;
581 }
582 
583 static s32 igb_get_invariants_82575(struct e1000_hw *hw)
584 {
585 	struct e1000_mac_info *mac = &hw->mac;
586 	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
587 	s32 ret_val;
588 	u32 ctrl_ext = 0;
589 	u32 link_mode = 0;
590 
591 	switch (hw->device_id) {
592 	case E1000_DEV_ID_82575EB_COPPER:
593 	case E1000_DEV_ID_82575EB_FIBER_SERDES:
594 	case E1000_DEV_ID_82575GB_QUAD_COPPER:
595 		mac->type = e1000_82575;
596 		break;
597 	case E1000_DEV_ID_82576:
598 	case E1000_DEV_ID_82576_NS:
599 	case E1000_DEV_ID_82576_NS_SERDES:
600 	case E1000_DEV_ID_82576_FIBER:
601 	case E1000_DEV_ID_82576_SERDES:
602 	case E1000_DEV_ID_82576_QUAD_COPPER:
603 	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
604 	case E1000_DEV_ID_82576_SERDES_QUAD:
605 		mac->type = e1000_82576;
606 		break;
607 	case E1000_DEV_ID_82580_COPPER:
608 	case E1000_DEV_ID_82580_FIBER:
609 	case E1000_DEV_ID_82580_QUAD_FIBER:
610 	case E1000_DEV_ID_82580_SERDES:
611 	case E1000_DEV_ID_82580_SGMII:
612 	case E1000_DEV_ID_82580_COPPER_DUAL:
613 	case E1000_DEV_ID_DH89XXCC_SGMII:
614 	case E1000_DEV_ID_DH89XXCC_SERDES:
615 	case E1000_DEV_ID_DH89XXCC_BACKPLANE:
616 	case E1000_DEV_ID_DH89XXCC_SFP:
617 		mac->type = e1000_82580;
618 		break;
619 	case E1000_DEV_ID_I350_COPPER:
620 	case E1000_DEV_ID_I350_FIBER:
621 	case E1000_DEV_ID_I350_SERDES:
622 	case E1000_DEV_ID_I350_SGMII:
623 		mac->type = e1000_i350;
624 		break;
625 	case E1000_DEV_ID_I210_COPPER:
626 	case E1000_DEV_ID_I210_FIBER:
627 	case E1000_DEV_ID_I210_SERDES:
628 	case E1000_DEV_ID_I210_SGMII:
629 	case E1000_DEV_ID_I210_COPPER_FLASHLESS:
630 	case E1000_DEV_ID_I210_SERDES_FLASHLESS:
631 		mac->type = e1000_i210;
632 		break;
633 	case E1000_DEV_ID_I211_COPPER:
634 		mac->type = e1000_i211;
635 		break;
636 	case E1000_DEV_ID_I354_BACKPLANE_1GBPS:
637 	case E1000_DEV_ID_I354_SGMII:
638 	case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS:
639 		mac->type = e1000_i354;
640 		break;
641 	default:
642 		return -E1000_ERR_MAC_INIT;
643 	}
644 
645 	/* Set media type */
646 	/* The 82575 uses bits 22:23 for link mode. The mode can be changed
647 	 * based on the EEPROM. We cannot rely upon device ID. There
648 	 * is no distinguishable difference between fiber and internal
649 	 * SerDes mode on the 82575. There can be an external PHY attached
650 	 * on the SGMII interface. For this, we'll set sgmii_active to true.
651 	 */
652 	hw->phy.media_type = e1000_media_type_copper;
653 	dev_spec->sgmii_active = false;
654 	dev_spec->module_plugged = false;
655 
656 	ctrl_ext = rd32(E1000_CTRL_EXT);
657 
658 	link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK;
659 	switch (link_mode) {
660 	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
661 		hw->phy.media_type = e1000_media_type_internal_serdes;
662 		break;
663 	case E1000_CTRL_EXT_LINK_MODE_SGMII:
664 		/* Get phy control interface type set (MDIO vs. I2C)*/
665 		if (igb_sgmii_uses_mdio_82575(hw)) {
666 			hw->phy.media_type = e1000_media_type_copper;
667 			dev_spec->sgmii_active = true;
668 			break;
669 		}
670 		/* fall through for I2C based SGMII */
671 	case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
672 		/* read media type from SFP EEPROM */
673 		ret_val = igb_set_sfp_media_type_82575(hw);
674 		if ((ret_val != 0) ||
675 		    (hw->phy.media_type == e1000_media_type_unknown)) {
676 			/* If media type was not identified then return media
677 			 * type defined by the CTRL_EXT settings.
678 			 */
679 			hw->phy.media_type = e1000_media_type_internal_serdes;
680 
681 			if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) {
682 				hw->phy.media_type = e1000_media_type_copper;
683 				dev_spec->sgmii_active = true;
684 			}
685 
686 			break;
687 		}
688 
689 		/* do not change link mode for 100BaseFX */
690 		if (dev_spec->eth_flags.e100_base_fx)
691 			break;
692 
693 		/* change current link mode setting */
694 		ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
695 
696 		if (hw->phy.media_type == e1000_media_type_copper)
697 			ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII;
698 		else
699 			ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
700 
701 		wr32(E1000_CTRL_EXT, ctrl_ext);
702 
703 		break;
704 	default:
705 		break;
706 	}
707 
708 	/* mac initialization and operations */
709 	ret_val = igb_init_mac_params_82575(hw);
710 	if (ret_val)
711 		goto out;
712 
713 	/* NVM initialization */
714 	ret_val = igb_init_nvm_params_82575(hw);
715 	switch (hw->mac.type) {
716 	case e1000_i210:
717 	case e1000_i211:
718 		ret_val = igb_init_nvm_params_i210(hw);
719 		break;
720 	default:
721 		break;
722 	}
723 
724 	if (ret_val)
725 		goto out;
726 
727 	/* if part supports SR-IOV then initialize mailbox parameters */
728 	switch (mac->type) {
729 	case e1000_82576:
730 	case e1000_i350:
731 		igb_init_mbx_params_pf(hw);
732 		break;
733 	default:
734 		break;
735 	}
736 
737 	/* setup PHY parameters */
738 	ret_val = igb_init_phy_params_82575(hw);
739 
740 out:
741 	return ret_val;
742 }
743 
744 /**
745  *  igb_acquire_phy_82575 - Acquire rights to access PHY
746  *  @hw: pointer to the HW structure
747  *
748  *  Acquire access rights to the correct PHY.  This is a
749  *  function pointer entry point called by the api module.
750  **/
751 static s32 igb_acquire_phy_82575(struct e1000_hw *hw)
752 {
753 	u16 mask = E1000_SWFW_PHY0_SM;
754 
755 	if (hw->bus.func == E1000_FUNC_1)
756 		mask = E1000_SWFW_PHY1_SM;
757 	else if (hw->bus.func == E1000_FUNC_2)
758 		mask = E1000_SWFW_PHY2_SM;
759 	else if (hw->bus.func == E1000_FUNC_3)
760 		mask = E1000_SWFW_PHY3_SM;
761 
762 	return hw->mac.ops.acquire_swfw_sync(hw, mask);
763 }
764 
765 /**
766  *  igb_release_phy_82575 - Release rights to access PHY
767  *  @hw: pointer to the HW structure
768  *
769  *  A wrapper to release access rights to the correct PHY.  This is a
770  *  function pointer entry point called by the api module.
771  **/
772 static void igb_release_phy_82575(struct e1000_hw *hw)
773 {
774 	u16 mask = E1000_SWFW_PHY0_SM;
775 
776 	if (hw->bus.func == E1000_FUNC_1)
777 		mask = E1000_SWFW_PHY1_SM;
778 	else if (hw->bus.func == E1000_FUNC_2)
779 		mask = E1000_SWFW_PHY2_SM;
780 	else if (hw->bus.func == E1000_FUNC_3)
781 		mask = E1000_SWFW_PHY3_SM;
782 
783 	hw->mac.ops.release_swfw_sync(hw, mask);
784 }
785 
786 /**
787  *  igb_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
788  *  @hw: pointer to the HW structure
789  *  @offset: register offset to be read
790  *  @data: pointer to the read data
791  *
792  *  Reads the PHY register at offset using the serial gigabit media independent
793  *  interface and stores the retrieved information in data.
794  **/
795 static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
796 					  u16 *data)
797 {
798 	s32 ret_val = -E1000_ERR_PARAM;
799 
800 	if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
801 		hw_dbg("PHY Address %u is out of range\n", offset);
802 		goto out;
803 	}
804 
805 	ret_val = hw->phy.ops.acquire(hw);
806 	if (ret_val)
807 		goto out;
808 
809 	ret_val = igb_read_phy_reg_i2c(hw, offset, data);
810 
811 	hw->phy.ops.release(hw);
812 
813 out:
814 	return ret_val;
815 }
816 
817 /**
818  *  igb_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
819  *  @hw: pointer to the HW structure
820  *  @offset: register offset to write to
821  *  @data: data to write at register offset
822  *
823  *  Writes the data to PHY register at the offset using the serial gigabit
824  *  media independent interface.
825  **/
826 static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
827 					   u16 data)
828 {
829 	s32 ret_val = -E1000_ERR_PARAM;
830 
831 
832 	if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
833 		hw_dbg("PHY Address %d is out of range\n", offset);
834 		goto out;
835 	}
836 
837 	ret_val = hw->phy.ops.acquire(hw);
838 	if (ret_val)
839 		goto out;
840 
841 	ret_val = igb_write_phy_reg_i2c(hw, offset, data);
842 
843 	hw->phy.ops.release(hw);
844 
845 out:
846 	return ret_val;
847 }
848 
849 /**
850  *  igb_get_phy_id_82575 - Retrieve PHY addr and id
851  *  @hw: pointer to the HW structure
852  *
853  *  Retrieves the PHY address and ID for both PHYs which do and do not use
854  *  the sgmii interface.
855  **/
856 static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
857 {
858 	struct e1000_phy_info *phy = &hw->phy;
859 	s32  ret_val = 0;
860 	u16 phy_id;
861 	u32 ctrl_ext;
862 	u32 mdic;
863 
864 	/* Extra read required for some PHYs on i354 */
865 	if (hw->mac.type == e1000_i354)
866 		igb_get_phy_id(hw);
867 
868 	/* For SGMII PHYs, we try the list of possible addresses until
869 	 * we find one that works.  For non-SGMII PHYs
870 	 * (e.g. integrated copper PHYs), an address of 1 should
871 	 * work.  The result of this function should mean phy->phy_addr
872 	 * and phy->id are set correctly.
873 	 */
874 	if (!(igb_sgmii_active_82575(hw))) {
875 		phy->addr = 1;
876 		ret_val = igb_get_phy_id(hw);
877 		goto out;
878 	}
879 
880 	if (igb_sgmii_uses_mdio_82575(hw)) {
881 		switch (hw->mac.type) {
882 		case e1000_82575:
883 		case e1000_82576:
884 			mdic = rd32(E1000_MDIC);
885 			mdic &= E1000_MDIC_PHY_MASK;
886 			phy->addr = mdic >> E1000_MDIC_PHY_SHIFT;
887 			break;
888 		case e1000_82580:
889 		case e1000_i350:
890 		case e1000_i354:
891 		case e1000_i210:
892 		case e1000_i211:
893 			mdic = rd32(E1000_MDICNFG);
894 			mdic &= E1000_MDICNFG_PHY_MASK;
895 			phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
896 			break;
897 		default:
898 			ret_val = -E1000_ERR_PHY;
899 			goto out;
900 		}
901 		ret_val = igb_get_phy_id(hw);
902 		goto out;
903 	}
904 
905 	/* Power on sgmii phy if it is disabled */
906 	ctrl_ext = rd32(E1000_CTRL_EXT);
907 	wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
908 	wrfl();
909 	msleep(300);
910 
911 	/* The address field in the I2CCMD register is 3 bits and 0 is invalid.
912 	 * Therefore, we need to test 1-7
913 	 */
914 	for (phy->addr = 1; phy->addr < 8; phy->addr++) {
915 		ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
916 		if (ret_val == 0) {
917 			hw_dbg("Vendor ID 0x%08X read at address %u\n",
918 			       phy_id, phy->addr);
919 			/* At the time of this writing, the M88 part is
920 			 * the only supported SGMII PHY product.
921 			 */
922 			if (phy_id == M88_VENDOR)
923 				break;
924 		} else {
925 			hw_dbg("PHY address %u was unreadable\n", phy->addr);
926 		}
927 	}
928 
929 	/* A valid PHY type couldn't be found. */
930 	if (phy->addr == 8) {
931 		phy->addr = 0;
932 		ret_val = -E1000_ERR_PHY;
933 		goto out;
934 	} else {
935 		ret_val = igb_get_phy_id(hw);
936 	}
937 
938 	/* restore previous sfp cage power state */
939 	wr32(E1000_CTRL_EXT, ctrl_ext);
940 
941 out:
942 	return ret_val;
943 }
944 
945 /**
946  *  igb_phy_hw_reset_sgmii_82575 - Performs a PHY reset
947  *  @hw: pointer to the HW structure
948  *
949  *  Resets the PHY using the serial gigabit media independent interface.
950  **/
951 static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
952 {
953 	struct e1000_phy_info *phy = &hw->phy;
954 	s32 ret_val;
955 
956 	/* This isn't a true "hard" reset, but is the only reset
957 	 * available to us at this time.
958 	 */
959 
960 	hw_dbg("Soft resetting SGMII attached PHY...\n");
961 
962 	/* SFP documentation requires the following to configure the SFP module
963 	 * to work on SGMII.  No further documentation is given.
964 	 */
965 	ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
966 	if (ret_val)
967 		goto out;
968 
969 	ret_val = igb_phy_sw_reset(hw);
970 	if (ret_val)
971 		goto out;
972 
973 	if (phy->id == M88E1512_E_PHY_ID)
974 		ret_val = igb_initialize_M88E1512_phy(hw);
975 	if (phy->id == M88E1543_E_PHY_ID)
976 		ret_val = igb_initialize_M88E1543_phy(hw);
977 out:
978 	return ret_val;
979 }
980 
981 /**
982  *  igb_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
983  *  @hw: pointer to the HW structure
984  *  @active: true to enable LPLU, false to disable
985  *
986  *  Sets the LPLU D0 state according to the active flag.  When
987  *  activating LPLU this function also disables smart speed
988  *  and vice versa.  LPLU will not be activated unless the
989  *  device autonegotiation advertisement meets standards of
990  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
991  *  This is a function pointer entry point only called by
992  *  PHY setup routines.
993  **/
994 static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
995 {
996 	struct e1000_phy_info *phy = &hw->phy;
997 	s32 ret_val;
998 	u16 data;
999 
1000 	ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
1001 	if (ret_val)
1002 		goto out;
1003 
1004 	if (active) {
1005 		data |= IGP02E1000_PM_D0_LPLU;
1006 		ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
1007 						 data);
1008 		if (ret_val)
1009 			goto out;
1010 
1011 		/* When LPLU is enabled, we should disable SmartSpeed */
1012 		ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
1013 						&data);
1014 		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
1015 		ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
1016 						 data);
1017 		if (ret_val)
1018 			goto out;
1019 	} else {
1020 		data &= ~IGP02E1000_PM_D0_LPLU;
1021 		ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
1022 						 data);
1023 		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
1024 		 * during Dx states where the power conservation is most
1025 		 * important.  During driver activity we should enable
1026 		 * SmartSpeed, so performance is maintained.
1027 		 */
1028 		if (phy->smart_speed == e1000_smart_speed_on) {
1029 			ret_val = phy->ops.read_reg(hw,
1030 					IGP01E1000_PHY_PORT_CONFIG, &data);
1031 			if (ret_val)
1032 				goto out;
1033 
1034 			data |= IGP01E1000_PSCFR_SMART_SPEED;
1035 			ret_val = phy->ops.write_reg(hw,
1036 					IGP01E1000_PHY_PORT_CONFIG, data);
1037 			if (ret_val)
1038 				goto out;
1039 		} else if (phy->smart_speed == e1000_smart_speed_off) {
1040 			ret_val = phy->ops.read_reg(hw,
1041 					IGP01E1000_PHY_PORT_CONFIG, &data);
1042 			if (ret_val)
1043 				goto out;
1044 
1045 			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
1046 			ret_val = phy->ops.write_reg(hw,
1047 					IGP01E1000_PHY_PORT_CONFIG, data);
1048 			if (ret_val)
1049 				goto out;
1050 		}
1051 	}
1052 
1053 out:
1054 	return ret_val;
1055 }
1056 
1057 /**
1058  *  igb_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state
1059  *  @hw: pointer to the HW structure
1060  *  @active: true to enable LPLU, false to disable
1061  *
1062  *  Sets the LPLU D0 state according to the active flag.  When
1063  *  activating LPLU this function also disables smart speed
1064  *  and vice versa.  LPLU will not be activated unless the
1065  *  device autonegotiation advertisement meets standards of
1066  *  either 10 or 10/100 or 10/100/1000 at all duplexes.
1067  *  This is a function pointer entry point only called by
1068  *  PHY setup routines.
1069  **/
1070 static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
1071 {
1072 	struct e1000_phy_info *phy = &hw->phy;
1073 	u16 data;
1074 
1075 	data = rd32(E1000_82580_PHY_POWER_MGMT);
1076 
1077 	if (active) {
1078 		data |= E1000_82580_PM_D0_LPLU;
1079 
1080 		/* When LPLU is enabled, we should disable SmartSpeed */
1081 		data &= ~E1000_82580_PM_SPD;
1082 	} else {
1083 		data &= ~E1000_82580_PM_D0_LPLU;
1084 
1085 		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
1086 		 * during Dx states where the power conservation is most
1087 		 * important.  During driver activity we should enable
1088 		 * SmartSpeed, so performance is maintained.
1089 		 */
1090 		if (phy->smart_speed == e1000_smart_speed_on)
1091 			data |= E1000_82580_PM_SPD;
1092 		else if (phy->smart_speed == e1000_smart_speed_off)
1093 			data &= ~E1000_82580_PM_SPD; }
1094 
1095 	wr32(E1000_82580_PHY_POWER_MGMT, data);
1096 	return 0;
1097 }
1098 
1099 /**
1100  *  igb_set_d3_lplu_state_82580 - Sets low power link up state for D3
1101  *  @hw: pointer to the HW structure
1102  *  @active: boolean used to enable/disable lplu
1103  *
1104  *  Success returns 0, Failure returns 1
1105  *
1106  *  The low power link up (lplu) state is set to the power management level D3
1107  *  and SmartSpeed is disabled when active is true, else clear lplu for D3
1108  *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
1109  *  is used during Dx states where the power conservation is most important.
1110  *  During driver activity, SmartSpeed should be enabled so performance is
1111  *  maintained.
1112  **/
1113 static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
1114 {
1115 	struct e1000_phy_info *phy = &hw->phy;
1116 	u16 data;
1117 
1118 	data = rd32(E1000_82580_PHY_POWER_MGMT);
1119 
1120 	if (!active) {
1121 		data &= ~E1000_82580_PM_D3_LPLU;
1122 		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
1123 		 * during Dx states where the power conservation is most
1124 		 * important.  During driver activity we should enable
1125 		 * SmartSpeed, so performance is maintained.
1126 		 */
1127 		if (phy->smart_speed == e1000_smart_speed_on)
1128 			data |= E1000_82580_PM_SPD;
1129 		else if (phy->smart_speed == e1000_smart_speed_off)
1130 			data &= ~E1000_82580_PM_SPD;
1131 	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
1132 		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
1133 		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
1134 		data |= E1000_82580_PM_D3_LPLU;
1135 		/* When LPLU is enabled, we should disable SmartSpeed */
1136 		data &= ~E1000_82580_PM_SPD;
1137 	}
1138 
1139 	wr32(E1000_82580_PHY_POWER_MGMT, data);
1140 	return 0;
1141 }
1142 
1143 /**
1144  *  igb_acquire_nvm_82575 - Request for access to EEPROM
1145  *  @hw: pointer to the HW structure
1146  *
1147  *  Acquire the necessary semaphores for exclusive access to the EEPROM.
1148  *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
1149  *  Return successful if access grant bit set, else clear the request for
1150  *  EEPROM access and return -E1000_ERR_NVM (-1).
1151  **/
1152 static s32 igb_acquire_nvm_82575(struct e1000_hw *hw)
1153 {
1154 	s32 ret_val;
1155 
1156 	ret_val = hw->mac.ops.acquire_swfw_sync(hw, E1000_SWFW_EEP_SM);
1157 	if (ret_val)
1158 		goto out;
1159 
1160 	ret_val = igb_acquire_nvm(hw);
1161 
1162 	if (ret_val)
1163 		hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);
1164 
1165 out:
1166 	return ret_val;
1167 }
1168 
1169 /**
1170  *  igb_release_nvm_82575 - Release exclusive access to EEPROM
1171  *  @hw: pointer to the HW structure
1172  *
1173  *  Stop any current commands to the EEPROM and clear the EEPROM request bit,
1174  *  then release the semaphores acquired.
1175  **/
1176 static void igb_release_nvm_82575(struct e1000_hw *hw)
1177 {
1178 	igb_release_nvm(hw);
1179 	hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);
1180 }
1181 
1182 /**
1183  *  igb_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
1184  *  @hw: pointer to the HW structure
1185  *  @mask: specifies which semaphore to acquire
1186  *
1187  *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
1188  *  will also specify which port we're acquiring the lock for.
1189  **/
1190 static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
1191 {
1192 	u32 swfw_sync;
1193 	u32 swmask = mask;
1194 	u32 fwmask = mask << 16;
1195 	s32 ret_val = 0;
1196 	s32 i = 0, timeout = 200;
1197 
1198 	while (i < timeout) {
1199 		if (igb_get_hw_semaphore(hw)) {
1200 			ret_val = -E1000_ERR_SWFW_SYNC;
1201 			goto out;
1202 		}
1203 
1204 		swfw_sync = rd32(E1000_SW_FW_SYNC);
1205 		if (!(swfw_sync & (fwmask | swmask)))
1206 			break;
1207 
1208 		/* Firmware currently using resource (fwmask)
1209 		 * or other software thread using resource (swmask)
1210 		 */
1211 		igb_put_hw_semaphore(hw);
1212 		mdelay(5);
1213 		i++;
1214 	}
1215 
1216 	if (i == timeout) {
1217 		hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
1218 		ret_val = -E1000_ERR_SWFW_SYNC;
1219 		goto out;
1220 	}
1221 
1222 	swfw_sync |= swmask;
1223 	wr32(E1000_SW_FW_SYNC, swfw_sync);
1224 
1225 	igb_put_hw_semaphore(hw);
1226 
1227 out:
1228 	return ret_val;
1229 }
1230 
1231 /**
1232  *  igb_release_swfw_sync_82575 - Release SW/FW semaphore
1233  *  @hw: pointer to the HW structure
1234  *  @mask: specifies which semaphore to acquire
1235  *
1236  *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
1237  *  will also specify which port we're releasing the lock for.
1238  **/
1239 static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
1240 {
1241 	u32 swfw_sync;
1242 
1243 	while (igb_get_hw_semaphore(hw) != 0)
1244 		; /* Empty */
1245 
1246 	swfw_sync = rd32(E1000_SW_FW_SYNC);
1247 	swfw_sync &= ~mask;
1248 	wr32(E1000_SW_FW_SYNC, swfw_sync);
1249 
1250 	igb_put_hw_semaphore(hw);
1251 }
1252 
1253 /**
1254  *  igb_get_cfg_done_82575 - Read config done bit
1255  *  @hw: pointer to the HW structure
1256  *
1257  *  Read the management control register for the config done bit for
1258  *  completion status.  NOTE: silicon which is EEPROM-less will fail trying
1259  *  to read the config done bit, so an error is *ONLY* logged and returns
1260  *  0.  If we were to return with error, EEPROM-less silicon
1261  *  would not be able to be reset or change link.
1262  **/
1263 static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
1264 {
1265 	s32 timeout = PHY_CFG_TIMEOUT;
1266 	u32 mask = E1000_NVM_CFG_DONE_PORT_0;
1267 
1268 	if (hw->bus.func == 1)
1269 		mask = E1000_NVM_CFG_DONE_PORT_1;
1270 	else if (hw->bus.func == E1000_FUNC_2)
1271 		mask = E1000_NVM_CFG_DONE_PORT_2;
1272 	else if (hw->bus.func == E1000_FUNC_3)
1273 		mask = E1000_NVM_CFG_DONE_PORT_3;
1274 
1275 	while (timeout) {
1276 		if (rd32(E1000_EEMNGCTL) & mask)
1277 			break;
1278 		usleep_range(1000, 2000);
1279 		timeout--;
1280 	}
1281 	if (!timeout)
1282 		hw_dbg("MNG configuration cycle has not completed.\n");
1283 
1284 	/* If EEPROM is not marked present, init the PHY manually */
1285 	if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) &&
1286 	    (hw->phy.type == e1000_phy_igp_3))
1287 		igb_phy_init_script_igp3(hw);
1288 
1289 	return 0;
1290 }
1291 
1292 /**
1293  *  igb_get_link_up_info_82575 - Get link speed/duplex info
1294  *  @hw: pointer to the HW structure
1295  *  @speed: stores the current speed
1296  *  @duplex: stores the current duplex
1297  *
1298  *  This is a wrapper function, if using the serial gigabit media independent
1299  *  interface, use PCS to retrieve the link speed and duplex information.
1300  *  Otherwise, use the generic function to get the link speed and duplex info.
1301  **/
1302 static s32 igb_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
1303 					u16 *duplex)
1304 {
1305 	s32 ret_val;
1306 
1307 	if (hw->phy.media_type != e1000_media_type_copper)
1308 		ret_val = igb_get_pcs_speed_and_duplex_82575(hw, speed,
1309 							       duplex);
1310 	else
1311 		ret_val = igb_get_speed_and_duplex_copper(hw, speed,
1312 								    duplex);
1313 
1314 	return ret_val;
1315 }
1316 
1317 /**
1318  *  igb_check_for_link_82575 - Check for link
1319  *  @hw: pointer to the HW structure
1320  *
1321  *  If sgmii is enabled, then use the pcs register to determine link, otherwise
1322  *  use the generic interface for determining link.
1323  **/
1324 static s32 igb_check_for_link_82575(struct e1000_hw *hw)
1325 {
1326 	s32 ret_val;
1327 	u16 speed, duplex;
1328 
1329 	if (hw->phy.media_type != e1000_media_type_copper) {
1330 		ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
1331 							     &duplex);
1332 		/* Use this flag to determine if link needs to be checked or
1333 		 * not.  If  we have link clear the flag so that we do not
1334 		 * not.  If we have link, clear the flag so that we do not
1335 		 */
1336 		hw->mac.get_link_status = !hw->mac.serdes_has_link;
1337 
1338 		/* Configure Flow Control now that Auto-Neg has completed.
1339 		 * First, we need to restore the desired flow control
1340 		 * settings because we may have had to re-autoneg with a
1341 		 * different link partner.
1342 		 */
1343 		ret_val = igb_config_fc_after_link_up(hw);
1344 		if (ret_val)
1345 			hw_dbg("Error configuring flow control\n");
1346 	} else {
1347 		ret_val = igb_check_for_copper_link(hw);
1348 	}
1349 
1350 	return ret_val;
1351 }
1352 
1353 /**
1354  *  igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown
1355  *  @hw: pointer to the HW structure
1356  **/
1357 void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
1358 {
1359 	u32 reg;
1360 
1361 
1362 	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
1363 	    !igb_sgmii_active_82575(hw))
1364 		return;
1365 
1366 	/* Enable PCS to turn on link */
1367 	reg = rd32(E1000_PCS_CFG0);
1368 	reg |= E1000_PCS_CFG_PCS_EN;
1369 	wr32(E1000_PCS_CFG0, reg);
1370 
1371 	/* Power up the laser */
1372 	reg = rd32(E1000_CTRL_EXT);
1373 	reg &= ~E1000_CTRL_EXT_SDP3_DATA;
1374 	wr32(E1000_CTRL_EXT, reg);
1375 
1376 	/* flush the write to verify completion */
1377 	wrfl();
1378 	usleep_range(1000, 2000);
1379 }
1380 
1381 /**
1382  *  igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
1383  *  @hw: pointer to the HW structure
1384  *  @speed: stores the current speed
1385  *  @duplex: stores the current duplex
1386  *
1387  *  Using the physical coding sub-layer (PCS), retrieve the current speed and
1388  *  duplex, then store the values in the pointers provided.
1389  **/
1390 static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
1391 						u16 *duplex)
1392 {
1393 	struct e1000_mac_info *mac = &hw->mac;
1394 	u32 pcs, status;
1395 
1396 	/* Set up defaults for the return values of this function */
1397 	mac->serdes_has_link = false;
1398 	*speed = 0;
1399 	*duplex = 0;
1400 
1401 	/* Read the PCS Status register for link state. For non-copper mode,
1402 	 * the status register is not accurate. The PCS status register is
1403 	 * used instead.
1404 	 */
1405 	pcs = rd32(E1000_PCS_LSTAT);
1406 
1407 	/* The link up bit determines when link is up on autoneg. The sync ok
1408 	 * gets set once both sides sync up and agree upon link. Stable link
1409 	 * can be determined by checking for both link up and link sync ok
1410 	 */
1411 	if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
1412 		mac->serdes_has_link = true;
1413 
1414 		/* Detect and store PCS speed */
1415 		if (pcs & E1000_PCS_LSTS_SPEED_1000)
1416 			*speed = SPEED_1000;
1417 		else if (pcs & E1000_PCS_LSTS_SPEED_100)
1418 			*speed = SPEED_100;
1419 		else
1420 			*speed = SPEED_10;
1421 
1422 		/* Detect and store PCS duplex */
1423 		if (pcs & E1000_PCS_LSTS_DUPLEX_FULL)
1424 			*duplex = FULL_DUPLEX;
1425 		else
1426 			*duplex = HALF_DUPLEX;
1427 
1428 		/* Check if it is an I354 2.5Gb backplane connection. */
1429 		if (mac->type == e1000_i354) {
1430 			status = rd32(E1000_STATUS);
1431 			if ((status & E1000_STATUS_2P5_SKU) &&
1432 			    !(status & E1000_STATUS_2P5_SKU_OVER)) {
1433 				*speed = SPEED_2500;
1434 				*duplex = FULL_DUPLEX;
1435 				hw_dbg("2500 Mbs, ");
1436 				hw_dbg("Full Duplex\n");
1437 			}
1438 		}
1439 
1440 	}
1441 
1442 	return 0;
1443 }
1444 
1445 /**
1446  *  igb_shutdown_serdes_link_82575 - Remove link during power down
1447  *  @hw: pointer to the HW structure
1448  *
1449  *  In the case of fiber serdes, shut down optics and PCS on driver unload
1450  *  when management pass thru is not enabled.
1451  **/
1452 void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
1453 {
1454 	u32 reg;
1455 
1456 	if (hw->phy.media_type != e1000_media_type_internal_serdes &&
1457 	    igb_sgmii_active_82575(hw))
1458 		return;
1459 
1460 	if (!igb_enable_mng_pass_thru(hw)) {
1461 		/* Disable PCS to turn off link */
1462 		reg = rd32(E1000_PCS_CFG0);
1463 		reg &= ~E1000_PCS_CFG_PCS_EN;
1464 		wr32(E1000_PCS_CFG0, reg);
1465 
1466 		/* shutdown the laser */
1467 		reg = rd32(E1000_CTRL_EXT);
1468 		reg |= E1000_CTRL_EXT_SDP3_DATA;
1469 		wr32(E1000_CTRL_EXT, reg);
1470 
1471 		/* flush the write to verify completion */
1472 		wrfl();
1473 		usleep_range(1000, 2000);
1474 	}
1475 }
1476 
1477 /**
1478  *  igb_reset_hw_82575 - Reset hardware
1479  *  @hw: pointer to the HW structure
1480  *
1481  *  This resets the hardware into a known state.  This is a
1482  *  function pointer entry point called by the api module.
1483  **/
1484 static s32 igb_reset_hw_82575(struct e1000_hw *hw)
1485 {
1486 	u32 ctrl;
1487 	s32 ret_val;
1488 
1489 	/* Prevent the PCI-E bus from sticking if there is no TLP connection
1490 	 * on the last TLP read/write transaction when MAC is reset.
1491 	 */
1492 	ret_val = igb_disable_pcie_master(hw);
1493 	if (ret_val)
1494 		hw_dbg("PCI-E Master disable polling has failed.\n");
1495 
1496 	/* set the completion timeout for interface */
1497 	ret_val = igb_set_pcie_completion_timeout(hw);
1498 	if (ret_val)
1499 		hw_dbg("PCI-E Set completion timeout has failed.\n");
1500 
1501 	hw_dbg("Masking off all interrupts\n");
1502 	wr32(E1000_IMC, 0xffffffff);
1503 
1504 	wr32(E1000_RCTL, 0);
1505 	wr32(E1000_TCTL, E1000_TCTL_PSP);
1506 	wrfl();
1507 
1508 	usleep_range(10000, 20000);
1509 
1510 	ctrl = rd32(E1000_CTRL);
1511 
1512 	hw_dbg("Issuing a global reset to MAC\n");
1513 	wr32(E1000_CTRL, ctrl | E1000_CTRL_RST);
1514 
1515 	ret_val = igb_get_auto_rd_done(hw);
1516 	if (ret_val) {
1517 		/* When auto config read does not complete, do not
1518 		 * return with an error. This can happen in situations
1519 		 * where there is no eeprom and prevents getting link.
1520 		 */
1521 		hw_dbg("Auto Read Done did not complete\n");
1522 	}
1523 
1524 	/* If EEPROM is not present, run manual init scripts */
1525 	if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
1526 		igb_reset_init_script_82575(hw);
1527 
1528 	/* Clear any pending interrupt events. */
1529 	wr32(E1000_IMC, 0xffffffff);
1530 	rd32(E1000_ICR);
1531 
1532 	/* Install any alternate MAC address into RAR0 */
1533 	ret_val = igb_check_alt_mac_addr(hw);
1534 
1535 	return ret_val;
1536 }
1537 
1538 /**
1539  *  igb_init_hw_82575 - Initialize hardware
1540  *  @hw: pointer to the HW structure
1541  *
1542  *  This inits the hardware readying it for operation.
1543  **/
1544 static s32 igb_init_hw_82575(struct e1000_hw *hw)
1545 {
1546 	struct e1000_mac_info *mac = &hw->mac;
1547 	s32 ret_val;
1548 	u16 i, rar_count = mac->rar_entry_count;
1549 
1550 	if ((hw->mac.type >= e1000_i210) &&
1551 	    !(igb_get_flash_presence_i210(hw))) {
1552 		ret_val = igb_pll_workaround_i210(hw);
1553 		if (ret_val)
1554 			return ret_val;
1555 	}
1556 
1557 	/* Initialize identification LED */
1558 	ret_val = igb_id_led_init(hw);
1559 	if (ret_val) {
1560 		hw_dbg("Error initializing identification LED\n");
1561 		/* This is not fatal and we should not stop init due to this */
1562 	}
1563 
1564 	/* Disabling VLAN filtering */
1565 	hw_dbg("Initializing the IEEE VLAN\n");
1566 	igb_clear_vfta(hw);
1567 
1568 	/* Setup the receive address */
1569 	igb_init_rx_addrs(hw, rar_count);
1570 
1571 	/* Zero out the Multicast HASH table */
1572 	hw_dbg("Zeroing the MTA\n");
1573 	for (i = 0; i < mac->mta_reg_count; i++)
1574 		array_wr32(E1000_MTA, i, 0);
1575 
1576 	/* Zero out the Unicast HASH table */
1577 	hw_dbg("Zeroing the UTA\n");
1578 	for (i = 0; i < mac->uta_reg_count; i++)
1579 		array_wr32(E1000_UTA, i, 0);
1580 
1581 	/* Setup link and flow control */
1582 	ret_val = igb_setup_link(hw);
1583 
1584 	/* Clear all of the statistics registers (clear on read).  It is
1585 	 * important that we do this after we have tried to establish link
1586 	 * because the symbol error count will increment wildly if there
1587 	 * is no link.
1588 	 */
1589 	igb_clear_hw_cntrs_82575(hw);
1590 	return ret_val;
1591 }
1592 
1593 /**
1594  *  igb_setup_copper_link_82575 - Configure copper link settings
1595  *  @hw: pointer to the HW structure
1596  *
1597  *  Configures the link for auto-neg or forced speed and duplex.  Then we check
1598  *  for link; once link is established, collision distance and flow control
1599  *  are configured.
1600  **/
1601 static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
1602 {
1603 	u32 ctrl;
1604 	s32  ret_val;
1605 	u32 phpm_reg;
1606 
1607 	ctrl = rd32(E1000_CTRL);
1608 	ctrl |= E1000_CTRL_SLU;
1609 	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
1610 	wr32(E1000_CTRL, ctrl);
1611 
1612 	/* Clear Go Link Disconnect bit on supported devices */
1613 	switch (hw->mac.type) {
1614 	case e1000_82580:
1615 	case e1000_i350:
1616 	case e1000_i210:
1617 	case e1000_i211:
1618 		phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT);
1619 		phpm_reg &= ~E1000_82580_PM_GO_LINKD;
1620 		wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg);
1621 		break;
1622 	default:
1623 		break;
1624 	}
1625 
1626 	ret_val = igb_setup_serdes_link_82575(hw);
1627 	if (ret_val)
1628 		goto out;
1629 
1630 	if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
1631 		/* allow time for the SFP cage to power up the phy */
1632 		msleep(300);
1633 
1634 		ret_val = hw->phy.ops.reset(hw);
1635 		if (ret_val) {
1636 			hw_dbg("Error resetting the PHY.\n");
1637 			goto out;
1638 		}
1639 	}
1640 	switch (hw->phy.type) {
1641 	case e1000_phy_i210:
1642 	case e1000_phy_m88:
1643 		switch (hw->phy.id) {
1644 		case I347AT4_E_PHY_ID:
1645 		case M88E1112_E_PHY_ID:
1646 		case M88E1543_E_PHY_ID:
1647 		case M88E1512_E_PHY_ID:
1648 		case I210_I_PHY_ID:
1649 			ret_val = igb_copper_link_setup_m88_gen2(hw);
1650 			break;
1651 		default:
1652 			ret_val = igb_copper_link_setup_m88(hw);
1653 			break;
1654 		}
1655 		break;
1656 	case e1000_phy_igp_3:
1657 		ret_val = igb_copper_link_setup_igp(hw);
1658 		break;
1659 	case e1000_phy_82580:
1660 		ret_val = igb_copper_link_setup_82580(hw);
1661 		break;
1662 	default:
1663 		ret_val = -E1000_ERR_PHY;
1664 		break;
1665 	}
1666 
1667 	if (ret_val)
1668 		goto out;
1669 
1670 	ret_val = igb_setup_copper_link(hw);
1671 out:
1672 	return ret_val;
1673 }
1674 
1675 /**
1676  *  igb_setup_serdes_link_82575 - Setup link for serdes
1677  *  @hw: pointer to the HW structure
1678  *
1679  *  Configure the physical coding sub-layer (PCS) link.  The PCS link is
1680  *  used on copper connections where the serialized gigabit media independent
1681  *  interface (sgmii), or serdes fiber is being used.  Configures the link
1682  *  for auto-negotiation or forces speed/duplex.
1683  **/
1684 static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
1685 {
1686 	u32 ctrl_ext, ctrl_reg, reg, anadv_reg;
1687 	bool pcs_autoneg;
1688 	s32 ret_val = 0;
1689 	u16 data;
1690 
1691 	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
1692 	    !igb_sgmii_active_82575(hw))
1693 		return ret_val;
1694 
1695 
1696 	/* On the 82575, SerDes loopback mode persists until it is
1697 	 * explicitly turned off or a power cycle is performed.  A read to
1698 	 * the register does not indicate its status.  Therefore, we ensure
1699 	 * loopback mode is disabled during initialization.
1700 	 */
1701 	wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
1702 
1703 	/* power on the sfp cage if present and turn on I2C */
1704 	ctrl_ext = rd32(E1000_CTRL_EXT);
1705 	ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
1706 	ctrl_ext |= E1000_CTRL_I2C_ENA;
1707 	wr32(E1000_CTRL_EXT, ctrl_ext);
1708 
1709 	ctrl_reg = rd32(E1000_CTRL);
1710 	ctrl_reg |= E1000_CTRL_SLU;
1711 
1712 	if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) {
1713 		/* set both sw defined pins */
1714 		ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;
1715 
1716 		/* Set switch control to serdes energy detect */
1717 		reg = rd32(E1000_CONNSW);
1718 		reg |= E1000_CONNSW_ENRGSRC;
1719 		wr32(E1000_CONNSW, reg);
1720 	}
1721 
1722 	reg = rd32(E1000_PCS_LCTL);
1723 
1724 	/* default pcs_autoneg to the same setting as mac autoneg */
1725 	pcs_autoneg = hw->mac.autoneg;
1726 
1727 	switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
1728 	case E1000_CTRL_EXT_LINK_MODE_SGMII:
1729 		/* sgmii mode lets the phy handle forcing speed/duplex */
1730 		pcs_autoneg = true;
1731 		/* autoneg time out should be disabled for SGMII mode */
1732 		reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
1733 		break;
1734 	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
1735 		/* disable PCS autoneg and support parallel detect only */
1736 		pcs_autoneg = false;
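		/* no break here: non-SGMII link modes intentionally fall through
		 * to the forced 1000/Full handling in the default case below
		 */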
1737 	default:
1738 		if (hw->mac.type == e1000_82575 ||
1739 		    hw->mac.type == e1000_82576) {
1740 			ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
1741 			if (ret_val) {
1742 				hw_dbg("NVM Read Error\n");
1743 				return ret_val;
1744 			}
1745 
1746 			if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT)
1747 				pcs_autoneg = false;
1748 		}
1749 
1750 		/* non-SGMII modes only support a speed of 1000/Full for the
1751 		 * link so it is best to just force the MAC and let the pcs
1752 		 * link either autoneg or be forced to 1000/Full
1753 		 */
1754 		ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
1755 				E1000_CTRL_FD | E1000_CTRL_FRCDPX;
1756 
1757 		/* set speed of 1000/Full if speed/duplex is forced */
1758 		reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
1759 		break;
1760 	}
1761 
1762 	wr32(E1000_CTRL, ctrl_reg);
1763 
1764 	/* New SerDes mode allows for forcing speed or autonegotiating speed
1765 	 * at 1gb. Autoneg should be the default set by most drivers. This is the
1766 	 * mode that will be compatible with older link partners and switches.
1767 	 * However, both are supported by the hardware and some drivers/tools.
1768 	 */
1769 	reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
1770 		E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
1771 
1772 	if (pcs_autoneg) {
1773 		/* Set PCS register for autoneg */
1774 		reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
1775 		       E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
1776 
1777 		/* Disable force flow control for autoneg */
1778 		reg &= ~E1000_PCS_LCTL_FORCE_FCTRL;
1779 
1780 		/* Configure flow control advertisement for autoneg */
1781 		anadv_reg = rd32(E1000_PCS_ANADV);
1782 		anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE);
1783 		switch (hw->fc.requested_mode) {
1784 		case e1000_fc_full:
1785 		case e1000_fc_rx_pause:
1786 			anadv_reg |= E1000_TXCW_ASM_DIR;
1787 			anadv_reg |= E1000_TXCW_PAUSE;
1788 			break;
1789 		case e1000_fc_tx_pause:
1790 			anadv_reg |= E1000_TXCW_ASM_DIR;
1791 			break;
1792 		default:
1793 			break;
1794 		}
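		/* Summary of the advertisement bits programmed above; the
		 * PAUSE/ASM_DIR encoding roughly follows IEEE 802.3 Annex 28B:
		 *   e1000_fc_full / e1000_fc_rx_pause: PAUSE + ASM_DIR
		 *   e1000_fc_tx_pause:                  ASM_DIR only
		 *   e1000_fc_none (default):            neither bit set
		 */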
1795 		wr32(E1000_PCS_ANADV, anadv_reg);
1796 
1797 		hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
1798 	} else {
1799 		/* Set PCS register for forced link */
1800 		reg |= E1000_PCS_LCTL_FSD;        /* Force Speed */
1801 
1802 		/* Force flow control for forced link */
1803 		reg |= E1000_PCS_LCTL_FORCE_FCTRL;
1804 
1805 		hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
1806 	}
1807 
1808 	wr32(E1000_PCS_LCTL, reg);
1809 
1810 	if (!pcs_autoneg && !igb_sgmii_active_82575(hw))
1811 		igb_force_mac_fc(hw);
1812 
1813 	return ret_val;
1814 }
1815 
1816 /**
1817  *  igb_sgmii_active_82575 - Return sgmii state
1818  *  @hw: pointer to the HW structure
1819  *
1820  *  82575 silicon has a serialized gigabit media independent interface (sgmii)
1821  *  which can be enabled for use in embedded applications.  Simply
1822  *  return the current state of the sgmii interface.
1823  **/
1824 static bool igb_sgmii_active_82575(struct e1000_hw *hw)
1825 {
1826 	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
1827 	return dev_spec->sgmii_active;
1828 }
1829 
1830 /**
1831  *  igb_reset_init_script_82575 - Inits HW defaults after reset
1832  *  @hw: pointer to the HW structure
1833  *
1834  *  Inits recommended HW defaults after a reset when there is no EEPROM
1835  *  detected. This is only for the 82575.
1836  **/
1837 static s32 igb_reset_init_script_82575(struct e1000_hw *hw)
1838 {
1839 	if (hw->mac.type == e1000_82575) {
1840 		hw_dbg("Running reset init script for 82575\n");
1841 		/* SerDes configuration via SERDESCTRL */
1842 		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C);
1843 		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78);
1844 		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23);
1845 		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15);
1846 
1847 		/* CCM configuration via CCMCTL register */
1848 		igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00);
1849 		igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00);
1850 
1851 		/* PCIe lanes configuration */
1852 		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC);
1853 		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF);
1854 		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05);
1855 		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81);
1856 
1857 		/* PCIe PLL Configuration */
1858 		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47);
1859 		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00);
1860 		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00);
1861 	}
1862 
1863 	return 0;
1864 }
1865 
1866 /**
1867  *  igb_read_mac_addr_82575 - Read device MAC address
1868  *  @hw: pointer to the HW structure
1869  **/
1870 static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
1871 {
1872 	s32 ret_val = 0;
1873 
1874 	/* If there's an alternate MAC address place it in RAR0
1875 	 * so that it will override the Si installed default perm
1876 	 * address.
1877 	 */
1878 	ret_val = igb_check_alt_mac_addr(hw);
1879 	if (ret_val)
1880 		goto out;
1881 
1882 	ret_val = igb_read_mac_addr(hw);
1883 
1884 out:
1885 	return ret_val;
1886 }
1887 
1888 /**
1889  * igb_power_down_phy_copper_82575 - Remove link during PHY power down
1890  * @hw: pointer to the HW structure
1891  *
1892  * In the case of a PHY power down to save power, to turn off link during a
1893  * driver unload, or when wake on LAN is not enabled, remove the link.
1894  **/
1895 void igb_power_down_phy_copper_82575(struct e1000_hw *hw)
1896 {
1897 	/* If the management interface is not enabled, then power down */
1898 	if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw)))
1899 		igb_power_down_phy_copper(hw);
1900 }
1901 
1902 /**
1903  *  igb_clear_hw_cntrs_82575 - Clear device specific hardware counters
1904  *  @hw: pointer to the HW structure
1905  *
1906  *  Clears the hardware counters by reading the counter registers.
1907  **/
1908 static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw)
1909 {
1910 	igb_clear_hw_cntrs_base(hw);
1911 
1912 	rd32(E1000_PRC64);
1913 	rd32(E1000_PRC127);
1914 	rd32(E1000_PRC255);
1915 	rd32(E1000_PRC511);
1916 	rd32(E1000_PRC1023);
1917 	rd32(E1000_PRC1522);
1918 	rd32(E1000_PTC64);
1919 	rd32(E1000_PTC127);
1920 	rd32(E1000_PTC255);
1921 	rd32(E1000_PTC511);
1922 	rd32(E1000_PTC1023);
1923 	rd32(E1000_PTC1522);
1924 
1925 	rd32(E1000_ALGNERRC);
1926 	rd32(E1000_RXERRC);
1927 	rd32(E1000_TNCRS);
1928 	rd32(E1000_CEXTERR);
1929 	rd32(E1000_TSCTC);
1930 	rd32(E1000_TSCTFC);
1931 
1932 	rd32(E1000_MGTPRC);
1933 	rd32(E1000_MGTPDC);
1934 	rd32(E1000_MGTPTC);
1935 
1936 	rd32(E1000_IAC);
1937 	rd32(E1000_ICRXOC);
1938 
1939 	rd32(E1000_ICRXPTC);
1940 	rd32(E1000_ICRXATC);
1941 	rd32(E1000_ICTXPTC);
1942 	rd32(E1000_ICTXATC);
1943 	rd32(E1000_ICTXQEC);
1944 	rd32(E1000_ICTXQMTC);
1945 	rd32(E1000_ICRXDMTC);
1946 
1947 	rd32(E1000_CBTMPC);
1948 	rd32(E1000_HTDPMC);
1949 	rd32(E1000_CBRMPC);
1950 	rd32(E1000_RPTHC);
1951 	rd32(E1000_HGPTC);
1952 	rd32(E1000_HTCBDPC);
1953 	rd32(E1000_HGORCL);
1954 	rd32(E1000_HGORCH);
1955 	rd32(E1000_HGOTCL);
1956 	rd32(E1000_HGOTCH);
1957 	rd32(E1000_LENERRS);
1958 
1959 	/* This register should not be read in copper configurations */
1960 	if (hw->phy.media_type == e1000_media_type_internal_serdes ||
1961 	    igb_sgmii_active_82575(hw))
1962 		rd32(E1000_SCVPC);
1963 }
1964 
1965 /**
1966  *  igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable
1967  *  @hw: pointer to the HW structure
1968  *
1969  *  After rx enable, if manageability is enabled, then there is likely some
1970  *  bad data at the start of the fifo and possibly in the DMA fifo. This
1971  *  function clears the fifos and flushes any packets that came in while rx
1972  *  was being enabled.
1973  **/
1974 void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
1975 {
1976 	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
1977 	int i, ms_wait;
1978 
1979 	/* disable IPv6 options as per hardware errata */
1980 	rfctl = rd32(E1000_RFCTL);
1981 	rfctl |= E1000_RFCTL_IPV6_EX_DIS;
1982 	wr32(E1000_RFCTL, rfctl);
1983 
1984 	if (hw->mac.type != e1000_82575 ||
1985 	    !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN))
1986 		return;
1987 
1988 	/* Disable all RX queues */
1989 	for (i = 0; i < 4; i++) {
1990 		rxdctl[i] = rd32(E1000_RXDCTL(i));
1991 		wr32(E1000_RXDCTL(i),
1992 		     rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
1993 	}
1994 	/* Poll all queues to verify they have shut down */
1995 	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
1996 		usleep_range(1000, 2000);
1997 		rx_enabled = 0;
1998 		for (i = 0; i < 4; i++)
1999 			rx_enabled |= rd32(E1000_RXDCTL(i));
2000 		if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
2001 			break;
2002 	}
2003 
2004 	if (ms_wait == 10)
2005 		hw_dbg("Queue disable timed out after 10ms\n");
2006 
2007 	/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
2008 	 * incoming packets are rejected.  Then set RCTL.EN and wait 2 ms so that
2009 	 * any packet that was arriving while RCTL.EN was being set is flushed
2010 	 */
2011 	wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);
2012 
2013 	rlpml = rd32(E1000_RLPML);
2014 	wr32(E1000_RLPML, 0);
2015 
2016 	rctl = rd32(E1000_RCTL);
2017 	temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
2018 	temp_rctl |= E1000_RCTL_LPE;
2019 
2020 	wr32(E1000_RCTL, temp_rctl);
2021 	wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN);
2022 	wrfl();
2023 	usleep_range(2000, 3000);
2024 
2025 	/* Enable RX queues that were previously enabled and restore our
2026 	 * previous state
2027 	 */
2028 	for (i = 0; i < 4; i++)
2029 		wr32(E1000_RXDCTL(i), rxdctl[i]);
2030 	wr32(E1000_RCTL, rctl);
2031 	wrfl();
2032 
2033 	wr32(E1000_RLPML, rlpml);
2034 	wr32(E1000_RFCTL, rfctl);
2035 
2036 	/* Flush receive errors generated by workaround */
2037 	rd32(E1000_ROC);
2038 	rd32(E1000_RNBC);
2039 	rd32(E1000_MPC);
2040 }
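
/* Illustrative summary of the flush sequence above:
 *   1. disable the four RX queues and poll RXDCTL until they report disabled
 *   2. reject every frame (RLPML = 0, LPE set, SBP/LEF cleared) and briefly
 *      re-assert RCTL.EN so any stale data is drained
 *   3. restore RXDCTL/RCTL/RLPML/RFCTL and read ROC/RNBC/MPC to clear the
 *      error counters inflated by the workaround
 */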
2041 
2042 /**
2043  *  igb_set_pcie_completion_timeout - set pci-e completion timeout
2044  *  @hw: pointer to the HW structure
2045  *
2046  *  The defaults for 82575 and 82576 should be in the range of 50us to 50ms;
2047  *  however, the hardware default for these parts is 500us to 1ms, which is less
2048  *  than the 10ms recommended by the pci-e spec.  To address this we need to
2049  *  increase the value to either 10ms to 200ms for capability version 1 config,
2050  *  or 16ms to 55ms for version 2.
2051  **/
2052 static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
2053 {
2054 	u32 gcr = rd32(E1000_GCR);
2055 	s32 ret_val = 0;
2056 	u16 pcie_devctl2;
2057 
2058 	/* only take action if timeout value is defaulted to 0 */
2059 	if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
2060 		goto out;
2061 
2062 	/* if capabilities version is type 1 we can write the
2063 	 * timeout of 10ms to 200ms through the GCR register
2064 	 */
2065 	if (!(gcr & E1000_GCR_CAP_VER2)) {
2066 		gcr |= E1000_GCR_CMPL_TMOUT_10ms;
2067 		goto out;
2068 	}
2069 
2070 	/* for version 2 capabilities we need to write the config space
2071 	 * directly in order to set the completion timeout value for
2072 	 * 16ms to 55ms
2073 	 */
2074 	ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
2075 					&pcie_devctl2);
2076 	if (ret_val)
2077 		goto out;
2078 
2079 	pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
2080 
2081 	ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
2082 					 &pcie_devctl2);
2083 out:
2084 	/* disable completion timeout resend */
2085 	gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
2086 
2087 	wr32(E1000_GCR, gcr);
2088 	return ret_val;
2089 }
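
/* In short: capability version 1 parts get the longer 10ms-200ms timeout
 * programmed through GCR, while version 2 parts have the 16ms-55ms value
 * written directly into the PCIe Device Control 2 register in config space;
 * in both cases completion timeout resend is disabled before GCR is written
 * back.
 */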
2090 
2091 /**
2092  *  igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
2093  *  @hw: pointer to the hardware struct
2094  *  @enable: state to enter, either enabled or disabled
2095  *  @pf: Physical Function pool - do not set anti-spoofing for the PF
2096  *
2097  *  enables/disables L2 switch anti-spoofing functionality.
2098  **/
2099 void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
2100 {
2101 	u32 reg_val, reg_offset;
2102 
2103 	switch (hw->mac.type) {
2104 	case e1000_82576:
2105 		reg_offset = E1000_DTXSWC;
2106 		break;
2107 	case e1000_i350:
2108 	case e1000_i354:
2109 		reg_offset = E1000_TXSWC;
2110 		break;
2111 	default:
2112 		return;
2113 	}
2114 
2115 	reg_val = rd32(reg_offset);
2116 	if (enable) {
2117 		reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK |
2118 			     E1000_DTXSWC_VLAN_SPOOF_MASK);
2119 		/* The PF can spoof - it has to in order to
2120 		 * support emulation mode NICs
2121 		 */
2122 		reg_val ^= (BIT(pf) | BIT(pf + MAX_NUM_VFS));
2123 	} else {
2124 		reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
2125 			     E1000_DTXSWC_VLAN_SPOOF_MASK);
2126 	}
2127 	wr32(reg_offset, reg_val);
2128 }
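
/* Worked example (illustrative; assumes pf == 0): enabling anti-spoofing
 * first sets every MAC and VLAN spoof-check bit, then the XOR of
 * BIT(0) | BIT(0 + MAX_NUM_VFS) clears the two bits that were just set for
 * the PF pool, leaving only the VF pools spoof-checked.
 */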
2129 
2130 /**
2131  *  igb_vmdq_set_loopback_pf - enable or disable vmdq loopback
2132  *  @hw: pointer to the hardware struct
2133  *  @enable: state to enter, either enabled or disabled
2134  *
2135  *  enables/disables L2 switch loopback functionality.
2136  **/
2137 void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
2138 {
2139 	u32 dtxswc;
2140 
2141 	switch (hw->mac.type) {
2142 	case e1000_82576:
2143 		dtxswc = rd32(E1000_DTXSWC);
2144 		if (enable)
2145 			dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
2146 		else
2147 			dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
2148 		wr32(E1000_DTXSWC, dtxswc);
2149 		break;
2150 	case e1000_i354:
2151 	case e1000_i350:
2152 		dtxswc = rd32(E1000_TXSWC);
2153 		if (enable)
2154 			dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
2155 		else
2156 			dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
2157 		wr32(E1000_TXSWC, dtxswc);
2158 		break;
2159 	default:
2160 		/* Currently no other hardware supports loopback */
2161 		break;
2162 	}
2163 
2164 }
2165 
2166 /**
2167  *  igb_vmdq_set_replication_pf - enable or disable vmdq replication
2168  *  @hw: pointer to the hardware struct
2169  *  @enable: state to enter, either enabled or disabled
2170  *
2171  *  enables/disables replication of packets across multiple pools.
2172  **/
2173 void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
2174 {
2175 	u32 vt_ctl = rd32(E1000_VT_CTL);
2176 
2177 	if (enable)
2178 		vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
2179 	else
2180 		vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;
2181 
2182 	wr32(E1000_VT_CTL, vt_ctl);
2183 }
2184 
2185 /**
2186  *  igb_read_phy_reg_82580 - Read 82580 MDI control register
2187  *  @hw: pointer to the HW structure
2188  *  @offset: register offset to be read
2189  *  @data: pointer to the read data
2190  *
2191  *  Reads the MDI control register in the PHY at offset and stores the
2192  *  information read to data.
2193  **/
2194 s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
2195 {
2196 	s32 ret_val;
2197 
2198 	ret_val = hw->phy.ops.acquire(hw);
2199 	if (ret_val)
2200 		goto out;
2201 
2202 	ret_val = igb_read_phy_reg_mdic(hw, offset, data);
2203 
2204 	hw->phy.ops.release(hw);
2205 
2206 out:
2207 	return ret_val;
2208 }
2209 
2210 /**
2211  *  igb_write_phy_reg_82580 - Write 82580 MDI control register
2212  *  @hw: pointer to the HW structure
2213  *  @offset: register offset to write to
2214  *  @data: data to write to register at offset
2215  *
2216  *  Writes data to MDI control register in the PHY at offset.
2217  **/
2218 s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
2219 {
2220 	s32 ret_val;
2221 
2222 
2223 	ret_val = hw->phy.ops.acquire(hw);
2224 	if (ret_val)
2225 		goto out;
2226 
2227 	ret_val = igb_write_phy_reg_mdic(hw, offset, data);
2228 
2229 	hw->phy.ops.release(hw);
2230 
2231 out:
2232 	return ret_val;
2233 }
2234 
2235 /**
2236  *  igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
2237  *  @hw: pointer to the HW structure
2238  *
2239  *  This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
2240  *  the values found in the EEPROM.  This addresses an issue in which these
2241  *  bits are not restored from EEPROM after reset.
2242  **/
2243 static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw)
2244 {
2245 	s32 ret_val = 0;
2246 	u32 mdicnfg;
2247 	u16 nvm_data = 0;
2248 
2249 	if (hw->mac.type != e1000_82580)
2250 		goto out;
2251 	if (!igb_sgmii_active_82575(hw))
2252 		goto out;
2253 
2254 	ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
2255 				   NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
2256 				   &nvm_data);
2257 	if (ret_val) {
2258 		hw_dbg("NVM Read Error\n");
2259 		goto out;
2260 	}
2261 
2262 	mdicnfg = rd32(E1000_MDICNFG);
2263 	if (nvm_data & NVM_WORD24_EXT_MDIO)
2264 		mdicnfg |= E1000_MDICNFG_EXT_MDIO;
2265 	if (nvm_data & NVM_WORD24_COM_MDIO)
2266 		mdicnfg |= E1000_MDICNFG_COM_MDIO;
2267 	wr32(E1000_MDICNFG, mdicnfg);
2268 out:
2269 	return ret_val;
2270 }
2271 
2272 /**
2273  *  igb_reset_hw_82580 - Reset hardware
2274  *  @hw: pointer to the HW structure
2275  *
2276  *  This resets function or entire device (all ports, etc.)
2277  *  to a known state.
2278  **/
2279 static s32 igb_reset_hw_82580(struct e1000_hw *hw)
2280 {
2281 	s32 ret_val = 0;
2282 	/* BH SW mailbox bit in SW_FW_SYNC */
2283 	u16 swmbsw_mask = E1000_SW_SYNCH_MB;
2284 	u32 ctrl;
2285 	bool global_device_reset = hw->dev_spec._82575.global_device_reset;
2286 
2287 	hw->dev_spec._82575.global_device_reset = false;
2288 
2289 	/* due to hw errata, global device reset doesn't always
2290 	 * work on 82580
2291 	 */
2292 	if (hw->mac.type == e1000_82580)
2293 		global_device_reset = false;
2294 
2295 	/* Get current control state. */
2296 	ctrl = rd32(E1000_CTRL);
2297 
2298 	/* Prevent the PCI-E bus from sticking if there is no TLP connection
2299 	 * on the last TLP read/write transaction when MAC is reset.
2300 	 */
2301 	ret_val = igb_disable_pcie_master(hw);
2302 	if (ret_val)
2303 		hw_dbg("PCI-E Master disable polling has failed.\n");
2304 
2305 	hw_dbg("Masking off all interrupts\n");
2306 	wr32(E1000_IMC, 0xffffffff);
2307 	wr32(E1000_RCTL, 0);
2308 	wr32(E1000_TCTL, E1000_TCTL_PSP);
2309 	wrfl();
2310 
2311 	usleep_range(10000, 11000);
2312 
2313 	/* Determine whether or not a global dev reset is requested */
2314 	if (global_device_reset &&
2315 		hw->mac.ops.acquire_swfw_sync(hw, swmbsw_mask))
2316 			global_device_reset = false;
2317 
2318 	if (global_device_reset &&
2319 		!(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET))
2320 		ctrl |= E1000_CTRL_DEV_RST;
2321 	else
2322 		ctrl |= E1000_CTRL_RST;
2323 
2324 	wr32(E1000_CTRL, ctrl);
2325 	wrfl();
2326 
2327 	/* Add delay to ensure DEV_RST has time to complete */
2328 	if (global_device_reset)
2329 		usleep_range(5000, 6000);
2330 
2331 	ret_val = igb_get_auto_rd_done(hw);
2332 	if (ret_val) {
2333 		/* When auto config read does not complete, do not
2334 		 * return with an error. This can happen in situations
2335 		 * where there is no eeprom; failing here would prevent getting link.
2336 		 */
2337 		hw_dbg("Auto Read Done did not complete\n");
2338 	}
2339 
2340 	/* clear global device reset status bit */
2341 	wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET);
2342 
2343 	/* Clear any pending interrupt events. */
2344 	wr32(E1000_IMC, 0xffffffff);
2345 	rd32(E1000_ICR);
2346 
2347 	ret_val = igb_reset_mdicnfg_82580(hw);
2348 	if (ret_val)
2349 		hw_dbg("Could not reset MDICNFG based on EEPROM\n");
2350 
2351 	/* Install any alternate MAC address into RAR0 */
2352 	ret_val = igb_check_alt_mac_addr(hw);
2353 
2354 	/* Release semaphore */
2355 	if (global_device_reset)
2356 		hw->mac.ops.release_swfw_sync(hw, swmbsw_mask);
2357 
2358 	return ret_val;
2359 }
2360 
2361 /**
2362  *  igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size
2363  *  @data: data received by reading RXPBS register
2364  *
2365  *  The 82580 uses a table based approach for packet buffer allocation sizes.
2366  *  This function converts the retrieved value into the correct table value
2367  *     0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7
2368  *  0x0 36  72 144   1   2   4   8  16
2369  *  0x8 35  70 140 rsv rsv rsv rsv rsv
2370  */
2371 u16 igb_rxpbs_adjust_82580(u32 data)
2372 {
2373 	u16 ret_val = 0;
2374 
2375 	if (data < ARRAY_SIZE(e1000_82580_rxpbs_table))
2376 		ret_val = e1000_82580_rxpbs_table[data];
2377 
2378 	return ret_val;
2379 }
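
/* Example: a raw RXPBS field of 0x2 maps to 144 via the table above, while
 * any index at or beyond ARRAY_SIZE(e1000_82580_rxpbs_table) is reserved and
 * yields 0.
 */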
2380 
2381 /**
2382  *  igb_validate_nvm_checksum_with_offset - Validate EEPROM
2383  *  checksum
2384  *  @hw: pointer to the HW structure
2385  *  @offset: offset in words of the checksum protected region
2386  *
2387  *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
2388  *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
2389  **/
2390 static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
2391 						 u16 offset)
2392 {
2393 	s32 ret_val = 0;
2394 	u16 checksum = 0;
2395 	u16 i, nvm_data;
2396 
2397 	for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
2398 		ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
2399 		if (ret_val) {
2400 			hw_dbg("NVM Read Error\n");
2401 			goto out;
2402 		}
2403 		checksum += nvm_data;
2404 	}
2405 
2406 	if (checksum != (u16) NVM_SUM) {
2407 		hw_dbg("NVM Checksum Invalid\n");
2408 		ret_val = -E1000_ERR_NVM;
2409 		goto out;
2410 	}
2411 
2412 out:
2413 	return ret_val;
2414 }
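
/* Checksum example (illustrative): if the data words before the checksum
 * word sum to 0xA000 (mod 2^16), the checksum word at NVM_CHECKSUM_REG must
 * be 0x1ABA so that the running 16-bit sum, checksum word included, equals
 * NVM_SUM (0xBABA).
 */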
2415 
2416 /**
2417  *  igb_update_nvm_checksum_with_offset - Update EEPROM
2418  *  checksum
2419  *  @hw: pointer to the HW structure
2420  *  @offset: offset in words of the checksum protected region
2421  *
2422  *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
2423  *  up to the checksum.  Then calculates the EEPROM checksum and writes the
2424  *  value to the EEPROM.
2425  **/
2426 static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
2427 {
2428 	s32 ret_val;
2429 	u16 checksum = 0;
2430 	u16 i, nvm_data;
2431 
2432 	for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
2433 		ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
2434 		if (ret_val) {
2435 			hw_dbg("NVM Read Error while updating checksum.\n");
2436 			goto out;
2437 		}
2438 		checksum += nvm_data;
2439 	}
2440 	checksum = (u16) NVM_SUM - checksum;
2441 	ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
2442 				&checksum);
2443 	if (ret_val)
2444 		hw_dbg("NVM Write Error while updating checksum.\n");
2445 
2446 out:
2447 	return ret_val;
2448 }
2449 
2450 /**
2451  *  igb_validate_nvm_checksum_82580 - Validate EEPROM checksum
2452  *  @hw: pointer to the HW structure
2453  *
2454  *  Calculates the EEPROM section checksum by reading/adding each word of
2455  *  the EEPROM and then verifies that the sum of the EEPROM is
2456  *  equal to 0xBABA.
2457  **/
2458 static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw)
2459 {
2460 	s32 ret_val = 0;
2461 	u16 eeprom_regions_count = 1;
2462 	u16 j, nvm_data;
2463 	u16 nvm_offset;
2464 
2465 	ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
2466 	if (ret_val) {
2467 		hw_dbg("NVM Read Error\n");
2468 		goto out;
2469 	}
2470 
2471 	if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
2472 		/* if checksums compatibility bit is set validate checksums
2473 		 * for all 4 ports.
2474 		 */
2475 		eeprom_regions_count = 4;
2476 	}
2477 
2478 	for (j = 0; j < eeprom_regions_count; j++) {
2479 		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2480 		ret_val = igb_validate_nvm_checksum_with_offset(hw,
2481 								nvm_offset);
2482 		if (ret_val != 0)
2483 			goto out;
2484 	}
2485 
2486 out:
2487 	return ret_val;
2488 }
2489 
2490 /**
2491  *  igb_update_nvm_checksum_82580 - Update EEPROM checksum
2492  *  @hw: pointer to the HW structure
2493  *
2494  *  Updates the EEPROM section checksums for all 4 ports by reading/adding
2495  *  each word of the EEPROM up to the checksum.  Then calculates the EEPROM
2496  *  checksum and writes the value to the EEPROM.
2497  **/
2498 static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
2499 {
2500 	s32 ret_val;
2501 	u16 j, nvm_data;
2502 	u16 nvm_offset;
2503 
2504 	ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
2505 	if (ret_val) {
2506 		hw_dbg("NVM Read Error while updating checksum compatibility bit.\n");
2507 		goto out;
2508 	}
2509 
2510 	if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) {
2511 		/* set compatibility bit to validate checksums appropriately */
2512 		nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK;
2513 		ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
2514 					&nvm_data);
2515 		if (ret_val) {
2516 			hw_dbg("NVM Write Error while updating checksum compatibility bit.\n");
2517 			goto out;
2518 		}
2519 	}
2520 
2521 	for (j = 0; j < 4; j++) {
2522 		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2523 		ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
2524 		if (ret_val)
2525 			goto out;
2526 	}
2527 
2528 out:
2529 	return ret_val;
2530 }
2531 
2532 /**
2533  *  igb_validate_nvm_checksum_i350 - Validate EEPROM checksum
2534  *  @hw: pointer to the HW structure
2535  *
2536  *  Calculates the EEPROM section checksum by reading/adding each word of
2537  *  the EEPROM and then verifies that the sum of the EEPROM is
2538  *  equal to 0xBABA.
2539  **/
2540 static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw)
2541 {
2542 	s32 ret_val = 0;
2543 	u16 j;
2544 	u16 nvm_offset;
2545 
2546 	for (j = 0; j < 4; j++) {
2547 		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2548 		ret_val = igb_validate_nvm_checksum_with_offset(hw,
2549 								nvm_offset);
2550 		if (ret_val != 0)
2551 			goto out;
2552 	}
2553 
2554 out:
2555 	return ret_val;
2556 }
2557 
2558 /**
2559  *  igb_update_nvm_checksum_i350 - Update EEPROM checksum
2560  *  @hw: pointer to the HW structure
2561  *
2562  *  Updates the EEPROM section checksums for all 4 ports by reading/adding
2563  *  each word of the EEPROM up to the checksum.  Then calculates the EEPROM
2564  *  checksum and writes the value to the EEPROM.
2565  **/
2566 static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw)
2567 {
2568 	s32 ret_val = 0;
2569 	u16 j;
2570 	u16 nvm_offset;
2571 
2572 	for (j = 0; j < 4; j++) {
2573 		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2574 		ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
2575 		if (ret_val != 0)
2576 			goto out;
2577 	}
2578 
2579 out:
2580 	return ret_val;
2581 }
2582 
2583 /**
2584  *  __igb_access_emi_reg - Read/write EMI register
2585  *  @hw: pointer to the HW structure
2586  *  @address: EMI address to program
2587  *  @data: pointer to value to read/write from/to the EMI address
2588  *  @read: boolean flag to indicate read or write
2589  **/
2590 static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address,
2591 				  u16 *data, bool read)
2592 {
2593 	s32 ret_val = 0;
2594 
2595 	ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address);
2596 	if (ret_val)
2597 		return ret_val;
2598 
2599 	if (read)
2600 		ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data);
2601 	else
2602 		ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data);
2603 
2604 	return ret_val;
2605 }
2606 
2607 /**
2608  *  igb_read_emi_reg - Read Extended Management Interface register
2609  *  @hw: pointer to the HW structure
2610  *  @addr: EMI address to program
2611  *  @data: value to be read from the EMI address
2612  **/
2613 s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
2614 {
2615 	return __igb_access_emi_reg(hw, addr, data, true);
2616 }
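
/* Usage sketch (illustrative only; EMI_EXAMPLE_ADDR is a placeholder, not a
 * define in this driver):
 *
 *	u16 val;
 *	s32 err = igb_read_emi_reg(hw, EMI_EXAMPLE_ADDR, &val);
 *	if (!err)
 *		hw_dbg("EMI 0x%04x = 0x%04x\n", EMI_EXAMPLE_ADDR, val);
 */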
2617 
2618 /**
2619  *  igb_set_eee_i350 - Enable/disable EEE support
2620  *  @hw: pointer to the HW structure
2621  *  @adv1G: boolean flag enabling 1G EEE advertisement
2622  *  @adv100M: boolean flag enabling 100M EEE advertisement
2623  *
2624  *  Enable/disable EEE based on setting in dev_spec structure.
2625  *
2626  **/
2627 s32 igb_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M)
2628 {
2629 	u32 ipcnfg, eeer;
2630 
2631 	if ((hw->mac.type < e1000_i350) ||
2632 	    (hw->phy.media_type != e1000_media_type_copper))
2633 		goto out;
2634 	ipcnfg = rd32(E1000_IPCNFG);
2635 	eeer = rd32(E1000_EEER);
2636 
2637 	/* enable or disable per user setting */
2638 	if (!(hw->dev_spec._82575.eee_disable)) {
2639 		u32 eee_su = rd32(E1000_EEE_SU);
2640 
2641 		if (adv100M)
2642 			ipcnfg |= E1000_IPCNFG_EEE_100M_AN;
2643 		else
2644 			ipcnfg &= ~E1000_IPCNFG_EEE_100M_AN;
2645 
2646 		if (adv1G)
2647 			ipcnfg |= E1000_IPCNFG_EEE_1G_AN;
2648 		else
2649 			ipcnfg &= ~E1000_IPCNFG_EEE_1G_AN;
2650 
2651 		eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
2652 			E1000_EEER_LPI_FC);
2653 
2654 		/* This bit should not be set in normal operation. */
2655 		if (eee_su & E1000_EEE_SU_LPI_CLK_STP)
2656 			hw_dbg("LPI Clock Stop Bit should not be set!\n");
2657 
2658 	} else {
2659 		ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
2660 			E1000_IPCNFG_EEE_100M_AN);
2661 		eeer &= ~(E1000_EEER_TX_LPI_EN |
2662 			E1000_EEER_RX_LPI_EN |
2663 			E1000_EEER_LPI_FC);
2664 	}
2665 	wr32(E1000_IPCNFG, ipcnfg);
2666 	wr32(E1000_EEER, eeer);
2667 	rd32(E1000_IPCNFG);
2668 	rd32(E1000_EEER);
2669 out:
2670 
2671 	return 0;
2672 }
2673 
2674 /**
2675  *  igb_set_eee_i354 - Enable/disable EEE support
2676  *  @hw: pointer to the HW structure
2677  *  @adv1G: boolean flag enabling 1G EEE advertisement
2678  *  @adv100M: boolean flag enabling 100M EEE advertisement
2679  *
2680  *  Enable/disable EEE legacy mode based on setting in dev_spec structure.
2681  *
2682  **/
2683 s32 igb_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M)
2684 {
2685 	struct e1000_phy_info *phy = &hw->phy;
2686 	s32 ret_val = 0;
2687 	u16 phy_data;
2688 
2689 	if ((hw->phy.media_type != e1000_media_type_copper) ||
2690 	    ((phy->id != M88E1543_E_PHY_ID) &&
2691 	     (phy->id != M88E1512_E_PHY_ID)))
2692 		goto out;
2693 
2694 	if (!hw->dev_spec._82575.eee_disable) {
2695 		/* Switch to PHY page 18. */
2696 		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18);
2697 		if (ret_val)
2698 			goto out;
2699 
2700 		ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1,
2701 					    &phy_data);
2702 		if (ret_val)
2703 			goto out;
2704 
2705 		phy_data |= E1000_M88E1543_EEE_CTRL_1_MS;
2706 		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1,
2707 					     phy_data);
2708 		if (ret_val)
2709 			goto out;
2710 
2711 		/* Return the PHY to page 0. */
2712 		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
2713 		if (ret_val)
2714 			goto out;
2715 
2716 		/* Turn on EEE advertisement. */
2717 		ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
2718 					     E1000_EEE_ADV_DEV_I354,
2719 					     &phy_data);
2720 		if (ret_val)
2721 			goto out;
2722 
2723 		if (adv100M)
2724 			phy_data |= E1000_EEE_ADV_100_SUPPORTED;
2725 		else
2726 			phy_data &= ~E1000_EEE_ADV_100_SUPPORTED;
2727 
2728 		if (adv1G)
2729 			phy_data |= E1000_EEE_ADV_1000_SUPPORTED;
2730 		else
2731 			phy_data &= ~E1000_EEE_ADV_1000_SUPPORTED;
2732 
2733 		ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
2734 						E1000_EEE_ADV_DEV_I354,
2735 						phy_data);
2736 	} else {
2737 		/* Turn off EEE advertisement. */
2738 		ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
2739 					     E1000_EEE_ADV_DEV_I354,
2740 					     &phy_data);
2741 		if (ret_val)
2742 			goto out;
2743 
2744 		phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED |
2745 			      E1000_EEE_ADV_1000_SUPPORTED);
2746 		ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
2747 					      E1000_EEE_ADV_DEV_I354,
2748 					      phy_data);
2749 	}
2750 
2751 out:
2752 	return ret_val;
2753 }
2754 
2755 /**
2756  *  igb_get_eee_status_i354 - Get EEE status
2757  *  @hw: pointer to the HW structure
2758  *  @status: EEE status
2759  *
2760  *  Get EEE status by guessing based on whether Tx or Rx LPI indications have
2761  *  been received.
2762  **/
2763 s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status)
2764 {
2765 	struct e1000_phy_info *phy = &hw->phy;
2766 	s32 ret_val = 0;
2767 	u16 phy_data;
2768 
2769 	/* Check if EEE is supported on this device. */
2770 	if ((hw->phy.media_type != e1000_media_type_copper) ||
2771 	    ((phy->id != M88E1543_E_PHY_ID) &&
2772 	     (phy->id != M88E1512_E_PHY_ID)))
2773 		goto out;
2774 
2775 	ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
2776 				     E1000_PCS_STATUS_DEV_I354,
2777 				     &phy_data);
2778 	if (ret_val)
2779 		goto out;
2780 
2781 	*status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD |
2782 			      E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false;
2783 
2784 out:
2785 	return ret_val;
2786 }
2787 
2788 static const u8 e1000_emc_temp_data[4] = {
2789 	E1000_EMC_INTERNAL_DATA,
2790 	E1000_EMC_DIODE1_DATA,
2791 	E1000_EMC_DIODE2_DATA,
2792 	E1000_EMC_DIODE3_DATA
2793 };
2794 static const u8 e1000_emc_therm_limit[4] = {
2795 	E1000_EMC_INTERNAL_THERM_LIMIT,
2796 	E1000_EMC_DIODE1_THERM_LIMIT,
2797 	E1000_EMC_DIODE2_THERM_LIMIT,
2798 	E1000_EMC_DIODE3_THERM_LIMIT
2799 };
2800 
2801 #ifdef CONFIG_IGB_HWMON
2802 /**
2803  *  igb_get_thermal_sensor_data_generic - Gathers thermal sensor data
2804  *  @hw: pointer to hardware structure
2805  *
2806  *  Updates the temperatures in mac.thermal_sensor_data
2807  **/
2808 static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
2809 {
2810 	u16 ets_offset;
2811 	u16 ets_cfg;
2812 	u16 ets_sensor;
2813 	u8  num_sensors;
2814 	u8  sensor_index;
2815 	u8  sensor_location;
2816 	u8  i;
2817 	struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
2818 
2819 	if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
2820 		return E1000_NOT_IMPLEMENTED;
2821 
2822 	data->sensor[0].temp = (rd32(E1000_THMJT) & 0xFF);
2823 
2824 	/* Return the internal sensor only if ETS is unsupported */
2825 	hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
2826 	if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
2827 		return 0;
2828 
2829 	hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
2830 	if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
2831 	    != NVM_ETS_TYPE_EMC)
2832 		return E1000_NOT_IMPLEMENTED;
2833 
2834 	num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
2835 	if (num_sensors > E1000_MAX_SENSORS)
2836 		num_sensors = E1000_MAX_SENSORS;
2837 
2838 	for (i = 1; i < num_sensors; i++) {
2839 		hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
2840 		sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
2841 				NVM_ETS_DATA_INDEX_SHIFT);
2842 		sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
2843 				   NVM_ETS_DATA_LOC_SHIFT);
2844 
2845 		if (sensor_location != 0)
2846 			hw->phy.ops.read_i2c_byte(hw,
2847 					e1000_emc_temp_data[sensor_index],
2848 					E1000_I2C_THERMAL_SENSOR_ADDR,
2849 					&data->sensor[i].temp);
2850 	}
2851 	return 0;
2852 }
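
/* Note on the ETS records read above: each per-sensor NVM word carries a
 * data index (which EMC register to query over I2C) and a location code;
 * sensors whose location decodes to 0 are treated as not present and their
 * temperature is left untouched.
 */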
2853 
2854 /**
2855  *  igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds
2856  *  @hw: pointer to hardware structure
2857  *
2858  *  Sets the thermal sensor thresholds according to the NVM map
2859  *  and save off the threshold and location values into mac.thermal_sensor_data
2860  **/
2861 static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
2862 {
2863 	u16 ets_offset;
2864 	u16 ets_cfg;
2865 	u16 ets_sensor;
2866 	u8  low_thresh_delta;
2867 	u8  num_sensors;
2868 	u8  sensor_index;
2869 	u8  sensor_location;
2870 	u8  therm_limit;
2871 	u8  i;
2872 	struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
2873 
2874 	if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
2875 		return E1000_NOT_IMPLEMENTED;
2876 
2877 	memset(data, 0, sizeof(struct e1000_thermal_sensor_data));
2878 
2879 	data->sensor[0].location = 0x1;
2880 	data->sensor[0].caution_thresh =
2881 		(rd32(E1000_THHIGHTC) & 0xFF);
2882 	data->sensor[0].max_op_thresh =
2883 		(rd32(E1000_THLOWTC) & 0xFF);
2884 
2885 	/* Return the internal sensor only if ETS is unsupported */
2886 	hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
2887 	if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
2888 		return 0;
2889 
2890 	hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
2891 	if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
2892 	    != NVM_ETS_TYPE_EMC)
2893 		return E1000_NOT_IMPLEMENTED;
2894 
2895 	low_thresh_delta = ((ets_cfg & NVM_ETS_LTHRES_DELTA_MASK) >>
2896 			    NVM_ETS_LTHRES_DELTA_SHIFT);
2897 	num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
2898 
2899 	for (i = 1; i <= num_sensors; i++) {
2900 		hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
2901 		sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
2902 				NVM_ETS_DATA_INDEX_SHIFT);
2903 		sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
2904 				   NVM_ETS_DATA_LOC_SHIFT);
2905 		therm_limit = ets_sensor & NVM_ETS_DATA_HTHRESH_MASK;
2906 
2907 		hw->phy.ops.write_i2c_byte(hw,
2908 			e1000_emc_therm_limit[sensor_index],
2909 			E1000_I2C_THERMAL_SENSOR_ADDR,
2910 			therm_limit);
2911 
2912 		if ((i < E1000_MAX_SENSORS) && (sensor_location != 0)) {
2913 			data->sensor[i].location = sensor_location;
2914 			data->sensor[i].caution_thresh = therm_limit;
2915 			data->sensor[i].max_op_thresh = therm_limit -
2916 							low_thresh_delta;
2917 		}
2918 	}
2919 	return 0;
2920 }
2921 
2922 #endif
2923 static struct e1000_mac_operations e1000_mac_ops_82575 = {
2924 	.init_hw              = igb_init_hw_82575,
2925 	.check_for_link       = igb_check_for_link_82575,
2926 	.rar_set              = igb_rar_set,
2927 	.read_mac_addr        = igb_read_mac_addr_82575,
2928 	.get_speed_and_duplex = igb_get_link_up_info_82575,
2929 #ifdef CONFIG_IGB_HWMON
2930 	.get_thermal_sensor_data = igb_get_thermal_sensor_data_generic,
2931 	.init_thermal_sensor_thresh = igb_init_thermal_sensor_thresh_generic,
2932 #endif
2933 };
2934 
2935 static const struct e1000_phy_operations e1000_phy_ops_82575 = {
2936 	.acquire              = igb_acquire_phy_82575,
2937 	.get_cfg_done         = igb_get_cfg_done_82575,
2938 	.release              = igb_release_phy_82575,
2939 	.write_i2c_byte       = igb_write_i2c_byte,
2940 	.read_i2c_byte        = igb_read_i2c_byte,
2941 };
2942 
2943 static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
2944 	.acquire              = igb_acquire_nvm_82575,
2945 	.read                 = igb_read_nvm_eerd,
2946 	.release              = igb_release_nvm_82575,
2947 	.write                = igb_write_nvm_spi,
2948 };
2949 
2950 const struct e1000_info e1000_82575_info = {
2951 	.get_invariants = igb_get_invariants_82575,
2952 	.mac_ops = &e1000_mac_ops_82575,
2953 	.phy_ops = &e1000_phy_ops_82575,
2954 	.nvm_ops = &e1000_nvm_ops_82575,
2955 };
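
/* Usage sketch (illustrative; the real wiring happens at probe time in the
 * core igb code, not in this file): the per-family info block is copied into
 * the hw structure and its ops are then invoked indirectly, e.g.
 *
 *	const struct e1000_info *ei = &e1000_82575_info;
 *	s32 err;
 *
 *	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
 *	err = ei->get_invariants(hw);
 *	if (!err)
 *		err = hw->mac.ops.init_hw(hw);
 */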
2956 
2957