1 /* Framework for configuring and reading PHY devices
2  * Based on code in sungem_phy.c and gianfar_phy.c
3  *
4  * Author: Andy Fleming
5  *
6  * Copyright (c) 2004 Freescale Semiconductor, Inc.
7  * Copyright (c) 2006, 2007  Maciej W. Rozycki
8  *
9  * This program is free software; you can redistribute  it and/or modify it
10  * under  the terms of  the GNU General  Public License as published by the
11  * Free Software Foundation;  either version 2 of the  License, or (at your
12  * option) any later version.
13  *
14  */
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17 
18 #include <linux/kernel.h>
19 #include <linux/string.h>
20 #include <linux/errno.h>
21 #include <linux/unistd.h>
22 #include <linux/interrupt.h>
23 #include <linux/delay.h>
24 #include <linux/netdevice.h>
25 #include <linux/etherdevice.h>
26 #include <linux/skbuff.h>
27 #include <linux/mm.h>
28 #include <linux/module.h>
29 #include <linux/mii.h>
30 #include <linux/ethtool.h>
31 #include <linux/phy.h>
32 #include <linux/timer.h>
33 #include <linux/workqueue.h>
34 #include <linux/mdio.h>
35 #include <linux/io.h>
36 #include <linux/uaccess.h>
37 #include <linux/atomic.h>
38 
39 #include <asm/irq.h>
40 
41 static const char *phy_speed_to_str(int speed)
42 {
43 	switch (speed) {
44 	case SPEED_10:
45 		return "10Mbps";
46 	case SPEED_100:
47 		return "100Mbps";
48 	case SPEED_1000:
49 		return "1Gbps";
50 	case SPEED_2500:
51 		return "2.5Gbps";
52 	case SPEED_10000:
53 		return "10Gbps";
54 	case SPEED_UNKNOWN:
55 		return "Unknown";
56 	default:
57 		return "Unsupported (update phy.c)";
58 	}
59 }
60 
61 #define PHY_STATE_STR(_state)			\
62 	case PHY_##_state:			\
63 		return __stringify(_state);	\
64 
65 static const char *phy_state_to_str(enum phy_state st)
66 {
67 	switch (st) {
68 	PHY_STATE_STR(DOWN)
69 	PHY_STATE_STR(STARTING)
70 	PHY_STATE_STR(READY)
71 	PHY_STATE_STR(PENDING)
72 	PHY_STATE_STR(UP)
73 	PHY_STATE_STR(AN)
74 	PHY_STATE_STR(RUNNING)
75 	PHY_STATE_STR(NOLINK)
76 	PHY_STATE_STR(FORCING)
77 	PHY_STATE_STR(CHANGELINK)
78 	PHY_STATE_STR(HALTED)
79 	PHY_STATE_STR(RESUMING)
80 	}
81 
82 	return NULL;
83 }
84 
85 
86 /**
87  * phy_print_status - Convenience function to print out the current phy status
88  * @phydev: the phy_device struct
89  */
90 void phy_print_status(struct phy_device *phydev)
91 {
92 	if (phydev->link) {
93 		netdev_info(phydev->attached_dev,
94 			"Link is Up - %s/%s - flow control %s\n",
95 			phy_speed_to_str(phydev->speed),
96 			DUPLEX_FULL == phydev->duplex ? "Full" : "Half",
97 			phydev->pause ? "rx/tx" : "off");
98 	} else	{
99 		netdev_info(phydev->attached_dev, "Link is Down\n");
100 	}
101 }
102 EXPORT_SYMBOL(phy_print_status);
103 
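/*
 * Illustrative sketch (editorial addition, not built): a MAC driver's
 * adjust_link() callback, as registered via phy_connect(), might call
 * phy_print_status() after reprogramming the MAC.  "foo_adjust_link" is a
 * hypothetical name; ndev->phydev is assumed to have been set when the
 * PHY was attached.
 */
#if 0
static void foo_adjust_link(struct net_device *ndev)
{
	struct phy_device *phydev = ndev->phydev;

	/* ... reprogram the MAC for phydev->speed/duplex/pause here ... */

	phy_print_status(phydev);
}
#endif
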
104 /**
105  * phy_clear_interrupt - Ack the phy device's interrupt
106  * @phydev: the phy_device struct
107  *
108  * If the @phydev driver has an ack_interrupt function, call it to
109  * ack and clear the phy device's interrupt.
110  *
111  * Returns 0 on success or < 0 on error.
112  */
113 static int phy_clear_interrupt(struct phy_device *phydev)
114 {
115 	if (phydev->drv->ack_interrupt)
116 		return phydev->drv->ack_interrupt(phydev);
117 
118 	return 0;
119 }
120 
121 /**
122  * phy_config_interrupt - configure the PHY device for the requested interrupts
123  * @phydev: the phy_device struct
124  * @interrupts: interrupt flags to configure for this @phydev
125  *
126  * Returns 0 on success or < 0 on error.
127  */
128 static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
129 {
130 	phydev->interrupts = interrupts;
131 	if (phydev->drv->config_intr)
132 		return phydev->drv->config_intr(phydev);
133 
134 	return 0;
135 }
136 
137 
138 /**
139  * phy_aneg_done - return auto-negotiation status
140  * @phydev: target phy_device struct
141  *
142  * Description: Return the auto-negotiation status from this @phydev.
143  * Returns > 0 on success or < 0 on error. 0 means that auto-negotiation
144  * is still pending.
145  */
146 static inline int phy_aneg_done(struct phy_device *phydev)
147 {
148 	if (phydev->drv->aneg_done)
149 		return phydev->drv->aneg_done(phydev);
150 
151 	/* Avoid genphy_aneg_done() if the Clause 45 PHY does not
152 	 * implement Clause 22 registers
153 	 */
154 	if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0)))
155 		return -EINVAL;
156 
157 	return genphy_aneg_done(phydev);
158 }
159 
160 /* A structure for mapping a particular speed and duplex
161  * combination to a particular SUPPORTED and ADVERTISED value
162  */
163 struct phy_setting {
164 	int speed;
165 	int duplex;
166 	u32 setting;
167 };
168 
169 /* A mapping of all SUPPORTED settings to speed/duplex */
170 static const struct phy_setting settings[] = {
171 	{
172 		.speed = SPEED_10000,
173 		.duplex = DUPLEX_FULL,
174 		.setting = SUPPORTED_10000baseKR_Full,
175 	},
176 	{
177 		.speed = SPEED_10000,
178 		.duplex = DUPLEX_FULL,
179 		.setting = SUPPORTED_10000baseKX4_Full,
180 	},
181 	{
182 		.speed = SPEED_10000,
183 		.duplex = DUPLEX_FULL,
184 		.setting = SUPPORTED_10000baseT_Full,
185 	},
186 	{
187 		.speed = SPEED_2500,
188 		.duplex = DUPLEX_FULL,
189 		.setting = SUPPORTED_2500baseX_Full,
190 	},
191 	{
192 		.speed = SPEED_1000,
193 		.duplex = DUPLEX_FULL,
194 		.setting = SUPPORTED_1000baseKX_Full,
195 	},
196 	{
197 		.speed = SPEED_1000,
198 		.duplex = DUPLEX_FULL,
199 		.setting = SUPPORTED_1000baseT_Full,
200 	},
201 	{
202 		.speed = SPEED_1000,
203 		.duplex = DUPLEX_HALF,
204 		.setting = SUPPORTED_1000baseT_Half,
205 	},
206 	{
207 		.speed = SPEED_100,
208 		.duplex = DUPLEX_FULL,
209 		.setting = SUPPORTED_100baseT_Full,
210 	},
211 	{
212 		.speed = SPEED_100,
213 		.duplex = DUPLEX_HALF,
214 		.setting = SUPPORTED_100baseT_Half,
215 	},
216 	{
217 		.speed = SPEED_10,
218 		.duplex = DUPLEX_FULL,
219 		.setting = SUPPORTED_10baseT_Full,
220 	},
221 	{
222 		.speed = SPEED_10,
223 		.duplex = DUPLEX_HALF,
224 		.setting = SUPPORTED_10baseT_Half,
225 	},
226 };
227 
228 #define MAX_NUM_SETTINGS ARRAY_SIZE(settings)
229 
230 /**
231  * phy_find_setting - find a PHY settings array entry that matches speed & duplex
232  * @speed: speed to match
233  * @duplex: duplex to match
234  *
235  * Description: Searches the settings array for the setting which
236  *   matches the desired speed and duplex, and returns the index
237  *   of that setting.  Returns the index of the last setting if
238  *   none of the others match.
239  */
240 static inline unsigned int phy_find_setting(int speed, int duplex)
241 {
242 	unsigned int idx = 0;
243 
244 	while (idx < ARRAY_SIZE(settings) &&
245 	       (settings[idx].speed != speed || settings[idx].duplex != duplex))
246 		idx++;
247 
248 	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
249 }
250 
251 /**
252  * phy_find_valid - find a PHY setting that matches the requested features mask
253  * @idx: The first index in settings[] to search
254  * @features: A mask of the valid settings
255  *
256  * Description: Returns the index of the first valid setting less
257  *   than or equal to the one pointed to by idx, as determined by
258  *   the mask in features.  Returns the index of the last setting
259  *   if nothing else matches.
260  */
261 static inline unsigned int phy_find_valid(unsigned int idx, u32 features)
262 {
263 	while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features))
264 		idx++;
265 
266 	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
267 }
268 
269 /**
270  * phy_check_valid - check if there is a valid PHY setting which matches
271  *		     speed, duplex, and feature mask
272  * @speed: speed to match
273  * @duplex: duplex to match
274  * @features: A mask of the valid settings
275  *
276  * Description: Returns true if there is a valid setting, false otherwise.
277  */
278 static inline bool phy_check_valid(int speed, int duplex, u32 features)
279 {
280 	unsigned int idx;
281 
282 	idx = phy_find_valid(phy_find_setting(speed, duplex), features);
283 
284 	return settings[idx].speed == speed && settings[idx].duplex == duplex &&
285 		(settings[idx].setting & features);
286 }
287 
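/*
 * Worked example (editorial addition, not built): with a feature mask that
 * only contains 10/100 modes, phy_check_valid() accepts 100/full but
 * rejects 1000/full, because the nearest valid settings[] entry found no
 * longer matches the requested speed.  Only meaningful inside this file,
 * since phy_check_valid() is static.
 */
#if 0
static bool foo_check_example(void)
{
	u32 features = SUPPORTED_100baseT_Full | SUPPORTED_100baseT_Half |
		       SUPPORTED_10baseT_Full | SUPPORTED_10baseT_Half;

	return phy_check_valid(SPEED_100, DUPLEX_FULL, features) &&	/* true */
	       !phy_check_valid(SPEED_1000, DUPLEX_FULL, features);	/* true */
}
#endif
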
288 /**
289  * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
290  * @phydev: the target phy_device struct
291  *
292  * Description: Make sure the PHY is set to supported speeds and
293  *   duplexes.  Drop down by one in this order:  1000/FULL,
294  *   1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
295  */
296 static void phy_sanitize_settings(struct phy_device *phydev)
297 {
298 	u32 features = phydev->supported;
299 	unsigned int idx;
300 
301 	/* Sanitize settings based on PHY capabilities */
302 	if ((features & SUPPORTED_Autoneg) == 0)
303 		phydev->autoneg = AUTONEG_DISABLE;
304 
305 	idx = phy_find_valid(phy_find_setting(phydev->speed, phydev->duplex),
306 			features);
307 
308 	phydev->speed = settings[idx].speed;
309 	phydev->duplex = settings[idx].duplex;
310 }
311 
312 /**
313  * phy_ethtool_sset - generic ethtool sset function, handles all the details
314  * @phydev: target phy_device struct
315  * @cmd: ethtool_cmd
316  *
317  * A few notes about parameter checking:
318  * - We don't set port or transceiver, so we don't care what they
319  *   were set to.
320  * - phy_start_aneg() will make sure forced settings are sane, and
321  *   choose the next best ones from the ones selected, so we don't
322  *   care if ethtool tries to give us bad values.
323  */
324 int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
325 {
326 	u32 speed = ethtool_cmd_speed(cmd);
327 
328 	if (cmd->phy_address != phydev->addr)
329 		return -EINVAL;
330 
331 	/* We make sure that we don't pass unsupported values into the PHY */
332 	cmd->advertising &= phydev->supported;
333 
334 	/* Verify the settings we care about. */
335 	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
336 		return -EINVAL;
337 
338 	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
339 		return -EINVAL;
340 
341 	if (cmd->autoneg == AUTONEG_DISABLE &&
342 	    ((speed != SPEED_1000 &&
343 	      speed != SPEED_100 &&
344 	      speed != SPEED_10) ||
345 	     (cmd->duplex != DUPLEX_HALF &&
346 	      cmd->duplex != DUPLEX_FULL)))
347 		return -EINVAL;
348 
349 	phydev->autoneg = cmd->autoneg;
350 
351 	phydev->speed = speed;
352 
353 	phydev->advertising = cmd->advertising;
354 
355 	if (AUTONEG_ENABLE == cmd->autoneg)
356 		phydev->advertising |= ADVERTISED_Autoneg;
357 	else
358 		phydev->advertising &= ~ADVERTISED_Autoneg;
359 
360 	phydev->duplex = cmd->duplex;
361 
362 	phydev->mdix = cmd->eth_tp_mdix_ctrl;
363 
364 	/* Restart the PHY */
365 	phy_start_aneg(phydev);
366 
367 	return 0;
368 }
369 EXPORT_SYMBOL(phy_ethtool_sset);
370 
371 int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
372 {
373 	cmd->supported = phydev->supported;
374 
375 	cmd->advertising = phydev->advertising;
376 	cmd->lp_advertising = phydev->lp_advertising;
377 
378 	ethtool_cmd_speed_set(cmd, phydev->speed);
379 	cmd->duplex = phydev->duplex;
380 	if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
381 		cmd->port = PORT_BNC;
382 	else
383 		cmd->port = PORT_MII;
384 	cmd->phy_address = phydev->addr;
385 	cmd->transceiver = phy_is_internal(phydev) ?
386 		XCVR_INTERNAL : XCVR_EXTERNAL;
387 	cmd->autoneg = phydev->autoneg;
388 	cmd->eth_tp_mdix_ctrl = phydev->mdix;
389 
390 	return 0;
391 }
392 EXPORT_SYMBOL(phy_ethtool_gset);
393 
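/*
 * Illustrative sketch (editorial addition, not built): a MAC driver's
 * ethtool_ops get_settings/set_settings handlers commonly just delegate to
 * phy_ethtool_gset()/phy_ethtool_sset().  "foo_*" names are hypothetical;
 * ndev->phydev is assumed to be set by phy_connect().
 */
#if 0
static int foo_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
{
	if (!ndev->phydev)
		return -ENODEV;

	return phy_ethtool_gset(ndev->phydev, cmd);
}

static int foo_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
{
	if (!ndev->phydev)
		return -ENODEV;

	return phy_ethtool_sset(ndev->phydev, cmd);
}
#endif
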
394 /**
395  * phy_mii_ioctl - generic PHY MII ioctl interface
396  * @phydev: the phy_device struct
397  * @ifr: &struct ifreq for socket ioctls
398  * @cmd: ioctl cmd to execute
399  *
400  * Note that this function is currently incompatible with the
401  * PHYCONTROL layer.  It changes registers without regard to
402  * current state.  Use at your own risk.
403  */
404 int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
405 {
406 	struct mii_ioctl_data *mii_data = if_mii(ifr);
407 	u16 val = mii_data->val_in;
408 	bool change_autoneg = false;
409 
410 	switch (cmd) {
411 	case SIOCGMIIPHY:
412 		mii_data->phy_id = phydev->addr;
413 		/* fall through */
414 
415 	case SIOCGMIIREG:
416 		mii_data->val_out = mdiobus_read(phydev->bus, mii_data->phy_id,
417 						 mii_data->reg_num);
418 		return 0;
419 
420 	case SIOCSMIIREG:
421 		if (mii_data->phy_id == phydev->addr) {
422 			switch (mii_data->reg_num) {
423 			case MII_BMCR:
424 				if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0) {
425 					if (phydev->autoneg == AUTONEG_ENABLE)
426 						change_autoneg = true;
427 					phydev->autoneg = AUTONEG_DISABLE;
428 					if (val & BMCR_FULLDPLX)
429 						phydev->duplex = DUPLEX_FULL;
430 					else
431 						phydev->duplex = DUPLEX_HALF;
432 					if (val & BMCR_SPEED1000)
433 						phydev->speed = SPEED_1000;
434 					else if (val & BMCR_SPEED100)
435 						phydev->speed = SPEED_100;
436 					else phydev->speed = SPEED_10;
437 				}
438 				else {
439 					if (phydev->autoneg == AUTONEG_DISABLE)
440 						change_autoneg = true;
441 					phydev->autoneg = AUTONEG_ENABLE;
442 				}
443 				break;
444 			case MII_ADVERTISE:
445 				phydev->advertising = mii_adv_to_ethtool_adv_t(val);
446 				change_autoneg = true;
447 				break;
448 			default:
449 				/* do nothing */
450 				break;
451 			}
452 		}
453 
454 		mdiobus_write(phydev->bus, mii_data->phy_id,
455 			      mii_data->reg_num, val);
456 
457 		if (mii_data->phy_id == phydev->addr &&
458 		    mii_data->reg_num == MII_BMCR &&
459 		    val & BMCR_RESET)
460 			return phy_init_hw(phydev);
461 
462 		if (change_autoneg)
463 			return phy_start_aneg(phydev);
464 
465 		return 0;
466 
467 	case SIOCSHWTSTAMP:
468 		if (phydev->drv->hwtstamp)
469 			return phydev->drv->hwtstamp(phydev, ifr);
470 		/* fall through */
471 
472 	default:
473 		return -EOPNOTSUPP;
474 	}
475 }
476 EXPORT_SYMBOL(phy_mii_ioctl);
477 
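/*
 * Illustrative sketch (editorial addition, not built): a driver's
 * ndo_do_ioctl handler might forward SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG
 * requests to phy_mii_ioctl().  "foo_ioctl" is a hypothetical name.
 */
#if 0
static int foo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
{
	if (!netif_running(ndev) || !ndev->phydev)
		return -EINVAL;

	return phy_mii_ioctl(ndev->phydev, ifr, cmd);
}
#endif
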
478 /**
479  * phy_start_aneg - start auto-negotiation for this PHY device
480  * @phydev: the phy_device struct
481  *
482  * Description: Sanitizes the settings (if we're not autonegotiating
483  *   them), and then calls the driver's config_aneg function.
484  *   If the PHYCONTROL Layer is operating, we change the state to
485  *   reflect the beginning of Auto-negotiation or forcing.
486  */
487 int phy_start_aneg(struct phy_device *phydev)
488 {
489 	int err;
490 
491 	mutex_lock(&phydev->lock);
492 
493 	if (AUTONEG_DISABLE == phydev->autoneg)
494 		phy_sanitize_settings(phydev);
495 
496 	/* Invalidate LP advertising flags */
497 	phydev->lp_advertising = 0;
498 
499 	err = phydev->drv->config_aneg(phydev);
500 	if (err < 0)
501 		goto out_unlock;
502 
503 	if (phydev->state != PHY_HALTED) {
504 		if (AUTONEG_ENABLE == phydev->autoneg) {
505 			phydev->state = PHY_AN;
506 			phydev->link_timeout = PHY_AN_TIMEOUT;
507 		} else {
508 			phydev->state = PHY_FORCING;
509 			phydev->link_timeout = PHY_FORCE_TIMEOUT;
510 		}
511 	}
512 
513 out_unlock:
514 	mutex_unlock(&phydev->lock);
515 	return err;
516 }
517 EXPORT_SYMBOL(phy_start_aneg);
518 
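/*
 * Illustrative sketch (editorial addition, not built): an ethtool
 * nway_reset handler can restart negotiation simply by calling
 * phy_start_aneg().  "foo_nway_reset" is a hypothetical name.
 */
#if 0
static int foo_nway_reset(struct net_device *ndev)
{
	if (!ndev->phydev)
		return -ENODEV;

	return phy_start_aneg(ndev->phydev);
}
#endif
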
519 /**
520  * phy_start_machine - start PHY state machine tracking
521  * @phydev: the phy_device struct
522  *
523  * Description: The PHY infrastructure can run a state machine
524  *   which tracks whether the PHY is starting up, negotiating,
525  *   etc.  This function starts the timer which tracks the state
526  *   of the PHY.  If you want to maintain your own state machine,
527  *   do not call this function.
528  */
529 void phy_start_machine(struct phy_device *phydev)
530 {
531 	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, HZ);
532 }
533 
534 /**
535  * phy_stop_machine - stop the PHY state machine tracking
536  * @phydev: target phy_device struct
537  *
538  * Description: Stops the state machine timer and, if the PHY had been
539  * brought up, sets the state back to UP (HALTED is left untouched).
540  * This function must be called BEFORE phy_detach.
541  */
542 void phy_stop_machine(struct phy_device *phydev)
543 {
544 	cancel_delayed_work_sync(&phydev->state_queue);
545 
546 	mutex_lock(&phydev->lock);
547 	if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
548 		phydev->state = PHY_UP;
549 	mutex_unlock(&phydev->lock);
550 }
551 
552 /**
553  * phy_error - enter HALTED state for this PHY device
554  * @phydev: target phy_device struct
555  *
556  * Moves the PHY to the HALTED state in response to a read
557  * or write error, and tells the controller the link is down.
558  * Must not be called from interrupt context, or while the
559  * phydev->lock is held.
560  */
561 static void phy_error(struct phy_device *phydev)
562 {
563 	mutex_lock(&phydev->lock);
564 	phydev->state = PHY_HALTED;
565 	mutex_unlock(&phydev->lock);
566 }
567 
568 /**
569  * phy_interrupt - PHY interrupt handler
570  * @irq: interrupt line
571  * @phy_dat: phy_device pointer
572  *
573  * Description: When a PHY interrupt occurs, the handler disables
574  * interrupts, and schedules a work task to clear the interrupt.
575  */
576 static irqreturn_t phy_interrupt(int irq, void *phy_dat)
577 {
578 	struct phy_device *phydev = phy_dat;
579 
580 	if (PHY_HALTED == phydev->state)
581 		return IRQ_NONE;		/* It can't be ours.  */
582 
583 	/* The MDIO bus is not allowed to be written in interrupt
584 	 * context, so we need to disable the irq here.  A work
585 	 * queue will write the PHY to disable and clear the
586 	 * interrupt, and then reenable the irq line.
587 	 */
588 	disable_irq_nosync(irq);
589 	atomic_inc(&phydev->irq_disable);
590 
591 	queue_work(system_power_efficient_wq, &phydev->phy_queue);
592 
593 	return IRQ_HANDLED;
594 }
595 
596 /**
597  * phy_enable_interrupts - Enable the interrupts from the PHY side
598  * @phydev: target phy_device struct
599  */
600 static int phy_enable_interrupts(struct phy_device *phydev)
601 {
602 	int err = phy_clear_interrupt(phydev);
603 
604 	if (err < 0)
605 		return err;
606 
607 	return phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
608 }
609 
610 /**
611  * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
612  * @phydev: target phy_device struct
613  */
614 static int phy_disable_interrupts(struct phy_device *phydev)
615 {
616 	int err;
617 
618 	/* Disable PHY interrupts */
619 	err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
620 	if (err)
621 		goto phy_err;
622 
623 	/* Clear the interrupt */
624 	err = phy_clear_interrupt(phydev);
625 	if (err)
626 		goto phy_err;
627 
628 	return 0;
629 
630 phy_err:
631 	phy_error(phydev);
632 
633 	return err;
634 }
635 
636 /**
637  * phy_start_interrupts - request and enable interrupts for a PHY device
638  * @phydev: target phy_device struct
639  *
640  * Description: Request the interrupt for the given PHY.
641  *   If this fails, then we set irq to PHY_POLL.
642  *   Otherwise, we enable the interrupts in the PHY.
643  *   This should only be called with a valid IRQ number.
644  *   Returns 0 on success or < 0 on error.
645  */
646 int phy_start_interrupts(struct phy_device *phydev)
647 {
648 	atomic_set(&phydev->irq_disable, 0);
649 	if (request_irq(phydev->irq, phy_interrupt,
650 				IRQF_SHARED,
651 				"phy_interrupt",
652 				phydev) < 0) {
653 		pr_warn("%s: Can't get IRQ %d (PHY)\n",
654 			phydev->bus->name, phydev->irq);
655 		phydev->irq = PHY_POLL;
656 		return 0;
657 	}
658 
659 	return phy_enable_interrupts(phydev);
660 }
661 EXPORT_SYMBOL(phy_start_interrupts);
662 
663 /**
664  * phy_stop_interrupts - disable interrupts from a PHY device
665  * @phydev: target phy_device struct
666  */
667 int phy_stop_interrupts(struct phy_device *phydev)
668 {
669 	int err = phy_disable_interrupts(phydev);
670 
671 	if (err)
672 		phy_error(phydev);
673 
674 	free_irq(phydev->irq, phydev);
675 
676 	/* Cannot call flush_scheduled_work() here as desired because
677 	 * of rtnl_lock(), but we do not really care about what would
678 	 * be done, except from enable_irq(), so cancel any work
679 	 * possibly pending and take care of the matter below.
680 	 */
681 	cancel_work_sync(&phydev->phy_queue);
682 	/* If work indeed has been cancelled, disable_irq() will have
683 	 * been left unbalanced from phy_interrupt() and enable_irq()
684 	 * has to be called so that other devices on the line work.
685 	 */
686 	while (atomic_dec_return(&phydev->irq_disable) >= 0)
687 		enable_irq(phydev->irq);
688 
689 	return err;
690 }
691 EXPORT_SYMBOL(phy_stop_interrupts);
692 
693 /**
694  * phy_change - Scheduled by the phy_interrupt/timer to handle PHY changes
695  * @work: work_struct that describes the work to be done
696  */
697 void phy_change(struct work_struct *work)
698 {
699 	struct phy_device *phydev =
700 		container_of(work, struct phy_device, phy_queue);
701 
702 	if (phy_interrupt_is_valid(phydev)) {
703 		if (phydev->drv->did_interrupt &&
704 		    !phydev->drv->did_interrupt(phydev))
705 			goto ignore;
706 
707 		if (phy_disable_interrupts(phydev))
708 			goto phy_err;
709 	}
710 
711 	mutex_lock(&phydev->lock);
712 	if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
713 		phydev->state = PHY_CHANGELINK;
714 	mutex_unlock(&phydev->lock);
715 
716 	if (phy_interrupt_is_valid(phydev)) {
717 		atomic_dec(&phydev->irq_disable);
718 		enable_irq(phydev->irq);
719 
720 		/* Reenable interrupts */
721 		if (PHY_HALTED != phydev->state &&
722 		    phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
723 			goto irq_enable_err;
724 	}
725 
726 	/* reschedule state queue work to run as soon as possible */
727 	cancel_delayed_work_sync(&phydev->state_queue);
728 	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
729 	return;
730 
731 ignore:
732 	atomic_dec(&phydev->irq_disable);
733 	enable_irq(phydev->irq);
734 	return;
735 
736 irq_enable_err:
737 	disable_irq(phydev->irq);
738 	atomic_inc(&phydev->irq_disable);
739 phy_err:
740 	phy_error(phydev);
741 }
742 
743 /**
744  * phy_stop - Bring down the PHY link, and stop checking the status
745  * @phydev: target phy_device struct
746  */
747 void phy_stop(struct phy_device *phydev)
748 {
749 	mutex_lock(&phydev->lock);
750 
751 	if (PHY_HALTED == phydev->state)
752 		goto out_unlock;
753 
754 	if (phy_interrupt_is_valid(phydev)) {
755 		/* Disable PHY Interrupts */
756 		phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
757 
758 		/* Clear any pending interrupts */
759 		phy_clear_interrupt(phydev);
760 	}
761 
762 	phydev->state = PHY_HALTED;
763 
764 out_unlock:
765 	mutex_unlock(&phydev->lock);
766 
767 	/* Cannot call flush_scheduled_work() here as desired because
768 	 * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
769 	 * will not reenable interrupts.
770 	 */
771 }
772 EXPORT_SYMBOL(phy_stop);
773 
774 /**
775  * phy_start - start or restart a PHY device
776  * @phydev: target phy_device struct
777  *
778  * Description: Indicates the attached device's readiness to
779  *   handle PHY-related work.  Used during startup to start the
780  *   PHY, and after a call to phy_stop() to resume operation.
781  *   Also used to indicate the MDIO bus has cleared an error
782  *   condition.
783  */
784 void phy_start(struct phy_device *phydev)
785 {
786 	bool do_resume = false;
787 	int err = 0;
788 
789 	mutex_lock(&phydev->lock);
790 
791 	switch (phydev->state) {
792 	case PHY_STARTING:
793 		phydev->state = PHY_PENDING;
794 		break;
795 	case PHY_READY:
796 		phydev->state = PHY_UP;
797 		break;
798 	case PHY_HALTED:
799 		/* make sure interrupts are re-enabled for the PHY */
800 		err = phy_enable_interrupts(phydev);
801 		if (err < 0)
802 			break;
803 
804 		phydev->state = PHY_RESUMING;
805 		do_resume = true;
806 		break;
807 	default:
808 		break;
809 	}
810 	mutex_unlock(&phydev->lock);
811 
812 	/* if phy was suspended, bring the physical link up again */
813 	if (do_resume)
814 		phy_resume(phydev);
815 }
816 EXPORT_SYMBOL(phy_start);
817 
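/*
 * Illustrative sketch (editorial addition, not built): a typical driver
 * calls phy_start() from ndo_open once the MAC is ready, and phy_stop()
 * from ndo_stop before tearing the MAC down.  "foo_*" names are
 * hypothetical; MAC-side setup and error handling are omitted.
 */
#if 0
static int foo_open(struct net_device *ndev)
{
	/* ... bring up the MAC, request IRQs, etc. ... */
	phy_start(ndev->phydev);
	netif_start_queue(ndev);

	return 0;
}

static int foo_stop(struct net_device *ndev)
{
	netif_stop_queue(ndev);
	phy_stop(ndev->phydev);
	/* ... quiesce the MAC ... */

	return 0;
}
#endif
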
818 /**
819  * phy_state_machine - Handle the state machine
820  * @work: work_struct that describes the work to be done
821  */
822 void phy_state_machine(struct work_struct *work)
823 {
824 	struct delayed_work *dwork = to_delayed_work(work);
825 	struct phy_device *phydev =
826 			container_of(dwork, struct phy_device, state_queue);
827 	bool needs_aneg = false, do_suspend = false;
828 	enum phy_state old_state;
829 	int err = 0;
830 	int old_link;
831 
832 	mutex_lock(&phydev->lock);
833 
834 	old_state = phydev->state;
835 
836 	if (phydev->drv->link_change_notify)
837 		phydev->drv->link_change_notify(phydev);
838 
839 	switch (phydev->state) {
840 	case PHY_DOWN:
841 	case PHY_STARTING:
842 	case PHY_READY:
843 	case PHY_PENDING:
844 		break;
845 	case PHY_UP:
846 		needs_aneg = true;
847 
848 		phydev->link_timeout = PHY_AN_TIMEOUT;
849 
850 		break;
851 	case PHY_AN:
852 		err = phy_read_status(phydev);
853 		if (err < 0)
854 			break;
855 
856 		/* If the link is down, give up on negotiation for now */
857 		if (!phydev->link) {
858 			phydev->state = PHY_NOLINK;
859 			netif_carrier_off(phydev->attached_dev);
860 			phydev->adjust_link(phydev->attached_dev);
861 			break;
862 		}
863 
864 		/* Check if negotiation is done.  Break if there's an error */
865 		err = phy_aneg_done(phydev);
866 		if (err < 0)
867 			break;
868 
869 		/* If AN is done, we're running */
870 		if (err > 0) {
871 			phydev->state = PHY_RUNNING;
872 			netif_carrier_on(phydev->attached_dev);
873 			phydev->adjust_link(phydev->attached_dev);
874 
875 		} else if (0 == phydev->link_timeout--)
876 			needs_aneg = true;
877 		break;
878 	case PHY_NOLINK:
879 		if (phy_interrupt_is_valid(phydev))
880 			break;
881 
882 		err = phy_read_status(phydev);
883 		if (err)
884 			break;
885 
886 		if (phydev->link) {
887 			if (AUTONEG_ENABLE == phydev->autoneg) {
888 				err = phy_aneg_done(phydev);
889 				if (err < 0)
890 					break;
891 
892 				if (!err) {
893 					phydev->state = PHY_AN;
894 					phydev->link_timeout = PHY_AN_TIMEOUT;
895 					break;
896 				}
897 			}
898 			phydev->state = PHY_RUNNING;
899 			netif_carrier_on(phydev->attached_dev);
900 			phydev->adjust_link(phydev->attached_dev);
901 		}
902 		break;
903 	case PHY_FORCING:
904 		err = genphy_update_link(phydev);
905 		if (err)
906 			break;
907 
908 		if (phydev->link) {
909 			phydev->state = PHY_RUNNING;
910 			netif_carrier_on(phydev->attached_dev);
911 		} else {
912 			if (0 == phydev->link_timeout--)
913 				needs_aneg = true;
914 		}
915 
916 		phydev->adjust_link(phydev->attached_dev);
917 		break;
918 	case PHY_RUNNING:
919 		/* Only register a CHANGE if we are polling or ignoring
920 		 * interrupts and link changed since latest checking.
921 		 */
922 		if (!phy_interrupt_is_valid(phydev)) {
923 			old_link = phydev->link;
924 			err = phy_read_status(phydev);
925 			if (err)
926 				break;
927 
928 			if (old_link != phydev->link)
929 				phydev->state = PHY_CHANGELINK;
930 		}
931 		/*
932 		 * Failsafe: check that nobody set phydev->link=0 between two
933 		 * poll cycles, otherwise we won't leave RUNNING state as long
934 		 * as link remains down.
935 		 */
936 		if (!phydev->link && phydev->state == PHY_RUNNING) {
937 			phydev->state = PHY_CHANGELINK;
938 			dev_err(&phydev->dev, "no link in PHY_RUNNING\n");
939 		}
940 		break;
941 	case PHY_CHANGELINK:
942 		err = phy_read_status(phydev);
943 		if (err)
944 			break;
945 
946 		if (phydev->link) {
947 			phydev->state = PHY_RUNNING;
948 			netif_carrier_on(phydev->attached_dev);
949 		} else {
950 			phydev->state = PHY_NOLINK;
951 			netif_carrier_off(phydev->attached_dev);
952 		}
953 
954 		phydev->adjust_link(phydev->attached_dev);
955 
956 		if (phy_interrupt_is_valid(phydev))
957 			err = phy_config_interrupt(phydev,
958 						   PHY_INTERRUPT_ENABLED);
959 		break;
960 	case PHY_HALTED:
961 		if (phydev->link) {
962 			phydev->link = 0;
963 			netif_carrier_off(phydev->attached_dev);
964 			phydev->adjust_link(phydev->attached_dev);
965 			do_suspend = true;
966 		}
967 		break;
968 	case PHY_RESUMING:
969 		if (AUTONEG_ENABLE == phydev->autoneg) {
970 			err = phy_aneg_done(phydev);
971 			if (err < 0)
972 				break;
973 
974 			/* err > 0 if AN is done.
975 			 * Otherwise, it's 0, and we're  still waiting for AN
976 			 */
977 			if (err > 0) {
978 				err = phy_read_status(phydev);
979 				if (err)
980 					break;
981 
982 				if (phydev->link) {
983 					phydev->state = PHY_RUNNING;
984 					netif_carrier_on(phydev->attached_dev);
985 				} else	{
986 					phydev->state = PHY_NOLINK;
987 				}
988 				phydev->adjust_link(phydev->attached_dev);
989 			} else {
990 				phydev->state = PHY_AN;
991 				phydev->link_timeout = PHY_AN_TIMEOUT;
992 			}
993 		} else {
994 			err = phy_read_status(phydev);
995 			if (err)
996 				break;
997 
998 			if (phydev->link) {
999 				phydev->state = PHY_RUNNING;
1000 				netif_carrier_on(phydev->attached_dev);
1001 			} else	{
1002 				phydev->state = PHY_NOLINK;
1003 			}
1004 			phydev->adjust_link(phydev->attached_dev);
1005 		}
1006 		break;
1007 	}
1008 
1009 	mutex_unlock(&phydev->lock);
1010 
1011 	if (needs_aneg)
1012 		err = phy_start_aneg(phydev);
1013 	else if (do_suspend)
1014 		phy_suspend(phydev);
1015 
1016 	if (err < 0)
1017 		phy_error(phydev);
1018 
1019 	dev_dbg(&phydev->dev, "PHY state change %s -> %s\n",
1020 		phy_state_to_str(old_state), phy_state_to_str(phydev->state));
1021 
1022 	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
1023 			   PHY_STATE_TIME * HZ);
1024 }
1025 
1026 void phy_mac_interrupt(struct phy_device *phydev, int new_link)
1027 {
1028 	phydev->link = new_link;
1029 
1030 	/* Trigger a state machine change */
1031 	queue_work(system_power_efficient_wq, &phydev->phy_queue);
1032 }
1033 EXPORT_SYMBOL(phy_mac_interrupt);
1034 
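/*
 * Illustrative sketch (editorial addition, not built): a MAC that reports
 * link state in-band (e.g. via its own link-change interrupt) can feed
 * that status into the PHY state machine with phy_mac_interrupt().
 * "foo_link_isr" and "foo_read_link_status" are hypothetical names.
 */
#if 0
static irqreturn_t foo_link_isr(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	int new_link = foo_read_link_status(ndev);	/* hypothetical helper */

	phy_mac_interrupt(ndev->phydev, new_link);

	return IRQ_HANDLED;
}
#endif
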
1035 static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad,
1036 				    int addr)
1037 {
1038 	/* Write the desired MMD Devad */
1039 	bus->write(bus, addr, MII_MMD_CTRL, devad);
1040 
1041 	/* Write the desired MMD register address */
1042 	bus->write(bus, addr, MII_MMD_DATA, prtad);
1043 
1044 	/* Select the Function : DATA with no post increment */
1045 	bus->write(bus, addr, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
1046 }
1047 
1048 /**
1049  * phy_read_mmd_indirect - reads data from the MMD registers
1050  * @phydev: The PHY device
1051  * @prtad: MMD Address
1052  * @devad: MMD DEVAD
1053  * @addr: PHY address on the MII bus
1054  *
1055  * Description: Reads data from the MMD registers (using clause 22
1056  * access to reach clause 45 registers) of the specified PHY address.
1057  * To read these register we have:
1058  * 1) Write reg 13 // DEVAD
1059  * 2) Write reg 14 // MMD Address
1060  * 3) Write reg 13 // MMD Data Command for MMD DEVAD
1061  * 4) Read  reg 14 // Read MMD data
1062  */
1063 int phy_read_mmd_indirect(struct phy_device *phydev, int prtad,
1064 				 int devad, int addr)
1065 {
1066 	struct phy_driver *phydrv = phydev->drv;
1067 	int value = -1;
1068 
1069 	if (!phydrv->read_mmd_indirect) {
1070 		struct mii_bus *bus = phydev->bus;
1071 
1072 		mutex_lock(&bus->mdio_lock);
1073 		mmd_phy_indirect(bus, prtad, devad, addr);
1074 
1075 		/* Read the content of the MMD's selected register */
1076 		value = bus->read(bus, addr, MII_MMD_DATA);
1077 		mutex_unlock(&bus->mdio_lock);
1078 	} else {
1079 		value = phydrv->read_mmd_indirect(phydev, prtad, devad, addr);
1080 	}
1081 	return value;
1082 }
1083 EXPORT_SYMBOL(phy_read_mmd_indirect);
1084 
1085 /**
1086  * phy_write_mmd_indirect - writes data to the MMD registers
1087  * @phydev: The PHY device
1088  * @prtad: MMD Address
1089  * @devad: MMD DEVAD
1090  * @addr: PHY address on the MII bus
1091  * @data: data to write in the MMD register
1092  *
1093  * Description: Writes data to the MMD registers of the specified
1094  * PHY address.
1095  * To write these register we have:
1096  * 1) Write reg 13 // DEVAD
1097  * 2) Write reg 14 // MMD Address
1098  * 3) Write reg 13 // MMD Data Command for MMD DEVAD
1099  * 4) Write reg 14 // Write MMD data
1100  */
1101 void phy_write_mmd_indirect(struct phy_device *phydev, int prtad,
1102 				   int devad, int addr, u32 data)
1103 {
1104 	struct phy_driver *phydrv = phydev->drv;
1105 
1106 	if (!phydrv->write_mmd_indirect) {
1107 		struct mii_bus *bus = phydev->bus;
1108 
1109 		mutex_lock(&bus->mdio_lock);
1110 		mmd_phy_indirect(bus, prtad, devad, addr);
1111 
1112 		/* Write the data into MMD's selected register */
1113 		bus->write(bus, addr, MII_MMD_DATA, data);
1114 		mutex_unlock(&bus->mdio_lock);
1115 	} else {
1116 		phydrv->write_mmd_indirect(phydev, prtad, devad, addr, data);
1117 	}
1118 }
1119 EXPORT_SYMBOL(phy_write_mmd_indirect);
1120 
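/*
 * Illustrative sketch (editorial addition, not built): a read-modify-write
 * of an MMD register using the two indirect helpers above, here setting
 * the 1000BASE-T bit in the EEE advertisement register (7.60).  Note that
 * the register number is passed as @prtad and the MMD as @devad, matching
 * the calls elsewhere in this file.  "foo_advertise_eee_1000t" is a
 * hypothetical name.
 */
#if 0
static int foo_advertise_eee_1000t(struct phy_device *phydev)
{
	int adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV,
					MDIO_MMD_AN, phydev->addr);
	if (adv < 0)
		return adv;

	adv |= MDIO_EEE_1000T;
	phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN,
			       phydev->addr, adv);

	return 0;
}
#endif
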
1121 /**
1122  * phy_init_eee - init and check the EEE feature
1123  * @phydev: target phy_device struct
1124  * @clk_stop_enable: PHY may stop the clock during LPI
1125  *
1126  * Description: Checks whether Energy-Efficient Ethernet (EEE) is
1127  * supported by looking at the MMD registers 3.20 and 7.60/61, and
1128  * programs the MMD register 3.0 to set the "Clock stop enable"
1129  * bit if required.
1130  */
1131 int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
1132 {
1133 	/* According to 802.3az, EEE is supported only in full-duplex mode.
1134 	 * The EEE feature is also active when the core is operating with MII,
1135 	 * GMII or RGMII (all variants). Internal PHYs are also allowed to
1136 	 * proceed and should return an error if they do not support EEE.
1137 	 */
1138 	if ((phydev->duplex == DUPLEX_FULL) &&
1139 	    ((phydev->interface == PHY_INTERFACE_MODE_MII) ||
1140 	    (phydev->interface == PHY_INTERFACE_MODE_GMII) ||
1141 	     phy_interface_is_rgmii(phydev) ||
1142 	     phy_is_internal(phydev))) {
1143 		int eee_lp, eee_cap, eee_adv;
1144 		u32 lp, cap, adv;
1145 		int status;
1146 
1147 		/* Read phy status to properly get the right settings */
1148 		status = phy_read_status(phydev);
1149 		if (status)
1150 			return status;
1151 
1152 		/* First check if the EEE ability is supported */
1153 		eee_cap = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE,
1154 						MDIO_MMD_PCS, phydev->addr);
1155 		if (eee_cap <= 0)
1156 			goto eee_exit_err;
1157 
1158 		cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
1159 		if (!cap)
1160 			goto eee_exit_err;
1161 
1162 		/* Check which link settings were negotiated and verify
1163 		 * them against the EEE advertising registers.
1164 		 */
1165 		eee_lp = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE,
1166 					       MDIO_MMD_AN, phydev->addr);
1167 		if (eee_lp <= 0)
1168 			goto eee_exit_err;
1169 
1170 		eee_adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV,
1171 						MDIO_MMD_AN, phydev->addr);
1172 		if (eee_adv <= 0)
1173 			goto eee_exit_err;
1174 
1175 		adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
1176 		lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
1177 		if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv))
1178 			goto eee_exit_err;
1179 
1180 		if (clk_stop_enable) {
1181 			/* Configure the PHY to stop receiving xMII
1182 			 * clock while it is signaling LPI.
1183 			 */
1184 			int val = phy_read_mmd_indirect(phydev, MDIO_CTRL1,
1185 							MDIO_MMD_PCS,
1186 							phydev->addr);
1187 			if (val < 0)
1188 				return val;
1189 
1190 			val |= MDIO_PCS_CTRL1_CLKSTOP_EN;
1191 			phy_write_mmd_indirect(phydev, MDIO_CTRL1,
1192 					       MDIO_MMD_PCS, phydev->addr,
1193 					       val);
1194 		}
1195 
1196 		return 0; /* EEE supported */
1197 	}
1198 eee_exit_err:
1199 	return -EPROTONOSUPPORT;
1200 }
1201 EXPORT_SYMBOL(phy_init_eee);
1202 
1203 /**
1204  * phy_get_eee_err - report the EEE wake error count
1205  * @phydev: target phy_device struct
1206  *
1207  * Description: Reports the number of times the PHY failed to complete
1208  * its normal wake sequence.
1209  */
1210 int phy_get_eee_err(struct phy_device *phydev)
1211 {
1212 	return phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_WK_ERR,
1213 				     MDIO_MMD_PCS, phydev->addr);
1214 }
1215 EXPORT_SYMBOL(phy_get_eee_err);
1216 
1217 /**
1218  * phy_ethtool_get_eee - get EEE supported and status
1219  * @phydev: target phy_device struct
1220  * @data: ethtool_eee data
1221  *
1222  * Description: Reports the Supported/Advertisement/LP Advertisement
1223  * capabilities.
1224  */
1225 int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
1226 {
1227 	int val;
1228 
1229 	/* Get Supported EEE */
1230 	val = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE,
1231 				    MDIO_MMD_PCS, phydev->addr);
1232 	if (val < 0)
1233 		return val;
1234 	data->supported = mmd_eee_cap_to_ethtool_sup_t(val);
1235 
1236 	/* Get advertisement EEE */
1237 	val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV,
1238 				    MDIO_MMD_AN, phydev->addr);
1239 	if (val < 0)
1240 		return val;
1241 	data->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
1242 
1243 	/* Get LP advertisement EEE */
1244 	val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE,
1245 				    MDIO_MMD_AN, phydev->addr);
1246 	if (val < 0)
1247 		return val;
1248 	data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
1249 
1250 	return 0;
1251 }
1252 EXPORT_SYMBOL(phy_ethtool_get_eee);
1253 
1254 /**
1255  * phy_ethtool_set_eee - set EEE supported and status
1256  * @phydev: target phy_device struct
1257  * @data: ethtool_eee data
1258  *
1259  * Description: Programs the EEE Advertisement register.
1260  */
1261 int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
1262 {
1263 	int val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
1264 
1265 	phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN,
1266 			       phydev->addr, val);
1267 
1268 	return 0;
1269 }
1270 EXPORT_SYMBOL(phy_ethtool_set_eee);
1271 
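/*
 * Illustrative sketch (editorial addition, not built): ethtool get_eee/
 * set_eee handlers in a MAC driver frequently delegate to these helpers,
 * possibly after updating the MAC's own LPI configuration.  "foo_*" names
 * are hypothetical.
 */
#if 0
static int foo_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
{
	if (!ndev->phydev)
		return -ENODEV;

	return phy_ethtool_get_eee(ndev->phydev, edata);
}

static int foo_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
{
	if (!ndev->phydev)
		return -ENODEV;

	/* ... update MAC-side LPI settings here if needed ... */

	return phy_ethtool_set_eee(ndev->phydev, edata);
}
#endif
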
1272 int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
1273 {
1274 	if (phydev->drv->set_wol)
1275 		return phydev->drv->set_wol(phydev, wol);
1276 
1277 	return -EOPNOTSUPP;
1278 }
1279 EXPORT_SYMBOL(phy_ethtool_set_wol);
1280 
1281 void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
1282 {
1283 	if (phydev->drv->get_wol)
1284 		phydev->drv->get_wol(phydev, wol);
1285 }
1286 EXPORT_SYMBOL(phy_ethtool_get_wol);
1287