1 // SPDX-License-Identifier: GPL-2.0
2 /* NXP C45 PHY driver
3  * Copyright 2021-2025 NXP
4  * Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
5  */
6 
7 #include <linux/delay.h>
8 #include <linux/ethtool.h>
9 #include <linux/ethtool_netlink.h>
10 #include <linux/kernel.h>
11 #include <linux/mii.h>
12 #include <linux/module.h>
13 #include <linux/phy.h>
14 #include <linux/processor.h>
15 #include <linux/property.h>
16 #include <linux/ptp_classify.h>
17 #include <linux/net_tstamp.h>
18 
19 #include "nxp-c45-tja11xx.h"
20 
21 #define PHY_ID_MASK			GENMASK(31, 4)
22 /* Same id: TJA1103, TJA1104 */
23 #define PHY_ID_TJA_1103			0x001BB010
24 #define PHY_ID_TJA_1120			0x001BB031
25 
26 #define VEND1_DEVICE_ID3		0x0004
27 #define TJA1120_DEV_ID3_SILICON_VERSION	GENMASK(15, 12)
28 #define TJA1120_DEV_ID3_SAMPLE_TYPE	GENMASK(11, 8)
29 #define DEVICE_ID3_SAMPLE_TYPE_R	0x9
30 
31 #define VEND1_DEVICE_CONTROL		0x0040
32 #define DEVICE_CONTROL_RESET		BIT(15)
33 #define DEVICE_CONTROL_CONFIG_GLOBAL_EN	BIT(14)
34 #define DEVICE_CONTROL_CONFIG_ALL_EN	BIT(13)
35 
36 #define VEND1_DEVICE_CONFIG		0x0048
37 
38 #define TJA1120_VEND1_EXT_TS_MODE	0x1012
39 
40 #define TJA1120_GLOBAL_INFRA_IRQ_ACK	0x2C08
41 #define TJA1120_GLOBAL_INFRA_IRQ_EN	0x2C0A
42 #define TJA1120_GLOBAL_INFRA_IRQ_STATUS	0x2C0C
43 #define TJA1120_DEV_BOOT_DONE		BIT(1)
44 
45 #define TJA1120_VEND1_PTP_TRIG_DATA_S	0x1070
46 
47 #define TJA1120_EGRESS_TS_DATA_S	0x9060
48 #define TJA1120_EGRESS_TS_END		0x9067
49 #define TJA1120_TS_VALID		BIT(0)
50 #define TJA1120_MORE_TS			BIT(15)
51 
52 #define VEND1_PHY_IRQ_ACK		0x80A0
53 #define VEND1_PHY_IRQ_EN		0x80A1
54 #define VEND1_PHY_IRQ_STATUS		0x80A2
55 #define PHY_IRQ_LINK_EVENT		BIT(1)
56 
57 #define VEND1_ALWAYS_ACCESSIBLE		0x801F
58 #define FUSA_PASS			BIT(4)
59 
60 #define VEND1_PHY_CONTROL		0x8100
61 #define PHY_CONFIG_EN			BIT(14)
62 #define PHY_START_OP			BIT(0)
63 
64 #define VEND1_PHY_CONFIG		0x8108
65 #define PHY_CONFIG_AUTO			BIT(0)
66 
67 #define TJA1120_EPHY_RESETS		0x810A
68 #define EPHY_PCS_RESET			BIT(3)
69 
70 #define VEND1_SIGNAL_QUALITY		0x8320
71 #define SQI_VALID			BIT(14)
72 #define SQI_MASK			GENMASK(2, 0)
73 #define MAX_SQI				SQI_MASK
74 
75 #define CABLE_TEST_ENABLE		BIT(15)
76 #define CABLE_TEST_START		BIT(14)
77 #define CABLE_TEST_OK			0x00
78 #define CABLE_TEST_SHORTED		0x01
79 #define CABLE_TEST_OPEN			0x02
80 #define CABLE_TEST_UNKNOWN		0x07
81 
82 #define VEND1_PORT_CONTROL		0x8040
83 #define PORT_CONTROL_EN			BIT(14)
84 
85 #define VEND1_PORT_ABILITIES		0x8046
86 #define MACSEC_ABILITY			BIT(5)
87 #define PTP_ABILITY			BIT(3)
88 
89 #define VEND1_PORT_FUNC_IRQ_EN		0x807A
90 #define MACSEC_IRQS			BIT(5)
91 #define PTP_IRQS			BIT(3)
92 
93 #define VEND1_PTP_IRQ_ACK		0x9008
94 #define EGR_TS_IRQ			BIT(1)
95 
96 #define VEND1_PORT_INFRA_CONTROL	0xAC00
97 #define PORT_INFRA_CONTROL_EN		BIT(14)
98 
99 #define VEND1_RXID			0xAFCC
100 #define VEND1_TXID			0xAFCD
101 #define ID_ENABLE			BIT(15)
102 
103 #define VEND1_ABILITIES			0xAFC4
104 #define RGMII_ID_ABILITY		BIT(15)
105 #define RGMII_ABILITY			BIT(14)
106 #define RMII_ABILITY			BIT(10)
107 #define REVMII_ABILITY			BIT(9)
108 #define MII_ABILITY			BIT(8)
109 #define SGMII_ABILITY			BIT(0)
110 
111 #define VEND1_MII_BASIC_CONFIG		0xAFC6
112 #define MII_BASIC_CONFIG_REV		BIT(4)
113 #define MII_BASIC_CONFIG_SGMII		0x9
114 #define MII_BASIC_CONFIG_RGMII		0x7
115 #define MII_BASIC_CONFIG_RMII		0x5
116 #define MII_BASIC_CONFIG_MII		0x4
117 
118 #define VEND1_SGMII_BASIC_CONTROL	0xB000
119 #define SGMII_LPM			BIT(11)
120 
121 #define VEND1_SYMBOL_ERROR_CNT_XTD	0x8351
122 #define EXTENDED_CNT_EN			BIT(15)
123 #define VEND1_MONITOR_STATUS		0xAC80
124 #define MONITOR_RESET			BIT(15)
125 #define VEND1_MONITOR_CONFIG		0xAC86
126 #define LOST_FRAMES_CNT_EN		BIT(9)
127 #define ALL_FRAMES_CNT_EN		BIT(8)
128 
129 #define VEND1_SYMBOL_ERROR_COUNTER	0x8350
130 #define VEND1_LINK_DROP_COUNTER		0x8352
131 #define VEND1_LINK_LOSSES_AND_FAILURES	0x8353
132 #define VEND1_RX_PREAMBLE_COUNT		0xAFCE
133 #define VEND1_TX_PREAMBLE_COUNT		0xAFCF
134 #define VEND1_RX_IPG_LENGTH		0xAFD0
135 #define VEND1_TX_IPG_LENGTH		0xAFD1
136 #define COUNTER_EN			BIT(15)
137 
138 #define VEND1_PTP_CONFIG		0x1102
139 #define EXT_TRG_EDGE			BIT(1)
140 
141 #define TJA1120_SYNC_TRIG_FILTER	0x1010
142 #define PTP_TRIG_RISE_TS		BIT(3)
143 #define PTP_TRIG_FALLING_TS		BIT(2)
144 
145 #define CLK_RATE_ADJ_LD			BIT(15)
146 #define CLK_RATE_ADJ_DIR		BIT(14)
147 
148 #define VEND1_RX_TS_INSRT_CTRL		0x114D
149 #define TJA1103_RX_TS_INSRT_MODE2	0x02
150 
151 #define TJA1120_RX_TS_INSRT_CTRL	0x9012
152 #define TJA1120_RX_TS_INSRT_EN		BIT(15)
153 #define TJA1120_TS_INSRT_MODE		BIT(4)
154 
155 #define VEND1_EGR_RING_DATA_0		0x114E
156 #define VEND1_EGR_RING_CTRL		0x1154
157 
158 #define RING_DATA_0_TS_VALID		BIT(15)
159 
160 #define RING_DONE			BIT(0)
161 
162 #define TS_SEC_MASK			GENMASK(1, 0)
163 
164 #define PTP_ENABLE			BIT(3)
165 #define PHY_TEST_ENABLE			BIT(0)
166 
167 #define VEND1_PORT_PTP_CONTROL		0x9000
168 #define PORT_PTP_CONTROL_BYPASS		BIT(11)
169 
170 #define PTP_CLK_PERIOD_100BT1		15ULL
171 #define PTP_CLK_PERIOD_1000BT1		8ULL
172 
173 #define EVENT_MSG_FILT_ALL		0x0F
174 #define EVENT_MSG_FILT_NONE		0x00
175 
176 #define VEND1_GPIO_FUNC_CONFIG_BASE	0x2C40
177 #define GPIO_FUNC_EN			BIT(15)
178 #define GPIO_FUNC_PTP			BIT(6)
179 #define GPIO_SIGNAL_PTP_TRIGGER		0x01
180 #define GPIO_SIGNAL_PPS_OUT		0x12
181 #define GPIO_DISABLE			0
182 #define GPIO_PPS_OUT_CFG		(GPIO_FUNC_EN | GPIO_FUNC_PTP | \
183 	GPIO_SIGNAL_PPS_OUT)
184 #define GPIO_EXTTS_OUT_CFG		(GPIO_FUNC_EN | GPIO_FUNC_PTP | \
185 	GPIO_SIGNAL_PTP_TRIGGER)
186 
187 #define RGMII_PERIOD_PS			8000U
188 #define PS_PER_DEGREE			div_u64(RGMII_PERIOD_PS, 360)
189 #define MIN_ID_PS			1644U
190 #define MAX_ID_PS			2260U
191 #define DEFAULT_ID_PS			2000U
192 
193 #define PPM_TO_SUBNS_INC(ppb, ptp_clk_period) div_u64(GENMASK_ULL(31, 0) * \
194 	(ppb) * (ptp_clk_period), NSEC_PER_SEC)
195 
196 #define NXP_C45_SKB_CB(skb)	((struct nxp_c45_skb_cb *)(skb)->cb)
197 
198 struct nxp_c45_phy;
199 
/* Per-skb metadata kept in skb->cb while a packet waits for its hardware
 * timestamp.
 */
struct nxp_c45_skb_cb {
	struct ptp_header *header;	/* parsed PTP header inside the skb */
	unsigned int type;		/* packet type as given to the mii_ts callbacks */
};
204 
/* Build a struct nxp_c45_reg_field compound-literal initializer. */
#define NXP_C45_REG_FIELD(_reg, _devad, _offset, _size)	\
	((struct nxp_c45_reg_field) {			\
		.reg = _reg,				\
		.devad =  _devad,			\
		.offset = _offset,			\
		.size = _size,				\
	})

/* Location of a bit field inside an MMD register. */
struct nxp_c45_reg_field {
	u16 reg;	/* register address */
	u8 devad;	/* MMD device address */
	u8 offset;	/* bit offset of the field's LSB */
	u8 size;	/* field width in bits */
};
219 
/* Hardware timestamp plus the PTP header fields used to match it to an skb. */
struct nxp_c45_hwts {
	u32	nsec;		/* nanoseconds part of the timestamp */
	u32	sec;		/* truncated seconds part (low bits only) */
	u8	domain_number;	/* PTP domain from the packet header */
	u16	sequence_id;	/* PTP sequence id from the packet header */
	u8	msg_type;	/* PTP message type from the packet header */
};
227 
/* Per-variant register layout: the supported PHYs expose the same logical
 * registers and bit fields at different addresses.
 */
struct nxp_c45_regmap {
	/* PTP config regs. */
	u16 vend1_ptp_clk_period;
	u16 vend1_event_msg_filt;

	/* LTC bits and regs. */
	struct nxp_c45_reg_field ltc_read;	/* strobe: latch LTC for reading */
	struct nxp_c45_reg_field ltc_write;	/* strobe: load LTC from write regs */
	struct nxp_c45_reg_field ltc_lock_ctrl;
	u16 vend1_ltc_wr_nsec_0;
	u16 vend1_ltc_wr_nsec_1;
	u16 vend1_ltc_wr_sec_0;
	u16 vend1_ltc_wr_sec_1;
	u16 vend1_ltc_rd_nsec_0;
	u16 vend1_ltc_rd_nsec_1;
	u16 vend1_ltc_rd_sec_0;
	u16 vend1_ltc_rd_sec_1;
	u16 vend1_rate_adj_subns_0;
	u16 vend1_rate_adj_subns_1;

	/* External trigger reg fields. */
	struct nxp_c45_reg_field irq_egr_ts_en;
	struct nxp_c45_reg_field irq_egr_ts_status;
	struct nxp_c45_reg_field domain_number;
	struct nxp_c45_reg_field msg_type;
	struct nxp_c45_reg_field sequence_id;
	struct nxp_c45_reg_field sec_1_0;	/* egress ts seconds, bits 1:0 */
	struct nxp_c45_reg_field sec_4_2;	/* egress ts seconds, bits 4:2 */
	struct nxp_c45_reg_field nsec_15_0;	/* egress ts nanoseconds, low half */
	struct nxp_c45_reg_field nsec_29_16;	/* egress ts nanoseconds, high half */

	/* PPS and EXT Trigger bits and regs. */
	struct nxp_c45_reg_field pps_enable;
	struct nxp_c45_reg_field pps_polarity;
	u16 vend1_ext_trg_data_0;
	u16 vend1_ext_trg_data_1;
	u16 vend1_ext_trg_data_2;
	u16 vend1_ext_trg_data_3;
	u16 vend1_ext_trg_ctrl;

	/* Cable test reg fields. */
	u16 cable_test;
	struct nxp_c45_reg_field cable_test_valid;
	struct nxp_c45_reg_field cable_test_result;
};
273 
/* One entry of the ethtool statistics table: name plus counter location. */
struct nxp_c45_phy_stats {
	const char	*name;
	const struct nxp_c45_reg_field counter;
};
278 
/* Per-variant driver data: register map, statistics table and the
 * variant-specific PTP/IRQ hooks.
 */
struct nxp_c45_phy_data {
	const struct nxp_c45_regmap *regmap;
	const struct nxp_c45_phy_stats *stats;
	int n_stats;
	u8 ptp_clk_period;	/* LTC clock period (see PTP_CLK_PERIOD_*) */
	bool ext_ts_both_edges;	/* can timestamp rising and falling edges */
	bool ack_ptp_irq;	/* PTP IRQ needs an explicit ack write */
	void (*counters_enable)(struct phy_device *phydev);
	/* Fetch one egress timestamp; returns false when none is valid. */
	bool (*get_egressts)(struct nxp_c45_phy *priv,
			     struct nxp_c45_hwts *hwts);
	/* Fetch one external trigger timestamp; returns false when invalid. */
	bool (*get_extts)(struct nxp_c45_phy *priv, struct timespec64 *extts);
	void (*ptp_init)(struct phy_device *phydev);
	void (*ptp_enable)(struct phy_device *phydev, bool enable);
	void (*nmi_handler)(struct phy_device *phydev,
			    irqreturn_t *irq_status);
};
295 
296 static const
nxp_c45_get_data(struct phy_device * phydev)297 struct nxp_c45_phy_data *nxp_c45_get_data(struct phy_device *phydev)
298 {
299 	return phydev->drv->driver_data;
300 }
301 
302 static const
nxp_c45_get_regmap(struct phy_device * phydev)303 struct nxp_c45_regmap *nxp_c45_get_regmap(struct phy_device *phydev)
304 {
305 	const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
306 
307 	return phy_data->regmap;
308 }
309 
nxp_c45_read_reg_field(struct phy_device * phydev,const struct nxp_c45_reg_field * reg_field)310 static int nxp_c45_read_reg_field(struct phy_device *phydev,
311 				  const struct nxp_c45_reg_field *reg_field)
312 {
313 	u16 mask;
314 	int ret;
315 
316 	if (reg_field->size == 0) {
317 		phydev_err(phydev, "Trying to read a reg field of size 0.\n");
318 		return -EINVAL;
319 	}
320 
321 	ret = phy_read_mmd(phydev, reg_field->devad, reg_field->reg);
322 	if (ret < 0)
323 		return ret;
324 
325 	mask = reg_field->size == 1 ? BIT(reg_field->offset) :
326 		GENMASK(reg_field->offset + reg_field->size - 1,
327 			reg_field->offset);
328 	ret &= mask;
329 	ret >>= reg_field->offset;
330 
331 	return ret;
332 }
333 
/* Write @val into the bit field described by @reg_field, leaving the other
 * bits of the register untouched.
 */
static int nxp_c45_write_reg_field(struct phy_device *phydev,
				   const struct nxp_c45_reg_field *reg_field,
				   u16 val)
{
	u16 mask;

	if (!reg_field->size) {
		phydev_err(phydev, "Trying to write a reg field of size 0.\n");
		return -EINVAL;
	}

	if (reg_field->size == 1)
		mask = BIT(reg_field->offset);
	else
		mask = GENMASK(reg_field->offset + reg_field->size - 1,
			       reg_field->offset);

	return phy_modify_mmd_changed(phydev, reg_field->devad, reg_field->reg,
				      mask, val << reg_field->offset);
}
354 
nxp_c45_set_reg_field(struct phy_device * phydev,const struct nxp_c45_reg_field * reg_field)355 static int nxp_c45_set_reg_field(struct phy_device *phydev,
356 				 const struct nxp_c45_reg_field *reg_field)
357 {
358 	if (reg_field->size != 1) {
359 		phydev_err(phydev, "Trying to set a reg field of size different than 1.\n");
360 		return -EINVAL;
361 	}
362 
363 	return nxp_c45_write_reg_field(phydev, reg_field, 1);
364 }
365 
nxp_c45_clear_reg_field(struct phy_device * phydev,const struct nxp_c45_reg_field * reg_field)366 static int nxp_c45_clear_reg_field(struct phy_device *phydev,
367 				   const struct nxp_c45_reg_field *reg_field)
368 {
369 	if (reg_field->size != 1) {
370 		phydev_err(phydev, "Trying to set a reg field of size different than 1.\n");
371 		return -EINVAL;
372 	}
373 
374 	return nxp_c45_write_reg_field(phydev, reg_field, 0);
375 }
376 
nxp_c45_poll_txts(struct phy_device * phydev)377 static bool nxp_c45_poll_txts(struct phy_device *phydev)
378 {
379 	return phydev->irq <= 0;
380 }
381 
_nxp_c45_ptp_gettimex64(struct ptp_clock_info * ptp,struct timespec64 * ts,struct ptp_system_timestamp * sts)382 static int _nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
383 				   struct timespec64 *ts,
384 				   struct ptp_system_timestamp *sts)
385 {
386 	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
387 	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
388 
389 	nxp_c45_set_reg_field(priv->phydev, ®map->ltc_read);
390 	ts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
391 				   regmap->vend1_ltc_rd_nsec_0);
392 	ts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
393 				    regmap->vend1_ltc_rd_nsec_1) << 16;
394 	ts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
395 				  regmap->vend1_ltc_rd_sec_0);
396 	ts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
397 				   regmap->vend1_ltc_rd_sec_1) << 16;
398 
399 	return 0;
400 }
401 
nxp_c45_ptp_gettimex64(struct ptp_clock_info * ptp,struct timespec64 * ts,struct ptp_system_timestamp * sts)402 static int nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
403 				  struct timespec64 *ts,
404 				  struct ptp_system_timestamp *sts)
405 {
406 	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
407 
408 	mutex_lock(&priv->ptp_lock);
409 	_nxp_c45_ptp_gettimex64(ptp, ts, sts);
410 	mutex_unlock(&priv->ptp_lock);
411 
412 	return 0;
413 }
414 
_nxp_c45_ptp_settime64(struct ptp_clock_info * ptp,const struct timespec64 * ts)415 static int _nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
416 				  const struct timespec64 *ts)
417 {
418 	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
419 	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
420 
421 	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_nsec_0,
422 		      ts->tv_nsec);
423 	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_nsec_1,
424 		      ts->tv_nsec >> 16);
425 	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_sec_0,
426 		      ts->tv_sec);
427 	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_sec_1,
428 		      ts->tv_sec >> 16);
429 	nxp_c45_set_reg_field(priv->phydev, ®map->ltc_write);
430 
431 	return 0;
432 }
433 
nxp_c45_ptp_settime64(struct ptp_clock_info * ptp,const struct timespec64 * ts)434 static int nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
435 				 const struct timespec64 *ts)
436 {
437 	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
438 
439 	mutex_lock(&priv->ptp_lock);
440 	_nxp_c45_ptp_settime64(ptp, ts);
441 	mutex_unlock(&priv->ptp_lock);
442 
443 	return 0;
444 }
445 
nxp_c45_ptp_adjfine(struct ptp_clock_info * ptp,long scaled_ppm)446 static int nxp_c45_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
447 {
448 	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
449 	const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
450 	const struct nxp_c45_regmap *regmap = data->regmap;
451 	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
452 	u64 subns_inc_val;
453 	bool inc;
454 
455 	mutex_lock(&priv->ptp_lock);
456 	inc = ppb >= 0;
457 	ppb = abs(ppb);
458 
459 	subns_inc_val = PPM_TO_SUBNS_INC(ppb, data->ptp_clk_period);
460 
461 	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
462 		      regmap->vend1_rate_adj_subns_0,
463 		      subns_inc_val);
464 	subns_inc_val >>= 16;
465 	subns_inc_val |= CLK_RATE_ADJ_LD;
466 	if (inc)
467 		subns_inc_val |= CLK_RATE_ADJ_DIR;
468 
469 	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
470 		      regmap->vend1_rate_adj_subns_1,
471 		      subns_inc_val);
472 	mutex_unlock(&priv->ptp_lock);
473 
474 	return 0;
475 }
476 
/* Step the LTC by @delta ns via a locked read-modify-write of the counter. */
static int nxp_c45_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	struct timespec64 cur;

	mutex_lock(&priv->ptp_lock);
	_nxp_c45_ptp_gettimex64(ptp, &cur, NULL);
	cur = timespec64_add(cur, ns_to_timespec64(delta));
	_nxp_c45_ptp_settime64(ptp, &cur);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}
491 
/* Combine the full LTC time in @ts with the truncated timestamp in @hwts.
 *
 * The nanoseconds come straight from hardware. For the seconds only the
 * TS_SEC_MASK low bits come from hardware; the upper bits are taken from
 * the current LTC reading. If the hardware low bits are ahead of the
 * LTC's low bits, the seconds counter wrapped between the event and the
 * LTC read, so step the LTC seconds back by one wrap period first.
 */
static void nxp_c45_reconstruct_ts(struct timespec64 *ts,
				   struct nxp_c45_hwts *hwts)
{
	ts->tv_nsec = hwts->nsec;
	/* Event low bits > current low bits => counter wrapped after event. */
	if ((ts->tv_sec & TS_SEC_MASK) < (hwts->sec & TS_SEC_MASK))
		ts->tv_sec -= TS_SEC_MASK + 1;
	ts->tv_sec &= ~TS_SEC_MASK;
	ts->tv_sec |= hwts->sec & TS_SEC_MASK;
}
501 
nxp_c45_match_ts(struct ptp_header * header,struct nxp_c45_hwts * hwts,unsigned int type)502 static bool nxp_c45_match_ts(struct ptp_header *header,
503 			     struct nxp_c45_hwts *hwts,
504 			     unsigned int type)
505 {
506 	return ntohs(header->sequence_id) == hwts->sequence_id &&
507 	       ptp_get_msgtype(header, type) == hwts->msg_type &&
508 	       header->domain_number  == hwts->domain_number;
509 }
510 
nxp_c45_get_extts(struct nxp_c45_phy * priv,struct timespec64 * extts)511 static bool nxp_c45_get_extts(struct nxp_c45_phy *priv,
512 			      struct timespec64 *extts)
513 {
514 	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
515 
516 	extts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
517 				      regmap->vend1_ext_trg_data_0);
518 	extts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
519 				       regmap->vend1_ext_trg_data_1) << 16;
520 	extts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
521 				     regmap->vend1_ext_trg_data_2);
522 	extts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
523 				      regmap->vend1_ext_trg_data_3) << 16;
524 	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
525 		      regmap->vend1_ext_trg_ctrl, RING_DONE);
526 
527 	return true;
528 }
529 
tja1120_extts_is_valid(struct phy_device * phydev)530 static bool tja1120_extts_is_valid(struct phy_device *phydev)
531 {
532 	bool valid;
533 	int reg;
534 
535 	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1,
536 			   TJA1120_VEND1_PTP_TRIG_DATA_S);
537 	valid = !!(reg & TJA1120_TS_VALID);
538 
539 	return valid;
540 }
541 
/* TJA1120 variant of get_extts: check the VALID flag before reading, and
 * work around engineering samples that leave a fresh timestamp stuck in
 * the FIFO instead of the read buffer. Returns false when no valid
 * timestamp could be fetched.
 */
static bool tja1120_get_extts(struct nxp_c45_phy *priv,
			      struct timespec64 *extts)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
	struct phy_device *phydev = priv->phydev;
	bool more_ts;
	bool valid;
	u16 reg;

	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1,
			   regmap->vend1_ext_trg_ctrl);
	more_ts = !!(reg & TJA1120_MORE_TS);

	valid = tja1120_extts_is_valid(phydev);
	if (!valid) {
		if (!more_ts)
			goto tja1120_get_extts_out;

		/* Bug workaround for TJA1120 engineering samples: move the new
		 * timestamp from the FIFO to the buffer.
		 */
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      regmap->vend1_ext_trg_ctrl, RING_DONE);
		/* Retry once after advancing the ring. */
		valid = tja1120_extts_is_valid(phydev);
		if (!valid)
			goto tja1120_get_extts_out;
	}

	nxp_c45_get_extts(priv, extts);
tja1120_get_extts_out:
	return valid;
}
574 
nxp_c45_read_egress_ts(struct nxp_c45_phy * priv,struct nxp_c45_hwts * hwts)575 static void nxp_c45_read_egress_ts(struct nxp_c45_phy *priv,
576 				   struct nxp_c45_hwts *hwts)
577 {
578 	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
579 	struct phy_device *phydev = priv->phydev;
580 
581 	hwts->domain_number =
582 		nxp_c45_read_reg_field(phydev, ®map->domain_number);
583 	hwts->msg_type =
584 		nxp_c45_read_reg_field(phydev, ®map->msg_type);
585 	hwts->sequence_id =
586 		nxp_c45_read_reg_field(phydev, ®map->sequence_id);
587 	hwts->nsec =
588 		nxp_c45_read_reg_field(phydev, ®map->nsec_15_0);
589 	hwts->nsec |=
590 		nxp_c45_read_reg_field(phydev, ®map->nsec_29_16) << 16;
591 	hwts->sec = nxp_c45_read_reg_field(phydev, ®map->sec_1_0);
592 	hwts->sec |= nxp_c45_read_reg_field(phydev, ®map->sec_4_2) << 2;
593 }
594 
nxp_c45_get_hwtxts(struct nxp_c45_phy * priv,struct nxp_c45_hwts * hwts)595 static bool nxp_c45_get_hwtxts(struct nxp_c45_phy *priv,
596 			       struct nxp_c45_hwts *hwts)
597 {
598 	bool valid;
599 	u16 reg;
600 
601 	mutex_lock(&priv->ptp_lock);
602 	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_CTRL,
603 		      RING_DONE);
604 	reg = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_DATA_0);
605 	valid = !!(reg & RING_DATA_0_TS_VALID);
606 	if (!valid)
607 		goto nxp_c45_get_hwtxts_out;
608 
609 	nxp_c45_read_egress_ts(priv, hwts);
610 nxp_c45_get_hwtxts_out:
611 	mutex_unlock(&priv->ptp_lock);
612 	return valid;
613 }
614 
tja1120_egress_ts_is_valid(struct phy_device * phydev)615 static bool tja1120_egress_ts_is_valid(struct phy_device *phydev)
616 {
617 	bool valid;
618 	u16 reg;
619 
620 	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_DATA_S);
621 	valid = !!(reg & TJA1120_TS_VALID);
622 
623 	return valid;
624 }
625 
/* TJA1120 variant of get_egressts: check the VALID flag, apply the
 * engineering-sample FIFO workaround, and clear VALID once the timestamp
 * has been consumed. Returns false when nothing valid was available.
 */
static bool tja1120_get_hwtxts(struct nxp_c45_phy *priv,
			       struct nxp_c45_hwts *hwts)
{
	struct phy_device *phydev = priv->phydev;
	bool more_ts;
	bool valid;
	u16 reg;

	mutex_lock(&priv->ptp_lock);
	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_END);
	more_ts = !!(reg & TJA1120_MORE_TS);
	valid = tja1120_egress_ts_is_valid(phydev);
	if (!valid) {
		if (!more_ts)
			goto tja1120_get_hwtxts_out;

		/* Bug workaround for TJA1120 engineering samples: move the
		 * new timestamp from the FIFO to the buffer.
		 */
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      TJA1120_EGRESS_TS_END, TJA1120_TS_VALID);
		/* Retry once after the workaround write. */
		valid = tja1120_egress_ts_is_valid(phydev);
		if (!valid)
			goto tja1120_get_hwtxts_out;
	}
	nxp_c45_read_egress_ts(priv, hwts);
	/* Mark the buffer entry as consumed. */
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_DATA_S,
			   TJA1120_TS_VALID);
tja1120_get_hwtxts_out:
	mutex_unlock(&priv->ptp_lock);
	return valid;
}
658 
/* Match a hardware TX timestamp against the queued skbs and complete the
 * matching one; unmatched timestamps are only logged.
 */
static void nxp_c45_process_txts(struct nxp_c45_phy *priv,
				 struct nxp_c45_hwts *txts)
{
	struct sk_buff *skb, *tmp, *skb_match = NULL;
	struct skb_shared_hwtstamps shhwtstamps;
	struct timespec64 ts;
	unsigned long flags;
	bool ts_match;
	s64 ts_ns;

	/* Walk the TX queue under its lock and unlink the first skb whose
	 * PTP header matches the timestamp's sequence id/msg type/domain.
	 */
	spin_lock_irqsave(&priv->tx_queue.lock, flags);
	skb_queue_walk_safe(&priv->tx_queue, skb, tmp) {
		ts_match = nxp_c45_match_ts(NXP_C45_SKB_CB(skb)->header, txts,
					    NXP_C45_SKB_CB(skb)->type);
		if (!ts_match)
			continue;
		skb_match = skb;
		__skb_unlink(skb, &priv->tx_queue);
		break;
	}
	spin_unlock_irqrestore(&priv->tx_queue.lock, flags);

	if (skb_match) {
		/* Expand the truncated hardware timestamp against the
		 * current LTC time before completing the skb.
		 */
		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
		nxp_c45_reconstruct_ts(&ts, txts);
		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		ts_ns = timespec64_to_ns(&ts);
		shhwtstamps.hwtstamp = ns_to_ktime(ts_ns);
		skb_complete_tx_timestamp(skb_match, &shhwtstamps);
	} else {
		phydev_warn(priv->phydev,
			    "the tx timestamp doesn't match with any skb\n");
	}
}
693 
/* PTP aux worker: drains egress timestamps when the PHY must be polled,
 * attaches reconstructed timestamps to queued RX skbs, and polls the
 * external trigger while it is enabled. Returns a positive delay to be
 * rescheduled, or a negative value when no rescheduling is needed.
 */
static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
	bool poll_txts = nxp_c45_poll_txts(priv->phydev);
	struct skb_shared_hwtstamps *shhwtstamps_rx;
	struct ptp_clock_event event;
	struct nxp_c45_hwts hwts;
	bool reschedule = false;
	struct timespec64 ts;
	struct sk_buff *skb;
	bool ts_valid;
	u32 ts_raw;

	/* TX: fetch egress timestamps and complete the matching skbs. */
	while (!skb_queue_empty_lockless(&priv->tx_queue) && poll_txts) {
		ts_valid = data->get_egressts(priv, &hwts);
		if (unlikely(!ts_valid)) {
			/* Still more skbs in the queue */
			reschedule = true;
			break;
		}

		nxp_c45_process_txts(priv, &hwts);
	}

	/* RX: the raw timestamp sits in the PTP header's reserved2 field
	 * (2 bits of seconds, 30 bits of nanoseconds — presumably inserted
	 * by the PHY's RX timestamp insertion feature); expand it against
	 * the current LTC time and deliver the skb.
	 */
	while ((skb = skb_dequeue(&priv->rx_queue)) != NULL) {
		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
		ts_raw = __be32_to_cpu(NXP_C45_SKB_CB(skb)->header->reserved2);
		hwts.sec = ts_raw >> 30;
		hwts.nsec = ts_raw & GENMASK(29, 0);
		nxp_c45_reconstruct_ts(&ts, &hwts);
		shhwtstamps_rx = skb_hwtstamps(skb);
		shhwtstamps_rx->hwtstamp = ns_to_ktime(timespec64_to_ns(&ts));
		/* Restore the header field before handing the skb up. */
		NXP_C45_SKB_CB(skb)->header->reserved2 = 0;
		netif_rx(skb);
	}

	/* EXTTS: report an event only when the timestamp changed since the
	 * last poll; keep rescheduling while the feature is enabled.
	 */
	if (priv->extts) {
		ts_valid = data->get_extts(priv, &ts);
		if (ts_valid && timespec64_compare(&ts, &priv->extts_ts) != 0) {
			priv->extts_ts = ts;
			event.index = priv->extts_index;
			event.type = PTP_CLOCK_EXTTS;
			event.timestamp = ns_to_ktime(timespec64_to_ns(&ts));
			ptp_clock_event(priv->ptp_clock, &event);
		}
		reschedule = true;
	}

	return reschedule ? 1 : -1;
}
745 
/* Program the function-configuration register of the given GPIO pin. */
static void nxp_c45_gpio_config(struct nxp_c45_phy *priv,
				int pin, u16 pin_cfg)
{
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
		      VEND1_GPIO_FUNC_CONFIG_BASE + pin, pin_cfg);
}
754 
nxp_c45_perout_enable(struct nxp_c45_phy * priv,struct ptp_perout_request * perout,int on)755 static int nxp_c45_perout_enable(struct nxp_c45_phy *priv,
756 				 struct ptp_perout_request *perout, int on)
757 {
758 	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
759 	struct phy_device *phydev = priv->phydev;
760 	int pin;
761 
762 	if (perout->flags & ~PTP_PEROUT_PHASE)
763 		return -EOPNOTSUPP;
764 
765 	pin = ptp_find_pin(priv->ptp_clock, PTP_PF_PEROUT, perout->index);
766 	if (pin < 0)
767 		return pin;
768 
769 	if (!on) {
770 		nxp_c45_clear_reg_field(priv->phydev,
771 					®map->pps_enable);
772 		nxp_c45_clear_reg_field(priv->phydev,
773 					®map->pps_polarity);
774 
775 		nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);
776 
777 		return 0;
778 	}
779 
780 	/* The PPS signal is fixed to 1 second and is always generated when the
781 	 * seconds counter is incremented. The start time is not configurable.
782 	 * If the clock is adjusted, the PPS signal is automatically readjusted.
783 	 */
784 	if (perout->period.sec != 1 || perout->period.nsec != 0) {
785 		phydev_warn(phydev, "The period can be set only to 1 second.");
786 		return -EINVAL;
787 	}
788 
789 	if (!(perout->flags & PTP_PEROUT_PHASE)) {
790 		if (perout->start.sec != 0 || perout->start.nsec != 0) {
791 			phydev_warn(phydev, "The start time is not configurable. Should be set to 0 seconds and 0 nanoseconds.");
792 			return -EINVAL;
793 		}
794 	} else {
795 		if (perout->phase.nsec != 0 &&
796 		    perout->phase.nsec != (NSEC_PER_SEC >> 1)) {
797 			phydev_warn(phydev, "The phase can be set only to 0 or 500000000 nanoseconds.");
798 			return -EINVAL;
799 		}
800 
801 		if (perout->phase.nsec == 0)
802 			nxp_c45_clear_reg_field(priv->phydev,
803 						®map->pps_polarity);
804 		else
805 			nxp_c45_set_reg_field(priv->phydev,
806 					      ®map->pps_polarity);
807 	}
808 
809 	nxp_c45_gpio_config(priv, pin, GPIO_PPS_OUT_CFG);
810 
811 	nxp_c45_set_reg_field(priv->phydev, ®map->pps_enable);
812 
813 	return 0;
814 }
815 
/* Select which single edge of the external trigger is timestamped:
 * EXT_TRG_EDGE cleared selects rising, set selects falling. A request
 * with neither flag leaves the register unchanged.
 */
static void nxp_c45_set_rising_or_falling(struct phy_device *phydev,
					  struct ptp_extts_request *extts)
{
	if (extts->flags & PTP_RISING_EDGE)
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   VEND1_PTP_CONFIG, EXT_TRG_EDGE);

	if (extts->flags & PTP_FALLING_EDGE)
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 VEND1_PTP_CONFIG, EXT_TRG_EDGE);
}
827 
nxp_c45_set_rising_and_falling(struct phy_device * phydev,struct ptp_extts_request * extts)828 static void nxp_c45_set_rising_and_falling(struct phy_device *phydev,
829 					   struct ptp_extts_request *extts)
830 {
831 	/* PTP_EXTTS_REQUEST may have only the PTP_ENABLE_FEATURE flag set. In
832 	 * this case external ts will be enabled on rising edge.
833 	 */
834 	if (extts->flags & PTP_RISING_EDGE ||
835 	    extts->flags == PTP_ENABLE_FEATURE)
836 		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
837 				 TJA1120_SYNC_TRIG_FILTER,
838 				 PTP_TRIG_RISE_TS);
839 	else
840 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
841 				   TJA1120_SYNC_TRIG_FILTER,
842 				   PTP_TRIG_RISE_TS);
843 
844 	if (extts->flags & PTP_FALLING_EDGE)
845 		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
846 				 TJA1120_SYNC_TRIG_FILTER,
847 				 PTP_TRIG_FALLING_TS);
848 	else
849 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
850 				   TJA1120_SYNC_TRIG_FILTER,
851 				   PTP_TRIG_FALLING_TS);
852 }
853 
nxp_c45_extts_enable(struct nxp_c45_phy * priv,struct ptp_extts_request * extts,int on)854 static int nxp_c45_extts_enable(struct nxp_c45_phy *priv,
855 				struct ptp_extts_request *extts, int on)
856 {
857 	const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
858 	int pin;
859 
860 	if (extts->flags & ~(PTP_ENABLE_FEATURE |
861 			      PTP_RISING_EDGE |
862 			      PTP_FALLING_EDGE |
863 			      PTP_STRICT_FLAGS))
864 		return -EOPNOTSUPP;
865 
866 	/* Sampling on both edges is not supported */
867 	if ((extts->flags & PTP_RISING_EDGE) &&
868 	    (extts->flags & PTP_FALLING_EDGE) &&
869 	    !data->ext_ts_both_edges)
870 		return -EOPNOTSUPP;
871 
872 	pin = ptp_find_pin(priv->ptp_clock, PTP_PF_EXTTS, extts->index);
873 	if (pin < 0)
874 		return pin;
875 
876 	if (!on) {
877 		nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);
878 		priv->extts = false;
879 
880 		return 0;
881 	}
882 
883 	if (data->ext_ts_both_edges)
884 		nxp_c45_set_rising_and_falling(priv->phydev, extts);
885 	else
886 		nxp_c45_set_rising_or_falling(priv->phydev, extts);
887 
888 	nxp_c45_gpio_config(priv, pin, GPIO_EXTTS_OUT_CFG);
889 	priv->extts = true;
890 	priv->extts_index = extts->index;
891 	ptp_schedule_worker(priv->ptp_clock, 0);
892 
893 	return 0;
894 }
895 
nxp_c45_ptp_enable(struct ptp_clock_info * ptp,struct ptp_clock_request * req,int on)896 static int nxp_c45_ptp_enable(struct ptp_clock_info *ptp,
897 			      struct ptp_clock_request *req, int on)
898 {
899 	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
900 
901 	switch (req->type) {
902 	case PTP_CLK_REQ_EXTTS:
903 		return nxp_c45_extts_enable(priv, &req->extts, on);
904 	case PTP_CLK_REQ_PEROUT:
905 		return nxp_c45_perout_enable(priv, &req->perout, on);
906 	default:
907 		return -EOPNOTSUPP;
908 	}
909 }
910 
/* The twelve GPIO pins exposed through the PTP pin interface; userspace
 * assigns PEROUT/EXTTS functions to them (see nxp_c45_ptp_verify_pin()).
 */
static struct ptp_pin_desc nxp_c45_ptp_pins[] = {
	{ "nxp_c45_gpio0", 0, PTP_PF_NONE},
	{ "nxp_c45_gpio1", 1, PTP_PF_NONE},
	{ "nxp_c45_gpio2", 2, PTP_PF_NONE},
	{ "nxp_c45_gpio3", 3, PTP_PF_NONE},
	{ "nxp_c45_gpio4", 4, PTP_PF_NONE},
	{ "nxp_c45_gpio5", 5, PTP_PF_NONE},
	{ "nxp_c45_gpio6", 6, PTP_PF_NONE},
	{ "nxp_c45_gpio7", 7, PTP_PF_NONE},
	{ "nxp_c45_gpio8", 8, PTP_PF_NONE},
	{ "nxp_c45_gpio9", 9, PTP_PF_NONE},
	{ "nxp_c45_gpio10", 10, PTP_PF_NONE},
	{ "nxp_c45_gpio11", 11, PTP_PF_NONE},
};
925 
nxp_c45_ptp_verify_pin(struct ptp_clock_info * ptp,unsigned int pin,enum ptp_pin_function func,unsigned int chan)926 static int nxp_c45_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
927 				  enum ptp_pin_function func, unsigned int chan)
928 {
929 	if (pin >= ARRAY_SIZE(nxp_c45_ptp_pins))
930 		return -EINVAL;
931 
932 	switch (func) {
933 	case PTP_PF_NONE:
934 	case PTP_PF_PEROUT:
935 	case PTP_PF_EXTTS:
936 		break;
937 	default:
938 		return -EOPNOTSUPP;
939 	}
940 
941 	return 0;
942 }
943 
/* Fill in the PTP clock capabilities and register the PHC with the PTP
 * core. Returns 0 on success or a negative errno; a NULL clock from
 * ptp_clock_register() is mapped to -ENOMEM.
 */
static int nxp_c45_init_ptp_clock(struct nxp_c45_phy *priv)
{
	priv->caps = (struct ptp_clock_info) {
		.owner		= THIS_MODULE,
		.name		= "NXP C45 PHC",
		.max_adj	= 16666666,
		.adjfine	= nxp_c45_ptp_adjfine,
		.adjtime	= nxp_c45_ptp_adjtime,
		.gettimex64	= nxp_c45_ptp_gettimex64,
		.settime64	= nxp_c45_ptp_settime64,
		.enable		= nxp_c45_ptp_enable,
		.verify		= nxp_c45_ptp_verify_pin,
		.do_aux_work	= nxp_c45_do_aux_work,
		.pin_config	= nxp_c45_ptp_pins,
		.n_pins		= ARRAY_SIZE(nxp_c45_ptp_pins),
		.n_ext_ts	= 1,
		.n_per_out	= 1,
	};

	priv->ptp_clock = ptp_clock_register(&priv->caps,
					     &priv->phydev->mdio.dev);

	if (IS_ERR(priv->ptp_clock))
		return PTR_ERR(priv->ptp_clock);

	if (!priv->ptp_clock)
		return -ENOMEM;

	return 0;
}
974 
nxp_c45_txtstamp(struct mii_timestamper * mii_ts,struct sk_buff * skb,int type)975 static void nxp_c45_txtstamp(struct mii_timestamper *mii_ts,
976 			     struct sk_buff *skb, int type)
977 {
978 	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
979 						mii_ts);
980 
981 	switch (priv->hwts_tx) {
982 	case HWTSTAMP_TX_ON:
983 		NXP_C45_SKB_CB(skb)->type = type;
984 		NXP_C45_SKB_CB(skb)->header = ptp_parse_header(skb, type);
985 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
986 		skb_queue_tail(&priv->tx_queue, skb);
987 		if (nxp_c45_poll_txts(priv->phydev))
988 			ptp_schedule_worker(priv->ptp_clock, 0);
989 		break;
990 	case HWTSTAMP_TX_OFF:
991 	default:
992 		kfree_skb(skb);
993 		break;
994 	}
995 }
996 
nxp_c45_rxtstamp(struct mii_timestamper * mii_ts,struct sk_buff * skb,int type)997 static bool nxp_c45_rxtstamp(struct mii_timestamper *mii_ts,
998 			     struct sk_buff *skb, int type)
999 {
1000 	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
1001 						mii_ts);
1002 	struct ptp_header *header = ptp_parse_header(skb, type);
1003 
1004 	if (!header)
1005 		return false;
1006 
1007 	if (!priv->hwts_rx)
1008 		return false;
1009 
1010 	NXP_C45_SKB_CB(skb)->header = header;
1011 	skb_queue_tail(&priv->rx_queue, skb);
1012 	ptp_schedule_worker(priv->ptp_clock, 0);
1013 
1014 	return true;
1015 }
1016 
/* mii_timestamper .hwtstamp callback: apply a hardware timestamping
 * configuration requested from user space.
 * Returns 0 on success, -ERANGE for an unsupported tx_type/rx_filter.
 */
static int nxp_c45_hwtstamp(struct mii_timestamper *mii_ts,
			    struct kernel_hwtstamp_config *cfg,
			    struct netlink_ext_ack *extack)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);
	struct phy_device *phydev = priv->phydev;
	const struct nxp_c45_phy_data *data;

	if (cfg->tx_type < 0 || cfg->tx_type > HWTSTAMP_TX_ON)
		return -ERANGE;

	data = nxp_c45_get_data(phydev);
	priv->hwts_tx = cfg->tx_type;

	switch (cfg->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		priv->hwts_rx = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		/* HW timestamps all L2 PTP events; report the broader
		 * filter actually in effect back to user space.
		 */
		priv->hwts_rx = 1;
		cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	/* Enable the PTP engine and event message filtering only while
	 * at least one direction is being timestamped.
	 */
	if (priv->hwts_rx || priv->hwts_tx) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      data->regmap->vend1_event_msg_filt,
			      EVENT_MSG_FILT_ALL);
		data->ptp_enable(phydev, true);
	} else {
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      data->regmap->vend1_event_msg_filt,
			      EVENT_MSG_FILT_NONE);
		data->ptp_enable(phydev, false);
	}

	/* PHYs polled for egress timestamps do not use the TS IRQ. */
	if (nxp_c45_poll_txts(priv->phydev))
		goto nxp_c45_no_ptp_irq;

	if (priv->hwts_tx)
		nxp_c45_set_reg_field(phydev, &data->regmap->irq_egr_ts_en);
	else
		nxp_c45_clear_reg_field(phydev, &data->regmap->irq_egr_ts_en);

nxp_c45_no_ptp_irq:
	return 0;
}
1069 
nxp_c45_ts_info(struct mii_timestamper * mii_ts,struct kernel_ethtool_ts_info * ts_info)1070 static int nxp_c45_ts_info(struct mii_timestamper *mii_ts,
1071 			   struct kernel_ethtool_ts_info *ts_info)
1072 {
1073 	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
1074 						mii_ts);
1075 
1076 	ts_info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
1077 			SOF_TIMESTAMPING_RX_HARDWARE |
1078 			SOF_TIMESTAMPING_RAW_HARDWARE;
1079 	ts_info->phc_index = ptp_clock_index(priv->ptp_clock);
1080 	ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
1081 	ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
1082 			(1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
1083 			(1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
1084 			(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT);
1085 
1086 	return 0;
1087 }
1088 
/* Link-quality counters present on all supported PHYs (VEND1 space). */
static const struct nxp_c45_phy_stats common_hw_stats[] = {
	{ "phy_link_status_drop_cnt",
		NXP_C45_REG_FIELD(0x8352, MDIO_MMD_VEND1, 8, 6), },
	{ "phy_link_availability_drop_cnt",
		NXP_C45_REG_FIELD(0x8352, MDIO_MMD_VEND1, 0, 6), },
	{ "phy_link_loss_cnt",
		NXP_C45_REG_FIELD(0x8353, MDIO_MMD_VEND1, 10, 6), },
	{ "phy_link_failure_cnt",
		NXP_C45_REG_FIELD(0x8353, MDIO_MMD_VEND1, 0, 10), },
	{ "phy_symbol_error_cnt",
		NXP_C45_REG_FIELD(0x8350, MDIO_MMD_VEND1, 0, 16) },
};
1101 
/* Counters specific to the TJA1103/TJA1104 family. */
static const struct nxp_c45_phy_stats tja1103_hw_stats[] = {
	{ "rx_preamble_count",
		NXP_C45_REG_FIELD(0xAFCE, MDIO_MMD_VEND1, 0, 6), },
	{ "tx_preamble_count",
		NXP_C45_REG_FIELD(0xAFCF, MDIO_MMD_VEND1, 0, 6), },
	{ "rx_ipg_length",
		NXP_C45_REG_FIELD(0xAFD0, MDIO_MMD_VEND1, 0, 9), },
	{ "tx_ipg_length",
		NXP_C45_REG_FIELD(0xAFD1, MDIO_MMD_VEND1, 0, 9), },
};
1112 
/* Counters specific to the TJA1120; the "_xtd" entries extend the
 * 16-bit base counters with 8 more significant bits.
 */
static const struct nxp_c45_phy_stats tja1120_hw_stats[] = {
	{ "phy_symbol_error_cnt_ext",
		NXP_C45_REG_FIELD(0x8351, MDIO_MMD_VEND1, 0, 14) },
	{ "tx_frames_xtd",
		NXP_C45_REG_FIELD(0xACA1, MDIO_MMD_VEND1, 0, 8), },
	{ "tx_frames",
		NXP_C45_REG_FIELD(0xACA0, MDIO_MMD_VEND1, 0, 16), },
	{ "rx_frames_xtd",
		NXP_C45_REG_FIELD(0xACA3, MDIO_MMD_VEND1, 0, 8), },
	{ "rx_frames",
		NXP_C45_REG_FIELD(0xACA2, MDIO_MMD_VEND1, 0, 16), },
	{ "tx_lost_frames_xtd",
		NXP_C45_REG_FIELD(0xACA5, MDIO_MMD_VEND1, 0, 8), },
	{ "tx_lost_frames",
		NXP_C45_REG_FIELD(0xACA4, MDIO_MMD_VEND1, 0, 16), },
	{ "rx_lost_frames_xtd",
		NXP_C45_REG_FIELD(0xACA7, MDIO_MMD_VEND1, 0, 8), },
	{ "rx_lost_frames",
		NXP_C45_REG_FIELD(0xACA6, MDIO_MMD_VEND1, 0, 16), },
};
1133 
nxp_c45_get_sset_count(struct phy_device * phydev)1134 static int nxp_c45_get_sset_count(struct phy_device *phydev)
1135 {
1136 	const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
1137 
1138 	return ARRAY_SIZE(common_hw_stats) + (phy_data ? phy_data->n_stats : 0);
1139 }
1140 
/* ethtool: emit the counter names, common ones first, then the
 * PHY-specific ones, each in its own ETH_GSTRING_LEN slot.
 */
static void nxp_c45_get_strings(struct phy_device *phydev, u8 *data)
{
	const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
	size_t ncommon = ARRAY_SIZE(common_hw_stats);
	size_t total = nxp_c45_get_sset_count(phydev);
	size_t i;

	for (i = 0; i < ncommon; i++)
		strscpy(data + i * ETH_GSTRING_LEN,
			common_hw_stats[i].name, ETH_GSTRING_LEN);

	for (; i < total; i++)
		strscpy(data + i * ETH_GSTRING_LEN,
			phy_data->stats[i - ncommon].name, ETH_GSTRING_LEN);
}
1159 
/* ethtool: read every counter, in the same order as the strings; a
 * failed register read is reported as an all-ones value.
 */
static void nxp_c45_get_stats(struct phy_device *phydev,
			      struct ethtool_stats *stats, u64 *data)
{
	const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
	size_t ncommon = ARRAY_SIZE(common_hw_stats);
	size_t total = nxp_c45_get_sset_count(phydev);
	const struct nxp_c45_reg_field *field;
	size_t i;
	int val;

	for (i = 0; i < total; i++) {
		field = i < ncommon ?
			&common_hw_stats[i].counter :
			&phy_data->stats[i - ncommon].counter;

		val = nxp_c45_read_reg_field(phydev, field);
		data[i] = val < 0 ? U64_MAX : val;
	}
}
1185 
/* Put the device into configuration mode: enable global/all config
 * access, then enable the port, PHY and port-infra blocks for
 * configuration. The short sleep lets the config enable settle.
 */
static int nxp_c45_config_enable(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
		      DEVICE_CONTROL_CONFIG_GLOBAL_EN |
		      DEVICE_CONTROL_CONFIG_ALL_EN);
	usleep_range(400, 450);

	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_CONTROL,
		      PORT_CONTROL_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
		      PHY_CONFIG_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_INFRA_CONTROL,
		      PORT_INFRA_CONTROL_EN);

	return 0;
}
1202 
/* Leave configuration mode and start normal PHY operation. */
static int nxp_c45_start_op(struct phy_device *phydev)
{
	return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
				PHY_START_OP);
}
1208 
nxp_c45_config_intr(struct phy_device * phydev)1209 static int nxp_c45_config_intr(struct phy_device *phydev)
1210 {
1211 	int ret;
1212 
1213 	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
1214 		ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1215 				       VEND1_PORT_FUNC_IRQ_EN, MACSEC_IRQS);
1216 		if (ret)
1217 			return ret;
1218 
1219 		return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1220 					VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
1221 	}
1222 
1223 	ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1224 				 VEND1_PORT_FUNC_IRQ_EN, MACSEC_IRQS);
1225 	if (ret)
1226 		return ret;
1227 
1228 	return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1229 				  VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
1230 }
1231 
/* TJA1103 .config_intr: acknowledge any pending FUSA indication first
 * (it cannot be masked on this part), then apply the common setup.
 */
static int tja1103_config_intr(struct phy_device *phydev)
{
	int ret;

	/* We can't disable the FUSA IRQ for TJA1103, but we can clean it up. */
	ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_ALWAYS_ACCESSIBLE,
			    FUSA_PASS);
	if (ret)
		return ret;

	return nxp_c45_config_intr(phydev);
}
1244 
/* TJA1120 .config_intr: switch the part-specific boot-done IRQ along
 * with the common interrupt sources.
 */
static int tja1120_config_intr(struct phy_device *phydev)
{
	int ret;

	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
		ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				       TJA1120_GLOBAL_INFRA_IRQ_EN,
				       TJA1120_DEV_BOOT_DONE);
	else
		ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
					 TJA1120_GLOBAL_INFRA_IRQ_EN,
					 TJA1120_DEV_BOOT_DONE);
	if (ret)
		return ret;

	return nxp_c45_config_intr(phydev);
}
1262 
/* Threaded IRQ handler: service link events, egress timestamps, the
 * part-specific NMI sources and MACsec, accumulating IRQ_HANDLED.
 */
static irqreturn_t nxp_c45_handle_interrupt(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);
	struct nxp_c45_phy *priv = phydev->priv;
	irqreturn_t ret = IRQ_NONE;
	struct nxp_c45_hwts hwts;
	int irq;

	irq = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_STATUS);
	if (irq & PHY_IRQ_LINK_EVENT) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_ACK,
			      PHY_IRQ_LINK_EVENT);
		phy_trigger_machine(phydev);
		ret = IRQ_HANDLED;
	}

	irq = nxp_c45_read_reg_field(phydev, &data->regmap->irq_egr_ts_status);
	if (irq) {
		/* If ack_ptp_irq is false, the IRQ bit is self-clear and will
		 * be cleared when the EGR TS FIFO is empty. Otherwise, the
		 * IRQ bit should be cleared before reading the timestamp.
		 */
		if (data->ack_ptp_irq)
			phy_write_mmd(phydev, MDIO_MMD_VEND1,
				      VEND1_PTP_IRQ_ACK, EGR_TS_IRQ);
		/* Drain all pending egress timestamps and match them to
		 * the queued TX skbs.
		 */
		while (data->get_egressts(priv, &hwts))
			nxp_c45_process_txts(priv, &hwts);

		ret = IRQ_HANDLED;
	}

	data->nmi_handler(phydev, &ret);
	nxp_c45_handle_macsec_interrupt(phydev, &ret);

	return ret;
}
1299 
/* Issue a device soft reset and poll until the self-clearing reset
 * bit deasserts (up to 240 ms, first poll after 20 ms).
 */
static int nxp_c45_soft_reset(struct phy_device *phydev)
{
	int ret;

	ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
			    DEVICE_CONTROL_RESET);
	if (ret)
		return ret;

	/* Give the device time to begin the reset before polling. */
	usleep_range(2000, 2050);

	return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
					 VEND1_DEVICE_CONTROL, ret,
					 !(ret & DEVICE_CONTROL_RESET), 20000,
					 240000, false);
}
1316 
/* Start the cable diagnostic: enable the PHY test function and kick
 * off a cable test run.
 */
static int nxp_c45_cable_test_start(struct phy_device *phydev)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(phydev);

	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
			 VEND1_PORT_FUNC_ENABLES, PHY_TEST_ENABLE);
	return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, regmap->cable_test,
				CABLE_TEST_ENABLE | CABLE_TEST_START);
}
1326 
/* Poll the cable test: report not-finished while the valid bit is
 * clear, otherwise translate the HW result code to ethtool, stop the
 * test function and resume normal operation.
 * (Fixes mojibake in the source: "®map->" was a corrupted "&regmap->".)
 */
static int nxp_c45_cable_test_get_status(struct phy_device *phydev,
					 bool *finished)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(phydev);
	u8 cable_test_result;
	int ret;

	ret = nxp_c45_read_reg_field(phydev, &regmap->cable_test_valid);
	if (!ret) {
		*finished = false;
		return 0;
	}

	*finished = true;
	cable_test_result = nxp_c45_read_reg_field(phydev,
						   &regmap->cable_test_result);

	switch (cable_test_result) {
	case CABLE_TEST_OK:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_OK);
		break;
	case CABLE_TEST_SHORTED:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT);
		break;
	case CABLE_TEST_OPEN:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_OPEN);
		break;
	default:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC);
	}

	/* Tear the test function down and restart normal operation. */
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, regmap->cable_test,
			   CABLE_TEST_ENABLE);
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
			   VEND1_PORT_FUNC_ENABLES, PHY_TEST_ENABLE);

	return nxp_c45_start_op(phydev);
}
1369 
nxp_c45_get_sqi(struct phy_device * phydev)1370 static int nxp_c45_get_sqi(struct phy_device *phydev)
1371 {
1372 	int reg;
1373 
1374 	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_SIGNAL_QUALITY);
1375 	if (!(reg & SQI_VALID))
1376 		return -EINVAL;
1377 
1378 	reg &= SQI_MASK;
1379 
1380 	return reg;
1381 }
1382 
/* TJA1120 .link_change_notify: toggle the EPHY PCS reset on link
 * loss as a HW bug workaround.
 */
static void tja1120_link_change_notify(struct phy_device *phydev)
{
	/* Bug workaround for TJA1120 engineering samples: fix egress
	 * timestamps lost after link recovery.
	 */
	if (phydev->state == PHY_NOLINK) {
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 TJA1120_EPHY_RESETS, EPHY_PCS_RESET);
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   TJA1120_EPHY_RESETS, EPHY_PCS_RESET);
	}
}
1395 
/* Highest SQI value the hardware can report. */
static int nxp_c45_get_sqi_max(struct phy_device *phydev)
{
	return MAX_SQI;
}
1400 
/* Validate an RGMII internal delay (picoseconds) against the range
 * the hardware supports; log which bound was violated.
 */
static int nxp_c45_check_delay(struct phy_device *phydev, u32 delay)
{
	if (delay >= MIN_ID_PS && delay <= MAX_ID_PS)
		return 0;

	if (delay < MIN_ID_PS)
		phydev_err(phydev, "delay value smaller than %u\n", MIN_ID_PS);
	else
		phydev_err(phydev, "delay value higher than %u\n", MAX_ID_PS);

	return -EINVAL;
}
1415 
/* Turn on the common link-drop counter, then the PHY-specific
 * counters via the per-part hook.
 */
static void nxp_c45_counters_enable(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);

	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_LINK_DROP_COUNTER,
			 COUNTER_EN);

	data->counters_enable(phydev);
}
1425 
/* Common PTP bring-up: program the LTC clock period, unlock the LTC
 * registers, then run the part-specific init hook.
 */
static void nxp_c45_ptp_init(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);

	phy_write_mmd(phydev, MDIO_MMD_VEND1,
		      data->regmap->vend1_ptp_clk_period,
		      data->ptp_clk_period);
	nxp_c45_clear_reg_field(phydev, &data->regmap->ltc_lock_ctrl);

	data->ptp_init(phydev);
}
1437 
/* Convert a delay in degrees to the raw phase-offset register value.
 * The HW mapping is delay_degrees = 73.8 + raw * 0.9; work in tenths
 * of a degree to invert it with integer arithmetic only.
 */
static u64 nxp_c45_get_phase_shift(u64 phase_offset_raw)
{
	u64 tenths = phase_offset_raw * 10 - 738;

	return div_u64(tenths, 9);
}
1448 
/* Turn off both TX and RX RGMII internal delay lines. */
static void nxp_c45_disable_delays(struct phy_device *phydev)
{
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID, ID_ENABLE);
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID, ID_ENABLE);
}
1454 
nxp_c45_set_delays(struct phy_device * phydev)1455 static void nxp_c45_set_delays(struct phy_device *phydev)
1456 {
1457 	struct nxp_c45_phy *priv = phydev->priv;
1458 	u64 tx_delay = priv->tx_delay;
1459 	u64 rx_delay = priv->rx_delay;
1460 	u64 degree;
1461 
1462 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1463 	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
1464 		degree = div_u64(tx_delay, PS_PER_DEGREE);
1465 		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
1466 			      ID_ENABLE | nxp_c45_get_phase_shift(degree));
1467 	} else {
1468 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
1469 				   ID_ENABLE);
1470 	}
1471 
1472 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1473 	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
1474 		degree = div_u64(rx_delay, PS_PER_DEGREE);
1475 		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
1476 			      ID_ENABLE | nxp_c45_get_phase_shift(degree));
1477 	} else {
1478 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
1479 				   ID_ENABLE);
1480 	}
1481 }
1482 
nxp_c45_get_delays(struct phy_device * phydev)1483 static int nxp_c45_get_delays(struct phy_device *phydev)
1484 {
1485 	struct nxp_c45_phy *priv = phydev->priv;
1486 	int ret;
1487 
1488 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1489 	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
1490 		ret = device_property_read_u32(&phydev->mdio.dev,
1491 					       "tx-internal-delay-ps",
1492 					       &priv->tx_delay);
1493 		if (ret)
1494 			priv->tx_delay = DEFAULT_ID_PS;
1495 
1496 		ret = nxp_c45_check_delay(phydev, priv->tx_delay);
1497 		if (ret) {
1498 			phydev_err(phydev,
1499 				   "tx-internal-delay-ps invalid value\n");
1500 			return ret;
1501 		}
1502 	}
1503 
1504 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
1505 	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
1506 		ret = device_property_read_u32(&phydev->mdio.dev,
1507 					       "rx-internal-delay-ps",
1508 					       &priv->rx_delay);
1509 		if (ret)
1510 			priv->rx_delay = DEFAULT_ID_PS;
1511 
1512 		ret = nxp_c45_check_delay(phydev, priv->rx_delay);
1513 		if (ret) {
1514 			phydev_err(phydev,
1515 				   "rx-internal-delay-ps invalid value\n");
1516 			return ret;
1517 		}
1518 	}
1519 
1520 	return 0;
1521 }
1522 
/* Configure the MII-side interface according to phydev->interface,
 * after checking the ability bits the device advertises.
 * Returns 0 on success, -EINVAL for an unsupported mode.
 */
static int nxp_c45_set_phy_mode(struct phy_device *phydev)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_ABILITIES);
	phydev_dbg(phydev, "Clause 45 managed PHY abilities 0x%x\n", ret);

	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		if (!(ret & RGMII_ABILITY)) {
			phydev_err(phydev, "rgmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RGMII);
		nxp_c45_disable_delays(phydev);
		break;
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		if (!(ret & RGMII_ID_ABILITY)) {
			phydev_err(phydev, "rgmii-id, rgmii-txid, rgmii-rxid modes are not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RGMII);
		/* Delay values come from device properties. */
		ret = nxp_c45_get_delays(phydev);
		if (ret)
			return ret;

		nxp_c45_set_delays(phydev);
		break;
	case PHY_INTERFACE_MODE_MII:
		if (!(ret & MII_ABILITY)) {
			phydev_err(phydev, "mii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_MII);
		break;
	case PHY_INTERFACE_MODE_REVMII:
		if (!(ret & REVMII_ABILITY)) {
			phydev_err(phydev, "rev-mii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_MII | MII_BASIC_CONFIG_REV);
		break;
	case PHY_INTERFACE_MODE_RMII:
		if (!(ret & RMII_ABILITY)) {
			phydev_err(phydev, "rmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RMII);
		break;
	case PHY_INTERFACE_MODE_SGMII:
		if (!(ret & SGMII_ABILITY)) {
			phydev_err(phydev, "sgmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_SGMII);
		break;
	case PHY_INTERFACE_MODE_INTERNAL:
		/* Nothing to configure for an internal connection. */
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
1595 
/* Errata: ES_TJA1120 and ES_TJA1121 Rev. 1.0 — 28 November 2024 Section 3.1 & 3.2 */
static void nxp_c45_tja1120_errata(struct phy_device *phydev)
{
	bool macsec_ability, sgmii_ability;
	int silicon_version, sample_type;
	int phy_abilities;
	int ret = 0;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_ID3);
	if (ret < 0)
		return;

	/* The workaround applies only to the affected sample type and
	 * silicon revisions identified below.
	 */
	sample_type = FIELD_GET(TJA1120_DEV_ID3_SAMPLE_TYPE, ret);
	if (sample_type != DEVICE_ID3_SAMPLE_TYPE_R)
		return;

	silicon_version = FIELD_GET(TJA1120_DEV_ID3_SILICON_VERSION, ret);

	phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1,
				     VEND1_PORT_ABILITIES);
	macsec_ability = !!(phy_abilities & MACSEC_ABILITY);
	sgmii_ability = !!(phy_abilities & SGMII_ABILITY);
	if ((!macsec_ability && silicon_version == 2) ||
	    (macsec_ability && silicon_version == 1)) {
		/* TJA1120/TJA1121 PHY configuration errata workaround.
		 * Apply PHY writes sequence before link up.
		 * The register values below come from the NXP errata
		 * sheet and differ between the MACsec/non-MACsec parts.
		 */
		if (!macsec_ability) {
			phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x4b95);
			phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0xf3cd);
		} else {
			phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x89c7);
			phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0x0893);
		}

		phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x0476, 0x58a0);

		phy_write_mmd(phydev, MDIO_MMD_PMAPMD, 0x8921, 0xa3a);
		phy_write_mmd(phydev, MDIO_MMD_PMAPMD, 0x89F1, 0x16c1);

		phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 0x0);
		phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 0x0);

		if (sgmii_ability) {
			/* TJA1120B/TJA1121B SGMII PCS restart errata workaround.
			 * Put SGMII PCS into power down mode and back up.
			 */
			phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
					 VEND1_SGMII_BASIC_CONTROL,
					 SGMII_LPM);
			phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
					   VEND1_SGMII_BASIC_CONTROL,
					   SGMII_LPM);
		}
	}
}
1652 
nxp_c45_config_init(struct phy_device * phydev)1653 static int nxp_c45_config_init(struct phy_device *phydev)
1654 {
1655 	int ret;
1656 
1657 	ret = nxp_c45_config_enable(phydev);
1658 	if (ret) {
1659 		phydev_err(phydev, "Failed to enable config\n");
1660 		return ret;
1661 	}
1662 
1663 	/* Bug workaround for SJA1110 rev B: enable write access
1664 	 * to MDIO_MMD_PMAPMD
1665 	 */
1666 	phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 1);
1667 	phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 2);
1668 
1669 	if (phy_id_compare(phydev->phy_id, PHY_ID_TJA_1120, GENMASK(31, 4)))
1670 		nxp_c45_tja1120_errata(phydev);
1671 
1672 	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONFIG,
1673 			 PHY_CONFIG_AUTO);
1674 
1675 	ret = nxp_c45_set_phy_mode(phydev);
1676 	if (ret)
1677 		return ret;
1678 
1679 	phydev->autoneg = AUTONEG_DISABLE;
1680 
1681 	nxp_c45_counters_enable(phydev);
1682 	nxp_c45_ptp_init(phydev);
1683 	ret = nxp_c45_macsec_config_init(phydev);
1684 	if (ret)
1685 		return ret;
1686 
1687 	return nxp_c45_start_op(phydev);
1688 }
1689 
/* .get_features callback: TP and MII link modes plus whatever the
 * PMA ability registers advertise.
 */
static int nxp_c45_get_features(struct phy_device *phydev)
{
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, phydev->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, phydev->supported);

	return genphy_c45_pma_read_abilities(phydev);
}
1697 
/* .probe callback: allocate driver state and, depending on the
 * abilities the device advertises and on kernel config, set up PHY
 * timestamping and MACsec support.
 */
static int nxp_c45_probe(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv;
	bool macsec_ability;
	int phy_abilities;
	bool ptp_ability;
	int ret = 0;

	priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	skb_queue_head_init(&priv->tx_queue);
	skb_queue_head_init(&priv->rx_queue);

	priv->phydev = phydev;

	phydev->priv = priv;

	mutex_init(&priv->ptp_lock);

	phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1,
				     VEND1_PORT_ABILITIES);
	ptp_ability = !!(phy_abilities & PTP_ABILITY);
	if (!ptp_ability) {
		phydev_dbg(phydev, "the phy does not support PTP");
		goto no_ptp_support;
	}

	if (IS_ENABLED(CONFIG_PTP_1588_CLOCK) &&
	    IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING)) {
		priv->mii_ts.rxtstamp = nxp_c45_rxtstamp;
		priv->mii_ts.txtstamp = nxp_c45_txtstamp;
		priv->mii_ts.hwtstamp = nxp_c45_hwtstamp;
		priv->mii_ts.ts_info = nxp_c45_ts_info;
		phydev->mii_ts = &priv->mii_ts;
		ret = nxp_c45_init_ptp_clock(priv);

		/* Timestamp selected by default to keep legacy API */
		phydev->default_timestamp = true;
	} else {
		phydev_dbg(phydev, "PTP support not enabled even if the phy supports it");
	}

no_ptp_support:
	macsec_ability = !!(phy_abilities & MACSEC_ABILITY);
	if (!macsec_ability) {
		phydev_info(phydev, "the phy does not support MACsec\n");
		goto no_macsec_support;
	}

	if (IS_ENABLED(CONFIG_MACSEC)) {
		ret = nxp_c45_macsec_probe(phydev);
		phydev_dbg(phydev, "MACsec support enabled.");
	} else {
		phydev_dbg(phydev, "MACsec support not enabled even if the phy supports it");
	}

no_macsec_support:

	return ret;
}
1760 
/* .remove callback: unregister the PHC (if it was created), drop any
 * skbs still waiting for timestamps, and tear down MACsec.
 */
static void nxp_c45_remove(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;

	if (priv->ptp_clock)
		ptp_clock_unregister(priv->ptp_clock);

	skb_queue_purge(&priv->tx_queue);
	skb_queue_purge(&priv->rx_queue);
	nxp_c45_macsec_remove(phydev);
}
1772 
/* TJA1103 hook: enable the preamble and inter-packet-gap counters. */
static void tja1103_counters_enable(struct phy_device *phydev)
{
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_PREAMBLE_COUNT,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_PREAMBLE_COUNT,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_IPG_LENGTH,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_IPG_LENGTH,
			 COUNTER_EN);
}
1784 
/* TJA1103 hook: select the RX timestamp insertion mode and enable
 * the PTP port function.
 */
static void tja1103_ptp_init(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_TS_INSRT_CTRL,
		      TJA1103_RX_TS_INSRT_MODE2);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_FUNC_ENABLES,
			 PTP_ENABLE);
}
1792 
/* TJA1103 hook: the PTP engine is active while the bypass bit is
 * clear, so enabling means clearing the bit and vice versa.
 */
static void tja1103_ptp_enable(struct phy_device *phydev, bool enable)
{
	if (!enable)
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 VEND1_PORT_PTP_CONTROL,
				 PORT_PTP_CONTROL_BYPASS);
	else
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   VEND1_PORT_PTP_CONTROL,
				   PORT_PTP_CONTROL_BYPASS);
}
1804 
/* TJA1103 NMI hook: acknowledge a pending FUSA indication (write-1-
 * to-clear) and mark the interrupt as handled.
 */
static void tja1103_nmi_handler(struct phy_device *phydev,
				irqreturn_t *irq_status)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1,
			   VEND1_ALWAYS_ACCESSIBLE);
	if (ret & FUSA_PASS) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      VEND1_ALWAYS_ACCESSIBLE,
			      FUSA_PASS);
		*irq_status = IRQ_HANDLED;
	}
}
1819 
/* TJA1103 register/field layout for the PTP, external trigger and
 * cable-test blocks; all addresses are in the MDIO_MMD_VEND1 space.
 */
static const struct nxp_c45_regmap tja1103_regmap = {
	.vend1_ptp_clk_period	= 0x1104,
	.vend1_event_msg_filt	= 0x1148,
	.pps_enable		=
		NXP_C45_REG_FIELD(0x1102, MDIO_MMD_VEND1, 3, 1),
	.pps_polarity		=
		NXP_C45_REG_FIELD(0x1102, MDIO_MMD_VEND1, 2, 1),
	.ltc_lock_ctrl		=
		NXP_C45_REG_FIELD(0x1115, MDIO_MMD_VEND1, 0, 1),
	.ltc_read		=
		NXP_C45_REG_FIELD(0x1105, MDIO_MMD_VEND1, 2, 1),
	.ltc_write		=
		NXP_C45_REG_FIELD(0x1105, MDIO_MMD_VEND1, 0, 1),
	.vend1_ltc_wr_nsec_0	= 0x1106,
	.vend1_ltc_wr_nsec_1	= 0x1107,
	.vend1_ltc_wr_sec_0	= 0x1108,
	.vend1_ltc_wr_sec_1	= 0x1109,
	.vend1_ltc_rd_nsec_0	= 0x110A,
	.vend1_ltc_rd_nsec_1	= 0x110B,
	.vend1_ltc_rd_sec_0	= 0x110C,
	.vend1_ltc_rd_sec_1	= 0x110D,
	.vend1_rate_adj_subns_0	= 0x110F,
	.vend1_rate_adj_subns_1	= 0x1110,
	.irq_egr_ts_en		=
		NXP_C45_REG_FIELD(0x1131, MDIO_MMD_VEND1, 0, 1),
	.irq_egr_ts_status	=
		NXP_C45_REG_FIELD(0x1132, MDIO_MMD_VEND1, 0, 1),
	.domain_number		=
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 0, 8),
	.msg_type		=
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 8, 4),
	.sequence_id		=
		NXP_C45_REG_FIELD(0x114F, MDIO_MMD_VEND1, 0, 16),
	.sec_1_0		=
		NXP_C45_REG_FIELD(0x1151, MDIO_MMD_VEND1, 14, 2),
	.sec_4_2		=
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 12, 3),
	.nsec_15_0		=
		NXP_C45_REG_FIELD(0x1150, MDIO_MMD_VEND1, 0, 16),
	.nsec_29_16		=
		NXP_C45_REG_FIELD(0x1151, MDIO_MMD_VEND1, 0, 14),
	.vend1_ext_trg_data_0	= 0x1121,
	.vend1_ext_trg_data_1	= 0x1122,
	.vend1_ext_trg_data_2	= 0x1123,
	.vend1_ext_trg_data_3	= 0x1124,
	.vend1_ext_trg_ctrl	= 0x1126,
	.cable_test		= 0x8330,
	.cable_test_valid	=
		NXP_C45_REG_FIELD(0x8330, MDIO_MMD_VEND1, 13, 1),
	.cable_test_result	=
		NXP_C45_REG_FIELD(0x8330, MDIO_MMD_VEND1, 0, 3),
};
1872 
/* TJA1103/TJA1104 match data: regmap, extra stats and PTP hooks. */
static const struct nxp_c45_phy_data tja1103_phy_data = {
	.regmap = &tja1103_regmap,
	.stats = tja1103_hw_stats,
	.n_stats = ARRAY_SIZE(tja1103_hw_stats),
	.ptp_clk_period = PTP_CLK_PERIOD_100BT1,
	.ext_ts_both_edges = false,
	.ack_ptp_irq = false,
	.counters_enable = tja1103_counters_enable,
	.get_egressts = nxp_c45_get_hwtxts,
	.get_extts = nxp_c45_get_extts,
	.ptp_init = tja1103_ptp_init,
	.ptp_enable = tja1103_ptp_enable,
	.nmi_handler = tja1103_nmi_handler,
};
1887 
tja1120_counters_enable(struct phy_device * phydev)1888 static void tja1120_counters_enable(struct phy_device *phydev)
1889 {
1890 	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_SYMBOL_ERROR_CNT_XTD,
1891 			 EXTENDED_CNT_EN);
1892 	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_MONITOR_STATUS,
1893 			 MONITOR_RESET);
1894 	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_MONITOR_CONFIG,
1895 			 ALL_FRAMES_CNT_EN | LOST_FRAMES_CNT_EN);
1896 }
1897 
tja1120_ptp_init(struct phy_device * phydev)1898 static void tja1120_ptp_init(struct phy_device *phydev)
1899 {
1900 	phy_write_mmd(phydev, MDIO_MMD_VEND1, TJA1120_RX_TS_INSRT_CTRL,
1901 		      TJA1120_RX_TS_INSRT_EN | TJA1120_TS_INSRT_MODE);
1902 	phy_write_mmd(phydev, MDIO_MMD_VEND1, TJA1120_VEND1_EXT_TS_MODE,
1903 		      TJA1120_TS_INSRT_MODE);
1904 	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONFIG,
1905 			 PTP_ENABLE);
1906 }
1907 
tja1120_ptp_enable(struct phy_device * phydev,bool enable)1908 static void tja1120_ptp_enable(struct phy_device *phydev, bool enable)
1909 {
1910 	if (enable)
1911 		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1912 				 VEND1_PORT_FUNC_ENABLES,
1913 				 PTP_ENABLE);
1914 	else
1915 		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1916 				   VEND1_PORT_FUNC_ENABLES,
1917 				   PTP_ENABLE);
1918 }
1919 
tja1120_nmi_handler(struct phy_device * phydev,irqreturn_t * irq_status)1920 static void tja1120_nmi_handler(struct phy_device *phydev,
1921 				irqreturn_t *irq_status)
1922 {
1923 	int ret;
1924 
1925 	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1,
1926 			   TJA1120_GLOBAL_INFRA_IRQ_STATUS);
1927 	if (ret & TJA1120_DEV_BOOT_DONE) {
1928 		phy_write_mmd(phydev, MDIO_MMD_VEND1,
1929 			      TJA1120_GLOBAL_INFRA_IRQ_ACK,
1930 			      TJA1120_DEV_BOOT_DONE);
1931 		*irq_status = IRQ_HANDLED;
1932 	}
1933 }
1934 
nxp_c45_macsec_ability(struct phy_device * phydev)1935 static int nxp_c45_macsec_ability(struct phy_device *phydev)
1936 {
1937 	bool macsec_ability;
1938 	int phy_abilities;
1939 
1940 	phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1,
1941 				     VEND1_PORT_ABILITIES);
1942 	macsec_ability = !!(phy_abilities & MACSEC_ABILITY);
1943 
1944 	return macsec_ability;
1945 }
1946 
tja1103_match_phy_device(struct phy_device * phydev)1947 static int tja1103_match_phy_device(struct phy_device *phydev)
1948 {
1949 	return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1103, PHY_ID_MASK) &&
1950 	       !nxp_c45_macsec_ability(phydev);
1951 }
1952 
tja1104_match_phy_device(struct phy_device * phydev)1953 static int tja1104_match_phy_device(struct phy_device *phydev)
1954 {
1955 	return phy_id_compare(phydev->phy_id, PHY_ID_TJA_1103, PHY_ID_MASK) &&
1956 	       nxp_c45_macsec_ability(phydev);
1957 }
1958 
/* Vendor-specific register layout for the TJA1120 (1000BASE-T1).  Same
 * structure as tja1103_regmap, but the TJA1120 moved most PTP registers
 * (LTC, rate adjust, egress timestamp record) to different addresses and
 * bit positions.
 */
static const struct nxp_c45_regmap tja1120_regmap = {
	.vend1_ptp_clk_period	= 0x1020,
	.vend1_event_msg_filt	= 0x9010,
	/* PPS output control bits */
	.pps_enable		=
		NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 4, 1),
	.pps_polarity		=
		NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 5, 1),
	.ltc_lock_ctrl		=
		NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 2, 1),
	/* Local time counter (LTC) read/write strobes ... */
	.ltc_read		=
		NXP_C45_REG_FIELD(0x1000, MDIO_MMD_VEND1, 1, 1),
	.ltc_write		=
		NXP_C45_REG_FIELD(0x1000, MDIO_MMD_VEND1, 2, 1),
	/* ... and the LTC seconds/nanoseconds shadow registers they latch */
	.vend1_ltc_wr_nsec_0	= 0x1040,
	.vend1_ltc_wr_nsec_1	= 0x1041,
	.vend1_ltc_wr_sec_0	= 0x1042,
	.vend1_ltc_wr_sec_1	= 0x1043,
	.vend1_ltc_rd_nsec_0	= 0x1048,
	.vend1_ltc_rd_nsec_1	= 0x1049,
	.vend1_ltc_rd_sec_0	= 0x104A,
	.vend1_ltc_rd_sec_1	= 0x104B,
	/* sub-nanosecond clock rate adjustment (frequency trimming) */
	.vend1_rate_adj_subns_0	= 0x1030,
	.vend1_rate_adj_subns_1	= 0x1031,
	/* egress timestamp interrupt enable/status */
	.irq_egr_ts_en		=
		NXP_C45_REG_FIELD(0x900A, MDIO_MMD_VEND1, 1, 1),
	.irq_egr_ts_status	=
		NXP_C45_REG_FIELD(0x900C, MDIO_MMD_VEND1, 1, 1),
	/* fields of a captured egress timestamp record
	 * (TJA1120_EGRESS_TS_DATA_S .. TJA1120_EGRESS_TS_END window)
	 */
	.domain_number		=
		NXP_C45_REG_FIELD(0x9061, MDIO_MMD_VEND1, 8, 8),
	.msg_type		=
		NXP_C45_REG_FIELD(0x9061, MDIO_MMD_VEND1, 4, 4),
	.sequence_id		=
		NXP_C45_REG_FIELD(0x9062, MDIO_MMD_VEND1, 0, 16),
	.sec_1_0		=
		NXP_C45_REG_FIELD(0x9065, MDIO_MMD_VEND1, 0, 2),
	.sec_4_2		=
		NXP_C45_REG_FIELD(0x9065, MDIO_MMD_VEND1, 2, 3),
	.nsec_15_0		=
		NXP_C45_REG_FIELD(0x9063, MDIO_MMD_VEND1, 0, 16),
	.nsec_29_16		=
		NXP_C45_REG_FIELD(0x9064, MDIO_MMD_VEND1, 0, 14),
	/* external trigger (extts) timestamp data and control */
	.vend1_ext_trg_data_0	= 0x1071,
	.vend1_ext_trg_data_1	= 0x1072,
	.vend1_ext_trg_data_2	= 0x1073,
	.vend1_ext_trg_data_3	= 0x1074,
	.vend1_ext_trg_ctrl	= 0x1075,
	/* cable diagnostics: control register and a separate status
	 * register (0x8361) holding the valid/result fields
	 */
	.cable_test		= 0x8360,
	.cable_test_valid	=
		NXP_C45_REG_FIELD(0x8361, MDIO_MMD_VEND1, 15, 1),
	.cable_test_result	=
		NXP_C45_REG_FIELD(0x8361, MDIO_MMD_VEND1, 0, 3),
};
2011 
/* Per-family driver data for the TJA1120.  Uses TJA1120-specific
 * timestamp retrieval and PTP callbacks on top of the shared nxp_c45
 * infrastructure.
 */
static const struct nxp_c45_phy_data tja1120_phy_data = {
	.regmap = &tja1120_regmap,
	.stats = tja1120_hw_stats,
	.n_stats = ARRAY_SIZE(tja1120_hw_stats),
	.ptp_clk_period = PTP_CLK_PERIOD_1000BT1,
	/* true: extts capture on both rising and falling edges */
	.ext_ts_both_edges = true,
	/* true: PTP IRQ must be explicitly acknowledged */
	.ack_ptp_irq = true,
	.counters_enable = tja1120_counters_enable,
	.get_egressts = tja1120_get_hwtxts,
	.get_extts = tja1120_get_extts,
	.ptp_init = tja1120_ptp_init,
	.ptp_enable = tja1120_ptp_enable,
	.nmi_handler = tja1120_nmi_handler,
};
2026 
static struct phy_driver nxp_c45_driver[] = {
	{
		/* TJA1103 and TJA1104 share one PHY ID, so neither entry
		 * uses PHY_ID_MATCH_MODEL; the variants are told apart by
		 * their .match_phy_device callbacks (MACsec ability).
		 */
		.name			= "NXP C45 TJA1103",
		.get_features		= nxp_c45_get_features,
		.driver_data		= &tja1103_phy_data,
		.probe			= nxp_c45_probe,
		.soft_reset		= nxp_c45_soft_reset,
		.config_aneg		= genphy_c45_config_aneg,
		.config_init		= nxp_c45_config_init,
		.config_intr		= tja1103_config_intr,
		.handle_interrupt	= nxp_c45_handle_interrupt,
		.read_status		= genphy_c45_read_status,
		.suspend		= genphy_c45_pma_suspend,
		.resume			= genphy_c45_pma_resume,
		.get_sset_count		= nxp_c45_get_sset_count,
		.get_strings		= nxp_c45_get_strings,
		.get_stats		= nxp_c45_get_stats,
		.cable_test_start	= nxp_c45_cable_test_start,
		.cable_test_get_status	= nxp_c45_cable_test_get_status,
		.set_loopback		= genphy_c45_loopback,
		.get_sqi		= nxp_c45_get_sqi,
		.get_sqi_max		= nxp_c45_get_sqi_max,
		.remove			= nxp_c45_remove,
		.match_phy_device	= tja1103_match_phy_device,
	},
	{
		/* Same ops and driver_data as TJA1103; only the match
		 * callback differs (MACsec-capable variant).
		 */
		.name			= "NXP C45 TJA1104",
		.get_features		= nxp_c45_get_features,
		.driver_data		= &tja1103_phy_data,
		.probe			= nxp_c45_probe,
		.soft_reset		= nxp_c45_soft_reset,
		.config_aneg		= genphy_c45_config_aneg,
		.config_init		= nxp_c45_config_init,
		.config_intr		= tja1103_config_intr,
		.handle_interrupt	= nxp_c45_handle_interrupt,
		.read_status		= genphy_c45_read_status,
		.suspend		= genphy_c45_pma_suspend,
		.resume			= genphy_c45_pma_resume,
		.get_sset_count		= nxp_c45_get_sset_count,
		.get_strings		= nxp_c45_get_strings,
		.get_stats		= nxp_c45_get_stats,
		.cable_test_start	= nxp_c45_cable_test_start,
		.cable_test_get_status	= nxp_c45_cable_test_get_status,
		.set_loopback		= genphy_c45_loopback,
		.get_sqi		= nxp_c45_get_sqi,
		.get_sqi_max		= nxp_c45_get_sqi_max,
		.remove			= nxp_c45_remove,
		.match_phy_device	= tja1104_match_phy_device,
	},
	{
		/* TJA1120 has a unique PHY ID, so plain ID matching works;
		 * it also needs a link_change_notify hook.
		 */
		PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120),
		.name			= "NXP C45 TJA1120",
		.get_features		= nxp_c45_get_features,
		.driver_data		= &tja1120_phy_data,
		.probe			= nxp_c45_probe,
		.soft_reset		= nxp_c45_soft_reset,
		.config_aneg		= genphy_c45_config_aneg,
		.config_init		= nxp_c45_config_init,
		.config_intr		= tja1120_config_intr,
		.handle_interrupt	= nxp_c45_handle_interrupt,
		.read_status		= genphy_c45_read_status,
		.link_change_notify	= tja1120_link_change_notify,
		.suspend		= genphy_c45_pma_suspend,
		.resume			= genphy_c45_pma_resume,
		.get_sset_count		= nxp_c45_get_sset_count,
		.get_strings		= nxp_c45_get_strings,
		.get_stats		= nxp_c45_get_stats,
		.cable_test_start	= nxp_c45_cable_test_start,
		.cable_test_get_status	= nxp_c45_cable_test_get_status,
		.set_loopback		= genphy_c45_loopback,
		.get_sqi		= nxp_c45_get_sqi,
		.get_sqi_max		= nxp_c45_get_sqi_max,
		.remove			= nxp_c45_remove,
	},
};
2102 
module_phy_driver(nxp_c45_driver);

/* MDIO ID table for module autoloading.  TJA1104 shares PHY_ID_TJA_1103,
 * so a single table entry covers both variants; the specific phy_driver
 * is then selected by the match_phy_device callbacks above.
 */
static struct mdio_device_id __maybe_unused nxp_c45_tbl[] = {
	{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103) },
	{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120) },
	{ /*sentinel*/ },
};

MODULE_DEVICE_TABLE(mdio, nxp_c45_tbl);

MODULE_AUTHOR("Radu Pirea <radu-nicolae.pirea@oss.nxp.com>");
MODULE_DESCRIPTION("NXP C45 PHY driver");
MODULE_LICENSE("GPL v2");
2116