// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
 * Driver for Microsemi VSC85xx PHYs - timestamping and PHC support
 *
 * Authors: Quentin Schulz & Antoine Tenart
 * License: Dual MIT/GPL
 * Copyright (c) 2020 Microsemi Corporation
 */

#include <linux/gpio/consumer.h>
#include <linux/ip.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/ptp_classify.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/udp.h>
#include <linux/unaligned.h>

#include "mscc.h"
#include "mscc_ptp.h"

/* Two PHYs share the same 1588 processor and it's to be entirely configured
 * through the base PHY of this processor.
 */
/* phydev->bus->mdio_lock should be locked when using this function */
static int phy_ts_base_write(struct phy_device *phydev, u32 regnum, u16 val)
{
	struct vsc8531_private *priv = phydev->priv;

	WARN_ON_ONCE(!mutex_is_locked(&phydev->mdio.bus->mdio_lock));
	return __mdiobus_write(phydev->mdio.bus, priv->ts_base_addr, regnum,
			       val);
}

/* phydev->bus->mdio_lock should be locked when using this function */
static int phy_ts_base_read(struct phy_device *phydev, u32 regnum)
{
	struct vsc8531_private *priv = phydev->priv;

	WARN_ON_ONCE(!mutex_is_locked(&phydev->mdio.bus->mdio_lock));
	return __mdiobus_read(phydev->mdio.bus, priv->ts_base_addr, regnum);
}

enum ts_blk_hw {
	INGRESS_ENGINE_0,
	EGRESS_ENGINE_0,
	INGRESS_ENGINE_1,
	EGRESS_ENGINE_1,
	INGRESS_ENGINE_2,
	EGRESS_ENGINE_2,
	PROCESSOR_0,
	PROCESSOR_1,
};

enum ts_blk {
	INGRESS,
	EGRESS,
	PROCESSOR,
};

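/* Indirect CSR read through the 1588 block interface unit (BIU): switch to
 * the 1588 page, issue a read command for the selected block and address,
 * poll the control register (bounded by BIU_ADDR_CNT_MAX) and assemble the
 * 32-bit result from the MSB/LSB data registers.
 */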
static u32 vsc85xx_ts_read_csr(struct phy_device *phydev, enum ts_blk blk,
			       u16 addr)
{
	struct vsc8531_private *priv = phydev->priv;
	bool base_port = phydev->mdio.addr == priv->ts_base_addr;
	u32 val, cnt = 0;
	enum ts_blk_hw blk_hw;

	switch (blk) {
	case INGRESS:
		blk_hw = base_port ? INGRESS_ENGINE_0 : INGRESS_ENGINE_1;
		break;
	case EGRESS:
		blk_hw = base_port ? EGRESS_ENGINE_0 : EGRESS_ENGINE_1;
		break;
	case PROCESSOR:
	default:
		blk_hw = base_port ? PROCESSOR_0 : PROCESSOR_1;
		break;
	}

	phy_lock_mdio_bus(phydev);

	phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_1588);

	phy_ts_base_write(phydev, MSCC_PHY_TS_BIU_ADDR_CNTL, BIU_ADDR_EXE |
			  BIU_ADDR_READ | BIU_BLK_ID(blk_hw) |
			  BIU_CSR_ADDR(addr));

	do {
		val = phy_ts_base_read(phydev, MSCC_PHY_TS_BIU_ADDR_CNTL);
	} while (!(val & BIU_ADDR_EXE) && cnt++ < BIU_ADDR_CNT_MAX);

	val = phy_ts_base_read(phydev, MSCC_PHY_TS_CSR_DATA_MSB);
	val <<= 16;
	val |= phy_ts_base_read(phydev, MSCC_PHY_TS_CSR_DATA_LSB);

	phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);

	phy_unlock_mdio_bus(phydev);

	return val;
}

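/* Indirect CSR write, mirror of the read path above. For the LTC control and
 * interrupt mask/status registers of the processor block, the MSB half is
 * only written when it is non-zero and the bypass control register is
 * restored afterwards, so a 16-bit update does not clobber unrelated state.
 */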
static void vsc85xx_ts_write_csr(struct phy_device *phydev, enum ts_blk blk,
				 u16 addr, u32 val)
{
	struct vsc8531_private *priv = phydev->priv;
	bool base_port = phydev->mdio.addr == priv->ts_base_addr;
	u32 reg, bypass, cnt = 0, lower = val & 0xffff, upper = val >> 16;
	bool cond = (addr == MSCC_PHY_PTP_LTC_CTRL ||
		     addr == MSCC_PHY_1588_INGR_VSC85XX_INT_MASK ||
		     addr == MSCC_PHY_1588_VSC85XX_INT_MASK ||
		     addr == MSCC_PHY_1588_INGR_VSC85XX_INT_STATUS ||
		     addr == MSCC_PHY_1588_VSC85XX_INT_STATUS) &&
		    blk == PROCESSOR;
	enum ts_blk_hw blk_hw;

	switch (blk) {
	case INGRESS:
		blk_hw = base_port ? INGRESS_ENGINE_0 : INGRESS_ENGINE_1;
		break;
	case EGRESS:
		blk_hw = base_port ? EGRESS_ENGINE_0 : EGRESS_ENGINE_1;
		break;
	case PROCESSOR:
	default:
		blk_hw = base_port ? PROCESSOR_0 : PROCESSOR_1;
		break;
	}

	phy_lock_mdio_bus(phydev);

	bypass = phy_ts_base_read(phydev, MSCC_PHY_BYPASS_CONTROL);

	phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_1588);

	if (!cond || upper)
		phy_ts_base_write(phydev, MSCC_PHY_TS_CSR_DATA_MSB, upper);

	phy_ts_base_write(phydev, MSCC_PHY_TS_CSR_DATA_LSB, lower);

	phy_ts_base_write(phydev, MSCC_PHY_TS_BIU_ADDR_CNTL, BIU_ADDR_EXE |
			  BIU_ADDR_WRITE | BIU_BLK_ID(blk_hw) |
			  BIU_CSR_ADDR(addr));

	do {
		reg = phy_ts_base_read(phydev, MSCC_PHY_TS_BIU_ADDR_CNTL);
	} while (!(reg & BIU_ADDR_EXE) && cnt++ < BIU_ADDR_CNT_MAX);

	phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);

	if (cond && upper)
		phy_ts_base_write(phydev, MSCC_PHY_BYPASS_CONTROL, bypass);

	phy_unlock_mdio_bus(phydev);
}

/* Pick bytes from PTP header */
#define PTP_HEADER_TRNSP_MSG		26
#define PTP_HEADER_DOMAIN_NUM		25
#define PTP_HEADER_BYTE_8_31(x)		(31 - (x))
#define MAC_ADDRESS_BYTE(x)		((x) + (35 - ETH_ALEN + 1))

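/* Program the Frame Signature Builder (FSB) used to match egress timestamps
 * popped from the TS FIFO against queued skbs. The 16-byte signature is built
 * from the PTP sequence ID (2 bytes), domain number, message type and
 * destination MAC address (6 bytes), padded with the message-type selector.
 * Each selector is 6 bits wide and five of them are packed per
 * MSCC_PHY_ANA_FSB_REG register; get_sig() must build the exact same layout
 * on the software side.
 */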
static int vsc85xx_ts_fsb_init(struct phy_device *phydev)
{
	u8 sig_sel[16] = {};
	signed char i, pos = 0;

	/* Seq ID is 2B long and starts at 30th byte */
	for (i = 1; i >= 0; i--)
		sig_sel[pos++] = PTP_HEADER_BYTE_8_31(30 + i);

	/* DomainNum */
	sig_sel[pos++] = PTP_HEADER_DOMAIN_NUM;

	/* MsgType */
	sig_sel[pos++] = PTP_HEADER_TRNSP_MSG;

	/* MAC address is 6B long */
	for (i = ETH_ALEN - 1; i >= 0; i--)
		sig_sel[pos++] = MAC_ADDRESS_BYTE(i);

	/* Fill the last bytes of the signature to reach a 16B signature */
	for (; pos < ARRAY_SIZE(sig_sel); pos++)
		sig_sel[pos] = PTP_HEADER_TRNSP_MSG;

	for (i = 0; i <= 2; i++) {
		u32 val = 0;

		for (pos = i * 5 + 4; pos >= i * 5; pos--)
			val = (val << 6) | sig_sel[pos];

		vsc85xx_ts_write_csr(phydev, EGRESS, MSCC_PHY_ANA_FSB_REG(i),
				     val);
	}

	vsc85xx_ts_write_csr(phydev, EGRESS, MSCC_PHY_ANA_FSB_REG(3),
			     sig_sel[15]);

	return 0;
}

static const u32 vsc85xx_egr_latency[] = {
	/* Copper Egress */
	1272,   /* 1000Mbps */
	12516,  /* 100Mbps */
	125444, /* 10Mbps */
	/* Fiber Egress */
	1277,   /* 1000Mbps */
	12537,  /* 100Mbps */
};

static const u32 vsc85xx_egr_latency_macsec[] = {
	/* Copper Egress ON */
	3496,   /* 1000Mbps */
	34760,  /* 100Mbps */
	347844, /* 10Mbps */
	/* Fiber Egress ON */
	3502,   /* 1000Mbps */
	34780,  /* 100Mbps */
};

static const u32 vsc85xx_ingr_latency[] = {
	/* Copper Ingress */
	208,  /* 1000Mbps */
	304,  /* 100Mbps */
	2023, /* 10Mbps */
	/* Fiber Ingress */
	98,   /* 1000Mbps */
	197,  /* 100Mbps */
};

static const u32 vsc85xx_ingr_latency_macsec[] = {
	/* Copper Ingress */
	2408,   /* 1000Mbps */
	22300,  /* 100Mbps */
	222009, /* 10Mbps */
	/* Fiber Ingress */
	2299,   /* 1000Mbps */
	22192,  /* 100Mbps */
};

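/* Latency compensation depends on the link speed and on whether the MACsec
 * block sits in the data path, hence the two sets of tables above
 * (index 0: 1000Mbps, 1: 100Mbps, 2: 10Mbps). The values are loaded into the
 * local latency registers and committed with the LOAD_DELAYS bits.
 */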
static void vsc85xx_ts_set_latencies(struct phy_device *phydev)
{
	u32 val, ingr_latency, egr_latency;
	u8 idx;

	/* No need to set latencies of packets if the PHY is not connected */
	if (!phydev->link)
		return;

	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_STALL_LATENCY,
			     STALL_EGR_LATENCY(phydev->speed));

	switch (phydev->speed) {
	case SPEED_100:
		idx = 1;
		break;
	case SPEED_1000:
		idx = 0;
		break;
	default:
		idx = 2;
		break;
	}

	ingr_latency = IS_ENABLED(CONFIG_MACSEC) ?
		vsc85xx_ingr_latency_macsec[idx] : vsc85xx_ingr_latency[idx];
	egr_latency = IS_ENABLED(CONFIG_MACSEC) ?
		vsc85xx_egr_latency_macsec[idx] : vsc85xx_egr_latency[idx];

	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_LOCAL_LATENCY,
			     PTP_INGR_LOCAL_LATENCY(ingr_latency));

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_INGR_TSP_CTRL);
	val |= PHY_PTP_INGR_TSP_CTRL_LOAD_DELAYS;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_TSP_CTRL,
			     val);

	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_LOCAL_LATENCY,
			     PTP_EGR_LOCAL_LATENCY(egr_latency));

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TSP_CTRL);
	val |= PHY_PTP_EGR_TSP_CTRL_LOAD_DELAYS;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TSP_CTRL, val);
}

static int vsc85xx_ts_disable_flows(struct phy_device *phydev, enum ts_blk blk)
{
	u8 i;

	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_NXT_COMP, 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_UDP_CHKSUM,
			     IP1_NXT_PROT_UDP_CHKSUM_WIDTH(2));
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP2_NXT_PROT_NXT_COMP, 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP2_NXT_PROT_UDP_CHKSUM,
			     IP2_NXT_PROT_UDP_CHKSUM_WIDTH(2));
	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_MPLS_COMP_NXT_COMP, 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NTX_PROT, 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH2_NTX_PROT, 0);

	for (i = 0; i < COMP_MAX_FLOWS; i++) {
		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(i),
				     IP1_FLOW_VALID_CH0 | IP1_FLOW_VALID_CH1);
		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP2_FLOW_ENA(i),
				     IP2_FLOW_VALID_CH0 | IP2_FLOW_VALID_CH1);
		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ENA(i),
				     ETH1_FLOW_VALID_CH0 | ETH1_FLOW_VALID_CH1);
		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH2_FLOW_ENA(i),
				     ETH2_FLOW_VALID_CH0 | ETH2_FLOW_VALID_CH1);
		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_MPLS_FLOW_CTRL(i),
				     MPLS_FLOW_VALID_CH0 | MPLS_FLOW_VALID_CH1);

		if (i >= PTP_COMP_MAX_FLOWS)
			continue;

		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_FLOW_ENA(i), 0);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_DOMAIN_RANGE(i), 0);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_MASK_UPPER(i), 0);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_MASK_LOWER(i), 0);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_MATCH_UPPER(i), 0);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_MATCH_LOWER(i), 0);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_PTP_ACTION(i), 0);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_PTP_ACTION2(i), 0);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_PTP_0_FIELD(i), 0);
		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_OAM_PTP_FLOW_ENA(i),
				     0);
	}

	return 0;
}

static int vsc85xx_ts_eth_cmp1_sig(struct phy_device *phydev)
{
	u32 val;

	val = vsc85xx_ts_read_csr(phydev, EGRESS, MSCC_PHY_ANA_ETH1_NTX_PROT);
	val &= ~ANA_ETH1_NTX_PROT_SIG_OFF_MASK;
	val |= ANA_ETH1_NTX_PROT_SIG_OFF(0);
	vsc85xx_ts_write_csr(phydev, EGRESS, MSCC_PHY_ANA_ETH1_NTX_PROT, val);

	val = vsc85xx_ts_read_csr(phydev, EGRESS, MSCC_PHY_ANA_FSB_CFG);
	val &= ~ANA_FSB_ADDR_FROM_BLOCK_SEL_MASK;
	val |= ANA_FSB_ADDR_FROM_ETH1;
	vsc85xx_ts_write_csr(phydev, EGRESS, MSCC_PHY_ANA_FSB_CFG, val);

	return 0;
}

static struct vsc85xx_ptphdr *get_ptp_header_l4(struct sk_buff *skb,
						struct iphdr *iphdr,
						struct udphdr *udphdr)
{
	if (iphdr->version != 4 || iphdr->protocol != IPPROTO_UDP)
		return NULL;

	return (struct vsc85xx_ptphdr *)(((unsigned char *)udphdr) + UDP_HLEN);
}

static struct vsc85xx_ptphdr *get_ptp_header_tx(struct sk_buff *skb)
{
	struct ethhdr *ethhdr = eth_hdr(skb);
	struct udphdr *udphdr;
	struct iphdr *iphdr;

	if (ethhdr->h_proto == htons(ETH_P_1588))
		return (struct vsc85xx_ptphdr *)(((unsigned char *)ethhdr) +
						 skb_mac_header_len(skb));

	if (ethhdr->h_proto != htons(ETH_P_IP))
		return NULL;

	iphdr = ip_hdr(skb);
	udphdr = udp_hdr(skb);

	return get_ptp_header_l4(skb, iphdr, udphdr);
}

static struct vsc85xx_ptphdr *get_ptp_header_rx(struct sk_buff *skb,
						enum hwtstamp_rx_filters rx_filter)
{
	struct udphdr *udphdr;
	struct iphdr *iphdr;

	if (rx_filter == HWTSTAMP_FILTER_PTP_V2_L2_EVENT)
		return (struct vsc85xx_ptphdr *)skb->data;

	iphdr = (struct iphdr *)skb->data;
	udphdr = (struct udphdr *)(skb->data + iphdr->ihl * 4);

	return get_ptp_header_l4(skb, iphdr, udphdr);
}

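/* Build the software-side 16-byte signature for an egress PTP frame, using
 * the same byte layout as programmed into the FSB by vsc85xx_ts_fsb_init():
 * sequence ID, domain, message type, destination MAC, then message-type
 * padding.
 */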
static int get_sig(struct sk_buff *skb, u8 *sig)
{
	struct vsc85xx_ptphdr *ptphdr = get_ptp_header_tx(skb);
	struct ethhdr *ethhdr = eth_hdr(skb);
	unsigned int i;

	if (!ptphdr)
		return -EOPNOTSUPP;

	sig[0] = (__force u16)ptphdr->seq_id >> 8;
	sig[1] = (__force u16)ptphdr->seq_id & GENMASK(7, 0);
	sig[2] = ptphdr->domain;
	sig[3] = ptphdr->tsmt & GENMASK(3, 0);

	memcpy(&sig[4], ethhdr->h_dest, ETH_ALEN);

	/* Fill the last bytes of the signature to reach a 16B signature */
	for (i = 10; i < 16; i++)
		sig[i] = ptphdr->tsmt & GENMASK(3, 0);

	return 0;
}

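/* Pop one entry from the egress TS FIFO and try to match it against the skbs
 * waiting in tx_queue. An entry is 26 bytes: the 16-byte signature followed
 * by the 10-byte timestamp (see the FIFO configuration in
 * __vsc8584_init_ptp()). A matching skb gets its hardware timestamp
 * completed; non-matching skbs are requeued for later FIFO entries.
 */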
static void vsc85xx_dequeue_skb(struct vsc85xx_ptp *ptp)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct vsc85xx_ts_fifo fifo;
	struct sk_buff *skb;
	u8 skb_sig[16], *p;
	int i, len;
	u32 reg;

	memset(&fifo, 0, sizeof(fifo));
	p = (u8 *)&fifo;

	reg = vsc85xx_ts_read_csr(ptp->phydev, PROCESSOR,
				  MSCC_PHY_PTP_EGR_TS_FIFO(0));
	if (reg & PTP_EGR_TS_FIFO_EMPTY)
		return;

	*p++ = reg & 0xff;
	*p++ = (reg >> 8) & 0xff;

	/* Read the current FIFO item. Reading FIFO6 pops the next one. */
	for (i = 1; i < 7; i++) {
		reg = vsc85xx_ts_read_csr(ptp->phydev, PROCESSOR,
					  MSCC_PHY_PTP_EGR_TS_FIFO(i));
		*p++ = reg & 0xff;
		*p++ = (reg >> 8) & 0xff;
		*p++ = (reg >> 16) & 0xff;
		*p++ = (reg >> 24) & 0xff;
	}

	len = skb_queue_len_lockless(&ptp->tx_queue);
	if (len < 1)
		return;

	while (len--) {
		skb = skb_dequeue(&ptp->tx_queue);
		if (!skb)
			return;

		/* Can't get the signature of the packet, won't ever
		 * be able to have one so let's dequeue the packet.
		 */
		if (get_sig(skb, skb_sig) < 0) {
			kfree_skb(skb);
			continue;
		}

		/* Check if we found the signature we were looking for. */
		if (!memcmp(skb_sig, fifo.sig, sizeof(fifo.sig))) {
			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			shhwtstamps.hwtstamp = ktime_set(fifo.secs, fifo.ns);
			skb_complete_tx_timestamp(skb, &shhwtstamps);

			return;
		}

		/* Valid signature but does not match the one of the
		 * packet in the FIFO right now, reschedule it for later
		 * packets.
		 */
		skb_queue_tail(&ptp->tx_queue, skb);
	}
}

static void vsc85xx_get_tx_ts(struct vsc85xx_ptp *ptp)
{
	u32 reg;

	do {
		vsc85xx_dequeue_skb(ptp);

		/* If other timestamps are available in the FIFO, process them. */
		reg = vsc85xx_ts_read_csr(ptp->phydev, PROCESSOR,
					  MSCC_PHY_PTP_EGR_TS_FIFO_CTRL);
	} while (PTP_EGR_FIFO_LEVEL_LAST_READ(reg) > 1);
}

static int vsc85xx_ptp_cmp_init(struct phy_device *phydev, enum ts_blk blk)
{
	struct vsc8531_private *vsc8531 = phydev->priv;
	bool base = phydev->mdio.addr == vsc8531->ts_base_addr;
	static const u8 msgs[] = {
		PTP_MSGTYPE_SYNC,
		PTP_MSGTYPE_DELAY_REQ
	};
	u32 val;
	u8 i;

	for (i = 0; i < ARRAY_SIZE(msgs); i++) {
		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_FLOW_ENA(i),
				     base ? PTP_FLOW_VALID_CH0 :
				     PTP_FLOW_VALID_CH1);

		val = vsc85xx_ts_read_csr(phydev, blk,
					  MSCC_ANA_PTP_FLOW_DOMAIN_RANGE(i));
		val &= ~PTP_FLOW_DOMAIN_RANGE_ENA;
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_DOMAIN_RANGE(i), val);

		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_MATCH_UPPER(i),
				     msgs[i] << 24);

		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_PTP_FLOW_MASK_UPPER(i),
				     PTP_FLOW_MSG_TYPE_MASK);
	}

	return 0;
}

static int vsc85xx_eth_cmp1_init(struct phy_device *phydev, enum ts_blk blk)
{
	struct vsc8531_private *vsc8531 = phydev->priv;
	bool base = phydev->mdio.addr == vsc8531->ts_base_addr;
	u32 val;

	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NXT_PROT_TAG, 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NTX_PROT_VLAN_TPID,
			     ANA_ETH1_NTX_PROT_VLAN_TPID(ETH_P_8021AD));

	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ENA(0),
			     base ? ETH1_FLOW_VALID_CH0 : ETH1_FLOW_VALID_CH1);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_MATCH_MODE(0),
			     ANA_ETH1_FLOW_MATCH_VLAN_TAG2);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ADDR_MATCH1(0), 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(0), 0);
	vsc85xx_ts_write_csr(phydev, blk,
			     MSCC_ANA_ETH1_FLOW_VLAN_RANGE_I_TAG(0), 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_VLAN_TAG1(0), 0);
	vsc85xx_ts_write_csr(phydev, blk,
			     MSCC_ANA_ETH1_FLOW_VLAN_TAG2_I_TAG(0), 0);

	val = vsc85xx_ts_read_csr(phydev, blk,
				  MSCC_ANA_ETH1_FLOW_MATCH_MODE(0));
	val &= ~ANA_ETH1_FLOW_MATCH_VLAN_TAG_MASK;
	val |= ANA_ETH1_FLOW_MATCH_VLAN_VERIFY;
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_MATCH_MODE(0),
			     val);

	return 0;
}

static int vsc85xx_ip_cmp1_init(struct phy_device *phydev, enum ts_blk blk)
{
	struct vsc8531_private *vsc8531 = phydev->priv;
	bool base = phydev->mdio.addr == vsc8531->ts_base_addr;
	u32 val;

	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_MATCH2_UPPER,
			     PTP_EV_PORT);
	/* Match on dest port only, ignore src */
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_MASK2_UPPER,
			     0xffff);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_MATCH2_LOWER,
			     0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_MASK2_LOWER, 0);

	val = vsc85xx_ts_read_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(0));
	val &= ~IP1_FLOW_ENA_CHANNEL_MASK_MASK;
	val |= base ? IP1_FLOW_VALID_CH0 : IP1_FLOW_VALID_CH1;
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(0), val);

	/* Match all IPs */
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MATCH_UPPER(0), 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MASK_UPPER(0), 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MATCH_UPPER_MID(0),
			     0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MASK_UPPER_MID(0),
			     0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MATCH_LOWER_MID(0),
			     0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MASK_LOWER_MID(0),
			     0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MATCH_LOWER(0), 0);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MASK_LOWER(0), 0);

	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_IP_CHKSUM_SEL, 0);

	return 0;
}

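/* Frequency adjustment uses the LTC auto-adjust register: a 1 ns correction
 * is added or subtracted every 'adj' nanoseconds, with
 * adj = (65536 * 1000000) / |scaled_ppm| (scaled_ppm is ppm with a 16-bit
 * fractional part). As an illustration, scaled_ppm = 65536 (1 ppm) gives
 * adj = 1000000, i.e. one 1 ns step per millisecond. Requests below roughly
 * 0.001 ppm or above 1000000 ppm are ignored.
 */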
static int vsc85xx_adjfine(struct ptp_clock_info *info, long scaled_ppm)
{
	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
	struct phy_device *phydev = ptp->phydev;
	struct vsc8531_private *priv = phydev->priv;
	u64 adj = 0;
	u32 val;

	if (abs(scaled_ppm) < 66 || abs(scaled_ppm) > 65536UL * 1000000UL)
		return 0;

	adj = div64_u64(1000000ULL * 65536ULL, abs(scaled_ppm));
	if (adj > 1000000000L)
		adj = 1000000000L;

	val = PTP_AUTO_ADJ_NS_ROLLOVER(adj);
	val |= scaled_ppm > 0 ? PTP_AUTO_ADJ_ADD_1NS : PTP_AUTO_ADJ_SUB_1NS;

	mutex_lock(&priv->phc_lock);

	/* Update the ppb val in nano seconds to the auto adjust reg. */
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_AUTO_ADJ,
			     val);

	/* The auto adjust update val is set to 0 after write operation. */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL);
	val |= PTP_LTC_CTRL_AUTO_ADJ_UPDATE;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);

	mutex_unlock(&priv->phc_lock);

	return 0;
}

static int __vsc85xx_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
{
	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
	struct phy_device *phydev = ptp->phydev;
	struct vsc85xx_shared_private *shared =
		(struct vsc85xx_shared_private *)phydev->shared->priv;
	struct vsc8531_private *priv = phydev->priv;
	u32 val;

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL);
	val |= PTP_LTC_CTRL_SAVE_ENA;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);

	/* Local Time Counter (LTC) is put in SAVE* regs on rising edge of
	 * LOAD_SAVE pin.
	 */
	mutex_lock(&shared->gpio_lock);
	gpiod_set_value(priv->load_save, 1);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_LTC_SAVED_SEC_MSB);

	ts->tv_sec = ((time64_t)val) << 32;

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_LTC_SAVED_SEC_LSB);
	ts->tv_sec += val;

	ts->tv_nsec = vsc85xx_ts_read_csr(phydev, PROCESSOR,
					  MSCC_PHY_PTP_LTC_SAVED_NS);

	gpiod_set_value(priv->load_save, 0);
	mutex_unlock(&shared->gpio_lock);

	return 0;
}

static int vsc85xx_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
{
	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
	struct phy_device *phydev = ptp->phydev;
	struct vsc8531_private *priv = phydev->priv;

	mutex_lock(&priv->phc_lock);
	__vsc85xx_gettime(info, ts);
	mutex_unlock(&priv->phc_lock);

	return 0;
}

static int __vsc85xx_settime(struct ptp_clock_info *info,
			     const struct timespec64 *ts)
{
	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
	struct phy_device *phydev = ptp->phydev;
	struct vsc85xx_shared_private *shared =
		(struct vsc85xx_shared_private *)phydev->shared->priv;
	struct vsc8531_private *priv = phydev->priv;
	u32 val;

	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_LOAD_SEC_MSB,
			     PTP_LTC_LOAD_SEC_MSB(ts->tv_sec));
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_LOAD_SEC_LSB,
			     PTP_LTC_LOAD_SEC_LSB(ts->tv_sec));
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_LOAD_NS,
			     PTP_LTC_LOAD_NS(ts->tv_nsec));

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL);
	val |= PTP_LTC_CTRL_LOAD_ENA;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);

	/* Local Time Counter (LTC) is set from LOAD* regs on rising edge of
	 * LOAD_SAVE pin.
	 */
	mutex_lock(&shared->gpio_lock);
	gpiod_set_value(priv->load_save, 1);

	val &= ~PTP_LTC_CTRL_LOAD_ENA;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);

	gpiod_set_value(priv->load_save, 0);
	mutex_unlock(&shared->gpio_lock);

	return 0;
}

static int vsc85xx_settime(struct ptp_clock_info *info,
			   const struct timespec64 *ts)
{
	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
	struct phy_device *phydev = ptp->phydev;
	struct vsc8531_private *priv = phydev->priv;

	mutex_lock(&priv->phc_lock);
	__vsc85xx_settime(info, ts);
	mutex_unlock(&priv->phc_lock);

	return 0;
}

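/* Offsets below one second are applied atomically through the LTC offset
 * register; anything larger falls back to a read-modify-write of the full
 * time under phc_lock.
 */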
static int vsc85xx_adjtime(struct ptp_clock_info *info, s64 delta)
{
	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
	struct phy_device *phydev = ptp->phydev;
	struct vsc8531_private *priv = phydev->priv;
	u32 val;

	/* Can't recover that big of an offset. Let's set the time directly. */
	if (abs(delta) >= NSEC_PER_SEC) {
		struct timespec64 ts;
		u64 now;

		mutex_lock(&priv->phc_lock);

		__vsc85xx_gettime(info, &ts);
		now = ktime_to_ns(timespec64_to_ktime(ts));
		ts = ns_to_timespec64(now + delta);
		__vsc85xx_settime(info, &ts);

		mutex_unlock(&priv->phc_lock);

		return 0;
	}

	mutex_lock(&priv->phc_lock);

	val = PTP_LTC_OFFSET_VAL(abs(delta)) | PTP_LTC_OFFSET_ADJ;
	if (delta > 0)
		val |= PTP_LTC_OFFSET_ADD;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_OFFSET, val);

	mutex_unlock(&priv->phc_lock);

	return 0;
}

static int vsc85xx_eth1_next_comp(struct phy_device *phydev, enum ts_blk blk,
				  u32 next_comp, u32 etype)
{
	u32 val;

	val = vsc85xx_ts_read_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NTX_PROT);
	val &= ~ANA_ETH1_NTX_PROT_COMPARATOR_MASK;
	val |= next_comp;
	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NTX_PROT, val);

	val = ANA_ETH1_NXT_PROT_ETYPE_MATCH(etype) |
	      ANA_ETH1_NXT_PROT_ETYPE_MATCH_ENA;
	vsc85xx_ts_write_csr(phydev, blk,
			     MSCC_PHY_ANA_ETH1_NXT_PROT_ETYPE_MATCH, val);

	return 0;
}

static int vsc85xx_ip1_next_comp(struct phy_device *phydev, enum ts_blk blk,
				 u32 next_comp, u32 header)
{
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_NXT_COMP,
			     ANA_IP1_NXT_PROT_NXT_COMP_BYTES_HDR(header) |
			     next_comp);

	return 0;
}

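/* Configure what the analyzer does with a matched PTP flow: PTP_WRITE_NS
 * writes the nanoseconds into the reserved field (ingress RX timestamping),
 * PTP_WRITE_1588 rewrites the originTimestamp inside the frame (one-step
 * sync on egress) and PTP_SAVE_IN_TS_FIFO pushes the timestamp into the
 * egress TS FIFO for two-step operation.
 */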
static int vsc85xx_ts_ptp_action_flow(struct phy_device *phydev,
				      enum ts_blk blk, u8 flow,
				      enum ptp_cmd cmd)
{
	u32 val;

	/* Check non-zero reserved field */
	val = PTP_FLOW_PTP_0_FIELD_PTP_FRAME | PTP_FLOW_PTP_0_FIELD_RSVRD_CHECK;
	vsc85xx_ts_write_csr(phydev, blk,
			     MSCC_ANA_PTP_FLOW_PTP_0_FIELD(flow), val);

	val = PTP_FLOW_PTP_ACTION_CORR_OFFSET(8) |
	      PTP_FLOW_PTP_ACTION_TIME_OFFSET(8) |
	      PTP_FLOW_PTP_ACTION_PTP_CMD(cmd == PTP_SAVE_IN_TS_FIFO ?
					  PTP_NOP : cmd);
	if (cmd == PTP_SAVE_IN_TS_FIFO)
		val |= PTP_FLOW_PTP_ACTION_SAVE_LOCAL_TIME;
	else if (cmd == PTP_WRITE_NS)
		val |= PTP_FLOW_PTP_ACTION_MOD_FRAME_STATUS_UPDATE |
		       PTP_FLOW_PTP_ACTION_MOD_FRAME_STATUS_BYTE_OFFSET(6);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_FLOW_PTP_ACTION(flow),
			     val);

	if (cmd == PTP_WRITE_1588)
		/* Rewrite timestamp directly in frame */
		val = PTP_FLOW_PTP_ACTION2_REWRITE_OFFSET(34) |
		      PTP_FLOW_PTP_ACTION2_REWRITE_BYTES(10);
	else if (cmd == PTP_SAVE_IN_TS_FIFO)
		/* no rewrite */
		val = PTP_FLOW_PTP_ACTION2_REWRITE_OFFSET(0) |
		      PTP_FLOW_PTP_ACTION2_REWRITE_BYTES(0);
	else
		/* Write in reserved field */
		val = PTP_FLOW_PTP_ACTION2_REWRITE_OFFSET(16) |
		      PTP_FLOW_PTP_ACTION2_REWRITE_BYTES(4);
	vsc85xx_ts_write_csr(phydev, blk,
			     MSCC_ANA_PTP_FLOW_PTP_ACTION2(flow), val);

	return 0;
}

static int vsc85xx_ptp_conf(struct phy_device *phydev, enum ts_blk blk,
			    bool one_step, bool enable)
{
	static const u8 msgs[] = {
		PTP_MSGTYPE_SYNC,
		PTP_MSGTYPE_DELAY_REQ
	};
	u32 val;
	u8 i;

	for (i = 0; i < ARRAY_SIZE(msgs); i++) {
		if (blk == INGRESS)
			vsc85xx_ts_ptp_action_flow(phydev, blk, msgs[i],
						   PTP_WRITE_NS);
		else if (msgs[i] == PTP_MSGTYPE_SYNC && one_step)
			/* no need to know Sync t when sending in one_step */
			vsc85xx_ts_ptp_action_flow(phydev, blk, msgs[i],
						   PTP_WRITE_1588);
		else
			vsc85xx_ts_ptp_action_flow(phydev, blk, msgs[i],
						   PTP_SAVE_IN_TS_FIFO);

		val = vsc85xx_ts_read_csr(phydev, blk,
					  MSCC_ANA_PTP_FLOW_ENA(i));
		val &= ~PTP_FLOW_ENA;
		if (enable)
			val |= PTP_FLOW_ENA;
		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_FLOW_ENA(i),
				     val);
	}

	return 0;
}

static int vsc85xx_eth1_conf(struct phy_device *phydev, enum ts_blk blk,
			     bool enable)
{
	struct vsc8531_private *vsc8531 = phydev->priv;
	u32 val = ANA_ETH1_FLOW_ADDR_MATCH2_DEST;

	if (vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_PTP_V2_L2_EVENT) {
		/* PTP over Ethernet multicast address for SYNC and DELAY msg */
		u8 ptp_multicast[6] = {0x01, 0x1b, 0x19, 0x00, 0x00, 0x00};

		val |= ANA_ETH1_FLOW_ADDR_MATCH2_FULL_ADDR |
		       get_unaligned_be16(&ptp_multicast[4]);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(0), val);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_ETH1_FLOW_ADDR_MATCH1(0),
				     get_unaligned_be32(ptp_multicast));
	} else {
		val |= ANA_ETH1_FLOW_ADDR_MATCH2_ANY_MULTICAST;
		val |= ANA_ETH1_FLOW_ADDR_MATCH2_ANY_UNICAST;
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(0), val);
		vsc85xx_ts_write_csr(phydev, blk,
				     MSCC_ANA_ETH1_FLOW_ADDR_MATCH1(0), 0);
	}

	val = vsc85xx_ts_read_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ENA(0));
	val &= ~ETH1_FLOW_ENA;
	if (enable)
		val |= ETH1_FLOW_ENA;
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ENA(0), val);

	return 0;
}

static int vsc85xx_ip1_conf(struct phy_device *phydev, enum ts_blk blk,
			    bool enable)
{
	u32 val;

	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_IP1_MODE,
			     ANA_IP1_NXT_PROT_IPV4 |
			     ANA_IP1_NXT_PROT_FLOW_OFFSET_IPV4);

	/* Matching UDP protocol number */
	val = ANA_IP1_NXT_PROT_IP_MATCH1_PROT_MASK(0xff) |
	      ANA_IP1_NXT_PROT_IP_MATCH1_PROT_MATCH(IPPROTO_UDP) |
	      ANA_IP1_NXT_PROT_IP_MATCH1_PROT_OFF(9);
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_IP_MATCH1,
			     val);

	/* End of IP protocol, start of next protocol (UDP) */
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_OFFSET2,
			     ANA_IP1_NXT_PROT_OFFSET2(20));

	val = vsc85xx_ts_read_csr(phydev, blk,
				  MSCC_ANA_IP1_NXT_PROT_UDP_CHKSUM);
	val &= ~(IP1_NXT_PROT_UDP_CHKSUM_OFF_MASK |
		 IP1_NXT_PROT_UDP_CHKSUM_WIDTH_MASK);
	val |= IP1_NXT_PROT_UDP_CHKSUM_WIDTH(2);

	val &= ~(IP1_NXT_PROT_UDP_CHKSUM_UPDATE |
		 IP1_NXT_PROT_UDP_CHKSUM_CLEAR);
	/* UDP checksum offset in IPv4 packet
	 * according to: https://tools.ietf.org/html/rfc768
	 */
	val |= IP1_NXT_PROT_UDP_CHKSUM_OFF(26);
	if (enable)
		val |= IP1_NXT_PROT_UDP_CHKSUM_CLEAR;
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_UDP_CHKSUM,
			     val);

	val = vsc85xx_ts_read_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(0));
	val &= ~(IP1_FLOW_MATCH_ADDR_MASK | IP1_FLOW_ENA);
	val |= IP1_FLOW_MATCH_DEST_SRC_ADDR;
	if (enable)
		val |= IP1_FLOW_ENA;
	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(0), val);

	return 0;
}

static int vsc85xx_ts_engine_init(struct phy_device *phydev, bool one_step)
{
	struct vsc8531_private *vsc8531 = phydev->priv;
	bool ptp_l4, base = phydev->mdio.addr == vsc8531->ts_base_addr;
	u8 eng_id = base ? 0 : 1;
	u32 val;

	ptp_l4 = vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_PTP_V2_L4_EVENT;

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_ANALYZER_MODE);
	/* Disable INGRESS and EGRESS so engine eng_id can be reconfigured */
	val &= ~(PTP_ANALYZER_MODE_EGR_ENA(BIT(eng_id)) |
		 PTP_ANALYZER_MODE_INGR_ENA(BIT(eng_id)));
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ANALYZER_MODE,
			     val);

	if (vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_PTP_V2_L2_EVENT) {
		vsc85xx_eth1_next_comp(phydev, INGRESS,
				       ANA_ETH1_NTX_PROT_PTP_OAM, ETH_P_1588);
		vsc85xx_eth1_next_comp(phydev, EGRESS,
				       ANA_ETH1_NTX_PROT_PTP_OAM, ETH_P_1588);
	} else {
		vsc85xx_eth1_next_comp(phydev, INGRESS,
				       ANA_ETH1_NTX_PROT_IP_UDP_ACH_1,
				       ETH_P_IP);
		vsc85xx_eth1_next_comp(phydev, EGRESS,
				       ANA_ETH1_NTX_PROT_IP_UDP_ACH_1,
				       ETH_P_IP);
		/* Header length of IPv[4/6] + UDP */
		vsc85xx_ip1_next_comp(phydev, INGRESS,
				      ANA_ETH1_NTX_PROT_PTP_OAM, 28);
		vsc85xx_ip1_next_comp(phydev, EGRESS,
				      ANA_ETH1_NTX_PROT_PTP_OAM, 28);
	}

	vsc85xx_eth1_conf(phydev, INGRESS,
			  vsc8531->ptp->rx_filter != HWTSTAMP_FILTER_NONE);
	vsc85xx_ip1_conf(phydev, INGRESS,
			 ptp_l4 && vsc8531->ptp->rx_filter != HWTSTAMP_FILTER_NONE);
	vsc85xx_ptp_conf(phydev, INGRESS, one_step,
			 vsc8531->ptp->rx_filter != HWTSTAMP_FILTER_NONE);

	vsc85xx_eth1_conf(phydev, EGRESS,
			  vsc8531->ptp->tx_type != HWTSTAMP_TX_OFF);
	vsc85xx_ip1_conf(phydev, EGRESS,
			 ptp_l4 && vsc8531->ptp->tx_type != HWTSTAMP_TX_OFF);
	vsc85xx_ptp_conf(phydev, EGRESS, one_step,
			 vsc8531->ptp->tx_type != HWTSTAMP_TX_OFF);

	val &= ~PTP_ANALYZER_MODE_EGR_ENA(BIT(eng_id));
	if (vsc8531->ptp->tx_type != HWTSTAMP_TX_OFF)
		val |= PTP_ANALYZER_MODE_EGR_ENA(BIT(eng_id));

	val &= ~PTP_ANALYZER_MODE_INGR_ENA(BIT(eng_id));
	if (vsc8531->ptp->rx_filter != HWTSTAMP_FILTER_NONE)
		val |= PTP_ANALYZER_MODE_INGR_ENA(BIT(eng_id));

	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ANALYZER_MODE,
			     val);

	return 0;
}

void vsc85xx_link_change_notify(struct phy_device *phydev)
{
	struct vsc8531_private *priv = phydev->priv;

	mutex_lock(&priv->ts_lock);
	vsc85xx_ts_set_latencies(phydev);
	mutex_unlock(&priv->ts_lock);
}

static void vsc85xx_ts_reset_fifo(struct phy_device *phydev)
{
	u32 val;

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_EGR_TS_FIFO_CTRL);
	val |= PTP_EGR_TS_FIFO_RESET;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TS_FIFO_CTRL,
			     val);

	val &= ~PTP_EGR_TS_FIFO_RESET;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TS_FIFO_CTRL,
			     val);
}

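/* hwtstamp() callback: apply the requested TX type and RX filter, keeping the
 * predictors disabled and the unused direction bypassed while the analyzer
 * engines are reconfigured.
 */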
static int vsc85xx_hwtstamp(struct mii_timestamper *mii_ts,
			    struct kernel_hwtstamp_config *cfg,
			    struct netlink_ext_ack *extack)
{
	struct vsc8531_private *vsc8531 =
		container_of(mii_ts, struct vsc8531_private, mii_ts);
	struct phy_device *phydev = vsc8531->ptp->phydev;
	bool one_step = false;
	u32 val;

	switch (cfg->tx_type) {
	case HWTSTAMP_TX_ONESTEP_SYNC:
		one_step = true;
		break;
	case HWTSTAMP_TX_ON:
		break;
	case HWTSTAMP_TX_OFF:
		skb_queue_purge(&vsc8531->ptp->tx_queue);
		break;
	default:
		return -ERANGE;
	}

	vsc8531->ptp->tx_type = cfg->tx_type;

	switch (cfg->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		/* ETH->IP->UDP->PTP */
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		/* ETH->PTP */
		break;
	default:
		return -ERANGE;
	}

	vsc8531->ptp->rx_filter = cfg->rx_filter;

	mutex_lock(&vsc8531->ts_lock);

	/* Disable predictor while configuring the 1588 block */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_INGR_PREDICTOR);
	val &= ~PTP_INGR_PREDICTOR_EN;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_PREDICTOR,
			     val);
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_EGR_PREDICTOR);
	val &= ~PTP_EGR_PREDICTOR_EN;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_PREDICTOR,
			     val);

	/* Bypass egress or ingress blocks if timestamping isn't used */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL);
	val &= ~(PTP_IFACE_CTRL_EGR_BYPASS | PTP_IFACE_CTRL_INGR_BYPASS);
	if (vsc8531->ptp->tx_type == HWTSTAMP_TX_OFF)
		val |= PTP_IFACE_CTRL_EGR_BYPASS;
	if (vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_NONE)
		val |= PTP_IFACE_CTRL_INGR_BYPASS;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL, val);

	/* Resetting FIFO so that it's empty after reconfiguration */
	vsc85xx_ts_reset_fifo(phydev);

	vsc85xx_ts_engine_init(phydev, one_step);

	/* Re-enable predictors now */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_INGR_PREDICTOR);
	val |= PTP_INGR_PREDICTOR_EN;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_PREDICTOR,
			     val);
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_EGR_PREDICTOR);
	val |= PTP_EGR_PREDICTOR_EN;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_PREDICTOR,
			     val);

	vsc8531->ptp->configured = 1;
	mutex_unlock(&vsc8531->ts_lock);

	return 0;
}

static int vsc85xx_ts_info(struct mii_timestamper *mii_ts,
			   struct kernel_ethtool_ts_info *info)
{
	struct vsc8531_private *vsc8531 =
		container_of(mii_ts, struct vsc8531_private, mii_ts);

	info->phc_index = ptp_clock_index(vsc8531->ptp->ptp_clock);
	info->so_timestamping =
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types =
		(1 << HWTSTAMP_TX_OFF) |
		(1 << HWTSTAMP_TX_ON) |
		(1 << HWTSTAMP_TX_ONESTEP_SYNC);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
		(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);

	return 0;
}

static void vsc85xx_txtstamp(struct mii_timestamper *mii_ts,
			     struct sk_buff *skb, int type)
{
	struct vsc8531_private *vsc8531 =
		container_of(mii_ts, struct vsc8531_private, mii_ts);

	if (!vsc8531->ptp->configured)
		goto out;

	if (vsc8531->ptp->tx_type == HWTSTAMP_TX_OFF)
		goto out;

	if (vsc8531->ptp->tx_type == HWTSTAMP_TX_ONESTEP_SYNC)
		if (ptp_msg_is_sync(skb, type))
			goto out;

	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	skb_queue_tail(&vsc8531->ptp->tx_queue, skb);
	return;

out:
	kfree_skb(skb);
}

static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts,
			     struct sk_buff *skb, int type)
{
	struct vsc8531_private *vsc8531 =
		container_of(mii_ts, struct vsc8531_private, mii_ts);
	struct vsc85xx_ptphdr *ptphdr;
	unsigned long ns;

	if (!vsc8531->ptp->configured)
		return false;

	if (vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_NONE ||
	    type == PTP_CLASS_NONE)
		return false;

	ptphdr = get_ptp_header_rx(skb, vsc8531->ptp->rx_filter);
	if (!ptphdr)
		return false;

	ns = ntohl(ptphdr->rsrvd2);

	VSC8531_SKB_CB(skb)->ns = ns;
	skb_queue_tail(&vsc8531->rx_skbs_list, skb);

	ptp_schedule_worker(vsc8531->ptp->ptp_clock, 0);

	return true;
}

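/* Deferred RX timestamp completion. The hardware only stores the 30-bit
 * nanoseconds value in the reserved field of the PTP header, so the seconds
 * are taken from the PHC here; if the nanoseconds already rolled over, the
 * seconds value is decremented by one before the timestamp is attached and
 * the skb is handed back to the stack.
 */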
static long vsc85xx_do_aux_work(struct ptp_clock_info *info)
{
	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
	struct skb_shared_hwtstamps *shhwtstamps = NULL;
	struct phy_device *phydev = ptp->phydev;
	struct vsc8531_private *priv = phydev->priv;
	struct sk_buff_head received;
	struct sk_buff *rx_skb;
	struct timespec64 ts;
	unsigned long flags;

	__skb_queue_head_init(&received);
	spin_lock_irqsave(&priv->rx_skbs_list.lock, flags);
	skb_queue_splice_tail_init(&priv->rx_skbs_list, &received);
	spin_unlock_irqrestore(&priv->rx_skbs_list.lock, flags);

	vsc85xx_gettime(info, &ts);
	while ((rx_skb = __skb_dequeue(&received)) != NULL) {
		shhwtstamps = skb_hwtstamps(rx_skb);
		memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));

		if (ts.tv_nsec < VSC8531_SKB_CB(rx_skb)->ns)
			ts.tv_sec--;

		shhwtstamps->hwtstamp = ktime_set(ts.tv_sec,
						  VSC8531_SKB_CB(rx_skb)->ns);
		netif_rx(rx_skb);
	}

	return -1;
}

static const struct ptp_clock_info vsc85xx_clk_caps = {
	.owner		= THIS_MODULE,
	.name		= "VSC85xx timer",
	.max_adj	= S32_MAX,
	.n_alarm	= 0,
	.n_pins		= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.pps		= 0,
	.adjtime	= &vsc85xx_adjtime,
	.adjfine	= &vsc85xx_adjfine,
	.gettime64	= &vsc85xx_gettime,
	.settime64	= &vsc85xx_settime,
	.do_aux_work	= &vsc85xx_do_aux_work,
};

static struct vsc8531_private *vsc8584_base_priv(struct phy_device *phydev)
{
	struct vsc8531_private *vsc8531 = phydev->priv;

	if (vsc8531->ts_base_addr != phydev->mdio.addr) {
		struct mdio_device *dev;

		dev = phydev->mdio.bus->mdio_map[vsc8531->ts_base_addr];
		phydev = container_of(dev, struct phy_device, mdio);

		return phydev->priv;
	}

	return vsc8531;
}

static bool vsc8584_is_1588_input_clk_configured(struct phy_device *phydev)
{
	struct vsc8531_private *vsc8531 = vsc8584_base_priv(phydev);

	return vsc8531->input_clk_init;
}

static void vsc8584_set_input_clk_configured(struct phy_device *phydev)
{
	struct vsc8531_private *vsc8531 = vsc8584_base_priv(phydev);

	vsc8531->input_clk_init = true;
}

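/* 1588 block bring-up: select the LTC clock source, program the LTC sequence
 * and 1PPS width, run the n-phase sampler calibration, set the rewriter and
 * TS FIFO defaults, disable all analyzer flows and finally initialize the
 * ETH1, IP1 and PTP comparators for both directions.
 */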
static int __vsc8584_init_ptp(struct phy_device *phydev)
{
	static const u32 ltc_seq_e[] = { 0, 400000, 0, 0, 0 };
	static const u8 ltc_seq_a[] = { 8, 6, 5, 4, 2 };
	u32 val;

	if (!vsc8584_is_1588_input_clk_configured(phydev)) {
		phy_lock_mdio_bus(phydev);

		/* 1588_DIFF_INPUT_CLK configuration: Use an external clock for
		 * the LTC, as per 3.13.29 in the VSC8584 datasheet.
		 */
		phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
				  MSCC_PHY_PAGE_1588);
		phy_ts_base_write(phydev, 29, 0x7ae0);
		phy_ts_base_write(phydev, 30, 0xb71c);
		phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
				  MSCC_PHY_PAGE_STANDARD);

		phy_unlock_mdio_bus(phydev);

		vsc8584_set_input_clk_configured(phydev);
	}

	/* Disable predictor before configuring the 1588 block */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_INGR_PREDICTOR);
	val &= ~PTP_INGR_PREDICTOR_EN;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_PREDICTOR,
			     val);
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_EGR_PREDICTOR);
	val &= ~PTP_EGR_PREDICTOR_EN;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_PREDICTOR,
			     val);

	/* By default, the internal clock of fixed rate 250MHz is used */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL);
	val &= ~PTP_LTC_CTRL_CLK_SEL_MASK;
	val |= PTP_LTC_CTRL_CLK_SEL_INTERNAL_250;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_SEQUENCE);
	val &= ~PTP_LTC_SEQUENCE_A_MASK;
	val |= PTP_LTC_SEQUENCE_A(ltc_seq_a[PHC_CLK_250MHZ]);
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_SEQUENCE, val);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_SEQ);
	val &= ~(PTP_LTC_SEQ_ERR_MASK | PTP_LTC_SEQ_ADD_SUB);
	if (ltc_seq_e[PHC_CLK_250MHZ])
		val |= PTP_LTC_SEQ_ADD_SUB;
	val |= PTP_LTC_SEQ_ERR(ltc_seq_e[PHC_CLK_250MHZ]);
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_SEQ, val);

	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_1PPS_WIDTH_ADJ,
			     PPS_WIDTH_ADJ);

	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_DELAY_FIFO,
			     IS_ENABLED(CONFIG_MACSEC) ?
			     PTP_INGR_DELAY_FIFO_DEPTH_MACSEC :
			     PTP_INGR_DELAY_FIFO_DEPTH_DEFAULT);

	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_DELAY_FIFO,
			     IS_ENABLED(CONFIG_MACSEC) ?
			     PTP_EGR_DELAY_FIFO_DEPTH_MACSEC :
			     PTP_EGR_DELAY_FIFO_DEPTH_DEFAULT);

	/* Enable n-phase sampler for Viper Rev-B */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_ACCUR_CFG_STATUS);
	val &= ~(PTP_ACCUR_PPS_OUT_BYPASS | PTP_ACCUR_PPS_IN_BYPASS |
		 PTP_ACCUR_EGR_SOF_BYPASS | PTP_ACCUR_INGR_SOF_BYPASS |
		 PTP_ACCUR_LOAD_SAVE_BYPASS);
	val |= PTP_ACCUR_PPS_OUT_CALIB_ERR | PTP_ACCUR_PPS_OUT_CALIB_DONE |
	       PTP_ACCUR_PPS_IN_CALIB_ERR | PTP_ACCUR_PPS_IN_CALIB_DONE |
	       PTP_ACCUR_EGR_SOF_CALIB_ERR | PTP_ACCUR_EGR_SOF_CALIB_DONE |
	       PTP_ACCUR_INGR_SOF_CALIB_ERR | PTP_ACCUR_INGR_SOF_CALIB_DONE |
	       PTP_ACCUR_LOAD_SAVE_CALIB_ERR | PTP_ACCUR_LOAD_SAVE_CALIB_DONE;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
			     val);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_ACCUR_CFG_STATUS);
	val |= PTP_ACCUR_CALIB_TRIGG;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
			     val);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_ACCUR_CFG_STATUS);
	val &= ~PTP_ACCUR_CALIB_TRIGG;
	val |= PTP_ACCUR_PPS_OUT_CALIB_ERR | PTP_ACCUR_PPS_OUT_CALIB_DONE |
	       PTP_ACCUR_PPS_IN_CALIB_ERR | PTP_ACCUR_PPS_IN_CALIB_DONE |
	       PTP_ACCUR_EGR_SOF_CALIB_ERR | PTP_ACCUR_EGR_SOF_CALIB_DONE |
	       PTP_ACCUR_INGR_SOF_CALIB_ERR | PTP_ACCUR_INGR_SOF_CALIB_DONE |
	       PTP_ACCUR_LOAD_SAVE_CALIB_ERR | PTP_ACCUR_LOAD_SAVE_CALIB_DONE;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
			     val);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_ACCUR_CFG_STATUS);
	val |= PTP_ACCUR_CALIB_TRIGG;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
			     val);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_ACCUR_CFG_STATUS);
	val &= ~PTP_ACCUR_CALIB_TRIGG;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
			     val);

	/* Do not access FIFO via SI */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_TSTAMP_FIFO_SI);
	val &= ~PTP_TSTAMP_FIFO_SI_EN;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_TSTAMP_FIFO_SI,
			     val);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_INGR_REWRITER_CTRL);
	val &= ~PTP_INGR_REWRITER_REDUCE_PREAMBLE;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_REWRITER_CTRL,
			     val);
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_EGR_REWRITER_CTRL);
	val &= ~PTP_EGR_REWRITER_REDUCE_PREAMBLE;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_REWRITER_CTRL,
			     val);

	/* Put the flag that indicates the frame has been modified to bit 7 */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_INGR_REWRITER_CTRL);
	val |= PTP_INGR_REWRITER_FLAG_BIT_OFF(7) | PTP_INGR_REWRITER_FLAG_VAL;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_REWRITER_CTRL,
			     val);
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_EGR_REWRITER_CTRL);
	val |= PTP_EGR_REWRITER_FLAG_BIT_OFF(7);
	val &= ~PTP_EGR_REWRITER_FLAG_VAL;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_REWRITER_CTRL,
			     val);

	/* 30bit mode for RX timestamp, only the nanoseconds are kept in
	 * reserved field.
	 */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_INGR_TSP_CTRL);
	val |= PHY_PTP_INGR_TSP_CTRL_FRACT_NS;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_TSP_CTRL,
			     val);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TSP_CTRL);
	val |= PHY_PTP_EGR_TSP_CTRL_FRACT_NS;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TSP_CTRL, val);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_SERIAL_TOD_IFACE);
	val |= PTP_SERIAL_TOD_IFACE_LS_AUTO_CLR;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_SERIAL_TOD_IFACE,
			     val);

	vsc85xx_ts_fsb_init(phydev);

	/* Set the Egress timestamp FIFO configuration and status register */
	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_EGR_TS_FIFO_CTRL);
	val &= ~(PTP_EGR_TS_FIFO_SIG_BYTES_MASK | PTP_EGR_TS_FIFO_THRESH_MASK);
	/* 16 bytes for the signature, 10 for the timestamp in the TS FIFO */
	val |= PTP_EGR_TS_FIFO_SIG_BYTES(16) | PTP_EGR_TS_FIFO_THRESH(7);
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TS_FIFO_CTRL,
			     val);

	vsc85xx_ts_reset_fifo(phydev);

	val = PTP_IFACE_CTRL_CLK_ENA;
	if (!IS_ENABLED(CONFIG_MACSEC))
		val |= PTP_IFACE_CTRL_GMII_PROT;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL, val);

	vsc85xx_ts_set_latencies(phydev);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_VERSION_CODE);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL);
	val |= PTP_IFACE_CTRL_EGR_BYPASS;
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL, val);

	vsc85xx_ts_disable_flows(phydev, EGRESS);
	vsc85xx_ts_disable_flows(phydev, INGRESS);

	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				  MSCC_PHY_PTP_ANALYZER_MODE);
	/* Disable INGRESS and EGRESS so engine eng_id can be reconfigured */
	val &= ~(PTP_ANALYZER_MODE_EGR_ENA_MASK |
		 PTP_ANALYZER_MODE_INGR_ENA_MASK |
		 PTP_ANA_INGR_ENCAP_FLOW_MODE_MASK |
		 PTP_ANA_EGR_ENCAP_FLOW_MODE_MASK);
	/* Strict matching in flow (packets should match flows from the same
	 * index in all enabled comparators (except PTP)).
	 */
	val |= PTP_ANA_SPLIT_ENCAP_FLOW | PTP_ANA_INGR_ENCAP_FLOW_MODE(0x7) |
	       PTP_ANA_EGR_ENCAP_FLOW_MODE(0x7);
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ANALYZER_MODE,
			     val);

	/* Initialized for ingress and egress flows:
	 * - The Ethernet comparator.
	 * - The IP comparator.
	 * - The PTP comparator.
	 */
	vsc85xx_eth_cmp1_init(phydev, INGRESS);
	vsc85xx_ip_cmp1_init(phydev, INGRESS);
	vsc85xx_ptp_cmp_init(phydev, INGRESS);
	vsc85xx_eth_cmp1_init(phydev, EGRESS);
	vsc85xx_ip_cmp1_init(phydev, EGRESS);
	vsc85xx_ptp_cmp_init(phydev, EGRESS);

	vsc85xx_ts_eth_cmp1_sig(phydev);

	return 0;
}

void vsc8584_config_ts_intr(struct phy_device *phydev)
{
	struct vsc8531_private *priv = phydev->priv;

	mutex_lock(&priv->ts_lock);
	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_1588_VSC85XX_INT_MASK,
			     VSC85XX_1588_INT_MASK_MASK);
	mutex_unlock(&priv->ts_lock);
}

int vsc8584_ptp_init(struct phy_device *phydev)
{
	switch (phydev->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_VSC8572:
	case PHY_ID_VSC8574:
	case PHY_ID_VSC8575:
	case PHY_ID_VSC8582:
	case PHY_ID_VSC8584:
		return __vsc8584_init_ptp(phydev);
	}

	return 0;
}

void vsc8584_ptp_deinit(struct phy_device *phydev)
{
	struct vsc8531_private *vsc8531 = phydev->priv;

	if (vsc8531->ptp->ptp_clock) {
		ptp_clock_unregister(vsc8531->ptp->ptp_clock);
		skb_queue_purge(&vsc8531->rx_skbs_list);
		skb_queue_purge(&vsc8531->ptp->tx_queue);
	}
}

irqreturn_t vsc8584_handle_ts_interrupt(struct phy_device *phydev)
{
	struct vsc8531_private *priv = phydev->priv;
	int rc;

	mutex_lock(&priv->ts_lock);
	rc = vsc85xx_ts_read_csr(phydev, PROCESSOR,
				 MSCC_PHY_1588_VSC85XX_INT_STATUS);
	/* Ack the PTP interrupt */
	vsc85xx_ts_write_csr(phydev, PROCESSOR,
			     MSCC_PHY_1588_VSC85XX_INT_STATUS, rc);

	if (!(rc & VSC85XX_1588_INT_MASK_MASK)) {
		mutex_unlock(&priv->ts_lock);
		return IRQ_NONE;
	}

	if (rc & VSC85XX_1588_INT_FIFO_ADD) {
		vsc85xx_get_tx_ts(priv->ptp);
	} else if (rc & VSC85XX_1588_INT_FIFO_OVERFLOW) {
		skb_queue_purge(&priv->ptp->tx_queue);
		vsc85xx_ts_reset_fifo(phydev);
	}

	mutex_unlock(&priv->ts_lock);
	return IRQ_HANDLED;
}

int vsc8584_ptp_probe(struct phy_device *phydev)
{
	struct vsc8531_private *vsc8531 = phydev->priv;

	vsc8531->ptp = devm_kzalloc(&phydev->mdio.dev, sizeof(*vsc8531->ptp),
				    GFP_KERNEL);
	if (!vsc8531->ptp)
		return -ENOMEM;

	mutex_init(&vsc8531->phc_lock);
	mutex_init(&vsc8531->ts_lock);
	skb_queue_head_init(&vsc8531->rx_skbs_list);
	skb_queue_head_init(&vsc8531->ptp->tx_queue);

	/* Retrieve the shared load/save GPIO. Request it as non exclusive as
	 * the same GPIO can be requested by all the PHYs of the same package.
	 * This GPIO must be used with the gpio_lock taken (the lock is shared
	 * between all PHYs).
	 */
	vsc8531->load_save = devm_gpiod_get_optional(&phydev->mdio.dev, "load-save",
						     GPIOD_FLAGS_BIT_NONEXCLUSIVE |
						     GPIOD_OUT_LOW);
	if (IS_ERR(vsc8531->load_save)) {
		phydev_err(phydev, "Can't get load-save GPIO (%ld)\n",
			   PTR_ERR(vsc8531->load_save));
		return PTR_ERR(vsc8531->load_save);
	}

	/* Timestamp selected by default to keep legacy API */
	phydev->default_timestamp = true;

	vsc8531->ptp->phydev = phydev;

	vsc8531->mii_ts.rxtstamp = vsc85xx_rxtstamp;
	vsc8531->mii_ts.txtstamp = vsc85xx_txtstamp;
	vsc8531->mii_ts.hwtstamp = vsc85xx_hwtstamp;
	vsc8531->mii_ts.ts_info = vsc85xx_ts_info;
	phydev->mii_ts = &vsc8531->mii_ts;

	memcpy(&vsc8531->ptp->caps, &vsc85xx_clk_caps, sizeof(vsc85xx_clk_caps));
	vsc8531->ptp->ptp_clock = ptp_clock_register(&vsc8531->ptp->caps,
						     &phydev->mdio.dev);
	return PTR_ERR_OR_ZERO(vsc8531->ptp->ptp_clock);
}

int vsc8584_ptp_probe_once(struct phy_device *phydev)
{
	struct vsc85xx_shared_private *shared =
		(struct vsc85xx_shared_private *)phydev->shared->priv;

	/* Initialize shared GPIO lock */
	mutex_init(&shared->gpio_lock);

	return 0;
}