// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include "qede_ptp.h"
#define QEDE_PTP_TX_TIMEOUT (2 * HZ)

struct qede_ptp {
	const struct qed_eth_ptp_ops *ops;
	struct ptp_clock_info clock_info;
	struct cyclecounter cc;
	struct timecounter tc;
	struct ptp_clock *clock;
	struct work_struct work;
	unsigned long ptp_tx_start;
	struct qede_dev *edev;
	struct sk_buff *tx_skb;

	/* The ptp spinlock protects the cycle/time counter fields and also
	 * serializes the qed PTP API invocations.
	 */
	spinlock_t lock;
	bool hw_ts_ioctl_called;
	u16 tx_type;
	u16 rx_filter;
};

/**
 * qede_ptp_adjfreq() - Adjust the frequency of the PTP cycle counter.
 *
 * @info: The PTP clock info structure.
 * @ppb: Parts per billion adjustment from base.
 *
 * Return: Zero on success, negative errno otherwise.
 */
static int qede_ptp_adjfreq(struct ptp_clock_info *info, s32 ppb)
{
	struct qede_ptp *ptp = container_of(info, struct qede_ptp, clock_info);
	struct qede_dev *edev = ptp->edev;
	int rc;

	__qede_lock(edev);
	if (edev->state == QEDE_STATE_OPEN) {
		spin_lock_bh(&ptp->lock);
		rc = ptp->ops->adjfreq(edev->cdev, ppb);
		spin_unlock_bh(&ptp->lock);
	} else {
		DP_ERR(edev, "PTP adjfreq called while interface is down\n");
		rc = -EFAULT;
	}
	__qede_unlock(edev);

	return rc;
}

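/**
 * qede_ptp_adjtime() - Shift the PHC time by a signed offset.
 *
 * @info: The PTP clock info structure.
 * @delta: Offset in nanoseconds to add to the timecounter.
 *
 * Return: Always zero.
 */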
static int qede_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
{
	struct qede_dev *edev;
	struct qede_ptp *ptp;

	ptp = container_of(info, struct qede_ptp, clock_info);
	edev = ptp->edev;

	DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP adjtime called, delta = %llx\n",
		   delta);

	spin_lock_bh(&ptp->lock);
	timecounter_adjtime(&ptp->tc, delta);
	spin_unlock_bh(&ptp->lock);

	return 0;
}

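/**
 * qede_ptp_gettime() - Read the current PHC time.
 *
 * @info: The PTP clock info structure.
 * @ts: Buffer that receives the current time as a timespec64.
 *
 * Return: Always zero.
 */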
static int qede_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
{
	struct qede_dev *edev;
	struct qede_ptp *ptp;
	u64 ns;

	ptp = container_of(info, struct qede_ptp, clock_info);
	edev = ptp->edev;

	spin_lock_bh(&ptp->lock);
	ns = timecounter_read(&ptp->tc);
	spin_unlock_bh(&ptp->lock);

	DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP gettime called, ns = %llu\n", ns);

	*ts = ns_to_timespec64(ns);

	return 0;
}

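/**
 * qede_ptp_settime() - Set the PHC time by re-initializing the timecounter.
 *
 * @info: The PTP clock info structure.
 * @ts: New time to program, as a timespec64.
 *
 * Return: Always zero.
 */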
static int qede_ptp_settime(struct ptp_clock_info *info,
			    const struct timespec64 *ts)
{
	struct qede_dev *edev;
	struct qede_ptp *ptp;
	u64 ns;

	ptp = container_of(info, struct qede_ptp, clock_info);
	edev = ptp->edev;

	ns = timespec64_to_ns(ts);

	DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP settime called, ns = %llu\n", ns);

	/* Re-init the timecounter */
	spin_lock_bh(&ptp->lock);
	timecounter_init(&ptp->tc, &ptp->cc, ns);
	spin_unlock_bh(&ptp->lock);

	return 0;
}

/* Enable (or disable) ancillary features of the phc subsystem */
static int qede_ptp_ancillary_feature_enable(struct ptp_clock_info *info,
					     struct ptp_clock_request *rq,
					     int on)
{
	struct qede_dev *edev;
	struct qede_ptp *ptp;

	ptp = container_of(info, struct qede_ptp, clock_info);
	edev = ptp->edev;

	DP_ERR(edev, "PHC ancillary features are not supported\n");

	return -ENOTSUPP;
}

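/* Deferred work that polls the device for a Tx timestamp. If no timestamp is
 * recorded before QEDE_PTP_TX_TIMEOUT expires, the pending skb is dropped and
 * the in-progress flag is cleared; otherwise the work is rescheduled until a
 * valid value is read.
 */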
static void qede_ptp_task(struct work_struct *work)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct qede_dev *edev;
	struct qede_ptp *ptp;
	u64 timestamp, ns;
	bool timedout;
	int rc;

	ptp = container_of(work, struct qede_ptp, work);
	edev = ptp->edev;
	timedout = time_is_before_jiffies(ptp->ptp_tx_start +
					  QEDE_PTP_TX_TIMEOUT);

	/* Read Tx timestamp registers */
	spin_lock_bh(&ptp->lock);
	rc = ptp->ops->read_tx_ts(edev->cdev, &timestamp);
	spin_unlock_bh(&ptp->lock);
	if (rc) {
		if (unlikely(timedout)) {
			DP_INFO(edev, "Tx timestamp is not recorded\n");
			dev_kfree_skb_any(ptp->tx_skb);
			ptp->tx_skb = NULL;
			clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS,
					 &edev->flags);
			edev->ptp_skip_txts++;
		} else {
			/* Reschedule to keep checking for a valid TS value */
			schedule_work(&ptp->work);
		}
		return;
	}

	ns = timecounter_cyc2time(&ptp->tc, timestamp);
	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(ptp->tx_skb, &shhwtstamps);
	dev_kfree_skb_any(ptp->tx_skb);
	ptp->tx_skb = NULL;
	clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);

	DP_VERBOSE(edev, QED_MSG_DEBUG,
		   "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
		   timestamp, ns);
}

/* Read the PHC. This API is invoked with ptp_lock held. */
static u64 qede_ptp_read_cc(const struct cyclecounter *cc)
{
	struct qede_dev *edev;
	struct qede_ptp *ptp;
	u64 phc_cycles;
	int rc;

	ptp = container_of(cc, struct qede_ptp, cc);
	edev = ptp->edev;
	rc = ptp->ops->read_cc(edev->cdev, &phc_cycles);
	if (rc)
		WARN_ONCE(1, "PHC read err %d\n", rc);

	DP_VERBOSE(edev, QED_MSG_DEBUG, "PHC read cycles = %llu\n", phc_cycles);

	return phc_cycles;
}

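/* Translate the tx_type and rx_filter captured by the last hardware
 * timestamping ioctl request into qed filter values and program them into
 * the device.
 */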
static int qede_ptp_cfg_filters(struct qede_dev *edev)
{
	enum qed_ptp_hwtstamp_tx_type tx_type = QED_PTP_HWTSTAMP_TX_ON;
	enum qed_ptp_filter_type rx_filter = QED_PTP_FILTER_NONE;
	struct qede_ptp *ptp = edev->ptp;

	if (!ptp)
		return -EIO;

	if (!ptp->hw_ts_ioctl_called) {
		DP_INFO(edev, "TS IOCTL not called\n");
		return 0;
	}

	switch (ptp->tx_type) {
	case HWTSTAMP_TX_ON:
		set_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags);
		tx_type = QED_PTP_HWTSTAMP_TX_ON;
		break;

	case HWTSTAMP_TX_OFF:
		clear_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags);
		tx_type = QED_PTP_HWTSTAMP_TX_OFF;
		break;

	case HWTSTAMP_TX_ONESTEP_SYNC:
	case HWTSTAMP_TX_ONESTEP_P2P:
		DP_ERR(edev, "One-step timestamping is not supported\n");
		return -ERANGE;
	}

	spin_lock_bh(&ptp->lock);
	switch (ptp->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		rx_filter = QED_PTP_FILTER_NONE;
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_NTP_ALL:
		ptp->rx_filter = HWTSTAMP_FILTER_NONE;
		rx_filter = QED_PTP_FILTER_ALL;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		rx_filter = QED_PTP_FILTER_V1_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		/* Initialize PTP detection for UDP/IPv4 events */
		rx_filter = QED_PTP_FILTER_V1_L4_GEN;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		rx_filter = QED_PTP_FILTER_V2_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		/* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
		rx_filter = QED_PTP_FILTER_V2_L4_GEN;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		rx_filter = QED_PTP_FILTER_V2_L2_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		/* Initialize PTP detection for L2 events */
		rx_filter = QED_PTP_FILTER_V2_L2_GEN;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		rx_filter = QED_PTP_FILTER_V2_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		/* Initialize PTP detection for L2, UDP/IPv4 or UDP/IPv6 events */
		rx_filter = QED_PTP_FILTER_V2_GEN;
		break;
	}

	ptp->ops->cfg_filters(edev->cdev, rx_filter, tx_type);

	spin_unlock_bh(&ptp->lock);

	return 0;
}

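/**
 * qede_ptp_hw_ts() - Handle a hardware timestamping configuration request.
 *
 * @edev: The qede device.
 * @ifr: The ioctl request carrying a struct hwtstamp_config from user space.
 *
 * Return: Zero on success, negative errno otherwise.
 */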
int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct qede_ptp *ptp;
	int rc;

	ptp = edev->ptp;
	if (!ptp)
		return -EIO;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	DP_VERBOSE(edev, QED_MSG_DEBUG,
		   "HWTSTAMP IOCTL: Requested tx_type = %d, requested rx_filters = %d\n",
		   config.tx_type, config.rx_filter);

	if (config.flags) {
		DP_ERR(edev, "config.flags is reserved for future use\n");
		return -EINVAL;
	}

	ptp->hw_ts_ioctl_called = 1;
	ptp->tx_type = config.tx_type;
	ptp->rx_filter = config.rx_filter;

	rc = qede_ptp_cfg_filters(edev);
	if (rc)
		return rc;

	config.rx_filter = ptp->rx_filter;

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}

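/**
 * qede_ptp_get_ts_info() - Report timestamping capabilities to ethtool.
 *
 * @edev: The qede device.
 * @info: The ethtool timestamping info structure to fill.
 *
 * Return: Always zero.
 */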
int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info)
{
	struct qede_ptp *ptp = edev->ptp;

	if (!ptp) {
		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
					SOF_TIMESTAMPING_RX_SOFTWARE |
					SOF_TIMESTAMPING_SOFTWARE;
		info->phc_index = -1;

		return 0;
	}

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE |
				SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	if (ptp->clock)
		info->phc_index = ptp_clock_index(ptp->clock);
	else
		info->phc_index = -1;

	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
			   BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
			   BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);

	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);

	return 0;
}

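/**
 * qede_ptp_disable() - Tear down PTP support.
 *
 * @edev: The qede device.
 *
 * Unregisters the PTP clock, cancels the Tx timestamping work, releases any
 * pending Tx skb, disables PTP in the hardware and frees the context.
 */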
void qede_ptp_disable(struct qede_dev *edev)
{
	struct qede_ptp *ptp;

	ptp = edev->ptp;
	if (!ptp)
		return;

	if (ptp->clock) {
		ptp_clock_unregister(ptp->clock);
		ptp->clock = NULL;
	}

	/* Cancel PTP work queue. Should be done after the Tx queues are
	 * drained to prevent additional scheduling.
	 */
	cancel_work_sync(&ptp->work);
	if (ptp->tx_skb) {
		dev_kfree_skb_any(ptp->tx_skb);
		ptp->tx_skb = NULL;
		clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
	}

	/* Disable PTP in HW */
	spin_lock_bh(&ptp->lock);
	ptp->ops->disable(edev->cdev);
	spin_unlock_bh(&ptp->lock);

	kfree(ptp);
	edev->ptp = NULL;
}

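/* Initialize the PTP context: enable PTP in the hardware, set up the Tx
 * timestamping work item and initialize the cycle/time counters.
 */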
static int qede_ptp_init(struct qede_dev *edev)
{
	struct qede_ptp *ptp;
	int rc;

	ptp = edev->ptp;
	if (!ptp)
		return -EINVAL;

	spin_lock_init(&ptp->lock);

	/* Configure PTP in HW */
	rc = ptp->ops->enable(edev->cdev);
	if (rc) {
		DP_INFO(edev, "PTP HW enable failed\n");
		return rc;
	}

	/* Init work queue for Tx timestamping */
	INIT_WORK(&ptp->work, qede_ptp_task);

	/* Init cyclecounter and timecounter */
	memset(&ptp->cc, 0, sizeof(ptp->cc));
	ptp->cc.read = qede_ptp_read_cc;
	ptp->cc.mask = CYCLECOUNTER_MASK(64);
	ptp->cc.shift = 0;
	ptp->cc.mult = 1;

	timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));

	return 0;
}

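/**
 * qede_ptp_enable() - Allocate and initialize PTP support.
 *
 * @edev: The qede device.
 *
 * Allocates the PTP context, initializes the hardware and registers a PTP
 * clock with the PHC subsystem.
 *
 * Return: Zero on success, negative errno otherwise.
 */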
int qede_ptp_enable(struct qede_dev *edev)
{
	struct qede_ptp *ptp;
	int rc;

	ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
	if (!ptp) {
		DP_INFO(edev, "Failed to allocate struct for PTP\n");
		return -ENOMEM;
	}

	ptp->edev = edev;
	ptp->ops = edev->ops->ptp;
	if (!ptp->ops) {
		DP_INFO(edev, "PTP enable failed\n");
		rc = -EIO;
		goto err1;
	}

	edev->ptp = ptp;

	rc = qede_ptp_init(edev);
	if (rc)
		goto err1;

	qede_ptp_cfg_filters(edev);

	/* Fill the ptp_clock_info struct and register PTP clock */
	ptp->clock_info.owner = THIS_MODULE;
	snprintf(ptp->clock_info.name, 16, "%s", edev->ndev->name);
	ptp->clock_info.max_adj = QED_MAX_PHC_DRIFT_PPB;
	ptp->clock_info.n_alarm = 0;
	ptp->clock_info.n_ext_ts = 0;
	ptp->clock_info.n_per_out = 0;
	ptp->clock_info.pps = 0;
	ptp->clock_info.adjfreq = qede_ptp_adjfreq;
	ptp->clock_info.adjtime = qede_ptp_adjtime;
	ptp->clock_info.gettime64 = qede_ptp_gettime;
	ptp->clock_info.settime64 = qede_ptp_settime;
	ptp->clock_info.enable = qede_ptp_ancillary_feature_enable;

	ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev);
	if (IS_ERR(ptp->clock)) {
		DP_ERR(edev, "PTP clock registration failed\n");
		qede_ptp_disable(edev);
		rc = -EINVAL;
		goto err2;
	}

	return 0;

err1:
	kfree(ptp);
err2:
	edev->ptp = NULL;

	return rc;
}

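/**
 * qede_ptp_tx_ts() - Request a Tx hardware timestamp for an skb.
 *
 * @edev: The qede device.
 * @skb: The packet to be timestamped on transmission.
 *
 * At most one packet may be outstanding; if Tx timestamping is disabled or a
 * timestamp is already in progress, the packet is counted as skipped.
 * Otherwise the skb is held and the Tx timestamp work is scheduled.
 */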
void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb)
{
	struct qede_ptp *ptp;

	ptp = edev->ptp;
	if (!ptp)
		return;

	if (test_and_set_bit_lock(QEDE_FLAGS_PTP_TX_IN_PRORGESS,
				  &edev->flags)) {
		DP_ERR(edev, "Timestamping in progress\n");
		edev->ptp_skip_txts++;
		return;
	}

	if (unlikely(!test_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags))) {
		DP_ERR(edev,
		       "Tx timestamping was not enabled, this packet will not be timestamped\n");
		clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
		edev->ptp_skip_txts++;
	} else if (unlikely(ptp->tx_skb)) {
		DP_ERR(edev,
		       "The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
		clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
		edev->ptp_skip_txts++;
	} else {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		/* schedule check for Tx timestamp */
		ptp->tx_skb = skb_get(skb);
		ptp->ptp_tx_start = jiffies;
		schedule_work(&ptp->work);
	}
}

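/**
 * qede_ptp_rx_ts() - Attach the Rx hardware timestamp to an skb.
 *
 * @edev: The qede device.
 * @skb: The received packet.
 *
 * Reads the Rx timestamp registers, converts the cycle count to nanoseconds
 * and stores the result in the skb's hardware timestamp.
 */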
void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb)
{
	struct qede_ptp *ptp;
	u64 timestamp, ns;
	int rc;

	ptp = edev->ptp;
	if (!ptp)
		return;

	spin_lock_bh(&ptp->lock);
	rc = ptp->ops->read_rx_ts(edev->cdev, &timestamp);
	if (rc) {
		spin_unlock_bh(&ptp->lock);
		DP_INFO(edev, "Invalid Rx timestamp\n");
		return;
	}

	ns = timecounter_cyc2time(&ptp->tc, timestamp);
	spin_unlock_bh(&ptp->lock);
	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
	DP_VERBOSE(edev, QED_MSG_DEBUG,
		   "Rx timestamp, timestamp cycles = %llu, ns = %llu\n",
		   timestamp, ns);
}