// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2021 Pensando Systems, Inc */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include "ionic.h"
#include "ionic_bus.h"
#include "ionic_lif.h"
#include "ionic_ethtool.h"

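/* Translate the stack's HWTSTAMP_TX_* request into the device's
 * IONIC_TXSTAMP_* mode, or -ERANGE if the type is not recognized.
 */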
static int ionic_hwstamp_tx_mode(int config_tx_type)
{
	switch (config_tx_type) {
	case HWTSTAMP_TX_OFF:
		return IONIC_TXSTAMP_OFF;
	case HWTSTAMP_TX_ON:
		return IONIC_TXSTAMP_ON;
	case HWTSTAMP_TX_ONESTEP_SYNC:
		return IONIC_TXSTAMP_ONESTEP_SYNC;
	case HWTSTAMP_TX_ONESTEP_P2P:
		return IONIC_TXSTAMP_ONESTEP_P2P;
	default:
		return -ERANGE;
	}
}

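/* Translate an HWTSTAMP_FILTER_* rx filter into the device's packet
 * classification bits.  A return of 0 means the filter cannot be matched
 * in hardware; the caller then falls back to timestamping all rx packets.
 */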
static u64 ionic_hwstamp_rx_filt(int config_rx_filter)
{
	switch (config_rx_filter) {
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		return IONIC_PKT_CLS_PTP1_ALL;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		return IONIC_PKT_CLS_PTP1_SYNC;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		return IONIC_PKT_CLS_PTP1_SYNC | IONIC_PKT_CLS_PTP1_DREQ;

	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		return IONIC_PKT_CLS_PTP2_L4_ALL;
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		return IONIC_PKT_CLS_PTP2_L4_SYNC;
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		return IONIC_PKT_CLS_PTP2_L4_SYNC | IONIC_PKT_CLS_PTP2_L4_DREQ;

	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		return IONIC_PKT_CLS_PTP2_L2_ALL;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		return IONIC_PKT_CLS_PTP2_L2_SYNC;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		return IONIC_PKT_CLS_PTP2_L2_SYNC | IONIC_PKT_CLS_PTP2_L2_DREQ;

	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		return IONIC_PKT_CLS_PTP2_ALL;
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		return IONIC_PKT_CLS_PTP2_SYNC;
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		return IONIC_PKT_CLS_PTP2_SYNC | IONIC_PKT_CLS_PTP2_DREQ;

	case HWTSTAMP_FILTER_NTP_ALL:
		return IONIC_PKT_CLS_NTP_ALL;

	default:
		return 0;
	}
}

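/* Apply a timestamping configuration to the device: translate the request,
 * verify it against the modes and filters advertised in the LIF identity,
 * create the dedicated timestamp queues as needed, then program the tx mode
 * and rx filter.  On failure the previously programmed settings are restored.
 * Called with the queue_lock held; takes the phc config_lock.
 */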
static int ionic_lif_hwstamp_set_ts_config(struct ionic_lif *lif,
					   struct hwtstamp_config *new_ts)
{
	struct ionic *ionic = lif->ionic;
	struct hwtstamp_config *config;
	struct hwtstamp_config ts;
	int tx_mode = 0;
	u64 rx_filt = 0;
	int err, err2;
	bool rx_all;
	__le64 mask;

	if (!lif->phc || !lif->phc->ptp)
		return -EOPNOTSUPP;

	mutex_lock(&lif->phc->config_lock);

	if (new_ts) {
		config = new_ts;
	} else {
		/* If called with new_ts == NULL, replay the previous request
		 * primarily for recovery after a FW_RESET.
		 * We saved the previous configuration request info, so copy
		 * the previous request for reference, clear the current state
		 * to match the device's reset state, and run with it.
		 */
		config = &ts;
		memcpy(config, &lif->phc->ts_config, sizeof(*config));
		memset(&lif->phc->ts_config, 0, sizeof(lif->phc->ts_config));
		lif->phc->ts_config_tx_mode = 0;
		lif->phc->ts_config_rx_filt = 0;
	}

	tx_mode = ionic_hwstamp_tx_mode(config->tx_type);
	if (tx_mode < 0) {
		err = tx_mode;
		goto err_queues;
	}

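	/* The LIF identity advertises the supported tx timestamp modes as a
	 * bitmask; reject a mode the device does not claim to support.
	 */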
	mask = cpu_to_le64(BIT_ULL(tx_mode));
	if ((ionic->ident.lif.eth.hwstamp_tx_modes & mask) != mask) {
		err = -ERANGE;
		goto err_queues;
	}

	rx_filt = ionic_hwstamp_rx_filt(config->rx_filter);
	rx_all = config->rx_filter != HWTSTAMP_FILTER_NONE && !rx_filt;

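	/* If the requested classification is not in the device's advertised
	 * rx filter mask, fall back to timestamping all rx packets and
	 * report HWTSTAMP_FILTER_ALL back to the caller.
	 */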
	mask = cpu_to_le64(rx_filt);
	if ((ionic->ident.lif.eth.hwstamp_rx_filters & mask) != mask) {
		rx_filt = 0;
		rx_all = true;
		config->rx_filter = HWTSTAMP_FILTER_ALL;
	}

	dev_dbg(ionic->dev, "%s: config_rx_filter %d rx_filt %#llx rx_all %d\n",
		__func__, config->rx_filter, rx_filt, rx_all);

	if (tx_mode) {
		err = ionic_lif_create_hwstamp_txq(lif);
		if (err)
			goto err_queues;
	}

	if (rx_filt) {
		err = ionic_lif_create_hwstamp_rxq(lif);
		if (err)
			goto err_queues;
	}

	if (tx_mode != lif->phc->ts_config_tx_mode) {
		err = ionic_lif_set_hwstamp_txmode(lif, tx_mode);
		if (err)
			goto err_txmode;
	}

	if (rx_filt != lif->phc->ts_config_rx_filt) {
		err = ionic_lif_set_hwstamp_rxfilt(lif, rx_filt);
		if (err)
			goto err_rxfilt;
	}

	if (rx_all != (lif->phc->ts_config.rx_filter == HWTSTAMP_FILTER_ALL)) {
		err = ionic_lif_config_hwstamp_rxq_all(lif, rx_all);
		if (err)
			goto err_rxall;
	}

	memcpy(&lif->phc->ts_config, config, sizeof(*config));
	lif->phc->ts_config_rx_filt = rx_filt;
	lif->phc->ts_config_tx_mode = tx_mode;

	mutex_unlock(&lif->phc->config_lock);

	return 0;

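/* On error, unwind in reverse order and restore the previously programmed
 * rx filter and tx mode.
 */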
err_rxall:
	if (rx_filt != lif->phc->ts_config_rx_filt) {
		rx_filt = lif->phc->ts_config_rx_filt;
		err2 = ionic_lif_set_hwstamp_rxfilt(lif, rx_filt);
		if (err2)
			dev_err(ionic->dev,
				"Failed to revert rx timestamp filter: %d\n", err2);
	}
err_rxfilt:
	if (tx_mode != lif->phc->ts_config_tx_mode) {
		tx_mode = lif->phc->ts_config_tx_mode;
		err2 = ionic_lif_set_hwstamp_txmode(lif, tx_mode);
		if (err2)
			dev_err(ionic->dev,
				"Failed to revert tx timestamp mode: %d\n", err2);
	}
err_txmode:
	/* special queues remain allocated, just unused */
err_queues:
	mutex_unlock(&lif->phc->config_lock);
	return err;
}

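/* Handle a timestamping configuration request from user space: copy in the
 * hwtstamp_config, apply it under the queue_lock, and copy the possibly
 * adjusted config back so the caller sees what was actually enabled.
 */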
int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (!lif->phc || !lif->phc->ptp)
		return -EOPNOTSUPP;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	mutex_lock(&lif->queue_lock);
	err = ionic_lif_hwstamp_set_ts_config(lif, &config);
	mutex_unlock(&lif->queue_lock);
	if (err) {
		netdev_info(lif->netdev, "hwstamp set failed: %d\n", err);
		return err;
	}

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

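/* Re-apply the last saved timestamping configuration, typically after a
 * firmware reset has returned the device to its default state.
 */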
void ionic_lif_hwstamp_replay(struct ionic_lif *lif)
{
	int err;

	if (!lif->phc || !lif->phc->ptp)
		return;

	mutex_lock(&lif->queue_lock);
	err = ionic_lif_hwstamp_set_ts_config(lif, NULL);
	mutex_unlock(&lif->queue_lock);
	if (err)
		netdev_info(lif->netdev, "hwstamp replay failed: %d\n", err);
}

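/* Recreate the dedicated timestamp tx/rx queues if a timestamping mode or
 * rx filter is currently configured.
 */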
void ionic_lif_hwstamp_recreate_queues(struct ionic_lif *lif)
{
	int err;

	if (!lif->phc || !lif->phc->ptp)
		return;

	mutex_lock(&lif->phc->config_lock);

	if (lif->phc->ts_config_tx_mode) {
		err = ionic_lif_create_hwstamp_txq(lif);
		if (err)
			netdev_info(lif->netdev, "hwstamp recreate txq failed: %d\n", err);
	}

	if (lif->phc->ts_config_rx_filt) {
		err = ionic_lif_create_hwstamp_rxq(lif);
		if (err)
			netdev_info(lif->netdev, "hwstamp recreate rxq failed: %d\n", err);
	}

	mutex_unlock(&lif->phc->config_lock);
}

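/* Report the currently applied timestamping configuration back to user space. */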
int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr)
{
	struct hwtstamp_config config;

	if (!lif->phc || !lif->phc->ptp)
		return -EOPNOTSUPP;

	mutex_lock(&lif->phc->config_lock);
	memcpy(&config, &lif->phc->ts_config, sizeof(config));
	mutex_unlock(&lif->phc->config_lock);

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;
	return 0;
}

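/* Read the 64-bit free-running device tick counter from the split 32-bit
 * high/low registers, guarding against the low word rolling over between
 * the two reads.  The low-word read is optionally bracketed with system
 * timestamps for gettimex64.
 */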
static u64 ionic_hwstamp_read(struct ionic *ionic,
			      struct ptp_system_timestamp *sts)
{
	u32 tick_high_before, tick_high, tick_low;

	/* read and discard low part to defeat hw staging of high part */
	(void)ioread32(&ionic->idev.hwstamp_regs->tick_low);

	tick_high_before = ioread32(&ionic->idev.hwstamp_regs->tick_high);

	ptp_read_system_prets(sts);
	tick_low = ioread32(&ionic->idev.hwstamp_regs->tick_low);
	ptp_read_system_postts(sts);

	tick_high = ioread32(&ionic->idev.hwstamp_regs->tick_high);

	/* If tick_high changed, re-read tick_low once more. Assume tick_high
	 * cannot change again within the short span of the re-read.
	 */
	if (tick_high != tick_high_before) {
		ptp_read_system_prets(sts);
		tick_low = ioread32(&ionic->idev.hwstamp_regs->tick_low);
		ptp_read_system_postts(sts);
	}

	return (u64)tick_low | ((u64)tick_high << 32);
}

static u64 ionic_cc_read(const struct cyclecounter *cc)
{
	struct ionic_phc *phc = container_of(cc, struct ionic_phc, cc);
	struct ionic *ionic = phc->lif->ionic;

	return ionic_hwstamp_read(ionic, NULL);
}

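/* Push the driver's current timecounter state (last tick, nanoseconds,
 * fractional remainder, multiplier and shift) to the firmware with a
 * LIF_SETPHC admin command, so the device can convert its tick counter to
 * time consistently with the driver.
 */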
static int ionic_setphc_cmd(struct ionic_phc *phc, struct ionic_admin_ctx *ctx)
{
	ctx->work = COMPLETION_INITIALIZER_ONSTACK(ctx->work);

	ctx->cmd.lif_setphc.opcode = IONIC_CMD_LIF_SETPHC;
	ctx->cmd.lif_setphc.lif_index = cpu_to_le16(phc->lif->index);

	ctx->cmd.lif_setphc.tick = cpu_to_le64(phc->tc.cycle_last);
	ctx->cmd.lif_setphc.nsec = cpu_to_le64(phc->tc.nsec);
	ctx->cmd.lif_setphc.frac = cpu_to_le64(phc->tc.frac);
	ctx->cmd.lif_setphc.mult = cpu_to_le32(phc->cc.mult);
	ctx->cmd.lif_setphc.shift = cpu_to_le32(phc->cc.shift);

	return ionic_adminq_post(phc->lif, ctx);
}

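/* Adjust the clock frequency.  scaled_ppm is parts per million with a 16-bit
 * binary fractional part, so the new multiplier becomes
 * init_cc_mult + init_cc_mult * scaled_ppm / SCALED_PPM, applied after
 * re-reading the timecounter so the rate change takes effect from now.
 */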
static int ionic_phc_adjfine(struct ptp_clock_info *info, long scaled_ppm)
{
	struct ionic_phc *phc = container_of(info, struct ionic_phc, ptp_info);
	struct ionic_admin_ctx ctx = {};
	unsigned long irqflags;
	s64 adj;
	int err;

	/* Reject phc adjustments during device upgrade */
	if (test_bit(IONIC_LIF_F_FW_RESET, phc->lif->state))
		return -EBUSY;

	/* Adjustment value scaled by 2^16 million */
	adj = (s64)scaled_ppm * phc->init_cc_mult;

	/* Adjustment value to scale */
	adj /= (s64)SCALED_PPM;

	/* Final adjusted multiplier */
	adj += phc->init_cc_mult;

	spin_lock_irqsave(&phc->lock, irqflags);

	/* update the point-in-time basis to now, before adjusting the rate */
	timecounter_read(&phc->tc);
	phc->cc.mult = adj;

	/* Setphc commands are posted in-order, sequenced by phc->lock. We
	 * need to drop the lock before waiting for the command to complete.
	 */
	err = ionic_setphc_cmd(phc, &ctx);

	spin_unlock_irqrestore(&phc->lock, irqflags);

	return ionic_adminq_wait(phc->lif, &ctx, err, true);
}

static int ionic_phc_adjtime(struct ptp_clock_info *info, s64 delta)
{
	struct ionic_phc *phc = container_of(info, struct ionic_phc, ptp_info);
	struct ionic_admin_ctx ctx = {};
	unsigned long irqflags;
	int err;

	/* Reject phc adjustments during device upgrade */
	if (test_bit(IONIC_LIF_F_FW_RESET, phc->lif->state))
		return -EBUSY;

	spin_lock_irqsave(&phc->lock, irqflags);

	timecounter_adjtime(&phc->tc, delta);

	/* Setphc commands are posted in-order, sequenced by phc->lock. We
	 * need to drop the lock before waiting for the command to complete.
	 */
	err = ionic_setphc_cmd(phc, &ctx);

	spin_unlock_irqrestore(&phc->lock, irqflags);

	return ionic_adminq_wait(phc->lif, &ctx, err, true);
}

static int ionic_phc_settime64(struct ptp_clock_info *info,
			       const struct timespec64 *ts)
{
	struct ionic_phc *phc = container_of(info, struct ionic_phc, ptp_info);
	struct ionic_admin_ctx ctx = {};
	unsigned long irqflags;
	int err;
	u64 ns;

	/* Reject phc adjustments during device upgrade */
	if (test_bit(IONIC_LIF_F_FW_RESET, phc->lif->state))
		return -EBUSY;

	ns = timespec64_to_ns(ts);

	spin_lock_irqsave(&phc->lock, irqflags);

	timecounter_init(&phc->tc, &phc->cc, ns);

	/* Setphc commands are posted in-order, sequenced by phc->lock. We
	 * need to drop the lock before waiting for the command to complete.
	 */
	err = ionic_setphc_cmd(phc, &ctx);

	spin_unlock_irqrestore(&phc->lock, irqflags);

	return ionic_adminq_wait(phc->lif, &ctx, err, true);
}

static int ionic_phc_gettimex64(struct ptp_clock_info *info,
				struct timespec64 *ts,
				struct ptp_system_timestamp *sts)
{
	struct ionic_phc *phc = container_of(info, struct ionic_phc, ptp_info);
	struct ionic *ionic = phc->lif->ionic;
	unsigned long irqflags;
	u64 tick, ns;

	/* Do not attempt to read device time during upgrade */
	if (test_bit(IONIC_LIF_F_FW_RESET, phc->lif->state))
		return -EBUSY;

	spin_lock_irqsave(&phc->lock, irqflags);

	tick = ionic_hwstamp_read(ionic, sts);

	ns = timecounter_cyc2time(&phc->tc, tick);

	spin_unlock_irqrestore(&phc->lock, irqflags);

	*ts = ns_to_timespec64(ns);

	return 0;
}

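/* Periodic worker scheduled via the PTP aux work mechanism: refresh the
 * timecounter's point-in-time basis well before the tick counter can wrap,
 * and push the new basis to the firmware.
 */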
static long ionic_phc_aux_work(struct ptp_clock_info *info)
{
	struct ionic_phc *phc = container_of(info, struct ionic_phc, ptp_info);
	struct ionic_admin_ctx ctx = {};
	unsigned long irqflags;
	int err;

	/* Do not update phc during device upgrade, but keep polling to resume
	 * after upgrade. Since we don't update the point in time basis, there
	 * is no expectation that we are maintaining the phc time during the
	 * upgrade. After upgrade, it will need to be readjusted back to the
	 * correct time by the ptp daemon.
	 */
	if (test_bit(IONIC_LIF_F_FW_RESET, phc->lif->state))
		return phc->aux_work_delay;

	spin_lock_irqsave(&phc->lock, irqflags);

	/* update point-in-time basis to now */
	timecounter_read(&phc->tc);

	/* Setphc commands are posted in-order, sequenced by phc->lock. We
	 * need to drop the lock before waiting for the command to complete.
	 */
	err = ionic_setphc_cmd(phc, &ctx);

	spin_unlock_irqrestore(&phc->lock, irqflags);

	ionic_adminq_wait(phc->lif, &ctx, err, true);

	return phc->aux_work_delay;
}

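/* Convert a raw device tick value (e.g. a hardware timestamp reported by the
 * device) into a kernel ktime using the shared timecounter.
 */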
ktime_t ionic_lif_phc_ktime(struct ionic_lif *lif, u64 tick)
{
	unsigned long irqflags;
	u64 ns;

	if (!lif->phc)
		return 0;

	spin_lock_irqsave(&lif->phc->lock, irqflags);
	ns = timecounter_cyc2time(&lif->phc->tc, tick);
	spin_unlock_irqrestore(&lif->phc->lock, irqflags);

	return ns_to_ktime(ns);
}

static const struct ptp_clock_info ionic_ptp_info = {
	.owner = THIS_MODULE,
	.name = "ionic_ptp",
	.adjfine = ionic_phc_adjfine,
	.adjtime = ionic_phc_adjtime,
	.gettimex64 = ionic_phc_gettimex64,
	.settime64 = ionic_phc_settime64,
	.do_aux_work = ionic_phc_aux_work,
};

void ionic_lif_register_phc(struct ionic_lif *lif)
{
	if (!lif->phc || !(lif->hw_features & IONIC_ETH_HW_TIMESTAMP))
		return;

	lif->phc->ptp = ptp_clock_register(&lif->phc->ptp_info, lif->ionic->dev);

	if (IS_ERR(lif->phc->ptp)) {
		dev_warn(lif->ionic->dev, "Cannot register phc device: %ld\n",
			 PTR_ERR(lif->phc->ptp));

		lif->phc->ptp = NULL;
	}

	if (lif->phc->ptp)
		ptp_schedule_worker(lif->phc->ptp, lif->phc->aux_work_delay);
}

void ionic_lif_unregister_phc(struct ionic_lif *lif)
{
	if (!lif->phc || !lif->phc->ptp)
		return;

	ptp_clock_unregister(lif->phc->ptp);

	lif->phc->ptp = NULL;
}

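/* Allocate and initialize the PHC state: build a cyclecounter from the tick
 * mask/mult/shift advertised in the device identity, narrow the counter mask
 * so the wrap period stays comfortably longer than the update interval,
 * scale the multiplier up for finer adjfine resolution, and choose the
 * periodic update delay.
 */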
void ionic_lif_alloc_phc(struct ionic_lif *lif)
{
	struct ionic *ionic = lif->ionic;
	struct ionic_phc *phc;
	u64 delay, diff, mult;
	u64 frac = 0;
	u64 features;
	u32 shift;

	if (!ionic->idev.hwstamp_regs)
		return;

	features = le64_to_cpu(ionic->ident.lif.eth.config.features);
	if (!(features & IONIC_ETH_HW_TIMESTAMP))
		return;

	phc = devm_kzalloc(ionic->dev, sizeof(*phc), GFP_KERNEL);
	if (!phc)
		return;

	phc->lif = lif;

	phc->cc.read = ionic_cc_read;
	phc->cc.mask = le64_to_cpu(ionic->ident.dev.hwstamp_mask);
	phc->cc.mult = le32_to_cpu(ionic->ident.dev.hwstamp_mult);
	phc->cc.shift = le32_to_cpu(ionic->ident.dev.hwstamp_shift);

	if (!phc->cc.mult) {
		dev_err(lif->ionic->dev,
			"Invalid device PHC mask multiplier %u, disabling HW timestamp support\n",
			phc->cc.mult);
		devm_kfree(lif->ionic->dev, phc);
		lif->phc = NULL;
		return;
	}

	dev_dbg(lif->ionic->dev, "Device PHC mask %#llx mult %u shift %u\n",
		phc->cc.mask, phc->cc.mult, phc->cc.shift);

	spin_lock_init(&phc->lock);
	mutex_init(&phc->config_lock);

	/* max ticks is limited by the multiplier, or by the update period. */
	if (phc->cc.shift + 2 + ilog2(IONIC_PHC_UPDATE_NS) >= 64) {
		/* max ticks that do not overflow when multiplied by max
		 * adjusted multiplier (twice the initial multiplier)
		 */
		diff = U64_MAX / phc->cc.mult / 2;
	} else {
		/* approx ticks at four times the update period */
		diff = (u64)IONIC_PHC_UPDATE_NS << (phc->cc.shift + 2);
		diff = DIV_ROUND_UP(diff, phc->cc.mult);
	}

	/* transform to bitmask */
	diff |= diff >> 1;
	diff |= diff >> 2;
	diff |= diff >> 4;
	diff |= diff >> 8;
	diff |= diff >> 16;
	diff |= diff >> 32;

	/* constrain to the hardware bitmask, and use this as the bitmask */
	diff &= phc->cc.mask;
	phc->cc.mask = diff;

	/* the wrap period is now defined by diff (or phc->cc.mask)
	 *
	 * we will update the time basis at about 1/4 the wrap period, so
	 * should not see a difference of more than +/- diff/4.
	 *
	 * this is sufficient to stay within +/- diff/2, as required by
	 * timecounter_cyc2time to detect an old time stamp.
	 *
	 * adjust the initial multiplier, being careful to avoid overflow:
	 * - do not overflow 63 bits: init_cc_mult * SCALED_PPM
	 * - do not overflow 64 bits: max_mult * (diff / 2)
	 *
	 * we want to increase the initial multiplier as much as possible, to
	 * allow for more precise adjustment in ionic_phc_adjfine.
	 *
	 * only adjust the multiplier if we can double it or more.
	 */
	mult = U64_MAX / 2 / max(diff / 2, SCALED_PPM);
	shift = mult / phc->cc.mult;
	if (shift >= 2) {
		/* initial multiplier will be 2^n of hardware cc.mult */
		shift = fls(shift);
		/* increase cc.mult and cc.shift by the same 2^n and n. */
		phc->cc.mult <<= shift;
		phc->cc.shift += shift;
	}

	dev_dbg(lif->ionic->dev, "Initial PHC mask %#llx mult %u shift %u\n",
		phc->cc.mask, phc->cc.mult, phc->cc.shift);

	/* frequency adjustments are relative to the initial multiplier */
	phc->init_cc_mult = phc->cc.mult;

	timecounter_init(&phc->tc, &phc->cc, ktime_get_real_ns());

	/* Update cycle_last at 1/4 the wrap period, or IONIC_PHC_UPDATE_NS */
	delay = min_t(u64, IONIC_PHC_UPDATE_NS,
		      cyclecounter_cyc2ns(&phc->cc, diff / 4, 0, &frac));
	dev_dbg(lif->ionic->dev, "Work delay %llu ms\n", delay / NSEC_PER_MSEC);

	phc->aux_work_delay = nsecs_to_jiffies(delay);

	phc->ptp_info = ionic_ptp_info;

	/* We allow adjusting the multiplier by up to +/- 1 part per 1,
	 * expressed here as NORMAL_PPB (1 billion parts per billion).
	 */
	phc->ptp_info.max_adj = NORMAL_PPB;

	lif->phc = phc;
}

void ionic_lif_free_phc(struct ionic_lif *lif)
{
	if (!lif->phc)
		return;

	mutex_destroy(&lif->phc->config_lock);

	devm_kfree(lif->ionic->dev, lif->phc);
	lif->phc = NULL;
}