• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/clocksource.h>
34 #include <linux/highmem.h>
35 #include <linux/ptp_clock_kernel.h>
36 #include <rdma/mlx5-abi.h>
37 #include "lib/eq.h"
38 #include "en.h"
39 #include "clock.h"
40 
/* Fixed-point shift used by the cyclecounter (cycles -> ns) conversion. */
enum {
	MLX5_CYCLES_SHIFT	= 23
};
44 
/* MTPPS pin_mode values: input (external timestamp) vs. output (periodic/pulse). */
enum {
	MLX5_PIN_MODE_IN		= 0x0,
	MLX5_PIN_MODE_OUT		= 0x1,
};
49 
/* MTPPS output pattern for pins in output mode. */
enum {
	MLX5_OUT_PATTERN_PULSE		= 0x0,
	MLX5_OUT_PATTERN_PERIODIC	= 0x1,
};
54 
/* MTPPSE event generation mode (note: "REPETETIVE" spelling matches HW docs
 * usage elsewhere in this driver; renaming would touch multiple call sites).
 */
enum {
	MLX5_EVENT_MODE_DISABLE	= 0x0,
	MLX5_EVENT_MODE_REPETETIVE	= 0x1,
	MLX5_EVENT_MODE_ONCE_TILL_ARM	= 0x2,
};
60 
/* MTPPS register field_select bits: which fields of the write are applied.
 * Bit positions mirror the MTPPS register layout (bits 1 and 6 are unused here).
 */
enum {
	MLX5_MTPPS_FS_ENABLE			= BIT(0x0),
	MLX5_MTPPS_FS_PATTERN			= BIT(0x2),
	MLX5_MTPPS_FS_PIN_MODE			= BIT(0x3),
	MLX5_MTPPS_FS_TIME_STAMP		= BIT(0x4),
	MLX5_MTPPS_FS_OUT_PULSE_DURATION	= BIT(0x5),
	MLX5_MTPPS_FS_ENH_OUT_PER_ADJ		= BIT(0x7),
};
69 
mlx5_real_time_mode(struct mlx5_core_dev * mdev)70 static bool mlx5_real_time_mode(struct mlx5_core_dev *mdev)
71 {
72 	return (mlx5_is_real_time_rq(mdev) || mlx5_is_real_time_sq(mdev));
73 }
74 
mlx5_modify_mtutc_allowed(struct mlx5_core_dev * mdev)75 static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev)
76 {
77 	return MLX5_CAP_MCAM_FEATURE(mdev, ptpcyc2realtime_modify);
78 }
79 
/* Write @mtutc (@size bytes) to the MTUTC access register.
 * Returns -EOPNOTSUPP when the device does not implement MTUTC.
 */
static int mlx5_set_mtutc(struct mlx5_core_dev *dev, u32 *mtutc, u32 size)
{
	u32 reply[MLX5_ST_SZ_DW(mtutc_reg)] = {};

	if (!MLX5_CAP_MCAM_REG(dev, mtutc))
		return -EOPNOTSUPP;

	return mlx5_core_access_reg(dev, mtutc, size, reply, sizeof(reply),
				    MLX5_REG_MTUTC, 0, 1);
}
90 
/* Read the device's 64-bit time from the init segment: either the real-time
 * clock or the free-running internal timer, selected by @real_time.
 *
 * High and low halves live in separate 32-bit BE registers, so the high word
 * is sampled before and after the low word; if they differ, the low word
 * wrapped in between and is re-read so it pairs with the new high word.
 * @sts (may be NULL) receives system timestamps bracketing the low-word read
 * (used for PTP_SYS_OFFSET_EXTENDED-style cross-timestamping).
 */
static u64 mlx5_read_time(struct mlx5_core_dev *dev,
			  struct ptp_system_timestamp *sts,
			  bool real_time)
{
	u32 timer_h, timer_h1, timer_l;

	timer_h = ioread32be(real_time ? &dev->iseg->real_time_h :
			     &dev->iseg->internal_timer_h);
	ptp_read_system_prets(sts);
	timer_l = ioread32be(real_time ? &dev->iseg->real_time_l :
			     &dev->iseg->internal_timer_l);
	ptp_read_system_postts(sts);
	timer_h1 = ioread32be(real_time ? &dev->iseg->real_time_h :
			      &dev->iseg->internal_timer_h);
	if (timer_h != timer_h1) {
		/* wrap around: re-read low word under the new high word */
		ptp_read_system_prets(sts);
		timer_l = ioread32be(real_time ? &dev->iseg->real_time_l :
				     &dev->iseg->internal_timer_l);
		ptp_read_system_postts(sts);
	}

	/* Real-time registers encode sec/nsec; otherwise return raw cycles. */
	return real_time ? REAL_TIME_TO_NS(timer_h1, timer_l) :
			   (u64)timer_l | (u64)timer_h1 << 32;
}
116 
read_internal_timer(const struct cyclecounter * cc)117 static u64 read_internal_timer(const struct cyclecounter *cc)
118 {
119 	struct mlx5_timer *timer = container_of(cc, struct mlx5_timer, cycles);
120 	struct mlx5_clock *clock = container_of(timer, struct mlx5_clock, timer);
121 	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
122 						  clock);
123 
124 	return mlx5_read_time(mdev, NULL, false) & cc->mask;
125 }
126 
/* Publish the current timecounter state to the page user space (mlx5_ib)
 * maps for clock reads.  Writers follow a seqcount-like protocol on
 * clock_info->sign: the KERNEL_UPDATING bit is set (with a full barrier via
 * smp_store_mb) before the fields are written, and the final
 * smp_store_release advances sign past the update so readers that saw an odd
 * value retry.  Caller must hold clock->lock.
 */
static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
{
	struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
	struct mlx5_clock *clock = &mdev->clock;
	struct mlx5_timer *timer;
	u32 sign;

	/* Page allocation may have failed at init; nothing to publish then. */
	if (!clock_info)
		return;

	sign = smp_load_acquire(&clock_info->sign);
	smp_store_mb(clock_info->sign,
		     sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);

	timer = &clock->timer;
	clock_info->cycles = timer->tc.cycle_last;
	clock_info->mult   = timer->cycles.mult;
	clock_info->nsec   = timer->tc.nsec;
	clock_info->frac   = timer->tc.frac;

	/* sign + 2*UPDATING clears the updating bit and bumps the sequence. */
	smp_store_release(&clock_info->sign,
			  sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2);
}
150 
/* Deferred work: program the next pulse start time for every PEROUT pin that
 * has a pending start timestamp (set by mlx5_pps_event).  The start value is
 * consumed (read and zeroed) under clock->lock per pin, then written to the
 * MTPPS register outside the lock since register access may sleep.
 */
static void mlx5_pps_out(struct work_struct *work)
{
	struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
						 out_work);
	struct mlx5_clock *clock = container_of(pps_info, struct mlx5_clock,
						pps_info);
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
						  clock);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	unsigned long flags;
	int i;

	for (i = 0; i < clock->ptp_info.n_pins; i++) {
		u64 tstart;

		write_seqlock_irqsave(&clock->lock, flags);
		tstart = clock->pps_info.start[i];
		clock->pps_info.start[i] = 0;
		write_sequnlock_irqrestore(&clock->lock, flags);
		/* No pending event for this pin */
		if (!tstart)
			continue;

		MLX5_SET(mtpps_reg, in, pin, i);
		MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
		MLX5_SET(mtpps_reg, in, field_select, MLX5_MTPPS_FS_TIME_STAMP);
		mlx5_set_mtpps(mdev, in, sizeof(in));
	}
}
179 
/* Periodic work that reads the timecounter often enough that the 41-bit HW
 * cycle counter cannot wrap unnoticed (period computed in
 * mlx5_init_overflow_period).  Skips the read while the device is in
 * internal-error state but always re-arms itself.
 */
static void mlx5_timestamp_overflow(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlx5_core_dev *mdev;
	struct mlx5_timer *timer;
	struct mlx5_clock *clock;
	unsigned long flags;

	timer = container_of(dwork, struct mlx5_timer, overflow_work);
	clock = container_of(timer, struct mlx5_clock, timer);
	mdev = container_of(clock, struct mlx5_core_dev, clock);

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		goto out;

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_read(&timer->tc);
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

out:
	schedule_delayed_work(&timer->overflow_work, timer->overflow_period);
}
203 
mlx5_ptp_settime_real_time(struct mlx5_core_dev * mdev,const struct timespec64 * ts)204 static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev,
205 				      const struct timespec64 *ts)
206 {
207 	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
208 
209 	if (!mlx5_modify_mtutc_allowed(mdev))
210 		return 0;
211 
212 	if (ts->tv_sec < 0 || ts->tv_sec > U32_MAX ||
213 	    ts->tv_nsec < 0 || ts->tv_nsec > NSEC_PER_SEC)
214 		return -EINVAL;
215 
216 	MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_SET_TIME_IMMEDIATE);
217 	MLX5_SET(mtutc_reg, in, utc_sec, ts->tv_sec);
218 	MLX5_SET(mtutc_reg, in, utc_nsec, ts->tv_nsec);
219 
220 	return mlx5_set_mtutc(mdev, in, sizeof(in));
221 }
222 
/* PTP settime64 callback: push the new time to the HW real-time clock (when
 * supported) and re-initialize the SW timecounter to the same value, then
 * republish the user-space clock-info page.  All timecounter state is
 * updated under clock->lock.
 */
static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_timer *timer = &clock->timer;
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	int err;

	mdev = container_of(clock, struct mlx5_core_dev, clock);
	err = mlx5_ptp_settime_real_time(mdev, ts);
	if (err)
		return err;

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_init(&timer->tc, &timer->cycles, timespec64_to_ns(ts));
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return 0;
}
243 
244 static
mlx5_ptp_gettimex_real_time(struct mlx5_core_dev * mdev,struct ptp_system_timestamp * sts)245 struct timespec64 mlx5_ptp_gettimex_real_time(struct mlx5_core_dev *mdev,
246 					      struct ptp_system_timestamp *sts)
247 {
248 	struct timespec64 ts;
249 	u64 time;
250 
251 	time = mlx5_read_time(mdev, sts, true);
252 	ts = ns_to_timespec64(time);
253 	return ts;
254 }
255 
mlx5_ptp_gettimex(struct ptp_clock_info * ptp,struct timespec64 * ts,struct ptp_system_timestamp * sts)256 static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
257 			     struct ptp_system_timestamp *sts)
258 {
259 	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
260 	struct mlx5_timer *timer = &clock->timer;
261 	struct mlx5_core_dev *mdev;
262 	unsigned long flags;
263 	u64 cycles, ns;
264 
265 	mdev = container_of(clock, struct mlx5_core_dev, clock);
266 	if (mlx5_real_time_mode(mdev)) {
267 		*ts = mlx5_ptp_gettimex_real_time(mdev, sts);
268 		goto out;
269 	}
270 
271 	write_seqlock_irqsave(&clock->lock, flags);
272 	cycles = mlx5_read_time(mdev, sts, false);
273 	ns = timecounter_cyc2time(&timer->tc, cycles);
274 	write_sequnlock_irqrestore(&clock->lock, flags);
275 	*ts = ns_to_timespec64(ns);
276 out:
277 	return 0;
278 }
279 
/* Apply a time offset to the HW real-time clock.  Silently succeeds when
 * MTUTC modification is unsupported.  The MTUTC time_adjustment field is
 * only 16 bits signed, so larger deltas fall back to an absolute
 * read-modify-settime (which is not atomic with respect to clock reads).
 */
static int mlx5_ptp_adjtime_real_time(struct mlx5_core_dev *mdev, s64 delta)
{
	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};

	if (!mlx5_modify_mtutc_allowed(mdev))
		return 0;

	/* HW time adjustment range is s16. If out of range, settime instead */
	if (delta < S16_MIN || delta > S16_MAX) {
		struct timespec64 ts;
		s64 ns;

		ts = mlx5_ptp_gettimex_real_time(mdev, NULL);
		ns = timespec64_to_ns(&ts) + delta;
		ts = ns_to_timespec64(ns);
		return mlx5_ptp_settime_real_time(mdev, &ts);
	}

	MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_TIME);
	MLX5_SET(mtutc_reg, in, time_adjustment, delta);

	return mlx5_set_mtutc(mdev, in, sizeof(in));
}
303 
/* PTP adjtime callback: offset both the HW real-time clock (when supported)
 * and the SW timecounter by @delta nanoseconds, then republish the
 * user-space clock-info page.  Timecounter state changes under clock->lock.
 */
static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_timer *timer = &clock->timer;
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	int err;

	mdev = container_of(clock, struct mlx5_core_dev, clock);

	err = mlx5_ptp_adjtime_real_time(mdev, delta);
	if (err)
		return err;
	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_adjtime(&timer->tc, delta);
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return 0;
}
324 
/* Apply a frequency adjustment (@freq, driver ppb units) to the HW real-time
 * clock.  A no-op (returns 0) when MTUTC modification is unsupported.
 */
static int mlx5_ptp_adjfreq_real_time(struct mlx5_core_dev *mdev, s32 freq)
{
	u32 mtutc_in[MLX5_ST_SZ_DW(mtutc_reg)] = {};

	if (!mlx5_modify_mtutc_allowed(mdev))
		return 0;

	MLX5_SET(mtutc_reg, mtutc_in, operation,
		 MLX5_MTUTC_OPERATION_ADJUST_FREQ_UTC);
	MLX5_SET(mtutc_reg, mtutc_in, freq_adjustment, freq);

	return mlx5_set_mtutc(mdev, mtutc_in, sizeof(mtutc_in));
}
337 
/* PTP adjfreq callback: scale the cyclecounter multiplier by @delta ppb
 * around the nominal multiplier, and mirror the adjustment to the HW
 * real-time clock when supported.  timecounter_read() is called first so
 * the new multiplier only applies to cycles counted from now on.
 */
static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_timer *timer = &clock->timer;
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	int neg_adj = 0;
	u32 diff;
	u64 adj;
	int err;

	mdev = container_of(clock, struct mlx5_core_dev, clock);
	err = mlx5_ptp_adjfreq_real_time(mdev, delta);
	if (err)
		return err;

	if (delta < 0) {
		neg_adj = 1;
		delta = -delta;
	}

	/* diff = nominal_c_mult * |delta| / 1e9, i.e. |delta| ppb of the
	 * nominal multiplier (use the named constant, not a magic number).
	 */
	adj = timer->nominal_c_mult;
	adj *= delta;
	diff = div_u64(adj, NSEC_PER_SEC);

	write_seqlock_irqsave(&clock->lock, flags);
	timecounter_read(&timer->tc);
	timer->cycles.mult = neg_adj ? timer->nominal_c_mult - diff :
				       timer->nominal_c_mult + diff;
	mlx5_update_clock_info_page(mdev);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return 0;
}
372 
/* PTP external-timestamp (EXTTS) request handler.
 *
 * Validates the request flags against what the HW supports (single-edge
 * capture only), programs the pin as an input via MTPPS, and arms repetitive
 * event generation via MTPPSE.  Returns -EOPNOTSUPP for unsupported
 * flags/caps, -EINVAL for a bad pin index, -EBUSY when the pin is not
 * assigned to the EXTTS function.
 */
static int mlx5_extts_configure(struct ptp_clock_info *ptp,
				struct ptp_clock_request *rq,
				int on)
{
	struct mlx5_clock *clock =
			container_of(ptp, struct mlx5_clock, ptp_info);
	struct mlx5_core_dev *mdev =
			container_of(clock, struct mlx5_core_dev, clock);
	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
	u32 field_select = 0;
	u8 pin_mode = 0;
	u8 pattern = 0;
	int pin = -1;
	int err = 0;

	if (!MLX5_PPS_CAP(mdev))
		return -EOPNOTSUPP;

	/* Reject requests with unsupported flags */
	if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
				PTP_RISING_EDGE |
				PTP_FALLING_EDGE |
				PTP_STRICT_FLAGS))
		return -EOPNOTSUPP;

	/* Reject requests to enable time stamping on both edges. */
	if ((rq->extts.flags & PTP_STRICT_FLAGS) &&
	    (rq->extts.flags & PTP_ENABLE_FEATURE) &&
	    (rq->extts.flags & PTP_EXTTS_EDGES) == PTP_EXTTS_EDGES)
		return -EOPNOTSUPP;

	if (rq->extts.index >= clock->ptp_info.n_pins)
		return -EINVAL;

	pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
	if (pin < 0)
		return -EBUSY;

	if (on) {
		pin_mode = MLX5_PIN_MODE_IN;
		/* pattern selects the capture edge: 1 = falling, 0 = rising */
		pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
		field_select = MLX5_MTPPS_FS_PIN_MODE |
			       MLX5_MTPPS_FS_PATTERN |
			       MLX5_MTPPS_FS_ENABLE;
	} else {
		field_select = MLX5_MTPPS_FS_ENABLE;
	}

	MLX5_SET(mtpps_reg, in, pin, pin);
	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
	MLX5_SET(mtpps_reg, in, pattern, pattern);
	MLX5_SET(mtpps_reg, in, enable, on);
	MLX5_SET(mtpps_reg, in, field_select, field_select);

	err = mlx5_set_mtpps(mdev, in, sizeof(in));
	if (err)
		return err;

	/* Arm event generation only when enabling (on != 0). */
	return mlx5_set_mtppse(mdev, pin, 0,
			       MLX5_EVENT_MODE_REPETETIVE & on);
}
434 
/* Convert an absolute target time in nanoseconds (@target_ns, free-running
 * timecounter domain) into an absolute device cycle count, by adding the
 * cycle-equivalent of (target_ns - now) to the current cycle reading.
 * Timecounter state is read under clock->lock.
 */
static u64 find_target_cycles(struct mlx5_core_dev *mdev, s64 target_ns)
{
	struct mlx5_clock *clock = &mdev->clock;
	u64 cycles_now, cycles_delta;
	u64 nsec_now, nsec_delta;
	struct mlx5_timer *timer;
	unsigned long flags;

	timer = &clock->timer;

	cycles_now = mlx5_read_time(mdev, NULL, false);
	write_seqlock_irqsave(&clock->lock, flags);
	nsec_now = timecounter_cyc2time(&timer->tc, cycles_now);
	nsec_delta = target_ns - nsec_now;
	/* Invert the cyclecounter conversion: cycles = ns * 2^shift / mult */
	cycles_delta = div64_u64(nsec_delta << timer->cycles.shift,
				 timer->cycles.mult);
	write_sequnlock_irqrestore(&clock->lock, flags);

	return cycles_now + cycles_delta;
}
455 
perout_conf_internal_timer(struct mlx5_core_dev * mdev,s64 sec)456 static u64 perout_conf_internal_timer(struct mlx5_core_dev *mdev, s64 sec)
457 {
458 	struct timespec64 ts = {};
459 	s64 target_ns;
460 
461 	ts.tv_sec = sec;
462 	target_ns = timespec64_to_ns(&ts);
463 
464 	return find_target_cycles(mdev, target_ns);
465 }
466 
/* Real-time mode MTPPS timestamps are encoded as seconds in the upper 32
 * bits (nanosecond part zero, enforced by the caller).
 */
static u64 perout_conf_real_time(s64 sec)
{
	return (u64)sec << 32;
}
471 
mlx5_perout_configure(struct ptp_clock_info * ptp,struct ptp_clock_request * rq,int on)472 static int mlx5_perout_configure(struct ptp_clock_info *ptp,
473 				 struct ptp_clock_request *rq,
474 				 int on)
475 {
476 	struct mlx5_clock *clock =
477 			container_of(ptp, struct mlx5_clock, ptp_info);
478 	struct mlx5_core_dev *mdev =
479 			container_of(clock, struct mlx5_core_dev, clock);
480 	bool rt_mode = mlx5_real_time_mode(mdev);
481 	u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
482 	struct timespec64 ts;
483 	u32 field_select = 0;
484 	u64 time_stamp = 0;
485 	u8 pin_mode = 0;
486 	u8 pattern = 0;
487 	int pin = -1;
488 	int err = 0;
489 	s64 ns;
490 
491 	if (!MLX5_PPS_CAP(mdev))
492 		return -EOPNOTSUPP;
493 
494 	/* Reject requests with unsupported flags */
495 	if (rq->perout.flags)
496 		return -EOPNOTSUPP;
497 
498 	if (rq->perout.index >= clock->ptp_info.n_pins)
499 		return -EINVAL;
500 
501 	field_select = MLX5_MTPPS_FS_ENABLE;
502 	pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, rq->perout.index);
503 	if (pin < 0)
504 		return -EBUSY;
505 
506 	if (on) {
507 		bool rt_mode = mlx5_real_time_mode(mdev);
508 		s64 sec = rq->perout.start.sec;
509 
510 		if (rq->perout.start.nsec)
511 			return -EINVAL;
512 
513 		pin_mode = MLX5_PIN_MODE_OUT;
514 		pattern = MLX5_OUT_PATTERN_PERIODIC;
515 		ts.tv_sec = rq->perout.period.sec;
516 		ts.tv_nsec = rq->perout.period.nsec;
517 		ns = timespec64_to_ns(&ts);
518 
519 		if ((ns >> 1) != 500000000LL)
520 			return -EINVAL;
521 
522 		if (rt_mode && sec > U32_MAX)
523 			return -EINVAL;
524 
525 		time_stamp = rt_mode ? perout_conf_real_time(sec) :
526 				       perout_conf_internal_timer(mdev, sec);
527 
528 		field_select |= MLX5_MTPPS_FS_PIN_MODE |
529 				MLX5_MTPPS_FS_PATTERN |
530 				MLX5_MTPPS_FS_TIME_STAMP;
531 	}
532 
533 	MLX5_SET(mtpps_reg, in, pin, pin);
534 	MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
535 	MLX5_SET(mtpps_reg, in, pattern, pattern);
536 	MLX5_SET(mtpps_reg, in, enable, on);
537 	MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
538 	MLX5_SET(mtpps_reg, in, field_select, field_select);
539 
540 	err = mlx5_set_mtpps(mdev, in, sizeof(in));
541 	if (err)
542 		return err;
543 
544 	if (rt_mode)
545 		return 0;
546 
547 	return mlx5_set_mtppse(mdev, pin, 0,
548 			       MLX5_EVENT_MODE_REPETETIVE & on);
549 }
550 
mlx5_pps_configure(struct ptp_clock_info * ptp,struct ptp_clock_request * rq,int on)551 static int mlx5_pps_configure(struct ptp_clock_info *ptp,
552 			      struct ptp_clock_request *rq,
553 			      int on)
554 {
555 	struct mlx5_clock *clock =
556 			container_of(ptp, struct mlx5_clock, ptp_info);
557 
558 	clock->pps_info.enabled = !!on;
559 	return 0;
560 }
561 
mlx5_ptp_enable(struct ptp_clock_info * ptp,struct ptp_clock_request * rq,int on)562 static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
563 			   struct ptp_clock_request *rq,
564 			   int on)
565 {
566 	switch (rq->type) {
567 	case PTP_CLK_REQ_EXTTS:
568 		return mlx5_extts_configure(ptp, rq, on);
569 	case PTP_CLK_REQ_PEROUT:
570 		return mlx5_perout_configure(ptp, rq, on);
571 	case PTP_CLK_REQ_PPS:
572 		return mlx5_pps_configure(ptp, rq, on);
573 	default:
574 		return -EOPNOTSUPP;
575 	}
576 	return 0;
577 }
578 
/* Per-pin capability bits reported in MTPPS cap_pin_N_mode fields. */
enum {
	MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN = BIT(0),
	MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT = BIT(1),
};
583 
mlx5_ptp_verify(struct ptp_clock_info * ptp,unsigned int pin,enum ptp_pin_function func,unsigned int chan)584 static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
585 			   enum ptp_pin_function func, unsigned int chan)
586 {
587 	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
588 						ptp_info);
589 
590 	switch (func) {
591 	case PTP_PF_NONE:
592 		return 0;
593 	case PTP_PF_EXTTS:
594 		return !(clock->pps_info.pin_caps[pin] &
595 			 MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN);
596 	case PTP_PF_PEROUT:
597 		return !(clock->pps_info.pin_caps[pin] &
598 			 MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT);
599 	default:
600 		return -EOPNOTSUPP;
601 	}
602 }
603 
/* Template ptp_clock_info copied into each device's clock at init time.
 * n_pins/pps/enable/verify start disabled and are filled in by
 * mlx5_init_pin_config() when the device advertises PPS pins.
 */
static const struct ptp_clock_info mlx5_ptp_clock_info = {
	.owner		= THIS_MODULE,
	.name		= "mlx5_ptp",
	.max_adj	= 50000000,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.n_pins		= 0,
	.pps		= 0,
	.adjfreq	= mlx5_ptp_adjfreq,
	.adjtime	= mlx5_ptp_adjtime,
	.gettimex64	= mlx5_ptp_gettimex,
	.settime64	= mlx5_ptp_settime,
	.enable		= NULL,
	.verify		= NULL,
};
620 
/* Query the MTPPS register for a single pin; the response is written to
 * @mtpps (@mtpps_size bytes).
 */
static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
				     u32 *mtpps, u32 mtpps_size)
{
	u32 query[MLX5_ST_SZ_DW(mtpps_reg)] = {};

	MLX5_SET(mtpps_reg, query, pin, pin);

	return mlx5_core_access_reg(mdev, query, sizeof(query), mtpps,
				    mtpps_size, MLX5_REG_MTPPS, 0, 0);
}
631 
/* Derive the current ptp_pin_function of @pin from the HW MTPPS state so a
 * configuration that survived (e.g. across a driver reload) is reflected.
 * Any query failure or a disabled pin maps to PTP_PF_NONE.
 */
static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
{
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);

	u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {};
	int err;

	err = mlx5_query_mtpps_pin_mode(mdev, pin, out, sizeof(out));
	if (err || !MLX5_GET(mtpps_reg, out, enable))
		return PTP_PF_NONE;

	switch (MLX5_GET(mtpps_reg, out, pin_mode)) {
	case MLX5_PIN_MODE_IN:
		return PTP_PF_EXTTS;
	case MLX5_PIN_MODE_OUT:
		return PTP_PF_PEROUT;
	default:
		return PTP_PF_NONE;
	}
}
653 
mlx5_init_pin_config(struct mlx5_clock * clock)654 static void mlx5_init_pin_config(struct mlx5_clock *clock)
655 {
656 	int i;
657 
658 	if (!clock->ptp_info.n_pins)
659 		return;
660 
661 	clock->ptp_info.pin_config =
662 			kcalloc(clock->ptp_info.n_pins,
663 				sizeof(*clock->ptp_info.pin_config),
664 				GFP_KERNEL);
665 	if (!clock->ptp_info.pin_config)
666 		return;
667 	clock->ptp_info.enable = mlx5_ptp_enable;
668 	clock->ptp_info.verify = mlx5_ptp_verify;
669 	clock->ptp_info.pps = 1;
670 
671 	for (i = 0; i < clock->ptp_info.n_pins; i++) {
672 		snprintf(clock->ptp_info.pin_config[i].name,
673 			 sizeof(clock->ptp_info.pin_config[i].name),
674 			 "mlx5_pps%d", i);
675 		clock->ptp_info.pin_config[i].index = i;
676 		clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i);
677 		clock->ptp_info.pin_config[i].chan = 0;
678 	}
679 }
680 
/* Cache the device's PPS capabilities (pin counts and per-pin mode support)
 * from the MTPPS register.  The cap_pin_N_mode fields are distinct named
 * macro fields, so they are read individually rather than in a loop.
 */
static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};

	mlx5_query_mtpps(mdev, out, sizeof(out));

	clock->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
					  cap_number_of_pps_pins);
	clock->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
					    cap_max_num_of_pps_in_pins);
	clock->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
					     cap_max_num_of_pps_out_pins);

	clock->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
	clock->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
	clock->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
	clock->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
	clock->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
	clock->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
	clock->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
	clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
}
704 
ts_next_sec(struct timespec64 * ts)705 static void ts_next_sec(struct timespec64 *ts)
706 {
707 	ts->tv_sec += 1;
708 	ts->tv_nsec = 0;
709 }
710 
/* Compute the device cycle count of the next whole-second boundary, used to
 * re-arm the periodic output after each PPS event (free-running timer mode).
 */
static u64 perout_conf_next_event_timer(struct mlx5_core_dev *mdev,
					struct mlx5_clock *clock)
{
	struct timespec64 ts;
	s64 target_ns;

	mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
	ts_next_sec(&ts);
	target_ns = timespec64_to_ns(&ts);

	return find_target_cycles(mdev, target_ns);
}
723 
/* PPS EQE notifier.  For an EXTTS pin, convert the HW timestamp to
 * nanoseconds (real-time or timecounter domain as appropriate) and forward
 * it to the PTP core as either a PPSUSR or EXTTS event.  For a PEROUT pin,
 * record the next whole-second target under clock->lock and defer the MTPPS
 * reprogramming to mlx5_pps_out (register access may sleep; this runs in
 * notifier context).
 */
static int mlx5_pps_event(struct notifier_block *nb,
			  unsigned long type, void *data)
{
	struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
	struct ptp_clock_event ptp_event;
	struct mlx5_eqe *eqe = data;
	int pin = eqe->data.pps.pin;
	struct mlx5_core_dev *mdev;
	unsigned long flags;
	u64 ns;

	mdev = container_of(clock, struct mlx5_core_dev, clock);

	switch (clock->ptp_info.pin_config[pin].func) {
	case PTP_PF_EXTTS:
		ptp_event.index = pin;
		ptp_event.timestamp = mlx5_real_time_mode(mdev) ?
			mlx5_real_time_cyc2time(clock,
						be64_to_cpu(eqe->data.pps.time_stamp)) :
			mlx5_timecounter_cyc2time(clock,
						  be64_to_cpu(eqe->data.pps.time_stamp));
		if (clock->pps_info.enabled) {
			ptp_event.type = PTP_CLOCK_PPSUSR;
			ptp_event.pps_times.ts_real =
					ns_to_timespec64(ptp_event.timestamp);
		} else {
			ptp_event.type = PTP_CLOCK_EXTTS;
		}
		/* TODO: clock->ptp can be NULL if ptp_clock_register fails */
		ptp_clock_event(clock->ptp, &ptp_event);
		break;
	case PTP_PF_PEROUT:
		ns = perout_conf_next_event_timer(mdev, clock);
		write_seqlock_irqsave(&clock->lock, flags);
		clock->pps_info.start[pin] = ns;
		write_sequnlock_irqrestore(&clock->lock, flags);
		schedule_work(&clock->pps_info.out_work);
		break;
	default:
		mlx5_core_err(mdev, " Unhandled clock PPS event, func %d\n",
			      clock->ptp_info.pin_config[pin].func);
	}

	return NOTIFY_OK;
}
769 
/* Set up the SW cyclecounter/timecounter over the device's free-running
 * timer: mult/shift are derived from the device frequency (kHz) reported in
 * caps, the counter is 41 bits wide, and the timecounter starts at the
 * current wall-clock time.
 */
static void mlx5_timecounter_init(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	struct mlx5_timer *timer = &clock->timer;
	u32 dev_freq;

	dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
	timer->cycles.read = read_internal_timer;
	timer->cycles.shift = MLX5_CYCLES_SHIFT;
	timer->cycles.mult = clocksource_khz2mult(dev_freq,
						  timer->cycles.shift);
	/* Keep the unadjusted multiplier as the baseline for adjfreq. */
	timer->nominal_c_mult = timer->cycles.mult;
	timer->cycles.mask = CLOCKSOURCE_MASK(41);

	timecounter_init(&timer->tc, &timer->cycles,
			 ktime_to_ns(ktime_get_real()));
}
787 
/* Compute (in jiffies) and kick off the periodic overflow-check work.
 * The period is chosen so timecounter_read() runs at least twice per HW
 * counter wrap, and also so cycles * mult cannot overflow 64 bits.
 */
static void mlx5_init_overflow_period(struct mlx5_clock *clock)
{
	struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
	struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
	struct mlx5_timer *timer = &clock->timer;
	u64 overflow_cycles;
	u64 frac = 0;
	u64 ns;

	/* Calculate period in seconds to call the overflow watchdog - to make
	 * sure counter is checked at least twice every wrap around.
	 * The period is calculated as the minimum between max HW cycles count
	 * (The clock source mask) and max amount of cycles that can be
	 * multiplied by clock multiplier where the result doesn't exceed
	 * 64bits.
	 */
	overflow_cycles = div64_u64(~0ULL >> 1, timer->cycles.mult);
	overflow_cycles = min(overflow_cycles, div_u64(timer->cycles.mask, 3));

	ns = cyclecounter_cyc2ns(&timer->cycles, overflow_cycles,
				 frac, &frac);
	/* Convert nanoseconds to jiffies for schedule_delayed_work(). */
	do_div(ns, NSEC_PER_SEC / HZ);
	timer->overflow_period = ns;

	INIT_DELAYED_WORK(&timer->overflow_work, mlx5_timestamp_overflow);
	if (timer->overflow_period)
		schedule_delayed_work(&timer->overflow_work, 0);
	else
		mlx5_core_warn(mdev,
			       "invalid overflow period, overflow_work is not scheduled\n");

	/* Expose the period to user space via the shared clock-info page. */
	if (clock_info)
		clock_info->overflow_period = timer->overflow_period;
}
822 
/* Allocate the page shared with user space (via mlx5_ib) and seed it with
 * the initial timecounter parameters.  Allocation failure is non-fatal:
 * mdev->clock_info stays NULL and updates become no-ops.
 */
static void mlx5_init_clock_info(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;
	struct mlx5_ib_clock_info *info;
	struct mlx5_timer *timer;

	mdev->clock_info = (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL);
	if (!mdev->clock_info) {
		mlx5_core_warn(mdev, "Failed to allocate IB clock info page\n");
		return;
	}

	info = mdev->clock_info;
	timer = &clock->timer;

	info->nsec = timer->tc.nsec;
	info->cycles = timer->tc.cycle_last;
	info->mask = timer->cycles.mask;
	info->mult = timer->nominal_c_mult;
	info->shift = timer->cycles.shift;
	info->frac = timer->tc.frac;
}
845 
/* Initialize the SW timecounter, its shared user-space page and the
 * overflow watchdog, then install the default ptp_clock_info.  In real-time
 * mode the HW clock is additionally synchronized to the current wall time.
 */
static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	mlx5_timecounter_init(mdev);
	mlx5_init_clock_info(mdev);
	mlx5_init_overflow_period(clock);
	clock->ptp_info = mlx5_ptp_clock_info;

	if (mlx5_real_time_mode(mdev)) {
		struct timespec64 ts;

		ktime_get_real_ts64(&ts);
		mlx5_ptp_settime(&clock->ptp_info, &ts);
	}
}
862 
mlx5_init_pps(struct mlx5_core_dev * mdev)863 static void mlx5_init_pps(struct mlx5_core_dev *mdev)
864 {
865 	struct mlx5_clock *clock = &mdev->clock;
866 
867 	if (!MLX5_PPS_CAP(mdev))
868 		return;
869 
870 	mlx5_get_pps_caps(mdev);
871 	mlx5_init_pin_config(clock);
872 }
873 
/* Top-level clock init: timecounter + overflow work, PPS work, PHC
 * registration and PPS event notifier.  Skipped entirely when the device
 * does not report a frequency.  ptp_clock_register() failure is tolerated
 * (clock->ptp stays NULL; timestamping still works without a PHC).
 */
void mlx5_init_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) {
		mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
		return;
	}

	seqlock_init(&clock->lock);
	mlx5_init_timer_clock(mdev);
	INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);

	/* Configure the PHC */
	clock->ptp_info = mlx5_ptp_clock_info;

	/* Initialize 1PPS data structures */
	mlx5_init_pps(mdev);

	clock->ptp = ptp_clock_register(&clock->ptp_info,
					&mdev->pdev->dev);
	if (IS_ERR(clock->ptp)) {
		mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n",
			       PTR_ERR(clock->ptp));
		clock->ptp = NULL;
	}

	MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT);
	mlx5_eq_notifier_register(mdev, &clock->pps_nb);
}
904 
/* Tear down everything mlx5_init_clock() set up, in reverse order:
 * notifier, PHC, pending works, shared clock-info page and pin table.
 * A no-op when clock init was skipped (no device frequency).
 */
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
{
	struct mlx5_clock *clock = &mdev->clock;

	if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
		return;

	mlx5_eq_notifier_unregister(mdev, &clock->pps_nb);
	if (clock->ptp) {
		ptp_clock_unregister(clock->ptp);
		clock->ptp = NULL;
	}

	/* Works may have been queued by events; flush before freeing state. */
	cancel_work_sync(&clock->pps_info.out_work);
	cancel_delayed_work_sync(&clock->timer.overflow_work);

	if (mdev->clock_info) {
		free_page((unsigned long)mdev->clock_info);
		mdev->clock_info = NULL;
	}

	/* kfree(NULL) is safe when pin_config was never allocated. */
	kfree(clock->ptp_info.pin_config);
}
928