/*
 * TimeSync API driver.
 *
 * Copyright 2016 Google Inc.
 * Copyright 2016 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */
#include <linux/debugfs.h>
#include <linux/hrtimer.h>
#include "greybus.h"
#include "timesync.h"
#include "greybus_trace.h"

/*
 * A minimum inter-strobe value of one millisecond is chosen because it
 * just about fits the common definition of a jiffy.
 *
 * The maximum value, on the other hand, is constrained by the number of
 * bits the SVC can fit into a 16 bit up-counter. The SVC configures the
 * timer in microseconds, so the maximum allowable value is 65535
 * microseconds. We clip that value to 10000 microseconds for the sake of
 * using nice round base 10 numbers and because right now there is no
 * imaginable use-case requiring anything other than a one millisecond
 * inter-strobe time, let alone anything higher than ten milliseconds.
 */
#define GB_TIMESYNC_STROBE_DELAY_US		1000
#define GB_TIMESYNC_DEFAULT_OFFSET_US		1000

/* Workqueue timers: long, short, and the SVC strobe timeout */
#define GB_TIMESYNC_DELAYED_WORK_LONG		msecs_to_jiffies(10)
#define GB_TIMESYNC_DELAYED_WORK_SHORT		msecs_to_jiffies(1)
#define GB_TIMESYNC_MAX_WAIT_SVC		msecs_to_jiffies(5000)
#define GB_TIMESYNC_KTIME_UPDATE		msecs_to_jiffies(1000)
#define GB_TIMESYNC_MAX_KTIME_CONVERSION	15

/* Maximum number of times we'll retry a failed synchronous sync */
#define GB_TIMESYNC_MAX_RETRIES			5

/* Reported nanoseconds/femtoseconds per clock */
static u64 gb_timesync_ns_per_clock;
static u64 gb_timesync_fs_per_clock;

/* Maximum difference we will accept converting FrameTime to ktime */
static u32 gb_timesync_max_ktime_diff;

/* Reported clock rate */
static unsigned long gb_timesync_clock_rate;

/* Workqueue */
static void gb_timesync_worker(struct work_struct *work);

/* List of SVCs with one FrameTime per SVC */
static LIST_HEAD(gb_timesync_svc_list);

/* Synchronize parallel contexts accessing a valid timesync_svc pointer */
static DEFINE_MUTEX(gb_timesync_svc_list_mutex);

/* Structure to convert from FrameTime to timespec/ktime */
struct gb_timesync_frame_time_data {
	u64 frame_time;
	struct timespec ts;
};

struct gb_timesync_svc {
	struct list_head list;
	struct list_head interface_list;
	struct gb_svc *svc;
	struct gb_timesync_host_device *timesync_hd;

	spinlock_t spinlock;	/* Per SVC spinlock to sync with ISR */
	struct mutex mutex;	/* Per SVC mutex for regular synchronization */

	struct dentry *frame_time_dentry;
	struct dentry *frame_ktime_dentry;
	struct workqueue_struct *work_queue;
	wait_queue_head_t wait_queue;
	struct delayed_work delayed_work;
	struct timer_list ktime_timer;

	/* The current local FrameTime */
	u64 frame_time_offset;
	struct gb_timesync_frame_time_data strobe_data[GB_TIMESYNC_MAX_STROBES];
	struct gb_timesync_frame_time_data ktime_data;

	/* The SVC FrameTime and relative AP FrameTime @ last TIMESYNC_PING */
	u64 svc_ping_frame_time;
	u64 ap_ping_frame_time;

	/* Transitory settings */
	u32 strobe_mask;
	bool offset_down;
	bool print_ping;
	bool capture_ping;
	int strobe;

	/* Current state */
	int state;
};

struct gb_timesync_host_device {
	struct list_head list;
	struct gb_host_device *hd;
	u64 ping_frame_time;
};

struct gb_timesync_interface {
	struct list_head list;
	struct gb_interface *interface;
	u64 ping_frame_time;
};

enum gb_timesync_state {
	GB_TIMESYNC_STATE_INVALID		= 0,
	GB_TIMESYNC_STATE_INACTIVE		= 1,
	GB_TIMESYNC_STATE_INIT			= 2,
	GB_TIMESYNC_STATE_WAIT_SVC		= 3,
	GB_TIMESYNC_STATE_AUTHORITATIVE		= 4,
	GB_TIMESYNC_STATE_PING			= 5,
	GB_TIMESYNC_STATE_ACTIVE		= 6,
};
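
/*
 * Valid state transitions, as enforced by gb_timesync_set_state():
 * INVALID and INACTIVE may be entered from any state, INIT from any
 * state except INVALID, and from there INIT -> WAIT_SVC ->
 * AUTHORITATIVE -> ACTIVE, with ACTIVE <-> PING used to re-verify an
 * established sync.
 */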

static void gb_timesync_ktime_timer_fn(unsigned long data);

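/*
 * Apply the stored offset between the SVC and AP counters to a raw
 * local counter value, yielding the local FrameTime.
 */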
static u64 gb_timesync_adjust_count(struct gb_timesync_svc *timesync_svc,
				    u64 counts)
{
	if (timesync_svc->offset_down)
		return counts - timesync_svc->frame_time_offset;
	else
		return counts + timesync_svc->frame_time_offset;
}

/*
 * This function provides the authoritative FrameTime to a calling function. It
 * is designed to be lockless and should remain that way; the caller is assumed
 * to be state-aware.
 */
static u64 __gb_timesync_get_frame_time(struct gb_timesync_svc *timesync_svc)
{
	u64 clocks = gb_timesync_platform_get_counter();

	return gb_timesync_adjust_count(timesync_svc, clocks);
}

static void gb_timesync_schedule_svc_timeout(struct gb_timesync_svc
					     *timesync_svc)
{
	queue_delayed_work(timesync_svc->work_queue,
			   &timesync_svc->delayed_work,
			   GB_TIMESYNC_MAX_WAIT_SVC);
}

static void gb_timesync_set_state(struct gb_timesync_svc *timesync_svc,
				  int state)
{
	switch (state) {
	case GB_TIMESYNC_STATE_INVALID:
		timesync_svc->state = state;
		wake_up(&timesync_svc->wait_queue);
		break;
	case GB_TIMESYNC_STATE_INACTIVE:
		timesync_svc->state = state;
		wake_up(&timesync_svc->wait_queue);
		break;
	case GB_TIMESYNC_STATE_INIT:
		if (timesync_svc->state != GB_TIMESYNC_STATE_INVALID) {
			timesync_svc->strobe = 0;
			timesync_svc->frame_time_offset = 0;
			timesync_svc->state = state;
			cancel_delayed_work(&timesync_svc->delayed_work);
			queue_delayed_work(timesync_svc->work_queue,
					   &timesync_svc->delayed_work,
					   GB_TIMESYNC_DELAYED_WORK_LONG);
		}
		break;
	case GB_TIMESYNC_STATE_WAIT_SVC:
		if (timesync_svc->state == GB_TIMESYNC_STATE_INIT)
			timesync_svc->state = state;
		break;
	case GB_TIMESYNC_STATE_AUTHORITATIVE:
		if (timesync_svc->state == GB_TIMESYNC_STATE_WAIT_SVC) {
			timesync_svc->state = state;
			cancel_delayed_work(&timesync_svc->delayed_work);
			queue_delayed_work(timesync_svc->work_queue,
					   &timesync_svc->delayed_work, 0);
		}
		break;
	case GB_TIMESYNC_STATE_PING:
		if (timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE) {
			timesync_svc->state = state;
			queue_delayed_work(timesync_svc->work_queue,
					   &timesync_svc->delayed_work,
					   GB_TIMESYNC_DELAYED_WORK_SHORT);
		}
		break;
	case GB_TIMESYNC_STATE_ACTIVE:
		if (timesync_svc->state == GB_TIMESYNC_STATE_AUTHORITATIVE ||
		    timesync_svc->state == GB_TIMESYNC_STATE_PING) {
			timesync_svc->state = state;
			wake_up(&timesync_svc->wait_queue);
		}
		break;
	}

	if (WARN_ON(timesync_svc->state != state)) {
		pr_err("Invalid state transition %d=>%d\n",
		       timesync_svc->state, state);
	}
}

static void gb_timesync_set_state_atomic(struct gb_timesync_svc *timesync_svc,
					 int state)
{
	unsigned long flags;

	spin_lock_irqsave(&timesync_svc->spinlock, flags);
	gb_timesync_set_state(timesync_svc, state);
	spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
}

static u64 gb_timesync_diff(u64 x, u64 y)
{
	if (x > y)
		return x - y;
	else
		return y - x;
}
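/*
 * Record the magnitude and direction of the offset between the SVC
 * FrameTime and the local AP FrameTime.
 */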
static void gb_timesync_adjust_to_svc(struct gb_timesync_svc *svc,
				      u64 svc_frame_time, u64 ap_frame_time)
{
	if (svc_frame_time > ap_frame_time) {
		svc->frame_time_offset = svc_frame_time - ap_frame_time;
		svc->offset_down = false;
	} else {
		svc->frame_time_offset = ap_frame_time - svc_frame_time;
		svc->offset_down = true;
	}
}

/*
 * Associate a FrameTime with a ktime timestamp represented as struct timespec.
 * Requires the calling context to hold timesync_svc->mutex.
 */
static void gb_timesync_store_ktime(struct gb_timesync_svc *timesync_svc,
				    struct timespec ts, u64 frame_time)
{
	timesync_svc->ktime_data.ts = ts;
	timesync_svc->ktime_data.frame_time = frame_time;
}

/*
 * Find the two pulses that best match our expected inter-strobe gap and
 * then calculate the difference between the SVC time and the local time
 * at the second of those two pulses.
 */
static void gb_timesync_collate_frame_time(struct gb_timesync_svc *timesync_svc,
					   u64 *frame_time)
{
	int i;
	u64 delta, ap_frame_time;
	u64 strobe_delay_ns = GB_TIMESYNC_STROBE_DELAY_US * NSEC_PER_USEC;
	u64 least = 0;

	for (i = 1; i < GB_TIMESYNC_MAX_STROBES; i++) {
		delta = timesync_svc->strobe_data[i].frame_time -
			timesync_svc->strobe_data[i - 1].frame_time;
		delta *= gb_timesync_ns_per_clock;
		delta = gb_timesync_diff(delta, strobe_delay_ns);

		if (!least || delta < least) {
			least = delta;
			gb_timesync_adjust_to_svc(timesync_svc, frame_time[i],
						  timesync_svc->strobe_data[i].frame_time);

			ap_frame_time = timesync_svc->strobe_data[i].frame_time;
			ap_frame_time = gb_timesync_adjust_count(timesync_svc,
								 ap_frame_time);
			gb_timesync_store_ktime(timesync_svc,
						timesync_svc->strobe_data[i].ts,
						ap_frame_time);

			pr_debug("adjust %s local %llu svc %llu delta %llu\n",
				 timesync_svc->offset_down ? "down" : "up",
				 timesync_svc->strobe_data[i].frame_time,
				 frame_time[i], delta);
		}
	}
}

static void gb_timesync_teardown(struct gb_timesync_svc *timesync_svc)
{
	struct gb_timesync_interface *timesync_interface;
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_interface *interface;
	struct gb_host_device *hd;
	int ret;

	list_for_each_entry(timesync_interface,
			    &timesync_svc->interface_list, list) {
		interface = timesync_interface->interface;
		ret = gb_interface_timesync_disable(interface);
		if (ret) {
			dev_err(&interface->dev,
				"interface timesync_disable %d\n", ret);
		}
	}

	hd = timesync_svc->timesync_hd->hd;
	ret = hd->driver->timesync_disable(hd);
	if (ret < 0) {
		dev_err(&hd->dev, "host timesync_disable %d\n",
			ret);
	}

	gb_svc_timesync_wake_pins_release(svc);
	gb_svc_timesync_disable(svc);
	gb_timesync_platform_unlock_bus();

	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_INACTIVE);
}

static void gb_timesync_platform_lock_bus_fail(struct gb_timesync_svc
						*timesync_svc, int ret)
{
	if (ret == -EAGAIN) {
		gb_timesync_set_state(timesync_svc, timesync_svc->state);
	} else {
		pr_err("Failed to lock timesync bus %d\n", ret);
		gb_timesync_set_state(timesync_svc, GB_TIMESYNC_STATE_INACTIVE);
	}
}

static void gb_timesync_enable(struct gb_timesync_svc *timesync_svc)
{
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_host_device *hd;
	struct gb_timesync_interface *timesync_interface;
	struct gb_interface *interface;
	u64 init_frame_time;
	unsigned long clock_rate = gb_timesync_clock_rate;
	int ret;

	/*
	 * Get access to the wake pins in the AP and SVC.
	 * Release these pins either in gb_timesync_teardown() or in
	 * gb_timesync_authoritative().
	 */
	ret = gb_timesync_platform_lock_bus(timesync_svc);
	if (ret < 0) {
		gb_timesync_platform_lock_bus_fail(timesync_svc, ret);
		return;
	}
	ret = gb_svc_timesync_wake_pins_acquire(svc, timesync_svc->strobe_mask);
	if (ret) {
		dev_err(&svc->dev,
			"gb_svc_timesync_wake_pins_acquire %d\n", ret);
		gb_timesync_teardown(timesync_svc);
		return;
	}

	/* Choose an initial time in the future */
	init_frame_time = __gb_timesync_get_frame_time(timesync_svc) + 100000UL;

	/* Send enable command to all relevant participants */
	list_for_each_entry(timesync_interface, &timesync_svc->interface_list,
			    list) {
		interface = timesync_interface->interface;
		ret = gb_interface_timesync_enable(interface,
						   GB_TIMESYNC_MAX_STROBES,
						   init_frame_time,
						   GB_TIMESYNC_STROBE_DELAY_US,
						   clock_rate);
		if (ret) {
			dev_err(&interface->dev,
				"interface timesync_enable %d\n", ret);
		}
	}

	hd = timesync_svc->timesync_hd->hd;
	ret = hd->driver->timesync_enable(hd, GB_TIMESYNC_MAX_STROBES,
					  init_frame_time,
					  GB_TIMESYNC_STROBE_DELAY_US,
					  clock_rate);
	if (ret < 0) {
		dev_err(&hd->dev, "host timesync_enable %d\n",
			ret);
	}

	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_WAIT_SVC);
	ret = gb_svc_timesync_enable(svc, GB_TIMESYNC_MAX_STROBES,
				     init_frame_time,
				     GB_TIMESYNC_STROBE_DELAY_US,
				     clock_rate);
	if (ret) {
		dev_err(&svc->dev,
			"gb_svc_timesync_enable %d\n", ret);
		gb_timesync_teardown(timesync_svc);
		return;
	}

	/* Schedule a timeout waiting for SVC to complete strobing */
	gb_timesync_schedule_svc_timeout(timesync_svc);
}

static void gb_timesync_authoritative(struct gb_timesync_svc *timesync_svc)
{
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_host_device *hd;
	struct gb_timesync_interface *timesync_interface;
	struct gb_interface *interface;
	u64 svc_frame_time[GB_TIMESYNC_MAX_STROBES];
	int ret;

	/* Get authoritative time from SVC and adjust local clock */
	ret = gb_svc_timesync_authoritative(svc, svc_frame_time);
	if (ret) {
		dev_err(&svc->dev,
			"gb_svc_timesync_authoritative %d\n", ret);
		gb_timesync_teardown(timesync_svc);
		return;
	}
	gb_timesync_collate_frame_time(timesync_svc, svc_frame_time);

	/* Transmit authoritative time to downstream slaves */
	hd = timesync_svc->timesync_hd->hd;
	ret = hd->driver->timesync_authoritative(hd, svc_frame_time);
	if (ret < 0)
		dev_err(&hd->dev, "host timesync_authoritative %d\n", ret);

	list_for_each_entry(timesync_interface,
			    &timesync_svc->interface_list, list) {
		interface = timesync_interface->interface;
		ret = gb_interface_timesync_authoritative(interface,
							  svc_frame_time);
		if (ret) {
			dev_err(&interface->dev,
				"interface timesync_authoritative %d\n", ret);
		}
	}

	/* Release wake pins */
	gb_svc_timesync_wake_pins_release(svc);
	gb_timesync_platform_unlock_bus();

	/* Transition to state ACTIVE */
	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_ACTIVE);

	/* Schedule a ping to verify the synchronized system time */
	timesync_svc->print_ping = true;
	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_PING);
}
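/*
 * Map the current TimeSync state to a status code: -ENODEV when TimeSync
 * is not running, -EAGAIN while synchronization is still in progress and
 * zero once an authoritative FrameTime is available.
 */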
static int __gb_timesync_get_status(struct gb_timesync_svc *timesync_svc)
{
	int ret = -EINVAL;

	switch (timesync_svc->state) {
	case GB_TIMESYNC_STATE_INVALID:
	case GB_TIMESYNC_STATE_INACTIVE:
		ret = -ENODEV;
		break;
	case GB_TIMESYNC_STATE_INIT:
	case GB_TIMESYNC_STATE_WAIT_SVC:
	case GB_TIMESYNC_STATE_AUTHORITATIVE:
		ret = -EAGAIN;
		break;
	case GB_TIMESYNC_STATE_PING:
	case GB_TIMESYNC_STATE_ACTIVE:
		ret = 0;
		break;
	}
	return ret;
}

/*
 * This routine takes a FrameTime and derives the difference with respect
 * to a reference FrameTime/ktime pair. It then returns the calculated
 * ktime based on the difference between the supplied FrameTime and
 * the reference FrameTime.
 *
 * The time difference is calculated to six decimal places. Taking 19.2MHz
 * as an example this means we have 52.083333~ nanoseconds per clock or
 * 52083333~ femtoseconds per clock.
 *
 * Naively taking the count difference and converting to
 * seconds/nanoseconds would quickly see the 0.0833 component produce
 * noticeable errors. For example a time difference of one second would
 * lose 19200000 * 0.08333... nanoseconds, or about 1.6 milliseconds.
 *
 * In contrast, calculating in femtoseconds the same example of 19200000 *
 * 0.000000083333... nanoseconds per count of error is just ~1.6
 * nanoseconds!
 *
 * Continuing the example of 19.2 MHz, we cap the maximum error difference
 * at a worst-case 0.3 microseconds over a potential calculation window of
 * about 15 seconds, meaning you can convert a FrameTime that is <= 15
 * seconds older/younger than the reference time with a maximum error of
 * 0.2385 microseconds. Note 19.2MHz is an example frequency, not a
 * requirement.
 */
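/*
 * Worked example (illustrative only, using the 19.2 MHz example clock):
 * a delta of 28,800,000 counts gives
 * delta_fs = 28,800,000 * 52,083,333 fs = 1,499,999,990,400,000 fs,
 * so sec = 1 and the remaining 499,999,990,400,000 fs divide down to
 * nsec = 499,999,990, i.e. ~1.5 seconds with ~9.6 ns of truncation
 * error, consistent with the bounds described above.
 */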
static int gb_timesync_to_timespec(struct gb_timesync_svc *timesync_svc,
				   u64 frame_time, struct timespec *ts)
{
	unsigned long flags;
	u64 delta_fs, counts, sec, nsec;
	bool add;
	int ret = 0;

	memset(ts, 0x00, sizeof(*ts));
	mutex_lock(&timesync_svc->mutex);
	spin_lock_irqsave(&timesync_svc->spinlock, flags);

	ret = __gb_timesync_get_status(timesync_svc);
	if (ret)
		goto done;

	/* Support calculating ktime upwards or downwards from the reference */
	if (frame_time < timesync_svc->ktime_data.frame_time) {
		add = false;
		counts = timesync_svc->ktime_data.frame_time - frame_time;
	} else {
		add = true;
		counts = frame_time - timesync_svc->ktime_data.frame_time;
	}

	/* Enforce the 0.23 microsecond error boundary @ 19.2MHz */
	if (counts > gb_timesync_max_ktime_diff) {
		ret = -EINVAL;
		goto done;
	}

	/* Determine the time difference in femtoseconds */
	delta_fs = counts * gb_timesync_fs_per_clock;

	/*
	 * Convert to whole seconds and a nanosecond remainder. do_div()
	 * takes a 32 bit divisor, so the division by 10^15 femtoseconds
	 * per second is done in two steps; both partial remainders are
	 * kept to rebuild the sub-second part without losing precision.
	 */
	sec = delta_fs;
	nsec = do_div(sec, NSEC_PER_SEC);
	delta_fs = do_div(sec, 1000000UL);
	nsec += delta_fs * NSEC_PER_SEC;
	do_div(nsec, 1000000UL);

	if (add) {
		/* Add the calculated offset - overflow nanoseconds upwards */
		ts->tv_sec = timesync_svc->ktime_data.ts.tv_sec + sec;
		ts->tv_nsec = timesync_svc->ktime_data.ts.tv_nsec + nsec;
		if (ts->tv_nsec >= NSEC_PER_SEC) {
			ts->tv_sec++;
			ts->tv_nsec -= NSEC_PER_SEC;
		}
	} else {
		/* Subtract the difference, borrowing a second if necessary */
		if (nsec > timesync_svc->ktime_data.ts.tv_nsec) {
			sec++;
			nsec = NSEC_PER_SEC +
				timesync_svc->ktime_data.ts.tv_nsec - nsec;
		} else {
			nsec = timesync_svc->ktime_data.ts.tv_nsec - nsec;
		}
		/* Cannot return a negative second value */
		if (sec > timesync_svc->ktime_data.ts.tv_sec) {
			ret = -EINVAL;
			goto done;
		}
		ts->tv_sec = timesync_svc->ktime_data.ts.tv_sec - sec;
		ts->tv_nsec = nsec;
	}
done:
	spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
	mutex_unlock(&timesync_svc->mutex);
	return ret;
}

static size_t gb_timesync_log_frame_time(struct gb_timesync_svc *timesync_svc,
					 char *buf, size_t buflen)
{
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_host_device *hd;
	struct gb_timesync_interface *timesync_interface;
	struct gb_interface *interface;
	unsigned int len;
	size_t off;

	/* AP/SVC */
	off = snprintf(buf, buflen, "%s frametime: ap=%llu %s=%llu ",
		       greybus_bus_type.name,
		       timesync_svc->ap_ping_frame_time, dev_name(&svc->dev),
		       timesync_svc->svc_ping_frame_time);
	len = buflen - off;

	/* APB/GPB */
	if (len < buflen) {
		hd = timesync_svc->timesync_hd->hd;
		off += snprintf(&buf[off], len, "%s=%llu ", dev_name(&hd->dev),
				timesync_svc->timesync_hd->ping_frame_time);
		len = buflen - off;
	}

	list_for_each_entry(timesync_interface,
			    &timesync_svc->interface_list, list) {
		if (len < buflen) {
			interface = timesync_interface->interface;
			off += snprintf(&buf[off], len, "%s=%llu ",
					dev_name(&interface->dev),
					timesync_interface->ping_frame_time);
			len = buflen - off;
		}
	}
	if (len < buflen)
		off += snprintf(&buf[off], len, "\n");
	return off;
}

static size_t gb_timesync_log_frame_ktime(struct gb_timesync_svc *timesync_svc,
					  char *buf, size_t buflen)
{
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_host_device *hd;
	struct gb_timesync_interface *timesync_interface;
	struct gb_interface *interface;
	struct timespec ts;
	unsigned int len;
	size_t off;

	/* AP */
	gb_timesync_to_timespec(timesync_svc, timesync_svc->ap_ping_frame_time,
				&ts);
	off = snprintf(buf, buflen, "%s frametime: ap=%lu.%09lu ",
		       greybus_bus_type.name, ts.tv_sec, ts.tv_nsec);
	len = buflen - off;
	if (len >= buflen)
		goto done;

	/* SVC */
	gb_timesync_to_timespec(timesync_svc, timesync_svc->svc_ping_frame_time,
				&ts);
	off += snprintf(&buf[off], len, "%s=%lu.%09lu ", dev_name(&svc->dev),
			ts.tv_sec, ts.tv_nsec);
	len = buflen - off;
	if (len >= buflen)
		goto done;

	/* APB/GPB */
	hd = timesync_svc->timesync_hd->hd;
	gb_timesync_to_timespec(timesync_svc,
				timesync_svc->timesync_hd->ping_frame_time,
				&ts);
	off += snprintf(&buf[off], len, "%s=%lu.%09lu ",
			dev_name(&hd->dev),
			ts.tv_sec, ts.tv_nsec);
	len = buflen - off;
	if (len >= buflen)
		goto done;

	list_for_each_entry(timesync_interface,
			    &timesync_svc->interface_list, list) {
		interface = timesync_interface->interface;
		gb_timesync_to_timespec(timesync_svc,
					timesync_interface->ping_frame_time,
					&ts);
		off += snprintf(&buf[off], len, "%s=%lu.%09lu ",
				dev_name(&interface->dev),
				ts.tv_sec, ts.tv_nsec);
		len = buflen - off;
		if (len >= buflen)
			goto done;
	}
	off += snprintf(&buf[off], len, "\n");
done:
	return off;
}

/*
 * Send an SVC initiated wake 'ping' to each TimeSync participant.
 * Get the FrameTime from each participant associated with the wake
 * ping.
 */
static void gb_timesync_ping(struct gb_timesync_svc *timesync_svc)
{
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_host_device *hd;
	struct gb_timesync_interface *timesync_interface;
	struct gb_control *control;
	u64 *ping_frame_time;
	int ret;

	/* Get access to the wake pins in the AP and SVC */
	ret = gb_timesync_platform_lock_bus(timesync_svc);
	if (ret < 0) {
		gb_timesync_platform_lock_bus_fail(timesync_svc, ret);
		return;
	}
	ret = gb_svc_timesync_wake_pins_acquire(svc, timesync_svc->strobe_mask);
	if (ret) {
		dev_err(&svc->dev,
			"gb_svc_timesync_wake_pins_acquire %d\n", ret);
		gb_timesync_teardown(timesync_svc);
		return;
	}

	/* Have SVC generate a timesync ping */
	timesync_svc->capture_ping = true;
	timesync_svc->svc_ping_frame_time = 0;
	ret = gb_svc_timesync_ping(svc, &timesync_svc->svc_ping_frame_time);
	timesync_svc->capture_ping = false;
	if (ret) {
		dev_err(&svc->dev,
			"gb_svc_timesync_ping %d\n", ret);
		gb_timesync_teardown(timesync_svc);
		return;
	}

	/* Get the ping FrameTime from each APB/GPB */
	hd = timesync_svc->timesync_hd->hd;
	timesync_svc->timesync_hd->ping_frame_time = 0;
	ret = hd->driver->timesync_get_last_event(hd,
		&timesync_svc->timesync_hd->ping_frame_time);
	if (ret)
		dev_err(&hd->dev, "host timesync_get_last_event %d\n", ret);

	list_for_each_entry(timesync_interface,
			    &timesync_svc->interface_list, list) {
		control = timesync_interface->interface->control;
		timesync_interface->ping_frame_time = 0;
		ping_frame_time = &timesync_interface->ping_frame_time;
		ret = gb_control_timesync_get_last_event(control,
							 ping_frame_time);
		if (ret) {
			dev_err(&timesync_interface->interface->dev,
				"gb_control_timesync_get_last_event %d\n", ret);
		}
	}

	/* Ping success - move to timesync active */
	gb_svc_timesync_wake_pins_release(svc);
	gb_timesync_platform_unlock_bus();
	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_ACTIVE);
}

static void gb_timesync_log_ping_time(struct gb_timesync_svc *timesync_svc)
{
	char *buf;

	if (!timesync_svc->print_ping)
		return;

	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (buf) {
		gb_timesync_log_frame_time(timesync_svc, buf, PAGE_SIZE);
		dev_dbg(&timesync_svc->svc->dev, "%s", buf);
		kfree(buf);
	}
}

/*
 * Perform the actual work of scheduled TimeSync logic.
 */
static void gb_timesync_worker(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct gb_timesync_svc *timesync_svc =
		container_of(delayed_work, struct gb_timesync_svc, delayed_work);

	mutex_lock(&timesync_svc->mutex);

	switch (timesync_svc->state) {
	case GB_TIMESYNC_STATE_INIT:
		gb_timesync_enable(timesync_svc);
		break;

	case GB_TIMESYNC_STATE_WAIT_SVC:
		dev_err(&timesync_svc->svc->dev,
			"timeout SVC strobe completion %d/%d\n",
			timesync_svc->strobe, GB_TIMESYNC_MAX_STROBES);
		gb_timesync_teardown(timesync_svc);
		break;

	case GB_TIMESYNC_STATE_AUTHORITATIVE:
		gb_timesync_authoritative(timesync_svc);
		break;

	case GB_TIMESYNC_STATE_PING:
		gb_timesync_ping(timesync_svc);
		gb_timesync_log_ping_time(timesync_svc);
		break;

	default:
		pr_err("Invalid state %d for delayed work\n",
		       timesync_svc->state);
		break;
	}

	mutex_unlock(&timesync_svc->mutex);
}

/*
 * Schedule a new TimeSync INIT or PING operation serialized with respect to
 * gb_timesync_worker().
 */
static int gb_timesync_schedule(struct gb_timesync_svc *timesync_svc, int state)
{
	int ret = 0;

	if (state != GB_TIMESYNC_STATE_INIT && state != GB_TIMESYNC_STATE_PING)
		return -EINVAL;

	mutex_lock(&timesync_svc->mutex);
	if (timesync_svc->state != GB_TIMESYNC_STATE_INVALID)
		gb_timesync_set_state_atomic(timesync_svc, state);
	else
		ret = -ENODEV;
	mutex_unlock(&timesync_svc->mutex);
	return ret;
}

static int __gb_timesync_schedule_synchronous(
	struct gb_timesync_svc *timesync_svc, int state)
{
	unsigned long flags;
	int ret;

	ret = gb_timesync_schedule(timesync_svc, state);
	if (ret)
		return ret;

	ret = wait_event_interruptible(timesync_svc->wait_queue,
			(timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE ||
			 timesync_svc->state == GB_TIMESYNC_STATE_INACTIVE ||
			 timesync_svc->state == GB_TIMESYNC_STATE_INVALID));
	if (ret)
		return ret;

	mutex_lock(&timesync_svc->mutex);
	spin_lock_irqsave(&timesync_svc->spinlock, flags);

	ret = __gb_timesync_get_status(timesync_svc);

	spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
	mutex_unlock(&timesync_svc->mutex);

	return ret;
}

static struct gb_timesync_svc *gb_timesync_find_timesync_svc(
	struct gb_host_device *hd)
{
	struct gb_timesync_svc *timesync_svc;

	list_for_each_entry(timesync_svc, &gb_timesync_svc_list, list) {
		if (timesync_svc->svc == hd->svc)
			return timesync_svc;
	}
	return NULL;
}

static struct gb_timesync_interface *gb_timesync_find_timesync_interface(
	struct gb_timesync_svc *timesync_svc,
	struct gb_interface *interface)
{
	struct gb_timesync_interface *timesync_interface;

	list_for_each_entry(timesync_interface,
			    &timesync_svc->interface_list, list) {
		if (timesync_interface->interface == interface)
			return timesync_interface;
	}
	return NULL;
}
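/*
 * Schedule a TimeSync INIT and block until synchronization completes,
 * fails, or is torn down, retrying up to GB_TIMESYNC_MAX_RETRIES times.
 */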
int gb_timesync_schedule_synchronous(struct gb_interface *interface)
{
	int ret;
	struct gb_timesync_svc *timesync_svc;
	int retries;

	if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
		return 0;

	mutex_lock(&gb_timesync_svc_list_mutex);
	for (retries = 0; retries < GB_TIMESYNC_MAX_RETRIES; retries++) {
		timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
		if (!timesync_svc) {
			ret = -ENODEV;
			goto done;
		}

		ret = __gb_timesync_schedule_synchronous(timesync_svc,
						 GB_TIMESYNC_STATE_INIT);
		if (!ret)
			break;
	}
	if (ret && retries == GB_TIMESYNC_MAX_RETRIES)
		ret = -ETIMEDOUT;
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_schedule_synchronous);

void gb_timesync_schedule_asynchronous(struct gb_interface *interface)
{
	struct gb_timesync_svc *timesync_svc;

	if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
		return;

	mutex_lock(&gb_timesync_svc_list_mutex);
	timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
	if (!timesync_svc)
		goto done;

	gb_timesync_schedule(timesync_svc, GB_TIMESYNC_STATE_INIT);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
}
EXPORT_SYMBOL_GPL(gb_timesync_schedule_asynchronous);

static ssize_t gb_timesync_ping_read(struct file *file, char __user *ubuf,
				     size_t len, loff_t *offset, bool ktime)
{
	struct gb_timesync_svc *timesync_svc = file->f_inode->i_private;
	char *buf;
	ssize_t ret = 0;

	mutex_lock(&gb_timesync_svc_list_mutex);
	mutex_lock(&timesync_svc->mutex);
	if (list_empty(&timesync_svc->interface_list))
		ret = -ENODEV;
	timesync_svc->print_ping = false;
	mutex_unlock(&timesync_svc->mutex);
	if (ret)
		goto done;

	ret = __gb_timesync_schedule_synchronous(timesync_svc,
						 GB_TIMESYNC_STATE_PING);
	if (ret)
		goto done;

	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto done;
	}

	if (ktime)
		ret = gb_timesync_log_frame_ktime(timesync_svc, buf, PAGE_SIZE);
	else
		ret = gb_timesync_log_frame_time(timesync_svc, buf, PAGE_SIZE);
	if (ret > 0)
		ret = simple_read_from_buffer(ubuf, len, offset, buf, ret);
	kfree(buf);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
	return ret;
}

static ssize_t gb_timesync_ping_read_frame_time(struct file *file,
						char __user *buf,
						size_t len, loff_t *offset)
{
	return gb_timesync_ping_read(file, buf, len, offset, false);
}

static ssize_t gb_timesync_ping_read_frame_ktime(struct file *file,
						 char __user *buf,
						 size_t len, loff_t *offset)
{
	return gb_timesync_ping_read(file, buf, len, offset, true);
}

static const struct file_operations gb_timesync_debugfs_frame_time_ops = {
	.read		= gb_timesync_ping_read_frame_time,
};

static const struct file_operations gb_timesync_debugfs_frame_ktime_ops = {
	.read		= gb_timesync_ping_read_frame_ktime,
};

static int gb_timesync_hd_add(struct gb_timesync_svc *timesync_svc,
			      struct gb_host_device *hd)
{
	struct gb_timesync_host_device *timesync_hd;

	timesync_hd = kzalloc(sizeof(*timesync_hd), GFP_KERNEL);
	if (!timesync_hd)
		return -ENOMEM;

	WARN_ON(timesync_svc->timesync_hd);
	timesync_hd->hd = hd;
	timesync_svc->timesync_hd = timesync_hd;

	return 0;
}

static void gb_timesync_hd_remove(struct gb_timesync_svc *timesync_svc,
				  struct gb_host_device *hd)
{
	if (timesync_svc->timesync_hd->hd == hd) {
		kfree(timesync_svc->timesync_hd);
		timesync_svc->timesync_hd = NULL;
		return;
	}
	WARN_ON(1);
}

int gb_timesync_svc_add(struct gb_svc *svc)
{
	struct gb_timesync_svc *timesync_svc;
	int ret;

	timesync_svc = kzalloc(sizeof(*timesync_svc), GFP_KERNEL);
	if (!timesync_svc)
		return -ENOMEM;

	timesync_svc->work_queue =
		create_singlethread_workqueue("gb-timesync-work_queue");

	if (!timesync_svc->work_queue) {
		kfree(timesync_svc);
		return -ENOMEM;
	}

	mutex_lock(&gb_timesync_svc_list_mutex);
	INIT_LIST_HEAD(&timesync_svc->interface_list);
	INIT_DELAYED_WORK(&timesync_svc->delayed_work, gb_timesync_worker);
	mutex_init(&timesync_svc->mutex);
	spin_lock_init(&timesync_svc->spinlock);
	init_waitqueue_head(&timesync_svc->wait_queue);

	timesync_svc->svc = svc;
	timesync_svc->frame_time_offset = 0;
	timesync_svc->capture_ping = false;
	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_INACTIVE);

	timesync_svc->frame_time_dentry =
		debugfs_create_file("frame-time", S_IRUGO, svc->debugfs_dentry,
				    timesync_svc,
				    &gb_timesync_debugfs_frame_time_ops);
	timesync_svc->frame_ktime_dentry =
		debugfs_create_file("frame-ktime", S_IRUGO, svc->debugfs_dentry,
				    timesync_svc,
				    &gb_timesync_debugfs_frame_ktime_ops);

	list_add(&timesync_svc->list, &gb_timesync_svc_list);
	ret = gb_timesync_hd_add(timesync_svc, svc->hd);
	if (ret) {
		list_del(&timesync_svc->list);
		debugfs_remove(timesync_svc->frame_ktime_dentry);
		debugfs_remove(timesync_svc->frame_time_dentry);
		destroy_workqueue(timesync_svc->work_queue);
		kfree(timesync_svc);
		goto done;
	}

	init_timer(&timesync_svc->ktime_timer);
	timesync_svc->ktime_timer.function = gb_timesync_ktime_timer_fn;
	timesync_svc->ktime_timer.expires = jiffies + GB_TIMESYNC_KTIME_UPDATE;
	timesync_svc->ktime_timer.data = (unsigned long)timesync_svc;
	add_timer(&timesync_svc->ktime_timer);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_svc_add);

void gb_timesync_svc_remove(struct gb_svc *svc)
{
	struct gb_timesync_svc *timesync_svc;
	struct gb_timesync_interface *timesync_interface;
	struct gb_timesync_interface *next;

	mutex_lock(&gb_timesync_svc_list_mutex);
	timesync_svc = gb_timesync_find_timesync_svc(svc->hd);
	if (!timesync_svc)
		goto done;

	cancel_delayed_work_sync(&timesync_svc->delayed_work);

	mutex_lock(&timesync_svc->mutex);

	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_INVALID);
	del_timer_sync(&timesync_svc->ktime_timer);
	gb_timesync_teardown(timesync_svc);

	gb_timesync_hd_remove(timesync_svc, svc->hd);
	list_for_each_entry_safe(timesync_interface, next,
				 &timesync_svc->interface_list, list) {
		list_del(&timesync_interface->list);
		kfree(timesync_interface);
	}
	debugfs_remove(timesync_svc->frame_ktime_dentry);
	debugfs_remove(timesync_svc->frame_time_dentry);
	destroy_workqueue(timesync_svc->work_queue);
	list_del(&timesync_svc->list);

	mutex_unlock(&timesync_svc->mutex);

	kfree(timesync_svc);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
}
EXPORT_SYMBOL_GPL(gb_timesync_svc_remove);

/*
 * Add a Greybus Interface to the set of TimeSync Interfaces.
 */
int gb_timesync_interface_add(struct gb_interface *interface)
{
	struct gb_timesync_svc *timesync_svc;
	struct gb_timesync_interface *timesync_interface;
	int ret = 0;

	if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
		return 0;

	mutex_lock(&gb_timesync_svc_list_mutex);
	timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
	if (!timesync_svc) {
		ret = -ENODEV;
		goto done;
	}

	timesync_interface = kzalloc(sizeof(*timesync_interface), GFP_KERNEL);
	if (!timesync_interface) {
		ret = -ENOMEM;
		goto done;
	}

	mutex_lock(&timesync_svc->mutex);
	timesync_interface->interface = interface;
	list_add(&timesync_interface->list, &timesync_svc->interface_list);
	timesync_svc->strobe_mask |= 1 << interface->interface_id;
	mutex_unlock(&timesync_svc->mutex);

done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_interface_add);

/*
 * Remove a Greybus Interface from the set of TimeSync Interfaces.
 */
void gb_timesync_interface_remove(struct gb_interface *interface)
{
	struct gb_timesync_svc *timesync_svc;
	struct gb_timesync_interface *timesync_interface;

	if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
		return;

	mutex_lock(&gb_timesync_svc_list_mutex);
	timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
	if (!timesync_svc)
		goto done;

	timesync_interface = gb_timesync_find_timesync_interface(timesync_svc,
								 interface);
	if (!timesync_interface)
		goto done;

	mutex_lock(&timesync_svc->mutex);
	timesync_svc->strobe_mask &= ~(1 << interface->interface_id);
	list_del(&timesync_interface->list);
	kfree(timesync_interface);
	mutex_unlock(&timesync_svc->mutex);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
}
EXPORT_SYMBOL_GPL(gb_timesync_interface_remove);

/*
 * Give the authoritative FrameTime to the calling function. Returns zero if we
 * are not in GB_TIMESYNC_STATE_ACTIVE.
 */
static u64 gb_timesync_get_frame_time(struct gb_timesync_svc *timesync_svc)
{
	unsigned long flags;
	u64 ret;

	spin_lock_irqsave(&timesync_svc->spinlock, flags);
	if (timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE)
		ret = __gb_timesync_get_frame_time(timesync_svc);
	else
		ret = 0;
	spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
	return ret;
}

u64 gb_timesync_get_frame_time_by_interface(struct gb_interface *interface)
{
	struct gb_timesync_svc *timesync_svc;
	u64 ret = 0;

	mutex_lock(&gb_timesync_svc_list_mutex);
	timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
	if (!timesync_svc)
		goto done;

	ret = gb_timesync_get_frame_time(timesync_svc);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_get_frame_time_by_interface);

u64 gb_timesync_get_frame_time_by_svc(struct gb_svc *svc)
{
	struct gb_timesync_svc *timesync_svc;
	u64 ret = 0;

	mutex_lock(&gb_timesync_svc_list_mutex);
	timesync_svc = gb_timesync_find_timesync_svc(svc->hd);
	if (!timesync_svc)
		goto done;

	ret = gb_timesync_get_frame_time(timesync_svc);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_get_frame_time_by_svc);

/* Incrementally updates the conversion base from FrameTime to ktime */
static void gb_timesync_ktime_timer_fn(unsigned long data)
{
	struct gb_timesync_svc *timesync_svc =
		(struct gb_timesync_svc *)data;
	unsigned long flags;
	u64 frame_time;
	struct timespec ts;

	spin_lock_irqsave(&timesync_svc->spinlock, flags);

	if (timesync_svc->state != GB_TIMESYNC_STATE_ACTIVE)
		goto done;

	ktime_get_ts(&ts);
	frame_time = __gb_timesync_get_frame_time(timesync_svc);
	gb_timesync_store_ktime(timesync_svc, ts, frame_time);

done:
	spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
	mod_timer(&timesync_svc->ktime_timer,
		  jiffies + GB_TIMESYNC_KTIME_UPDATE);
}

int gb_timesync_to_timespec_by_svc(struct gb_svc *svc, u64 frame_time,
				   struct timespec *ts)
{
	struct gb_timesync_svc *timesync_svc;
	int ret = 0;

	mutex_lock(&gb_timesync_svc_list_mutex);
	timesync_svc = gb_timesync_find_timesync_svc(svc->hd);
	if (!timesync_svc) {
		ret = -ENODEV;
		goto done;
	}
	ret = gb_timesync_to_timespec(timesync_svc, frame_time, ts);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_to_timespec_by_svc);

int gb_timesync_to_timespec_by_interface(struct gb_interface *interface,
					 u64 frame_time, struct timespec *ts)
{
	struct gb_timesync_svc *timesync_svc;
	int ret = 0;

	mutex_lock(&gb_timesync_svc_list_mutex);
	timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
	if (!timesync_svc) {
		ret = -ENODEV;
		goto done;
	}

	ret = gb_timesync_to_timespec(timesync_svc, frame_time, ts);
done:
	mutex_unlock(&gb_timesync_svc_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(gb_timesync_to_timespec_by_interface);
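
/*
 * Example usage (hypothetical caller, for illustration only) - convert a
 * FrameTime captured against an Interface into kernel time:
 *
 *	struct timespec ts;
 *	u64 frame_time;
 *
 *	frame_time = gb_timesync_get_frame_time_by_interface(interface);
 *	if (frame_time &&
 *	    !gb_timesync_to_timespec_by_interface(interface, frame_time, &ts))
 *		dev_dbg(&interface->dev, "event at %lu.%09lu\n",
 *			ts.tv_sec, ts.tv_nsec);
 */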
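/*
 * Called from the host-device ISR path for each TimeSync strobe or ping
 * event: timestamp the event and, once GB_TIMESYNC_MAX_STROBES strobes
 * have been captured, advance to the AUTHORITATIVE state.
 */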
void gb_timesync_irq(struct gb_timesync_svc *timesync_svc)
{
	unsigned long flags;
	u64 strobe_time;
	bool strobe_is_ping = true;
	struct timespec ts;

	ktime_get_ts(&ts);
	strobe_time = __gb_timesync_get_frame_time(timesync_svc);

	spin_lock_irqsave(&timesync_svc->spinlock, flags);

	if (timesync_svc->state == GB_TIMESYNC_STATE_PING) {
		if (!timesync_svc->capture_ping)
			goto done_nolog;
		timesync_svc->ap_ping_frame_time = strobe_time;
		goto done_log;
	} else if (timesync_svc->state != GB_TIMESYNC_STATE_WAIT_SVC) {
		goto done_nolog;
	}

	timesync_svc->strobe_data[timesync_svc->strobe].frame_time = strobe_time;
	timesync_svc->strobe_data[timesync_svc->strobe].ts = ts;

	if (++timesync_svc->strobe == GB_TIMESYNC_MAX_STROBES) {
		gb_timesync_set_state(timesync_svc,
				      GB_TIMESYNC_STATE_AUTHORITATIVE);
	}
	strobe_is_ping = false;
done_log:
	trace_gb_timesync_irq(strobe_is_ping, timesync_svc->strobe,
			      GB_TIMESYNC_MAX_STROBES, strobe_time);
done_nolog:
	spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
}
EXPORT_SYMBOL(gb_timesync_irq);

int __init gb_timesync_init(void)
{
	int ret = 0;

	ret = gb_timesync_platform_init();
	if (ret) {
		pr_err("timesync platform init fail!\n");
		return ret;
	}

	gb_timesync_clock_rate = gb_timesync_platform_get_clock_rate();

	/* Calculate nanoseconds and femtoseconds per clock */
	gb_timesync_fs_per_clock = FSEC_PER_SEC;
	do_div(gb_timesync_fs_per_clock, gb_timesync_clock_rate);
	gb_timesync_ns_per_clock = NSEC_PER_SEC;
	do_div(gb_timesync_ns_per_clock, gb_timesync_clock_rate);
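	/* e.g. at 19.2 MHz: ns_per_clock = 52, fs_per_clock = 52083333 */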

	/* Calculate the maximum number of clocks we will convert to ktime */
	gb_timesync_max_ktime_diff =
		GB_TIMESYNC_MAX_KTIME_CONVERSION * gb_timesync_clock_rate;

	pr_info("Time-Sync @ %lu Hz max ktime conversion +/- %d seconds\n",
		gb_timesync_clock_rate, GB_TIMESYNC_MAX_KTIME_CONVERSION);
	return 0;
}

void gb_timesync_exit(void)
{
	gb_timesync_platform_exit();
}