// SPDX-License-Identifier: GPL-2.0+
/*
 * This file contains the functions which manage clocksource drivers.
 *
 * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
#include <linux/tick.h>
#include <linux/kthread.h>
#include <linux/prandom.h>
#include <linux/cpu.h>

#include "tick-internal.h"
#include "timekeeping_internal.h"

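/*
 * Convert a (start, end) cycle pair on @cs to nanoseconds. Deltas below
 * cs->max_cycles fit the fast clocksource_cyc2ns() 64-bit multiply;
 * larger deltas fall back to mul_u64_u32_shr(), whose wider intermediate
 * cannot overflow during the delta * mult step.
 */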
static noinline u64 cycles_to_nsec_safe(struct clocksource *cs, u64 start, u64 end)
{
	u64 delta = clocksource_delta(end, start, cs->mask, cs->max_raw_delta);

	if (likely(delta < cs->max_cycles))
		return clocksource_cyc2ns(delta, cs->mult, cs->shift);

	return mul_u64_u32_shr(delta, cs->mult, cs->shift);
}

/**
 * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks
 * @mult: pointer to mult variable
 * @shift: pointer to shift variable
 * @from: frequency to convert from
 * @to: frequency to convert to
 * @maxsec: guaranteed runtime conversion range in seconds
 *
 * The function evaluates the shift/mult pair for the scaled math
 * operations of clocksources and clockevents.
 *
 * @to and @from are frequency values in HZ. For clock sources @to is
 * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
 * events @to is the counter frequency and @from is NSEC_PER_SEC.
 *
 * The @maxsec conversion range argument controls the time frame in
 * seconds which must be covered by the runtime conversion with the
 * calculated mult and shift factors. This guarantees that no 64bit
 * overflow happens when the input value of the conversion is
 * multiplied with the calculated mult factor. Larger ranges may
 * reduce the conversion accuracy by choosing smaller mult and shift
 * factors.
 */
void
clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
{
	u64 tmp;
	u32 sft, sftacc = 32;

	/*
	 * Calculate the shift factor which is limiting the conversion
	 * range:
	 */
	tmp = ((u64)maxsec * from) >> 32;
	while (tmp) {
		tmp >>= 1;
		sftacc--;
	}

	/*
	 * Find the conversion shift/mult pair which has the best
	 * accuracy and fits the maxsec conversion range:
	 */
	for (sft = 32; sft > 0; sft--) {
		tmp = (u64) to << sft;
		tmp += from / 2;
		do_div(tmp, from);
		if ((tmp >> sftacc) == 0)
			break;
	}
	*mult = tmp;
	*shift = sft;
}
EXPORT_SYMBOL_GPL(clocks_calc_mult_shift);
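
/*
 * Worked example (illustrative numbers, not taken from any driver): a
 * hypothetical 19.2 MHz counter converted to nanoseconds with at least
 * 600 seconds of guaranteed range:
 *
 *	u32 mult, shift;
 *
 *	clocks_calc_mult_shift(&mult, &shift, 19200000, NSEC_PER_SEC, 600);
 *
 * yields mult = 873813333 and shift = 24, since
 * (19200000 * 873813333) >> 24 is within a nanosecond of NSEC_PER_SEC.
 */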

/*[Clocksource internal variables]---------
 * curr_clocksource:
 *	currently selected clocksource.
 * suspend_clocksource:
 *	used to calculate the suspend time.
 * clocksource_list:
 *	linked list with the registered clocksources
 * clocksource_mutex:
 *	protects manipulations to curr_clocksource and the clocksource_list
 * override_name:
 *	Name of the user-specified clocksource.
 */
static struct clocksource *curr_clocksource;
static struct clocksource *suspend_clocksource;
static LIST_HEAD(clocksource_list);
static DEFINE_MUTEX(clocksource_mutex);
static char override_name[CS_NAME_LEN];
static int finished_booting;
static u64 suspend_start;

/*
 * Interval: 0.5sec.
 */
#define WATCHDOG_INTERVAL (HZ >> 1)
#define WATCHDOG_INTERVAL_MAX_NS ((2 * WATCHDOG_INTERVAL) * (NSEC_PER_SEC / HZ))

/*
 * Threshold: 0.0312s, when doubled: 0.0625s.
 */
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 5)

/*
 * Maximum permissible delay between two readouts of the watchdog
 * clocksource surrounding a read of the clocksource being validated.
 * This delay could be due to SMIs, NMIs, or to VCPU preemptions. Used as
 * a lower bound for cs->uncertainty_margin values when registering clocks.
 *
 * The default of 500 parts per million is based on NTP's limits.
 * If a clocksource is good enough for NTP, it is good enough for us!
 *
 * In other words, by default, even if a clocksource is extremely
 * precise (for example, with a sub-nanosecond period), the maximum
 * permissible skew between the clocksource watchdog and the clocksource
 * under test is not permitted to go below the 500ppm minimum defined
 * by MAX_SKEW_USEC. This 500ppm minimum may be overridden using the
 * CLOCKSOURCE_WATCHDOG_MAX_SKEW_US Kconfig option.
 */
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US
#define MAX_SKEW_USEC	CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US
#else
#define MAX_SKEW_USEC	(125 * WATCHDOG_INTERVAL / HZ)
#endif

/*
 * Default for maximum permissible skew when cs->uncertainty_margin is
 * not specified, and the lower bound even when cs->uncertainty_margin
 * is specified. This is also the default that is used when registering
 * clocks with unspecified cs->uncertainty_margin, so this macro is used
 * even in CONFIG_CLOCKSOURCE_WATCHDOG=n kernels.
 */
#define WATCHDOG_MAX_SKEW (MAX_SKEW_USEC * NSEC_PER_USEC)
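
/*
 * Back-of-the-envelope check (assuming HZ = 1000, purely illustrative):
 * the default MAX_SKEW_USEC works out to 125 * 500 / 1000 = 62us, so the
 * per-clock lower bound of 2 * WATCHDOG_MAX_SKEW is roughly 125us. Summed
 * over the watchdog and the clocksource under test, that is roughly 250us
 * per 0.5s WATCHDOG_INTERVAL, i.e. the ~500ppm mentioned above.
 */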

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static void clocksource_watchdog_work(struct work_struct *work);
static void clocksource_select(void);

static LIST_HEAD(watchdog_list);
static struct clocksource *watchdog;
static struct timer_list watchdog_timer;
static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
static DEFINE_SPINLOCK(watchdog_lock);
static int watchdog_running;
static atomic_t watchdog_reset_pending;
static int64_t watchdog_max_interval;

static inline void clocksource_watchdog_lock(unsigned long *flags)
{
	spin_lock_irqsave(&watchdog_lock, *flags);
}

static inline void clocksource_watchdog_unlock(unsigned long *flags)
{
	spin_unlock_irqrestore(&watchdog_lock, *flags);
}

static int clocksource_watchdog_kthread(void *data);
static void __clocksource_change_rating(struct clocksource *cs, int rating);

static void clocksource_watchdog_work(struct work_struct *work)
{
	/*
	 * We cannot directly run clocksource_watchdog_kthread() here, because
	 * clocksource_select() calls timekeeping_notify() which uses
	 * stop_machine(). One cannot use stop_machine() from a workqueue due
	 * to lock inversions wrt CPU hotplug.
	 *
	 * Also, we only ever run this work once or twice during the lifetime
	 * of the kernel, so there is no point in creating a more permanent
	 * kthread for this.
	 *
	 * If kthread_run() fails, the next watchdog scan over the
	 * watchdog_list will find the unstable clock again.
	 */
	kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
}
193
__clocksource_unstable(struct clocksource * cs)194 static void __clocksource_unstable(struct clocksource *cs)
195 {
196 cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
197 cs->flags |= CLOCK_SOURCE_UNSTABLE;
198
199 /*
200 * If the clocksource is registered clocksource_watchdog_kthread() will
201 * re-rate and re-select.
202 */
203 if (list_empty(&cs->list)) {
204 cs->rating = 0;
205 return;
206 }
207
208 if (cs->mark_unstable)
209 cs->mark_unstable(cs);
210
211 /* kick clocksource_watchdog_kthread() */
212 if (finished_booting)
213 schedule_work(&watchdog_work);
214 }
215
216 /**
217 * clocksource_mark_unstable - mark clocksource unstable via watchdog
218 * @cs: clocksource to be marked unstable
219 *
220 * This function is called by the x86 TSC code to mark clocksources as unstable;
221 * it defers demotion and re-selection to a kthread.
222 */
clocksource_mark_unstable(struct clocksource * cs)223 void clocksource_mark_unstable(struct clocksource *cs)
224 {
225 unsigned long flags;
226
227 spin_lock_irqsave(&watchdog_lock, flags);
228 if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
229 if (!list_empty(&cs->list) && list_empty(&cs->wd_list))
230 list_add(&cs->wd_list, &watchdog_list);
231 __clocksource_unstable(cs);
232 }
233 spin_unlock_irqrestore(&watchdog_lock, flags);
234 }
235
236 static int verify_n_cpus = 8;
237 module_param(verify_n_cpus, int, 0644);
238
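/*
 * Possible outcomes of a paired watchdog/clocksource read attempt in
 * cs_watchdog_read() below:
 * WD_READ_SUCCESS:  both reads completed within the allowed delay.
 * WD_READ_UNSTABLE: reads repeatedly exceeded the delay budget, so the
 *                   clocksource readout cannot be trusted.
 * WD_READ_SKIP:     the watchdog read-back itself was delayed (busy
 *                   system); skip this round of skew checking.
 */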
enum wd_read_status {
	WD_READ_SUCCESS,
	WD_READ_UNSTABLE,
	WD_READ_SKIP
};

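/*
 * Sandwich one read of @cs between two reads of the watchdog, retrying a
 * bounded number of times: if the watchdog delta around the @cs read stays
 * within the combined uncertainty margins, the @cs readout is tightly
 * bracketed and can be trusted. A long wd_end -> wd_end2 read-back instead
 * points at a busy system, in which case the skew test is skipped.
 */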
static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow, u64 *wdnow)
{
	int64_t md = 2 * watchdog->uncertainty_margin;
	unsigned int nretries, max_retries;
	int64_t wd_delay, wd_seq_delay;
	u64 wd_end, wd_end2;

	max_retries = clocksource_get_max_watchdog_retry();
	for (nretries = 0; nretries <= max_retries; nretries++) {
		local_irq_disable();
		*wdnow = watchdog->read(watchdog);
		*csnow = cs->read(cs);
		wd_end = watchdog->read(watchdog);
		wd_end2 = watchdog->read(watchdog);
		local_irq_enable();

		wd_delay = cycles_to_nsec_safe(watchdog, *wdnow, wd_end);
		if (wd_delay <= md + cs->uncertainty_margin) {
			if (nretries > 1 && nretries >= max_retries) {
				pr_warn("timekeeping watchdog on CPU%d: %s retried %d times before success\n",
					smp_processor_id(), watchdog->name, nretries);
			}
			return WD_READ_SUCCESS;
		}

		/*
		 * Now compute the delay between the consecutive watchdog
		 * reads to see if there is too much external interference
		 * causing significant delay in reading both the clocksource
		 * and the watchdog.
		 *
		 * If the consecutive WD read-back delay > md, report the
		 * system as busy, reinit the watchdog and skip the current
		 * watchdog test.
		 */
		wd_seq_delay = cycles_to_nsec_safe(watchdog, wd_end, wd_end2);
		if (wd_seq_delay > md)
			goto skip_test;
	}

	pr_warn("timekeeping watchdog on CPU%d: wd-%s-wd excessive read-back delay of %lldns vs. limit of %ldns, wd-wd read-back delay only %lldns, attempt %d, marking %s unstable\n",
		smp_processor_id(), cs->name, wd_delay, WATCHDOG_MAX_SKEW, wd_seq_delay, nretries, cs->name);
	return WD_READ_UNSTABLE;

skip_test:
	pr_info("timekeeping watchdog on CPU%d: %s wd-wd read-back delay of %lldns\n",
		smp_processor_id(), watchdog->name, wd_seq_delay);
	pr_info("wd-%s-wd read-back delay of %lldns, clock-skew test skipped!\n",
		cs->name, wd_delay);
	return WD_READ_SKIP;
}

static u64 csnow_mid;
static cpumask_t cpus_ahead;
static cpumask_t cpus_behind;
static cpumask_t cpus_chosen;

static void clocksource_verify_choose_cpus(void)
{
	int cpu, i, n = verify_n_cpus;

	if (n < 0 || n >= num_online_cpus()) {
		/* Check all of the CPUs. */
		cpumask_copy(&cpus_chosen, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &cpus_chosen);
		return;
	}

	/* If no checking desired, or no other CPU to check, leave. */
	cpumask_clear(&cpus_chosen);
	if (n == 0 || num_online_cpus() <= 1)
		return;

	/* Make sure to select at least one CPU other than the current CPU. */
	cpu = cpumask_first(cpu_online_mask);
	if (cpu == smp_processor_id())
		cpu = cpumask_next(cpu, cpu_online_mask);
	if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
		return;
	cpumask_set_cpu(cpu, &cpus_chosen);

	/* Force a sane value for the boot parameter. */
	if (n > nr_cpu_ids)
		n = nr_cpu_ids;

	/*
	 * Randomly select the specified number of CPUs. If the same
	 * CPU is selected multiple times, that CPU is checked only once,
	 * and no replacement CPU is selected. This gracefully handles
	 * situations where verify_n_cpus is greater than the number of
	 * CPUs that are currently online.
	 */
	for (i = 1; i < n; i++) {
		cpu = get_random_u32_below(nr_cpu_ids);
		cpu = cpumask_next(cpu - 1, cpu_online_mask);
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_first(cpu_online_mask);
		if (!WARN_ON_ONCE(cpu >= nr_cpu_ids))
			cpumask_set_cpu(cpu, &cpus_chosen);
	}

	/* Don't verify ourselves. */
	cpumask_clear_cpu(smp_processor_id(), &cpus_chosen);
}

static void clocksource_verify_one_cpu(void *csin)
{
	struct clocksource *cs = (struct clocksource *)csin;

	csnow_mid = cs->read(cs);
}

void clocksource_verify_percpu(struct clocksource *cs)
{
	int64_t cs_nsec, cs_nsec_max = 0, cs_nsec_min = LLONG_MAX;
	u64 csnow_begin, csnow_end;
	int cpu, testcpu;
	s64 delta;

	if (verify_n_cpus == 0)
		return;
	cpumask_clear(&cpus_ahead);
	cpumask_clear(&cpus_behind);
	cpus_read_lock();
	migrate_disable();
	clocksource_verify_choose_cpus();
	if (cpumask_empty(&cpus_chosen)) {
		migrate_enable();
		cpus_read_unlock();
		pr_warn("Not enough CPUs to check clocksource '%s'.\n", cs->name);
		return;
	}
	testcpu = smp_processor_id();
	pr_info("Checking clocksource %s synchronization from CPU %d to CPUs %*pbl.\n",
		cs->name, testcpu, cpumask_pr_args(&cpus_chosen));
	preempt_disable();
	for_each_cpu(cpu, &cpus_chosen) {
		if (cpu == testcpu)
			continue;
		csnow_begin = cs->read(cs);
		smp_call_function_single(cpu, clocksource_verify_one_cpu, cs, 1);
		csnow_end = cs->read(cs);
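		/*
		 * csnow_mid was read on @cpu between the two local reads:
		 * a negative delta on either side means that CPU's view of
		 * @cs runs behind (or ahead of) the CPU doing the check.
		 */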
		delta = (s64)((csnow_mid - csnow_begin) & cs->mask);
		if (delta < 0)
			cpumask_set_cpu(cpu, &cpus_behind);
		delta = (csnow_end - csnow_mid) & cs->mask;
		if (delta < 0)
			cpumask_set_cpu(cpu, &cpus_ahead);
		cs_nsec = cycles_to_nsec_safe(cs, csnow_begin, csnow_end);
		if (cs_nsec > cs_nsec_max)
			cs_nsec_max = cs_nsec;
		if (cs_nsec < cs_nsec_min)
			cs_nsec_min = cs_nsec;
	}
	preempt_enable();
	migrate_enable();
	cpus_read_unlock();
	if (!cpumask_empty(&cpus_ahead))
		pr_warn("        CPUs %*pbl ahead of CPU %d for clocksource %s.\n",
			cpumask_pr_args(&cpus_ahead), testcpu, cs->name);
	if (!cpumask_empty(&cpus_behind))
		pr_warn("        CPUs %*pbl behind CPU %d for clocksource %s.\n",
			cpumask_pr_args(&cpus_behind), testcpu, cs->name);
	if (!cpumask_empty(&cpus_ahead) || !cpumask_empty(&cpus_behind))
		pr_warn("        CPU %d check durations %lldns - %lldns for clocksource %s.\n",
			testcpu, cs_nsec_min, cs_nsec_max, cs->name);
}
EXPORT_SYMBOL_GPL(clocksource_verify_percpu);

static inline void clocksource_reset_watchdog(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &watchdog_list, wd_list)
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
}

static void clocksource_watchdog(struct timer_list *unused)
{
	int64_t wd_nsec, cs_nsec, interval;
	u64 csnow, wdnow, cslast, wdlast;
	int next_cpu, reset_pending;
	struct clocksource *cs;
	enum wd_read_status read_ret;
	unsigned long extra_wait = 0;
	u32 md;

	spin_lock(&watchdog_lock);
	if (!watchdog_running)
		goto out;

	reset_pending = atomic_read(&watchdog_reset_pending);

	list_for_each_entry(cs, &watchdog_list, wd_list) {

		/* Clocksource already marked unstable? */
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			if (finished_booting)
				schedule_work(&watchdog_work);
			continue;
		}

		read_ret = cs_watchdog_read(cs, &csnow, &wdnow);

		if (read_ret == WD_READ_UNSTABLE) {
			/* Clock readout unreliable, so give it up. */
			__clocksource_unstable(cs);
			continue;
		}

		/*
		 * When WD_READ_SKIP is returned, it means the system is likely
		 * under very heavy load, where the latency of reading the
		 * watchdog/clocksource is very large and affects the accuracy
		 * of the watchdog check. So give the system some space and
		 * suspend the watchdog check for 5 minutes.
		 */
		if (read_ret == WD_READ_SKIP) {
			/*
			 * As the watchdog timer will be suspended, and
			 * cs->last could remain unchanged for 5 minutes, reset
			 * the counters.
			 */
			clocksource_reset_watchdog();
			extra_wait = HZ * 300;
			break;
		}

		/* Clocksource initialized ? */
		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
		    atomic_read(&watchdog_reset_pending)) {
			cs->flags |= CLOCK_SOURCE_WATCHDOG;
			cs->wd_last = wdnow;
			cs->cs_last = csnow;
			continue;
		}

		wd_nsec = cycles_to_nsec_safe(watchdog, cs->wd_last, wdnow);
		cs_nsec = cycles_to_nsec_safe(cs, cs->cs_last, csnow);
		wdlast = cs->wd_last; /* save these in case we print them */
		cslast = cs->cs_last;
		cs->cs_last = csnow;
		cs->wd_last = wdnow;

		if (atomic_read(&watchdog_reset_pending))
			continue;

		/*
		 * The processing of timer softirqs can get delayed (usually
		 * on account of ksoftirqd not getting to run in a timely
		 * manner), which causes the watchdog interval to stretch.
		 * Skew detection may fail for longer watchdog intervals
		 * on account of fixed margins being used.
		 * Some clocksources, e.g. acpi_pm, cannot tolerate
		 * watchdog intervals longer than a few seconds.
		 */
		interval = max(cs_nsec, wd_nsec);
		if (unlikely(interval > WATCHDOG_INTERVAL_MAX_NS)) {
			if (system_state > SYSTEM_SCHEDULING &&
			    interval > 2 * watchdog_max_interval) {
				watchdog_max_interval = interval;
				pr_warn("Long readout interval, skipping watchdog check: cs_nsec: %lld wd_nsec: %lld\n",
					cs_nsec, wd_nsec);
			}
			watchdog_timer.expires = jiffies;
			continue;
		}

		/* Check the deviation from the watchdog clocksource. */
		md = cs->uncertainty_margin + watchdog->uncertainty_margin;
		if (abs(cs_nsec - wd_nsec) > md) {
			s64 cs_wd_msec;
			s64 wd_msec;
			u32 wd_rem;

			pr_warn("timekeeping watchdog on CPU%d: Marking clocksource '%s' as unstable because the skew is too large:\n",
				smp_processor_id(), cs->name);
			pr_warn("                      '%s' wd_nsec: %lld wd_now: %llx wd_last: %llx mask: %llx\n",
				watchdog->name, wd_nsec, wdnow, wdlast, watchdog->mask);
			pr_warn("                      '%s' cs_nsec: %lld cs_now: %llx cs_last: %llx mask: %llx\n",
				cs->name, cs_nsec, csnow, cslast, cs->mask);
			cs_wd_msec = div_s64_rem(cs_nsec - wd_nsec, 1000 * 1000, &wd_rem);
			wd_msec = div_s64_rem(wd_nsec, 1000 * 1000, &wd_rem);
			pr_warn("                      Clocksource '%s' skewed %lld ns (%lld ms) over watchdog '%s' interval of %lld ns (%lld ms)\n",
				cs->name, cs_nsec - wd_nsec, cs_wd_msec, watchdog->name, wd_nsec, wd_msec);
			if (curr_clocksource == cs)
				pr_warn("                      '%s' is current clocksource.\n", cs->name);
			else if (curr_clocksource)
				pr_warn("                      '%s' (not '%s') is current clocksource.\n", curr_clocksource->name, cs->name);
			else
				pr_warn("                      No current clocksource.\n");
			__clocksource_unstable(cs);
			continue;
		}

		if (cs == curr_clocksource && cs->tick_stable)
			cs->tick_stable(cs);

		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
		    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
		    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
			/* Mark it valid for high-res. */
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;

			/*
			 * clocksource_done_booting() will sort it if
			 * finished_booting is not set yet.
			 */
			if (!finished_booting)
				continue;

			/*
			 * If this is not the current clocksource let
			 * the watchdog thread reselect it. Due to the
			 * change to high res this clocksource might
			 * be preferred now. If it is the current
			 * clocksource let the tick code know about
			 * that change.
			 */
			if (cs != curr_clocksource) {
				cs->flags |= CLOCK_SOURCE_RESELECT;
				schedule_work(&watchdog_work);
			} else {
				tick_clock_notify();
			}
		}
	}

	/*
	 * We only clear the watchdog_reset_pending when we did a
	 * full cycle through all clocksources.
	 */
	if (reset_pending)
		atomic_dec(&watchdog_reset_pending);

	/*
	 * Cycle through CPUs to check if the CPUs stay synchronized
	 * to each other.
	 */
	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);

	/*
	 * Arm timer if not already pending: could race with concurrent
	 * pair clocksource_stop_watchdog() clocksource_start_watchdog().
	 */
	if (!timer_pending(&watchdog_timer)) {
		watchdog_timer.expires += WATCHDOG_INTERVAL + extra_wait;
		add_timer_on(&watchdog_timer, next_cpu);
	}
out:
	spin_unlock(&watchdog_lock);
}

static inline void clocksource_start_watchdog(void)
{
	if (watchdog_running || !watchdog || list_empty(&watchdog_list))
		return;
	timer_setup(&watchdog_timer, clocksource_watchdog, 0);
	watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
	add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
	watchdog_running = 1;
}

static inline void clocksource_stop_watchdog(void)
{
	if (!watchdog_running || (watchdog && !list_empty(&watchdog_list)))
		return;
	del_timer(&watchdog_timer);
	watchdog_running = 0;
}

static void clocksource_resume_watchdog(void)
{
	atomic_inc(&watchdog_reset_pending);
}

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	INIT_LIST_HEAD(&cs->wd_list);

	if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
		/* cs is a clocksource to be watched. */
		list_add(&cs->wd_list, &watchdog_list);
		cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
	} else {
		/* cs is a watchdog. */
		if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
			cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
	}
}

static void clocksource_select_watchdog(bool fallback)
{
	struct clocksource *cs, *old_wd;
	unsigned long flags;

	spin_lock_irqsave(&watchdog_lock, flags);
	/* save current watchdog */
	old_wd = watchdog;
	if (fallback)
		watchdog = NULL;

	list_for_each_entry(cs, &clocksource_list, list) {
		/* cs is a clocksource to be watched. */
		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY)
			continue;

		/* Skip current if we were requested for a fallback. */
		if (fallback && cs == old_wd)
			continue;

		/* Pick the best watchdog. */
		if (!watchdog || cs->rating > watchdog->rating)
			watchdog = cs;
	}
	/* If we failed to find a fallback restore the old one. */
	if (!watchdog)
		watchdog = old_wd;

	/* If we changed the watchdog we need to reset cycles. */
	if (watchdog != old_wd)
		clocksource_reset_watchdog();

	/* Check if the watchdog timer needs to be started. */
	clocksource_start_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_dequeue_watchdog(struct clocksource *cs)
{
	if (cs != watchdog) {
		if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
			/* cs is a watched clocksource. */
			list_del_init(&cs->wd_list);
			/* Check if the watchdog timer needs to be stopped. */
			clocksource_stop_watchdog();
		}
	}
}

static int __clocksource_watchdog_kthread(void)
{
	struct clocksource *cs, *tmp;
	unsigned long flags;
	int select = 0;

	/* Do any required per-CPU skew verification. */
	if (curr_clocksource &&
	    curr_clocksource->flags & CLOCK_SOURCE_UNSTABLE &&
	    curr_clocksource->flags & CLOCK_SOURCE_VERIFY_PERCPU)
		clocksource_verify_percpu(curr_clocksource);

	spin_lock_irqsave(&watchdog_lock, flags);
	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
		if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
			list_del_init(&cs->wd_list);
			__clocksource_change_rating(cs, 0);
			select = 1;
		}
		if (cs->flags & CLOCK_SOURCE_RESELECT) {
			cs->flags &= ~CLOCK_SOURCE_RESELECT;
			select = 1;
		}
	}
	/* Check if the watchdog timer needs to be stopped. */
	clocksource_stop_watchdog();
	spin_unlock_irqrestore(&watchdog_lock, flags);

	return select;
}

static int clocksource_watchdog_kthread(void *data)
{
	mutex_lock(&clocksource_mutex);
	if (__clocksource_watchdog_kthread())
		clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}

static bool clocksource_is_watchdog(struct clocksource *cs)
{
	return cs == watchdog;
}

#else /* CONFIG_CLOCKSOURCE_WATCHDOG */

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}

static void clocksource_select_watchdog(bool fallback) { }
static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }
static inline int __clocksource_watchdog_kthread(void) { return 0; }
static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
void clocksource_mark_unstable(struct clocksource *cs) { }

static inline void clocksource_watchdog_lock(unsigned long *flags) { }
static inline void clocksource_watchdog_unlock(unsigned long *flags) { }

#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */

static bool clocksource_is_suspend(struct clocksource *cs)
{
	return cs == suspend_clocksource;
}

static void __clocksource_suspend_select(struct clocksource *cs)
{
	/*
	 * Skip the clocksource which will be stopped in suspend state.
	 */
	if (!(cs->flags & CLOCK_SOURCE_SUSPEND_NONSTOP))
		return;

	/*
	 * The nonstop clocksource can be selected as the suspend clocksource to
	 * calculate the suspend time, so it should not supply suspend/resume
	 * interfaces to suspend the nonstop clocksource when the system suspends.
	 */
	if (cs->suspend || cs->resume) {
		pr_warn("Nonstop clocksource %s should not supply suspend/resume interfaces\n",
			cs->name);
	}

	/* Pick the best rating. */
	if (!suspend_clocksource || cs->rating > suspend_clocksource->rating)
		suspend_clocksource = cs;
}

/**
 * clocksource_suspend_select - Select the best clocksource for suspend timing
 * @fallback: whether to select a fallback clocksource
 */
static void clocksource_suspend_select(bool fallback)
{
	struct clocksource *cs, *old_suspend;

	old_suspend = suspend_clocksource;
	if (fallback)
		suspend_clocksource = NULL;

	list_for_each_entry(cs, &clocksource_list, list) {
		/* Skip current if we were requested for a fallback. */
		if (fallback && cs == old_suspend)
			continue;

		__clocksource_suspend_select(cs);
	}
}

/**
 * clocksource_start_suspend_timing - Start measuring the suspend timing
 * @cs: current clocksource from timekeeping
 * @start_cycles: current cycles from timekeeping
 *
 * This function will save the start cycle values of the suspend timer to
 * calculate the suspend time when resuming the system.
 *
 * This function is called late in the suspend process from timekeeping_suspend(),
 * which means processes are frozen, non-boot CPUs are offline and interrupts
 * are disabled. It is therefore possible to start the suspend timer without
 * taking the clocksource mutex.
 */
void clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles)
{
	if (!suspend_clocksource)
		return;

	/*
	 * If the current clocksource is the suspend timer, we should use the
	 * tkr_mono.cycle_last value as suspend_start to avoid reading the
	 * same value twice from the suspend timer.
	 */
	if (clocksource_is_suspend(cs)) {
		suspend_start = start_cycles;
		return;
	}

	if (suspend_clocksource->enable &&
	    suspend_clocksource->enable(suspend_clocksource)) {
		pr_warn_once("Failed to enable the non-suspend-able clocksource.\n");
		return;
	}

	suspend_start = suspend_clocksource->read(suspend_clocksource);
}

/**
 * clocksource_stop_suspend_timing - Stop measuring the suspend timing
 * @cs: current clocksource from timekeeping
 * @cycle_now: current cycles from timekeeping
 *
 * This function will calculate the suspend time from the suspend timer.
 *
 * Returns nanoseconds since suspend started, 0 if no usable suspend clocksource.
 *
 * This function is called early in the resume process from timekeeping_resume(),
 * which means there is only one CPU, no processes are running and the interrupts
 * are disabled. It is therefore possible to stop the suspend timer without
 * taking the clocksource mutex.
 */
u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 cycle_now)
{
	u64 now, nsec = 0;

	if (!suspend_clocksource)
		return 0;

	/*
	 * If the current clocksource is the suspend timer, we should use the
	 * tkr_mono.cycle_last value from timekeeping as the current cycle to
	 * avoid reading the same value twice from the suspend timer.
	 */
	if (clocksource_is_suspend(cs))
		now = cycle_now;
	else
		now = suspend_clocksource->read(suspend_clocksource);

	if (now > suspend_start)
		nsec = cycles_to_nsec_safe(suspend_clocksource, suspend_start, now);

	/*
	 * Disable the suspend timer to save power if the current clocksource is
	 * not the suspend timer.
	 */
	if (!clocksource_is_suspend(cs) && suspend_clocksource->disable)
		suspend_clocksource->disable(suspend_clocksource);

	return nsec;
}

/**
 * clocksource_suspend - suspend the clocksource(s)
 */
void clocksource_suspend(void)
{
	struct clocksource *cs;

	list_for_each_entry_reverse(cs, &clocksource_list, list)
		if (cs->suspend)
			cs->suspend(cs);
}

/**
 * clocksource_resume - resume the clocksource(s)
 */
void clocksource_resume(void)
{
	struct clocksource *cs;

	list_for_each_entry(cs, &clocksource_list, list)
		if (cs->resume)
			cs->resume(cs);

	clocksource_resume_watchdog();
}

/**
 * clocksource_touch_watchdog - Update watchdog
 *
 * Update the watchdog after exception contexts such as kgdb so as not
 * to incorrectly trip the watchdog. This might fail when the kernel
 * was stopped in code which holds watchdog_lock.
 */
void clocksource_touch_watchdog(void)
{
	clocksource_resume_watchdog();
}

/**
 * clocksource_max_adjustment - Returns max adjustment amount
 * @cs: Pointer to clocksource
 */
static u32 clocksource_max_adjustment(struct clocksource *cs)
{
	u64 ret;

	/*
	 * We won't try to correct for more than 11% adjustments (110,000 ppm).
	 */
	ret = (u64)cs->mult * 11;
	do_div(ret, 100);
	return (u32)ret;
}

/**
 * clocks_calc_max_nsecs - Returns maximum nanoseconds that can be converted
 * @mult: cycle to nanosecond multiplier
 * @shift: cycle to nanosecond divisor (power of two)
 * @maxadj: maximum adjustment value to mult (~11%)
 * @mask: bitmask for two's complement subtraction of non 64 bit counters
 * @max_cyc: maximum cycle value before potential overflow (does not include
 *	any safety margin)
 *
 * NOTE: This function includes a safety margin of 50%, in other words, we
 * return half the number of nanoseconds the hardware counter can technically
 * cover. This is done so that we can potentially detect problems caused by
 * delayed timers or bad hardware, which might result in time intervals that
 * are larger than what the math used can handle without overflows.
 */
u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc)
{
	u64 max_nsecs, max_cycles;

	/*
	 * Calculate the maximum number of cycles that we can pass to the
	 * cyc2ns() function without overflowing a 64-bit result.
	 */
	max_cycles = ULLONG_MAX;
	do_div(max_cycles, mult+maxadj);

	/*
	 * The actual maximum number of cycles we can defer the clocksource is
	 * determined by the minimum of max_cycles and mask.
	 * Note: Here we subtract the maxadj to make sure we don't sleep for
	 * too long if there's a large negative adjustment.
	 */
	max_cycles = min(max_cycles, mask);
	max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift);

	/* return the max_cycles value as well if requested */
	if (max_cyc)
		*max_cyc = max_cycles;

	/* Return 50% of the actual maximum, so we can detect bad values */
	max_nsecs >>= 1;

	return max_nsecs;
}
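
/*
 * Rough illustration (hypothetical numbers): a 32-bit counter with
 * mult = 100 and shift = 0 (100ns per cycle, i.e. a 10 MHz clock) and
 * maxadj = 11 gives max_cycles = min(ULLONG_MAX / 111, 0xffffffff) =
 * 0xffffffff, and max_nsecs = (0xffffffff * 89) >> 1, roughly 191
 * seconds after the 50% safety margin.
 */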

/**
 * clocksource_update_max_deferment - Updates the clocksource max_idle_ns & max_cycles
 * @cs: Pointer to clocksource to be updated
 */
static inline void clocksource_update_max_deferment(struct clocksource *cs)
{
	cs->max_idle_ns = clocks_calc_max_nsecs(cs->mult, cs->shift,
						cs->maxadj, cs->mask,
						&cs->max_cycles);

	/*
	 * Threshold for detecting negative motion in clocksource_delta().
	 *
	 * Allow for 0.875 of the counter width so that overly long idle
	 * sleeps, which go slightly over mask/2, do not trigger the
	 * negative motion detection.
	 */
	cs->max_raw_delta = (cs->mask >> 1) + (cs->mask >> 2) + (cs->mask >> 3);
}

static struct clocksource *clocksource_find_best(bool oneshot, bool skipcur)
{
	struct clocksource *cs;

	if (!finished_booting || list_empty(&clocksource_list))
		return NULL;

	/*
	 * We pick the clocksource with the highest rating. If oneshot
	 * mode is active, we pick the highres valid clocksource with
	 * the best rating.
	 */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (skipcur && cs == curr_clocksource)
			continue;
		if (oneshot && !(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			continue;
		return cs;
	}
	return NULL;
}

static void __clocksource_select(bool skipcur)
{
	bool oneshot = tick_oneshot_mode_active();
	struct clocksource *best, *cs;

	/* Find the best suitable clocksource */
	best = clocksource_find_best(oneshot, skipcur);
	if (!best)
		return;

	if (!strlen(override_name))
		goto found;

	/* Check for the override clocksource. */
	list_for_each_entry(cs, &clocksource_list, list) {
		if (skipcur && cs == curr_clocksource)
			continue;
		if (strcmp(cs->name, override_name) != 0)
			continue;
		/*
		 * Check to make sure we don't switch to a non-highres
		 * capable clocksource if the tick code is in oneshot
		 * mode (highres or nohz)
		 */
		if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) {
			/* Override clocksource cannot be used. */
			if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
				pr_warn("Override clocksource %s is unstable and not HRT compatible - cannot switch while in HRT/NOHZ mode\n",
					cs->name);
				override_name[0] = 0;
			} else {
				/*
				 * The override cannot be currently verified.
				 * Deferring to let the watchdog check.
				 */
				pr_info("Override clocksource %s is not currently HRT compatible - deferring\n",
					cs->name);
			}
		} else
			/* Override clocksource can be used. */
			best = cs;
		break;
	}

found:
	if (curr_clocksource != best && !timekeeping_notify(best)) {
		pr_info("Switched to clocksource %s\n", best->name);
		curr_clocksource = best;
	}
}

/**
 * clocksource_select - Select the best clocksource available
 *
 * Private function. Must hold clocksource_mutex when called.
 *
 * Select the clocksource with the best rating, or the clocksource,
 * which is selected by userspace override.
 */
static void clocksource_select(void)
{
	__clocksource_select(false);
}

static void clocksource_select_fallback(void)
{
	__clocksource_select(true);
}

/*
 * clocksource_done_booting - Called near the end of core bootup
 *
 * Hack to avoid lots of clocksource churn at boot time.
 * We use fs_initcall because we want this to start before
 * device_initcall but after subsys_initcall.
 */
static int __init clocksource_done_booting(void)
{
	mutex_lock(&clocksource_mutex);
	curr_clocksource = clocksource_default_clock();
	finished_booting = 1;
	/*
	 * Run the watchdog first to eliminate unstable clock sources
	 */
	__clocksource_watchdog_kthread();
	clocksource_select();
	mutex_unlock(&clocksource_mutex);
	return 0;
}
fs_initcall(clocksource_done_booting);

/*
 * Enqueue the clocksource sorted by rating
 */
static void clocksource_enqueue(struct clocksource *cs)
{
	struct list_head *entry = &clocksource_list;
	struct clocksource *tmp;

	list_for_each_entry(tmp, &clocksource_list, list) {
		/* Keep track of the place where to insert */
		if (tmp->rating < cs->rating)
			break;
		entry = &tmp->list;
	}
	list_add(&cs->list, entry);
}
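
/*
 * Illustration (hypothetical ratings): with registered clocksources rated
 * 300, 250 and 200, a new entry rated 260 is linked in right after the
 * 300 entry, keeping the list head pointing at the best-rated clocksource,
 * which clocksource_find_best() relies on.
 */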

/**
 * __clocksource_update_freq_scale - Used to update the clocksource with a new freq
 * @cs: clocksource to be registered
 * @scale: Scale factor multiplied against freq to get clocksource hz
 * @freq: clocksource frequency (cycles per second) divided by scale
 *
 * This should only be called from the clocksource->enable() method.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * __clocksource_update_freq_hz() or __clocksource_update_freq_khz() helper
 * functions.
 */
void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	u64 sec;

	/*
	 * Default clocksources are *special* and self-define their mult/shift.
	 * But, you're not special, so you should specify a freq value.
	 */
	if (freq) {
		/*
		 * Calc the maximum number of seconds which we can run before
		 * wrapping around. For clocksources which have a mask > 32-bit
		 * we need to limit the max sleep time to have a good
		 * conversion precision. 10 minutes is still a reasonable
		 * amount. That results in a shift value of 24 for a
		 * clocksource with mask >= 40-bit and f >= 4GHz. That maps to
		 * ~ 0.06ppm granularity for NTP.
		 */
		sec = cs->mask;
		do_div(sec, freq);
		do_div(sec, scale);
		if (!sec)
			sec = 1;
		else if (sec > 600 && cs->mask > UINT_MAX)
			sec = 600;

		clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
				       NSEC_PER_SEC / scale, sec * scale);
	}
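	/*
	 * For example (hypothetical hardware): a 56-bit counter at 100 MHz
	 * would wrap only after 2^56 / 1e8 (~7.2e8) seconds, so sec is
	 * clamped to 600 above and the mult/shift pair only has to cover
	 * ten minutes between overflow checks.
	 */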

	/*
	 * If the uncertainty margin is not specified, calculate it. If
	 * both scale and freq are non-zero, calculate the clock period, but
	 * bound below at 2*WATCHDOG_MAX_SKEW, that is, 500ppm by default.
	 * However, if either of scale or freq is zero, be very conservative
	 * and take the tens-of-milliseconds WATCHDOG_THRESHOLD value
	 * for the uncertainty margin. Allow stupidly small uncertainty
	 * margins to be specified by the caller for testing purposes,
	 * but warn to discourage production use of this capability.
	 *
	 * Bottom line: The sum of the uncertainty margins of the
	 * watchdog clocksource and the clocksource under test will be at
	 * least 500ppm by default. For more information, please see the
	 * comment preceding CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US above.
	 */
	if (scale && freq && !cs->uncertainty_margin) {
		cs->uncertainty_margin = NSEC_PER_SEC / (scale * freq);
		if (cs->uncertainty_margin < 2 * WATCHDOG_MAX_SKEW)
			cs->uncertainty_margin = 2 * WATCHDOG_MAX_SKEW;
	} else if (!cs->uncertainty_margin) {
		cs->uncertainty_margin = WATCHDOG_THRESHOLD;
	}
	WARN_ON_ONCE(cs->uncertainty_margin < 2 * WATCHDOG_MAX_SKEW);

	/*
	 * Ensure clocksources that have large 'mult' values don't overflow
	 * when adjusted.
	 */
	cs->maxadj = clocksource_max_adjustment(cs);
	while (freq && ((cs->mult + cs->maxadj < cs->mult)
		|| (cs->mult - cs->maxadj > cs->mult))) {
		cs->mult >>= 1;
		cs->shift--;
		cs->maxadj = clocksource_max_adjustment(cs);
	}

	/*
	 * Only warn for *special* clocksources that self-define
	 * their mult/shift values and don't specify a freq.
	 */
	WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
		"timekeeping: Clocksource %s might overflow on 11%% adjustment\n",
		cs->name);

	clocksource_update_max_deferment(cs);

	pr_info("%s: mask: 0x%llx max_cycles: 0x%llx, max_idle_ns: %lld ns\n",
		cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns);
}
EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);

/**
 * __clocksource_register_scale - Used to install new clocksources
 * @cs: clocksource to be registered
 * @scale: Scale factor multiplied against freq to get clocksource hz
 * @freq: clocksource frequency (cycles per second) divided by scale
 *
 * Returns -EBUSY if registration fails, zero otherwise.
 *
 * This *SHOULD NOT* be called directly! Please use the
 * clocksource_register_hz() or clocksource_register_khz() helper functions.
 */
int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
{
	unsigned long flags;

	clocksource_arch_init(cs);

	if (WARN_ON_ONCE((unsigned int)cs->id >= CSID_MAX))
		cs->id = CSID_GENERIC;
	if (cs->vdso_clock_mode < 0 ||
	    cs->vdso_clock_mode >= VDSO_CLOCKMODE_MAX) {
		pr_warn("clocksource %s registered with invalid VDSO mode %d. Disabling VDSO support.\n",
			cs->name, cs->vdso_clock_mode);
		cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE;
	}

	/* Initialize mult/shift and max_idle_ns */
	__clocksource_update_freq_scale(cs, scale, freq);

	/* Add clocksource to the clocksource list */
	mutex_lock(&clocksource_mutex);

	clocksource_watchdog_lock(&flags);
	clocksource_enqueue(cs);
	clocksource_enqueue_watchdog(cs);
	clocksource_watchdog_unlock(&flags);

	clocksource_select();
	clocksource_select_watchdog(false);
	__clocksource_suspend_select(cs);
	mutex_unlock(&clocksource_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(__clocksource_register_scale);
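
/*
 * Typical driver-side usage (a sketch with hypothetical names, not a
 * driver from the tree): a platform driver with a free-running 32-bit,
 * 13.56 MHz counter would register itself roughly like this:
 *
 *	static u64 foo_read(struct clocksource *cs)
 *	{
 *		return readl_relaxed(foo_base + FOO_COUNTER);
 *	}
 *
 *	static struct clocksource foo_cs = {
 *		.name	= "foo",
 *		.rating	= 200,
 *		.read	= foo_read,
 *		.mask	= CLOCKSOURCE_MASK(32),
 *		.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
 *	};
 *
 *	clocksource_register_hz(&foo_cs, 13560000);
 *
 * clocksource_register_hz() is a thin wrapper that ends up in
 * __clocksource_register_scale(cs, 1, hz) above.
 */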

static void __clocksource_change_rating(struct clocksource *cs, int rating)
{
	list_del(&cs->list);
	cs->rating = rating;
	clocksource_enqueue(cs);
}

/**
 * clocksource_change_rating - Change the rating of a registered clocksource
 * @cs: clocksource to be changed
 * @rating: new rating
 */
void clocksource_change_rating(struct clocksource *cs, int rating)
{
	unsigned long flags;

	mutex_lock(&clocksource_mutex);
	clocksource_watchdog_lock(&flags);
	__clocksource_change_rating(cs, rating);
	clocksource_watchdog_unlock(&flags);

	clocksource_select();
	clocksource_select_watchdog(false);
	clocksource_suspend_select(false);
	mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_change_rating);

/*
 * Unbind clocksource @cs. Called with clocksource_mutex held
 */
static int clocksource_unbind(struct clocksource *cs)
{
	unsigned long flags;

	if (clocksource_is_watchdog(cs)) {
		/* Select and try to install a replacement watchdog. */
		clocksource_select_watchdog(true);
		if (clocksource_is_watchdog(cs))
			return -EBUSY;
	}

	if (cs == curr_clocksource) {
		/* Select and try to install a replacement clock source */
		clocksource_select_fallback();
		if (curr_clocksource == cs)
			return -EBUSY;
	}

	if (clocksource_is_suspend(cs)) {
		/*
		 * Select and try to install a replacement suspend clocksource.
		 * If there is no replacement suspend clocksource, we will just
		 * let the clocksource go and have no suspend clocksource.
		 */
		clocksource_suspend_select(true);
	}

	clocksource_watchdog_lock(&flags);
	clocksource_dequeue_watchdog(cs);
	list_del_init(&cs->list);
	clocksource_watchdog_unlock(&flags);

	return 0;
}

/**
 * clocksource_unregister - remove a registered clocksource
 * @cs: clocksource to be unregistered
 */
int clocksource_unregister(struct clocksource *cs)
{
	int ret = 0;

	mutex_lock(&clocksource_mutex);
	if (!list_empty(&cs->list))
		ret = clocksource_unbind(cs);
	mutex_unlock(&clocksource_mutex);
	return ret;
}
EXPORT_SYMBOL(clocksource_unregister);

#ifdef CONFIG_SYSFS
/**
 * current_clocksource_show - sysfs interface for current clocksource
 * @dev: unused
 * @attr: unused
 * @buf: char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing current clocksource.
 */
static ssize_t current_clocksource_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	count = sysfs_emit(buf, "%s\n", curr_clocksource->name);
	mutex_unlock(&clocksource_mutex);

	return count;
}

ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt)
{
	size_t ret = cnt;

	/* strings from sysfs write are not 0 terminated! */
	if (!cnt || cnt >= CS_NAME_LEN)
		return -EINVAL;

	/* strip off the trailing '\n': */
	if (buf[cnt-1] == '\n')
		cnt--;
	if (cnt > 0)
		memcpy(dst, buf, cnt);
	dst[cnt] = 0;
	return ret;
}

/**
 * current_clocksource_store - interface for manually overriding clocksource
 * @dev: unused
 * @attr: unused
 * @buf: name of override clocksource
 * @count: length of buffer
 *
 * Takes input from sysfs interface for manually overriding the default
 * clocksource selection.
 */
static ssize_t current_clocksource_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	ssize_t ret;

	mutex_lock(&clocksource_mutex);

	ret = sysfs_get_uname(buf, override_name, count);
	if (ret >= 0)
		clocksource_select();

	mutex_unlock(&clocksource_mutex);

	return ret;
}
static DEVICE_ATTR_RW(current_clocksource);

/**
 * unbind_clocksource_store - interface for manually unbinding clocksource
 * @dev: unused
 * @attr: unused
 * @buf: unused
 * @count: length of buffer
 *
 * Takes input from sysfs interface for manually unbinding a clocksource.
 */
static ssize_t unbind_clocksource_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	struct clocksource *cs;
	char name[CS_NAME_LEN];
	ssize_t ret;

	ret = sysfs_get_uname(buf, name, count);
	if (ret < 0)
		return ret;

	ret = -ENODEV;
	mutex_lock(&clocksource_mutex);
	list_for_each_entry(cs, &clocksource_list, list) {
		if (strcmp(cs->name, name))
			continue;
		ret = clocksource_unbind(cs);
		break;
	}
	mutex_unlock(&clocksource_mutex);

	return ret ? ret : count;
}
static DEVICE_ATTR_WO(unbind_clocksource);

/**
 * available_clocksource_show - sysfs interface for listing clocksource
 * @dev: unused
 * @attr: unused
 * @buf: char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing registered clocksources
 */
static ssize_t available_clocksource_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct clocksource *src;
	ssize_t count = 0;

	mutex_lock(&clocksource_mutex);
	list_for_each_entry(src, &clocksource_list, list) {
		/*
		 * Don't show non-HRES clocksource if the tick code is
		 * in one shot mode (highres=on or nohz=on)
		 */
		if (!tick_oneshot_mode_active() ||
		    (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
			count += snprintf(buf + count,
					  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
					  "%s ", src->name);
	}
	mutex_unlock(&clocksource_mutex);

	count += snprintf(buf + count,
			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");

	return count;
}
static DEVICE_ATTR_RO(available_clocksource);

static struct attribute *clocksource_attrs[] = {
	&dev_attr_current_clocksource.attr,
	&dev_attr_unbind_clocksource.attr,
	&dev_attr_available_clocksource.attr,
	NULL
};
ATTRIBUTE_GROUPS(clocksource);

static const struct bus_type clocksource_subsys = {
	.name = "clocksource",
	.dev_name = "clocksource",
};

static struct device device_clocksource = {
	.id	= 0,
	.bus	= &clocksource_subsys,
	.groups	= clocksource_groups,
};

static int __init init_clocksource_sysfs(void)
{
	int error = subsys_system_register(&clocksource_subsys, NULL);

	if (!error)
		error = device_register(&device_clocksource);

	return error;
}

device_initcall(init_clocksource_sysfs);
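
/*
 * Userspace view of the attributes above (illustrative shell session;
 * the clocksource names shown are examples):
 *
 *	$ cat /sys/devices/system/clocksource/clocksource0/available_clocksource
 *	tsc hpet acpi_pm
 *	$ echo hpet > /sys/devices/system/clocksource/clocksource0/current_clocksource
 *	$ cat /sys/devices/system/clocksource/clocksource0/current_clocksource
 *	hpet
 */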
#endif /* CONFIG_SYSFS */

/**
 * boot_override_clocksource - boot clock override
 * @str: override name
 *
 * Takes a clocksource= boot argument and uses it
 * as the clocksource override name.
 */
static int __init boot_override_clocksource(char *str)
{
	mutex_lock(&clocksource_mutex);
	if (str)
		strscpy(override_name, str, sizeof(override_name));
	mutex_unlock(&clocksource_mutex);
	return 1;
}

__setup("clocksource=", boot_override_clocksource);
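
/*
 * For example, booting with "clocksource=hpet" on the kernel command line
 * stores "hpet" in override_name, and clocksource_select() will then prefer
 * it over higher-rated clocksources (the name "hpet" is just an example).
 */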

/**
 * boot_override_clock - Compatibility layer for deprecated boot option
 * @str: override name
 *
 * DEPRECATED! Takes a clock= boot argument and uses it
 * as the clocksource override name
 */
static int __init boot_override_clock(char *str)
{
	if (!strcmp(str, "pmtmr")) {
		pr_warn("clock=pmtmr is deprecated - use clocksource=acpi_pm\n");
		return boot_override_clocksource("acpi_pm");
	}
	pr_warn("clock= boot option is deprecated - use clocksource=xyz\n");
	return boot_override_clocksource(str);
}

__setup("clock=", boot_override_clock);