#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/acpi_pmtmr.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/percpu.h>
#include <linux/timex.h>
#include <linux/static_key.h>

#include <asm/hpet.h>
#include <asm/timer.h>
#include <asm/vgtod.h>
#include <asm/time.h>
#include <asm/delay.h>
#include <asm/hypervisor.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>
#include <asm/geode.h>

unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);

unsigned int __read_mostly tsc_khz;
EXPORT_SYMBOL(tsc_khz);

/*
 * TSC can be unstable due to cpufreq or due to unsynced TSCs
 */
static int __read_mostly tsc_unstable;

/* native_sched_clock() is called before tsc_init(), so
   we must start with the TSC soft disabled to prevent
   erroneous rdtsc usage on !cpu_has_tsc processors */
static int __read_mostly tsc_disabled = -1;

static DEFINE_STATIC_KEY_FALSE(__use_tsc);

int tsc_clocksource_reliable;

/*
 * Use a ring-buffer like data structure, where a writer advances the head by
 * writing a new data entry and a reader advances the tail when it observes a
 * new entry.
 *
 * Writers are made to wait on readers until there's space to write a new
 * entry.
 *
 * This means that we can always use an {offset, mul} pair to compute a ns
 * value that is 'roughly' in the right direction, even if we're writing a new
 * {offset, mul} pair during the clock read.
 *
 * The downside is that we can no longer guarantee strict monotonicity
 * (assuming the TSC was monotonic to begin with): although we compute the
 * intersection point of the two clock slopes and make sure the time is
 * continuous at the point of switching, we can no longer guarantee a reader
 * is strictly before or after the switch point.
 *
 * It does mean a reader no longer needs to disable IRQs in order to avoid
 * CPU-Freq updates messing with its readings, and similarly an NMI reader
 * will no longer run the risk of hitting half-written state.
 */
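
/*
 * Illustrative reader-side usage of this scheme (a sketch of what
 * cycles_2_ns() below effectively does via cyc2ns_read_begin()/end()):
 *
 *	struct cyc2ns_data *data = cyc2ns_read_begin();
 *	ns = data->cyc2ns_offset +
 *	     mul_u64_u32_shr(cycles, data->cyc2ns_mul, data->cyc2ns_shift);
 *	cyc2ns_read_end(data);
 */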

struct cyc2ns {
	struct cyc2ns_data data[2];	/*  0 + 2*24 = 48 */
	struct cyc2ns_data *head;	/* 48 + 8    = 56 */
	struct cyc2ns_data *tail;	/* 56 + 8    = 64 */
}; /* exactly fits one cacheline */

static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);

struct cyc2ns_data *cyc2ns_read_begin(void)
{
	struct cyc2ns_data *head;

	preempt_disable();

	head = this_cpu_read(cyc2ns.head);
	/*
	 * Ensure we observe the entry when we observe the pointer to it.
	 * matches the wmb from cyc2ns_write_end().
	 */
	smp_read_barrier_depends();
	head->__count++;
	barrier();

	return head;
}

void cyc2ns_read_end(struct cyc2ns_data *head)
{
	barrier();
	/*
	 * If we're the outer most nested read; update the tail pointer
	 * when we're done. This notifies possible pending writers
	 * that we've observed the head pointer and that the other
	 * entry is now free.
	 */
	if (!--head->__count) {
		/*
		 * x86-TSO does not reorder writes with older reads;
		 * therefore once this write becomes visible to another
		 * cpu, we must be finished reading the cyc2ns_data.
		 *
		 * matches with cyc2ns_write_begin().
		 */
		this_cpu_write(cyc2ns.tail, head);
	}
	preempt_enable();
}

/*
 * Begin writing a new @data entry for @cpu.
 *
 * Assumes some sort of write side lock; currently 'provided' by the assumption
 * that cpufreq will call its notifiers sequentially.
 */
static struct cyc2ns_data *cyc2ns_write_begin(int cpu)
{
	struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);
	struct cyc2ns_data *data = c2n->data;

	if (data == c2n->head)
		data++;

	/* XXX send an IPI to @cpu in order to guarantee a read? */

	/*
	 * When we observe the tail write from cyc2ns_read_end(),
	 * the cpu must be done with that entry and it's safe
	 * to start writing to it.
	 */
	while (c2n->tail == data)
		cpu_relax();

	return data;
}

static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
{
	struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);

	/*
	 * Ensure the @data writes are visible before we publish the
	 * entry. Matches the data-dependency in cyc2ns_read_begin().
	 */
	smp_wmb();

	ACCESS_ONCE(c2n->head) = data;
}

/*
 * Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 *  basic equation:
 *              ns = cycles / (freq / ns_per_sec)
 *              ns = cycles * (ns_per_sec / freq)
 *              ns = cycles * (10^9 / (cpu_khz * 10^3))
 *              ns = cycles * (10^6 / cpu_khz)
 *
 *      Then we use scaling math (suggested by george@mvista.com) to get:
 *              ns = cycles * (10^6 * SC / cpu_khz) / SC
 *              ns = cycles * cyc2ns_scale / SC
 *
 *      And since SC is a constant power of two, we can convert the div
 *  into a shift. The larger SC is, the more accurate the conversion, but
 *  cyc2ns_scale needs to be a 32-bit value so that 32-bit multiplication
 *  (64-bit result) can be used.
 *
 *  We can use khz divisor instead of mhz to keep a better precision.
 *  (mathieu.desnoyers@polymtl.ca)
 *
 *                      -johnstul@us.ibm.com "math is hard, lets go shopping!"
 */
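
/*
 * Worked example (illustrative only; the actual {mul, shift} pair is
 * computed by clocks_calc_mult_shift() in set_cyc2ns_scale() below):
 * for a 2 GHz CPU, cpu_khz = 2,000,000, and with SC = 2^10 we get
 *
 *	cyc2ns_scale = 10^6 * 1024 / 2,000,000 = 512
 *	ns           = cycles * 512 >> 10      = cycles / 2
 *
 * i.e. 0.5 ns per cycle, as expected for a 2 GHz clock.
 */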

static void cyc2ns_data_init(struct cyc2ns_data *data)
{
	data->cyc2ns_mul = 0;
	data->cyc2ns_shift = 0;
	data->cyc2ns_offset = 0;
	data->__count = 0;
}

static void cyc2ns_init(int cpu)
{
	struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);

	cyc2ns_data_init(&c2n->data[0]);
	cyc2ns_data_init(&c2n->data[1]);

	c2n->head = c2n->data;
	c2n->tail = c2n->data;
}

static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	struct cyc2ns_data *data, *tail;
	unsigned long long ns;

	/*
	 * See cyc2ns_read_*() for details; replicated in order to avoid
	 * an extra few instructions that came with the abstraction.
	 * Notably, it allows us to only do the __count and tail update
	 * dance when it's actually needed.
	 */

	preempt_disable_notrace();
	data = this_cpu_read(cyc2ns.head);
	tail = this_cpu_read(cyc2ns.tail);

	if (likely(data == tail)) {
		ns = data->cyc2ns_offset;
		ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);
	} else {
		data->__count++;

		barrier();

		ns = data->cyc2ns_offset;
		ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);

		barrier();

		if (!--data->__count)
			this_cpu_write(cyc2ns.tail, data);
	}
	preempt_enable_notrace();

	return ns;
}

static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
{
	unsigned long long tsc_now, ns_now;
	struct cyc2ns_data *data;
	unsigned long flags;

	local_irq_save(flags);
	sched_clock_idle_sleep_event();

	if (!cpu_khz)
		goto done;

	data = cyc2ns_write_begin(cpu);

	tsc_now = rdtsc();
	ns_now = cycles_2_ns(tsc_now);

	/*
	 * Compute a new multiplier as per the above comment and ensure our
	 * time function is continuous; see the comment near struct
	 * cyc2ns_data.
	 */
	clocks_calc_mult_shift(&data->cyc2ns_mul, &data->cyc2ns_shift, cpu_khz,
			       NSEC_PER_MSEC, 0);

	/*
	 * cyc2ns_shift is exported via arch_perf_update_userpage() where it is
	 * not expected to be greater than 31 due to the original published
	 * conversion algorithm shifting a 32-bit value (now specifies a 64-bit
	 * value) - refer perf_event_mmap_page documentation in perf_event.h.
	 */
	if (data->cyc2ns_shift == 32) {
		data->cyc2ns_shift = 31;
		data->cyc2ns_mul >>= 1;
	}
	data->cyc2ns_offset = ns_now -
		mul_u64_u32_shr(tsc_now, data->cyc2ns_mul, data->cyc2ns_shift);

	cyc2ns_write_end(cpu, data);

done:
	sched_clock_idle_wakeup_event(0);
	local_irq_restore(flags);
}

/*
 * Scheduler clock - returns current time in nanosec units.
 */
u64 native_sched_clock(void)
{
	if (static_branch_likely(&__use_tsc)) {
		u64 tsc_now = rdtsc();

		/* return the value in ns */
		return cycles_2_ns(tsc_now);
	}

	/*
	 * Fall back to jiffies if there's no TSC available:
	 * ( But note that we still use it if the TSC is marked
	 *   unstable. We do this because unlike Time Of Day,
	 *   the scheduler clock tolerates small errors and it's
	 *   very important for it to be as fast as the platform
	 *   can achieve it. )
	 */

	/* No locking but a rare wrong value is not a big deal: */
	return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
}

/*
 * Generate a sched_clock if you already have a TSC value.
 */
u64 native_sched_clock_from_tsc(u64 tsc)
{
	return cycles_2_ns(tsc);
}

/* We need to define a real function for sched_clock, to override the
   weak default version */
#ifdef CONFIG_PARAVIRT
unsigned long long sched_clock(void)
{
	return paravirt_sched_clock();
}
#else
unsigned long long
sched_clock(void) __attribute__((alias("native_sched_clock")));
#endif

int check_tsc_unstable(void)
{
	return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);

int check_tsc_disabled(void)
{
	return tsc_disabled;
}
EXPORT_SYMBOL_GPL(check_tsc_disabled);

#ifdef CONFIG_X86_TSC
int __init notsc_setup(char *str)
{
	pr_warn("Kernel compiled with CONFIG_X86_TSC, cannot disable TSC completely\n");
	tsc_disabled = 1;
	return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 * in cpu/common.c
 */
int __init notsc_setup(char *str)
{
	setup_clear_cpu_cap(X86_FEATURE_TSC);
	return 1;
}
#endif

__setup("notsc", notsc_setup);

static int no_sched_irq_time;

static int __init tsc_setup(char *str)
{
	if (!strcmp(str, "reliable"))
		tsc_clocksource_reliable = 1;
	if (!strncmp(str, "noirqtime", 9))
		no_sched_irq_time = 1;
	if (!strcmp(str, "unstable"))
		mark_tsc_unstable("boot parameter");
	return 1;
}

__setup("tsc=", tsc_setup);

#define MAX_RETRIES	5
#define SMI_TRESHOLD	50000
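
/*
 * SMI_TRESHOLD is in TSC cycles: tsc_read_refs() below retries (up to
 * MAX_RETRIES times) whenever the two TSC reads bracketing the HPET/PMTIMER
 * read are further apart than this, on the assumption that such a gap means
 * the sample was disturbed by an SMI.
 */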

/*
 * Read TSC and the reference counters. Take care of SMI disturbance
 */
static u64 tsc_read_refs(u64 *p, int hpet)
{
	u64 t1, t2;
	int i;

	for (i = 0; i < MAX_RETRIES; i++) {
		t1 = get_cycles();
		if (hpet)
			*p = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
		else
			*p = acpi_pm_read_early();
		t2 = get_cycles();
		if ((t2 - t1) < SMI_TRESHOLD)
			return t2;
	}
	return ULLONG_MAX;
}

/*
 * Calculate the TSC frequency from HPET reference
 */
static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
{
	u64 tmp;

	if (hpet2 < hpet1)
		hpet2 += 0x100000000ULL;
	hpet2 -= hpet1;
	tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
	do_div(tmp, 1000000);
	deltatsc = div64_u64(deltatsc, tmp);

	return (unsigned long) deltatsc;
}

/*
 * Calculate the TSC frequency from PMTimer reference
 */
static unsigned long calc_pmtimer_ref(u64 deltatsc, u64 pm1, u64 pm2)
{
	u64 tmp;

	if (!pm1 && !pm2)
		return ULONG_MAX;

	if (pm2 < pm1)
		pm2 += (u64)ACPI_PM_OVRRUN;
	pm2 -= pm1;
	tmp = pm2 * 1000000000LL;
	do_div(tmp, PMTMR_TICKS_PER_SEC);
	do_div(deltatsc, tmp);

	return (unsigned long) deltatsc;
}

#define CAL_MS		10
#define CAL_LATCH	(PIT_TICK_RATE / (1000 / CAL_MS))
#define CAL_PIT_LOOPS	1000

#define CAL2_MS		50
#define CAL2_LATCH	(PIT_TICK_RATE / (1000 / CAL2_MS))
#define CAL2_PIT_LOOPS	5000
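
/*
 * With PIT_TICK_RATE = 1193182 Hz these work out to roughly
 * CAL_LATCH ~= 11931 (a 10 ms countdown) and CAL2_LATCH ~= 59659
 * (a 50 ms countdown); the numbers here are rounded, the compiler
 * does the exact integer math.
 */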


/*
 * Try to calibrate the TSC against the Programmable
 * Interrupt Timer and return the frequency of the TSC
 * in kHz.
 *
 * Return ULONG_MAX on failure to calibrate.
 */
static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
{
	u64 tsc, t1, t2, delta;
	unsigned long tscmin, tscmax;
	int pitcnt;

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Setup CTC channel 2 for mode 0 (interrupt on terminal
	 * count mode), binary count. Load the latch register
	 * (LSB then MSB) to begin the countdown.
	 */
	outb(0xb0, 0x43);
	outb(latch & 0xff, 0x42);
	outb(latch >> 8, 0x42);

	tsc = t1 = t2 = get_cycles();

	pitcnt = 0;
	tscmax = 0;
	tscmin = ULONG_MAX;
	while ((inb(0x61) & 0x20) == 0) {
		t2 = get_cycles();
		delta = t2 - tsc;
		tsc = t2;
		if ((unsigned long) delta < tscmin)
			tscmin = (unsigned int) delta;
		if ((unsigned long) delta > tscmax)
			tscmax = (unsigned int) delta;
		pitcnt++;
	}

	/*
	 * Sanity checks:
	 *
	 * If we were not able to read the PIT more than loopmin
	 * times, then we have been hit by a massive SMI
	 *
	 * If the maximum is 10 times larger than the minimum,
	 * then we got hit by an SMI as well.
	 */
	if (pitcnt < loopmin || tscmax > 10 * tscmin)
		return ULONG_MAX;

	/* Calculate the PIT value */
	delta = t2 - t1;
	do_div(delta, ms);
	return delta;
}

/*
 * This reads the current MSB of the PIT counter, and
 * checks if we are running on sufficiently fast and
 * non-virtualized hardware.
 *
 * Our expectations are:
 *
 *  - the PIT is running at roughly 1.19MHz
 *
 *  - each IO is going to take about 1us on real hardware,
 *    but we allow it to be much faster (by a factor of 10) or
 *    _slightly_ slower (ie we allow up to a 2us read+counter
 *    update - anything else implies an unacceptably slow CPU
 *    or PIT for the fast calibration to work).
 *
 *  - with 256 PIT ticks to read the value, we have 214us to
 *    see the same MSB (and overhead like doing a single TSC
 *    read per MSB value etc).
 *
 *  - We're doing 2 reads per loop (LSB, MSB), and we expect
 *    them each to take about a microsecond on real hardware.
 *    So we expect a count value of around 100. But we'll be
 *    generous, and accept anything over 50.
 *
 *  - if the PIT is stuck, and we see *many* more reads, we
 *    return early (and the next caller of pit_expect_msb()
 *    will then consider it a failure when it doesn't see the
 *    next expected value).
 *
 * These expectations mean that we know that we have seen the
 * transition from one expected value to another with a fairly
 * high accuracy, and we didn't miss any events. We can thus
 * use the TSC value at the transitions to calculate a pretty
 * good value for the TSC frequency.
 */
static inline int pit_verify_msb(unsigned char val)
{
	/* Ignore LSB */
	inb(0x42);
	return inb(0x42) == val;
}

static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
{
	int count;
	u64 tsc = 0, prev_tsc = 0;

	for (count = 0; count < 50000; count++) {
		if (!pit_verify_msb(val))
			break;
		prev_tsc = tsc;
		tsc = get_cycles();
	}
	*deltap = get_cycles() - prev_tsc;
	*tscp = tsc;

	/*
	 * We require _some_ success, but the quality control
	 * will be based on the error terms on the TSC values.
	 */
	return count > 5;
}

/*
 * How many MSB values do we want to see? We aim for
 * a maximum error rate of 500ppm (in practice the
 * real error is much smaller), but refuse to spend
 * more than 50ms on it.
 */
#define MAX_QUICK_PIT_MS 50
#define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)
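
/*
 * With PIT_TICK_RATE = 1193182 Hz this works out to roughly 233 iterations,
 * i.e. we watch at most ~233 MSB decrements (each worth 256 PIT ticks)
 * before giving up on the fast calibration.
 */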

static unsigned long quick_pit_calibrate(void)
{
	int i;
	u64 tsc, delta;
	unsigned long d1, d2;

	/* Set the Gate high, disable speaker */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/*
	 * Counter 2, mode 0 (one-shot), binary count
	 *
	 * NOTE! Mode 2 decrements by two (and then the
	 * output is flipped each time, giving the same
	 * final output frequency as a decrement-by-one),
	 * so mode 0 is much better when looking at the
	 * individual counts.
	 */
	outb(0xb0, 0x43);

	/* Start at 0xffff */
	outb(0xff, 0x42);
	outb(0xff, 0x42);

	/*
	 * The PIT starts counting at the next edge, so we
	 * need to delay for a microsecond. The easiest way
	 * to do that is to just read back the 16-bit counter
	 * once from the PIT.
	 */
	pit_verify_msb(0);

	if (pit_expect_msb(0xff, &tsc, &d1)) {
		for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) {
			if (!pit_expect_msb(0xff-i, &delta, &d2))
				break;

			delta -= tsc;

			/*
			 * Extrapolate the error and fail fast if the error will
			 * never be below 500 ppm.
			 */
			if (i == 1 &&
			    d1 + d2 >= (delta * MAX_QUICK_PIT_ITERATIONS) >> 11)
				return 0;

			/*
			 * Iterate until the error is less than 500 ppm
			 */
			if (d1+d2 >= delta >> 11)
				continue;

			/*
			 * Check the PIT one more time to verify that
			 * all TSC reads were stable wrt the PIT.
			 *
			 * This also guarantees serialization of the
			 * last cycle read ('d2') in pit_expect_msb.
			 */
			if (!pit_verify_msb(0xfe - i))
				break;
			goto success;
		}
	}
	pr_info("Fast TSC calibration failed\n");
	return 0;

success:
	/*
	 * Ok, if we get here, then we've seen the
	 * MSB of the PIT decrement 'i' times, and the
	 * error has shrunk to less than 500 ppm.
	 *
	 * As a result, we can depend on there not being
	 * any odd delays anywhere, and the TSC reads are
	 * reliable (within the error).
	 *
	 * kHz = ticks / time-in-seconds / 1000;
	 * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000
	 * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000)
	 */
	delta *= PIT_TICK_RATE;
	do_div(delta, i*256*1000);
	pr_info("Fast TSC calibration using PIT\n");
	return delta;
}

/**
 * native_calibrate_tsc - calibrate the tsc on boot
 */
unsigned long native_calibrate_tsc(void)
{
	u64 tsc1, tsc2, delta, ref1, ref2;
	unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
	unsigned long flags, latch, ms, fast_calibrate;
	int hpet = is_hpet_enabled(), i, loopmin;

	/* Calibrate TSC using MSR for Intel Atom SoCs */
	local_irq_save(flags);
	fast_calibrate = try_msr_calibrate_tsc();
	local_irq_restore(flags);
	if (fast_calibrate)
		return fast_calibrate;

	local_irq_save(flags);
	fast_calibrate = quick_pit_calibrate();
	local_irq_restore(flags);
	if (fast_calibrate)
		return fast_calibrate;

	/*
	 * Run up to 3 calibration loops to get the lowest frequency value
	 * (the best estimate). We use two different calibration modes
	 * here:
	 *
	 * 1) PIT loop. We set the PIT Channel 2 to oneshot mode and
	 * load a timeout of 50ms. We read the time right after we
	 * started the timer and wait until the PIT count down reaches
	 * zero. In each wait loop iteration we read the TSC and check
	 * the delta to the previous read. We keep track of the min
	 * and max values of that delta. The delta is mostly defined
	 * by the IO time of the PIT access, so we can detect when an
	 * SMI/SMM disturbance happened between the two reads. If the
	 * maximum time is significantly larger than the minimum time,
	 * then we discard the result and have another try.
	 *
	 * 2) Reference counter. If available we use the HPET or the
	 * PMTIMER as a reference to check the sanity of that value.
	 * We use separate TSC readouts and check inside of the
	 * reference read for an SMI/SMM disturbance. We discard
	 * disturbed values here as well. We do that around the PIT
	 * calibration delay loop as we have to wait for a certain
	 * amount of time anyway.
	 */

	/* Preset PIT loop values */
	latch = CAL_LATCH;
	ms = CAL_MS;
	loopmin = CAL_PIT_LOOPS;

	for (i = 0; i < 3; i++) {
		unsigned long tsc_pit_khz;

		/*
		 * Read the start value and the reference count of
		 * hpet/pmtimer when available. Then do the PIT
		 * calibration, which will take at least 50ms, and
		 * read the end value.
		 */
		local_irq_save(flags);
		tsc1 = tsc_read_refs(&ref1, hpet);
		tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin);
		tsc2 = tsc_read_refs(&ref2, hpet);
		local_irq_restore(flags);

		/* Pick the lowest PIT TSC calibration so far */
		tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);

		/* hpet or pmtimer available ? */
		if (ref1 == ref2)
			continue;

		/* Check whether the sampling was disturbed by an SMI */
		if (tsc1 == ULLONG_MAX || tsc2 == ULLONG_MAX)
			continue;

		tsc2 = (tsc2 - tsc1) * 1000000LL;
		if (hpet)
			tsc2 = calc_hpet_ref(tsc2, ref1, ref2);
		else
			tsc2 = calc_pmtimer_ref(tsc2, ref1, ref2);

		tsc_ref_min = min(tsc_ref_min, (unsigned long) tsc2);

		/* Check the reference deviation */
		delta = ((u64) tsc_pit_min) * 100;
		do_div(delta, tsc_ref_min);

		/*
		 * If both calibration results are inside a 10% window
		 * then we can be sure that the calibration
		 * succeeded. We break out of the loop right away. We
		 * use the reference value, as it is more precise.
		 */
		if (delta >= 90 && delta <= 110) {
			pr_info("PIT calibration matches %s. %d loops\n",
				hpet ? "HPET" : "PMTIMER", i + 1);
			return tsc_ref_min;
		}

		/*
		 * Check whether PIT failed more than once. This
		 * happens in virtualized environments. We need to
		 * give the virtual PC a slightly longer timeframe for
		 * the HPET/PMTIMER to make the result precise.
		 */
		if (i == 1 && tsc_pit_min == ULONG_MAX) {
			latch = CAL2_LATCH;
			ms = CAL2_MS;
			loopmin = CAL2_PIT_LOOPS;
		}
	}

	/*
	 * Now check the results.
	 */
	if (tsc_pit_min == ULONG_MAX) {
		/* PIT gave no useful value */
		pr_warn("Unable to calibrate against PIT\n");

		/* We don't have an alternative source, disable TSC */
		if (!hpet && !ref1 && !ref2) {
			pr_notice("No reference (HPET/PMTIMER) available\n");
			return 0;
		}

		/* The alternative source failed as well, disable TSC */
		if (tsc_ref_min == ULONG_MAX) {
			pr_warn("HPET/PMTIMER calibration failed\n");
			return 0;
		}

		/* Use the alternative source */
		pr_info("using %s reference calibration\n",
			hpet ? "HPET" : "PMTIMER");

		return tsc_ref_min;
	}

	/* We don't have an alternative source, use the PIT calibration value */
	if (!hpet && !ref1 && !ref2) {
		pr_info("Using PIT calibration value\n");
		return tsc_pit_min;
	}

	/* The alternative source failed, use the PIT calibration value */
	if (tsc_ref_min == ULONG_MAX) {
		pr_warn("HPET/PMTIMER calibration failed. Using PIT calibration.\n");
		return tsc_pit_min;
	}

	/*
	 * The calibration values differ too much. When in doubt, we use
	 * the PIT value as we know that there are PMTIMERs around
	 * running at double speed. At least we let the user know:
	 */
	pr_warn("PIT calibration deviates from %s: %lu %lu\n",
		hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
	pr_info("Using PIT calibration value\n");
	return tsc_pit_min;
}

int recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
	unsigned long cpu_khz_old = cpu_khz;

	if (cpu_has_tsc) {
		tsc_khz = x86_platform.calibrate_tsc();
		cpu_khz = tsc_khz;
		cpu_data(0).loops_per_jiffy =
			cpufreq_scale(cpu_data(0).loops_per_jiffy,
					cpu_khz_old, cpu_khz);
		return 0;
	} else
		return -ENODEV;
#else
	return -ENODEV;
#endif
}

EXPORT_SYMBOL(recalibrate_cpu_khz);


static unsigned long long cyc2ns_suspend;

void tsc_save_sched_clock_state(void)
{
	if (!sched_clock_stable())
		return;

	cyc2ns_suspend = sched_clock();
}

/*
 * Even on processors with invariant TSC, TSC gets reset in some of the
 * ACPI system sleep states. And in some systems the BIOS seems to reinit
 * the TSC to an arbitrary value (still sync'd across cpu's) during resume
 * from such sleep states. To cope with this, recompute the cyc2ns_offset
 * for each cpu so that sched_clock() continues from the point where it was
 * left off during suspend.
 */
void tsc_restore_sched_clock_state(void)
{
	unsigned long long offset;
	unsigned long flags;
	int cpu;

	if (!sched_clock_stable())
		return;

	local_irq_save(flags);

	/*
	 * We're coming out of suspend, there's no concurrency yet; don't
	 * bother being nice about the RCU stuff, just write to both
	 * data fields.
	 */

	this_cpu_write(cyc2ns.data[0].cyc2ns_offset, 0);
	this_cpu_write(cyc2ns.data[1].cyc2ns_offset, 0);

	offset = cyc2ns_suspend - sched_clock();

	for_each_possible_cpu(cpu) {
		per_cpu(cyc2ns.data[0].cyc2ns_offset, cpu) = offset;
		per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset;
	}

	local_irq_restore(flags);
}

#ifdef CONFIG_CPU_FREQ

/* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
 * changes.
 *
 * RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
 * not that important because current Opteron setups do not support
 * scaling on SMP anyway.
 *
 * Should fix up last_tsc too. Currently gettimeofday in the
 * first tick after the change will be slightly wrong.
 */

static unsigned int ref_freq;
static unsigned long loops_per_jiffy_ref;
static unsigned long tsc_khz_ref;

static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct cpufreq_freqs *freq = data;
	unsigned long *lpj;

	if (cpu_has(&cpu_data(freq->cpu), X86_FEATURE_CONSTANT_TSC))
		return 0;

	lpj = &boot_cpu_data.loops_per_jiffy;
#ifdef CONFIG_SMP
	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
		lpj = &cpu_data(freq->cpu).loops_per_jiffy;
#endif

	if (!ref_freq) {
		ref_freq = freq->old;
		loops_per_jiffy_ref = *lpj;
		tsc_khz_ref = tsc_khz;
	}
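	/*
	 * Rescale on the PRE notifier when speeding up and on the POST
	 * notifier when slowing down, so that during the transition window
	 * loops_per_jiffy/tsc_khz reflect the higher of the two frequencies
	 * (this is our reading of the condition below; the net effect either
	 * way is a scale to freq->new).
	 */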
	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
		*lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

		tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			mark_tsc_unstable("cpufreq changes");

		set_cyc2ns_scale(tsc_khz, freq->cpu);
	}

	return 0;
}

static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call  = time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
	if (!cpu_has_tsc)
		return 0;
	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;
	cpufreq_register_notifier(&time_cpufreq_notifier_block,
				CPUFREQ_TRANSITION_NOTIFIER);
	return 0;
}

core_initcall(cpufreq_tsc);

#endif /* CONFIG_CPU_FREQ */

/* clocksource code */

static struct clocksource clocksource_tsc;

/*
 * We used to compare the TSC to the cycle_last value in the clocksource
 * structure to avoid a nasty time-warp. This can be observed in a
 * very small window right after one CPU updated cycle_last under
 * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
 * is smaller than the cycle_last reference value due to a TSC which
 * is slightly behind. This delta is nowhere else observable, but in
 * that case it results in a forward time jump in the range of hours
 * due to the unsigned delta calculation of the time keeping core
 * code, which is necessary to support wrapping clocksources like pm
 * timer.
 *
 * This sanity check is now done in the core timekeeping code,
 * checking the result of read_tsc() - cycle_last for being negative.
 * That works because CLOCKSOURCE_MASK(64) does not mask out any bit.
 */
static cycle_t read_tsc(struct clocksource *cs)
{
	return (cycle_t)rdtsc_ordered();
}

/*
 * .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc()
 */
static struct clocksource clocksource_tsc = {
	.name		= "tsc",
	.rating		= 300,
	.read		= read_tsc,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS |
			  CLOCK_SOURCE_MUST_VERIFY,
	.archdata	= { .vclock_mode = VCLOCK_TSC },
};

void mark_tsc_unstable(char *reason)
{
	if (!tsc_unstable) {
		tsc_unstable = 1;
		clear_sched_clock_stable();
		disable_sched_clock_irqtime();
		pr_info("Marking TSC unstable due to %s\n", reason);
		/* Change only the rating, when not registered */
		if (clocksource_tsc.mult)
			clocksource_mark_unstable(&clocksource_tsc);
		else {
			clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
			clocksource_tsc.rating = 0;
		}
	}
}

EXPORT_SYMBOL_GPL(mark_tsc_unstable);

static void __init check_system_tsc_reliable(void)
{
#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
	if (is_geode_lx()) {
		/* RTSC counts during suspend */
#define RTSC_SUSP 0x100
		unsigned long res_low, res_high;

		rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
		/* Geode_LX - the OLPC CPU has a very reliable TSC */
		if (res_low & RTSC_SUSP)
			tsc_clocksource_reliable = 1;
	}
#endif
	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
		tsc_clocksource_reliable = 1;
}

/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
int unsynchronized_tsc(void)
{
	if (!cpu_has_tsc || tsc_unstable)
		return 1;

#ifdef CONFIG_SMP
	if (apic_is_clustered_box())
		return 1;
#endif

	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;

	if (tsc_clocksource_reliable)
		return 0;
	/*
	 * Intel systems are normally all synchronized.
	 * Exceptions must mark TSC as unstable:
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
		/* assume multi socket systems are not synchronized: */
		if (num_possible_cpus() > 1)
			return 1;
	}

	return 0;
}


static void tsc_refine_calibration_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work);
/**
 * tsc_refine_calibration_work - Further refine tsc freq calibration
 * @work: ignored.
 *
 * This function uses delayed work over a period of a
 * second to further refine the TSC freq value. Since this is
 * timer based, instead of loop based, we don't block the boot
 * process while this longer calibration is done.
 *
 * If there are any calibration anomalies (too many SMIs, etc),
 * or the refined calibration is off by more than 1% from the
 * fast early calibration, we throw out the new calibration and
 * use the early calibration.
 */
static void tsc_refine_calibration_work(struct work_struct *work)
{
	static u64 tsc_start = -1, ref_start;
	static int hpet;
	u64 tsc_stop, ref_stop, delta;
	unsigned long freq;

	/* Don't bother refining TSC on unstable systems */
	if (check_tsc_unstable())
		goto out;

	/*
	 * Since the work is started early in boot, we may be
	 * delayed the first time we expire. So set the workqueue
	 * again once we know timers are working.
	 */
	if (tsc_start == -1) {
		/*
		 * Only set hpet once, to avoid mixing hardware
		 * if the hpet becomes enabled later.
		 */
		hpet = is_hpet_enabled();
		schedule_delayed_work(&tsc_irqwork, HZ);
		tsc_start = tsc_read_refs(&ref_start, hpet);
		return;
	}

	tsc_stop = tsc_read_refs(&ref_stop, hpet);

	/* hpet or pmtimer available ? */
	if (ref_start == ref_stop)
		goto out;

	/* Check whether the sampling was disturbed by an SMI */
	if (tsc_start == ULLONG_MAX || tsc_stop == ULLONG_MAX)
		goto out;

	delta = tsc_stop - tsc_start;
	delta *= 1000000LL;
	if (hpet)
		freq = calc_hpet_ref(delta, ref_start, ref_stop);
	else
		freq = calc_pmtimer_ref(delta, ref_start, ref_stop);

	/* Make sure we're within 1% */
	if (abs(tsc_khz - freq) > tsc_khz/100)
		goto out;

	tsc_khz = freq;
	pr_info("Refined TSC clocksource calibration: %lu.%03lu MHz\n",
		(unsigned long)tsc_khz / 1000,
		(unsigned long)tsc_khz % 1000);

out:
	clocksource_register_khz(&clocksource_tsc, tsc_khz);
}


static int __init init_tsc_clocksource(void)
{
	if (!cpu_has_tsc || tsc_disabled > 0 || !tsc_khz)
		return 0;

	if (tsc_clocksource_reliable)
		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
	/* lower the rating if we already know it's unstable: */
	if (check_tsc_unstable()) {
		clocksource_tsc.rating = 0;
		clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
	}

	if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
		clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;

	/*
	 * Trust the results of the earlier calibration on systems
	 * exporting a reliable TSC.
	 */
	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) {
		clocksource_register_khz(&clocksource_tsc, tsc_khz);
		return 0;
	}

	schedule_delayed_work(&tsc_irqwork, 0);
	return 0;
}
/*
 * We use device_initcall here, to ensure we run after the hpet
 * is fully initialized, which may occur at fs_initcall time.
 */
device_initcall(init_tsc_clocksource);

void __init tsc_init(void)
{
	u64 lpj;
	int cpu;

	x86_init.timers.tsc_pre_init();

	if (!cpu_has_tsc) {
		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
		return;
	}

	tsc_khz = x86_platform.calibrate_tsc();
	cpu_khz = tsc_khz;

	if (!tsc_khz) {
		mark_tsc_unstable("could not calculate TSC khz");
		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
		return;
	}

	pr_info("Detected %lu.%03lu MHz processor\n",
		(unsigned long)cpu_khz / 1000,
		(unsigned long)cpu_khz % 1000);

	/*
	 * Secondary CPUs do not run through tsc_init(), so set up
	 * all the scale factors for all CPUs, assuming the same
	 * speed as the bootup CPU. (cpufreq notifiers will fix this
	 * up if their speed diverges)
	 */
	for_each_possible_cpu(cpu) {
		cyc2ns_init(cpu);
		set_cyc2ns_scale(cpu_khz, cpu);
	}

	if (tsc_disabled > 0)
		return;

	/* now allow native_sched_clock() to use rdtsc */

	tsc_disabled = 0;
	static_branch_enable(&__use_tsc);

	if (!no_sched_irq_time)
		enable_sched_clock_irqtime();

	lpj = ((u64)tsc_khz * 1000);
	do_div(lpj, HZ);
	lpj_fine = lpj;

	use_tsc_delay();

	if (unsynchronized_tsc())
		mark_tsc_unstable("TSCs unsynchronized");

	check_system_tsc_reliable();
}

#ifdef CONFIG_SMP
/*
 * If we have a constant TSC and are using the TSC for the delay loop,
 * we can skip clock calibration if another cpu in the same socket has already
 * been calibrated. This assumes that CONSTANT_TSC applies to all
 * cpus in the socket - this should be a safe assumption.
 */
unsigned long calibrate_delay_is_known(void)
{
	int i, cpu = smp_processor_id();

	if (!tsc_disabled && !cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC))
		return 0;

	for_each_online_cpu(i)
		if (cpu_data(i).phys_proc_id == cpu_data(cpu).phys_proc_id)
			return cpu_data(i).loops_per_jiffy;
	return 0;
}
#endif