/*
 * Xen time implementation.
 *
 * This is implemented in terms of a clocksource driver which uses
 * the hypervisor clock as a nanosecond timebase, and a clockevent
 * driver which uses the hypervisor's timer mechanism.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/kernel_stat.h>
#include <linux/math64.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/pvclock_gtod.h>

#include <asm/pvclock.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/events.h>
#include <xen/features.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include "xen-ops.h"

/* Xen may fire a timer up to this many ns early */
#define TIMER_SLOP	100000
#define NS_PER_TICK	(1000000000LL / HZ)

/* runstate info updated by Xen */
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);

/* snapshots of runstate info */
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot);

/* unused ns of stolen time */
static DEFINE_PER_CPU(u64, xen_residual_stolen);

/* Return a consistent snapshot of a 64-bit time/counter value. */
static u64 get64(const u64 *p)
{
	u64 ret;

	if (BITS_PER_LONG < 64) {
		u32 *p32 = (u32 *)p;
		u32 h, l;

		/*
		 * Read high then low, and then make sure high is
		 * still the same; this will only loop if low wraps
		 * and carries into high.
		 * XXX some clean way to make this endian-proof?
		 */
		do {
			h = p32[1];
			barrier();
			l = p32[0];
			barrier();
		} while (p32[1] != h);

		ret = (((u64)h) << 32) | l;
	} else
		ret = *p;

	return ret;
}

/*
 * Runstate accounting
 */
static void get_runstate_snapshot(struct vcpu_runstate_info *res)
{
	u64 state_time;
	struct vcpu_runstate_info *state;

	BUG_ON(preemptible());

	state = this_cpu_ptr(&xen_runstate);

	/*
	 * The runstate info is always updated by the hypervisor on
	 * the current CPU, so there's no need to use anything
	 * stronger than a compiler barrier when fetching it.
	 */
	do {
		state_time = get64(&state->state_entry_time);
		barrier();
		*res = *state;
		barrier();
	} while (get64(&state->state_entry_time) != state_time);
}

/* return true when a vcpu could run but has no real cpu to run on */
bool xen_vcpu_stolen(int vcpu)
{
	return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
}

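/*
 * Register this CPU's runstate area with the hypervisor so that Xen
 * keeps it up to date; see get_runstate_snapshot() for the reader side.
 */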
void xen_setup_runstate_info(int cpu)
{
	struct vcpu_register_runstate_memory_area area;

	area.addr.v = &per_cpu(xen_runstate, cpu);

	if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
			       cpu, &area))
		BUG();
}

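/*
 * Convert the nanoseconds this VCPU spent runnable or offline since the
 * last snapshot into steal ticks; leftover nanoseconds are carried over
 * in xen_residual_stolen until the next call.
 */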
static void do_stolen_accounting(void)
{
	struct vcpu_runstate_info state;
	struct vcpu_runstate_info *snap;
	s64 runnable, offline, stolen;
	cputime_t ticks;

	get_runstate_snapshot(&state);

	WARN_ON(state.state != RUNSTATE_running);

	snap = this_cpu_ptr(&xen_runstate_snapshot);

	/* work out how much time the VCPU has not been runn*ing* */
	runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
	offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline];

	*snap = state;

	/* Add the appropriate number of ticks of stolen time,
	   including any left-overs from last time. */
	stolen = runnable + offline + __this_cpu_read(xen_residual_stolen);

	if (stolen < 0)
		stolen = 0;

	ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
	__this_cpu_write(xen_residual_stolen, stolen);
	account_steal_ticks(ticks);
}

/* Get the TSC speed from Xen */
static unsigned long xen_tsc_khz(void)
{
	struct pvclock_vcpu_time_info *info =
		&HYPERVISOR_shared_info->vcpu_info[0].time;

	return pvclock_tsc_khz(info);
}

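/*
 * Read the Xen system time from this CPU's pvclock area. Preemption is
 * disabled so we can't migrate between picking up the per-cpu pointer
 * and reading through it.
 */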
cycle_t xen_clocksource_read(void)
{
	struct pvclock_vcpu_time_info *src;
	cycle_t ret;

	preempt_disable_notrace();
	src = &__this_cpu_read(xen_vcpu)->time;
	ret = pvclock_clocksource_read(src);
	preempt_enable_notrace();
	return ret;
}

static cycle_t xen_clocksource_get_cycles(struct clocksource *cs)
{
	return xen_clocksource_read();
}

static void xen_read_wallclock(struct timespec *ts)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	struct pvclock_wall_clock *wall_clock = &(s->wc);
	struct pvclock_vcpu_time_info *vcpu_time;

	vcpu_time = &get_cpu_var(xen_vcpu)->time;
	pvclock_read_wallclock(wall_clock, vcpu_time, ts);
	put_cpu_var(xen_vcpu);
}

static void xen_get_wallclock(struct timespec *now)
{
	xen_read_wallclock(now);
}

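/*
 * Setting the wallclock from a guest is not supported here and always
 * fails; dom0 keeps the native set_wallclock instead (see
 * xen_init_time_ops() below).
 */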
static int xen_set_wallclock(const struct timespec *now)
{
	return -1;
}

static int xen_pvclock_gtod_notify(struct notifier_block *nb,
				   unsigned long was_set, void *priv)
{
	/* Protected by the calling core code serialization */
	static struct timespec next_sync;

	struct xen_platform_op op;
	struct timespec now;

	now = __current_kernel_time();

	/*
	 * We only take the expensive HV call when the clock was set
	 * or when the 11-minute RTC synchronization period has elapsed.
	 */
	if (!was_set && timespec_compare(&now, &next_sync) < 0)
		return NOTIFY_OK;

	op.cmd = XENPF_settime;
	op.u.settime.secs = now.tv_sec;
	op.u.settime.nsecs = now.tv_nsec;
	op.u.settime.system_time = xen_clocksource_read();

	(void)HYPERVISOR_dom0_op(&op);

	/*
	 * Move the next drift compensation time 11 minutes
	 * ahead. That's emulating the sync_cmos_clock() update for
	 * the hardware RTC.
	 */
	next_sync = now;
	next_sync.tv_sec += 11 * 60;

	return NOTIFY_OK;
}

static struct notifier_block xen_pvclock_gtod_notifier = {
	.notifier_call = xen_pvclock_gtod_notify,
};

static struct clocksource xen_clocksource __read_mostly = {
	.name = "xen",
	.rating = 400,
	.read = xen_clocksource_get_cycles,
	.mask = ~0,
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};

/*
	Xen clockevent implementation

	Xen has two clockevent implementations:

	The old timer_op one works with all released versions of Xen prior
	to version 3.0.4.  This version of the hypervisor provides a
	single-shot timer with nanosecond resolution.  However, a 100Hz
	tick shares the same event channel and is delivered while the
	vcpu is running.  We don't care about or use this tick, but it
	will cause the core time code to think the timer fired too soon,
	and will end up resetting it each time.  It could be filtered, but
	doing so has complications when the ktime clocksource is not yet
	the xen clocksource (ie, at boot time).

	The new vcpu_op-based timer interface allows the tick timer period
	to be changed or turned off.  The tick timer is not useful as a
	periodic timer because events are only delivered to running vcpus.
	The one-shot timer can report when a timeout is in the past, so
	set_next_event is capable of returning -ETIME when appropriate.
	This interface is used when available.
*/


/*
	Get a hypervisor absolute time.  In theory we could maintain an
	offset between the kernel's time and the hypervisor's time, and
	apply that to a kernel's absolute timeout.  Unfortunately the
	hypervisor and kernel times can drift even if the kernel is using
	the Xen clocksource, because ntp can warp the kernel's clocksource.
*/
static s64 get_abs_timeout(unsigned long delta)
{
	return xen_clocksource_read() + delta;
}

static int xen_timerop_shutdown(struct clock_event_device *evt)
{
	/* cancel timeout */
	HYPERVISOR_set_timer_op(0);

	return 0;
}

static int xen_timerop_set_next_event(unsigned long delta,
				      struct clock_event_device *evt)
{
	WARN_ON(!clockevent_state_oneshot(evt));

	if (HYPERVISOR_set_timer_op(get_abs_timeout(delta)) < 0)
		BUG();

	/* We may have missed the deadline, but there's no real way of
	   knowing for sure.  If the event was in the past, then we'll
	   get an immediate interrupt. */

	return 0;
}

static const struct clock_event_device xen_timerop_clockevent = {
	.name = "xen",
	.features = CLOCK_EVT_FEAT_ONESHOT,

	.max_delta_ns = 0xffffffff,
	.min_delta_ns = TIMER_SLOP,

	.mult = 1,
	.shift = 0,
	.rating = 500,

	.set_state_shutdown = xen_timerop_shutdown,
	.set_next_event = xen_timerop_set_next_event,
};

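/*
 * vcpu_op interface: shut down by stopping both the one-shot and the
 * periodic timer for this VCPU.
 */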
static int xen_vcpuop_shutdown(struct clock_event_device *evt)
{
	int cpu = smp_processor_id();

	if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, cpu, NULL) ||
	    HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
		BUG();

	return 0;
}

static int xen_vcpuop_set_oneshot(struct clock_event_device *evt)
{
	int cpu = smp_processor_id();

	if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
		BUG();

	return 0;
}

static int xen_vcpuop_set_next_event(unsigned long delta,
				     struct clock_event_device *evt)
{
	int cpu = smp_processor_id();
	struct vcpu_set_singleshot_timer single;
	int ret;

	WARN_ON(!clockevent_state_oneshot(evt));

	single.timeout_abs_ns = get_abs_timeout(delta);
	/* Get an event anyway, even if the timeout is already expired */
	single.flags = 0;

	ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &single);
	BUG_ON(ret != 0);

	return ret;
}

static const struct clock_event_device xen_vcpuop_clockevent = {
	.name = "xen",
	.features = CLOCK_EVT_FEAT_ONESHOT,

	.max_delta_ns = 0xffffffff,
	.min_delta_ns = TIMER_SLOP,

	.mult = 1,
	.shift = 0,
	.rating = 500,

	.set_state_shutdown = xen_vcpuop_shutdown,
	.set_state_oneshot = xen_vcpuop_set_oneshot,
	.set_next_event = xen_vcpuop_set_next_event,
};

static const struct clock_event_device *xen_clockevent =
	&xen_timerop_clockevent;

struct xen_clock_event_device {
	struct clock_event_device evt;
	char name[16];
};
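/* evt.irq == -1 means this CPU's timer VIRQ has not been bound yet. */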
static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt.irq = -1 };

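/* Per-cpu timer interrupt: run the clockevent handler, then account steal time. */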
static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = this_cpu_ptr(&xen_clock_events.evt);
	irqreturn_t ret;

	ret = IRQ_NONE;
	if (evt->event_handler) {
		evt->event_handler(evt);
		ret = IRQ_HANDLED;
	}

	do_stolen_accounting();

	return ret;
}

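/* Unbind a CPU's timer IRQ on hotplug; the boot CPU's timer is never torn down. */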
void xen_teardown_timer(int cpu)
{
	struct clock_event_device *evt;
	BUG_ON(cpu == 0);
	evt = &per_cpu(xen_clock_events, cpu).evt;

	if (evt->irq >= 0) {
		unbind_from_irqhandler(evt->irq, NULL);
		evt->irq = -1;
	}
}

void xen_setup_timer(int cpu)
{
	struct xen_clock_event_device *xevt = &per_cpu(xen_clock_events, cpu);
	struct clock_event_device *evt = &xevt->evt;
	int irq;

	WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu);
	if (evt->irq >= 0)
		xen_teardown_timer(cpu);

	printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);

	snprintf(xevt->name, sizeof(xevt->name), "timer%d", cpu);

	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
				      IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER|
				      IRQF_FORCE_RESUME|IRQF_EARLY_RESUME,
				      xevt->name, NULL);
	(void)xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX);

	memcpy(evt, xen_clockevent, sizeof(*evt));

	evt->cpumask = cpumask_of(cpu);
	evt->irq = irq;
}


void xen_setup_cpu_clockevents(void)
{
	clockevents_register_device(this_cpu_ptr(&xen_clock_events.evt));
}

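/*
 * After save/restore, resume the pvclock and, when the vcpu_op timer
 * interface is in use, re-disable the hypervisor's periodic tick on
 * every online VCPU.
 */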
void xen_timer_resume(void)
{
	int cpu;

	pvclock_resume();

	if (xen_clockevent != &xen_vcpuop_clockevent)
		return;

	for_each_online_cpu(cpu) {
		if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
			BUG();
	}
}

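/* sched_clock is served directly from the Xen clocksource. */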
static const struct pv_time_ops xen_time_ops __initconst = {
	.sched_clock = xen_clocksource_read,
};

static void __init xen_time_init(void)
{
	int cpu = smp_processor_id();
	struct timespec tp;

	/* Dom0 is never migrated, so there is no penalty for using the TSC there */
	if (xen_initial_domain())
		xen_clocksource.rating = 275;

	clocksource_register_hz(&xen_clocksource, NSEC_PER_SEC);

	if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL) == 0) {
		/* Successfully turned off 100Hz tick, so we have the
		   vcpuop-based timer interface */
		printk(KERN_DEBUG "Xen: using vcpuop timer interface\n");
		xen_clockevent = &xen_vcpuop_clockevent;
	}

	/* Set initial system time with full resolution */
	xen_read_wallclock(&tp);
	do_settimeofday(&tp);

	setup_force_cpu_cap(X86_FEATURE_TSC);

	xen_setup_runstate_info(cpu);
	xen_setup_timer(cpu);
	xen_setup_cpu_clockevents();

	if (xen_initial_domain())
		pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
}

void __init xen_init_time_ops(void)
{
	pv_time_ops = xen_time_ops;

	x86_init.timers.timer_init = xen_time_init;
	x86_init.timers.setup_percpu_clockev = x86_init_noop;
	x86_cpuinit.setup_percpu_clockev = x86_init_noop;

	x86_platform.calibrate_tsc = xen_tsc_khz;
	x86_platform.get_wallclock = xen_get_wallclock;
	/* Dom0 uses the native method to set the hardware RTC. */
	if (!xen_initial_domain())
		x86_platform.set_wallclock = xen_set_wallclock;
}

#ifdef CONFIG_XEN_PVHVM
static void xen_hvm_setup_cpu_clockevents(void)
{
	int cpu = smp_processor_id();
	xen_setup_runstate_info(cpu);
	/*
	 * xen_setup_timer(cpu) - snprintf is bad in atomic context. Hence
	 * doing it in xen_hvm_cpu_notify (which gets called by smp_init
	 * during early bootup and also during CPU hotplug events).
	 */
	xen_setup_cpu_clockevents();
}

void __init xen_hvm_init_time_ops(void)
{
	/* The vector callback is needed; otherwise we cannot receive
	 * interrupts on CPUs other than CPU0, and at this point we
	 * don't know how many CPUs are available. */
	if (!xen_have_vector_callback)
		return;
	if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
		printk(KERN_INFO "Xen doesn't support pvclock on HVM, "
				 "disabling PV timer\n");
		return;
	}

	pv_time_ops = xen_time_ops;
	x86_init.timers.setup_percpu_clockev = xen_time_init;
	x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents;

	x86_platform.calibrate_tsc = xen_tsc_khz;
	x86_platform.get_wallclock = xen_get_wallclock;
	x86_platform.set_wallclock = xen_set_wallclock;
}
#endif