1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
4 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
5 * Copyright (C) 2012-2014 Cisco Systems
6 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
7 * Copyright (C) 2019 Intel Corporation
8 */
9
10 #include <linux/clockchips.h>
11 #include <linux/init.h>
12 #include <linux/interrupt.h>
13 #include <linux/jiffies.h>
14 #include <linux/mm.h>
15 #include <linux/sched.h>
16 #include <linux/spinlock.h>
17 #include <linux/threads.h>
18 #include <asm/irq.h>
19 #include <asm/param.h>
20 #include <kern_util.h>
21 #include <os.h>
22 #include <linux/time-internal.h>
23 #include <linux/um_timetravel.h>
24 #include <shared/init.h>
25
26 #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
enum time_travel_mode time_travel_mode;
EXPORT_SYMBOL_GPL(time_travel_mode);

/* set when "time-travel-start=" was given on the kernel command line */
static bool time_travel_start_set;
/* start value added to the simulated time (see read_persistent_clock64()) */
static unsigned long long time_travel_start;
/* current simulated time in nanoseconds; never moves backwards */
static unsigned long long time_travel_time;
/* pending events, kept sorted by ascending expiry time */
static LIST_HEAD(time_travel_events);
/* period of the simulated periodic timer, in nanoseconds */
static unsigned long long time_travel_timer_interval;
/* expiry time of the earliest queued event (used by the basic-mode alarm) */
static unsigned long long time_travel_next_event;
/* the single event backing the clockevent timer (periodic or oneshot) */
static struct time_travel_event time_travel_timer_event;
/* socket fd to the external scheduler (TT_MODE_EXTERNAL), -1 if none */
static int time_travel_ext_fd = -1;
/* nesting count of time_travel_ext_wait() calls in progress */
static unsigned int time_travel_ext_waiting;
/* last time requested from the controller, to suppress duplicate requests */
static bool time_travel_ext_prev_request_valid;
static unsigned long long time_travel_ext_prev_request;
/* controller-granted "free until" sync point, if one was received */
static bool time_travel_ext_free_until_valid;
static unsigned long long time_travel_ext_free_until;
43
time_travel_set_time(unsigned long long ns)44 static void time_travel_set_time(unsigned long long ns)
45 {
46 if (unlikely(ns < time_travel_time))
47 panic("time-travel: time goes backwards %lld -> %lld\n",
48 time_travel_time, ns);
49 time_travel_time = ns;
50 }
51
/* How time_travel_handle_message() should obtain the next message. */
enum time_travel_message_handling {
	TTMH_IDLE,	/* from the idle loop; IRQs must be disabled on entry */
	TTMH_POLL,	/* poll for readability before reading */
	TTMH_READ,	/* just read; a message is expected to be pending */
};
57
/*
 * Read one message from the time-travel controller socket and handle
 * it: RUN advances the simulated clock, FREE_UNTIL records an external
 * sync point, and an incoming ACK is consumed without a reply (it is
 * matched by the caller, time_travel_ext_req()).  All other handled
 * messages are acknowledged with the same sequence number.
 */
static void time_travel_handle_message(struct um_timetravel_msg *msg,
				       enum time_travel_message_handling mode)
{
	struct um_timetravel_msg resp = {
		.op = UM_TIMETRAVEL_ACK,
	};
	int ret;

	/*
	 * Poll outside the locked section (if we're not called to only read
	 * the response) so we can get interrupts for e.g. virtio while we're
	 * here, but then we need to lock to not get interrupted between the
	 * read of the message and write of the ACK.
	 */
	if (mode != TTMH_READ) {
		bool disabled = irqs_disabled();

		/* idle-loop callers must come in with interrupts disabled */
		BUG_ON(mode == TTMH_IDLE && !disabled);

		if (disabled)
			local_irq_enable();
		/* busy-wait until the controller socket is readable */
		while (os_poll(1, &time_travel_ext_fd) != 0) {
			/* nothing */
		}
		if (disabled)
			local_irq_disable();
	}

	ret = os_read_file(time_travel_ext_fd, msg, sizeof(*msg));

	if (ret == 0)
		panic("time-travel external link is broken\n");
	if (ret != sizeof(*msg))
		panic("invalid time-travel message - %d bytes\n", ret);

	switch (msg->op) {
	default:
		WARN_ONCE(1, "time-travel: unexpected message %lld\n",
			  (unsigned long long)msg->op);
		break;
	case UM_TIMETRAVEL_ACK:
		/* no response to a response */
		return;
	case UM_TIMETRAVEL_RUN:
		time_travel_set_time(msg->time);
		break;
	case UM_TIMETRAVEL_FREE_UNTIL:
		time_travel_ext_free_until_valid = true;
		time_travel_ext_free_until = msg->time;
		break;
	}

	resp.seq = msg->seq;
	os_write_file(time_travel_ext_fd, &resp, sizeof(resp));
}
112
/*
 * Send protocol message @op with payload @time to the controller and
 * process incoming messages until the matching ACK arrives.  Returns
 * the time carried by the ACK; for UM_TIMETRAVEL_GET that time also
 * becomes the new simulated time.
 */
static u64 time_travel_ext_req(u32 op, u64 time)
{
	static int seq;	/* monotonically increasing per-message sequence */
	int mseq = ++seq;
	struct um_timetravel_msg msg = {
		.op = op,
		.time = time,
		.seq = mseq,
	};
	unsigned long flags;

	/*
	 * We need to save interrupts here and only restore when we
	 * got the ACK - otherwise we can get interrupted and send
	 * another request while we're still waiting for an ACK, but
	 * the peer doesn't know we got interrupted and will send
	 * the ACKs in the same order as the message, but we'd need
	 * to see them in the opposite order ...
	 *
	 * This wouldn't matter *too* much, but some ACKs carry the
	 * current time (for UM_TIMETRAVEL_GET) and getting another
	 * ACK without a time would confuse us a lot!
	 *
	 * The sequence number assignment that happens here lets us
	 * debug such message handling issues more easily.
	 */
	local_irq_save(flags);
	os_write_file(time_travel_ext_fd, &msg, sizeof(msg));

	/* msg is reused as the receive buffer until an ACK comes in */
	while (msg.op != UM_TIMETRAVEL_ACK)
		time_travel_handle_message(&msg, TTMH_READ);

	if (msg.seq != mseq)
		panic("time-travel: ACK message has different seqno! op=%d, seq=%d != %d time=%lld\n",
		      msg.op, msg.seq, mseq, msg.time);

	if (op == UM_TIMETRAVEL_GET)
		time_travel_set_time(msg.time);
	local_irq_restore(flags);

	return msg.time;
}
155
/*
 * Wait until @fd becomes readable while staying responsive to the
 * time-travel controller: protocol messages arriving on the control
 * socket are consumed while we wait.  No-op outside external mode.
 */
void __time_travel_wait_readable(int fd)
{
	int fds[2] = { fd, time_travel_ext_fd };
	int ret;

	if (time_travel_mode != TT_MODE_EXTERNAL)
		return;

	/* loop until @fd (index 0) is the ready descriptor */
	while ((ret = os_poll(2, fds))) {
		struct um_timetravel_msg msg;

		/* ret == 1: the controller socket is ready - service it */
		if (ret == 1)
			time_travel_handle_message(&msg, TTMH_READ);
	}
}
EXPORT_SYMBOL_GPL(__time_travel_wait_readable);
172
time_travel_ext_update_request(unsigned long long time)173 static void time_travel_ext_update_request(unsigned long long time)
174 {
175 if (time_travel_mode != TT_MODE_EXTERNAL)
176 return;
177
178 /* asked for exactly this time previously */
179 if (time_travel_ext_prev_request_valid &&
180 time == time_travel_ext_prev_request)
181 return;
182
183 time_travel_ext_prev_request = time;
184 time_travel_ext_prev_request_valid = true;
185 time_travel_ext_req(UM_TIMETRAVEL_REQUEST, time);
186 }
187
/* Push our current simulated time to the external controller. */
void __time_travel_propagate_time(void)
{
	time_travel_ext_req(UM_TIMETRAVEL_UPDATE, time_travel_time);
}
EXPORT_SYMBOL_GPL(__time_travel_propagate_time);
193
194 /* returns true if we must do a wait to the simtime device */
time_travel_ext_request(unsigned long long time)195 static bool time_travel_ext_request(unsigned long long time)
196 {
197 /*
198 * If we received an external sync point ("free until") then we
199 * don't have to request/wait for anything until then, unless
200 * we're already waiting.
201 */
202 if (!time_travel_ext_waiting && time_travel_ext_free_until_valid &&
203 time < time_travel_ext_free_until)
204 return false;
205
206 time_travel_ext_update_request(time);
207 return true;
208 }
209
/*
 * Block in the external scheduler: send UM_TIMETRAVEL_WAIT and then
 * process incoming messages until a UM_TIMETRAVEL_RUN grants us
 * runtime again.  @idle selects TTMH_IDLE handling (we're in the idle
 * loop with interrupts disabled) instead of TTMH_POLL.
 */
static void time_travel_ext_wait(bool idle)
{
	struct um_timetravel_msg msg = {
		.op = UM_TIMETRAVEL_ACK,
	};

	time_travel_ext_prev_request_valid = false;
	time_travel_ext_waiting++;

	time_travel_ext_req(UM_TIMETRAVEL_WAIT, -1);

	/*
	 * Here we are deep in the idle loop, so we have to break out of the
	 * kernel abstraction in a sense and implement this in terms of the
	 * UML system waiting on the VQ interrupt while sleeping, when we get
	 * the signal it'll call time_travel_ext_vq_notify_done() completing the
	 * call.
	 */
	while (msg.op != UM_TIMETRAVEL_RUN)
		time_travel_handle_message(&msg, idle ? TTMH_IDLE : TTMH_POLL);

	time_travel_ext_waiting--;

	/* we might request more stuff while polling - reset when we run */
	time_travel_ext_prev_request_valid = false;
}
236
/*
 * Fetch the controller's current time; the GET ACK's payload updates
 * the simulated clock inside time_travel_ext_req().
 */
static void time_travel_ext_get_time(void)
{
	time_travel_ext_req(UM_TIMETRAVEL_GET, -1);
}
241
/*
 * Move simulated time forward to @ns: in external mode this may have
 * to go through the controller (request + wait for a RUN grant);
 * otherwise the clock is simply set.
 */
static void __time_travel_update_time(unsigned long long ns, bool idle)
{
	bool need_wait = time_travel_mode == TT_MODE_EXTERNAL &&
			 time_travel_ext_request(ns);

	if (need_wait)
		time_travel_ext_wait(idle);
	else
		time_travel_set_time(ns);
}
249
/* Earliest pending event (list is sorted), or NULL if none queued. */
static struct time_travel_event *time_travel_first_event(void)
{
	return list_first_entry_or_null(&time_travel_events,
					struct time_travel_event,
					list);
}
256
/*
 * Insert @e at @time into the time-sorted event list (no-op if it is
 * already pending), then propagate the new earliest expiry to the
 * external controller and remember it for the basic-mode alarm.
 */
static void __time_travel_add_event(struct time_travel_event *e,
				    unsigned long long time)
{
	struct time_travel_event *tmp;
	bool inserted = false;

	if (e->pending)
		return;

	e->pending = true;
	e->time = time;

	list_for_each_entry(tmp, &time_travel_events, list) {
		/*
		 * Add the new entry before one with higher time,
		 * or if they're equal and both on stack, because
		 * in that case we need to unwind the stack in the
		 * right order, and the later event (timer sleep
		 * or such) must be dequeued first.
		 */
		if ((tmp->time > e->time) ||
		    (tmp->time == e->time && tmp->onstack && e->onstack)) {
			list_add_tail(&e->list, &tmp->list);
			inserted = true;
			break;
		}
	}

	if (!inserted)
		list_add_tail(&e->list, &time_travel_events);

	/* the head may have changed - update the next-event bookkeeping */
	tmp = time_travel_first_event();
	time_travel_ext_update_request(tmp->time);
	time_travel_next_event = tmp->time;
}
292
time_travel_add_event(struct time_travel_event * e,unsigned long long time)293 static void time_travel_add_event(struct time_travel_event *e,
294 unsigned long long time)
295 {
296 if (WARN_ON(!e->fn))
297 return;
298
299 __time_travel_add_event(e, time);
300 }
301
/*
 * Periodic timer event handler: re-arm the timer event one interval
 * ahead, then deliver the timer interrupt.
 * NOTE(review): unlike time_travel_oneshot_timer() this is not
 * static - confirm whether any other file actually needs the symbol.
 */
void time_travel_periodic_timer(struct time_travel_event *e)
{
	time_travel_add_event(&time_travel_timer_event,
			      time_travel_time + time_travel_timer_interval);
	deliver_alarm();
}
308
/*
 * Run a dequeued event's handler with the appropriate IRQ-context
 * bookkeeping around it.
 */
static void time_travel_deliver_event(struct time_travel_event *e)
{
	if (e == &time_travel_timer_event) {
		/*
		 * deliver_alarm() does the irq_enter/irq_exit
		 * by itself, so must handle it specially here
		 */
		e->fn(e);
	} else {
		unsigned long flags;

		local_irq_save(flags);
		irq_enter();
		e->fn(e);
		irq_exit();
		local_irq_restore(flags);
	}
}
327
time_travel_del_event(struct time_travel_event * e)328 static bool time_travel_del_event(struct time_travel_event *e)
329 {
330 if (!e->pending)
331 return false;
332 list_del(&e->list);
333 e->pending = false;
334 return true;
335 }
336
/*
 * Advance simulated time to @next, delivering every queued event on
 * the way.  A temporary on-stack marker event is queued at @next so
 * the loop knows when the requested time is reached.  When @idle,
 * "finished" starts out true, so only a single event is processed
 * before returning to the idle loop (which will call in again).
 */
static void time_travel_update_time(unsigned long long next, bool idle)
{
	struct time_travel_event ne = {
		.onstack = true,
	};
	struct time_travel_event *e;
	bool finished = idle;

	/* add it without a handler - we deal with that specifically below */
	__time_travel_add_event(&ne, next);

	do {
		e = time_travel_first_event();

		BUG_ON(!e);
		__time_travel_update_time(e->time, idle);

		/* new events may have been inserted while we were waiting */
		if (e == time_travel_first_event()) {
			BUG_ON(!time_travel_del_event(e));
			BUG_ON(time_travel_time != e->time);

			if (e == &ne) {
				finished = true;
			} else {
				if (e->onstack)
					panic("On-stack event dequeued outside of the stack! time=%lld, event time=%lld, event=%pS\n",
					      time_travel_time, e->time, e);
				time_travel_deliver_event(e);
			}
		}

		e = time_travel_first_event();
		if (e)
			time_travel_ext_update_request(e->time);
	} while (ne.pending && !finished);

	/* the marker may still be queued if we bailed out early */
	time_travel_del_event(&ne);
}
376
/* "Delay" by advancing simulated time @nsec ns, running due events. */
void time_travel_ndelay(unsigned long nsec)
{
	time_travel_update_time(time_travel_time + nsec, false);
}
EXPORT_SYMBOL(time_travel_ndelay);
382
/*
 * Queue an interrupt event at the current (freshly fetched) simulated
 * time.  Only valid in external mode, where the controller is the
 * authority on the current time.
 */
void time_travel_add_irq_event(struct time_travel_event *e)
{
	BUG_ON(time_travel_mode != TT_MODE_EXTERNAL);

	time_travel_ext_get_time();
	/*
	 * We could model interrupt latency here, for now just
	 * don't have any latency at all and request the exact
	 * same time (again) to run the interrupt...
	 */
	time_travel_add_event(e, time_travel_time);
}
EXPORT_SYMBOL_GPL(time_travel_add_irq_event);
396
/* One-shot timer event handler: just deliver the timer interrupt. */
static void time_travel_oneshot_timer(struct time_travel_event *e)
{
	deliver_alarm();
}
401
/*
 * Sleep (from the idle path) for @duration of simulated time.  In
 * basic mode the real OS timer is suspended while simulated time is
 * advanced, then re-armed to match the still-pending timer event.
 */
void time_travel_sleep(unsigned long long duration)
{
	unsigned long long next = time_travel_time + duration;

	if (time_travel_mode == TT_MODE_BASIC)
		os_timer_disable();

	time_travel_update_time(next, true);

	if (time_travel_mode == TT_MODE_BASIC &&
	    time_travel_timer_event.pending) {
		if (time_travel_timer_event.fn == time_travel_periodic_timer) {
			/*
			 * This is somewhat wrong - we should get the first
			 * one sooner like the os_timer_one_shot() below...
			 */
			os_timer_set_interval(time_travel_timer_interval);
		} else {
			os_timer_one_shot(time_travel_timer_event.time - next);
		}
	}
}
424
/*
 * Basic mode: a real alarm signal fired.  Jump simulated time to the
 * expiry we had programmed, dequeue the timer event, and re-queue it
 * one interval ahead if it is the periodic timer.
 */
static void time_travel_handle_real_alarm(void)
{
	time_travel_set_time(time_travel_next_event);

	time_travel_del_event(&time_travel_timer_event);

	if (time_travel_timer_event.fn == time_travel_periodic_timer)
		time_travel_add_event(&time_travel_timer_event,
				      time_travel_time +
				      time_travel_timer_interval);
}
436
/* Remember the periodic timer interval (ns) for re-arming the event. */
static void time_travel_set_interval(unsigned long long interval)
{
	time_travel_timer_interval = interval;
}
441
/*
 * Parse the "=ext:" argument of the form "[ID:]/path/to/socket",
 * connect to the external scheduler and perform the START handshake.
 * Returns 1 on success; the error returns are effectively unreachable
 * because both failure paths panic() first.
 */
static int time_travel_connect_external(const char *socket)
{
	const char *sep;
	unsigned long long id = (unsigned long long)-1;
	int rc;

	if ((sep = strchr(socket, ':'))) {
		/* buffer for the numeric ID prefix, NUL-terminated */
		char buf[25] = {};
		if (sep - socket > sizeof(buf) - 1)
			goto invalid_number;

		memcpy(buf, socket, sep - socket);
		if (kstrtoull(buf, 0, &id)) {
invalid_number:
			panic("time-travel: invalid external ID in string '%s'\n",
			      socket);
			return -EINVAL;
		}

		/* rest of the string is the socket path */
		socket = sep + 1;
	}

	rc = os_connect_socket(socket);
	if (rc < 0) {
		panic("time-travel: failed to connect to external socket %s\n",
		      socket);
		return rc;
	}

	time_travel_ext_fd = rc;

	time_travel_ext_req(UM_TIMETRAVEL_START, id);

	return 1;
}
#else /* CONFIG_UML_TIME_TRAVEL_SUPPORT */
/* with time travel compiled out, the state collapses to constants */
#define time_travel_start_set 0
#define time_travel_start 0
#define time_travel_time 0

/* no-op: there is no simulated time in this configuration */
static inline void time_travel_update_time(unsigned long long ns, bool retearly)
{
}

/* no-op: no simulated alarms to handle */
static inline void time_travel_handle_real_alarm(void)
{
}
489
/*
 * no-op stub; marked inline for consistency with the other stubs
 * above (time_travel_update_time/time_travel_handle_real_alarm).
 */
static inline void time_travel_set_interval(unsigned long long interval)
{
}
493
/* fail link if this actually gets used - deliberately no definition */
extern u64 time_travel_ext_req(u32 op, u64 time);

/* these are empty macros so the struct/fn need not exist */
#define time_travel_add_event(e, time) do { } while (0)
#define time_travel_del_event(e) do { } while (0)
#endif
501
/*
 * Timer signal entry point: in basic mode first bring simulated time
 * up to the programmed expiry, then dispatch the timer IRQ.
 */
void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
	unsigned long flags;

	/*
	 * In basic time-travel mode we still get real interrupts
	 * (signals) but since we don't read time from the OS, we
	 * must update the simulated time here to the expiry when
	 * we get a signal.
	 * This is not the case in inf-cpu mode, since there we
	 * never get any real signals from the OS.
	 */
	if (time_travel_mode == TT_MODE_BASIC)
		time_travel_handle_real_alarm();

	local_irq_save(flags);
	do_IRQ(TIMER_IRQ, regs);
	local_irq_restore(flags);
}
521
itimer_shutdown(struct clock_event_device * evt)522 static int itimer_shutdown(struct clock_event_device *evt)
523 {
524 if (time_travel_mode != TT_MODE_OFF)
525 time_travel_del_event(&time_travel_timer_event);
526
527 if (time_travel_mode != TT_MODE_INFCPU &&
528 time_travel_mode != TT_MODE_EXTERNAL)
529 os_timer_disable();
530
531 return 0;
532 }
533
itimer_set_periodic(struct clock_event_device * evt)534 static int itimer_set_periodic(struct clock_event_device *evt)
535 {
536 unsigned long long interval = NSEC_PER_SEC / HZ;
537
538 if (time_travel_mode != TT_MODE_OFF) {
539 time_travel_del_event(&time_travel_timer_event);
540 time_travel_set_event_fn(&time_travel_timer_event,
541 time_travel_periodic_timer);
542 time_travel_set_interval(interval);
543 time_travel_add_event(&time_travel_timer_event,
544 time_travel_time + interval);
545 }
546
547 if (time_travel_mode != TT_MODE_INFCPU &&
548 time_travel_mode != TT_MODE_EXTERNAL)
549 os_timer_set_interval(interval);
550
551 return 0;
552 }
553
/*
 * clockevents one-shot programming: arm the simulated event and/or
 * the host timer @delta (ticks) from now.
 */
static int itimer_next_event(unsigned long delta,
			     struct clock_event_device *evt)
{
	/*
	 * NOTE(review): the +1 presumably keeps the expiry strictly in
	 * the future / nonzero - confirm against os_timer_one_shot().
	 */
	delta += 1;

	if (time_travel_mode != TT_MODE_OFF) {
		time_travel_del_event(&time_travel_timer_event);
		time_travel_set_event_fn(&time_travel_timer_event,
					 time_travel_oneshot_timer);
		time_travel_add_event(&time_travel_timer_event,
				      time_travel_time + delta);
	}

	if (time_travel_mode != TT_MODE_INFCPU &&
	    time_travel_mode != TT_MODE_EXTERNAL)
		return os_timer_one_shot(delta);

	return 0;
}
573
/* clockevents one-shot state entry: program a minimal-delta expiry */
static int itimer_one_shot(struct clock_event_device *evt)
{
	return itimer_next_event(0, evt);
}
578
/* UML's clock event device, driven by um_timer() / simulated events */
static struct clock_event_device timer_clockevent = {
	.name			= "posix-timer",
	.rating			= 250,
	.cpumask		= cpu_possible_mask,
	.features		= CLOCK_EVT_FEAT_PERIODIC |
				  CLOCK_EVT_FEAT_ONESHOT,
	.set_state_shutdown	= itimer_shutdown,
	.set_state_periodic	= itimer_set_periodic,
	.set_state_oneshot	= itimer_one_shot,
	.set_next_event		= itimer_next_event,
	.shift			= 0,
	.max_delta_ns		= 0xffffffff,
	.max_delta_ticks	= 0xffffffff,
	.min_delta_ns		= TIMER_MIN_DELTA,
	.min_delta_ticks	= TIMER_MIN_DELTA, /* minimum programmable delta, in ticks */
	.irq			= 0,
	.mult			= 1,
};
597
um_timer(int irq,void * dev)598 static irqreturn_t um_timer(int irq, void *dev)
599 {
600 if (get_current()->mm != NULL)
601 {
602 /* userspace - relay signal, results in correct userspace timers */
603 os_alarm_process(get_current()->mm->context.id.u.pid);
604 }
605
606 (*timer_clockevent.event_handler)(&timer_clockevent);
607
608 return IRQ_HANDLED;
609 }
610
/*
 * Clocksource read: simulated time (scaled down by TIMER_MULTIPLIER)
 * in any time-travel mode, host time otherwise.
 */
static u64 timer_read(struct clocksource *cs)
{
	if (time_travel_mode != TT_MODE_OFF) {
		/*
		 * We make reading the timer cost a bit so that we don't get
		 * stuck in loops that expect time to move more than the
		 * exact requested sleep amount, e.g. python's socket server,
		 * see https://bugs.python.org/issue37026.
		 *
		 * However, don't do that when we're in interrupt or such as
		 * then we might recurse into our own processing, and get to
		 * even more waiting, and that's not good - it messes up the
		 * "what do I do next" and onstack event we use to know when
		 * to return from time_travel_update_time().
		 */
		if (!irqs_disabled() && !in_interrupt() && !in_softirq())
			time_travel_update_time(time_travel_time +
						TIMER_MULTIPLIER,
						false);
		return time_travel_time / TIMER_MULTIPLIER;
	}

	return os_nsecs() / TIMER_MULTIPLIER;
}
635
/* UML's clocksource, backed by timer_read() above */
static struct clocksource timer_clocksource = {
	.name		= "timer",
	.rating		= 300,
	.read		= timer_read,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};
643
/*
 * Late timer bring-up: request the timer IRQ, create the host timer,
 * and register the clocksource and clockevent device.  Runs via
 * late_time_init (see time_init() below).
 */
static void __init um_timer_setup(void)
{
	int err;

	err = request_irq(TIMER_IRQ, um_timer, IRQF_TIMER, "hr timer", NULL);
	if (err != 0)
		printk(KERN_ERR "register_timer : request_irq failed - "
		       "errno = %d\n", -err);

	err = os_timer_create();
	if (err != 0) {
		printk(KERN_ERR "creation of timer failed - errno = %d\n", -err);
		return;
	}

	err = clocksource_register_hz(&timer_clocksource, NSEC_PER_SEC/TIMER_MULTIPLIER);
	if (err) {
		printk(KERN_ERR "clocksource_register_hz returned %d\n", err);
		return;
	}
	clockevents_register_device(&timer_clockevent);
}
666
/*
 * Boot wall clock: the configured time-travel-start offset plus the
 * simulated time, the external controller's time-of-day, or else the
 * host's emulated persistent clock.
 */
void read_persistent_clock64(struct timespec64 *ts)
{
	long long nsecs;

	if (time_travel_start_set)
		nsecs = time_travel_start + time_travel_time;
	else if (time_travel_mode == TT_MODE_EXTERNAL)
		nsecs = time_travel_ext_req(UM_TIMETRAVEL_GET_TOD, -1);
	else
		nsecs = os_persistent_clock_emulation();

	set_normalized_timespec64(ts, nsecs / NSEC_PER_SEC,
				  nsecs % NSEC_PER_SEC);
}
681
/*
 * Early time init: install the timer signal handler now; the rest of
 * the setup is deferred to um_timer_setup() via late_time_init.
 */
void __init time_init(void)
{
	timer_set_signal_handler();
	late_time_init = um_timer_setup;
}
687
688 #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
calibrate_delay_is_known(void)689 unsigned long calibrate_delay_is_known(void)
690 {
691 if (time_travel_mode == TT_MODE_INFCPU ||
692 time_travel_mode == TT_MODE_EXTERNAL)
693 return 1;
694 return 0;
695 }
696
setup_time_travel(char * str)697 int setup_time_travel(char *str)
698 {
699 if (strcmp(str, "=inf-cpu") == 0) {
700 time_travel_mode = TT_MODE_INFCPU;
701 timer_clockevent.name = "time-travel-timer-infcpu";
702 timer_clocksource.name = "time-travel-clock";
703 return 1;
704 }
705
706 if (strncmp(str, "=ext:", 5) == 0) {
707 time_travel_mode = TT_MODE_EXTERNAL;
708 timer_clockevent.name = "time-travel-timer-external";
709 timer_clocksource.name = "time-travel-clock-external";
710 return time_travel_connect_external(str + 5);
711 }
712
713 if (!*str) {
714 time_travel_mode = TT_MODE_BASIC;
715 timer_clockevent.name = "time-travel-timer";
716 timer_clocksource.name = "time-travel-clock";
717 return 1;
718 }
719
720 return -EINVAL;
721 }
722
/* command-line registration and user-visible help text */
__setup("time-travel", setup_time_travel);
__uml_help(setup_time_travel,
"time-travel\n"
"This option just enables basic time travel mode, in which the clock/timers\n"
"inside the UML instance skip forward when there's nothing to do, rather than\n"
"waiting for real time to elapse. However, instance CPU speed is limited by\n"
"the real CPU speed, so e.g. a 10ms timer will always fire after ~10ms wall\n"
"clock (but quicker when there's nothing to do).\n"
"\n"
"time-travel=inf-cpu\n"
"This enables time travel mode with infinite processing power, in which there\n"
"are no wall clock timers, and any CPU processing happens - as seen from the\n"
"guest - instantly. This can be useful for accurate simulation regardless of\n"
"debug overhead, physical CPU speed, etc. but is somewhat dangerous as it can\n"
"easily lead to getting stuck (e.g. if anything in the system busy loops).\n"
"\n"
"time-travel=ext:[ID:]/path/to/socket\n"
"This enables time travel mode similar to =inf-cpu, except the system will\n"
"use the given socket to coordinate with a central scheduler, in order to\n"
"have more than one system simultaneously be on simulated time. The virtio\n"
"driver code in UML knows about this so you can also simulate networks and\n"
"devices using it, assuming the device has the right capabilities.\n"
"The optional ID is a 64-bit integer that's sent to the central scheduler.\n");
746
setup_time_travel_start(char * str)747 int setup_time_travel_start(char *str)
748 {
749 int err;
750
751 err = kstrtoull(str, 0, &time_travel_start);
752 if (err)
753 return err;
754
755 time_travel_start_set = 1;
756 return 1;
757 }
758
/* command-line registration and user-visible help text */
__setup("time-travel-start", setup_time_travel_start);
__uml_help(setup_time_travel_start,
"time-travel-start=<seconds>\n"
"Configure the UML instance's wall clock to start at this value rather than\n"
"the host's wall clock at the time of UML boot.\n");
#endif
765