• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 #include <nvhe/clock.h>
3 
4 #include <asm/arch_timer.h>
5 #include <asm/div64.h>
6 
/*
 * Mult/shift slope and epoch published by the host via trace_clock_update()
 * and read locklessly by trace_clock().
 */
static struct kvm_nvhe_clock_data trace_clock_data;
8 
/*
 * Update without any locks! This is fine because tracing, the sole user of
 * this clock, orders the memory accesses and protects against races between
 * reads and updates.
 */
trace_clock_update(struct kvm_nvhe_clock_data * data)14 void trace_clock_update(struct kvm_nvhe_clock_data *data)
15 {
16 	trace_clock_data.mult = data->mult;
17 	trace_clock_data.shift = data->shift;
18 	trace_clock_data.epoch_ns = data->epoch_ns;
19 	trace_clock_data.epoch_cyc = data->epoch_cyc;
20 }
21 
/*
 * This clock relies on host-provided slope and epoch values to return
 * something synchronized with the host. The downside is that its output
 * can't be trusted and must not be used for anything other than debugging.
 */
trace_clock(void)27 u64 trace_clock(void)
28 {
29 	u64 cyc = __arch_counter_get_cntpct() - trace_clock_data.epoch_cyc;
30 	__uint128_t ns;
31 
32 	/*
33 	 * The host kernel can avoid the 64-bits overflow of the multiplication
34 	 * by updating the epoch value with a timer (see
35 	 * kernel/time/clocksource.c). The hypervisor doesn't have that option,
36 	 * so let's do a more costly 128-bits mult here.
37 	 */
38 	ns = (__uint128_t)cyc * trace_clock_data.mult;
39 	ns >>= trace_clock_data.shift;
40 
41 	return (u64)ns + trace_clock_data.epoch_ns;
42 }
43