// SPDX-License-Identifier: GPL-2.0
/*
 * Generic sched_clock() support, to extend low level hardware time
 * counters to full 64-bit ns values.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/kernel.h>
#include <linux/math.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/syscore_ops.h>
#include <linux/hrtimer.h>
#include <linux/sched_clock.h>
#include <linux/seqlock.h>
#include <linux/bitops.h>
#include <trace/hooks/epoch.h>

#include "timekeeping.h"

/**
 * struct clock_data - all data needed for sched_clock() (including
 *                     registration of a new clock source)
 *
 * @seq:		Sequence counter for protecting updates. The lowest
 *			bit is the index for @read_data.
 * @read_data:		Data required to read from sched_clock.
 * @wrap_kt:		Duration for which clock can run before wrapping.
 * @rate:		Tick rate of the registered clock.
 * @actual_read_sched_clock: Registered hardware level clock read function.
 *
 * The ordering of this structure has been chosen to optimize cache
 * performance. In particular 'seq' and 'read_data[0]' (combined) should fit
 * into a single 64-byte cache line.
 */
struct clock_data {
	seqcount_latch_t	seq;
	struct clock_read_data	read_data[2];
	ktime_t			wrap_kt;
	unsigned long		rate;

	u64 (*actual_read_sched_clock)(void);
};

static struct hrtimer sched_clock_timer;
static int irqtime = -1;

core_param(irqtime, irqtime, int, 0400);

static u64 notrace jiffy_sched_clock_read(void)
{
	/*
	 * We don't need to use get_jiffies_64 on 32-bit arches here
	 * because we register with BITS_PER_LONG
	 */
	return (u64)(jiffies - INITIAL_JIFFIES);
}

static struct clock_data cd ____cacheline_aligned = {
	.read_data[0] = { .mult = NSEC_PER_SEC / HZ,
			  .read_sched_clock = jiffy_sched_clock_read, },
	.actual_read_sched_clock = jiffy_sched_clock_read,
};

static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
	return (cyc * mult) >> shift;
}
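
/*
 * Worked example for the conversion above (illustrative numbers, not
 * taken from any particular platform): a counter ticking at 500 MHz
 * advances once every 2 ns, so mult = 2, shift = 0 converts exactly:
 *
 *	cyc_to_ns(1000, 2, 0) == (1000 * 2) >> 0 == 2000 ns
 *
 * A pair like mult = 1 << 23, shift = 22 encodes the same 2 ns per
 * cycle; clocks_calc_mult_shift() prefers such larger values because
 * the extra fractional bits matter for rates that do not divide
 * NSEC_PER_SEC evenly (e.g. 19.2 MHz, where one tick is ~52.083 ns).
 */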

notrace struct clock_read_data *sched_clock_read_begin(unsigned int *seq)
{
	*seq = raw_read_seqcount_latch(&cd.seq);
	return cd.read_data + (*seq & 1);
}

notrace int sched_clock_read_retry(unsigned int seq)
{
	return read_seqcount_latch_retry(&cd.seq, seq);
}

unsigned long long notrace sched_clock(void)
{
	u64 cyc, res;
	unsigned int seq;
	struct clock_read_data *rd;

	do {
		rd = sched_clock_read_begin(&seq);

		cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
		      rd->sched_clock_mask;
		res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);
	} while (sched_clock_read_retry(seq));

	return res;
}
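
/*
 * Note on the masking in sched_clock() above: taking the delta modulo
 * the counter width keeps the elapsed-cycle count correct across a
 * hardware wrap. Illustrative 32-bit example:
 *
 *	epoch_cyc = 0xfffffff0, the counter now reads 0x00000010:
 *	(0x00000010 - 0xfffffff0) & 0xffffffff == 0x20 cycles
 *
 * This only holds if the epoch is refreshed at least once per wrap
 * period, which is what the wrap_kt hrtimer below guarantees.
 */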

/*
 * Updating the data required to read the clock.
 *
 * sched_clock() will never observe mis-matched data even if called from
 * an NMI. We do this by maintaining an odd/even copy of the data and
 * steering sched_clock() to one or the other using a sequence counter.
 * In order to preserve the data cache profile of sched_clock() as much
 * as possible the system reverts back to the even copy when the update
 * completes; the odd copy is used *only* during an update.
 */
static void update_clock_read_data(struct clock_read_data *rd)
{
	/* update the backup (odd) copy with the new data */
	cd.read_data[1] = *rd;

	/* steer readers towards the odd copy */
	raw_write_seqcount_latch(&cd.seq);

	/* now it's safe for us to update the normal (even) copy */
	cd.read_data[0] = *rd;

	/* switch readers back to the even copy */
	raw_write_seqcount_latch(&cd.seq);
}
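
/*
 * Sketch of the latch sequence above, assuming the usual
 * seqcount_latch semantics (each raw_write_seqcount_latch() increments
 * @seq, and readers index the copy with seq & 1):
 *
 *	seq == 2N   (even): readers use read_data[0] (stable, old data)
 *	  write read_data[1]
 *	seq == 2N+1 (odd):  readers use read_data[1] (stable, new data)
 *	  write read_data[0]
 *	seq == 2N+2 (even): readers use read_data[0] (stable, new data)
 *
 * A reader that raced with the update of the copy it picked observes
 * @seq changing and retries, so sched_clock() never computes a result
 * from a half-written clock_read_data.
 */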

/*
 * Atomically update the sched_clock() epoch.
 */
static void update_sched_clock(void)
{
	u64 cyc;
	u64 ns;
	struct clock_read_data rd;

	rd = cd.read_data[0];

	cyc = cd.actual_read_sched_clock();
	ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);

	rd.epoch_ns = ns;
	rd.epoch_cyc = cyc;

	update_clock_read_data(&rd);
}

static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
{
	update_sched_clock();
	hrtimer_forward_now(hrt, cd.wrap_kt);

	return HRTIMER_RESTART;
}

void sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
{
	u64 res, wrap, new_mask, new_epoch, cyc, ns;
	u32 new_mult, new_shift;
	unsigned long r, flags;
	char r_unit;
	struct clock_read_data rd;

	if (cd.rate > rate)
		return;

	/* Cannot register a sched_clock with interrupts on */
	local_irq_save(flags);

	/* Calculate the mult/shift to convert counter ticks to ns. */
	clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);

	new_mask = CLOCKSOURCE_MASK(bits);
	cd.rate = rate;

	/* Calculate how many nanosecs until we risk wrapping */
	wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask, NULL);
	cd.wrap_kt = ns_to_ktime(wrap);

	rd = cd.read_data[0];

	/* Update epoch for new counter and update 'epoch_ns' from old counter */
	new_epoch = read();
	cyc = cd.actual_read_sched_clock();
	ns = rd.epoch_ns + cyc_to_ns((cyc - rd.epoch_cyc) & rd.sched_clock_mask, rd.mult, rd.shift);
	cd.actual_read_sched_clock = read;

	rd.read_sched_clock	= read;
	rd.sched_clock_mask	= new_mask;
	rd.mult			= new_mult;
	rd.shift		= new_shift;
	rd.epoch_cyc		= new_epoch;
	rd.epoch_ns		= ns;

	update_clock_read_data(&rd);

	if (sched_clock_timer.function != NULL) {
		/* update timeout for clock wrap */
		hrtimer_start(&sched_clock_timer, cd.wrap_kt,
			      HRTIMER_MODE_REL_HARD);
	}

	r = rate;
	if (r >= 4000000) {
		r = DIV_ROUND_CLOSEST(r, 1000000);
		r_unit = 'M';
	} else if (r >= 4000) {
		r = DIV_ROUND_CLOSEST(r, 1000);
		r_unit = 'k';
	} else {
		r_unit = ' ';
	}

	/* Calculate the ns resolution of this counter */
	res = cyc_to_ns(1ULL, new_mult, new_shift);

	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
		bits, r, r_unit, res, wrap);

	/* Enable IRQ time accounting if we have a fast enough sched_clock() */
	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
		enable_sched_clock_irqtime();

	local_irq_restore(flags);

	pr_debug("Registered %pS as sched_clock source\n", read);
}
EXPORT_SYMBOL_GPL(sched_clock_register);
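
/*
 * Example caller (hypothetical driver; my_timer_read(), my_timer_base
 * and MY_TIMER_CNT are invented for illustration): a clocksource driver
 * exposing a free-running 56-bit counter at 24 MHz would register it
 * from its init path roughly as:
 *
 *	static u64 notrace my_timer_read(void)
 *	{
 *		return readq_relaxed(my_timer_base + MY_TIMER_CNT);
 *	}
 *
 *	sched_clock_register(my_timer_read, 56, 24000000);
 *
 * Per the cd.rate check above, a later registration only takes effect
 * if its rate is at least that of the currently registered clock.
 */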

void __init generic_sched_clock_init(void)
{
	/*
	 * If no sched_clock() function has been provided at this point,
	 * make it the final one.
	 */
	if (cd.actual_read_sched_clock == jiffy_sched_clock_read)
		sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);

	update_sched_clock();

	/*
	 * Start the timer to keep sched_clock() properly updated and
	 * to set the initial epoch.
	 */
	hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	sched_clock_timer.function = sched_clock_poll;
	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD);
}

/*
 * Clock read function for use when the clock is suspended.
 *
 * This function makes it appear to sched_clock() as if the clock
 * stopped counting at its last update.
 *
 * This function must only be called from the critical
 * section in sched_clock(). It relies on the read_seqcount_retry()
 * at the end of the critical section to be sure we observe the
 * correct copy of 'epoch_cyc'.
 */
static u64 notrace suspended_sched_clock_read(void)
{
	unsigned int seq = raw_read_seqcount_latch(&cd.seq);

	return cd.read_data[seq & 1].epoch_cyc;
}
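
/*
 * With read_sched_clock pointing at suspended_sched_clock_read(), the
 * delta computed in sched_clock() becomes (epoch_cyc - epoch_cyc) &
 * mask == 0, so the result stays pinned at epoch_ns: a clock that read
 * 5000000000 ns going into suspend keeps returning 5000000000 ns until
 * sched_clock_resume() restores the real read function and epoch.
 */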

int sched_clock_suspend(void)
{
	struct clock_read_data *rd = &cd.read_data[0];

	update_sched_clock();
	hrtimer_cancel(&sched_clock_timer);
	rd->read_sched_clock = suspended_sched_clock_read;
	trace_android_vh_show_suspend_epoch_val(rd->epoch_ns, rd->epoch_cyc);

	return 0;
}

void sched_clock_resume(void)
{
	struct clock_read_data *rd = &cd.read_data[0];

	rd->epoch_cyc = cd.actual_read_sched_clock();
	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD);
	rd->read_sched_clock = cd.actual_read_sched_clock;
	trace_android_vh_show_resume_epoch_val(rd->epoch_cyc);
}

static struct syscore_ops sched_clock_ops = {
	.suspend = sched_clock_suspend,
	.resume = sched_clock_resume,
};

static int __init sched_clock_syscore_init(void)
{
	register_syscore_ops(&sched_clock_ops);

	return 0;
}
device_initcall(sched_clock_syscore_init);