/*
 * Userspace implementations of gettimeofday() and friends.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */
20
21#include <linux/linkage.h>
22#include <asm/asm-offsets.h>
23#include <asm/unistd.h>
24
25#define NSEC_PER_SEC_LO16	0xca00
26#define NSEC_PER_SEC_HI16	0x3b9a
27
28vdso_data	.req	x6
29seqcnt		.req	w7
30w_tmp		.req	w8
31x_tmp		.req	x8
32
33/*
34 * Conventions for macro arguments:
35 * - An argument is write-only if its name starts with "res".
36 * - All other arguments are read-only, unless otherwise specified.
37 */
38
39	.macro	seqcnt_acquire
409999:	ldr	seqcnt, [vdso_data, #VDSO_TB_SEQ_COUNT]
41	tbnz	seqcnt, #0, 9999b
42	dmb	ishld
43	.endm
44
45	.macro	seqcnt_check fail
46	dmb	ishld
47	ldr	w_tmp, [vdso_data, #VDSO_TB_SEQ_COUNT]
48	cmp	w_tmp, seqcnt
49	b.ne	\fail
50	.endm
51
52	.macro	syscall_check fail
53	ldr	w_tmp, [vdso_data, #VDSO_USE_SYSCALL]
54	cbnz	w_tmp, \fail
55	.endm
56
57	.macro get_nsec_per_sec res
58	mov	\res, #NSEC_PER_SEC_LO16
59	movk	\res, #NSEC_PER_SEC_HI16, lsl #16
60	.endm
61
62	/*
63	 * Returns the clock delta, in nanoseconds left-shifted by the clock
64	 * shift.
65	 */
66	.macro	get_clock_shifted_nsec res, cycle_last, mult
67	/* Read the virtual counter. */
68	isb
69	mrs	x_tmp, cntvct_el0
70	/* Calculate cycle delta and convert to ns. */
71	sub	\res, x_tmp, \cycle_last
72	/* We can only guarantee 56 bits of precision. */
73	movn	x_tmp, #0xff00, lsl #48
74	and	\res, x_tmp, \res
75	mul	\res, \res, \mult
76	/*
77	 * Fake address dependency from the value computed from the counter
78	 * register to subsequent data page accesses so that the sequence
79	 * locking also orders the read of the counter.
80	 */
81	and	x_tmp, \res, xzr
82	add	vdso_data, vdso_data, x_tmp
83	.endm
84
85	/*
86	 * Returns in res_{sec,nsec} the REALTIME timespec, based on the
87	 * "wall time" (xtime) and the clock_mono delta.
88	 */
89	.macro	get_ts_realtime res_sec, res_nsec, \
90			clock_nsec, xtime_sec, xtime_nsec, nsec_to_sec
91	add	\res_nsec, \clock_nsec, \xtime_nsec
92	udiv	x_tmp, \res_nsec, \nsec_to_sec
93	add	\res_sec, \xtime_sec, x_tmp
94	msub	\res_nsec, x_tmp, \nsec_to_sec, \res_nsec
95	.endm
96
97	/*
98	 * Returns in res_{sec,nsec} the timespec based on the clock_raw delta,
99	 * used for CLOCK_MONOTONIC_RAW.
100	 */
101	.macro	get_ts_clock_raw res_sec, res_nsec, clock_nsec, nsec_to_sec
102	udiv	\res_sec, \clock_nsec, \nsec_to_sec
103	msub	\res_nsec, \res_sec, \nsec_to_sec, \clock_nsec
104	.endm
105
106	/* sec and nsec are modified in place. */
107	.macro add_ts sec, nsec, ts_sec, ts_nsec, nsec_to_sec
108	/* Add timespec. */
109	add	\sec, \sec, \ts_sec
110	add	\nsec, \nsec, \ts_nsec
111
112	/* Normalise the new timespec. */
113	cmp	\nsec, \nsec_to_sec
114	b.lt	9999f
115	sub	\nsec, \nsec, \nsec_to_sec
116	add	\sec, \sec, #1
1179999:
118	cmp	\nsec, #0
119	b.ge	9998f
120	add	\nsec, \nsec, \nsec_to_sec
121	sub	\sec, \sec, #1
1229998:
123	.endm
124
125	.macro clock_gettime_return, shift=0
126	.if \shift == 1
127	lsr	x11, x11, x12
128	.endif
129	stp	x10, x11, [x1, #TSPEC_TV_SEC]
130	mov	x0, xzr
131	ret
132	.endm
133
134	.macro jump_slot jumptable, index, label
135	.if (. - \jumptable) != 4 * (\index)
136	.error "Jump slot index mismatch"
137	.endif
138	b	\label
139	.endm
140
141	.text
142
143/* int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz); */
144ENTRY(__kernel_gettimeofday)
145	.cfi_startproc
146	adr	vdso_data, _vdso_data
147	/* If tv is NULL, skip to the timezone code. */
148	cbz	x0, 2f
149
150	/* Compute the time of day. */
1511:	seqcnt_acquire
152	syscall_check fail=4f
153	ldr	x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
154	/* w11 = cs_mono_mult, w12 = cs_shift */
155	ldp	w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
156	ldp	x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
157
158	get_nsec_per_sec res=x9
159	lsl	x9, x9, x12
160
161	get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
162	seqcnt_check fail=1b
163	get_ts_realtime res_sec=x10, res_nsec=x11, \
164		clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
165
166	/* Convert ns to us. */
167	mov	x13, #1000
168	lsl	x13, x13, x12
169	udiv	x11, x11, x13
170	stp	x10, x11, [x0, #TVAL_TV_SEC]
1712:
172	/* If tz is NULL, return 0. */
173	cbz	x1, 3f
174	ldp	w4, w5, [vdso_data, #VDSO_TZ_MINWEST]
175	stp	w4, w5, [x1, #TZ_MINWEST]
1763:
177	mov	x0, xzr
178	ret
1794:
180	/* Syscall fallback. */
181	mov	x8, #__NR_gettimeofday
182	svc	#0
183	ret
184	.cfi_endproc
185ENDPROC(__kernel_gettimeofday)
186
187#define JUMPSLOT_MAX CLOCK_MONOTONIC_COARSE
188
189/* int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp); */
190ENTRY(__kernel_clock_gettime)
191	.cfi_startproc
192	cmp	w0, #JUMPSLOT_MAX
193	b.hi	syscall
194	adr	vdso_data, _vdso_data
195	adr	x_tmp, jumptable
196	add	x_tmp, x_tmp, w0, uxtw #2
197	br	x_tmp
198
199	ALIGN
200jumptable:
201	jump_slot jumptable, CLOCK_REALTIME, realtime
202	jump_slot jumptable, CLOCK_MONOTONIC, monotonic
203	b	syscall
204	b	syscall
205	jump_slot jumptable, CLOCK_MONOTONIC_RAW, monotonic_raw
206	jump_slot jumptable, CLOCK_REALTIME_COARSE, realtime_coarse
207	jump_slot jumptable, CLOCK_MONOTONIC_COARSE, monotonic_coarse
208
209	.if (. - jumptable) != 4 * (JUMPSLOT_MAX + 1)
210	.error	"Wrong jumptable size"
211	.endif
212
213	ALIGN
214realtime:
215	seqcnt_acquire
216	syscall_check fail=syscall
217	ldr	x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
218	/* w11 = cs_mono_mult, w12 = cs_shift */
219	ldp	w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
220	ldp	x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
221
222	/* All computations are done with left-shifted nsecs. */
223	get_nsec_per_sec res=x9
224	lsl	x9, x9, x12
225
226	get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
227	seqcnt_check fail=realtime
228	get_ts_realtime res_sec=x10, res_nsec=x11, \
229		clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
230	clock_gettime_return, shift=1
231
232	ALIGN
233monotonic:
234	seqcnt_acquire
235	syscall_check fail=syscall
236	ldr	x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
237	/* w11 = cs_mono_mult, w12 = cs_shift */
238	ldp	w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
239	ldp	x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
240	ldp	x3, x4, [vdso_data, #VDSO_WTM_CLK_SEC]
241
242	/* All computations are done with left-shifted nsecs. */
243	lsl	x4, x4, x12
244	get_nsec_per_sec res=x9
245	lsl	x9, x9, x12
246
247	get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
248	seqcnt_check fail=monotonic
249	get_ts_realtime res_sec=x10, res_nsec=x11, \
250		clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
251
252	add_ts sec=x10, nsec=x11, ts_sec=x3, ts_nsec=x4, nsec_to_sec=x9
253	clock_gettime_return, shift=1
254
255	ALIGN
256monotonic_raw:
257	seqcnt_acquire
258	syscall_check fail=syscall
259	ldr	x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
260	/* w11 = cs_raw_mult, w12 = cs_shift */
261	ldp	w12, w11, [vdso_data, #VDSO_CS_SHIFT]
262	ldp	x13, x14, [vdso_data, #VDSO_RAW_TIME_SEC]
263
264	/* All computations are done with left-shifted nsecs. */
265	get_nsec_per_sec res=x9
266	lsl	x9, x9, x12
267
268	get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
269	seqcnt_check fail=monotonic_raw
270	get_ts_clock_raw res_sec=x10, res_nsec=x11, \
271		clock_nsec=x15, nsec_to_sec=x9
272
273	add_ts sec=x10, nsec=x11, ts_sec=x13, ts_nsec=x14, nsec_to_sec=x9
274	clock_gettime_return, shift=1
275
276	ALIGN
277realtime_coarse:
278	seqcnt_acquire
279	ldp	x10, x11, [vdso_data, #VDSO_XTIME_CRS_SEC]
280	seqcnt_check fail=realtime_coarse
281	clock_gettime_return
282
283	ALIGN
284monotonic_coarse:
285	seqcnt_acquire
286	ldp	x10, x11, [vdso_data, #VDSO_XTIME_CRS_SEC]
287	ldp	x13, x14, [vdso_data, #VDSO_WTM_CLK_SEC]
288	seqcnt_check fail=monotonic_coarse
289
290	/* Computations are done in (non-shifted) nsecs. */
291	get_nsec_per_sec res=x9
292	add_ts sec=x10, nsec=x11, ts_sec=x13, ts_nsec=x14, nsec_to_sec=x9
293	clock_gettime_return
294
295	ALIGN
296syscall: /* Syscall fallback. */
297	mov	x8, #__NR_clock_gettime
298	svc	#0
299	ret
300	.cfi_endproc
301ENDPROC(__kernel_clock_gettime)
302
303/* int __kernel_clock_getres(clockid_t clock_id, struct timespec *res); */
304ENTRY(__kernel_clock_getres)
305	.cfi_startproc
306	cmp	w0, #CLOCK_REALTIME
307	ccmp	w0, #CLOCK_MONOTONIC, #0x4, ne
308	ccmp	w0, #CLOCK_MONOTONIC_RAW, #0x4, ne
309	b.ne	1f
310
311	adr	vdso_data, _vdso_data
312	ldr	w2, [vdso_data, #CLOCK_REALTIME_RES]
313	b	2f
3141:
315	cmp	w0, #CLOCK_REALTIME_COARSE
316	ccmp	w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
317	b.ne	4f
318	ldr	x2, 5f
3192:
320	cbz	x1, 3f
321	stp	xzr, x2, [x1]
322
3233:	/* res == NULL. */
324	mov	w0, wzr
325	ret
326
3274:	/* Syscall fallback. */
328	mov	x8, #__NR_clock_getres
329	svc	#0
330	ret
3315:
332	.quad	CLOCK_COARSE_RES
333	.cfi_endproc
334ENDPROC(__kernel_clock_getres)