/*
 * Userland implementation of gettimeofday() for 32-bit processes in a
 * ppc64 kernel for use in the vDSO
 *
 * Copyright (C) 2004 Benjamin Herrenschmidt (benh@kernel.crashing.org),
 *                    IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>

/* Offset for the low 32-bit part of a field of long type */
#ifdef CONFIG_PPC64
#define LOPART	4
#define TSPEC_TV_SEC	TSPC64_TV_SEC+LOPART
#else
#define LOPART	0
#define TSPEC_TV_SEC	TSPC32_TV_SEC
#endif
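
/* On a big-endian 64-bit kernel a long is 8 bytes, so the low-order
 * 32 bits of a long field sit 4 bytes past the start of the field; on
 * a 32-bit kernel the field is already 32 bits wide.  As a C sketch
 * (struct and field names here are illustrative, not the kernel's
 * exact identifiers):
 *
 *	sec_lo = *(u32 *)((char *)&vdso_data->stamp_xtime_sec + LOPART);
 */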

	.text
/*
 * Exact prototype of gettimeofday
 *
 * int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz);
 *
 */
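/* Rough C equivalent of the assembly below (a sketch only; the
 * __do_get_tspec signature is imaginary, since the real routine passes
 * its results in registers, and the tz field names are loose stand-ins
 * for the datapage offsets used here):
 *
 *	int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz)
 *	{
 *		struct vdso_data *d = __get_datapage();
 *
 *		if (tv)
 *			__do_get_tspec(d, USEC_PER_SEC,
 *				       &tv->tv_sec, &tv->tv_usec);
 *		if (tz) {
 *			tz->tz_minuteswest = d->tz_minuteswest;
 *			tz->tz_dsttime = d->tz_dsttime;
 *		}
 *		return 0;
 *	}
 */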
V_FUNCTION_BEGIN(__kernel_gettimeofday)
  .cfi_startproc
	mflr	r12
  .cfi_register lr,r12

	mr	r10,r3			/* r10 saves tv */
	mr	r11,r4			/* r11 saves tz */
	bl	__get_datapage@local	/* get data page */
	mr	r9,r3			/* datapage ptr in r9 */
	cmplwi	r10,0			/* check if tv is NULL */
	beq	3f
	lis	r7,1000000@ha		/* load up USEC_PER_SEC */
	addi	r7,r7,1000000@l		/* so we get microseconds in r4 */
	bl	__do_get_tspec@local	/* get sec/usec from tb & kernel */
	stw	r3,TVAL32_TV_SEC(r10)
	stw	r4,TVAL32_TV_USEC(r10)

3:	cmplwi	r11,0			/* check if tz is NULL */
	beq	1f
	lwz	r4,CFG_TZ_MINUTEWEST(r9)	/* fill tz */
	lwz	r5,CFG_TZ_DSTTIME(r9)
	stw	r4,TZONE_TZ_MINWEST(r11)
	stw	r5,TZONE_TZ_DSTTIME(r11)

1:	mtlr	r12
	crclr	cr0*4+so
	li	r3,0
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_gettimeofday)

/*
 * Exact prototype of clock_gettime()
 *
 * int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp);
 *
 */
V_FUNCTION_BEGIN(__kernel_clock_gettime)
  .cfi_startproc
	/* Check for supported clock IDs */
	cmpli	cr0,r3,CLOCK_REALTIME
	cmpli	cr1,r3,CLOCK_MONOTONIC
	cror	cr0*4+eq,cr0*4+eq,cr1*4+eq
	bne	cr0,99f

	mflr	r12			/* r12 saves lr */
  .cfi_register lr,r12
	mr	r11,r4			/* r11 saves tp */
	bl	__get_datapage@local	/* get data page */
	mr	r9,r3			/* datapage ptr in r9 */
	lis	r7,NSEC_PER_SEC@h	/* want nanoseconds */
	ori	r7,r7,NSEC_PER_SEC@l
50:	bl	__do_get_tspec@local	/* get sec/nsec from tb & kernel */
	bne	cr1,80f			/* not monotonic -> all done */

	/*
	 * CLOCK_MONOTONIC
	 */

	/* Now we must fix up the result using the wall-to-monotonic
	 * offset.  We need to snapshot that value and do the counter
	 * trick again.  Fortunately, we still have the counter value
	 * in r8 that was returned by __do_get_tspec.  At this point,
	 * r3,r4 contain our sec/nsec values; r5 and r6 can be used,
	 * and r7 contains NSEC_PER_SEC.
	 */

	lwz	r5,WTOM_CLOCK_SEC(r9)
	lwz	r6,WTOM_CLOCK_NSEC(r9)

	/* We now have our offset in r5,r6. We create a fake dependency
	 * on that value and re-check the counter.
	 */
	or	r0,r6,r5
	xor	r0,r0,r0
	add	r9,r9,r0
	lwz	r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
	cmplw	cr0,r8,r0		/* check if updated */
	bne-	50b

	/* Calculate and store result. Note that this mimics the C code,
	 * which may cause funny results if nsec goes negative... is that
	 * possible at all?
	 */
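	/* The C code being mimicked is roughly the following sketch,
	 * where wtom_sec/wtom_nsec stand for the wall-to-monotonic
	 * offset just loaded into r5/r6:
	 *
	 *	sec += wtom_sec;
	 *	nsec += wtom_nsec;
	 *	if (nsec >= NSEC_PER_SEC) {
	 *		nsec -= NSEC_PER_SEC;
	 *		sec++;
	 *	} else if (nsec < 0) {
	 *		nsec += NSEC_PER_SEC;
	 *		sec--;
	 *	}
	 */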
	add	r3,r3,r5
	add	r4,r4,r6
	cmpw	cr0,r4,r7
	cmpwi	cr1,r4,0
	blt	1f
	subf	r4,r7,r4
	addi	r3,r3,1
1:	bge	cr1,80f
	addi	r3,r3,-1
	add	r4,r4,r7

80:	stw	r3,TSPC32_TV_SEC(r11)
	stw	r4,TSPC32_TV_NSEC(r11)

	mtlr	r12
	crclr	cr0*4+so
	li	r3,0
	blr

	/*
	 * syscall fallback
	 */
99:
	li	r0,__NR_clock_gettime
  .cfi_restore lr
	sc
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_clock_gettime)


/*
 * Exact prototype of clock_getres()
 *
 * int __kernel_clock_getres(clockid_t clock_id, struct timespec *res);
 *
 */
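/* Rough C equivalent (a sketch; hrtimer_res stands for the datapage
 * field read via the CLOCK_HRTIMER_RES offset below, and
 * clock_getres_syscall for the sc fallback at 99:):
 *
 *	int __kernel_clock_getres(clockid_t clock_id, struct timespec *res)
 *	{
 *		if (clock_id != CLOCK_REALTIME &&
 *		    clock_id != CLOCK_MONOTONIC)
 *			return clock_getres_syscall(clock_id, res);
 *		if (res) {
 *			res->tv_sec = 0;
 *			res->tv_nsec = __get_datapage()->hrtimer_res;
 *		}
 *		return 0;
 *	}
 */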
V_FUNCTION_BEGIN(__kernel_clock_getres)
  .cfi_startproc
	/* Check for supported clock IDs */
	cmpwi	cr0,r3,CLOCK_REALTIME
	cmpwi	cr1,r3,CLOCK_MONOTONIC
	cror	cr0*4+eq,cr0*4+eq,cr1*4+eq
	bne	cr0,99f

	mflr	r12
  .cfi_register lr,r12
	bl	__get_datapage@local	/* get data page */
	lwz	r5,CLOCK_HRTIMER_RES(r3)
	mtlr	r12
	li	r3,0
	cmpli	cr0,r4,0
	crclr	cr0*4+so
	beqlr
	stw	r3,TSPC32_TV_SEC(r4)
	stw	r5,TSPC32_TV_NSEC(r4)
	blr

	/*
	 * syscall fallback
	 */
99:
	li	r0,__NR_clock_getres
	sc
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_clock_getres)


/*
 * Exact prototype of time()
 *
 * time_t time(time_t *t);
 *
 */
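/* Rough C equivalent (a sketch; stamp_xtime_sec stands for the
 * seconds word loaded from STAMP_XTIME+TSPEC_TV_SEC below):
 *
 *	time_t __kernel_time(time_t *t)
 *	{
 *		time_t sec = __get_datapage()->stamp_xtime_sec;
 *
 *		if (t)
 *			*t = sec;
 *		return sec;
 *	}
 */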
V_FUNCTION_BEGIN(__kernel_time)
  .cfi_startproc
	mflr	r12
  .cfi_register lr,r12

	mr	r11,r3			/* r11 holds t */
	bl	__get_datapage@local
	mr	r9,r3			/* datapage ptr in r9 */

	lwz	r3,STAMP_XTIME+TSPEC_TV_SEC(r9)

	cmplwi	r11,0			/* check if t is NULL */
	beq	2f
	stw	r3,0(r11)		/* store result at *t */
2:	mtlr	r12
	crclr	cr0*4+so
	blr
  .cfi_endproc
V_FUNCTION_END(__kernel_time)

/*
 * This is the core of clock_gettime() and gettimeofday().
 * It returns the current time in r3 (seconds) and r4.
 * On entry, r7 gives the resolution of r4, either USEC_PER_SEC
 * or NSEC_PER_SEC, giving r4 in microseconds or nanoseconds.
 * It expects the datapage ptr in r9 and doesn't clobber it.
 * It clobbers r0, r5 and r6.
 * On return, r8 contains the counter value that can be reused.
 * This clobbers cr0 but not any other cr field.
 */
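/* In outline this is a seqlock-style read loop.  As a C-ish sketch
 * (the vdso_data field names are assumptions; "frac" is a 32-bit
 * binary fraction of a second):
 *
 *	do {
 *		seq = vdso_data->tb_update_count;	spin while odd
 *		delta = get_tb() - vdso_data->tb_orig_stamp;
 *		fix = ((delta << 12) * (tb_to_xs >> 32)) >> 32;
 *		sec  = stamp_xtime_sec + (fix >> 32);
 *		frac = stamp_sec_frac + (u32)fix;	carry into sec
 *	} while (seq != vdso_data->tb_update_count);
 *
 * and the final result in r4 is (frac * r7) >> 32.
 */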
__do_get_tspec:
  .cfi_startproc
	/* Check for update count & load values. We use the low
	 * order 32 bits of the update count.
	 */
1:	lwz	r8,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
	andi.	r0,r8,1			/* pending update ? loop */
	bne-	1b
	xor	r0,r8,r8		/* create dependency */
	add	r9,r9,r0
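	/* xor of a register with itself is always zero, but it still
	 * carries a data dependency on r8, and adding the result into
	 * r9 makes every following load through r9 depend on the
	 * update-count load having completed, ordering the loads
	 * without a costlier sync/isync.
	 */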

	/* Load orig stamp (offset to TB) */
	lwz	r5,CFG_TB_ORIG_STAMP(r9)
	lwz	r6,(CFG_TB_ORIG_STAMP+4)(r9)

	/* Get a stable TB value */
#ifdef CONFIG_8xx
2:	mftbu	r3
	mftbl	r4
	mftbu	r0
#else
2:	mfspr	r3,SPRN_TBRU
	mfspr	r4,SPRN_TBRL
	mfspr	r0,SPRN_TBRU
#endif
	cmplw	cr0,r3,r0
	bne-	2b
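	/* The 64-bit timebase can only be read 32 bits at a time here,
	 * so the upper half is read before and after the lower half;
	 * if the two upper reads differ, the lower half wrapped in
	 * between and the read is retried.  In C (sketch):
	 *
	 *	do {
	 *		hi = mftbu();
	 *		lo = mftbl();
	 *	} while (mftbu() != hi);
	 */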

	/* Subtract tb orig stamp and shift left 12 bits */
	subfc	r4,r6,r4
	subfe	r0,r5,r3
	slwi	r0,r0,12
	rlwimi.	r0,r4,12,20,31
	slwi	r4,r4,12

	/*
	 * Load scale factor & do multiplication.
	 * We only use the high 32 bits of the tb_to_xs value.
	 * Even with a 1GHz timebase clock, the high 32 bits of
	 * tb_to_xs will be at least 4 million, so the error from
	 * ignoring the low 32 bits will be no more than 0.25ppm.
	 * The error will just make the clock run very very slightly
	 * slow until the next time the kernel updates the VDSO data,
	 * at which point the clock will catch up to the kernel's value,
	 * so there is no long-term error accumulation.
	 */
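	/* The arithmetic behind those figures, assuming tb_to_xs is
	 * the usual 2^-84-scaled factor, i.e. tb_to_xs = 2^84 / tb_freq
	 * so that seconds = (tb_ticks * tb_to_xs) >> 84:
	 *
	 *	at 1GHz:  tb_to_xs >> 32 = 2^52 / 10^9 ~= 4.5 million
	 *	dropping the low word loses < 1/4.5e6 ~= 0.22ppm
	 *
	 * and the multiply below computes
	 *
	 *	((delta << 12) * (tb_to_xs >> 32)) >> 32
	 *		= delta * tb_to_xs / 2^52
	 *		= (delta / tb_freq) * 2^32,
	 *
	 * i.e. seconds since the stamp as a 32.32 fixed-point value.
	 */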
	lwz	r5,CFG_TB_TO_XS(r9)	/* load values */
	mulhwu	r4,r4,r5
	li	r3,0

	beq+	4f			/* skip high part computation if 0 */
	mulhwu	r3,r0,r5
	mullw	r5,r0,r5
	addc	r4,r4,r5
	addze	r3,r3
4:
	/* At this point, we have seconds since the xtime stamp
	 * as a 32.32 fixed-point number in r3 and r4.
	 * Load & add the xtime stamp.
	 */
	lwz	r5,STAMP_XTIME+TSPEC_TV_SEC(r9)
	lwz	r6,STAMP_SEC_FRAC(r9)
	addc	r4,r4,r6
	adde	r3,r3,r5

	/* We create a fake dependency on the result in r3/r4
	 * and re-check the counter.
	 */
	or	r6,r4,r3
	xor	r0,r6,r6
	add	r9,r9,r0
	lwz	r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
	cmplw	cr0,r8,r0		/* check if updated */
	bne-	1b

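	/* At this point r4 holds the fraction of a second as a 0.32
	 * fixed-point value; multiplying by r7 (10^6 or 10^9) and
	 * keeping the high word scales it to micro- or nanoseconds.
	 */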
	mulhwu	r4,r4,r7		/* convert to micro or nanoseconds */

	blr
  .cfi_endproc