/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_VDSO_GETTIMEOFDAY_H
#define _ASM_POWERPC_VDSO_GETTIMEOFDAY_H

#include <asm/page.h>

#ifdef __ASSEMBLY__

#include <asm/ppc_asm.h>

/*
 * The macros set up two stack frames, one for the caller and one for the
 * callee, because there is no requirement for the caller to set up a stack
 * frame when calling the VDSO, so it may have omitted to do so, especially
 * on PPC64.
 */

.macro cvdso_call funct
  .cfi_startproc
	/* Frame for the caller, in case it did not create one itself */
	PPC_STLU	r1, -PPC_MIN_STKFRM(r1)
	mflr		r0
  .cfi_register lr, r0
	/* Frame for this wrapper, with LR saved in the caller's frame */
	PPC_STLU	r1, -PPC_MIN_STKFRM(r1)
	PPC_STL		r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1)
#ifdef __powerpc64__
	PPC_STL		r2, PPC_MIN_STKFRM + STK_GOT(r1)
#endif
	/* Third argument of the C helper: pointer to the VDSO data */
	get_datapage	r5
	addi		r5, r5, VDSO_DATA_OFFSET
	bl		DOTSYM(\funct)
	PPC_LL		r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1)
#ifdef __powerpc64__
	PPC_LL		r2, PPC_MIN_STKFRM + STK_GOT(r1)
#endif
	cmpwi		r3, 0
	mtlr		r0
  .cfi_restore lr
	addi		r1, r1, 2 * PPC_MIN_STKFRM
	/*
	 * Translate the -errno return of the C helper into the syscall
	 * convention: CR0.SO clear on success, CR0.SO set and a positive
	 * errno in r3 on failure.
	 */
	crclr		so
	beqlr+
	crset		so
	neg		r3, r3
	blr
  .cfi_endproc
.endm

.macro cvdso_call_time funct
  .cfi_startproc
	PPC_STLU	r1, -PPC_MIN_STKFRM(r1)
	mflr		r0
  .cfi_register lr, r0
	PPC_STLU	r1, -PPC_MIN_STKFRM(r1)
	PPC_STL		r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1)
#ifdef __powerpc64__
	PPC_STL		r2, PPC_MIN_STKFRM + STK_GOT(r1)
#endif
	/* Second argument of the C helper: pointer to the VDSO data */
	get_datapage	r4
	addi		r4, r4, VDSO_DATA_OFFSET
	bl		DOTSYM(\funct)
	PPC_LL		r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1)
#ifdef __powerpc64__
	PPC_LL		r2, PPC_MIN_STKFRM + STK_GOT(r1)
#endif
	/* time() cannot fail, so the value in r3 is returned as is */
	crclr		so
	mtlr		r0
  .cfi_restore lr
	addi		r1, r1, 2 * PPC_MIN_STKFRM
	blr
  .cfi_endproc
.endm
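
/*
 * Illustrative sketch only (not part of this header): the VDSO assembly
 * entry points are expected to expand these macros along the lines of
 *
 *	V_FUNCTION_BEGIN(__kernel_clock_gettime)
 *		cvdso_call __c_kernel_clock_gettime
 *	V_FUNCTION_END(__kernel_clock_gettime)
 *
 * with cvdso_call_time used the same way for __kernel_time. The
 * V_FUNCTION_BEGIN/END names are taken from the powerpc VDSO assembly and
 * are shown here only as an assumed usage example.
 */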

#else

#include <asm/vdso/timebase.h>
#include <asm/barrier.h>
#include <asm/unistd.h>
#include <uapi/linux/time.h>

#define VDSO_HAS_CLOCK_GETRES		1

#define VDSO_HAS_TIME			1

static __always_inline int do_syscall_2(const unsigned long _r0, const unsigned long _r3,
					const unsigned long _r4)
{
	register long r0 asm("r0") = _r0;
	register unsigned long r3 asm("r3") = _r3;
	register unsigned long r4 asm("r4") = _r4;
	register int ret asm ("r3");

	/*
	 * Syscall number in r0, arguments in r3/r4. The kernel flags an
	 * error by setting CR0.SO and returning a positive errno in r3,
	 * which is negated here so the fallbacks below return -errno like
	 * their C counterparts.
	 */
	asm volatile(
		"       sc\n"
		"       bns+    1f\n"
		"       neg     %0, %0\n"
		"1:\n"
		: "=r" (ret), "+r" (r4), "+r" (r0)
		: "r" (r3)
		: "memory", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "cr0", "ctr");

	return ret;
}

static __always_inline
int gettimeofday_fallback(struct __kernel_old_timeval *_tv, struct timezone *_tz)
{
	return do_syscall_2(__NR_gettimeofday, (unsigned long)_tv, (unsigned long)_tz);
}
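
/*
 * For illustration only (an assumption, not part of the original header):
 * a fallback such as gettimeofday_fallback(tv, tz) behaves like the raw
 *
 *	syscall(__NR_gettimeofday, tv, tz)
 *
 * invocation, except that errors come back as -errno rather than through
 * the errno/-1 convention of the C library wrapper.
 */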

#ifdef __powerpc64__

static __always_inline
int clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	return do_syscall_2(__NR_clock_gettime, _clkid, (unsigned long)_ts);
}

static __always_inline
int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	return do_syscall_2(__NR_clock_getres, _clkid, (unsigned long)_ts);
}

#else

#define BUILD_VDSO32		1

/* 32-bit powerpc uses the time64 syscalls for the 64-bit time fallbacks */
static __always_inline
int clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	return do_syscall_2(__NR_clock_gettime64, _clkid, (unsigned long)_ts);
}

static __always_inline
int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	return do_syscall_2(__NR_clock_getres_time64, _clkid, (unsigned long)_ts);
}

static __always_inline
int clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
{
	return do_syscall_2(__NR_clock_gettime, _clkid, (unsigned long)_ts);
}

static __always_inline
int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
{
	return do_syscall_2(__NR_clock_getres, _clkid, (unsigned long)_ts);
}
#endif

/* The powerpc VDSO always reads the timebase; clock_mode and vd are unused */
static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
						 const struct vdso_data *vd)
{
	return get_tb();
}

const struct vdso_data *__arch_get_vdso_data(void);

#ifdef CONFIG_TIME_NS
/* The time namespace VDSO data page immediately follows the regular one */
static __always_inline
const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
{
	return (void *)vd + PAGE_SIZE;
}
#endif

/* The timebase is always usable, so no runtime clocksource check is needed */
static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
{
	return true;
}
#define vdso_clocksource_ok vdso_clocksource_ok

/*
 * powerpc specific delta calculation.
 *
 * This variant removes the masking of the subtraction because the
 * clocksource mask of all VDSO capable clocksources on powerpc is U64_MAX
 * which would result in a pointless operation. The compiler cannot
 * optimize it away as the mask comes from the vdso data and is not compile
 * time constant.
 */
static __always_inline u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
{
	return (cycles - last) * mult;
}
#define vdso_calc_delta vdso_calc_delta
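
/*
 * Rough sketch of how the generic lib/vdso code is expected to consume this
 * helper (an assumption for illustration; the field names follow the common
 * vdso_data layout and may differ between kernel versions):
 *
 *	ns = vdso_ts->nsec;
 *	ns += vdso_calc_delta(cycles, vd->cycle_last, vd->mask, vd->mult);
 *	ns = vdso_shift_ns(ns, vd->shift);
 */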

#ifndef __powerpc64__
static __always_inline u64 vdso_shift_ns(u64 ns, unsigned long shift)
{
	u32 hi = ns >> 32;
	u32 lo = ns;

	/*
	 * Do the 64-bit shift in two 32-bit halves: when the high word ends
	 * up zero (the common case, per the likely() hint below) only a
	 * 32-bit value needs to be returned.
	 */
	lo >>= shift;
	lo |= hi << (32 - shift);
	hi >>= shift;

	if (likely(hi == 0))
		return lo;

	return ((u64)hi << 32) | lo;
}
#define vdso_shift_ns vdso_shift_ns
#endif

#ifdef __powerpc64__
int __c_kernel_clock_gettime(clockid_t clock, struct __kernel_timespec *ts,
			     const struct vdso_data *vd);
int __c_kernel_clock_getres(clockid_t clock_id, struct __kernel_timespec *res,
			    const struct vdso_data *vd);
#else
int __c_kernel_clock_gettime(clockid_t clock, struct old_timespec32 *ts,
			     const struct vdso_data *vd);
int __c_kernel_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts,
			       const struct vdso_data *vd);
int __c_kernel_clock_getres(clockid_t clock_id, struct old_timespec32 *res,
			    const struct vdso_data *vd);
#endif
int __c_kernel_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz,
			    const struct vdso_data *vd);
__kernel_old_time_t __c_kernel_time(__kernel_old_time_t *time,
				    const struct vdso_data *vd);
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_VDSO_GETTIMEOFDAY_H */