1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (C) 2005-2017 Andes Technology Corporation
3
4 #include <linux/compiler.h>
5 #include <linux/hrtimer.h>
6 #include <linux/time.h>
7 #include <asm/io.h>
8 #include <asm/barrier.h>
9 #include <asm/bug.h>
10 #include <asm/page.h>
11 #include <asm/unistd.h>
12 #include <asm/vdso_datapage.h>
13 #include <asm/vdso_timer_info.h>
14 #include <asm/asm-offsets.h>
15
/* Stringification helpers: Y(x) first macro-expands x, then X() turns the
 * expansion into a string literal (classic two-level stringify idiom). */
#define X(x) #x
#define Y(x) X(x)

/* Resolved by the vDSO link: presumably return the addresses of the
 * kernel-shared data page and the mapped timer page — confirm against
 * the vDSO linker script / asm stubs. */
extern struct vdso_data *__get_datapage(void);
extern struct vdso_data *__get_timerpage(void);
21
__vdso_read_begin(const struct vdso_data * vdata)22 static notrace unsigned int __vdso_read_begin(const struct vdso_data *vdata)
23 {
24 u32 seq;
25 repeat:
26 seq = READ_ONCE(vdata->seq_count);
27 if (seq & 1) {
28 cpu_relax();
29 goto repeat;
30 }
31 return seq;
32 }
33
vdso_read_begin(const struct vdso_data * vdata)34 static notrace unsigned int vdso_read_begin(const struct vdso_data *vdata)
35 {
36 unsigned int seq;
37
38 seq = __vdso_read_begin(vdata);
39
40 smp_rmb(); /* Pairs with smp_wmb in vdso_write_end */
41 return seq;
42 }
43
/*
 * End a seqcount read section.  Returns non-zero when the sequence count
 * changed since vdso_read_begin(), i.e. the snapshot may be torn and the
 * caller must retry its reads.
 */
static notrace int vdso_read_retry(const struct vdso_data *vdata, u32 start)
{
	smp_rmb(); /* Pairs with smp_wmb in vdso_write_begin */
	return vdata->seq_count != start;
}
49
/*
 * Fall back to the real clock_gettime() syscall when the vDSO fast path
 * cannot be used.
 *
 * nds32 syscall convention as used here: syscall number in $r15,
 * arguments in $r0/$r1, return value in $r0.  Note that `ret' aliases
 * `clkid' in $r0; listing clkid as an input keeps it live up to the
 * syscall instruction.
 */
static notrace long clock_gettime_fallback(clockid_t _clkid,
					   struct __kernel_old_timespec *_ts)
{
	register struct __kernel_old_timespec *ts asm("$r1") = _ts;
	register clockid_t clkid asm("$r0") = _clkid;
	register long ret asm("$r0");

	/* "memory" clobber: the kernel writes *_ts behind the compiler's back */
	asm volatile ("movi $r15, %3\n"
		      "syscall 0x0\n"
		      :"=r" (ret)
		      :"r"(clkid), "r"(ts), "i"(__NR_clock_gettime)
		      :"$r15", "memory");

	return ret;
}
65
/*
 * Read CLOCK_REALTIME_COARSE from the vDSO data page.
 *
 * The seqcount retry loop guarantees sec/nsec form a consistent pair even
 * if the kernel updates the data page concurrently.  Always returns 0.
 */
static notrace int do_realtime_coarse(struct __kernel_old_timespec *ts,
				      struct vdso_data *vdata)
{
	u32 seq;

	do {
		seq = vdso_read_begin(vdata);

		ts->tv_sec = vdata->xtime_coarse_sec;
		ts->tv_nsec = vdata->xtime_coarse_nsec;

	} while (vdso_read_retry(vdata, seq));
	return 0;
}
80
/*
 * Read CLOCK_MONOTONIC_COARSE from the vDSO data page.
 *
 * Monotonic time is realtime plus the wall-to-monotonic offset.  The two
 * nsec contributions are summed in a u64 and can exceed NSEC_PER_SEC, so
 * the result is normalized with __iter_div_u64_rem after the seqcount
 * loop confirms a consistent snapshot.  Always returns 0.
 */
static notrace int do_monotonic_coarse(struct __kernel_old_timespec *ts,
				       struct vdso_data *vdata)
{
	u32 seq;
	u64 ns;

	do {
		seq = vdso_read_begin(vdata);

		ts->tv_sec = vdata->xtime_coarse_sec + vdata->wtm_clock_sec;
		ns = vdata->xtime_coarse_nsec + vdata->wtm_clock_nsec;

	} while (vdso_read_retry(vdata, seq));

	/* Fold whole seconds out of ns; remainder becomes tv_nsec. */
	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return 0;
}
100
vgetsns(struct vdso_data * vdso)101 static notrace inline u64 vgetsns(struct vdso_data *vdso)
102 {
103 u32 cycle_now;
104 u32 cycle_delta;
105 u32 *timer_cycle_base;
106
107 timer_cycle_base =
108 (u32 *) ((char *)__get_timerpage() + vdso->cycle_count_offset);
109 cycle_now = readl_relaxed(timer_cycle_base);
110 if (true == vdso->cycle_count_down)
111 cycle_now = ~(*timer_cycle_base);
112 cycle_delta = cycle_now - (u32) vdso->cs_cycle_last;
113 return ((u64) cycle_delta & vdso->cs_mask) * vdso->cs_mult;
114 }
115
/*
 * Read CLOCK_REALTIME using the hardware timer mapped into the vDSO.
 *
 * Under the seqcount loop, takes the base time from the data page and adds
 * the fixed-point cycle delta from vgetsns(); the >> cs_shift converts the
 * (cycles * mult) value to nanoseconds.  Seconds are then folded out of ns
 * with __iter_div_u64_rem.  Always returns 0.
 */
static notrace int do_realtime(struct __kernel_old_timespec *ts, struct vdso_data *vdata)
{
	unsigned count;
	u64 ns;
	do {
		count = vdso_read_begin(vdata);
		ts->tv_sec = vdata->xtime_clock_sec;
		ns = vdata->xtime_clock_nsec;
		ns += vgetsns(vdata);
		ns >>= vdata->cs_shift;
	} while (vdso_read_retry(vdata, count));

	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return 0;
}
133
/*
 * Read CLOCK_MONOTONIC using the hardware timer mapped into the vDSO.
 *
 * Same scheme as do_realtime(), plus the wall-to-monotonic offset
 * (wtm_clock_sec/nsec) added inside the seqcount loop so the offset is
 * part of the consistent snapshot.  Always returns 0.
 */
static notrace int do_monotonic(struct __kernel_old_timespec *ts, struct vdso_data *vdata)
{
	u64 ns;
	u32 seq;

	do {
		seq = vdso_read_begin(vdata);

		ts->tv_sec = vdata->xtime_clock_sec;
		ns = vdata->xtime_clock_nsec;
		ns += vgetsns(vdata);
		ns >>= vdata->cs_shift;

		ts->tv_sec += vdata->wtm_clock_sec;
		ns += vdata->wtm_clock_nsec;

	} while (vdso_read_retry(vdata, seq));

	/* Normalize: move whole seconds from ns into tv_sec. */
	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return 0;
}
157
__vdso_clock_gettime(clockid_t clkid,struct __kernel_old_timespec * ts)158 notrace int __vdso_clock_gettime(clockid_t clkid, struct __kernel_old_timespec *ts)
159 {
160 struct vdso_data *vdata;
161 int ret = -1;
162
163 vdata = __get_datapage();
164 if (vdata->cycle_count_offset == EMPTY_REG_OFFSET)
165 return clock_gettime_fallback(clkid, ts);
166
167 switch (clkid) {
168 case CLOCK_REALTIME_COARSE:
169 ret = do_realtime_coarse(ts, vdata);
170 break;
171 case CLOCK_MONOTONIC_COARSE:
172 ret = do_monotonic_coarse(ts, vdata);
173 break;
174 case CLOCK_REALTIME:
175 ret = do_realtime(ts, vdata);
176 break;
177 case CLOCK_MONOTONIC:
178 ret = do_monotonic(ts, vdata);
179 break;
180 default:
181 break;
182 }
183
184 if (ret)
185 ret = clock_gettime_fallback(clkid, ts);
186
187 return ret;
188 }
189
/*
 * Fall back to the real clock_getres() syscall for clock ids the vDSO
 * does not know about.
 *
 * Same nds32 syscall convention as clock_gettime_fallback(): number in
 * $r15, arguments in $r0/$r1, return value in $r0 (aliasing clk_id).
 */
static notrace int clock_getres_fallback(clockid_t _clk_id,
					 struct __kernel_old_timespec *_res)
{
	register clockid_t clk_id asm("$r0") = _clk_id;
	register struct __kernel_old_timespec *res asm("$r1") = _res;
	register int ret asm("$r0");

	/* "memory" clobber: the kernel writes *_res behind the compiler's back */
	asm volatile ("movi $r15, %3\n"
		      "syscall 0x0\n"
		      :"=r" (ret)
		      :"r"(clk_id), "r"(res), "i"(__NR_clock_getres)
		      :"$r15", "memory");

	return ret;
}
205
__vdso_clock_getres(clockid_t clk_id,struct __kernel_old_timespec * res)206 notrace int __vdso_clock_getres(clockid_t clk_id, struct __kernel_old_timespec *res)
207 {
208 struct vdso_data *vdata = __get_datapage();
209
210 if (res == NULL)
211 return 0;
212 switch (clk_id) {
213 case CLOCK_REALTIME:
214 case CLOCK_MONOTONIC:
215 case CLOCK_MONOTONIC_RAW:
216 res->tv_sec = 0;
217 res->tv_nsec = vdata->hrtimer_res;
218 break;
219 case CLOCK_REALTIME_COARSE:
220 case CLOCK_MONOTONIC_COARSE:
221 res->tv_sec = 0;
222 res->tv_nsec = CLOCK_COARSE_RES;
223 break;
224 default:
225 return clock_getres_fallback(clk_id, res);
226 }
227 return 0;
228 }
229
/*
 * Fall back to the real gettimeofday() syscall when the vDSO fast path
 * cannot be used (no timer register mapped).
 *
 * nds32 syscall convention: number in $r15, arguments in $r0/$r1, return
 * value in $r0 (aliasing tv).
 */
static notrace inline int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
						struct timezone *_tz)
{
	register struct __kernel_old_timeval *tv asm("$r0") = _tv;
	register struct timezone *tz asm("$r1") = _tz;
	register int ret asm("$r0");

	/* "memory" clobber: the kernel writes *_tv / *_tz */
	asm volatile ("movi $r15, %3\n"
		      "syscall 0x0\n"
		      :"=r" (ret)
		      :"r"(tv), "r"(tz), "i"(__NR_gettimeofday)
		      :"$r15", "memory");

	return ret;
}
245
__vdso_gettimeofday(struct __kernel_old_timeval * tv,struct timezone * tz)246 notrace int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
247 {
248 struct __kernel_old_timespec ts;
249 struct vdso_data *vdata;
250 int ret;
251
252 vdata = __get_datapage();
253
254 if (vdata->cycle_count_offset == EMPTY_REG_OFFSET)
255 return gettimeofday_fallback(tv, tz);
256
257 ret = do_realtime(&ts, vdata);
258
259 if (tv) {
260 tv->tv_sec = ts.tv_sec;
261 tv->tv_usec = ts.tv_nsec / 1000;
262 }
263 if (tz) {
264 tz->tz_minuteswest = vdata->tz_minuteswest;
265 tz->tz_dsttime = vdata->tz_dsttime;
266 }
267
268 return ret;
269 }
270