1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Implement CPU time clocks for the POSIX clock interface.
4 */
5
6 #include <linux/sched/signal.h>
7 #include <linux/sched/cputime.h>
8 #include <linux/posix-timers.h>
9 #include <linux/errno.h>
10 #include <linux/math64.h>
11 #include <linux/uaccess.h>
12 #include <linux/kernel_stat.h>
13 #include <trace/events/timer.h>
14 #include <linux/tick.h>
15 #include <linux/workqueue.h>
16 #include <linux/compat.h>
17 #include <linux/sched/deadline.h>
18 #include <linux/task_work.h>
19
20 #include "posix-timers.h"
21
22 static void posix_cpu_timer_rearm(struct k_itimer *timer);
23
24 void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit)
25 {
26 posix_cputimers_init(pct);
27 if (cpu_limit != RLIM_INFINITY) {
28 pct->bases[CPUCLOCK_PROF].nextevt = cpu_limit * NSEC_PER_SEC;
29 pct->timers_active = true;
30 }
31 }
32
33 /*
34 * Called after updating RLIMIT_CPU to run cpu timer and update
35 * tsk->signal->posix_cputimers.bases[clock].nextevt expiration cache if
36 * necessary. Needs siglock protection since other code may update the
37 * expiration cache as well.
38 *
39 * Returns 0 on success, -ESRCH on failure. Can fail if the task is exiting and
40 * we cannot lock_task_sighand. Cannot fail if task is current.
41 */
42 int update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
43 {
44 u64 nsecs = rlim_new * NSEC_PER_SEC;
45 unsigned long irq_fl;
46
47 if (!lock_task_sighand(task, &irq_fl))
48 return -ESRCH;
49 set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL);
50 unlock_task_sighand(task, &irq_fl);
51 return 0;
52 }
53
54 /*
55 * Functions for validating access to tasks.
56 */
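/*
 * A CPU clockid encodes both the target pid and the clock type; the
 * CPUCLOCK_PID(), CPUCLOCK_PERTHREAD() and CPUCLOCK_WHICH() helpers
 * (see include/linux/posix-timers.h) extract the pid, the per-thread
 * flag and the clock index (PROF/VIRT/SCHED) from it.
 */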
57 static struct pid *pid_for_clock(const clockid_t clock, bool gettime)
58 {
59 const bool thread = !!CPUCLOCK_PERTHREAD(clock);
60 const pid_t upid = CPUCLOCK_PID(clock);
61 struct pid *pid;
62
63 if (CPUCLOCK_WHICH(clock) >= CPUCLOCK_MAX)
64 return NULL;
65
66 /*
67 * If the encoded PID is 0, then the timer is targeted at current
68 * or the process to which current belongs.
69 */
70 if (upid == 0)
71 return thread ? task_pid(current) : task_tgid(current);
72
73 pid = find_vpid(upid);
74 if (!pid)
75 return NULL;
76
77 if (thread) {
78 struct task_struct *tsk = pid_task(pid, PIDTYPE_PID);
79 return (tsk && same_thread_group(tsk, current)) ? pid : NULL;
80 }
81
82 /*
83 * For clock_gettime(PROCESS), allow finding the process
84 * with the pid of the current task. The code needs the tgid
85 * of the process so that pid_task(pid, PIDTYPE_TGID) can be
86 * used to find the process.
87 */
88 if (gettime && (pid == task_pid(current)))
89 return task_tgid(current);
90
91 /*
92 * For process clocks, require that the pid identifies a process.
93 */
94 return pid_has_task(pid, PIDTYPE_TGID) ? pid : NULL;
95 }
96
97 static inline int validate_clock_permissions(const clockid_t clock)
98 {
99 int ret;
100
101 rcu_read_lock();
102 ret = pid_for_clock(clock, false) ? 0 : -EINVAL;
103 rcu_read_unlock();
104
105 return ret;
106 }
107
108 static inline enum pid_type clock_pid_type(const clockid_t clock)
109 {
110 return CPUCLOCK_PERTHREAD(clock) ? PIDTYPE_PID : PIDTYPE_TGID;
111 }
112
113 static inline struct task_struct *cpu_timer_task_rcu(struct k_itimer *timer)
114 {
115 return pid_task(timer->it.cpu.pid, clock_pid_type(timer->it_clock));
116 }
117
118 /*
119 * Update expiry time from increment, and increase overrun count,
120 * given the current clock sample.
121 */
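/*
 * Illustrative example of the doubling logic below: with expires = 10,
 * it_interval = 3 and now = 20, delta becomes 13. The first loop
 * doubles incr up to 12 (i = 2), the second loop then adds 12 to the
 * expiry (it_overrun += 1 << 2 = 4) leaving delta = 1, so the timer
 * ends up expiring at 22, the first period boundary after 'now', with
 * four missed periods accounted as overruns.
 */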
122 static u64 bump_cpu_timer(struct k_itimer *timer, u64 now)
123 {
124 u64 delta, incr, expires = timer->it.cpu.node.expires;
125 int i;
126
127 if (!timer->it_interval)
128 return expires;
129
130 if (now < expires)
131 return expires;
132
133 incr = timer->it_interval;
134 delta = now + incr - expires;
135
136 /* Don't use (incr*2 < delta), incr*2 might overflow. */
137 for (i = 0; incr < delta - incr; i++)
138 incr = incr << 1;
139
140 for (; i >= 0; incr >>= 1, i--) {
141 if (delta < incr)
142 continue;
143
144 timer->it.cpu.node.expires += incr;
145 timer->it_overrun += 1LL << i;
146 delta -= incr;
147 }
148 return timer->it.cpu.node.expires;
149 }
150
151 /* Check whether all cache entries contain U64_MAX, i.e. eternal expiry time */
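/*
 * ~x is zero only when x == U64_MAX, so OR-ing the complements below
 * yields zero iff all three cached expiries are U64_MAX (inactive).
 */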
152 static inline bool expiry_cache_is_inactive(const struct posix_cputimers *pct)
153 {
154 return !(~pct->bases[CPUCLOCK_PROF].nextevt |
155 ~pct->bases[CPUCLOCK_VIRT].nextevt |
156 ~pct->bases[CPUCLOCK_SCHED].nextevt);
157 }
158
159 static int
160 posix_cpu_clock_getres(const clockid_t which_clock, struct timespec64 *tp)
161 {
162 int error = validate_clock_permissions(which_clock);
163
164 if (!error) {
165 tp->tv_sec = 0;
166 tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
167 if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
168 /*
169 * If sched_clock is using a cycle counter, we
170 * don't have any idea of its true resolution
171 * exported, but it is much more than 1s/HZ.
172 */
173 tp->tv_nsec = 1;
174 }
175 }
176 return error;
177 }
178
179 static int
180 posix_cpu_clock_set(const clockid_t clock, const struct timespec64 *tp)
181 {
182 int error = validate_clock_permissions(clock);
183
184 /*
185 * You can never reset a CPU clock, but we check for other errors
186 * in the call before failing with EPERM.
187 */
188 return error ? : -EPERM;
189 }
190
191 /*
192 * Sample a per-thread clock for the given task. clkid is validated.
193 */
194 static u64 cpu_clock_sample(const clockid_t clkid, struct task_struct *p)
195 {
196 u64 utime, stime;
197
198 if (clkid == CPUCLOCK_SCHED)
199 return task_sched_runtime(p);
200
201 task_cputime(p, &utime, &stime);
202
203 switch (clkid) {
204 case CPUCLOCK_PROF:
205 return utime + stime;
206 case CPUCLOCK_VIRT:
207 return utime;
208 default:
209 WARN_ON_ONCE(1);
210 }
211 return 0;
212 }
213
214 static inline void store_samples(u64 *samples, u64 stime, u64 utime, u64 rtime)
215 {
216 samples[CPUCLOCK_PROF] = stime + utime;
217 samples[CPUCLOCK_VIRT] = utime;
218 samples[CPUCLOCK_SCHED] = rtime;
219 }
220
221 static void task_sample_cputime(struct task_struct *p, u64 *samples)
222 {
223 u64 stime, utime;
224
225 task_cputime(p, &utime, &stime);
226 store_samples(samples, stime, utime, p->se.sum_exec_runtime);
227 }
228
229 static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
230 u64 *samples)
231 {
232 u64 stime, utime, rtime;
233
234 utime = atomic64_read(&at->utime);
235 stime = atomic64_read(&at->stime);
236 rtime = atomic64_read(&at->sum_exec_runtime);
237 store_samples(samples, stime, utime, rtime);
238 }
239
240 /*
241 * Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
242 * to avoid race conditions with concurrent updates to cputime.
243 */
244 static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
245 {
246 u64 curr_cputime;
247 retry:
248 curr_cputime = atomic64_read(cputime);
249 if (sum_cputime > curr_cputime) {
250 if (atomic64_cmpxchg(cputime, curr_cputime, sum_cputime) != curr_cputime)
251 goto retry;
252 }
253 }
254
255 static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic,
256 struct task_cputime *sum)
257 {
258 __update_gt_cputime(&cputime_atomic->utime, sum->utime);
259 __update_gt_cputime(&cputime_atomic->stime, sum->stime);
260 __update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
261 }
262
263 /**
264 * thread_group_sample_cputime - Sample cputime for a given task
265 * @tsk: Task for which cputime needs to be started
266 * @samples: Storage for time samples
267 *
268 * Called from sys_getitimer() to calculate the expiry time of an active
269 * timer. That means group cputime accounting is already active. Called
270 * with task sighand lock held.
271 *
272 * Updates @samples with an up-to-date sample of the thread group cputimes.
273 */
274 void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples)
275 {
276 struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
277 struct posix_cputimers *pct = &tsk->signal->posix_cputimers;
278
279 WARN_ON_ONCE(!pct->timers_active);
280
281 proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
282 }
283
284 /**
285 * thread_group_start_cputime - Start cputime and return a sample
286 * @tsk: Task for which cputime needs to be started
287 * @samples: Storage for time samples
288 *
289 * The thread group cputime accounting is avoided when there are no posix
290 * CPU timers armed. Before starting a timer it's required to check whether
291 * the time accounting is active. If not, a full update of the atomic
292 * accounting store needs to be done and the accounting enabled.
293 *
294 * Updates @samples with an up-to-date sample of the thread group cputimes.
295 */
296 static void thread_group_start_cputime(struct task_struct *tsk, u64 *samples)
297 {
298 struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
299 struct posix_cputimers *pct = &tsk->signal->posix_cputimers;
300
301 lockdep_assert_task_sighand_held(tsk);
302
303 /* Check if cputimer isn't running. This is accessed without locking. */
304 if (!READ_ONCE(pct->timers_active)) {
305 struct task_cputime sum;
306
307 /*
308 * The POSIX timer interface allows for absolute time expiry
309 * values through the TIMER_ABSTIME flag, therefore we have
310 * to synchronize the timer to the clock every time we start it.
311 */
312 thread_group_cputime(tsk, &sum);
313 update_gt_cputime(&cputimer->cputime_atomic, &sum);
314
315 /*
316 * We're setting timers_active without a lock. Ensure this
317 * only gets written to in one operation. We set it after
318 * update_gt_cputime() as a small optimization, but
319 * barriers are not required because update_gt_cputime()
320 * can handle concurrent updates.
321 */
322 WRITE_ONCE(pct->timers_active, true);
323 }
324 proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
325 }
326
327 static void __thread_group_cputime(struct task_struct *tsk, u64 *samples)
328 {
329 struct task_cputime ct;
330
331 thread_group_cputime(tsk, &ct);
332 store_samples(samples, ct.stime, ct.utime, ct.sum_exec_runtime);
333 }
334
335 /*
336 * Sample a process (thread group) clock for the given task clkid. If the
337 * group's cputime accounting is already enabled, read the atomic
338 * store. Otherwise a full update is required. clkid is already validated.
339 */
340 static u64 cpu_clock_sample_group(const clockid_t clkid, struct task_struct *p,
341 bool start)
342 {
343 struct thread_group_cputimer *cputimer = &p->signal->cputimer;
344 struct posix_cputimers *pct = &p->signal->posix_cputimers;
345 u64 samples[CPUCLOCK_MAX];
346
347 if (!READ_ONCE(pct->timers_active)) {
348 if (start)
349 thread_group_start_cputime(p, samples);
350 else
351 __thread_group_cputime(p, samples);
352 } else {
353 proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
354 }
355
356 return samples[clkid];
357 }
358
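/*
 * Typical userspace path into posix_cpu_clock_get() (illustration):
 * clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts), or a dynamic clockid
 * obtained via clock_getcpuclockid()/pthread_getcpuclockid(), ends up
 * here through the k_clock structures at the bottom of this file.
 */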
359 static int posix_cpu_clock_get(const clockid_t clock, struct timespec64 *tp)
360 {
361 const clockid_t clkid = CPUCLOCK_WHICH(clock);
362 struct task_struct *tsk;
363 u64 t;
364
365 rcu_read_lock();
366 tsk = pid_task(pid_for_clock(clock, true), clock_pid_type(clock));
367 if (!tsk) {
368 rcu_read_unlock();
369 return -EINVAL;
370 }
371
372 if (CPUCLOCK_PERTHREAD(clock))
373 t = cpu_clock_sample(clkid, tsk);
374 else
375 t = cpu_clock_sample_group(clkid, tsk, false);
376 rcu_read_unlock();
377
378 *tp = ns_to_timespec64(t);
379 return 0;
380 }
381
382 /*
383 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
384 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
385 * new timer already all-zeros initialized.
386 */
387 static int posix_cpu_timer_create(struct k_itimer *new_timer)
388 {
389 static struct lock_class_key posix_cpu_timers_key;
390 struct pid *pid;
391
392 rcu_read_lock();
393 pid = pid_for_clock(new_timer->it_clock, false);
394 if (!pid) {
395 rcu_read_unlock();
396 return -EINVAL;
397 }
398
399 /*
400 * If posix timer expiry is handled in task work context then
401 * timer::it_lock can be taken without disabling interrupts as all
402 * other locking happens in task context. This requires a separate
403 * lock class key otherwise regular posix timer expiry would record
404 * the lock class being taken in interrupt context and generate a
405 * false positive warning.
406 */
407 if (IS_ENABLED(CONFIG_POSIX_CPU_TIMERS_TASK_WORK))
408 lockdep_set_class(&new_timer->it_lock, &posix_cpu_timers_key);
409
410 new_timer->kclock = &clock_posix_cpu;
411 timerqueue_init(&new_timer->it.cpu.node);
412 new_timer->it.cpu.pid = get_pid(pid);
413 rcu_read_unlock();
414 return 0;
415 }
416
417 static struct posix_cputimer_base *timer_base(struct k_itimer *timer,
418 struct task_struct *tsk)
419 {
420 int clkidx = CPUCLOCK_WHICH(timer->it_clock);
421
422 if (CPUCLOCK_PERTHREAD(timer->it_clock))
423 return tsk->posix_cputimers.bases + clkidx;
424 else
425 return tsk->signal->posix_cputimers.bases + clkidx;
426 }
427
428 /*
429 * Force recalculating the base earliest expiration on the next tick.
430 * This will also re-evaluate the need to keep around the process wide
431 * cputime counter and tick dependency and eventually shut these down
432 * if necessary.
433 */
434 static void trigger_base_recalc_expires(struct k_itimer *timer,
435 struct task_struct *tsk)
436 {
437 struct posix_cputimer_base *base = timer_base(timer, tsk);
438
439 base->nextevt = 0;
440 }
441
442 /*
443 * Dequeue the timer and reset the base if it was its earliest expiration.
444 * It makes sure the next tick recalculates the base next expiration so we
445 * don't keep the costly process wide cputime counter around for a random
446 * amount of time, along with the tick dependency.
447 *
448 * If another timer gets queued between this and the next tick, its
449 * expiration will update the base next event if necessary on the next
450 * tick.
451 */
452 static void disarm_timer(struct k_itimer *timer, struct task_struct *p)
453 {
454 struct cpu_timer *ctmr = &timer->it.cpu;
455 struct posix_cputimer_base *base;
456
457 if (!cpu_timer_dequeue(ctmr))
458 return;
459
460 base = timer_base(timer, p);
461 if (cpu_timer_getexpires(ctmr) == base->nextevt)
462 trigger_base_recalc_expires(timer, p);
463 }
464
465
466 /*
467 * Clean up a CPU-clock timer that is about to be destroyed.
468 * This is called from timer deletion with the timer already locked.
469 * If we return TIMER_RETRY, it's necessary to release the timer's lock
470 * and try again. (This happens when the timer is in the middle of firing.)
471 */
472 static int posix_cpu_timer_del(struct k_itimer *timer)
473 {
474 struct cpu_timer *ctmr = &timer->it.cpu;
475 struct sighand_struct *sighand;
476 struct task_struct *p;
477 unsigned long flags;
478 int ret = 0;
479
480 rcu_read_lock();
481 p = cpu_timer_task_rcu(timer);
482 if (!p)
483 goto out;
484
485 /*
486 * Protect against sighand release/switch in exit/exec and process/
487 * thread timer list entry concurrent read/writes.
488 */
489 sighand = lock_task_sighand(p, &flags);
490 if (unlikely(sighand == NULL)) {
491 /*
492 * This raced with the reaping of the task. The exit cleanup
493 * should have removed this timer from the timer queue.
494 */
495 WARN_ON_ONCE(ctmr->head || timerqueue_node_queued(&ctmr->node));
496 } else {
497 if (timer->it.cpu.firing)
498 ret = TIMER_RETRY;
499 else
500 disarm_timer(timer, p);
501
502 unlock_task_sighand(p, &flags);
503 }
504
505 out:
506 rcu_read_unlock();
507 if (!ret)
508 put_pid(ctmr->pid);
509
510 return ret;
511 }
512
513 static void cleanup_timerqueue(struct timerqueue_head *head)
514 {
515 struct timerqueue_node *node;
516 struct cpu_timer *ctmr;
517
518 while ((node = timerqueue_getnext(head))) {
519 timerqueue_del(head, node);
520 ctmr = container_of(node, struct cpu_timer, node);
521 ctmr->head = NULL;
522 }
523 }
524
525 /*
526 * Clean out CPU timers which are still armed when a thread exits. The
527 * timers are only removed from the list. No other updates are done. The
528 * corresponding posix timers are still accessible, but cannot be rearmed.
529 *
530 * This must be called with the siglock held.
531 */
532 static void cleanup_timers(struct posix_cputimers *pct)
533 {
534 cleanup_timerqueue(&pct->bases[CPUCLOCK_PROF].tqhead);
535 cleanup_timerqueue(&pct->bases[CPUCLOCK_VIRT].tqhead);
536 cleanup_timerqueue(&pct->bases[CPUCLOCK_SCHED].tqhead);
537 }
538
539 /*
540 * These are both called with the siglock held, when the current thread
541 * is being reaped. When the final (leader) thread in the group is reaped,
542 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
543 */
544 void posix_cpu_timers_exit(struct task_struct *tsk)
545 {
546 cleanup_timers(&tsk->posix_cputimers);
547 }
548 void posix_cpu_timers_exit_group(struct task_struct *tsk)
549 {
550 cleanup_timers(&tsk->signal->posix_cputimers);
551 }
552
553 /*
554 * Insert the timer on the appropriate list before any timers that
555 * expire later. This must be called with the sighand lock held.
556 */
557 static void arm_timer(struct k_itimer *timer, struct task_struct *p)
558 {
559 struct posix_cputimer_base *base = timer_base(timer, p);
560 struct cpu_timer *ctmr = &timer->it.cpu;
561 u64 newexp = cpu_timer_getexpires(ctmr);
562
563 if (!cpu_timer_enqueue(&base->tqhead, ctmr))
564 return;
565
566 /*
567 * We are the new earliest-expiring POSIX 1.b timer, hence
568 * need to update expiration cache. Take into account that
569 * for process timers we share expiration cache with itimers
570 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
571 */
572 if (newexp < base->nextevt)
573 base->nextevt = newexp;
574
575 if (CPUCLOCK_PERTHREAD(timer->it_clock))
576 tick_dep_set_task(p, TICK_DEP_BIT_POSIX_TIMER);
577 else
578 tick_dep_set_signal(p, TICK_DEP_BIT_POSIX_TIMER);
579 }
580
581 /*
582 * The timer is locked, fire it and arrange for its reload.
583 */
584 static void cpu_timer_fire(struct k_itimer *timer)
585 {
586 struct cpu_timer *ctmr = &timer->it.cpu;
587
588 if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
589 /*
590 * The user doesn't want any signal.
591 */
592 cpu_timer_setexpires(ctmr, 0);
593 } else if (unlikely(timer->sigq == NULL)) {
594 /*
595 * This is a special case for clock_nanosleep,
596 * not a normal timer from sys_timer_create.
597 */
598 wake_up_process(timer->it_process);
599 cpu_timer_setexpires(ctmr, 0);
600 } else if (!timer->it_interval) {
601 /*
602 * One-shot timer. Clear it as soon as it's fired.
603 */
604 posix_timer_event(timer, 0);
605 cpu_timer_setexpires(ctmr, 0);
606 } else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
607 /*
608 * The signal did not get queued because the signal
609 * was ignored, so we won't get any callback to
610 * reload the timer. But we need to keep it
611 * ticking in case the signal is deliverable next time.
612 */
613 posix_cpu_timer_rearm(timer);
614 ++timer->it_requeue_pending;
615 }
616 }
617
618 /*
619 * Guts of sys_timer_settime for CPU timers.
620 * This is called with the timer locked and interrupts disabled.
621 * If we return TIMER_RETRY, it's necessary to release the timer's lock
622 * and try again. (This happens when the timer is in the middle of firing.)
623 */
624 static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
625 struct itimerspec64 *new, struct itimerspec64 *old)
626 {
627 clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
628 u64 old_expires, new_expires, old_incr, val;
629 struct cpu_timer *ctmr = &timer->it.cpu;
630 struct sighand_struct *sighand;
631 struct task_struct *p;
632 unsigned long flags;
633 int ret = 0;
634
635 rcu_read_lock();
636 p = cpu_timer_task_rcu(timer);
637 if (!p) {
638 /*
639 * If p has just been reaped, we can no
640 * longer get any information about it at all.
641 */
642 rcu_read_unlock();
643 return -ESRCH;
644 }
645
646 /*
647 * Use the to_ktime conversion because that clamps the maximum
648 * value to KTIME_MAX and avoids multiplication overflows.
649 */
650 new_expires = ktime_to_ns(timespec64_to_ktime(new->it_value));
651
652 /*
653 * Protect against sighand release/switch in exit/exec and p->cpu_timers
654 * and p->signal->cpu_timers read/write in arm_timer()
655 */
656 sighand = lock_task_sighand(p, &flags);
657 /*
658 * If p has just been reaped, we can no
659 * longer get any information about it at all.
660 */
661 if (unlikely(sighand == NULL)) {
662 rcu_read_unlock();
663 return -ESRCH;
664 }
665
666 /*
667 * Disarm any old timer after extracting its expiry time.
668 */
669 old_incr = timer->it_interval;
670 old_expires = cpu_timer_getexpires(ctmr);
671
672 if (unlikely(timer->it.cpu.firing)) {
673 timer->it.cpu.firing = -1;
674 ret = TIMER_RETRY;
675 } else {
676 cpu_timer_dequeue(ctmr);
677 }
678
679 /*
680 * We need to sample the current value to convert the new
681 * value from relative to absolute, and to convert the
682 * old value from absolute to relative. To set a process
683 * timer, we need a sample to balance the thread expiry
684 * times (in arm_timer). With an absolute time, we must
685 * check if it's already passed. In short, we need a sample.
686 */
687 if (CPUCLOCK_PERTHREAD(timer->it_clock))
688 val = cpu_clock_sample(clkid, p);
689 else
690 val = cpu_clock_sample_group(clkid, p, true);
691
692 if (old) {
693 if (old_expires == 0) {
694 old->it_value.tv_sec = 0;
695 old->it_value.tv_nsec = 0;
696 } else {
697 /*
698 * Update the timer in case it has overrun already.
699 * If it has, we'll report it as having overrun and
700 * with the next reloaded timer already ticking,
701 * though we are swallowing that pending
702 * notification here to install the new setting.
703 */
704 u64 exp = bump_cpu_timer(timer, val);
705
706 if (val < exp) {
707 old_expires = exp - val;
708 old->it_value = ns_to_timespec64(old_expires);
709 } else {
710 old->it_value.tv_nsec = 1;
711 old->it_value.tv_sec = 0;
712 }
713 }
714 }
715
716 if (unlikely(ret)) {
717 /*
718 * We are colliding with the timer actually firing.
719 * Punt after filling in the timer's old value, and
720 * disable this firing since we are already reporting
721 * it as an overrun (thanks to bump_cpu_timer above).
722 */
723 unlock_task_sighand(p, &flags);
724 goto out;
725 }
726
727 if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
728 new_expires += val;
729 }
730
731 /*
732 * Install the new expiry time (or zero).
733 * For a timer with no notification action, we don't actually
734 * arm the timer (we'll just fake it for timer_gettime).
735 */
736 cpu_timer_setexpires(ctmr, new_expires);
737 if (new_expires != 0 && val < new_expires) {
738 arm_timer(timer, p);
739 }
740
741 unlock_task_sighand(p, &flags);
742 /*
743 * Install the new reload setting, and
744 * set up the signal and overrun bookkeeping.
745 */
746 timer->it_interval = timespec64_to_ktime(new->it_interval);
747
748 /*
749 * This acts as a modification timestamp for the timer,
750 * so any automatic reload attempt will punt on seeing
751 * that we have reset the timer manually.
752 */
753 timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
754 ~REQUEUE_PENDING;
755 timer->it_overrun_last = 0;
756 timer->it_overrun = -1;
757
758 if (val >= new_expires) {
759 if (new_expires != 0) {
760 /*
761 * The designated time already passed, so we notify
762 * immediately, even if the thread never runs to
763 * accumulate more time on this clock.
764 */
765 cpu_timer_fire(timer);
766 }
767
768 /*
769 * Make sure we don't keep around the process wide cputime
770 * counter or the tick dependency if they are not necessary.
771 */
772 sighand = lock_task_sighand(p, &flags);
773 if (!sighand)
774 goto out;
775
776 if (!cpu_timer_queued(ctmr))
777 trigger_base_recalc_expires(timer, p);
778
779 unlock_task_sighand(p, &flags);
780 }
781 out:
782 rcu_read_unlock();
783 if (old)
784 old->it_interval = ns_to_timespec64(old_incr);
785
786 return ret;
787 }
788
789 static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
790 {
791 clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
792 struct cpu_timer *ctmr = &timer->it.cpu;
793 u64 now, expires = cpu_timer_getexpires(ctmr);
794 struct task_struct *p;
795
796 rcu_read_lock();
797 p = cpu_timer_task_rcu(timer);
798 if (!p)
799 goto out;
800
801 /*
802 * Easy part: convert the reload time.
803 */
804 itp->it_interval = ktime_to_timespec64(timer->it_interval);
805
806 if (!expires)
807 goto out;
808
809 /*
810 * Sample the clock to take the difference with the expiry time.
811 */
812 if (CPUCLOCK_PERTHREAD(timer->it_clock))
813 now = cpu_clock_sample(clkid, p);
814 else
815 now = cpu_clock_sample_group(clkid, p, false);
816
817 if (now < expires) {
818 itp->it_value = ns_to_timespec64(expires - now);
819 } else {
820 /*
821 * The timer should have expired already, but the firing
822 * hasn't taken place yet. Say it's just about to expire.
823 */
824 itp->it_value.tv_nsec = 1;
825 itp->it_value.tv_sec = 0;
826 }
827 out:
828 rcu_read_unlock();
829 }
830
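/*
 * Cap on how many expired timers collect_timerqueue() moves onto the
 * firing list in a single pass, presumably to bound the time spent
 * with sighand lock held; anything left over is picked up on a later
 * tick because the returned expiry is written back into the nextevt
 * cache.
 */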
831 #define MAX_COLLECTED 20
832
833 static u64 collect_timerqueue(struct timerqueue_head *head,
834 struct list_head *firing, u64 now)
835 {
836 struct timerqueue_node *next;
837 int i = 0;
838
839 while ((next = timerqueue_getnext(head))) {
840 struct cpu_timer *ctmr;
841 u64 expires;
842
843 ctmr = container_of(next, struct cpu_timer, node);
844 expires = cpu_timer_getexpires(ctmr);
845 /* Limit the number of timers to expire at once */
846 if (++i == MAX_COLLECTED || now < expires)
847 return expires;
848
849 ctmr->firing = 1;
850 /* See posix_cpu_timer_wait_running() */
851 rcu_assign_pointer(ctmr->handling, current);
852 cpu_timer_dequeue(ctmr);
853 list_add_tail(&ctmr->elist, firing);
854 }
855
856 return U64_MAX;
857 }
858
859 static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
860 struct list_head *firing)
861 {
862 struct posix_cputimer_base *base = pct->bases;
863 int i;
864
865 for (i = 0; i < CPUCLOCK_MAX; i++, base++) {
866 base->nextevt = collect_timerqueue(&base->tqhead, firing,
867 samples[i]);
868 }
869 }
870
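/*
 * SCHED_DEADLINE tasks that asked for overrun notification
 * (SCHED_FLAG_DL_OVERRUN) have dl.dl_overrun set by the deadline
 * scheduler when they consume more than their reserved runtime; that
 * flag is translated into a group-wide SIGXCPU here.
 */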
871 static inline void check_dl_overrun(struct task_struct *tsk)
872 {
873 if (tsk->dl.dl_overrun) {
874 tsk->dl.dl_overrun = 0;
875 send_signal_locked(SIGXCPU, SEND_SIG_PRIV, tsk, PIDTYPE_TGID);
876 }
877 }
878
879 static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
880 {
881 if (time < limit)
882 return false;
883
884 if (print_fatal_signals) {
885 pr_info("%s Watchdog Timeout (%s): %s[%d]\n",
886 rt ? "RT" : "CPU", hard ? "hard" : "soft",
887 current->comm, task_pid_nr(current));
888 }
889 send_signal_locked(signo, SEND_SIG_PRIV, current, PIDTYPE_TGID);
890 return true;
891 }
892
893 /*
894 * Check for any per-thread CPU timers that have fired and move them off
895 * the tsk->cpu_timers[N] list onto the firing list. Here we update the
896 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
897 */
898 static void check_thread_timers(struct task_struct *tsk,
899 struct list_head *firing)
900 {
901 struct posix_cputimers *pct = &tsk->posix_cputimers;
902 u64 samples[CPUCLOCK_MAX];
903 unsigned long soft;
904
905 if (dl_task(tsk))
906 check_dl_overrun(tsk);
907
908 if (expiry_cache_is_inactive(pct))
909 return;
910
911 task_sample_cputime(tsk, samples);
912 collect_posix_cputimers(pct, samples, firing);
913
914 /*
915 * Check for the special case thread timers.
916 */
917 soft = task_rlimit(tsk, RLIMIT_RTTIME);
918 if (soft != RLIM_INFINITY) {
919 /* Task RT timeout is accounted in jiffies. RTTIME is usec */
920 unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ);
921 unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
922
923 /* At the hard limit, send SIGKILL. No further action. */
924 if (hard != RLIM_INFINITY &&
925 check_rlimit(rttime, hard, SIGKILL, true, true))
926 return;
927
928 /* At the soft limit, send a SIGXCPU every second */
929 if (check_rlimit(rttime, soft, SIGXCPU, true, false)) {
930 soft += USEC_PER_SEC;
931 tsk->signal->rlim[RLIMIT_RTTIME].rlim_cur = soft;
932 }
933 }
934
935 if (expiry_cache_is_inactive(pct))
936 tick_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
937 }
938
939 static inline void stop_process_timers(struct signal_struct *sig)
940 {
941 struct posix_cputimers *pct = &sig->posix_cputimers;
942
943 /* Turn off the active flag. This is done without locking. */
944 WRITE_ONCE(pct->timers_active, false);
945 tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER);
946 }
947
948 static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
949 u64 *expires, u64 cur_time, int signo)
950 {
951 if (!it->expires)
952 return;
953
954 if (cur_time >= it->expires) {
955 if (it->incr)
956 it->expires += it->incr;
957 else
958 it->expires = 0;
959
960 trace_itimer_expire(signo == SIGPROF ?
961 ITIMER_PROF : ITIMER_VIRTUAL,
962 task_tgid(tsk), cur_time);
963 send_signal_locked(signo, SEND_SIG_PRIV, tsk, PIDTYPE_TGID);
964 }
965
966 if (it->expires && it->expires < *expires)
967 *expires = it->expires;
968 }
969
970 /*
971 * Check for any per-thread CPU timers that have fired and move them
972 * off the tsk->*_timers list onto the firing list. Per-thread timers
973 * have already been taken off.
974 */
975 static void check_process_timers(struct task_struct *tsk,
976 struct list_head *firing)
977 {
978 struct signal_struct *const sig = tsk->signal;
979 struct posix_cputimers *pct = &sig->posix_cputimers;
980 u64 samples[CPUCLOCK_MAX];
981 unsigned long soft;
982
983 /*
984 * If there are no active process wide timers (POSIX 1.b, itimers,
985 * RLIMIT_CPU) nothing to check. Also skip the process wide timer
986 * processing when there is already another task handling them.
987 */
988 if (!READ_ONCE(pct->timers_active) || pct->expiry_active)
989 return;
990
991 /*
992 * Signify that a thread is checking for process timers.
993 * Write access to this field is protected by the sighand lock.
994 */
995 pct->expiry_active = true;
996
997 /*
998 * Collect the current process totals. Group accounting is active
999 * so the sample can be taken directly.
1000 */
1001 proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic, samples);
1002 collect_posix_cputimers(pct, samples, firing);
1003
1004 /*
1005 * Check for the special case process timers.
1006 */
1007 check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF],
1008 &pct->bases[CPUCLOCK_PROF].nextevt,
1009 samples[CPUCLOCK_PROF], SIGPROF);
1010 check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT],
1011 &pct->bases[CPUCLOCK_VIRT].nextevt,
1012 samples[CPUCLOCK_VIRT], SIGVTALRM);
1013
1014 soft = task_rlimit(tsk, RLIMIT_CPU);
1015 if (soft != RLIM_INFINITY) {
1016 /* RLIMIT_CPU is in seconds. Samples are nanoseconds */
1017 unsigned long hard = task_rlimit_max(tsk, RLIMIT_CPU);
1018 u64 ptime = samples[CPUCLOCK_PROF];
1019 u64 softns = (u64)soft * NSEC_PER_SEC;
1020 u64 hardns = (u64)hard * NSEC_PER_SEC;
1021
1022 /* At the hard limit, send SIGKILL. No further action. */
1023 if (hard != RLIM_INFINITY &&
1024 check_rlimit(ptime, hardns, SIGKILL, false, true))
1025 return;
1026
1027 /* At the soft limit, send a SIGXCPU every second */
1028 if (check_rlimit(ptime, softns, SIGXCPU, false, false)) {
1029 sig->rlim[RLIMIT_CPU].rlim_cur = soft + 1;
1030 softns += NSEC_PER_SEC;
1031 }
1032
1033 /* Update the expiry cache */
1034 if (softns < pct->bases[CPUCLOCK_PROF].nextevt)
1035 pct->bases[CPUCLOCK_PROF].nextevt = softns;
1036 }
1037
1038 if (expiry_cache_is_inactive(pct))
1039 stop_process_timers(sig);
1040
1041 pct->expiry_active = false;
1042 }
1043
1044 /*
1045 * This is called from the signal code (via posixtimer_rearm)
1046 * when the last timer signal was delivered and we have to reload the timer.
1047 */
1048 static void posix_cpu_timer_rearm(struct k_itimer *timer)
1049 {
1050 clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
1051 struct task_struct *p;
1052 struct sighand_struct *sighand;
1053 unsigned long flags;
1054 u64 now;
1055
1056 rcu_read_lock();
1057 p = cpu_timer_task_rcu(timer);
1058 if (!p)
1059 goto out;
1060
1061 /* Protect timer list r/w in arm_timer() */
1062 sighand = lock_task_sighand(p, &flags);
1063 if (unlikely(sighand == NULL))
1064 goto out;
1065
1066 /*
1067 * Fetch the current sample and update the timer's expiry time.
1068 */
1069 if (CPUCLOCK_PERTHREAD(timer->it_clock))
1070 now = cpu_clock_sample(clkid, p);
1071 else
1072 now = cpu_clock_sample_group(clkid, p, true);
1073
1074 bump_cpu_timer(timer, now);
1075
1076 /*
1077 * Now re-arm for the new expiry time.
1078 */
1079 arm_timer(timer, p);
1080 unlock_task_sighand(p, &flags);
1081 out:
1082 rcu_read_unlock();
1083 }
1084
1085 /**
1086 * task_cputimers_expired - Check whether posix CPU timers are expired
1087 *
1088 * @samples: Array of current samples for the CPUCLOCK clocks
1089 * @pct: Pointer to a posix_cputimers container
1090 *
1091 * Returns true if any member of @samples is greater than the corresponding
1092 * member of @pct->bases[CLK].nextevt. False otherwise
1093 */
1094 static inline bool
1095 task_cputimers_expired(const u64 *samples, struct posix_cputimers *pct)
1096 {
1097 int i;
1098
1099 for (i = 0; i < CPUCLOCK_MAX; i++) {
1100 if (samples[i] >= pct->bases[i].nextevt)
1101 return true;
1102 }
1103 return false;
1104 }
1105
1106 /**
1107 * fastpath_timer_check - POSIX CPU timers fast path.
1108 *
1109 * @tsk: The task (thread) being checked.
1110 *
1111 * Check the task and thread group timers. If both are zero (there are no
1112 * timers set) return false. Otherwise snapshot the task and thread group
1113 * timers and compare them with the corresponding expiration times. Return
1114 * true if a timer has expired, else return false.
1115 */
1116 static inline bool fastpath_timer_check(struct task_struct *tsk)
1117 {
1118 struct posix_cputimers *pct = &tsk->posix_cputimers;
1119 struct signal_struct *sig;
1120
1121 if (!expiry_cache_is_inactive(pct)) {
1122 u64 samples[CPUCLOCK_MAX];
1123
1124 task_sample_cputime(tsk, samples);
1125 if (task_cputimers_expired(samples, pct))
1126 return true;
1127 }
1128
1129 sig = tsk->signal;
1130 pct = &sig->posix_cputimers;
1131 /*
1132 * Check if thread group timers expired when timers are active and
1133 * no other thread in the group is already handling expiry for
1134 * thread group cputimers. These fields are read without the
1135 * sighand lock. However, this is fine because this is meant to be
1136 * a fastpath heuristic to determine whether we should try to
1137 * acquire the sighand lock to handle timer expiry.
1138 *
1139 * In the worst case scenario, if concurrently timers_active is set
1140 * or expiry_active is cleared, but the current thread doesn't see
1141 * the change yet, the timer checks are delayed until the next
1142 * thread in the group gets a scheduler interrupt to handle the
1143 * timer. This isn't an issue in practice because these types of
1144 * delays with signals actually getting sent are expected.
1145 */
1146 if (READ_ONCE(pct->timers_active) && !READ_ONCE(pct->expiry_active)) {
1147 u64 samples[CPUCLOCK_MAX];
1148
1149 proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic,
1150 samples);
1151
1152 if (task_cputimers_expired(samples, pct))
1153 return true;
1154 }
1155
1156 if (dl_task(tsk) && tsk->dl.dl_overrun)
1157 return true;
1158
1159 return false;
1160 }
1161
1162 static void handle_posix_cpu_timers(struct task_struct *tsk);
1163
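/*
 * With CONFIG_POSIX_CPU_TIMERS_TASK_WORK enabled, expiry is not done
 * from the tick interrupt itself: run_posix_cpu_timers() only queues
 * task work, and handle_posix_cpu_timers() then runs in task context
 * on the way back to user space. Without it, expiry happens directly
 * in interrupt context via the stubs in the #else branch below.
 */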
1164 #ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
1165 static void posix_cpu_timers_work(struct callback_head *work)
1166 {
1167 struct posix_cputimers_work *cw = container_of(work, typeof(*cw), work);
1168
1169 mutex_lock(&cw->mutex);
1170 handle_posix_cpu_timers(current);
1171 mutex_unlock(&cw->mutex);
1172 }
1173
1174 /*
1175 * Invoked from the posix-timer core when a cancel operation failed because
1176 * the timer is marked firing. The caller holds rcu_read_lock(), which
1177 * protects the timer and the task which is expiring it from being freed.
1178 */
1179 static void posix_cpu_timer_wait_running(struct k_itimer *timr)
1180 {
1181 struct task_struct *tsk = rcu_dereference(timr->it.cpu.handling);
1182
1183 /* Has the handling task completed expiry already? */
1184 if (!tsk)
1185 return;
1186
1187 /* Ensure that the task cannot go away */
1188 get_task_struct(tsk);
1189 /* Now drop the RCU protection so the mutex can be locked */
1190 rcu_read_unlock();
1191 /* Wait on the expiry mutex */
1192 mutex_lock(&tsk->posix_cputimers_work.mutex);
1193 /* Release it immediately again. */
1194 mutex_unlock(&tsk->posix_cputimers_work.mutex);
1195 /* Drop the task reference. */
1196 put_task_struct(tsk);
1197 /* Relock RCU so the callsite is balanced */
1198 rcu_read_lock();
1199 }
1200
1201 static void posix_cpu_timer_wait_running_nsleep(struct k_itimer *timr)
1202 {
1203 /* Ensure that timr->it.cpu.handling task cannot go away */
1204 rcu_read_lock();
1205 spin_unlock_irq(&timr->it_lock);
1206 posix_cpu_timer_wait_running(timr);
1207 rcu_read_unlock();
1208 /* @timr is on stack and is valid */
1209 spin_lock_irq(&timr->it_lock);
1210 }
1211
1212 /*
1213 * Clear existing posix CPU timers task work.
1214 */
1215 void clear_posix_cputimers_work(struct task_struct *p)
1216 {
1217 /*
1218 * A copied work entry from the old task is not meaningful, clear it.
1219 * N.B. init_task_work will not do this.
1220 */
1221 memset(&p->posix_cputimers_work.work, 0,
1222 sizeof(p->posix_cputimers_work.work));
1223 init_task_work(&p->posix_cputimers_work.work,
1224 posix_cpu_timers_work);
1225 mutex_init(&p->posix_cputimers_work.mutex);
1226 p->posix_cputimers_work.scheduled = false;
1227 }
1228
1229 /*
1230 * Initialize posix CPU timers task work in init task. Out of line to
1231 * keep the callback static and to avoid header recursion hell.
1232 */
1233 void __init posix_cputimers_init_work(void)
1234 {
1235 clear_posix_cputimers_work(current);
1236 }
1237
1238 /*
1239 * Note: All operations on tsk->posix_cputimers_work.scheduled happen either
1240 * in hard interrupt context or in task context with interrupts
1241 * disabled. Aside of that the writer/reader interaction is always in the
1242 * context of the current task, which means they are strict per CPU.
1243 */
1244 static inline bool posix_cpu_timers_work_scheduled(struct task_struct *tsk)
1245 {
1246 return tsk->posix_cputimers_work.scheduled;
1247 }
1248
1249 static inline void __run_posix_cpu_timers(struct task_struct *tsk)
1250 {
1251 if (WARN_ON_ONCE(tsk->posix_cputimers_work.scheduled))
1252 return;
1253
1254 /* Schedule task work to actually expire the timers */
1255 tsk->posix_cputimers_work.scheduled = true;
1256 task_work_add(tsk, &tsk->posix_cputimers_work.work, TWA_RESUME);
1257 }
1258
1259 static inline bool posix_cpu_timers_enable_work(struct task_struct *tsk,
1260 unsigned long start)
1261 {
1262 bool ret = true;
1263
1264 /*
1265 * On !RT kernels interrupts are disabled while collecting expired
1266 * timers, so no tick can happen and the fast path check can be
1267 * reenabled without further checks.
1268 */
1269 if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
1270 tsk->posix_cputimers_work.scheduled = false;
1271 return true;
1272 }
1273
1274 /*
1275 * On RT enabled kernels ticks can happen while the expired timers
1276 * are collected under sighand lock. But any tick which observes
1277 * the CPUTIMERS_WORK_SCHEDULED bit set, does not run the fastpath
1278 * checks. So reenabling the tick work has to be done carefully:
1279 *
1280 * Disable interrupts and run the fast path check if jiffies have
1281 * advanced since the collecting of expired timers started. If
1282 * jiffies have not advanced or the fast path check did not find
1283 * newly expired timers, reenable the fast path check in the timer
1284 * interrupt. If there are newly expired timers, return false and
1285 * let the collection loop repeat.
1286 */
1287 local_irq_disable();
1288 if (start != jiffies && fastpath_timer_check(tsk))
1289 ret = false;
1290 else
1291 tsk->posix_cputimers_work.scheduled = false;
1292 local_irq_enable();
1293
1294 return ret;
1295 }
1296 #else /* CONFIG_POSIX_CPU_TIMERS_TASK_WORK */
1297 static inline void __run_posix_cpu_timers(struct task_struct *tsk)
1298 {
1299 lockdep_posixtimer_enter();
1300 handle_posix_cpu_timers(tsk);
1301 lockdep_posixtimer_exit();
1302 }
1303
1304 static void posix_cpu_timer_wait_running(struct k_itimer *timr)
1305 {
1306 cpu_relax();
1307 }
1308
1309 static void posix_cpu_timer_wait_running_nsleep(struct k_itimer *timr)
1310 {
1311 spin_unlock_irq(&timr->it_lock);
1312 cpu_relax();
1313 spin_lock_irq(&timr->it_lock);
1314 }
1315
1316 static inline bool posix_cpu_timers_work_scheduled(struct task_struct *tsk)
1317 {
1318 return false;
1319 }
1320
1321 static inline bool posix_cpu_timers_enable_work(struct task_struct *tsk,
1322 unsigned long start)
1323 {
1324 return true;
1325 }
1326 #endif /* CONFIG_POSIX_CPU_TIMERS_TASK_WORK */
1327
1328 static void handle_posix_cpu_timers(struct task_struct *tsk)
1329 {
1330 struct k_itimer *timer, *next;
1331 unsigned long flags, start;
1332 LIST_HEAD(firing);
1333
1334 if (!lock_task_sighand(tsk, &flags))
1335 return;
1336
1337 do {
1338 /*
1339 * On RT locking sighand lock does not disable interrupts,
1340 * so this needs to be careful vs. ticks. Store the current
1341 * jiffies value.
1342 */
1343 start = READ_ONCE(jiffies);
1344 barrier();
1345
1346 /*
1347 * Here we take off tsk->signal->cpu_timers[N] and
1348 * tsk->cpu_timers[N] all the timers that are firing, and
1349 * put them on the firing list.
1350 */
1351 check_thread_timers(tsk, &firing);
1352
1353 check_process_timers(tsk, &firing);
1354
1355 /*
1356 * The above timer checks have updated the expiry cache and
1357 * because nothing can have queued or modified timers after
1358 * sighand lock was taken above it is guaranteed to be
1359 * consistent. So the next timer interrupt fastpath check
1360 * will find valid data.
1361 *
1362 * If timer expiry runs in the timer interrupt context then
1363 * the loop is not relevant as timers will be directly
1364 * expired in interrupt context. The stub function below
1365 * always returns true, which allows the compiler to
1366 * optimize the loop out.
1367 *
1368 * If timer expiry is deferred to task work context then
1369 * the following rules apply:
1370 *
1371 * - On !RT kernels no tick can have happened on this CPU
1372 * after sighand lock was acquired because interrupts are
1373 * disabled. So reenabling task work before dropping
1374 * sighand lock and reenabling interrupts is race free.
1375 *
1376 * - On RT kernels ticks might have happened but the tick
1377 * work ignored posix CPU timer handling because the
1378 * CPUTIMERS_WORK_SCHEDULED bit is set. Reenabling work
1379 * must be done very carefully including a check whether
1380 * ticks have happened since the start of the timer
1381 * expiry checks. posix_cpu_timers_enable_work() takes
1382 * care of that and eventually lets the expiry checks
1383 * run again.
1384 */
1385 } while (!posix_cpu_timers_enable_work(tsk, start));
1386
1387 /*
1388 * We must release sighand lock before taking any timer's lock.
1389 * There is a potential race with timer deletion here, as the
1390 * siglock now protects our private firing list. We have set
1391 * the firing flag in each timer, so that a deletion attempt
1392 * that gets the timer lock before we do will give it up and
1393 * spin until we've taken care of that timer below.
1394 */
1395 unlock_task_sighand(tsk, &flags);
1396
1397 /*
1398 * Now that all the timers on our list have the firing flag,
1399 * no one will touch their list entries but us. We'll take
1400 * each timer's lock before clearing its firing flag, so no
1401 * timer call will interfere.
1402 */
1403 list_for_each_entry_safe(timer, next, &firing, it.cpu.elist) {
1404 int cpu_firing;
1405
1406 /*
1407 * spin_lock() is sufficient here even independent of the
1408 * expiry context. If expiry happens in hard interrupt
1409 * context it's obvious. For task work context it's safe
1410 * because all other operations on timer::it_lock happen in
1411 * task context (syscall or exit).
1412 */
1413 spin_lock(&timer->it_lock);
1414 list_del_init(&timer->it.cpu.elist);
1415 cpu_firing = timer->it.cpu.firing;
1416 timer->it.cpu.firing = 0;
1417 /*
1418 * The firing flag is -1 if we collided with a reset
1419 * of the timer, which already reported this
1420 * almost-firing as an overrun. So don't generate an event.
1421 */
1422 if (likely(cpu_firing >= 0))
1423 cpu_timer_fire(timer);
1424 /* See posix_cpu_timer_wait_running() */
1425 rcu_assign_pointer(timer->it.cpu.handling, NULL);
1426 spin_unlock(&timer->it_lock);
1427 }
1428 }
1429
1430 /*
1431 * This is called from the timer interrupt handler. The irq handler has
1432 * already updated our counts. We need to check if any timers fire now.
1433 * Interrupts are disabled.
1434 */
1435 void run_posix_cpu_timers(void)
1436 {
1437 struct task_struct *tsk = current;
1438
1439 lockdep_assert_irqs_disabled();
1440
1441 /*
1442 * If the actual expiry is deferred to task work context and the
1443 * work is already scheduled there is no point to do anything here.
1444 */
1445 if (posix_cpu_timers_work_scheduled(tsk))
1446 return;
1447
1448 /*
1449 * The fast path checks that there are no expired thread or thread
1450 * group timers. If that's so, just return.
1451 */
1452 if (!fastpath_timer_check(tsk))
1453 return;
1454
1455 __run_posix_cpu_timers(tsk);
1456 }
1457
1458 /*
1459 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
1460 * The tsk->sighand->siglock must be held by the caller.
1461 */
1462 void set_process_cpu_timer(struct task_struct *tsk, unsigned int clkid,
1463 u64 *newval, u64 *oldval)
1464 {
1465 u64 now, *nextevt;
1466
1467 if (WARN_ON_ONCE(clkid >= CPUCLOCK_SCHED))
1468 return;
1469
1470 nextevt = &tsk->signal->posix_cputimers.bases[clkid].nextevt;
1471 now = cpu_clock_sample_group(clkid, tsk, true);
1472
1473 if (oldval) {
1474 /*
1475 * We are setting itimer. The *oldval is absolute and we update
1476 * it to be relative, *newval argument is relative and we update
1477 * it to be absolute.
1478 */
1479 if (*oldval) {
1480 if (*oldval <= now) {
1481 /* Just about to fire. */
1482 *oldval = TICK_NSEC;
1483 } else {
1484 *oldval -= now;
1485 }
1486 }
1487
1488 if (*newval)
1489 *newval += now;
1490 }
1491
1492 /*
1493 * Update expiration cache if this is the earliest timer. CPUCLOCK_PROF
1494 * expiry cache is also used by RLIMIT_CPU!
1495 */
1496 if (*newval < *nextevt)
1497 *nextevt = *newval;
1498
1499 tick_dep_set_signal(tsk, TICK_DEP_BIT_POSIX_TIMER);
1500 }
1501
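/*
 * Typical userspace path into do_cpu_nanosleep() (illustration):
 * clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, 0, &req, &rem) reaches it
 * via process_cpu_nsleep()/posix_cpu_nsleep(); the sleep is
 * implemented with a temporary on-stack timer as set up below.
 */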
1502 static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
1503 const struct timespec64 *rqtp)
1504 {
1505 struct itimerspec64 it;
1506 struct k_itimer timer;
1507 u64 expires;
1508 int error;
1509
1510 /*
1511 * Set up a temporary timer and then wait for it to go off.
1512 */
1513 memset(&timer, 0, sizeof timer);
1514 spin_lock_init(&timer.it_lock);
1515 timer.it_clock = which_clock;
1516 timer.it_overrun = -1;
1517 error = posix_cpu_timer_create(&timer);
1518 timer.it_process = current;
1519
1520 if (!error) {
1521 static struct itimerspec64 zero_it;
1522 struct restart_block *restart;
1523
1524 memset(&it, 0, sizeof(it));
1525 it.it_value = *rqtp;
1526
1527 spin_lock_irq(&timer.it_lock);
1528 error = posix_cpu_timer_set(&timer, flags, &it, NULL);
1529 if (error) {
1530 spin_unlock_irq(&timer.it_lock);
1531 return error;
1532 }
1533
1534 while (!signal_pending(current)) {
1535 if (!cpu_timer_getexpires(&timer.it.cpu)) {
1536 /*
1537 * Our timer fired and was reset; the
1538 * deletion below cannot fail.
1539 */
1540 posix_cpu_timer_del(&timer);
1541 spin_unlock_irq(&timer.it_lock);
1542 return 0;
1543 }
1544
1545 /*
1546 * Block until cpu_timer_fire (or a signal) wakes us.
1547 */
1548 __set_current_state(TASK_INTERRUPTIBLE);
1549 spin_unlock_irq(&timer.it_lock);
1550 schedule();
1551 spin_lock_irq(&timer.it_lock);
1552 }
1553
1554 /*
1555 * We were interrupted by a signal.
1556 */
1557 expires = cpu_timer_getexpires(&timer.it.cpu);
1558 error = posix_cpu_timer_set(&timer, 0, &zero_it, &it);
1559 if (!error) {
1560 /* Timer is now unarmed, deletion can not fail. */
1561 posix_cpu_timer_del(&timer);
1562 } else {
1563 while (error == TIMER_RETRY) {
1564 posix_cpu_timer_wait_running_nsleep(&timer);
1565 error = posix_cpu_timer_del(&timer);
1566 }
1567 }
1568
1569 spin_unlock_irq(&timer.it_lock);
1570
1571 if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
1572 /*
1573 * It actually did fire already.
1574 */
1575 return 0;
1576 }
1577
1578 error = -ERESTART_RESTARTBLOCK;
1579 /*
1580 * Report back to the user the time still remaining.
1581 */
1582 restart = &current->restart_block;
1583 restart->nanosleep.expires = expires;
1584 if (restart->nanosleep.type != TT_NONE)
1585 error = nanosleep_copyout(restart, &it.it_value);
1586 }
1587
1588 return error;
1589 }
1590
1591 static long posix_cpu_nsleep_restart(struct restart_block *restart_block);
1592
1593 static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
1594 const struct timespec64 *rqtp)
1595 {
1596 struct restart_block *restart_block = &current->restart_block;
1597 int error;
1598
1599 /*
1600 * Diagnose required errors first.
1601 */
1602 if (CPUCLOCK_PERTHREAD(which_clock) &&
1603 (CPUCLOCK_PID(which_clock) == 0 ||
1604 CPUCLOCK_PID(which_clock) == task_pid_vnr(current)))
1605 return -EINVAL;
1606
1607 error = do_cpu_nanosleep(which_clock, flags, rqtp);
1608
1609 if (error == -ERESTART_RESTARTBLOCK) {
1610
1611 if (flags & TIMER_ABSTIME)
1612 return -ERESTARTNOHAND;
1613
1614 restart_block->nanosleep.clockid = which_clock;
1615 set_restart_fn(restart_block, posix_cpu_nsleep_restart);
1616 }
1617 return error;
1618 }
1619
1620 static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
1621 {
1622 clockid_t which_clock = restart_block->nanosleep.clockid;
1623 struct timespec64 t;
1624
1625 t = ns_to_timespec64(restart_block->nanosleep.expires);
1626
1627 return do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t);
1628 }
1629
1630 #define PROCESS_CLOCK make_process_cpuclock(0, CPUCLOCK_SCHED)
1631 #define THREAD_CLOCK make_thread_cpuclock(0, CPUCLOCK_SCHED)
1632
1633 static int process_cpu_clock_getres(const clockid_t which_clock,
1634 struct timespec64 *tp)
1635 {
1636 return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
1637 }
1638 static int process_cpu_clock_get(const clockid_t which_clock,
1639 struct timespec64 *tp)
1640 {
1641 return posix_cpu_clock_get(PROCESS_CLOCK, tp);
1642 }
1643 static int process_cpu_timer_create(struct k_itimer *timer)
1644 {
1645 timer->it_clock = PROCESS_CLOCK;
1646 return posix_cpu_timer_create(timer);
1647 }
1648 static int process_cpu_nsleep(const clockid_t which_clock, int flags,
1649 const struct timespec64 *rqtp)
1650 {
1651 return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp);
1652 }
1653 static int thread_cpu_clock_getres(const clockid_t which_clock,
1654 struct timespec64 *tp)
1655 {
1656 return posix_cpu_clock_getres(THREAD_CLOCK, tp);
1657 }
1658 static int thread_cpu_clock_get(const clockid_t which_clock,
1659 struct timespec64 *tp)
1660 {
1661 return posix_cpu_clock_get(THREAD_CLOCK, tp);
1662 }
1663 static int thread_cpu_timer_create(struct k_itimer *timer)
1664 {
1665 timer->it_clock = THREAD_CLOCK;
1666 return posix_cpu_timer_create(timer);
1667 }
1668
1669 const struct k_clock clock_posix_cpu = {
1670 .clock_getres = posix_cpu_clock_getres,
1671 .clock_set = posix_cpu_clock_set,
1672 .clock_get_timespec = posix_cpu_clock_get,
1673 .timer_create = posix_cpu_timer_create,
1674 .nsleep = posix_cpu_nsleep,
1675 .timer_set = posix_cpu_timer_set,
1676 .timer_del = posix_cpu_timer_del,
1677 .timer_get = posix_cpu_timer_get,
1678 .timer_rearm = posix_cpu_timer_rearm,
1679 .timer_wait_running = posix_cpu_timer_wait_running,
1680 };
1681
1682 const struct k_clock clock_process = {
1683 .clock_getres = process_cpu_clock_getres,
1684 .clock_get_timespec = process_cpu_clock_get,
1685 .timer_create = process_cpu_timer_create,
1686 .nsleep = process_cpu_nsleep,
1687 };
1688
1689 const struct k_clock clock_thread = {
1690 .clock_getres = thread_cpu_clock_getres,
1691 .clock_get_timespec = thread_cpu_clock_get,
1692 .timer_create = thread_cpu_timer_create,
1693 };
1694