// SPDX-License-Identifier: GPL-2.0
/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched/signal.h>
#include <linux/sched/cputime.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <linux/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/compat.h>
#include <linux/sched/deadline.h>

#include "posix-timers.h"

static void posix_cpu_timer_rearm(struct k_itimer *timer);

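/*
 * Seed the expiry caches for a new process. An RLIMIT_CPU limit is
 * tracked through the CPUCLOCK_PROF expiry cache, so a finite limit
 * arms the process-wide accounting from the start.
 */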
void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit)
{
	posix_cputimers_init(pct);
	if (cpu_limit != RLIM_INFINITY) {
		pct->bases[CPUCLOCK_PROF].nextevt = cpu_limit * NSEC_PER_SEC;
		pct->timers_active = true;
	}
}

/*
 * Called after updating RLIMIT_CPU to run cpu timer and update
 * tsk->signal->posix_cputimers.bases[clock].nextevt expiration cache if
 * necessary. Needs siglock protection since other code may update the
 * expiration cache as well.
 */
void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
{
	u64 nsecs = rlim_new * NSEC_PER_SEC;

	spin_lock_irq(&task->sighand->siglock);
	set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL);
	spin_unlock_irq(&task->sighand->siglock);
}

/*
 * Functions for validating access to tasks.
 */
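/*
 * A CPU clockid encodes the target PID in its upper bits (extracted by
 * CPUCLOCK_PID) and the clock type in the low bits: CPUCLOCK_WHICH
 * selects PROF/VIRT/SCHED and CPUCLOCK_PERTHREAD distinguishes
 * per-thread from process-wide clocks.
 */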
static struct pid *pid_for_clock(const clockid_t clock, bool gettime)
{
	const bool thread = !!CPUCLOCK_PERTHREAD(clock);
	const pid_t upid = CPUCLOCK_PID(clock);
	struct pid *pid;

	if (CPUCLOCK_WHICH(clock) >= CPUCLOCK_MAX)
		return NULL;

	/*
	 * If the encoded PID is 0, then the timer is targeted at current
	 * or the process to which current belongs.
	 */
	if (upid == 0)
		return thread ? task_pid(current) : task_tgid(current);

	pid = find_vpid(upid);
	if (!pid)
		return NULL;

	if (thread) {
		struct task_struct *tsk = pid_task(pid, PIDTYPE_PID);
		return (tsk && same_thread_group(tsk, current)) ? pid : NULL;
	}

	/*
	 * For clock_gettime(PROCESS) allow finding the process with
	 * the pid of the current task. The code needs the tgid
	 * of the process so that pid_task(pid, PIDTYPE_TGID) can be
	 * used to find the process.
	 */
	if (gettime && (pid == task_pid(current)))
		return task_tgid(current);

	/*
	 * For process clocks, require that the pid identifies a process.
	 */
	return pid_has_task(pid, PIDTYPE_TGID) ? pid : NULL;
}

static inline int validate_clock_permissions(const clockid_t clock)
{
	int ret;

	rcu_read_lock();
	ret = pid_for_clock(clock, false) ? 0 : -EINVAL;
	rcu_read_unlock();

	return ret;
}

static inline enum pid_type clock_pid_type(const clockid_t clock)
{
	return CPUCLOCK_PERTHREAD(clock) ? PIDTYPE_PID : PIDTYPE_TGID;
}

static inline struct task_struct *cpu_timer_task_rcu(struct k_itimer *timer)
{
	return pid_task(timer->it.cpu.pid, clock_pid_type(timer->it_clock));
}

/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static u64 bump_cpu_timer(struct k_itimer *timer, u64 now)
{
	u64 delta, incr, expires = timer->it.cpu.node.expires;
	int i;

	if (!timer->it_interval)
		return expires;

	if (now < expires)
		return expires;

	incr = timer->it_interval;
	delta = now + incr - expires;

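	/*
	 * Advance the expiry time past 'now' in whole increments via
	 * binary long division, counting the skipped periods as
	 * overruns. Illustrative example: with incr = 4, expires = 10
	 * and now = 21, delta = 15; incr is first doubled to 8 (i = 1),
	 * then the halving loop adds 8 + 4 to expires (new expiry
	 * 22 > now) and accumulates 2 + 1 = 3 overruns.
	 */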
	/* Don't use (incr*2 < delta), incr*2 might overflow. */
	for (i = 0; incr < delta - incr; i++)
		incr = incr << 1;

	for (; i >= 0; incr >>= 1, i--) {
		if (delta < incr)
			continue;

		timer->it.cpu.node.expires += incr;
		timer->it_overrun += 1LL << i;
		delta -= incr;
	}
	return timer->it.cpu.node.expires;
}

/* Check whether all cache entries contain U64_MAX, i.e. eternal expiry time */
static inline bool expiry_cache_is_inactive(const struct posix_cputimers *pct)
{
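	/*
	 * ~x is non-zero unless x == U64_MAX, so the OR of the three
	 * complements is zero only if every nextevt is U64_MAX.
	 */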
	return !(~pct->bases[CPUCLOCK_PROF].nextevt |
		 ~pct->bases[CPUCLOCK_VIRT].nextevt |
		 ~pct->bases[CPUCLOCK_SCHED].nextevt);
}

static int
posix_cpu_clock_getres(const clockid_t which_clock, struct timespec64 *tp)
{
	int error = validate_clock_permissions(which_clock);

	if (!error) {
		tp->tv_sec = 0;
		tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
		if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
			/*
			 * If sched_clock is using a cycle counter, we
			 * have no way to export its true resolution,
			 * but it is much finer than 1s/HZ.
			 */
			tp->tv_nsec = 1;
		}
	}
	return error;
}

static int
posix_cpu_clock_set(const clockid_t clock, const struct timespec64 *tp)
{
	int error = validate_clock_permissions(clock);

	/*
	 * You can never reset a CPU clock, but we check for other errors
	 * in the call before failing with EPERM.
	 */
	return error ? : -EPERM;
}

/*
 * Sample a per-thread clock for the given task. clkid is validated.
 */
static u64 cpu_clock_sample(const clockid_t clkid, struct task_struct *p)
{
	u64 utime, stime;

	if (clkid == CPUCLOCK_SCHED)
		return task_sched_runtime(p);

	task_cputime(p, &utime, &stime);

	switch (clkid) {
	case CPUCLOCK_PROF:
		return utime + stime;
	case CPUCLOCK_VIRT:
		return utime;
	default:
		WARN_ON_ONCE(1);
	}
	return 0;
}

static inline void store_samples(u64 *samples, u64 stime, u64 utime, u64 rtime)
{
	samples[CPUCLOCK_PROF] = stime + utime;
	samples[CPUCLOCK_VIRT] = utime;
	samples[CPUCLOCK_SCHED] = rtime;
}

static void task_sample_cputime(struct task_struct *p, u64 *samples)
{
	u64 stime, utime;

	task_cputime(p, &utime, &stime);
	store_samples(samples, stime, utime, p->se.sum_exec_runtime);
}

static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
				       u64 *samples)
{
	u64 stime, utime, rtime;

	utime = atomic64_read(&at->utime);
	stime = atomic64_read(&at->stime);
	rtime = atomic64_read(&at->sum_exec_runtime);
	store_samples(samples, stime, utime, rtime);
}

/*
 * Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
 * to avoid race conditions with concurrent updates to cputime.
 */
static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
{
	u64 curr_cputime;
retry:
	curr_cputime = atomic64_read(cputime);
	if (sum_cputime > curr_cputime) {
		if (atomic64_cmpxchg(cputime, curr_cputime, sum_cputime) != curr_cputime)
			goto retry;
	}
}

static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic,
			      struct task_cputime *sum)
{
	__update_gt_cputime(&cputime_atomic->utime, sum->utime);
	__update_gt_cputime(&cputime_atomic->stime, sum->stime);
	__update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
}

/**
 * thread_group_sample_cputime - Sample cputime for a given task
 * @tsk:	Task for which cputime needs to be sampled
 * @samples:	Storage for time samples
 *
 * Called from sys_getitimer() to calculate the expiry time of an active
 * timer. That means group cputime accounting is already active. Called
 * with task sighand lock held.
 *
 * Updates @samples with an up-to-date sample of the thread group cputimes.
 */
void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	struct posix_cputimers *pct = &tsk->signal->posix_cputimers;

	WARN_ON_ONCE(!pct->timers_active);

	proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
}

/**
 * thread_group_start_cputime - Start cputime and return a sample
 * @tsk:	Task for which cputime needs to be started
 * @samples:	Storage for time samples
 *
 * Thread group cputime accounting is avoided when there are no posix
 * CPU timers armed. Before starting a timer it's required to check whether
 * the time accounting is active. If not, a full update of the atomic
 * accounting store needs to be done and the accounting enabled.
 *
 * Updates @samples with an up-to-date sample of the thread group cputimes.
 */
static void thread_group_start_cputime(struct task_struct *tsk, u64 *samples)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	struct posix_cputimers *pct = &tsk->signal->posix_cputimers;

	/* Check if cputimer isn't running. This is accessed without locking. */
	if (!READ_ONCE(pct->timers_active)) {
		struct task_cputime sum;

		/*
		 * The POSIX timer interface allows for absolute time expiry
		 * values through the TIMER_ABSTIME flag, therefore we have
		 * to synchronize the timer to the clock every time we start it.
		 */
		thread_group_cputime(tsk, &sum);
		update_gt_cputime(&cputimer->cputime_atomic, &sum);

		/*
		 * We're setting timers_active without a lock. Ensure this
		 * only gets written to in one operation. We set it after
		 * update_gt_cputime() as a small optimization, but
		 * barriers are not required because update_gt_cputime()
		 * can handle concurrent updates.
		 */
		WRITE_ONCE(pct->timers_active, true);
	}
	proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
}

static void __thread_group_cputime(struct task_struct *tsk, u64 *samples)
{
	struct task_cputime ct;

	thread_group_cputime(tsk, &ct);
	store_samples(samples, ct.stime, ct.utime, ct.sum_exec_runtime);
}

/*
 * Sample a process (thread group) clock for the given task clkid. If the
 * group's cputime accounting is already enabled, read the atomic
 * store. Otherwise a full update is required. clkid is already validated.
 */
static u64 cpu_clock_sample_group(const clockid_t clkid, struct task_struct *p,
				  bool start)
{
	struct thread_group_cputimer *cputimer = &p->signal->cputimer;
	struct posix_cputimers *pct = &p->signal->posix_cputimers;
	u64 samples[CPUCLOCK_MAX];

	if (!READ_ONCE(pct->timers_active)) {
		if (start)
			thread_group_start_cputime(p, samples);
		else
			__thread_group_cputime(p, samples);
	} else {
		proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
	}

	return samples[clkid];
}

static int posix_cpu_clock_get(const clockid_t clock, struct timespec64 *tp)
{
	const clockid_t clkid = CPUCLOCK_WHICH(clock);
	struct task_struct *tsk;
	u64 t;

	rcu_read_lock();
	tsk = pid_task(pid_for_clock(clock, true), clock_pid_type(clock));
	if (!tsk) {
		rcu_read_unlock();
		return -EINVAL;
	}

	if (CPUCLOCK_PERTHREAD(clock))
		t = cpu_clock_sample(clkid, tsk);
	else
		t = cpu_clock_sample_group(clkid, tsk, false);
	rcu_read_unlock();

	*tp = ns_to_timespec64(t);
	return 0;
}

/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
 * new timer already all-zeros initialized.
 */
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
	static struct lock_class_key posix_cpu_timers_key;
	struct pid *pid;

	rcu_read_lock();
	pid = pid_for_clock(new_timer->it_clock, false);
	if (!pid) {
		rcu_read_unlock();
		return -EINVAL;
	}

	/*
	 * If posix timer expiry is handled in task work context then
	 * timer::it_lock can be taken without disabling interrupts as all
	 * other locking happens in task context. This requires a separate
	 * lock class key otherwise regular posix timer expiry would record
	 * the lock class being taken in interrupt context and generate a
	 * false positive warning.
	 */
	if (IS_ENABLED(CONFIG_POSIX_CPU_TIMERS_TASK_WORK))
		lockdep_set_class(&new_timer->it_lock, &posix_cpu_timers_key);

	new_timer->kclock = &clock_posix_cpu;
	timerqueue_init(&new_timer->it.cpu.node);
	new_timer->it.cpu.pid = get_pid(pid);
	rcu_read_unlock();
	return 0;
}

/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again. (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_del(struct k_itimer *timer)
{
	struct cpu_timer *ctmr = &timer->it.cpu;
	struct sighand_struct *sighand;
	struct task_struct *p;
	unsigned long flags;
	int ret = 0;

	rcu_read_lock();
	p = cpu_timer_task_rcu(timer);
	if (!p)
		goto out;

	/*
	 * Protect against sighand release/switch in exit/exec and against
	 * concurrent reads/writes of the process/thread timer list entries.
	 */
	sighand = lock_task_sighand(p, &flags);
	if (unlikely(sighand == NULL)) {
		/*
		 * This raced with the reaping of the task. The exit cleanup
		 * should have removed this timer from the timer queue.
		 */
		WARN_ON_ONCE(ctmr->head || timerqueue_node_queued(&ctmr->node));
	} else {
		if (timer->it.cpu.firing)
			ret = TIMER_RETRY;
		else
			cpu_timer_dequeue(ctmr);

		unlock_task_sighand(p, &flags);
	}

out:
	rcu_read_unlock();
	if (!ret)
		put_pid(ctmr->pid);

	return ret;
}

static void cleanup_timerqueue(struct timerqueue_head *head)
{
	struct timerqueue_node *node;
	struct cpu_timer *ctmr;

	while ((node = timerqueue_getnext(head))) {
		timerqueue_del(head, node);
		ctmr = container_of(node, struct cpu_timer, node);
		ctmr->head = NULL;
	}
}

/*
 * Clean out CPU timers which are still armed when a thread exits. The
 * timers are only removed from the list. No other updates are done. The
 * corresponding posix timers are still accessible, but cannot be rearmed.
 *
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct posix_cputimers *pct)
{
	cleanup_timerqueue(&pct->bases[CPUCLOCK_PROF].tqhead);
	cleanup_timerqueue(&pct->bases[CPUCLOCK_VIRT].tqhead);
	cleanup_timerqueue(&pct->bases[CPUCLOCK_SCHED].tqhead);
}

/*
 * These are both called with the siglock held, when the current thread
 * is being reaped. When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
	cleanup_timers(&tsk->posix_cputimers);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
	cleanup_timers(&tsk->signal->posix_cputimers);
}

/*
 * Insert the timer on the appropriate list before any timers that
 * expire later. This must be called with the sighand lock held.
 */
static void arm_timer(struct k_itimer *timer, struct task_struct *p)
{
	int clkidx = CPUCLOCK_WHICH(timer->it_clock);
	struct cpu_timer *ctmr = &timer->it.cpu;
	u64 newexp = cpu_timer_getexpires(ctmr);
	struct posix_cputimer_base *base;

	if (CPUCLOCK_PERTHREAD(timer->it_clock))
		base = p->posix_cputimers.bases + clkidx;
	else
		base = p->signal->posix_cputimers.bases + clkidx;

	if (!cpu_timer_enqueue(&base->tqhead, ctmr))
		return;

	/*
	 * We are the new earliest-expiring POSIX 1.b timer, hence
	 * need to update expiration cache. Take into account that
	 * for process timers we share expiration cache with itimers
	 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
	 */
	if (newexp < base->nextevt)
		base->nextevt = newexp;

	if (CPUCLOCK_PERTHREAD(timer->it_clock))
		tick_dep_set_task(p, TICK_DEP_BIT_POSIX_TIMER);
	else
		tick_dep_set_signal(p->signal, TICK_DEP_BIT_POSIX_TIMER);
}

/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
	struct cpu_timer *ctmr = &timer->it.cpu;

	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
		/*
		 * The user doesn't want any signal.
		 */
		cpu_timer_setexpires(ctmr, 0);
	} else if (unlikely(timer->sigq == NULL)) {
		/*
		 * This is a special case for clock_nanosleep,
		 * not a normal timer from sys_timer_create.
		 */
		wake_up_process(timer->it_process);
		cpu_timer_setexpires(ctmr, 0);
	} else if (!timer->it_interval) {
		/*
		 * One-shot timer. Clear it as soon as it's fired.
		 */
		posix_timer_event(timer, 0);
		cpu_timer_setexpires(ctmr, 0);
	} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
		/*
		 * The signal did not get queued because the signal
		 * was ignored, so we won't get any callback to
		 * reload the timer. But we need to keep it
		 * ticking in case the signal is deliverable next time.
		 */
		posix_cpu_timer_rearm(timer);
		++timer->it_requeue_pending;
	}
}

/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again. (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
			       struct itimerspec64 *new, struct itimerspec64 *old)
{
	clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
	u64 old_expires, new_expires, old_incr, val;
	struct cpu_timer *ctmr = &timer->it.cpu;
	struct sighand_struct *sighand;
	struct task_struct *p;
	unsigned long flags;
	int ret = 0;

	rcu_read_lock();
	p = cpu_timer_task_rcu(timer);
	if (!p) {
		/*
		 * If p has just been reaped, we can no
		 * longer get any information about it at all.
		 */
		rcu_read_unlock();
		return -ESRCH;
	}

	/*
	 * Use the to_ktime conversion because that clamps the maximum
	 * value to KTIME_MAX and avoids multiplication overflows.
	 */
	new_expires = ktime_to_ns(timespec64_to_ktime(new->it_value));

	/*
	 * Protect against sighand release/switch in exit/exec and the
	 * timer list reads/writes in arm_timer().
	 */
	sighand = lock_task_sighand(p, &flags);
	/*
	 * If p has just been reaped, we can no
	 * longer get any information about it at all.
	 */
	if (unlikely(sighand == NULL)) {
		rcu_read_unlock();
		return -ESRCH;
	}

	/*
	 * Disarm any old timer after extracting its expiry time.
	 */
	old_incr = timer->it_interval;
	old_expires = cpu_timer_getexpires(ctmr);

	if (unlikely(timer->it.cpu.firing)) {
		timer->it.cpu.firing = -1;
		ret = TIMER_RETRY;
	} else {
		cpu_timer_dequeue(ctmr);
	}

	/*
	 * We need to sample the current value to convert the new
	 * value from relative to absolute, and to convert the
	 * old value from absolute to relative. To set a process
	 * timer, we need a sample to balance the thread expiry
	 * times (in arm_timer). With an absolute time, we must
	 * check if it's already passed. In short, we need a sample.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock))
		val = cpu_clock_sample(clkid, p);
	else
		val = cpu_clock_sample_group(clkid, p, true);

	if (old) {
		if (old_expires == 0) {
			old->it_value.tv_sec = 0;
			old->it_value.tv_nsec = 0;
		} else {
			/*
			 * Update the timer in case it has overrun already.
			 * If it has, we'll report it as having overrun and
			 * with the next reloaded timer already ticking,
			 * though we are swallowing that pending
			 * notification here to install the new setting.
			 */
			u64 exp = bump_cpu_timer(timer, val);

			if (val < exp) {
				old_expires = exp - val;
				old->it_value = ns_to_timespec64(old_expires);
			} else {
				old->it_value.tv_nsec = 1;
				old->it_value.tv_sec = 0;
			}
		}
	}

	if (unlikely(ret)) {
		/*
		 * We are colliding with the timer actually firing.
		 * Punt after filling in the timer's old value, and
		 * disable this firing since we are already reporting
		 * it as an overrun (thanks to bump_cpu_timer above).
		 */
		unlock_task_sighand(p, &flags);
		goto out;
	}

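	/*
	 * A relative new expiry (no TIMER_ABSTIME) is converted to an
	 * absolute one by adding the clock sample taken above.
	 */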
	if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
		new_expires += val;
	}

	/*
	 * Install the new expiry time (or zero).
	 * For a timer with no notification action, we don't actually
	 * arm the timer (we'll just fake it for timer_gettime).
	 */
	cpu_timer_setexpires(ctmr, new_expires);
	if (new_expires != 0 && val < new_expires) {
		arm_timer(timer, p);
	}

	unlock_task_sighand(p, &flags);
	/*
	 * Install the new reload setting, and
	 * set up the signal and overrun bookkeeping.
	 */
	timer->it_interval = timespec64_to_ktime(new->it_interval);

	/*
	 * This acts as a modification timestamp for the timer,
	 * so any automatic reload attempt will punt on seeing
	 * that we have reset the timer manually.
	 */
	timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timer->it_overrun_last = 0;
	timer->it_overrun = -1;

	if (new_expires != 0 && !(val < new_expires)) {
		/*
		 * The designated time already passed, so we notify
		 * immediately, even if the thread never runs to
		 * accumulate more time on this clock.
		 */
		cpu_timer_fire(timer);
	}

	ret = 0;
out:
	rcu_read_unlock();
	if (old)
		old->it_interval = ns_to_timespec64(old_incr);

	return ret;
}

static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
{
	clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
	struct cpu_timer *ctmr = &timer->it.cpu;
	u64 now, expires = cpu_timer_getexpires(ctmr);
	struct task_struct *p;

	rcu_read_lock();
	p = cpu_timer_task_rcu(timer);
	if (!p)
		goto out;

	/*
	 * Easy part: convert the reload time.
	 */
	itp->it_interval = ktime_to_timespec64(timer->it_interval);

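	/* Timer is not armed: leave it_value at zero (itp is pre-cleared by the caller). */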
	if (!expires)
		goto out;

	/*
	 * Sample the clock to take the difference with the expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock))
		now = cpu_clock_sample(clkid, p);
	else
		now = cpu_clock_sample_group(clkid, p, false);

	if (now < expires) {
		itp->it_value = ns_to_timespec64(expires - now);
	} else {
		/*
		 * The timer should have expired already, but the firing
		 * hasn't taken place yet. Say it's just about to expire.
		 */
		itp->it_value.tv_nsec = 1;
		itp->it_value.tv_sec = 0;
	}
out:
	rcu_read_unlock();
}

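/*
 * Bound the number of timers expired in one batch so the sighand lock
 * is not held for arbitrarily long; any remaining expired timers are
 * picked up on a later pass via the updated expiry cache.
 */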
#define MAX_COLLECTED	20

static u64 collect_timerqueue(struct timerqueue_head *head,
			      struct list_head *firing, u64 now)
{
	struct timerqueue_node *next;
	int i = 0;

	while ((next = timerqueue_getnext(head))) {
		struct cpu_timer *ctmr;
		u64 expires;

		ctmr = container_of(next, struct cpu_timer, node);
		expires = cpu_timer_getexpires(ctmr);
		/* Limit the number of timers to expire at once */
		if (++i == MAX_COLLECTED || now < expires)
			return expires;

		ctmr->firing = 1;
		cpu_timer_dequeue(ctmr);
		list_add_tail(&ctmr->elist, firing);
	}

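	/* The queue is empty: cache an eternal expiry time. */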
	return U64_MAX;
}

static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
				    struct list_head *firing)
{
	struct posix_cputimer_base *base = pct->bases;
	int i;

	for (i = 0; i < CPUCLOCK_MAX; i++, base++) {
		base->nextevt = collect_timerqueue(&base->tqhead, firing,
						   samples[i]);
	}
}

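/*
 * A SCHED_DEADLINE task that overran its runtime budget has dl_overrun
 * set by the deadline scheduler; notify it with SIGXCPU here.
 */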
static inline void check_dl_overrun(struct task_struct *tsk)
{
	if (tsk->dl.dl_overrun) {
		tsk->dl.dl_overrun = 0;
		__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
	}
}

static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
{
	if (time < limit)
		return false;

	if (print_fatal_signals) {
		pr_info("%s Watchdog Timeout (%s): %s[%d]\n",
			rt ? "RT" : "CPU", hard ? "hard" : "soft",
			current->comm, task_pid_nr(current));
	}
	__group_send_sig_info(signo, SEND_SIG_PRIV, current);
	return true;
}

/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the timer queues onto the firing list. The expiry cache is updated to
 * reflect the remaining armed thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
				struct list_head *firing)
{
	struct posix_cputimers *pct = &tsk->posix_cputimers;
	u64 samples[CPUCLOCK_MAX];
	unsigned long soft;

	if (dl_task(tsk))
		check_dl_overrun(tsk);

	if (expiry_cache_is_inactive(pct))
		return;

	task_sample_cputime(tsk, samples);
	collect_posix_cputimers(pct, samples, firing);

	/*
	 * Check for the special case thread timers.
	 */
	soft = task_rlimit(tsk, RLIMIT_RTTIME);
	if (soft != RLIM_INFINITY) {
		/* Task RT timeout is accounted in jiffies. RTTIME is usec */
		unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ);
		unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);

		/* At the hard limit, send SIGKILL. No further action. */
		if (hard != RLIM_INFINITY &&
		    check_rlimit(rttime, hard, SIGKILL, true, true))
			return;

		/* At the soft limit, send a SIGXCPU every second */
		if (check_rlimit(rttime, soft, SIGXCPU, true, false)) {
			soft += USEC_PER_SEC;
			tsk->signal->rlim[RLIMIT_RTTIME].rlim_cur = soft;
		}
	}

	if (expiry_cache_is_inactive(pct))
		tick_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
}

static inline void stop_process_timers(struct signal_struct *sig)
{
	struct posix_cputimers *pct = &sig->posix_cputimers;

	/* Turn off the active flag. This is done without locking. */
	WRITE_ONCE(pct->timers_active, false);
	tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER);
}

static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
			     u64 *expires, u64 cur_time, int signo)
{
	if (!it->expires)
		return;

	if (cur_time >= it->expires) {
		if (it->incr)
			it->expires += it->incr;
		else
			it->expires = 0;

		trace_itimer_expire(signo == SIGPROF ?
				    ITIMER_PROF : ITIMER_VIRTUAL,
				    task_tgid(tsk), cur_time);
		__group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
	}

	if (it->expires && it->expires < *expires)
		*expires = it->expires;
}

/*
 * Check for any per-process CPU timers that have fired and move them
 * off the timer queues onto the firing list. Per-thread timers
 * have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
				 struct list_head *firing)
{
	struct signal_struct *const sig = tsk->signal;
	struct posix_cputimers *pct = &sig->posix_cputimers;
	u64 samples[CPUCLOCK_MAX];
	unsigned long soft;

	/*
	 * If there are no active process wide timers (POSIX 1.b, itimers,
	 * RLIMIT_CPU) there is nothing to check. Also skip the process wide
	 * timer processing when there is already another task handling them.
	 */
	if (!READ_ONCE(pct->timers_active) || pct->expiry_active)
		return;

	/*
	 * Signify that a thread is checking for process timers.
	 * Write access to this field is protected by the sighand lock.
	 */
	pct->expiry_active = true;

	/*
	 * Collect the current process totals. Group accounting is active
	 * so the sample can be taken directly.
	 */
	proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic, samples);
	collect_posix_cputimers(pct, samples, firing);

	/*
	 * Check for the special case process timers.
	 */
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF],
			 &pct->bases[CPUCLOCK_PROF].nextevt,
			 samples[CPUCLOCK_PROF], SIGPROF);
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT],
			 &pct->bases[CPUCLOCK_VIRT].nextevt,
			 samples[CPUCLOCK_VIRT], SIGVTALRM);

	soft = task_rlimit(tsk, RLIMIT_CPU);
	if (soft != RLIM_INFINITY) {
		/* RLIMIT_CPU is in seconds. Samples are nanoseconds */
		unsigned long hard = task_rlimit_max(tsk, RLIMIT_CPU);
		u64 ptime = samples[CPUCLOCK_PROF];
		u64 softns = (u64)soft * NSEC_PER_SEC;
		u64 hardns = (u64)hard * NSEC_PER_SEC;

		/* At the hard limit, send SIGKILL. No further action. */
		if (hard != RLIM_INFINITY &&
		    check_rlimit(ptime, hardns, SIGKILL, false, true))
			return;

		/* At the soft limit, send a SIGXCPU every second */
		if (check_rlimit(ptime, softns, SIGXCPU, false, false)) {
			sig->rlim[RLIMIT_CPU].rlim_cur = soft + 1;
			softns += NSEC_PER_SEC;
		}

		/* Update the expiry cache */
		if (softns < pct->bases[CPUCLOCK_PROF].nextevt)
			pct->bases[CPUCLOCK_PROF].nextevt = softns;
	}

	if (expiry_cache_is_inactive(pct))
		stop_process_timers(sig);

	pct->expiry_active = false;
}

/*
 * This is called from the signal code (via posixtimer_rearm)
 * when the last timer signal was delivered and we have to reload the timer.
 */
static void posix_cpu_timer_rearm(struct k_itimer *timer)
{
	clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
	struct task_struct *p;
	struct sighand_struct *sighand;
	unsigned long flags;
	u64 now;

	rcu_read_lock();
	p = cpu_timer_task_rcu(timer);
	if (!p)
		goto out;

	/* Protect timer list r/w in arm_timer() */
	sighand = lock_task_sighand(p, &flags);
	if (unlikely(sighand == NULL))
		goto out;

	/*
	 * Fetch the current sample and update the timer's expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock))
		now = cpu_clock_sample(clkid, p);
	else
		now = cpu_clock_sample_group(clkid, p, true);

	bump_cpu_timer(timer, now);

	/*
	 * Now re-arm for the new expiry time.
	 */
	arm_timer(timer, p);
	unlock_task_sighand(p, &flags);
out:
	rcu_read_unlock();
}

/**
 * task_cputimers_expired - Check whether posix CPU timers are expired
 *
 * @samples:	Array of current samples for the CPUCLOCK clocks
 * @pct:	Pointer to a posix_cputimers container
 *
 * Returns true if any member of @samples is greater than the corresponding
 * member of @pct->bases[CLK].nextevt. False otherwise.
 */
static inline bool
task_cputimers_expired(const u64 *samples, struct posix_cputimers *pct)
{
	int i;

	for (i = 0; i < CPUCLOCK_MAX; i++) {
		if (samples[i] >= pct->bases[i].nextevt)
			return true;
	}
	return false;
}

/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:	The task (thread) being checked.
 *
 * Check the task and thread group timers. If both are zero (there are no
 * timers set) return false. Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times. Return
 * true if a timer has expired, else return false.
 */
static inline bool fastpath_timer_check(struct task_struct *tsk)
{
	struct posix_cputimers *pct = &tsk->posix_cputimers;
	struct signal_struct *sig;

	if (!expiry_cache_is_inactive(pct)) {
		u64 samples[CPUCLOCK_MAX];

		task_sample_cputime(tsk, samples);
		if (task_cputimers_expired(samples, pct))
			return true;
	}

	sig = tsk->signal;
	pct = &sig->posix_cputimers;
	/*
	 * Check if thread group timers expired when timers are active and
	 * no other thread in the group is already handling expiry for
	 * thread group cputimers. These fields are read without the
	 * sighand lock. However, this is fine because this is meant to be
	 * a fastpath heuristic to determine whether we should try to
	 * acquire the sighand lock to handle timer expiry.
	 *
	 * In the worst case scenario, if concurrently timers_active is set
	 * or expiry_active is cleared, but the current thread doesn't see
	 * the change yet, the timer checks are delayed until the next
	 * thread in the group gets a scheduler interrupt to handle the
	 * timer. This isn't an issue in practice because these types of
	 * delays with signals actually getting sent are expected.
	 */
	if (READ_ONCE(pct->timers_active) && !READ_ONCE(pct->expiry_active)) {
		u64 samples[CPUCLOCK_MAX];

		proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic,
					   samples);

		if (task_cputimers_expired(samples, pct))
			return true;
	}

	if (dl_task(tsk) && tsk->dl.dl_overrun)
		return true;

	return false;
}

static void handle_posix_cpu_timers(struct task_struct *tsk);

#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
static void posix_cpu_timers_work(struct callback_head *work)
{
	handle_posix_cpu_timers(current);
}

/*
 * Clear existing posix CPU timers task work.
 */
void clear_posix_cputimers_work(struct task_struct *p)
{
	/*
	 * A copied work entry from the old task is not meaningful, clear it.
	 * N.B. init_task_work will not do this.
	 */
	memset(&p->posix_cputimers_work.work, 0,
	       sizeof(p->posix_cputimers_work.work));
	init_task_work(&p->posix_cputimers_work.work,
		       posix_cpu_timers_work);
	p->posix_cputimers_work.scheduled = false;
}

/*
 * Initialize posix CPU timers task work in init task. Out of line to
 * keep the callback static and to avoid header recursion hell.
 */
void __init posix_cputimers_init_work(void)
{
	clear_posix_cputimers_work(current);
}

/*
 * Note: All operations on tsk->posix_cputimers_work.scheduled happen either
 * in hard interrupt context or in task context with interrupts
 * disabled. Aside from that, the writer/reader interaction is always in
 * the context of the current task, which means it is strictly per CPU.
 */
static inline bool posix_cpu_timers_work_scheduled(struct task_struct *tsk)
{
	return tsk->posix_cputimers_work.scheduled;
}

static inline void __run_posix_cpu_timers(struct task_struct *tsk)
{
	if (WARN_ON_ONCE(tsk->posix_cputimers_work.scheduled))
		return;

	/* Schedule task work to actually expire the timers */
	tsk->posix_cputimers_work.scheduled = true;
	task_work_add(tsk, &tsk->posix_cputimers_work.work, TWA_RESUME);
}

static inline bool posix_cpu_timers_enable_work(struct task_struct *tsk,
						unsigned long start)
{
	bool ret = true;

	/*
	 * On !RT kernels interrupts are disabled while collecting expired
	 * timers, so no tick can happen and the fast path check can be
	 * reenabled without further checks.
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
		tsk->posix_cputimers_work.scheduled = false;
		return true;
	}

	/*
	 * On RT enabled kernels ticks can happen while the expired timers
	 * are collected under sighand lock. But any tick which observes
	 * the CPUTIMERS_WORK_SCHEDULED bit set, does not run the fastpath
	 * checks. So reenabling the tick work has to be done carefully:
	 *
	 * Disable interrupts and run the fast path check if jiffies have
	 * advanced since the collecting of expired timers started. If
	 * jiffies have not advanced or the fast path check did not find
	 * newly expired timers, reenable the fast path check in the timer
	 * interrupt. If there are newly expired timers, return false and
	 * let the collection loop repeat.
	 */
	local_irq_disable();
	if (start != jiffies && fastpath_timer_check(tsk))
		ret = false;
	else
		tsk->posix_cputimers_work.scheduled = false;
	local_irq_enable();

	return ret;
}
#else /* CONFIG_POSIX_CPU_TIMERS_TASK_WORK */
static inline void __run_posix_cpu_timers(struct task_struct *tsk)
{
	lockdep_posixtimer_enter();
	handle_posix_cpu_timers(tsk);
	lockdep_posixtimer_exit();
}

static inline bool posix_cpu_timers_work_scheduled(struct task_struct *tsk)
{
	return false;
}

static inline bool posix_cpu_timers_enable_work(struct task_struct *tsk,
						unsigned long start)
{
	return true;
}
#endif /* CONFIG_POSIX_CPU_TIMERS_TASK_WORK */

static void handle_posix_cpu_timers(struct task_struct *tsk)
{
	struct k_itimer *timer, *next;
	unsigned long flags, start;
	LIST_HEAD(firing);

	if (!lock_task_sighand(tsk, &flags))
		return;

	do {
		/*
		 * On RT locking sighand lock does not disable interrupts,
		 * so this needs to be careful vs. ticks. Store the current
		 * jiffies value.
		 */
		start = READ_ONCE(jiffies);
		barrier();

		/*
		 * Here we take all the timers that are firing off the
		 * task's and the process' timer queues and put them on
		 * the firing list.
		 */
		check_thread_timers(tsk, &firing);

		check_process_timers(tsk, &firing);

		/*
		 * The above timer checks have updated the expiry cache and
		 * because nothing can have queued or modified timers after
		 * sighand lock was taken above it is guaranteed to be
		 * consistent. So the next timer interrupt fastpath check
		 * will find valid data.
		 *
		 * If timer expiry runs in the timer interrupt context then
		 * the loop is not relevant as timers will be directly
		 * expired in interrupt context. The stub function below
		 * always returns true which allows the compiler to
		 * optimize the loop out.
		 *
		 * If timer expiry is deferred to task work context then
		 * the following rules apply:
		 *
		 * - On !RT kernels no tick can have happened on this CPU
		 *   after sighand lock was acquired because interrupts are
		 *   disabled. So reenabling task work before dropping
		 *   sighand lock and reenabling interrupts is race free.
		 *
		 * - On RT kernels ticks might have happened but the tick
		 *   work ignored posix CPU timer handling because the
		 *   CPUTIMERS_WORK_SCHEDULED bit is set. Reenabling work
		 *   must be done very carefully including a check whether
		 *   ticks have happened since the start of the timer
		 *   expiry checks. posix_cpu_timers_enable_work() takes
		 *   care of that and eventually lets the expiry checks
		 *   run again.
		 */
	} while (!posix_cpu_timers_enable_work(tsk, start));

	/*
	 * We must release sighand lock before taking any timer's lock.
	 * There is a potential race with timer deletion here, as the
	 * siglock now protects our private firing list. We have set
	 * the firing flag in each timer, so that a deletion attempt
	 * that gets the timer lock before we do will give it up and
	 * spin until we've taken care of that timer below.
	 */
	unlock_task_sighand(tsk, &flags);

	/*
	 * Now that all the timers on our list have the firing flag,
	 * no one will touch their list entries but us. We'll take
	 * each timer's lock before clearing its firing flag, so no
	 * timer call will interfere.
	 */
	list_for_each_entry_safe(timer, next, &firing, it.cpu.elist) {
		int cpu_firing;

		/*
		 * spin_lock() is sufficient here even independent of the
		 * expiry context. If expiry happens in hard interrupt
		 * context it's obvious. For task work context it's safe
		 * because all other operations on timer::it_lock happen in
		 * task context (syscall or exit).
		 */
		spin_lock(&timer->it_lock);
		list_del_init(&timer->it.cpu.elist);
		cpu_firing = timer->it.cpu.firing;
		timer->it.cpu.firing = 0;
		/*
		 * The firing flag is -1 if we collided with a reset
		 * of the timer, which already reported this
		 * almost-firing as an overrun. So don't generate an event.
		 */
		if (likely(cpu_firing >= 0))
			cpu_timer_fire(timer);
		spin_unlock(&timer->it_lock);
	}
}

/*
 * This is called from the timer interrupt handler. The irq handler has
 * already updated our counts. We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(void)
{
	struct task_struct *tsk = current;

	lockdep_assert_irqs_disabled();

	/*
	 * If the actual expiry is deferred to task work context and the
	 * work is already scheduled there is no point to do anything here.
	 */
	if (posix_cpu_timers_work_scheduled(tsk))
		return;

	/*
	 * The fast path checks that there are no expired thread or thread
	 * group timers. If that's so, just return.
	 */
	if (!fastpath_timer_check(tsk))
		return;

	__run_posix_cpu_timers(tsk);
}

/*
 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
 * The tsk->sighand->siglock must be held by the caller.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clkid,
			   u64 *newval, u64 *oldval)
{
	u64 now, *nextevt;

	if (WARN_ON_ONCE(clkid >= CPUCLOCK_SCHED))
		return;

	nextevt = &tsk->signal->posix_cputimers.bases[clkid].nextevt;
	now = cpu_clock_sample_group(clkid, tsk, true);

	if (oldval) {
		/*
		 * We are setting an itimer. The *oldval is absolute and we
		 * update it to be relative; the *newval argument is relative
		 * and we update it to be absolute.
		 */
		if (*oldval) {
			if (*oldval <= now) {
				/* Just about to fire. */
				*oldval = TICK_NSEC;
			} else {
				*oldval -= now;
			}
		}

		if (!*newval)
			return;
		*newval += now;
	}

	/*
	 * Update the expiration cache if this is the earliest timer. The
	 * CPUCLOCK_PROF expiry cache is also used by RLIMIT_CPU.
	 */
	if (*newval < *nextevt)
		*nextevt = *newval;

	tick_dep_set_signal(tsk->signal, TICK_DEP_BIT_POSIX_TIMER);
}

static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
			    const struct timespec64 *rqtp)
{
	struct itimerspec64 it;
	struct k_itimer timer;
	u64 expires;
	int error;

	/*
	 * Set up a temporary timer and then wait for it to go off.
	 */
	memset(&timer, 0, sizeof timer);
	spin_lock_init(&timer.it_lock);
	timer.it_clock = which_clock;
	timer.it_overrun = -1;
	error = posix_cpu_timer_create(&timer);
	timer.it_process = current;

	if (!error) {
		static struct itimerspec64 zero_it;
		struct restart_block *restart;

		memset(&it, 0, sizeof(it));
		it.it_value = *rqtp;

		spin_lock_irq(&timer.it_lock);
		error = posix_cpu_timer_set(&timer, flags, &it, NULL);
		if (error) {
			spin_unlock_irq(&timer.it_lock);
			return error;
		}

		while (!signal_pending(current)) {
			if (!cpu_timer_getexpires(&timer.it.cpu)) {
				/*
				 * Our timer fired and was reset, the
				 * deletion below cannot fail.
				 */
				posix_cpu_timer_del(&timer);
				spin_unlock_irq(&timer.it_lock);
				return 0;
			}

			/*
			 * Block until cpu_timer_fire (or a signal) wakes us.
			 */
			__set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&timer.it_lock);
			schedule();
			spin_lock_irq(&timer.it_lock);
		}

		/*
		 * We were interrupted by a signal.
		 */
		expires = cpu_timer_getexpires(&timer.it.cpu);
		error = posix_cpu_timer_set(&timer, 0, &zero_it, &it);
		if (!error) {
			/*
			 * Timer is now unarmed, deletion cannot fail.
			 */
			posix_cpu_timer_del(&timer);
		}
		spin_unlock_irq(&timer.it_lock);

		while (error == TIMER_RETRY) {
			/*
			 * We need to handle the case where the timer was or
			 * is in the middle of firing. In other cases we
			 * already freed the resources.
			 */
			spin_lock_irq(&timer.it_lock);
			error = posix_cpu_timer_del(&timer);
			spin_unlock_irq(&timer.it_lock);
		}

		if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
			/*
			 * It actually did fire already.
			 */
			return 0;
		}

		error = -ERESTART_RESTARTBLOCK;
		/*
		 * Report back to the user the time still remaining.
		 */
		restart = &current->restart_block;
		restart->nanosleep.expires = expires;
		if (restart->nanosleep.type != TT_NONE)
			error = nanosleep_copyout(restart, &it.it_value);
	}

	return error;
}

static long posix_cpu_nsleep_restart(struct restart_block *restart_block);

static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
			    const struct timespec64 *rqtp)
{
	struct restart_block *restart_block = &current->restart_block;
	int error;

	/*
	 * Diagnose required errors first.
	 */
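	/*
	 * Sleeping on the current thread's own CPU clock cannot work:
	 * that clock only advances while the thread runs, so the timer
	 * could never fire while the thread sleeps on it.
	 */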
	if (CPUCLOCK_PERTHREAD(which_clock) &&
	    (CPUCLOCK_PID(which_clock) == 0 ||
	     CPUCLOCK_PID(which_clock) == task_pid_vnr(current)))
		return -EINVAL;

	error = do_cpu_nanosleep(which_clock, flags, rqtp);

	if (error == -ERESTART_RESTARTBLOCK) {

		if (flags & TIMER_ABSTIME)
			return -ERESTARTNOHAND;

		restart_block->nanosleep.clockid = which_clock;
		set_restart_fn(restart_block, posix_cpu_nsleep_restart);
	}
	return error;
}

static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
	clockid_t which_clock = restart_block->nanosleep.clockid;
	struct timespec64 t;

	t = ns_to_timespec64(restart_block->nanosleep.expires);

	return do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t);
}

#define PROCESS_CLOCK	make_process_cpuclock(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK	make_thread_cpuclock(0, CPUCLOCK_SCHED)

static int process_cpu_clock_getres(const clockid_t which_clock,
				    struct timespec64 *tp)
{
	return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
				 struct timespec64 *tp)
{
	return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = PROCESS_CLOCK;
	return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
			      const struct timespec64 *rqtp)
{
	return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp);
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
				   struct timespec64 *tp)
{
	return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
				struct timespec64 *tp)
{
	return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = THREAD_CLOCK;
	return posix_cpu_timer_create(timer);
}

const struct k_clock clock_posix_cpu = {
	.clock_getres		= posix_cpu_clock_getres,
	.clock_set		= posix_cpu_clock_set,
	.clock_get_timespec	= posix_cpu_clock_get,
	.timer_create		= posix_cpu_timer_create,
	.nsleep			= posix_cpu_nsleep,
	.timer_set		= posix_cpu_timer_set,
	.timer_del		= posix_cpu_timer_del,
	.timer_get		= posix_cpu_timer_get,
	.timer_rearm		= posix_cpu_timer_rearm,
};

const struct k_clock clock_process = {
	.clock_getres		= process_cpu_clock_getres,
	.clock_get_timespec	= process_cpu_clock_get,
	.timer_create		= process_cpu_timer_create,
	.nsleep			= process_cpu_nsleep,
};

const struct k_clock clock_thread = {
	.clock_getres		= thread_cpu_clock_getres,
	.clock_get_timespec	= thread_cpu_clock_get,
	.timer_create		= thread_cpu_timer_create,
};