1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Implement CPU time clocks for the POSIX clock interface.
4  */
5 
6 #include <linux/sched/signal.h>
7 #include <linux/sched/cputime.h>
8 #include <linux/posix-timers.h>
9 #include <linux/errno.h>
10 #include <linux/math64.h>
11 #include <linux/uaccess.h>
12 #include <linux/kernel_stat.h>
13 #include <trace/events/timer.h>
14 #include <linux/tick.h>
15 #include <linux/workqueue.h>
16 #include <linux/compat.h>
17 #include <linux/sched/deadline.h>
18 
19 #include "posix-timers.h"
20 
21 static void posix_cpu_timer_rearm(struct k_itimer *timer);
22 
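/*
 * Initialize the group-wide expiry cache and pre-arm the PROF base
 * when RLIMIT_CPU is finite.
 */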
23 void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit)
24 {
25 	posix_cputimers_init(pct);
26 	if (cpu_limit != RLIM_INFINITY) {
27 		pct->bases[CPUCLOCK_PROF].nextevt = cpu_limit * NSEC_PER_SEC;
28 		pct->timers_active = true;
29 	}
30 }
31 
32 /*
33  * Called after updating RLIMIT_CPU to run cpu timer and update
34  * tsk->signal->posix_cputimers.bases[clock].nextevt expiration cache if
35  * necessary. Needs siglock protection since other code may update the
36  * expiration cache as well.
37  */
38 void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
39 {
40 	u64 nsecs = rlim_new * NSEC_PER_SEC;
41 
42 	spin_lock_irq(&task->sighand->siglock);
43 	set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL);
44 	spin_unlock_irq(&task->sighand->siglock);
45 }
46 
47 /*
48  * Functions for validating access to tasks.
49  */
50 static struct pid *pid_for_clock(const clockid_t clock, bool gettime)
51 {
52 	const bool thread = !!CPUCLOCK_PERTHREAD(clock);
53 	const pid_t upid = CPUCLOCK_PID(clock);
54 	struct pid *pid;
55 
56 	if (CPUCLOCK_WHICH(clock) >= CPUCLOCK_MAX)
57 		return NULL;
58 
59 	/*
60 	 * If the encoded PID is 0, then the timer is targeted at current
61 	 * or the process to which current belongs.
62 	 */
63 	if (upid == 0)
64 		return thread ? task_pid(current) : task_tgid(current);
65 
66 	pid = find_vpid(upid);
67 	if (!pid)
68 		return NULL;
69 
70 	if (thread) {
71 		struct task_struct *tsk = pid_task(pid, PIDTYPE_PID);
72 		return (tsk && same_thread_group(tsk, current)) ? pid : NULL;
73 	}
74 
75 	/*
76 	 * For clock_gettime(PROCESS) allow finding the process with
77 	 * the pid of the current task.  The code needs the tgid
78 	 * of the process so that pid_task(pid, PIDTYPE_TGID) can be
79 	 * used to find the process.
80 	 */
81 	if (gettime && (pid == task_pid(current)))
82 		return task_tgid(current);
83 
84 	/*
85 	 * For process clocks, require that the pid identifies a process.
86 	 */
87 	return pid_has_task(pid, PIDTYPE_TGID) ? pid : NULL;
88 }
89 
90 static inline int validate_clock_permissions(const clockid_t clock)
91 {
92 	int ret;
93 
94 	rcu_read_lock();
95 	ret = pid_for_clock(clock, false) ? 0 : -EINVAL;
96 	rcu_read_unlock();
97 
98 	return ret;
99 }
100 
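/* Map a clockid to the pid type used to look up the target task. */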
101 static inline enum pid_type clock_pid_type(const clockid_t clock)
102 {
103 	return CPUCLOCK_PERTHREAD(clock) ? PIDTYPE_PID : PIDTYPE_TGID;
104 }
105 
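/* Look up the task targeted by a CPU timer. Caller must hold rcu_read_lock(). */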
106 static inline struct task_struct *cpu_timer_task_rcu(struct k_itimer *timer)
107 {
108 	return pid_task(timer->it.cpu.pid, clock_pid_type(timer->it_clock));
109 }
110 
111 /*
112  * Update expiry time from increment, and increase overrun count,
113  * given the current clock sample.
114  */
115 static u64 bump_cpu_timer(struct k_itimer *timer, u64 now)
116 {
117 	u64 delta, incr, expires = timer->it.cpu.node.expires;
118 	int i;
119 
120 	if (!timer->it_interval)
121 		return expires;
122 
123 	if (now < expires)
124 		return expires;
125 
126 	incr = timer->it_interval;
127 	delta = now + incr - expires;
128 
129 	/* Don't use (incr*2 < delta), incr*2 might overflow. */
130 	for (i = 0; incr < delta - incr; i++)
131 		incr = incr << 1;
132 
133 	for (; i >= 0; incr >>= 1, i--) {
134 		if (delta < incr)
135 			continue;
136 
137 		timer->it.cpu.node.expires += incr;
138 		timer->it_overrun += 1LL << i;
139 		delta -= incr;
140 	}
141 	return timer->it.cpu.node.expires;
142 }
143 
144 /* Check whether all cache entries contain U64_MAX, i.e. eternal expiry time */
145 static inline bool expiry_cache_is_inactive(const struct posix_cputimers *pct)
146 {
147 	return !(~pct->bases[CPUCLOCK_PROF].nextevt |
148 		 ~pct->bases[CPUCLOCK_VIRT].nextevt |
149 		 ~pct->bases[CPUCLOCK_SCHED].nextevt);
150 }
151 
152 static int
153 posix_cpu_clock_getres(const clockid_t which_clock, struct timespec64 *tp)
154 {
155 	int error = validate_clock_permissions(which_clock);
156 
157 	if (!error) {
158 		tp->tv_sec = 0;
159 		tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
160 		if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
161 			/*
162 			 * If sched_clock is using a cycle counter, we
163 			 * don't have any idea of its true resolution
164 			 * exported, but it is much more than 1s/HZ.
165 			 */
166 			tp->tv_nsec = 1;
167 		}
168 	}
169 	return error;
170 }
171 
172 static int
173 posix_cpu_clock_set(const clockid_t clock, const struct timespec64 *tp)
174 {
175 	int error = validate_clock_permissions(clock);
176 
177 	/*
178 	 * You can never reset a CPU clock, but we check for other errors
179 	 * in the call before failing with EPERM.
180 	 */
181 	return error ? : -EPERM;
182 }
183 
184 /*
185  * Sample a per-thread clock for the given task. clkid is validated.
186  */
187 static u64 cpu_clock_sample(const clockid_t clkid, struct task_struct *p)
188 {
189 	u64 utime, stime;
190 
191 	if (clkid == CPUCLOCK_SCHED)
192 		return task_sched_runtime(p);
193 
194 	task_cputime(p, &utime, &stime);
195 
196 	switch (clkid) {
197 	case CPUCLOCK_PROF:
198 		return utime + stime;
199 	case CPUCLOCK_VIRT:
200 		return utime;
201 	default:
202 		WARN_ON_ONCE(1);
203 	}
204 	return 0;
205 }
206 
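/* Store PROF/VIRT/SCHED samples in clockid order for the expiry checks. */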
207 static inline void store_samples(u64 *samples, u64 stime, u64 utime, u64 rtime)
208 {
209 	samples[CPUCLOCK_PROF] = stime + utime;
210 	samples[CPUCLOCK_VIRT] = utime;
211 	samples[CPUCLOCK_SCHED] = rtime;
212 }
213 
214 static void task_sample_cputime(struct task_struct *p, u64 *samples)
215 {
216 	u64 stime, utime;
217 
218 	task_cputime(p, &utime, &stime);
219 	store_samples(samples, stime, utime, p->se.sum_exec_runtime);
220 }
221 
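/* Sample the process-wide atomic cputime accumulators without locking. */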
222 static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
223 				       u64 *samples)
224 {
225 	u64 stime, utime, rtime;
226 
227 	utime = atomic64_read(&at->utime);
228 	stime = atomic64_read(&at->stime);
229 	rtime = atomic64_read(&at->sum_exec_runtime);
230 	store_samples(samples, stime, utime, rtime);
231 }
232 
233 /*
234  * Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
235  * to avoid race conditions with concurrent updates to cputime.
236  */
237 static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
238 {
239 	u64 curr_cputime;
240 retry:
241 	curr_cputime = atomic64_read(cputime);
242 	if (sum_cputime > curr_cputime) {
243 		if (atomic64_cmpxchg(cputime, curr_cputime, sum_cputime) != curr_cputime)
244 			goto retry;
245 	}
246 }
247 
248 static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic,
249 			      struct task_cputime *sum)
250 {
251 	__update_gt_cputime(&cputime_atomic->utime, sum->utime);
252 	__update_gt_cputime(&cputime_atomic->stime, sum->stime);
253 	__update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
254 }
255 
256 /**
257  * thread_group_sample_cputime - Sample cputime for a given task
258  * @tsk:	Task for which cputime needs to be sampled
259  * @samples:	Storage for time samples
260  *
261  * Called from sys_getitimer() to calculate the expiry time of an active
262  * timer. That means group cputime accounting is already active. Called
263  * with task sighand lock held.
264  *
265  * Updates @samples with an up-to-date sample of the thread group cputimes.
266  */
267 void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples)
268 {
269 	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
270 	struct posix_cputimers *pct = &tsk->signal->posix_cputimers;
271 
272 	WARN_ON_ONCE(!pct->timers_active);
273 
274 	proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
275 }
276 
277 /**
278  * thread_group_start_cputime - Start cputime and return a sample
279  * @tsk:	Task for which cputime needs to be started
280  * @samples:	Storage for time samples
281  *
282  * The thread group cputime accounting is avoided when there are no posix
283  * CPU timers armed. Before starting a timer it's required to check whether
284  * the time accounting is active. If not, a full update of the atomic
285  * accounting store needs to be done and the accounting enabled.
286  *
287  * Updates @samples with an up-to-date sample of the thread group cputimes.
288  */
289 static void thread_group_start_cputime(struct task_struct *tsk, u64 *samples)
290 {
291 	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
292 	struct posix_cputimers *pct = &tsk->signal->posix_cputimers;
293 
294 	/* Check if cputimer isn't running. This is accessed without locking. */
295 	if (!READ_ONCE(pct->timers_active)) {
296 		struct task_cputime sum;
297 
298 		/*
299 		 * The POSIX timer interface allows for absolute time expiry
300 		 * values through the TIMER_ABSTIME flag, therefore we have
301 		 * to synchronize the timer to the clock every time we start it.
302 		 */
303 		thread_group_cputime(tsk, &sum);
304 		update_gt_cputime(&cputimer->cputime_atomic, &sum);
305 
306 		/*
307 		 * We're setting timers_active without a lock. Ensure this
308 		 * only gets written to in one operation. We set it after
309 		 * update_gt_cputime() as a small optimization, but
310 		 * barriers are not required because update_gt_cputime()
311 		 * can handle concurrent updates.
312 		 */
313 		WRITE_ONCE(pct->timers_active, true);
314 	}
315 	proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
316 }
317 
318 static void __thread_group_cputime(struct task_struct *tsk, u64 *samples)
319 {
320 	struct task_cputime ct;
321 
322 	thread_group_cputime(tsk, &ct);
323 	store_samples(samples, ct.stime, ct.utime, ct.sum_exec_runtime);
324 }
325 
326 /*
327  * Sample a process (thread group) clock for the given task clkid. If the
328  * group's cputime accounting is already enabled, read the atomic
329  * store. Otherwise a full update is required.  clkid is already validated.
330  */
331 static u64 cpu_clock_sample_group(const clockid_t clkid, struct task_struct *p,
332 				  bool start)
333 {
334 	struct thread_group_cputimer *cputimer = &p->signal->cputimer;
335 	struct posix_cputimers *pct = &p->signal->posix_cputimers;
336 	u64 samples[CPUCLOCK_MAX];
337 
338 	if (!READ_ONCE(pct->timers_active)) {
339 		if (start)
340 			thread_group_start_cputime(p, samples);
341 		else
342 			__thread_group_cputime(p, samples);
343 	} else {
344 		proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
345 	}
346 
347 	return samples[clkid];
348 }
349 
350 static int posix_cpu_clock_get(const clockid_t clock, struct timespec64 *tp)
351 {
352 	const clockid_t clkid = CPUCLOCK_WHICH(clock);
353 	struct task_struct *tsk;
354 	u64 t;
355 
356 	rcu_read_lock();
357 	tsk = pid_task(pid_for_clock(clock, true), clock_pid_type(clock));
358 	if (!tsk) {
359 		rcu_read_unlock();
360 		return -EINVAL;
361 	}
362 
363 	if (CPUCLOCK_PERTHREAD(clock))
364 		t = cpu_clock_sample(clkid, tsk);
365 	else
366 		t = cpu_clock_sample_group(clkid, tsk, false);
367 	rcu_read_unlock();
368 
369 	*tp = ns_to_timespec64(t);
370 	return 0;
371 }
372 
373 /*
374  * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
375  * This is called from sys_timer_create() and do_cpu_nanosleep() with the
376  * new timer already all-zeros initialized.
377  */
378 static int posix_cpu_timer_create(struct k_itimer *new_timer)
379 {
380 	static struct lock_class_key posix_cpu_timers_key;
381 	struct pid *pid;
382 
383 	rcu_read_lock();
384 	pid = pid_for_clock(new_timer->it_clock, false);
385 	if (!pid) {
386 		rcu_read_unlock();
387 		return -EINVAL;
388 	}
389 
390 	/*
391 	 * If posix timer expiry is handled in task work context then
392 	 * timer::it_lock can be taken without disabling interrupts as all
393 	 * other locking happens in task context. This requires a separate
394 	 * lock class key otherwise regular posix timer expiry would record
395 	 * the lock class being taken in interrupt context and generate a
396 	 * false positive warning.
397 	 */
398 	if (IS_ENABLED(CONFIG_POSIX_CPU_TIMERS_TASK_WORK))
399 		lockdep_set_class(&new_timer->it_lock, &posix_cpu_timers_key);
400 
401 	new_timer->kclock = &clock_posix_cpu;
402 	timerqueue_init(&new_timer->it.cpu.node);
403 	new_timer->it.cpu.pid = get_pid(pid);
404 	rcu_read_unlock();
405 	return 0;
406 }
407 
408 /*
409  * Clean up a CPU-clock timer that is about to be destroyed.
410  * This is called from timer deletion with the timer already locked.
411  * If we return TIMER_RETRY, it's necessary to release the timer's lock
412  * and try again.  (This happens when the timer is in the middle of firing.)
413  */
414 static int posix_cpu_timer_del(struct k_itimer *timer)
415 {
416 	struct cpu_timer *ctmr = &timer->it.cpu;
417 	struct sighand_struct *sighand;
418 	struct task_struct *p;
419 	unsigned long flags;
420 	int ret = 0;
421 
422 	rcu_read_lock();
423 	p = cpu_timer_task_rcu(timer);
424 	if (!p)
425 		goto out;
426 
427 	/*
428 	 * Protect against sighand release/switch in exit/exec and process/
429 	 * thread timer list entry concurrent read/writes.
430 	 */
431 	sighand = lock_task_sighand(p, &flags);
432 	if (unlikely(sighand == NULL)) {
433 		/*
434 		 * This raced with the reaping of the task. The exit cleanup
435 		 * should have removed this timer from the timer queue.
436 		 */
437 		WARN_ON_ONCE(ctmr->head || timerqueue_node_queued(&ctmr->node));
438 	} else {
439 		if (timer->it.cpu.firing)
440 			ret = TIMER_RETRY;
441 		else
442 			cpu_timer_dequeue(ctmr);
443 
444 		unlock_task_sighand(p, &flags);
445 	}
446 
447 out:
448 	rcu_read_unlock();
449 	if (!ret)
450 		put_pid(ctmr->pid);
451 
452 	return ret;
453 }
454 
455 static void cleanup_timerqueue(struct timerqueue_head *head)
456 {
457 	struct timerqueue_node *node;
458 	struct cpu_timer *ctmr;
459 
460 	while ((node = timerqueue_getnext(head))) {
461 		timerqueue_del(head, node);
462 		ctmr = container_of(node, struct cpu_timer, node);
463 		ctmr->head = NULL;
464 	}
465 }
466 
467 /*
468  * Clean out CPU timers which are still armed when a thread exits. The
469  * timers are only removed from the list. No other updates are done. The
470  * corresponding posix timers are still accessible, but cannot be rearmed.
471  *
472  * This must be called with the siglock held.
473  */
474 static void cleanup_timers(struct posix_cputimers *pct)
475 {
476 	cleanup_timerqueue(&pct->bases[CPUCLOCK_PROF].tqhead);
477 	cleanup_timerqueue(&pct->bases[CPUCLOCK_VIRT].tqhead);
478 	cleanup_timerqueue(&pct->bases[CPUCLOCK_SCHED].tqhead);
479 }
480 
481 /*
482  * These are both called with the siglock held, when the current thread
483  * is being reaped.  When the final (leader) thread in the group is reaped,
484  * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
485  */
486 void posix_cpu_timers_exit(struct task_struct *tsk)
487 {
488 	cleanup_timers(&tsk->posix_cputimers);
489 }
490 void posix_cpu_timers_exit_group(struct task_struct *tsk)
491 {
492 	cleanup_timers(&tsk->signal->posix_cputimers);
493 }
494 
495 /*
496  * Insert the timer on the appropriate list before any timers that
497  * expire later.  This must be called with the sighand lock held.
498  */
499 static void arm_timer(struct k_itimer *timer, struct task_struct *p)
500 {
501 	int clkidx = CPUCLOCK_WHICH(timer->it_clock);
502 	struct cpu_timer *ctmr = &timer->it.cpu;
503 	u64 newexp = cpu_timer_getexpires(ctmr);
504 	struct posix_cputimer_base *base;
505 
506 	if (CPUCLOCK_PERTHREAD(timer->it_clock))
507 		base = p->posix_cputimers.bases + clkidx;
508 	else
509 		base = p->signal->posix_cputimers.bases + clkidx;
510 
511 	if (!cpu_timer_enqueue(&base->tqhead, ctmr))
512 		return;
513 
514 	/*
515 	 * We are the new earliest-expiring POSIX 1.b timer, hence
516 	 * need to update expiration cache. Take into account that
517 	 * for process timers we share expiration cache with itimers
518 	 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
519 	 */
520 	if (newexp < base->nextevt)
521 		base->nextevt = newexp;
522 
523 	if (CPUCLOCK_PERTHREAD(timer->it_clock))
524 		tick_dep_set_task(p, TICK_DEP_BIT_POSIX_TIMER);
525 	else
526 		tick_dep_set_signal(p->signal, TICK_DEP_BIT_POSIX_TIMER);
527 }
528 
529 /*
530  * The timer is locked, fire it and arrange for its reload.
531  */
532 static void cpu_timer_fire(struct k_itimer *timer)
533 {
534 	struct cpu_timer *ctmr = &timer->it.cpu;
535 
536 	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
537 		/*
538 		 * The user doesn't want any signal.
539 		 */
540 		cpu_timer_setexpires(ctmr, 0);
541 	} else if (unlikely(timer->sigq == NULL)) {
542 		/*
543 		 * This is a special case for clock_nanosleep,
544 		 * not a normal timer from sys_timer_create.
545 		 */
546 		wake_up_process(timer->it_process);
547 		cpu_timer_setexpires(ctmr, 0);
548 	} else if (!timer->it_interval) {
549 		/*
550 		 * One-shot timer.  Clear it as soon as it's fired.
551 		 */
552 		posix_timer_event(timer, 0);
553 		cpu_timer_setexpires(ctmr, 0);
554 	} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
555 		/*
556 		 * The signal did not get queued because the signal
557 		 * was ignored, so we won't get any callback to
558 		 * reload the timer.  But we need to keep it
559 		 * ticking in case the signal is deliverable next time.
560 		 */
561 		posix_cpu_timer_rearm(timer);
562 		++timer->it_requeue_pending;
563 	}
564 }
565 
566 /*
567  * Guts of sys_timer_settime for CPU timers.
568  * This is called with the timer locked and interrupts disabled.
569  * If we return TIMER_RETRY, it's necessary to release the timer's lock
570  * and try again.  (This happens when the timer is in the middle of firing.)
571  */
572 static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
573 			       struct itimerspec64 *new, struct itimerspec64 *old)
574 {
575 	clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
576 	u64 old_expires, new_expires, old_incr, val;
577 	struct cpu_timer *ctmr = &timer->it.cpu;
578 	struct sighand_struct *sighand;
579 	struct task_struct *p;
580 	unsigned long flags;
581 	int ret = 0;
582 
583 	rcu_read_lock();
584 	p = cpu_timer_task_rcu(timer);
585 	if (!p) {
586 		/*
587 		 * If p has just been reaped, we can no
588 		 * longer get any information about it at all.
589 		 */
590 		rcu_read_unlock();
591 		return -ESRCH;
592 	}
593 
594 	/*
595 	 * Use the to_ktime conversion because that clamps the maximum
596 	 * value to KTIME_MAX and avoids multiplication overflows.
597 	 */
598 	new_expires = ktime_to_ns(timespec64_to_ktime(new->it_value));
599 
600 	/*
601 	 * Protect against sighand release/switch in exit/exec and p->cpu_timers
602 	 * and p->signal->cpu_timers read/write in arm_timer()
603 	 */
604 	sighand = lock_task_sighand(p, &flags);
605 	/*
606 	 * If p has just been reaped, we can no
607 	 * longer get any information about it at all.
608 	 */
609 	if (unlikely(sighand == NULL)) {
610 		rcu_read_unlock();
611 		return -ESRCH;
612 	}
613 
614 	/*
615 	 * Disarm any old timer after extracting its expiry time.
616 	 */
617 	old_incr = timer->it_interval;
618 	old_expires = cpu_timer_getexpires(ctmr);
619 
620 	if (unlikely(timer->it.cpu.firing)) {
621 		timer->it.cpu.firing = -1;
622 		ret = TIMER_RETRY;
623 	} else {
624 		cpu_timer_dequeue(ctmr);
625 	}
626 
627 	/*
628 	 * We need to sample the current value to convert the new
629 	 * value from relative to absolute, and to convert the
630 	 * old value from absolute to relative.  To set a process
631 	 * timer, we need a sample to balance the thread expiry
632 	 * times (in arm_timer).  With an absolute time, we must
633 	 * check if it's already passed.  In short, we need a sample.
634 	 */
635 	if (CPUCLOCK_PERTHREAD(timer->it_clock))
636 		val = cpu_clock_sample(clkid, p);
637 	else
638 		val = cpu_clock_sample_group(clkid, p, true);
639 
640 	if (old) {
641 		if (old_expires == 0) {
642 			old->it_value.tv_sec = 0;
643 			old->it_value.tv_nsec = 0;
644 		} else {
645 			/*
646 			 * Update the timer in case it has overrun already.
647 			 * If it has, we'll report it as having overrun and
648 			 * with the next reloaded timer already ticking,
649 			 * though we are swallowing that pending
650 			 * notification here to install the new setting.
651 			 */
652 			u64 exp = bump_cpu_timer(timer, val);
653 
654 			if (val < exp) {
655 				old_expires = exp - val;
656 				old->it_value = ns_to_timespec64(old_expires);
657 			} else {
658 				old->it_value.tv_nsec = 1;
659 				old->it_value.tv_sec = 0;
660 			}
661 		}
662 	}
663 
664 	if (unlikely(ret)) {
665 		/*
666 		 * We are colliding with the timer actually firing.
667 		 * Punt after filling in the timer's old value, and
668 		 * disable this firing since we are already reporting
669 		 * it as an overrun (thanks to bump_cpu_timer above).
670 		 */
671 		unlock_task_sighand(p, &flags);
672 		goto out;
673 	}
674 
675 	if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
676 		new_expires += val;
677 	}
678 
679 	/*
680 	 * Install the new expiry time (or zero).
681 	 * For a timer with no notification action, we don't actually
682 	 * arm the timer (we'll just fake it for timer_gettime).
683 	 */
684 	cpu_timer_setexpires(ctmr, new_expires);
685 	if (new_expires != 0 && val < new_expires) {
686 		arm_timer(timer, p);
687 	}
688 
689 	unlock_task_sighand(p, &flags);
690 	/*
691 	 * Install the new reload setting, and
692 	 * set up the signal and overrun bookkeeping.
693 	 */
694 	timer->it_interval = timespec64_to_ktime(new->it_interval);
695 
696 	/*
697 	 * This acts as a modification timestamp for the timer,
698 	 * so any automatic reload attempt will punt on seeing
699 	 * that we have reset the timer manually.
700 	 */
701 	timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
702 		~REQUEUE_PENDING;
703 	timer->it_overrun_last = 0;
704 	timer->it_overrun = -1;
705 
706 	if (new_expires != 0 && !(val < new_expires)) {
707 		/*
708 		 * The designated time already passed, so we notify
709 		 * immediately, even if the thread never runs to
710 		 * accumulate more time on this clock.
711 		 */
712 		cpu_timer_fire(timer);
713 	}
714 
715 	ret = 0;
716  out:
717 	rcu_read_unlock();
718 	if (old)
719 		old->it_interval = ns_to_timespec64(old_incr);
720 
721 	return ret;
722 }
723 
724 static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
725 {
726 	clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
727 	struct cpu_timer *ctmr = &timer->it.cpu;
728 	u64 now, expires = cpu_timer_getexpires(ctmr);
729 	struct task_struct *p;
730 
731 	rcu_read_lock();
732 	p = cpu_timer_task_rcu(timer);
733 	if (!p)
734 		goto out;
735 
736 	/*
737 	 * Easy part: convert the reload time.
738 	 */
739 	itp->it_interval = ktime_to_timespec64(timer->it_interval);
740 
741 	if (!expires)
742 		goto out;
743 
744 	/*
745 	 * Sample the clock to take the difference with the expiry time.
746 	 */
747 	if (CPUCLOCK_PERTHREAD(timer->it_clock))
748 		now = cpu_clock_sample(clkid, p);
749 	else
750 		now = cpu_clock_sample_group(clkid, p, false);
751 
752 	if (now < expires) {
753 		itp->it_value = ns_to_timespec64(expires - now);
754 	} else {
755 		/*
756 		 * The timer should have expired already, but the firing
757 		 * hasn't taken place yet.  Say it's just about to expire.
758 		 */
759 		itp->it_value.tv_nsec = 1;
760 		itp->it_value.tv_sec = 0;
761 	}
762 out:
763 	rcu_read_unlock();
764 }
765 
766 #define MAX_COLLECTED	20
767 
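/*
 * Move expired timers from @head onto the @firing list and return the next
 * expiry time, or U64_MAX if the queue was drained.
 */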
768 static u64 collect_timerqueue(struct timerqueue_head *head,
769 			      struct list_head *firing, u64 now)
770 {
771 	struct timerqueue_node *next;
772 	int i = 0;
773 
774 	while ((next = timerqueue_getnext(head))) {
775 		struct cpu_timer *ctmr;
776 		u64 expires;
777 
778 		ctmr = container_of(next, struct cpu_timer, node);
779 		expires = cpu_timer_getexpires(ctmr);
780 		/* Limit the number of timers to expire at once */
781 		if (++i == MAX_COLLECTED || now < expires)
782 			return expires;
783 
784 		ctmr->firing = 1;
785 		/* See posix_cpu_timer_wait_running() */
786 		rcu_assign_pointer(ctmr->handling, current);
787 		cpu_timer_dequeue(ctmr);
788 		list_add_tail(&ctmr->elist, firing);
789 	}
790 
791 	return U64_MAX;
792 }
793 
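/* Collect expired timers from all three clock bases and update the expiry cache. */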
794 static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
795 				    struct list_head *firing)
796 {
797 	struct posix_cputimer_base *base = pct->bases;
798 	int i;
799 
800 	for (i = 0; i < CPUCLOCK_MAX; i++, base++) {
801 		base->nextevt = collect_timerqueue(&base->tqhead, firing,
802 						    samples[i]);
803 	}
804 }
805 
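/* SCHED_DEADLINE runtime overrun: clear the flag and send SIGXCPU. */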
806 static inline void check_dl_overrun(struct task_struct *tsk)
807 {
808 	if (tsk->dl.dl_overrun) {
809 		tsk->dl.dl_overrun = 0;
810 		__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
811 	}
812 }
813 
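/* Return true and deliver @signo if @time has reached @limit. */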
814 static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
815 {
816 	if (time < limit)
817 		return false;
818 
819 	if (print_fatal_signals) {
820 		pr_info("%s Watchdog Timeout (%s): %s[%d]\n",
821 			rt ? "RT" : "CPU", hard ? "hard" : "soft",
822 			current->comm, task_pid_nr(current));
823 	}
824 	__group_send_sig_info(signo, SEND_SIG_PRIV, current);
825 	return true;
826 }
827 
828 /*
829  * Check for any per-thread CPU timers that have fired and move them off
830  * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
831  * tsk->it_*_expires values to reflect the remaining thread CPU timers.
832  */
833 static void check_thread_timers(struct task_struct *tsk,
834 				struct list_head *firing)
835 {
836 	struct posix_cputimers *pct = &tsk->posix_cputimers;
837 	u64 samples[CPUCLOCK_MAX];
838 	unsigned long soft;
839 
840 	if (dl_task(tsk))
841 		check_dl_overrun(tsk);
842 
843 	if (expiry_cache_is_inactive(pct))
844 		return;
845 
846 	task_sample_cputime(tsk, samples);
847 	collect_posix_cputimers(pct, samples, firing);
848 
849 	/*
850 	 * Check for the special case thread timers.
851 	 */
852 	soft = task_rlimit(tsk, RLIMIT_RTTIME);
853 	if (soft != RLIM_INFINITY) {
854 		/* Task RT timeout is accounted in jiffies. RTTIME is usec */
855 		unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ);
856 		unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
857 
858 		/* At the hard limit, send SIGKILL. No further action. */
859 		if (hard != RLIM_INFINITY &&
860 		    check_rlimit(rttime, hard, SIGKILL, true, true))
861 			return;
862 
863 		/* At the soft limit, send a SIGXCPU every second */
864 		if (check_rlimit(rttime, soft, SIGXCPU, true, false)) {
865 			soft += USEC_PER_SEC;
866 			tsk->signal->rlim[RLIMIT_RTTIME].rlim_cur = soft;
867 		}
868 	}
869 
870 	if (expiry_cache_is_inactive(pct))
871 		tick_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
872 }
873 
874 static inline void stop_process_timers(struct signal_struct *sig)
875 {
876 	struct posix_cputimers *pct = &sig->posix_cputimers;
877 
878 	/* Turn off the active flag. This is done without locking. */
879 	WRITE_ONCE(pct->timers_active, false);
880 	tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER);
881 }
882 
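/*
 * Expire and, for periodic itimers, rearm an ITIMER_PROF/ITIMER_VIRTUAL
 * timer and propagate its expiry into the given expiration cache entry.
 */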
883 static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
884 			     u64 *expires, u64 cur_time, int signo)
885 {
886 	if (!it->expires)
887 		return;
888 
889 	if (cur_time >= it->expires) {
890 		if (it->incr)
891 			it->expires += it->incr;
892 		else
893 			it->expires = 0;
894 
895 		trace_itimer_expire(signo == SIGPROF ?
896 				    ITIMER_PROF : ITIMER_VIRTUAL,
897 				    task_tgid(tsk), cur_time);
898 		__group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
899 	}
900 
901 	if (it->expires && it->expires < *expires)
902 		*expires = it->expires;
903 }
904 
905 /*
906  * Check for any process-wide CPU timers that have fired and move them
907  * off the tsk->*_timers list onto the firing list.  Per-thread timers
908  * have already been taken off.
909  */
910 static void check_process_timers(struct task_struct *tsk,
911 				 struct list_head *firing)
912 {
913 	struct signal_struct *const sig = tsk->signal;
914 	struct posix_cputimers *pct = &sig->posix_cputimers;
915 	u64 samples[CPUCLOCK_MAX];
916 	unsigned long soft;
917 
918 	/*
919 	 * If there are no active process wide timers (POSIX 1.b, itimers,
920 	 * RLIMIT_CPU) nothing to check. Also skip the process wide timer
921 	 * processing when there is already another task handling them.
922 	 */
923 	if (!READ_ONCE(pct->timers_active) || pct->expiry_active)
924 		return;
925 
926 	/*
927 	 * Signify that a thread is checking for process timers.
928 	 * Write access to this field is protected by the sighand lock.
929 	 */
930 	pct->expiry_active = true;
931 
932 	/*
933 	 * Collect the current process totals. Group accounting is active
934 	 * so the sample can be taken directly.
935 	 */
936 	proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic, samples);
937 	collect_posix_cputimers(pct, samples, firing);
938 
939 	/*
940 	 * Check for the special case process timers.
941 	 */
942 	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF],
943 			 &pct->bases[CPUCLOCK_PROF].nextevt,
944 			 samples[CPUCLOCK_PROF], SIGPROF);
945 	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT],
946 			 &pct->bases[CPUCLOCK_VIRT].nextevt,
947 			 samples[CPUCLOCK_VIRT], SIGVTALRM);
948 
949 	soft = task_rlimit(tsk, RLIMIT_CPU);
950 	if (soft != RLIM_INFINITY) {
951 		/* RLIMIT_CPU is in seconds. Samples are nanoseconds */
952 		unsigned long hard = task_rlimit_max(tsk, RLIMIT_CPU);
953 		u64 ptime = samples[CPUCLOCK_PROF];
954 		u64 softns = (u64)soft * NSEC_PER_SEC;
955 		u64 hardns = (u64)hard * NSEC_PER_SEC;
956 
957 		/* At the hard limit, send SIGKILL. No further action. */
958 		if (hard != RLIM_INFINITY &&
959 		    check_rlimit(ptime, hardns, SIGKILL, false, true))
960 			return;
961 
962 		/* At the soft limit, send a SIGXCPU every second */
963 		if (check_rlimit(ptime, softns, SIGXCPU, false, false)) {
964 			sig->rlim[RLIMIT_CPU].rlim_cur = soft + 1;
965 			softns += NSEC_PER_SEC;
966 		}
967 
968 		/* Update the expiry cache */
969 		if (softns < pct->bases[CPUCLOCK_PROF].nextevt)
970 			pct->bases[CPUCLOCK_PROF].nextevt = softns;
971 	}
972 
973 	if (expiry_cache_is_inactive(pct))
974 		stop_process_timers(sig);
975 
976 	pct->expiry_active = false;
977 }
978 
979 /*
980  * This is called from the signal code (via posixtimer_rearm)
981  * when the last timer signal was delivered and we have to reload the timer.
982  */
983 static void posix_cpu_timer_rearm(struct k_itimer *timer)
984 {
985 	clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
986 	struct task_struct *p;
987 	struct sighand_struct *sighand;
988 	unsigned long flags;
989 	u64 now;
990 
991 	rcu_read_lock();
992 	p = cpu_timer_task_rcu(timer);
993 	if (!p)
994 		goto out;
995 
996 	/* Protect timer list r/w in arm_timer() */
997 	sighand = lock_task_sighand(p, &flags);
998 	if (unlikely(sighand == NULL))
999 		goto out;
1000 
1001 	/*
1002 	 * Fetch the current sample and update the timer's expiry time.
1003 	 */
1004 	if (CPUCLOCK_PERTHREAD(timer->it_clock))
1005 		now = cpu_clock_sample(clkid, p);
1006 	else
1007 		now = cpu_clock_sample_group(clkid, p, true);
1008 
1009 	bump_cpu_timer(timer, now);
1010 
1011 	/*
1012 	 * Now re-arm for the new expiry time.
1013 	 */
1014 	arm_timer(timer, p);
1015 	unlock_task_sighand(p, &flags);
1016 out:
1017 	rcu_read_unlock();
1018 }
1019 
1020 /**
1021  * task_cputimers_expired - Check whether posix CPU timers are expired
1022  *
1023  * @samples:	Array of current samples for the CPUCLOCK clocks
1024  * @pct:	Pointer to a posix_cputimers container
1025  *
1026  * Returns true if any member of @samples is greater than or equal to the
1027  * corresponding member of @pct->bases[CLK].nextevt, false otherwise.
1028  */
1029 static inline bool
1030 task_cputimers_expired(const u64 *samples, struct posix_cputimers *pct)
1031 {
1032 	int i;
1033 
1034 	for (i = 0; i < CPUCLOCK_MAX; i++) {
1035 		if (samples[i] >= pct->bases[i].nextevt)
1036 			return true;
1037 	}
1038 	return false;
1039 }
1040 
1041 /**
1042  * fastpath_timer_check - POSIX CPU timers fast path.
1043  *
1044  * @tsk:	The task (thread) being checked.
1045  *
1046  * Check the task and thread group timers.  If both are zero (there are no
1047  * timers set) return false.  Otherwise snapshot the task and thread group
1048  * timers and compare them with the corresponding expiration times.  Return
1049  * true if a timer has expired, else return false.
1050  */
1051 static inline bool fastpath_timer_check(struct task_struct *tsk)
1052 {
1053 	struct posix_cputimers *pct = &tsk->posix_cputimers;
1054 	struct signal_struct *sig;
1055 
1056 	if (!expiry_cache_is_inactive(pct)) {
1057 		u64 samples[CPUCLOCK_MAX];
1058 
1059 		task_sample_cputime(tsk, samples);
1060 		if (task_cputimers_expired(samples, pct))
1061 			return true;
1062 	}
1063 
1064 	sig = tsk->signal;
1065 	pct = &sig->posix_cputimers;
1066 	/*
1067 	 * Check if thread group timers expired when timers are active and
1068 	 * no other thread in the group is already handling expiry for
1069 	 * thread group cputimers. These fields are read without the
1070 	 * sighand lock. However, this is fine because this is meant to be
1071 	 * a fastpath heuristic to determine whether we should try to
1072 	 * acquire the sighand lock to handle timer expiry.
1073 	 *
1074 	 * In the worst case scenario, if concurrently timers_active is set
1075 	 * or expiry_active is cleared, but the current thread doesn't see
1076 	 * the change yet, the timer checks are delayed until the next
1077 	 * thread in the group gets a scheduler interrupt to handle the
1078 	 * timer. This isn't an issue in practice because these types of
1079 	 * delays with signals actually getting sent are expected.
1080 	 */
1081 	if (READ_ONCE(pct->timers_active) && !READ_ONCE(pct->expiry_active)) {
1082 		u64 samples[CPUCLOCK_MAX];
1083 
1084 		proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic,
1085 					   samples);
1086 
1087 		if (task_cputimers_expired(samples, pct))
1088 			return true;
1089 	}
1090 
1091 	if (dl_task(tsk) && tsk->dl.dl_overrun)
1092 		return true;
1093 
1094 	return false;
1095 }
1096 
1097 static void handle_posix_cpu_timers(struct task_struct *tsk);
1098 
1099 #ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
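/* Task work callback: expire timers in task context under the expiry mutex. */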
1100 static void posix_cpu_timers_work(struct callback_head *work)
1101 {
1102 	struct posix_cputimers_work *cw = container_of(work, typeof(*cw), work);
1103 
1104 	mutex_lock(&cw->mutex);
1105 	handle_posix_cpu_timers(current);
1106 	mutex_unlock(&cw->mutex);
1107 }
1108 
1109 /*
1110  * Invoked from the posix-timer core when a cancel operation failed because
1111  * the timer is marked firing. The caller holds rcu_read_lock(), which
1112  * protects the timer and the task which is expiring it from being freed.
1113  */
1114 static void posix_cpu_timer_wait_running(struct k_itimer *timr)
1115 {
1116 	struct task_struct *tsk = rcu_dereference(timr->it.cpu.handling);
1117 
1118 	/* Has the handling task completed expiry already? */
1119 	if (!tsk)
1120 		return;
1121 
1122 	/* Ensure that the task cannot go away */
1123 	get_task_struct(tsk);
1124 	/* Now drop the RCU protection so the mutex can be locked */
1125 	rcu_read_unlock();
1126 	/* Wait on the expiry mutex */
1127 	mutex_lock(&tsk->posix_cputimers_work.mutex);
1128 	/* Release it immediately again. */
1129 	mutex_unlock(&tsk->posix_cputimers_work.mutex);
1130 	/* Drop the task reference. */
1131 	put_task_struct(tsk);
1132 	/* Relock RCU so the callsite is balanced */
1133 	rcu_read_lock();
1134 }
1135 
1136 static void posix_cpu_timer_wait_running_nsleep(struct k_itimer *timr)
1137 {
1138 	/* Ensure that timr->it.cpu.handling task cannot go away */
1139 	rcu_read_lock();
1140 	spin_unlock_irq(&timr->it_lock);
1141 	posix_cpu_timer_wait_running(timr);
1142 	rcu_read_unlock();
1143 	/* @timr is on stack and is valid */
1144 	spin_lock_irq(&timr->it_lock);
1145 }
1146 
1147 /*
1148  * Clear existing posix CPU timers task work.
1149  */
1150 void clear_posix_cputimers_work(struct task_struct *p)
1151 {
1152 	/*
1153 	 * A copied work entry from the old task is not meaningful, clear it.
1154 	 * N.B. init_task_work will not do this.
1155 	 */
1156 	memset(&p->posix_cputimers_work.work, 0,
1157 	       sizeof(p->posix_cputimers_work.work));
1158 	init_task_work(&p->posix_cputimers_work.work,
1159 		       posix_cpu_timers_work);
1160 	mutex_init(&p->posix_cputimers_work.mutex);
1161 	p->posix_cputimers_work.scheduled = false;
1162 }
1163 
1164 /*
1165  * Initialize posix CPU timers task work in init task. Out of line to
1166  * keep the callback static and to avoid header recursion hell.
1167  */
1168 void __init posix_cputimers_init_work(void)
1169 {
1170 	clear_posix_cputimers_work(current);
1171 }
1172 
1173 /*
1174  * Note: All operations on tsk->posix_cputimer_work.scheduled happen either
1175  * in hard interrupt context or in task context with interrupts
1176  * disabled. Aside of that the writer/reader interaction is always in the
1177  * context of the current task, which means they are strict per CPU.
1178  */
1179 static inline bool posix_cpu_timers_work_scheduled(struct task_struct *tsk)
1180 {
1181 	return tsk->posix_cputimers_work.scheduled;
1182 }
1183 
1184 static inline void __run_posix_cpu_timers(struct task_struct *tsk)
1185 {
1186 	if (WARN_ON_ONCE(tsk->posix_cputimers_work.scheduled))
1187 		return;
1188 
1189 	/* Schedule task work to actually expire the timers */
1190 	tsk->posix_cputimers_work.scheduled = true;
1191 	task_work_add(tsk, &tsk->posix_cputimers_work.work, TWA_RESUME);
1192 }
1193 
1194 static inline bool posix_cpu_timers_enable_work(struct task_struct *tsk,
1195 						unsigned long start)
1196 {
1197 	bool ret = true;
1198 
1199 	/*
1200 	 * On !RT kernels interrupts are disabled while collecting expired
1201 	 * timers, so no tick can happen and the fast path check can be
1202 	 * reenabled without further checks.
1203 	 */
1204 	if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
1205 		tsk->posix_cputimers_work.scheduled = false;
1206 		return true;
1207 	}
1208 
1209 	/*
1210 	 * On RT enabled kernels ticks can happen while the expired timers
1211 	 * are collected under sighand lock. But any tick which observes
1212 	 * the CPUTIMERS_WORK_SCHEDULED bit set, does not run the fastpath
1213 	 * checks. So reenabling the tick work has to be done carefully:
1214 	 *
1215 	 * Disable interrupts and run the fast path check if jiffies have
1216 	 * advanced since the collecting of expired timers started. If
1217 	 * jiffies have not advanced or the fast path check did not find
1218 	 * newly expired timers, reenable the fast path check in the timer
1219 	 * interrupt. If there are newly expired timers, return false and
1220 	 * let the collection loop repeat.
1221 	 */
1222 	local_irq_disable();
1223 	if (start != jiffies && fastpath_timer_check(tsk))
1224 		ret = false;
1225 	else
1226 		tsk->posix_cputimers_work.scheduled = false;
1227 	local_irq_enable();
1228 
1229 	return ret;
1230 }
1231 #else /* CONFIG_POSIX_CPU_TIMERS_TASK_WORK */
1232 static inline void __run_posix_cpu_timers(struct task_struct *tsk)
1233 {
1234 	lockdep_posixtimer_enter();
1235 	handle_posix_cpu_timers(tsk);
1236 	lockdep_posixtimer_exit();
1237 }
1238 
1239 static void posix_cpu_timer_wait_running(struct k_itimer *timr)
1240 {
1241 	cpu_relax();
1242 }
1243 
1244 static void posix_cpu_timer_wait_running_nsleep(struct k_itimer *timr)
1245 {
1246 	spin_unlock_irq(&timr->it_lock);
1247 	cpu_relax();
1248 	spin_lock_irq(&timr->it_lock);
1249 }
1250 
1251 static inline bool posix_cpu_timers_work_scheduled(struct task_struct *tsk)
1252 {
1253 	return false;
1254 }
1255 
1256 static inline bool posix_cpu_timers_enable_work(struct task_struct *tsk,
1257 						unsigned long start)
1258 {
1259 	return true;
1260 }
1261 #endif /* CONFIG_POSIX_CPU_TIMERS_TASK_WORK */
1262 
1263 static void handle_posix_cpu_timers(struct task_struct *tsk)
1264 {
1265 	struct k_itimer *timer, *next;
1266 	unsigned long flags, start;
1267 	LIST_HEAD(firing);
1268 
1269 	if (!lock_task_sighand(tsk, &flags))
1270 		return;
1271 
1272 	do {
1273 		/*
1274 		 * On RT locking sighand lock does not disable interrupts,
1275 		 * so this needs to be careful vs. ticks. Store the current
1276 		 * jiffies value.
1277 		 */
1278 		start = READ_ONCE(jiffies);
1279 		barrier();
1280 
1281 		/*
1282 		 * Here we take off tsk->signal->cpu_timers[N] and
1283 		 * tsk->cpu_timers[N] all the timers that are firing, and
1284 		 * put them on the firing list.
1285 		 */
1286 		check_thread_timers(tsk, &firing);
1287 
1288 		check_process_timers(tsk, &firing);
1289 
1290 		/*
1291 		 * The above timer checks have updated the expiry cache and
1292 		 * because nothing can have queued or modified timers after
1293 		 * sighand lock was taken above it is guaranteed to be
1294 		 * consistent. So the next timer interrupt fastpath check
1295 		 * will find valid data.
1296 		 *
1297 		 * If timer expiry runs in the timer interrupt context then
1298 		 * the loop is not relevant as timers will be directly
1299 		 * expired in interrupt context. The stub function below
1300 		 * returns always true which allows the compiler to
1301 		 * optimize the loop out.
1302 		 *
1303 		 * If timer expiry is deferred to task work context then
1304 		 * the following rules apply:
1305 		 *
1306 		 * - On !RT kernels no tick can have happened on this CPU
1307 		 *   after sighand lock was acquired because interrupts are
1308 		 *   disabled. So reenabling task work before dropping
1309 		 *   sighand lock and reenabling interrupts is race free.
1310 		 *
1311 		 * - On RT kernels ticks might have happened but the tick
1312 		 *   work ignored posix CPU timer handling because the
1313 		 *   CPUTIMERS_WORK_SCHEDULED bit is set. Reenabling work
1314 		 *   must be done very carefully including a check whether
1315 		 *   ticks have happened since the start of the timer
1316 		 *   expiry checks. posix_cpu_timers_enable_work() takes
1317 		 *   care of that and eventually lets the expiry checks
1318 		 *   run again.
1319 		 */
1320 	} while (!posix_cpu_timers_enable_work(tsk, start));
1321 
1322 	/*
1323 	 * We must release sighand lock before taking any timer's lock.
1324 	 * There is a potential race with timer deletion here, as the
1325 	 * siglock now protects our private firing list.  We have set
1326 	 * the firing flag in each timer, so that a deletion attempt
1327 	 * that gets the timer lock before we do will give it up and
1328 	 * spin until we've taken care of that timer below.
1329 	 */
1330 	unlock_task_sighand(tsk, &flags);
1331 
1332 	/*
1333 	 * Now that all the timers on our list have the firing flag,
1334 	 * no one will touch their list entries but us.  We'll take
1335 	 * each timer's lock before clearing its firing flag, so no
1336 	 * timer call will interfere.
1337 	 */
1338 	list_for_each_entry_safe(timer, next, &firing, it.cpu.elist) {
1339 		int cpu_firing;
1340 
1341 		/*
1342 		 * spin_lock() is sufficient here even independent of the
1343 		 * expiry context. If expiry happens in hard interrupt
1344 		 * context it's obvious. For task work context it's safe
1345 		 * because all other operations on timer::it_lock happen in
1346 		 * task context (syscall or exit).
1347 		 */
1348 		spin_lock(&timer->it_lock);
1349 		list_del_init(&timer->it.cpu.elist);
1350 		cpu_firing = timer->it.cpu.firing;
1351 		timer->it.cpu.firing = 0;
1352 		/*
1353 		 * The firing flag is -1 if we collided with a reset
1354 		 * of the timer, which already reported this
1355 		 * almost-firing as an overrun.  So don't generate an event.
1356 		 */
1357 		if (likely(cpu_firing >= 0))
1358 			cpu_timer_fire(timer);
1359 		/* See posix_cpu_timer_wait_running() */
1360 		rcu_assign_pointer(timer->it.cpu.handling, NULL);
1361 		spin_unlock(&timer->it_lock);
1362 	}
1363 }
1364 
1365 /*
1366  * This is called from the timer interrupt handler.  The irq handler has
1367  * already updated our counts.  We need to check if any timers fire now.
1368  * Interrupts are disabled.
1369  */
1370 void run_posix_cpu_timers(void)
1371 {
1372 	struct task_struct *tsk = current;
1373 
1374 	lockdep_assert_irqs_disabled();
1375 
1376 	/*
1377 	 * If the actual expiry is deferred to task work context and the
1378 	 * work is already scheduled there is no point to do anything here.
1379 	 */
1380 	if (posix_cpu_timers_work_scheduled(tsk))
1381 		return;
1382 
1383 	/*
1384 	 * The fast path checks that there are no expired thread or thread
1385 	 * group timers.  If that's so, just return.
1386 	 */
1387 	if (!fastpath_timer_check(tsk))
1388 		return;
1389 
1390 	__run_posix_cpu_timers(tsk);
1391 }
1392 
1393 /*
1394  * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
1395  * The tsk->sighand->siglock must be held by the caller.
1396  */
1397 void set_process_cpu_timer(struct task_struct *tsk, unsigned int clkid,
1398 			   u64 *newval, u64 *oldval)
1399 {
1400 	u64 now, *nextevt;
1401 
1402 	if (WARN_ON_ONCE(clkid >= CPUCLOCK_SCHED))
1403 		return;
1404 
1405 	nextevt = &tsk->signal->posix_cputimers.bases[clkid].nextevt;
1406 	now = cpu_clock_sample_group(clkid, tsk, true);
1407 
1408 	if (oldval) {
1409 		/*
1410 		 * We are setting an itimer. *oldval is absolute and we update
1411 		 * it to be relative; the *newval argument is relative and we
1412 		 * update it to be absolute.
1413 		 */
1414 		if (*oldval) {
1415 			if (*oldval <= now) {
1416 				/* Just about to fire. */
1417 				*oldval = TICK_NSEC;
1418 			} else {
1419 				*oldval -= now;
1420 			}
1421 		}
1422 
1423 		if (!*newval)
1424 			return;
1425 		*newval += now;
1426 	}
1427 
1428 	/*
1429 	 * Update expiration cache if this is the earliest timer. CPUCLOCK_PROF
1430 	 * expiry cache is also used by RLIMIT_CPU.
1431 	 */
1432 	if (*newval < *nextevt)
1433 		*nextevt = *newval;
1434 
1435 	tick_dep_set_signal(tsk->signal, TICK_DEP_BIT_POSIX_TIMER);
1436 }
1437 
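/*
 * Implement clock_nanosleep() for CPU clocks with a temporary on-stack timer.
 */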
1438 static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
1439 			    const struct timespec64 *rqtp)
1440 {
1441 	struct itimerspec64 it;
1442 	struct k_itimer timer;
1443 	u64 expires;
1444 	int error;
1445 
1446 	/*
1447 	 * Set up a temporary timer and then wait for it to go off.
1448 	 */
1449 	memset(&timer, 0, sizeof timer);
1450 	spin_lock_init(&timer.it_lock);
1451 	timer.it_clock = which_clock;
1452 	timer.it_overrun = -1;
1453 	error = posix_cpu_timer_create(&timer);
1454 	timer.it_process = current;
1455 
1456 	if (!error) {
1457 		static struct itimerspec64 zero_it;
1458 		struct restart_block *restart;
1459 
1460 		memset(&it, 0, sizeof(it));
1461 		it.it_value = *rqtp;
1462 
1463 		spin_lock_irq(&timer.it_lock);
1464 		error = posix_cpu_timer_set(&timer, flags, &it, NULL);
1465 		if (error) {
1466 			spin_unlock_irq(&timer.it_lock);
1467 			return error;
1468 		}
1469 
1470 		while (!signal_pending(current)) {
1471 			if (!cpu_timer_getexpires(&timer.it.cpu)) {
1472 				/*
1473 				 * Our timer fired and was reset; the
1474 				 * deletion below cannot fail.
1475 				 */
1476 				posix_cpu_timer_del(&timer);
1477 				spin_unlock_irq(&timer.it_lock);
1478 				return 0;
1479 			}
1480 
1481 			/*
1482 			 * Block until cpu_timer_fire (or a signal) wakes us.
1483 			 */
1484 			__set_current_state(TASK_INTERRUPTIBLE);
1485 			spin_unlock_irq(&timer.it_lock);
1486 			schedule();
1487 			spin_lock_irq(&timer.it_lock);
1488 		}
1489 
1490 		/*
1491 		 * We were interrupted by a signal.
1492 		 */
1493 		expires = cpu_timer_getexpires(&timer.it.cpu);
1494 		error = posix_cpu_timer_set(&timer, 0, &zero_it, &it);
1495 		if (!error) {
1496 			/* Timer is now unarmed, deletion can not fail. */
1497 			posix_cpu_timer_del(&timer);
1498 		} else {
1499 			while (error == TIMER_RETRY) {
1500 				posix_cpu_timer_wait_running_nsleep(&timer);
1501 				error = posix_cpu_timer_del(&timer);
1502 			}
1503 		}
1504 
1505 		spin_unlock_irq(&timer.it_lock);
1506 
1507 		if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
1508 			/*
1509 			 * It actually did fire already.
1510 			 */
1511 			return 0;
1512 		}
1513 
1514 		error = -ERESTART_RESTARTBLOCK;
1515 		/*
1516 		 * Report back to the user the time still remaining.
1517 		 */
1518 		restart = &current->restart_block;
1519 		restart->nanosleep.expires = expires;
1520 		if (restart->nanosleep.type != TT_NONE)
1521 			error = nanosleep_copyout(restart, &it.it_value);
1522 	}
1523 
1524 	return error;
1525 }
1526 
1527 static long posix_cpu_nsleep_restart(struct restart_block *restart_block);
1528 
1529 static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
1530 			    const struct timespec64 *rqtp)
1531 {
1532 	struct restart_block *restart_block = &current->restart_block;
1533 	int error;
1534 
1535 	/*
1536 	 * Diagnose required errors first.
1537 	 */
1538 	if (CPUCLOCK_PERTHREAD(which_clock) &&
1539 	    (CPUCLOCK_PID(which_clock) == 0 ||
1540 	     CPUCLOCK_PID(which_clock) == task_pid_vnr(current)))
1541 		return -EINVAL;
1542 
1543 	error = do_cpu_nanosleep(which_clock, flags, rqtp);
1544 
1545 	if (error == -ERESTART_RESTARTBLOCK) {
1546 
1547 		if (flags & TIMER_ABSTIME)
1548 			return -ERESTARTNOHAND;
1549 
1550 		restart_block->nanosleep.clockid = which_clock;
1551 		set_restart_fn(restart_block, posix_cpu_nsleep_restart);
1552 	}
1553 	return error;
1554 }
1555 
1556 static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
1557 {
1558 	clockid_t which_clock = restart_block->nanosleep.clockid;
1559 	struct timespec64 t;
1560 
1561 	t = ns_to_timespec64(restart_block->nanosleep.expires);
1562 
1563 	return do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t);
1564 }
1565 
1566 #define PROCESS_CLOCK	make_process_cpuclock(0, CPUCLOCK_SCHED)
1567 #define THREAD_CLOCK	make_thread_cpuclock(0, CPUCLOCK_SCHED)
1568 
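/*
 * Thin wrappers implementing CLOCK_PROCESS_CPUTIME_ID and
 * CLOCK_THREAD_CPUTIME_ID on top of the generic CPU clock code.
 */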
1569 static int process_cpu_clock_getres(const clockid_t which_clock,
1570 				    struct timespec64 *tp)
1571 {
1572 	return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
1573 }
1574 static int process_cpu_clock_get(const clockid_t which_clock,
1575 				 struct timespec64 *tp)
1576 {
1577 	return posix_cpu_clock_get(PROCESS_CLOCK, tp);
1578 }
1579 static int process_cpu_timer_create(struct k_itimer *timer)
1580 {
1581 	timer->it_clock = PROCESS_CLOCK;
1582 	return posix_cpu_timer_create(timer);
1583 }
1584 static int process_cpu_nsleep(const clockid_t which_clock, int flags,
1585 			      const struct timespec64 *rqtp)
1586 {
1587 	return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp);
1588 }
1589 static int thread_cpu_clock_getres(const clockid_t which_clock,
1590 				   struct timespec64 *tp)
1591 {
1592 	return posix_cpu_clock_getres(THREAD_CLOCK, tp);
1593 }
1594 static int thread_cpu_clock_get(const clockid_t which_clock,
1595 				struct timespec64 *tp)
1596 {
1597 	return posix_cpu_clock_get(THREAD_CLOCK, tp);
1598 }
1599 static int thread_cpu_timer_create(struct k_itimer *timer)
1600 {
1601 	timer->it_clock = THREAD_CLOCK;
1602 	return posix_cpu_timer_create(timer);
1603 }
1604 
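/* Clock operations shared by all CPU-time clocks (per-thread and per-process). */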
1605 const struct k_clock clock_posix_cpu = {
1606 	.clock_getres		= posix_cpu_clock_getres,
1607 	.clock_set		= posix_cpu_clock_set,
1608 	.clock_get_timespec	= posix_cpu_clock_get,
1609 	.timer_create		= posix_cpu_timer_create,
1610 	.nsleep			= posix_cpu_nsleep,
1611 	.timer_set		= posix_cpu_timer_set,
1612 	.timer_del		= posix_cpu_timer_del,
1613 	.timer_get		= posix_cpu_timer_get,
1614 	.timer_rearm		= posix_cpu_timer_rearm,
1615 	.timer_wait_running	= posix_cpu_timer_wait_running,
1616 };
1617 
1618 const struct k_clock clock_process = {
1619 	.clock_getres		= process_cpu_clock_getres,
1620 	.clock_get_timespec	= process_cpu_clock_get,
1621 	.timer_create		= process_cpu_timer_create,
1622 	.nsleep			= process_cpu_nsleep,
1623 };
1624 
1625 const struct k_clock clock_thread = {
1626 	.clock_getres		= thread_cpu_clock_getres,
1627 	.clock_get_timespec	= thread_cpu_clock_get,
1628 	.timer_create		= thread_cpu_timer_create,
1629 };
1630