1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Implement CPU time clocks for the POSIX clock interface.
4  */
5 
6 #include <linux/sched/signal.h>
7 #include <linux/sched/cputime.h>
8 #include <linux/posix-timers.h>
9 #include <linux/errno.h>
10 #include <linux/math64.h>
11 #include <linux/uaccess.h>
12 #include <linux/kernel_stat.h>
13 #include <trace/events/timer.h>
14 #include <linux/tick.h>
15 #include <linux/workqueue.h>
16 #include <linux/compat.h>
17 #include <linux/sched/deadline.h>
18 
19 #include "posix-timers.h"
20 
21 static void posix_cpu_timer_rearm(struct k_itimer *timer);
22 
23 void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit)
24 {
25 	posix_cputimers_init(pct);
26 	if (cpu_limit != RLIM_INFINITY) {
27 		pct->bases[CPUCLOCK_PROF].nextevt = cpu_limit * NSEC_PER_SEC;
28 		pct->timers_active = true;
29 	}
30 }
31 
32 /*
33  * Called after updating RLIMIT_CPU to run cpu timer and update
34  * tsk->signal->posix_cputimers.bases[clock].nextevt expiration cache if
35  * necessary. Needs siglock protection since other code may update the
36  * expiration cache as well.
37  */
38 void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
39 {
40 	u64 nsecs = rlim_new * NSEC_PER_SEC;
41 
42 	spin_lock_irq(&task->sighand->siglock);
43 	set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL);
44 	spin_unlock_irq(&task->sighand->siglock);
45 }
46 
47 /*
48  * Functions for validating access to tasks.
49  */
50 static struct pid *pid_for_clock(const clockid_t clock, bool gettime)
51 {
52 	const bool thread = !!CPUCLOCK_PERTHREAD(clock);
53 	const pid_t upid = CPUCLOCK_PID(clock);
54 	struct pid *pid;
55 
56 	if (CPUCLOCK_WHICH(clock) >= CPUCLOCK_MAX)
57 		return NULL;
58 
59 	/*
60 	 * If the encoded PID is 0, then the timer is targeted at current
61 	 * or the process to which current belongs.
62 	 */
63 	if (upid == 0)
64 		return thread ? task_pid(current) : task_tgid(current);
65 
66 	pid = find_vpid(upid);
67 	if (!pid)
68 		return NULL;
69 
70 	if (thread) {
71 		struct task_struct *tsk = pid_task(pid, PIDTYPE_PID);
72 		return (tsk && same_thread_group(tsk, current)) ? pid : NULL;
73 	}
74 
75 	/*
76 	 * For clock_gettime(PROCESS) allow finding the process with
77 	 * the pid of the current task.  The code needs the tgid
78 	 * of the process so that pid_task(pid, PIDTYPE_TGID) can be
79 	 * used to find the process.
80 	 */
81 	if (gettime && (pid == task_pid(current)))
82 		return task_tgid(current);
83 
84 	/*
85 	 * For process clocks, require that the pid identifies a process.
86 	 */
87 	return pid_has_task(pid, PIDTYPE_TGID) ? pid : NULL;
88 }
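/*
 * Illustrative userspace sketch (assumption: not part of this file).
 * Dynamic CPU-clock ids encode the target pid/tid, the per-thread bit and
 * the clock type; they are normally obtained with clock_getcpuclockid() or
 * pthread_getcpuclockid() rather than built by hand ('pid' is a placeholder):
 *
 *	#include <time.h>
 *
 *	clockid_t cid;
 *	struct timespec ts;
 *
 *	if (!clock_getcpuclockid(pid, &cid))
 *		clock_gettime(cid, &ts);	// CPU time of that process
 */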
89 
90 static inline int validate_clock_permissions(const clockid_t clock)
91 {
92 	int ret;
93 
94 	rcu_read_lock();
95 	ret = pid_for_clock(clock, false) ? 0 : -EINVAL;
96 	rcu_read_unlock();
97 
98 	return ret;
99 }
100 
101 static inline enum pid_type clock_pid_type(const clockid_t clock)
102 {
103 	return CPUCLOCK_PERTHREAD(clock) ? PIDTYPE_PID : PIDTYPE_TGID;
104 }
105 
106 static inline struct task_struct *cpu_timer_task_rcu(struct k_itimer *timer)
107 {
108 	return pid_task(timer->it.cpu.pid, clock_pid_type(timer->it_clock));
109 }
110 
111 /*
112  * Update expiry time from increment, and increase overrun count,
113  * given the current clock sample.
114  */
115 static u64 bump_cpu_timer(struct k_itimer *timer, u64 now)
116 {
117 	u64 delta, incr, expires = timer->it.cpu.node.expires;
118 	int i;
119 
120 	if (!timer->it_interval)
121 		return expires;
122 
123 	if (now < expires)
124 		return expires;
125 
126 	incr = timer->it_interval;
127 	delta = now + incr - expires;
128 
129 	/* Don't use (incr*2 < delta), incr*2 might overflow. */
130 	for (i = 0; incr < delta - incr; i++)
131 		incr = incr << 1;
132 
133 	for (; i >= 0; incr >>= 1, i--) {
134 		if (delta < incr)
135 			continue;
136 
137 		timer->it.cpu.node.expires += incr;
138 		timer->it_overrun += 1LL << i;
139 		delta -= incr;
140 	}
141 	return timer->it.cpu.node.expires;
142 }
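/*
 * Worked example with illustrative numbers: expires = 10, it_interval = 3,
 * now = 25.  delta = 25 + 3 - 10 = 18; the first loop doubles incr up to 12
 * (i = 2), the second loop then consumes 12 and 6 of delta, moving expires
 * to 28 and adding (1 << 2) + (1 << 1) = 6 to it_overrun: six full periods
 * were missed and the next expiry lands just after 'now'.
 */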
143 
144 /* Check whether all cache entries contain U64_MAX, i.e. eternal expiry time */
145 static inline bool expiry_cache_is_inactive(const struct posix_cputimers *pct)
146 {
147 	return !(~pct->bases[CPUCLOCK_PROF].nextevt |
148 		 ~pct->bases[CPUCLOCK_VIRT].nextevt |
149 		 ~pct->bases[CPUCLOCK_SCHED].nextevt);
150 }
151 
152 static int
153 posix_cpu_clock_getres(const clockid_t which_clock, struct timespec64 *tp)
154 {
155 	int error = validate_clock_permissions(which_clock);
156 
157 	if (!error) {
158 		tp->tv_sec = 0;
159 		tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
160 		if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
161 			/*
162 			 * If sched_clock is using a cycle counter, we
163 			 * don't have any idea of its true resolution
164 			 * exported, but it is much finer than 1s/HZ.
165 			 */
166 			tp->tv_nsec = 1;
167 		}
168 	}
169 	return error;
170 }
171 
172 static int
173 posix_cpu_clock_set(const clockid_t clock, const struct timespec64 *tp)
174 {
175 	int error = validate_clock_permissions(clock);
176 
177 	/*
178 	 * You can never reset a CPU clock, but we check for other errors
179 	 * in the call before failing with EPERM.
180 	 */
181 	return error ? : -EPERM;
182 }
183 
184 /*
185  * Sample a per-thread clock for the given task. clkid is validated.
186  */
187 static u64 cpu_clock_sample(const clockid_t clkid, struct task_struct *p)
188 {
189 	u64 utime, stime;
190 
191 	if (clkid == CPUCLOCK_SCHED)
192 		return task_sched_runtime(p);
193 
194 	task_cputime(p, &utime, &stime);
195 
196 	switch (clkid) {
197 	case CPUCLOCK_PROF:
198 		return utime + stime;
199 	case CPUCLOCK_VIRT:
200 		return utime;
201 	default:
202 		WARN_ON_ONCE(1);
203 	}
204 	return 0;
205 }
206 
207 static inline void store_samples(u64 *samples, u64 stime, u64 utime, u64 rtime)
208 {
209 	samples[CPUCLOCK_PROF] = stime + utime;
210 	samples[CPUCLOCK_VIRT] = utime;
211 	samples[CPUCLOCK_SCHED] = rtime;
212 }
213 
214 static void task_sample_cputime(struct task_struct *p, u64 *samples)
215 {
216 	u64 stime, utime;
217 
218 	task_cputime(p, &utime, &stime);
219 	store_samples(samples, stime, utime, p->se.sum_exec_runtime);
220 }
221 
222 static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
223 				       u64 *samples)
224 {
225 	u64 stime, utime, rtime;
226 
227 	utime = atomic64_read(&at->utime);
228 	stime = atomic64_read(&at->stime);
229 	rtime = atomic64_read(&at->sum_exec_runtime);
230 	store_samples(samples, stime, utime, rtime);
231 }
232 
233 /*
234  * Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
235  * to avoid race conditions with concurrent updates to cputime.
236  */
237 static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
238 {
239 	u64 curr_cputime;
240 retry:
241 	curr_cputime = atomic64_read(cputime);
242 	if (sum_cputime > curr_cputime) {
243 		if (atomic64_cmpxchg(cputime, curr_cputime, sum_cputime) != curr_cputime)
244 			goto retry;
245 	}
246 }
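/*
 * The retry loop above is the usual lock-free "monotonic maximum" pattern:
 * re-read the current value and only store ours if it is still larger.  A
 * minimal standalone sketch of the same idea in C11 atomics (illustrative,
 * not kernel code):
 *
 *	#include <stdatomic.h>
 *	#include <stdint.h>
 *
 *	static void update_max(_Atomic uint64_t *max, uint64_t val)
 *	{
 *		uint64_t cur = atomic_load(max);
 *
 *		while (val > cur &&
 *		       !atomic_compare_exchange_weak(max, &cur, val))
 *			;	// cur was reloaded by the failed CAS, retry
 *	}
 */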
247 
248 static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic,
249 			      struct task_cputime *sum)
250 {
251 	__update_gt_cputime(&cputime_atomic->utime, sum->utime);
252 	__update_gt_cputime(&cputime_atomic->stime, sum->stime);
253 	__update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
254 }
255 
256 /**
257  * thread_group_sample_cputime - Sample cputime for a given task
258  * @tsk:	Task for which cputime needs to be sampled
259  * @samples:	Storage for time samples
260  *
261  * Called from sys_getitimer() to calculate the expiry time of an active
262  * timer. That means group cputime accounting is already active. Called
263  * with task sighand lock held.
264  *
265  * Updates @samples with an up-to-date sample of the thread group cputimes.
266  */
267 void thread_group_sample_cputime(struct task_struct *tsk, u64 *samples)
268 {
269 	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
270 	struct posix_cputimers *pct = &tsk->signal->posix_cputimers;
271 
272 	WARN_ON_ONCE(!pct->timers_active);
273 
274 	proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
275 }
276 
277 /**
278  * thread_group_start_cputime - Start cputime and return a sample
279  * @tsk:	Task for which cputime needs to be started
280  * @samples:	Storage for time samples
281  *
282  * The thread group cputime accounting is avoided when there are no posix
283  * CPU timers armed. Before starting a timer it's required to check whether
284  * the time accounting is active. If not, a full update of the atomic
285  * accounting store needs to be done and the accounting enabled.
286  *
288  * Updates @samples with an up-to-date sample of the thread group cputimes.
288  */
289 static void thread_group_start_cputime(struct task_struct *tsk, u64 *samples)
290 {
291 	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
292 	struct posix_cputimers *pct = &tsk->signal->posix_cputimers;
293 
294 	lockdep_assert_task_sighand_held(tsk);
295 
296 	/* Check if cputimer isn't running. This is accessed without locking. */
297 	if (!READ_ONCE(pct->timers_active)) {
298 		struct task_cputime sum;
299 
300 		/*
301 		 * The POSIX timer interface allows for absolute time expiry
302 		 * values through the TIMER_ABSTIME flag, therefore we have
303 		 * to synchronize the timer to the clock every time we start it.
304 		 */
305 		thread_group_cputime(tsk, &sum);
306 		update_gt_cputime(&cputimer->cputime_atomic, &sum);
307 
308 		/*
309 		 * We're setting timers_active without a lock. Ensure this
310 		 * only gets written to in one operation. We set it after
311 		 * update_gt_cputime() as a small optimization, but
312 		 * barriers are not required because update_gt_cputime()
313 		 * can handle concurrent updates.
314 		 */
315 		WRITE_ONCE(pct->timers_active, true);
316 	}
317 	proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
318 }
319 
320 static void __thread_group_cputime(struct task_struct *tsk, u64 *samples)
321 {
322 	struct task_cputime ct;
323 
324 	thread_group_cputime(tsk, &ct);
325 	store_samples(samples, ct.stime, ct.utime, ct.sum_exec_runtime);
326 }
327 
328 /*
329  * Sample a process (thread group) clock for the given task clkid. If the
330  * group's cputime accounting is already enabled, read the atomic
331  * store. Otherwise a full update is required.  clkid is already validated.
332  */
333 static u64 cpu_clock_sample_group(const clockid_t clkid, struct task_struct *p,
334 				  bool start)
335 {
336 	struct thread_group_cputimer *cputimer = &p->signal->cputimer;
337 	struct posix_cputimers *pct = &p->signal->posix_cputimers;
338 	u64 samples[CPUCLOCK_MAX];
339 
340 	if (!READ_ONCE(pct->timers_active)) {
341 		if (start)
342 			thread_group_start_cputime(p, samples);
343 		else
344 			__thread_group_cputime(p, samples);
345 	} else {
346 		proc_sample_cputime_atomic(&cputimer->cputime_atomic, samples);
347 	}
348 
349 	return samples[clkid];
350 }
351 
352 static int posix_cpu_clock_get(const clockid_t clock, struct timespec64 *tp)
353 {
354 	const clockid_t clkid = CPUCLOCK_WHICH(clock);
355 	struct task_struct *tsk;
356 	u64 t;
357 
358 	rcu_read_lock();
359 	tsk = pid_task(pid_for_clock(clock, true), clock_pid_type(clock));
360 	if (!tsk) {
361 		rcu_read_unlock();
362 		return -EINVAL;
363 	}
364 
365 	if (CPUCLOCK_PERTHREAD(clock))
366 		t = cpu_clock_sample(clkid, tsk);
367 	else
368 		t = cpu_clock_sample_group(clkid, tsk, false);
369 	rcu_read_unlock();
370 
371 	*tp = ns_to_timespec64(t);
372 	return 0;
373 }
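/*
 * Illustrative userspace sketch (not part of this file): the predefined
 * CLOCK_PROCESS_CPUTIME_ID and CLOCK_THREAD_CPUTIME_ID ids refer to the
 * caller and are serviced by posix_cpu_clock_get() via the clock_process /
 * clock_thread k_clocks at the bottom of this file:
 *
 *	#include <time.h>
 *
 *	struct timespec ts;
 *
 *	clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);	// whole thread group
 *	clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);	// calling thread only
 */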
374 
375 /*
376  * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
377  * This is called from sys_timer_create() and do_cpu_nanosleep() with the
378  * new timer already all-zeros initialized.
379  */
380 static int posix_cpu_timer_create(struct k_itimer *new_timer)
381 {
382 	static struct lock_class_key posix_cpu_timers_key;
383 	struct pid *pid;
384 
385 	rcu_read_lock();
386 	pid = pid_for_clock(new_timer->it_clock, false);
387 	if (!pid) {
388 		rcu_read_unlock();
389 		return -EINVAL;
390 	}
391 
392 	/*
393 	 * If posix timer expiry is handled in task work context then
394 	 * timer::it_lock can be taken without disabling interrupts as all
395 	 * other locking happens in task context. This requires a separate
396 	 * lock class key otherwise regular posix timer expiry would record
397 	 * the lock class being taken in interrupt context and generate a
398 	 * false positive warning.
399 	 */
400 	if (IS_ENABLED(CONFIG_POSIX_CPU_TIMERS_TASK_WORK))
401 		lockdep_set_class(&new_timer->it_lock, &posix_cpu_timers_key);
402 
403 	new_timer->kclock = &clock_posix_cpu;
404 	timerqueue_init(&new_timer->it.cpu.node);
405 	new_timer->it.cpu.pid = get_pid(pid);
406 	rcu_read_unlock();
407 	return 0;
408 }
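/*
 * Illustrative userspace sketch (not part of this file): timer_create() on
 * a CPU-time clock ends up here via sys_timer_create():
 *
 *	#include <signal.h>
 *	#include <time.h>
 *
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGRTMIN,
 *	};
 *	timer_t tid;
 *
 *	timer_create(CLOCK_THREAD_CPUTIME_ID, &sev, &tid);
 */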
409 
410 static struct posix_cputimer_base *timer_base(struct k_itimer *timer,
411 					      struct task_struct *tsk)
412 {
413 	int clkidx = CPUCLOCK_WHICH(timer->it_clock);
414 
415 	if (CPUCLOCK_PERTHREAD(timer->it_clock))
416 		return tsk->posix_cputimers.bases + clkidx;
417 	else
418 		return tsk->signal->posix_cputimers.bases + clkidx;
419 }
420 
421 /*
422  * Force recalculating the base earliest expiration on the next tick.
423  * This will also re-evaluate the need to keep around the process wide
424  * cputime counter and tick dependency and eventually shut these down
425  * if necessary.
426  */
427 static void trigger_base_recalc_expires(struct k_itimer *timer,
428 					struct task_struct *tsk)
429 {
430 	struct posix_cputimer_base *base = timer_base(timer, tsk);
431 
432 	base->nextevt = 0;
433 }
434 
435 /*
436  * Dequeue the timer and reset the base if it was its earliest expiration.
437  * It makes sure the next tick recalculates the base next expiration so we
438  * don't keep the costly process wide cputime counter around for a random
439  * amount of time, along with the tick dependency.
440  *
441  * If another timer gets queued between this and the next tick, its
442  * expiration will update the base next event if necessary on the next
443  * tick.
444  */
445 static void disarm_timer(struct k_itimer *timer, struct task_struct *p)
446 {
447 	struct cpu_timer *ctmr = &timer->it.cpu;
448 	struct posix_cputimer_base *base;
449 
450 	if (!cpu_timer_dequeue(ctmr))
451 		return;
452 
453 	base = timer_base(timer, p);
454 	if (cpu_timer_getexpires(ctmr) == base->nextevt)
455 		trigger_base_recalc_expires(timer, p);
456 }
457 
458 
459 /*
460  * Clean up a CPU-clock timer that is about to be destroyed.
461  * This is called from timer deletion with the timer already locked.
462  * If we return TIMER_RETRY, it's necessary to release the timer's lock
463  * and try again.  (This happens when the timer is in the middle of firing.)
464  */
465 static int posix_cpu_timer_del(struct k_itimer *timer)
466 {
467 	struct cpu_timer *ctmr = &timer->it.cpu;
468 	struct sighand_struct *sighand;
469 	struct task_struct *p;
470 	unsigned long flags;
471 	int ret = 0;
472 
473 	rcu_read_lock();
474 	p = cpu_timer_task_rcu(timer);
475 	if (!p)
476 		goto out;
477 
478 	/*
479 	 * Protect against sighand release/switch in exit/exec and process/
480 	 * thread timer list entry concurrent read/writes.
481 	 */
482 	sighand = lock_task_sighand(p, &flags);
483 	if (unlikely(sighand == NULL)) {
484 		/*
485 		 * This raced with the reaping of the task. The exit cleanup
486 		 * should have removed this timer from the timer queue.
487 		 */
488 		WARN_ON_ONCE(ctmr->head || timerqueue_node_queued(&ctmr->node));
489 	} else {
490 		if (timer->it.cpu.firing)
491 			ret = TIMER_RETRY;
492 		else
493 			disarm_timer(timer, p);
494 
495 		unlock_task_sighand(p, &flags);
496 	}
497 
498 out:
499 	rcu_read_unlock();
500 	if (!ret)
501 		put_pid(ctmr->pid);
502 
503 	return ret;
504 }
505 
506 static void cleanup_timerqueue(struct timerqueue_head *head)
507 {
508 	struct timerqueue_node *node;
509 	struct cpu_timer *ctmr;
510 
511 	while ((node = timerqueue_getnext(head))) {
512 		timerqueue_del(head, node);
513 		ctmr = container_of(node, struct cpu_timer, node);
514 		ctmr->head = NULL;
515 	}
516 }
517 
518 /*
519  * Clean out CPU timers which are still armed when a thread exits. The
520  * timers are only removed from the list. No other updates are done. The
521  * corresponding posix timers are still accessible, but cannot be rearmed.
522  *
523  * This must be called with the siglock held.
524  */
525 static void cleanup_timers(struct posix_cputimers *pct)
526 {
527 	cleanup_timerqueue(&pct->bases[CPUCLOCK_PROF].tqhead);
528 	cleanup_timerqueue(&pct->bases[CPUCLOCK_VIRT].tqhead);
529 	cleanup_timerqueue(&pct->bases[CPUCLOCK_SCHED].tqhead);
530 }
531 
532 /*
533  * These are both called with the siglock held, when the current thread
534  * is being reaped.  When the final (leader) thread in the group is reaped,
535  * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
536  */
537 void posix_cpu_timers_exit(struct task_struct *tsk)
538 {
539 	cleanup_timers(&tsk->posix_cputimers);
540 }
541 void posix_cpu_timers_exit_group(struct task_struct *tsk)
542 {
543 	cleanup_timers(&tsk->signal->posix_cputimers);
544 }
545 
546 /*
547  * Insert the timer on the appropriate list before any timers that
548  * expire later.  This must be called with the sighand lock held.
549  */
550 static void arm_timer(struct k_itimer *timer, struct task_struct *p)
551 {
552 	struct posix_cputimer_base *base = timer_base(timer, p);
553 	struct cpu_timer *ctmr = &timer->it.cpu;
554 	u64 newexp = cpu_timer_getexpires(ctmr);
555 
556 	if (!cpu_timer_enqueue(&base->tqhead, ctmr))
557 		return;
558 
559 	/*
560 	 * We are the new earliest-expiring POSIX 1.b timer, hence
561 	 * need to update expiration cache. Take into account that
562 	 * for process timers we share expiration cache with itimers
563 	 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
564 	 */
565 	if (newexp < base->nextevt)
566 		base->nextevt = newexp;
567 
568 	if (CPUCLOCK_PERTHREAD(timer->it_clock))
569 		tick_dep_set_task(p, TICK_DEP_BIT_POSIX_TIMER);
570 	else
571 		tick_dep_set_signal(p, TICK_DEP_BIT_POSIX_TIMER);
572 }
573 
574 /*
575  * The timer is locked, fire it and arrange for its reload.
576  */
577 static void cpu_timer_fire(struct k_itimer *timer)
578 {
579 	struct cpu_timer *ctmr = &timer->it.cpu;
580 
581 	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
582 		/*
583 		 * The user doesn't want any signal.
584 		 */
585 		cpu_timer_setexpires(ctmr, 0);
586 	} else if (unlikely(timer->sigq == NULL)) {
587 		/*
588 		 * This is a special case for clock_nanosleep,
589 		 * not a normal timer from sys_timer_create.
590 		 */
591 		wake_up_process(timer->it_process);
592 		cpu_timer_setexpires(ctmr, 0);
593 	} else if (!timer->it_interval) {
594 		/*
595 		 * One-shot timer.  Clear it as soon as it's fired.
596 		 */
597 		posix_timer_event(timer, 0);
598 		cpu_timer_setexpires(ctmr, 0);
599 	} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
600 		/*
601 		 * The signal did not get queued because the signal
602 		 * was ignored, so we won't get any callback to
603 		 * reload the timer.  But we need to keep it
604 		 * ticking in case the signal is deliverable next time.
605 		 */
606 		posix_cpu_timer_rearm(timer);
607 		++timer->it_requeue_pending;
608 	}
609 }
610 
611 /*
612  * Guts of sys_timer_settime for CPU timers.
613  * This is called with the timer locked and interrupts disabled.
614  * If we return TIMER_RETRY, it's necessary to release the timer's lock
615  * and try again.  (This happens when the timer is in the middle of firing.)
616  */
617 static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
618 			       struct itimerspec64 *new, struct itimerspec64 *old)
619 {
620 	clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
621 	u64 old_expires, new_expires, old_incr, val;
622 	struct cpu_timer *ctmr = &timer->it.cpu;
623 	struct sighand_struct *sighand;
624 	struct task_struct *p;
625 	unsigned long flags;
626 	int ret = 0;
627 
628 	rcu_read_lock();
629 	p = cpu_timer_task_rcu(timer);
630 	if (!p) {
631 		/*
632 		 * If p has just been reaped, we can no
633 		 * longer get any information about it at all.
634 		 */
635 		rcu_read_unlock();
636 		return -ESRCH;
637 	}
638 
639 	/*
640 	 * Use the to_ktime conversion because that clamps the maximum
641 	 * value to KTIME_MAX and avoids multiplication overflows.
642 	 */
643 	new_expires = ktime_to_ns(timespec64_to_ktime(new->it_value));
644 
645 	/*
646 	 * Protect against sighand release/switch in exit/exec and p->cpu_timers
647 	 * and p->signal->cpu_timers read/write in arm_timer()
648 	 */
649 	sighand = lock_task_sighand(p, &flags);
650 	/*
651 	 * If p has just been reaped, we can no
652 	 * longer get any information about it at all.
653 	 */
654 	if (unlikely(sighand == NULL)) {
655 		rcu_read_unlock();
656 		return -ESRCH;
657 	}
658 
659 	/*
660 	 * Disarm any old timer after extracting its expiry time.
661 	 */
662 	old_incr = timer->it_interval;
663 	old_expires = cpu_timer_getexpires(ctmr);
664 
665 	if (unlikely(timer->it.cpu.firing)) {
666 		timer->it.cpu.firing = -1;
667 		ret = TIMER_RETRY;
668 	} else {
669 		cpu_timer_dequeue(ctmr);
670 	}
671 
672 	/*
673 	 * We need to sample the current value to convert the new
674 	 * value from relative to absolute, and to convert the
675 	 * old value from absolute to relative.  To set a process
676 	 * timer, we need a sample to balance the thread expiry
677 	 * times (in arm_timer).  With an absolute time, we must
678 	 * check if it's already passed.  In short, we need a sample.
679 	 */
680 	if (CPUCLOCK_PERTHREAD(timer->it_clock))
681 		val = cpu_clock_sample(clkid, p);
682 	else
683 		val = cpu_clock_sample_group(clkid, p, true);
684 
685 	if (old) {
686 		if (old_expires == 0) {
687 			old->it_value.tv_sec = 0;
688 			old->it_value.tv_nsec = 0;
689 		} else {
690 			/*
691 			 * Update the timer in case it has overrun already.
692 			 * If it has, we'll report it as having overrun and
693 			 * with the next reloaded timer already ticking,
694 			 * though we are swallowing that pending
695 			 * notification here to install the new setting.
696 			 */
697 			u64 exp = bump_cpu_timer(timer, val);
698 
699 			if (val < exp) {
700 				old_expires = exp - val;
701 				old->it_value = ns_to_timespec64(old_expires);
702 			} else {
703 				old->it_value.tv_nsec = 1;
704 				old->it_value.tv_sec = 0;
705 			}
706 		}
707 	}
708 
709 	if (unlikely(ret)) {
710 		/*
711 		 * We are colliding with the timer actually firing.
712 		 * Punt after filling in the timer's old value, and
713 		 * disable this firing since we are already reporting
714 		 * it as an overrun (thanks to bump_cpu_timer above).
715 		 */
716 		unlock_task_sighand(p, &flags);
717 		goto out;
718 	}
719 
720 	if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
721 		new_expires += val;
722 	}
723 
724 	/*
725 	 * Install the new expiry time (or zero).
726 	 * For a timer with no notification action, we don't actually
727 	 * arm the timer (we'll just fake it for timer_gettime).
728 	 */
729 	cpu_timer_setexpires(ctmr, new_expires);
730 	if (new_expires != 0 && val < new_expires) {
731 		arm_timer(timer, p);
732 	}
733 
734 	unlock_task_sighand(p, &flags);
735 	/*
736 	 * Install the new reload setting, and
737 	 * set up the signal and overrun bookkeeping.
738 	 */
739 	timer->it_interval = timespec64_to_ktime(new->it_interval);
740 
741 	/*
742 	 * This acts as a modification timestamp for the timer,
743 	 * so any automatic reload attempt will punt on seeing
744 	 * that we have reset the timer manually.
745 	 */
746 	timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
747 		~REQUEUE_PENDING;
748 	timer->it_overrun_last = 0;
749 	timer->it_overrun = -1;
750 
751 	if (val >= new_expires) {
752 		if (new_expires != 0) {
753 			/*
754 			 * The designated time already passed, so we notify
755 			 * immediately, even if the thread never runs to
756 			 * accumulate more time on this clock.
757 			 */
758 			cpu_timer_fire(timer);
759 		}
760 
761 		/*
762 		 * Make sure we don't keep around the process wide cputime
763 		 * counter or the tick dependency if they are not necessary.
764 		 */
765 		sighand = lock_task_sighand(p, &flags);
766 		if (!sighand)
767 			goto out;
768 
769 		if (!cpu_timer_queued(ctmr))
770 			trigger_base_recalc_expires(timer, p);
771 
772 		unlock_task_sighand(p, &flags);
773 	}
774  out:
775 	rcu_read_unlock();
776 	if (old)
777 		old->it_interval = ns_to_timespec64(old_incr);
778 
779 	return ret;
780 }
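/*
 * Illustrative userspace sketch (not part of this file, 'tid' is a timer
 * created as in the timer_create() example above): timer_settime() on a
 * CPU-clock timer is handled by posix_cpu_timer_set(); a zero it_value
 * disarms the timer and TIMER_ABSTIME makes it_value absolute CPU time.
 *
 *	struct itimerspec its = {
 *		.it_value    = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 },
 *		.it_interval = { .tv_sec = 0, .tv_nsec = 0 },	// one-shot
 *	};
 *
 *	timer_settime(tid, 0, &its, NULL);	// relative: 100ms of CPU time
 */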
781 
782 static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
783 {
784 	clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
785 	struct cpu_timer *ctmr = &timer->it.cpu;
786 	u64 now, expires = cpu_timer_getexpires(ctmr);
787 	struct task_struct *p;
788 
789 	rcu_read_lock();
790 	p = cpu_timer_task_rcu(timer);
791 	if (!p)
792 		goto out;
793 
794 	/*
795 	 * Easy part: convert the reload time.
796 	 */
797 	itp->it_interval = ktime_to_timespec64(timer->it_interval);
798 
799 	if (!expires)
800 		goto out;
801 
802 	/*
803 	 * Sample the clock to take the difference with the expiry time.
804 	 */
805 	if (CPUCLOCK_PERTHREAD(timer->it_clock))
806 		now = cpu_clock_sample(clkid, p);
807 	else
808 		now = cpu_clock_sample_group(clkid, p, false);
809 
810 	if (now < expires) {
811 		itp->it_value = ns_to_timespec64(expires - now);
812 	} else {
813 		/*
814 		 * The timer should have expired already, but the firing
815 		 * hasn't taken place yet.  Say it's just about to expire.
816 		 */
817 		itp->it_value.tv_nsec = 1;
818 		itp->it_value.tv_sec = 0;
819 	}
820 out:
821 	rcu_read_unlock();
822 }
823 
824 #define MAX_COLLECTED	20
825 
826 static u64 collect_timerqueue(struct timerqueue_head *head,
827 			      struct list_head *firing, u64 now)
828 {
829 	struct timerqueue_node *next;
830 	int i = 0;
831 
832 	while ((next = timerqueue_getnext(head))) {
833 		struct cpu_timer *ctmr;
834 		u64 expires;
835 
836 		ctmr = container_of(next, struct cpu_timer, node);
837 		expires = cpu_timer_getexpires(ctmr);
838 		/* Limit the number of timers to expire at once */
839 		if (++i == MAX_COLLECTED || now < expires)
840 			return expires;
841 
842 		ctmr->firing = 1;
843 		/* See posix_cpu_timer_wait_running() */
844 		rcu_assign_pointer(ctmr->handling, current);
845 		cpu_timer_dequeue(ctmr);
846 		list_add_tail(&ctmr->elist, firing);
847 	}
848 
849 	return U64_MAX;
850 }
851 
852 static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
853 				    struct list_head *firing)
854 {
855 	struct posix_cputimer_base *base = pct->bases;
856 	int i;
857 
858 	for (i = 0; i < CPUCLOCK_MAX; i++, base++) {
859 		base->nextevt = collect_timerqueue(&base->tqhead, firing,
860 						    samples[i]);
861 	}
862 }
863 
864 static inline void check_dl_overrun(struct task_struct *tsk)
865 {
866 	if (tsk->dl.dl_overrun) {
867 		tsk->dl.dl_overrun = 0;
868 		__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
869 	}
870 }
871 
872 static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
873 {
874 	if (time < limit)
875 		return false;
876 
877 	if (print_fatal_signals) {
878 		pr_info("%s Watchdog Timeout (%s): %s[%d]\n",
879 			rt ? "RT" : "CPU", hard ? "hard" : "soft",
880 			current->comm, task_pid_nr(current));
881 	}
882 	__group_send_sig_info(signo, SEND_SIG_PRIV, current);
883 	return true;
884 }
885 
886 /*
887  * Check for any per-thread CPU timers that have fired and move them off
888  * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
889  * tsk->it_*_expires values to reflect the remaining thread CPU timers.
890  */
891 static void check_thread_timers(struct task_struct *tsk,
892 				struct list_head *firing)
893 {
894 	struct posix_cputimers *pct = &tsk->posix_cputimers;
895 	u64 samples[CPUCLOCK_MAX];
896 	unsigned long soft;
897 
898 	if (dl_task(tsk))
899 		check_dl_overrun(tsk);
900 
901 	if (expiry_cache_is_inactive(pct))
902 		return;
903 
904 	task_sample_cputime(tsk, samples);
905 	collect_posix_cputimers(pct, samples, firing);
906 
907 	/*
908 	 * Check for the special case thread timers.
909 	 */
910 	soft = task_rlimit(tsk, RLIMIT_RTTIME);
911 	if (soft != RLIM_INFINITY) {
912 		/* Task RT timeout is accounted in jiffies. RTTIME is usec */
913 		unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ);
914 		unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
915 
916 		/* At the hard limit, send SIGKILL. No further action. */
917 		if (hard != RLIM_INFINITY &&
918 		    check_rlimit(rttime, hard, SIGKILL, true, true))
919 			return;
920 
921 		/* At the soft limit, send a SIGXCPU every second */
922 		if (check_rlimit(rttime, soft, SIGXCPU, true, false)) {
923 			soft += USEC_PER_SEC;
924 			tsk->signal->rlim[RLIMIT_RTTIME].rlim_cur = soft;
925 		}
926 	}
927 
928 	if (expiry_cache_is_inactive(pct))
929 		tick_dep_clear_task(tsk, TICK_DEP_BIT_POSIX_TIMER);
930 }
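/*
 * Illustrative userspace sketch (not part of this file): RLIMIT_RTTIME is
 * given in microseconds and applies to realtime-scheduled tasks.  With the
 * checks above, passing the soft limit raises SIGXCPU roughly once per
 * second of accumulated RT time and hitting the hard limit sends SIGKILL:
 *
 *	#include <sys/resource.h>
 *
 *	struct rlimit rl = { .rlim_cur = 500000, .rlim_max = 1000000 };
 *
 *	setrlimit(RLIMIT_RTTIME, &rl);	// 0.5s soft / 1s hard of RT CPU time
 */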
931 
932 static inline void stop_process_timers(struct signal_struct *sig)
933 {
934 	struct posix_cputimers *pct = &sig->posix_cputimers;
935 
936 	/* Turn off the active flag. This is done without locking. */
937 	WRITE_ONCE(pct->timers_active, false);
938 	tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER);
939 }
940 
941 static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
942 			     u64 *expires, u64 cur_time, int signo)
943 {
944 	if (!it->expires)
945 		return;
946 
947 	if (cur_time >= it->expires) {
948 		if (it->incr)
949 			it->expires += it->incr;
950 		else
951 			it->expires = 0;
952 
953 		trace_itimer_expire(signo == SIGPROF ?
954 				    ITIMER_PROF : ITIMER_VIRTUAL,
955 				    task_tgid(tsk), cur_time);
956 		__group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
957 	}
958 
959 	if (it->expires && it->expires < *expires)
960 		*expires = it->expires;
961 }
962 
963 /*
964  * Check for any per-process (thread group) CPU timers that have fired and
965  * move them onto the firing list.  Per-thread timers have already been
966  * taken off.
967  */
968 static void check_process_timers(struct task_struct *tsk,
969 				 struct list_head *firing)
970 {
971 	struct signal_struct *const sig = tsk->signal;
972 	struct posix_cputimers *pct = &sig->posix_cputimers;
973 	u64 samples[CPUCLOCK_MAX];
974 	unsigned long soft;
975 
976 	/*
977 	 * If there are no active process wide timers (POSIX 1.b, itimers,
978 	 * RLIMIT_CPU) nothing to check. Also skip the process wide timer
979 	 * processing when there is already another task handling them.
980 	 */
981 	if (!READ_ONCE(pct->timers_active) || pct->expiry_active)
982 		return;
983 
984 	/*
985 	 * Signify that a thread is checking for process timers.
986 	 * Write access to this field is protected by the sighand lock.
987 	 */
988 	pct->expiry_active = true;
989 
990 	/*
991 	 * Collect the current process totals. Group accounting is active
992 	 * so the sample can be taken directly.
993 	 */
994 	proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic, samples);
995 	collect_posix_cputimers(pct, samples, firing);
996 
997 	/*
998 	 * Check for the special case process timers.
999 	 */
1000 	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF],
1001 			 &pct->bases[CPUCLOCK_PROF].nextevt,
1002 			 samples[CPUCLOCK_PROF], SIGPROF);
1003 	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT],
1004 			 &pct->bases[CPUCLOCK_VIRT].nextevt,
1005 			 samples[CPUCLOCK_VIRT], SIGVTALRM);
1006 
1007 	soft = task_rlimit(tsk, RLIMIT_CPU);
1008 	if (soft != RLIM_INFINITY) {
1009 		/* RLIMIT_CPU is in seconds. Samples are nanoseconds */
1010 		unsigned long hard = task_rlimit_max(tsk, RLIMIT_CPU);
1011 		u64 ptime = samples[CPUCLOCK_PROF];
1012 		u64 softns = (u64)soft * NSEC_PER_SEC;
1013 		u64 hardns = (u64)hard * NSEC_PER_SEC;
1014 
1015 		/* At the hard limit, send SIGKILL. No further action. */
1016 		if (hard != RLIM_INFINITY &&
1017 		    check_rlimit(ptime, hardns, SIGKILL, false, true))
1018 			return;
1019 
1020 		/* At the soft limit, send a SIGXCPU every second */
1021 		if (check_rlimit(ptime, softns, SIGXCPU, false, false)) {
1022 			sig->rlim[RLIMIT_CPU].rlim_cur = soft + 1;
1023 			softns += NSEC_PER_SEC;
1024 		}
1025 
1026 		/* Update the expiry cache */
1027 		if (softns < pct->bases[CPUCLOCK_PROF].nextevt)
1028 			pct->bases[CPUCLOCK_PROF].nextevt = softns;
1029 	}
1030 
1031 	if (expiry_cache_is_inactive(pct))
1032 		stop_process_timers(sig);
1033 
1034 	pct->expiry_active = false;
1035 }
1036 
1037 /*
1038  * This is called from the signal code (via posixtimer_rearm)
1039  * when the last timer signal was delivered and we have to reload the timer.
1040  */
1041 static void posix_cpu_timer_rearm(struct k_itimer *timer)
1042 {
1043 	clockid_t clkid = CPUCLOCK_WHICH(timer->it_clock);
1044 	struct task_struct *p;
1045 	struct sighand_struct *sighand;
1046 	unsigned long flags;
1047 	u64 now;
1048 
1049 	rcu_read_lock();
1050 	p = cpu_timer_task_rcu(timer);
1051 	if (!p)
1052 		goto out;
1053 
1054 	/* Protect timer list r/w in arm_timer() */
1055 	sighand = lock_task_sighand(p, &flags);
1056 	if (unlikely(sighand == NULL))
1057 		goto out;
1058 
1059 	/*
1060 	 * Fetch the current sample and update the timer's expiry time.
1061 	 */
1062 	if (CPUCLOCK_PERTHREAD(timer->it_clock))
1063 		now = cpu_clock_sample(clkid, p);
1064 	else
1065 		now = cpu_clock_sample_group(clkid, p, true);
1066 
1067 	bump_cpu_timer(timer, now);
1068 
1069 	/*
1070 	 * Now re-arm for the new expiry time.
1071 	 */
1072 	arm_timer(timer, p);
1073 	unlock_task_sighand(p, &flags);
1074 out:
1075 	rcu_read_unlock();
1076 }
1077 
1078 /**
1079  * task_cputimers_expired - Check whether posix CPU timers are expired
1080  *
1081  * @samples:	Array of current samples for the CPUCLOCK clocks
1082  * @pct:	Pointer to a posix_cputimers container
1083  *
1084  * Returns true if any member of @samples is greater than or equal to the
1085  * corresponding member of @pct->bases[CLK].nextevt. False otherwise.
1086  */
1087 static inline bool
1088 task_cputimers_expired(const u64 *samples, struct posix_cputimers *pct)
1089 {
1090 	int i;
1091 
1092 	for (i = 0; i < CPUCLOCK_MAX; i++) {
1093 		if (samples[i] >= pct->bases[i].nextevt)
1094 			return true;
1095 	}
1096 	return false;
1097 }
1098 
1099 /**
1100  * fastpath_timer_check - POSIX CPU timers fast path.
1101  *
1102  * @tsk:	The task (thread) being checked.
1103  *
1104  * Check the task and thread group timers.  If both are zero (there are no
1105  * timers set) return false.  Otherwise snapshot the task and thread group
1106  * timers and compare them with the corresponding expiration times.  Return
1107  * true if a timer has expired, else return false.
1108  */
1109 static inline bool fastpath_timer_check(struct task_struct *tsk)
1110 {
1111 	struct posix_cputimers *pct = &tsk->posix_cputimers;
1112 	struct signal_struct *sig;
1113 
1114 	if (!expiry_cache_is_inactive(pct)) {
1115 		u64 samples[CPUCLOCK_MAX];
1116 
1117 		task_sample_cputime(tsk, samples);
1118 		if (task_cputimers_expired(samples, pct))
1119 			return true;
1120 	}
1121 
1122 	sig = tsk->signal;
1123 	pct = &sig->posix_cputimers;
1124 	/*
1125 	 * Check if thread group timers expired when timers are active and
1126 	 * no other thread in the group is already handling expiry for
1127 	 * thread group cputimers. These fields are read without the
1128 	 * sighand lock. However, this is fine because this is meant to be
1129 	 * a fastpath heuristic to determine whether we should try to
1130 	 * acquire the sighand lock to handle timer expiry.
1131 	 *
1132 	 * In the worst case scenario, if concurrently timers_active is set
1133 	 * or expiry_active is cleared, but the current thread doesn't see
1134 	 * the change yet, the timer checks are delayed until the next
1135 	 * thread in the group gets a scheduler interrupt to handle the
1136 	 * timer. This isn't an issue in practice because these types of
1137 	 * delays with signals actually getting sent are expected.
1138 	 */
1139 	if (READ_ONCE(pct->timers_active) && !READ_ONCE(pct->expiry_active)) {
1140 		u64 samples[CPUCLOCK_MAX];
1141 
1142 		proc_sample_cputime_atomic(&sig->cputimer.cputime_atomic,
1143 					   samples);
1144 
1145 		if (task_cputimers_expired(samples, pct))
1146 			return true;
1147 	}
1148 
1149 	if (dl_task(tsk) && tsk->dl.dl_overrun)
1150 		return true;
1151 
1152 	return false;
1153 }
1154 
1155 static void handle_posix_cpu_timers(struct task_struct *tsk);
1156 
1157 #ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
1158 static void posix_cpu_timers_work(struct callback_head *work)
1159 {
1160 	struct posix_cputimers_work *cw = container_of(work, typeof(*cw), work);
1161 
1162 	mutex_lock(&cw->mutex);
1163 	handle_posix_cpu_timers(current);
1164 	mutex_unlock(&cw->mutex);
1165 }
1166 
1167 /*
1168  * Invoked from the posix-timer core when a cancel operation failed because
1169  * the timer is marked firing. The caller holds rcu_read_lock(), which
1170  * protects the timer and the task which is expiring it from being freed.
1171  */
1172 static void posix_cpu_timer_wait_running(struct k_itimer *timr)
1173 {
1174 	struct task_struct *tsk = rcu_dereference(timr->it.cpu.handling);
1175 
1176 	/* Has the handling task completed expiry already? */
1177 	if (!tsk)
1178 		return;
1179 
1180 	/* Ensure that the task cannot go away */
1181 	get_task_struct(tsk);
1182 	/* Now drop the RCU protection so the mutex can be locked */
1183 	rcu_read_unlock();
1184 	/* Wait on the expiry mutex */
1185 	mutex_lock(&tsk->posix_cputimers_work.mutex);
1186 	/* Release it immediately again. */
1187 	mutex_unlock(&tsk->posix_cputimers_work.mutex);
1188 	/* Drop the task reference. */
1189 	put_task_struct(tsk);
1190 	/* Relock RCU so the callsite is balanced */
1191 	rcu_read_lock();
1192 }
1193 
1194 static void posix_cpu_timer_wait_running_nsleep(struct k_itimer *timr)
1195 {
1196 	/* Ensure that timr->it.cpu.handling task cannot go away */
1197 	rcu_read_lock();
1198 	spin_unlock_irq(&timr->it_lock);
1199 	posix_cpu_timer_wait_running(timr);
1200 	rcu_read_unlock();
1201 	/* @timr is on stack and is valid */
1202 	spin_lock_irq(&timr->it_lock);
1203 }
1204 
1205 /*
1206  * Clear existing posix CPU timers task work.
1207  */
1208 void clear_posix_cputimers_work(struct task_struct *p)
1209 {
1210 	/*
1211 	 * A copied work entry from the old task is not meaningful, clear it.
1212 	 * N.B. init_task_work will not do this.
1213 	 */
1214 	memset(&p->posix_cputimers_work.work, 0,
1215 	       sizeof(p->posix_cputimers_work.work));
1216 	init_task_work(&p->posix_cputimers_work.work,
1217 		       posix_cpu_timers_work);
1218 	mutex_init(&p->posix_cputimers_work.mutex);
1219 	p->posix_cputimers_work.scheduled = false;
1220 }
1221 
1222 /*
1223  * Initialize posix CPU timers task work in init task. Out of line to
1224  * keep the callback static and to avoid header recursion hell.
1225  */
1226 void __init posix_cputimers_init_work(void)
1227 {
1228 	clear_posix_cputimers_work(current);
1229 }
1230 
1231 /*
1232  * Note: All operations on tsk->posix_cputimer_work.scheduled happen either
1233  * in hard interrupt context or in task context with interrupts
1234  * disabled. Aside of that the writer/reader interaction is always in the
1235  * context of the current task, which means they are strict per CPU.
1236  */
1237 static inline bool posix_cpu_timers_work_scheduled(struct task_struct *tsk)
1238 {
1239 	return tsk->posix_cputimers_work.scheduled;
1240 }
1241 
1242 static inline void __run_posix_cpu_timers(struct task_struct *tsk)
1243 {
1244 	if (WARN_ON_ONCE(tsk->posix_cputimers_work.scheduled))
1245 		return;
1246 
1247 	/* Schedule task work to actually expire the timers */
1248 	tsk->posix_cputimers_work.scheduled = true;
1249 	task_work_add(tsk, &tsk->posix_cputimers_work.work, TWA_RESUME);
1250 }
1251 
1252 static inline bool posix_cpu_timers_enable_work(struct task_struct *tsk,
1253 						unsigned long start)
1254 {
1255 	bool ret = true;
1256 
1257 	/*
1258 	 * On !RT kernels interrupts are disabled while collecting expired
1259 	 * timers, so no tick can happen and the fast path check can be
1260 	 * reenabled without further checks.
1261 	 */
1262 	if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
1263 		tsk->posix_cputimers_work.scheduled = false;
1264 		return true;
1265 	}
1266 
1267 	/*
1268 	 * On RT enabled kernels ticks can happen while the expired timers
1269 	 * are collected under sighand lock. But any tick which observes
1270 	 * the CPUTIMERS_WORK_SCHEDULED bit set, does not run the fastpath
1271 	 * checks. So reenabling the tick work has to be done carefully:
1272 	 *
1273 	 * Disable interrupts and run the fast path check if jiffies have
1274 	 * advanced since the collecting of expired timers started. If
1275 	 * jiffies have not advanced or the fast path check did not find
1276 	 * newly expired timers, reenable the fast path check in the timer
1277 	 * interrupt. If there are newly expired timers, return false and
1278 	 * let the collection loop repeat.
1279 	 */
1280 	local_irq_disable();
1281 	if (start != jiffies && fastpath_timer_check(tsk))
1282 		ret = false;
1283 	else
1284 		tsk->posix_cputimers_work.scheduled = false;
1285 	local_irq_enable();
1286 
1287 	return ret;
1288 }
1289 #else /* CONFIG_POSIX_CPU_TIMERS_TASK_WORK */
1290 static inline void __run_posix_cpu_timers(struct task_struct *tsk)
1291 {
1292 	lockdep_posixtimer_enter();
1293 	handle_posix_cpu_timers(tsk);
1294 	lockdep_posixtimer_exit();
1295 }
1296 
1297 static void posix_cpu_timer_wait_running(struct k_itimer *timr)
1298 {
1299 	cpu_relax();
1300 }
1301 
1302 static void posix_cpu_timer_wait_running_nsleep(struct k_itimer *timr)
1303 {
1304 	spin_unlock_irq(&timr->it_lock);
1305 	cpu_relax();
1306 	spin_lock_irq(&timr->it_lock);
1307 }
1308 
1309 static inline bool posix_cpu_timers_work_scheduled(struct task_struct *tsk)
1310 {
1311 	return false;
1312 }
1313 
1314 static inline bool posix_cpu_timers_enable_work(struct task_struct *tsk,
1315 						unsigned long start)
1316 {
1317 	return true;
1318 }
1319 #endif /* CONFIG_POSIX_CPU_TIMERS_TASK_WORK */
1320 
1321 static void handle_posix_cpu_timers(struct task_struct *tsk)
1322 {
1323 	struct k_itimer *timer, *next;
1324 	unsigned long flags, start;
1325 	LIST_HEAD(firing);
1326 
1327 	if (!lock_task_sighand(tsk, &flags))
1328 		return;
1329 
1330 	do {
1331 		/*
1332 		 * On RT locking sighand lock does not disable interrupts,
1333 		 * so this needs to be careful vs. ticks. Store the current
1334 		 * jiffies value.
1335 		 */
1336 		start = READ_ONCE(jiffies);
1337 		barrier();
1338 
1339 		/*
1340 		 * Here we take all the timers that are firing off the
1341 		 * per-thread and per-process timer queues and put them
1342 		 * on the firing list.
1343 		 */
1344 		check_thread_timers(tsk, &firing);
1345 
1346 		check_process_timers(tsk, &firing);
1347 
1348 		/*
1349 		 * The above timer checks have updated the expiry cache and
1350 		 * because nothing can have queued or modified timers after
1351 		 * sighand lock was taken above it is guaranteed to be
1352 		 * consistent. So the next timer interrupt fastpath check
1353 		 * will find valid data.
1354 		 *
1355 		 * If timer expiry runs in the timer interrupt context then
1356 		 * the loop is not relevant as timers will be directly
1357 		 * expired in interrupt context. The stub function below
1358 		 * always returns true, which allows the compiler to
1359 		 * optimize the loop out.
1360 		 *
1361 		 * If timer expiry is deferred to task work context then
1362 		 * the following rules apply:
1363 		 *
1364 		 * - On !RT kernels no tick can have happened on this CPU
1365 		 *   after sighand lock was acquired because interrupts are
1366 		 *   disabled. So reenabling task work before dropping
1367 		 *   sighand lock and reenabling interrupts is race free.
1368 		 *
1369 		 * - On RT kernels ticks might have happened but the tick
1370 		 *   work ignored posix CPU timer handling because the
1371 		 *   CPUTIMERS_WORK_SCHEDULED bit is set. Reenabling work
1372 		 *   must be done very carefully including a check whether
1373 		 *   ticks have happened since the start of the timer
1374 		 *   expiry checks. posix_cpu_timers_enable_work() takes
1375 		 *   care of that and eventually lets the expiry checks
1376 		 *   run again.
1377 		 */
1378 	} while (!posix_cpu_timers_enable_work(tsk, start));
1379 
1380 	/*
1381 	 * We must release sighand lock before taking any timer's lock.
1382 	 * There is a potential race with timer deletion here, as the
1383 	 * siglock now protects our private firing list.  We have set
1384 	 * the firing flag in each timer, so that a deletion attempt
1385 	 * that gets the timer lock before we do will give it up and
1386 	 * spin until we've taken care of that timer below.
1387 	 */
1388 	unlock_task_sighand(tsk, &flags);
1389 
1390 	/*
1391 	 * Now that all the timers on our list have the firing flag,
1392 	 * no one will touch their list entries but us.  We'll take
1393 	 * each timer's lock before clearing its firing flag, so no
1394 	 * timer call will interfere.
1395 	 */
1396 	list_for_each_entry_safe(timer, next, &firing, it.cpu.elist) {
1397 		int cpu_firing;
1398 
1399 		/*
1400 		 * spin_lock() is sufficient here even independent of the
1401 		 * expiry context. If expiry happens in hard interrupt
1402 		 * context it's obvious. For task work context it's safe
1403 		 * because all other operations on timer::it_lock happen in
1404 		 * task context (syscall or exit).
1405 		 */
1406 		spin_lock(&timer->it_lock);
1407 		list_del_init(&timer->it.cpu.elist);
1408 		cpu_firing = timer->it.cpu.firing;
1409 		timer->it.cpu.firing = 0;
1410 		/*
1411 		 * The firing flag is -1 if we collided with a reset
1412 		 * of the timer, which already reported this
1413 		 * almost-firing as an overrun.  So don't generate an event.
1414 		 */
1415 		if (likely(cpu_firing >= 0))
1416 			cpu_timer_fire(timer);
1417 		/* See posix_cpu_timer_wait_running() */
1418 		rcu_assign_pointer(timer->it.cpu.handling, NULL);
1419 		spin_unlock(&timer->it_lock);
1420 	}
1421 }
1422 
1423 /*
1424  * This is called from the timer interrupt handler.  The irq handler has
1425  * already updated our counts.  We need to check if any timers fire now.
1426  * Interrupts are disabled.
1427  */
1428 void run_posix_cpu_timers(void)
1429 {
1430 	struct task_struct *tsk = current;
1431 
1432 	lockdep_assert_irqs_disabled();
1433 
1434 	/*
1435 	 * If the actual expiry is deferred to task work context and the
1436 	 * work is already scheduled there is no point to do anything here.
1437 	 */
1438 	if (posix_cpu_timers_work_scheduled(tsk))
1439 		return;
1440 
1441 	/*
1442 	 * The fast path checks that there are no expired thread or thread
1443 	 * group timers.  If that's so, just return.
1444 	 */
1445 	if (!fastpath_timer_check(tsk))
1446 		return;
1447 
1448 	__run_posix_cpu_timers(tsk);
1449 }
1450 
1451 /*
1452  * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
1453  * The tsk->sighand->siglock must be held by the caller.
1454  */
1455 void set_process_cpu_timer(struct task_struct *tsk, unsigned int clkid,
1456 			   u64 *newval, u64 *oldval)
1457 {
1458 	u64 now, *nextevt;
1459 
1460 	if (WARN_ON_ONCE(clkid >= CPUCLOCK_SCHED))
1461 		return;
1462 
1463 	nextevt = &tsk->signal->posix_cputimers.bases[clkid].nextevt;
1464 	now = cpu_clock_sample_group(clkid, tsk, true);
1465 
1466 	if (oldval) {
1467 		/*
1468 		 * We are setting an itimer. The *oldval is absolute and we
1469 		 * update it to be relative; the *newval argument is relative
1470 		 * and we update it to be absolute.
1471 		 */
1472 		if (*oldval) {
1473 			if (*oldval <= now) {
1474 				/* Just about to fire. */
1475 				*oldval = TICK_NSEC;
1476 			} else {
1477 				*oldval -= now;
1478 			}
1479 		}
1480 
1481 		if (*newval)
1482 			*newval += now;
1483 	}
1484 
1485 	/*
1486 	 * Update expiration cache if this is the earliest timer. CPUCLOCK_PROF
1487 	 * expiry cache is also used by RLIMIT_CPU!
1488 	 */
1489 	if (*newval < *nextevt)
1490 		*nextevt = *newval;
1491 
1492 	tick_dep_set_signal(tsk, TICK_DEP_BIT_POSIX_TIMER);
1493 }
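/*
 * Illustrative userspace sketch (not part of this file): setitimer() with
 * ITIMER_PROF or ITIMER_VIRTUAL reaches set_process_cpu_timer() with
 * clkid == CPUCLOCK_PROF or CPUCLOCK_VIRT respectively:
 *
 *	#include <sys/time.h>
 *
 *	struct itimerval it = {
 *		.it_value    = { .tv_sec = 1, .tv_usec = 0 },
 *		.it_interval = { .tv_sec = 1, .tv_usec = 0 },
 *	};
 *
 *	setitimer(ITIMER_PROF, &it, NULL);	// SIGPROF every ~1s of CPU time
 */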
1494 
1495 static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
1496 			    const struct timespec64 *rqtp)
1497 {
1498 	struct itimerspec64 it;
1499 	struct k_itimer timer;
1500 	u64 expires;
1501 	int error;
1502 
1503 	/*
1504 	 * Set up a temporary timer and then wait for it to go off.
1505 	 */
1506 	memset(&timer, 0, sizeof timer);
1507 	spin_lock_init(&timer.it_lock);
1508 	timer.it_clock = which_clock;
1509 	timer.it_overrun = -1;
1510 	error = posix_cpu_timer_create(&timer);
1511 	timer.it_process = current;
1512 
1513 	if (!error) {
1514 		static struct itimerspec64 zero_it;
1515 		struct restart_block *restart;
1516 
1517 		memset(&it, 0, sizeof(it));
1518 		it.it_value = *rqtp;
1519 
1520 		spin_lock_irq(&timer.it_lock);
1521 		error = posix_cpu_timer_set(&timer, flags, &it, NULL);
1522 		if (error) {
1523 			spin_unlock_irq(&timer.it_lock);
1524 			return error;
1525 		}
1526 
1527 		while (!signal_pending(current)) {
1528 			if (!cpu_timer_getexpires(&timer.it.cpu)) {
1529 				/*
1530 				 * Our timer fired and was reset; the
1531 				 * deletion below cannot fail.
1532 				 */
1533 				posix_cpu_timer_del(&timer);
1534 				spin_unlock_irq(&timer.it_lock);
1535 				return 0;
1536 			}
1537 
1538 			/*
1539 			 * Block until cpu_timer_fire (or a signal) wakes us.
1540 			 */
1541 			__set_current_state(TASK_INTERRUPTIBLE);
1542 			spin_unlock_irq(&timer.it_lock);
1543 			schedule();
1544 			spin_lock_irq(&timer.it_lock);
1545 		}
1546 
1547 		/*
1548 		 * We were interrupted by a signal.
1549 		 */
1550 		expires = cpu_timer_getexpires(&timer.it.cpu);
1551 		error = posix_cpu_timer_set(&timer, 0, &zero_it, &it);
1552 		if (!error) {
1553 			/* Timer is now unarmed, deletion can not fail. */
1554 			posix_cpu_timer_del(&timer);
1555 		} else {
1556 			while (error == TIMER_RETRY) {
1557 				posix_cpu_timer_wait_running_nsleep(&timer);
1558 				error = posix_cpu_timer_del(&timer);
1559 			}
1560 		}
1561 
1562 		spin_unlock_irq(&timer.it_lock);
1563 
1564 		if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
1565 			/*
1566 			 * It actually did fire already.
1567 			 */
1568 			return 0;
1569 		}
1570 
1571 		error = -ERESTART_RESTARTBLOCK;
1572 		/*
1573 		 * Report back to the user the time still remaining.
1574 		 */
1575 		restart = &current->restart_block;
1576 		restart->nanosleep.expires = expires;
1577 		if (restart->nanosleep.type != TT_NONE)
1578 			error = nanosleep_copyout(restart, &it.it_value);
1579 	}
1580 
1581 	return error;
1582 }
1583 
1584 static long posix_cpu_nsleep_restart(struct restart_block *restart_block);
1585 
1586 static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
1587 			    const struct timespec64 *rqtp)
1588 {
1589 	struct restart_block *restart_block = &current->restart_block;
1590 	int error;
1591 
1592 	/*
1593 	 * Diagnose required errors first.
1594 	 */
1595 	if (CPUCLOCK_PERTHREAD(which_clock) &&
1596 	    (CPUCLOCK_PID(which_clock) == 0 ||
1597 	     CPUCLOCK_PID(which_clock) == task_pid_vnr(current)))
1598 		return -EINVAL;
1599 
1600 	error = do_cpu_nanosleep(which_clock, flags, rqtp);
1601 
1602 	if (error == -ERESTART_RESTARTBLOCK) {
1603 
1604 		if (flags & TIMER_ABSTIME)
1605 			return -ERESTARTNOHAND;
1606 
1607 		restart_block->nanosleep.clockid = which_clock;
1608 		set_restart_fn(restart_block, posix_cpu_nsleep_restart);
1609 	}
1610 	return error;
1611 }
1612 
1613 static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
1614 {
1615 	clockid_t which_clock = restart_block->nanosleep.clockid;
1616 	struct timespec64 t;
1617 
1618 	t = ns_to_timespec64(restart_block->nanosleep.expires);
1619 
1620 	return do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t);
1621 }
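/*
 * Illustrative userspace sketch (not part of this file): clock_nanosleep()
 * on a process CPU clock blocks until the thread group has consumed the
 * requested amount of CPU time; a per-thread clock naming the caller itself
 * is rejected with EINVAL in posix_cpu_nsleep() above.
 *
 *	#include <time.h>
 *
 *	struct timespec req = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *	clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, 0, &req, NULL);
 */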
1622 
1623 #define PROCESS_CLOCK	make_process_cpuclock(0, CPUCLOCK_SCHED)
1624 #define THREAD_CLOCK	make_thread_cpuclock(0, CPUCLOCK_SCHED)
1625 
1626 static int process_cpu_clock_getres(const clockid_t which_clock,
1627 				    struct timespec64 *tp)
1628 {
1629 	return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
1630 }
1631 static int process_cpu_clock_get(const clockid_t which_clock,
1632 				 struct timespec64 *tp)
1633 {
1634 	return posix_cpu_clock_get(PROCESS_CLOCK, tp);
1635 }
1636 static int process_cpu_timer_create(struct k_itimer *timer)
1637 {
1638 	timer->it_clock = PROCESS_CLOCK;
1639 	return posix_cpu_timer_create(timer);
1640 }
1641 static int process_cpu_nsleep(const clockid_t which_clock, int flags,
1642 			      const struct timespec64 *rqtp)
1643 {
1644 	return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp);
1645 }
1646 static int thread_cpu_clock_getres(const clockid_t which_clock,
1647 				   struct timespec64 *tp)
1648 {
1649 	return posix_cpu_clock_getres(THREAD_CLOCK, tp);
1650 }
1651 static int thread_cpu_clock_get(const clockid_t which_clock,
1652 				struct timespec64 *tp)
1653 {
1654 	return posix_cpu_clock_get(THREAD_CLOCK, tp);
1655 }
1656 static int thread_cpu_timer_create(struct k_itimer *timer)
1657 {
1658 	timer->it_clock = THREAD_CLOCK;
1659 	return posix_cpu_timer_create(timer);
1660 }
1661 
1662 const struct k_clock clock_posix_cpu = {
1663 	.clock_getres		= posix_cpu_clock_getres,
1664 	.clock_set		= posix_cpu_clock_set,
1665 	.clock_get_timespec	= posix_cpu_clock_get,
1666 	.timer_create		= posix_cpu_timer_create,
1667 	.nsleep			= posix_cpu_nsleep,
1668 	.timer_set		= posix_cpu_timer_set,
1669 	.timer_del		= posix_cpu_timer_del,
1670 	.timer_get		= posix_cpu_timer_get,
1671 	.timer_rearm		= posix_cpu_timer_rearm,
1672 	.timer_wait_running	= posix_cpu_timer_wait_running,
1673 };
1674 
1675 const struct k_clock clock_process = {
1676 	.clock_getres		= process_cpu_clock_getres,
1677 	.clock_get_timespec	= process_cpu_clock_get,
1678 	.timer_create		= process_cpu_timer_create,
1679 	.nsleep			= process_cpu_nsleep,
1680 };
1681 
1682 const struct k_clock clock_thread = {
1683 	.clock_getres		= thread_cpu_clock_getres,
1684 	.clock_get_timespec	= thread_cpu_clock_get,
1685 	.timer_create		= thread_cpu_timer_create,
1686 };
1687