1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_SCHED_H
3 #define _LINUX_SCHED_H
4 
5 /*
6  * Define 'struct task_struct' and provide the main scheduler
7  * APIs (schedule(), wakeup variants, etc.)
8  */
9 
10 #include <uapi/linux/sched.h>
11 
12 #include <asm/current.h>
13 
14 #include <linux/pid.h>
15 #include <linux/sem.h>
16 #include <linux/shm.h>
17 #include <linux/kmsan_types.h>
18 #include <linux/mutex.h>
19 #include <linux/plist.h>
20 #include <linux/hrtimer.h>
21 #include <linux/irqflags.h>
22 #include <linux/seccomp.h>
23 #include <linux/nodemask.h>
24 #include <linux/rcupdate.h>
25 #include <linux/refcount.h>
26 #include <linux/resource.h>
27 #include <linux/latencytop.h>
28 #include <linux/sched/prio.h>
29 #include <linux/sched/types.h>
30 #include <linux/signal_types.h>
31 #include <linux/syscall_user_dispatch.h>
32 #include <linux/mm_types_task.h>
33 #include <linux/task_io_accounting.h>
34 #include <linux/posix-timers.h>
35 #include <linux/rseq.h>
36 #include <linux/seqlock.h>
37 #include <linux/kcsan.h>
38 #include <linux/rv.h>
39 #include <linux/livepatch_sched.h>
40 #include <asm/kmap_size.h>
41 
42 /* task_struct member predeclarations (sorted alphabetically): */
43 struct audit_context;
44 struct bio_list;
45 struct blk_plug;
46 struct bpf_local_storage;
47 struct bpf_run_ctx;
48 struct capture_control;
49 struct cfs_rq;
50 struct fs_struct;
51 struct futex_pi_state;
52 struct io_context;
53 struct io_uring_task;
54 struct mempolicy;
55 struct nameidata;
56 struct nsproxy;
57 struct perf_event_context;
58 struct pid_namespace;
59 struct pipe_inode_info;
60 struct rcu_node;
61 struct reclaim_state;
62 struct robust_list_head;
63 struct root_domain;
64 struct rq;
65 struct sched_attr;
66 struct sched_param;
67 struct seq_file;
68 struct sighand_struct;
69 struct signal_struct;
70 struct task_delay_info;
71 struct task_group;
72 struct user_event_mm;
73 
74 /*
75  * Task state bitmask. NOTE! These bits are also
76  * encoded in fs/proc/array.c: get_task_state().
77  *
78  * We have two separate sets of flags: task->__state
79  * is about runnability, while task->exit_state is
80  * about the task exiting. Confusing, but this way
81  * modifying one set can't modify the other one by
82  * mistake.
83  */
84 
85 /* Used in tsk->__state: */
86 #define TASK_RUNNING			0x00000000
87 #define TASK_INTERRUPTIBLE		0x00000001
88 #define TASK_UNINTERRUPTIBLE		0x00000002
89 #define __TASK_STOPPED			0x00000004
90 #define __TASK_TRACED			0x00000008
91 /* Used in tsk->exit_state: */
92 #define EXIT_DEAD			0x00000010
93 #define EXIT_ZOMBIE			0x00000020
94 #define EXIT_TRACE			(EXIT_ZOMBIE | EXIT_DEAD)
95 /* Used in tsk->__state again: */
96 #define TASK_PARKED			0x00000040
97 #define TASK_DEAD			0x00000080
98 #define TASK_WAKEKILL			0x00000100
99 #define TASK_WAKING			0x00000200
100 #define TASK_NOLOAD			0x00000400
101 #define TASK_NEW			0x00000800
102 #define TASK_RTLOCK_WAIT		0x00001000
103 #define TASK_FREEZABLE			0x00002000
104 #define __TASK_FREEZABLE_UNSAFE	       (0x00004000 * IS_ENABLED(CONFIG_LOCKDEP))
105 #define TASK_FROZEN			0x00008000
106 #define TASK_STATE_MAX			0x00010000
107 
108 #define TASK_ANY			(TASK_STATE_MAX-1)
109 
110 /*
111  * DO NOT ADD ANY NEW USERS !
112  */
113 #define TASK_FREEZABLE_UNSAFE		(TASK_FREEZABLE | __TASK_FREEZABLE_UNSAFE)
114 
115 /* Convenience macros for the sake of set_current_state: */
116 #define TASK_KILLABLE			(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
117 #define TASK_STOPPED			(TASK_WAKEKILL | __TASK_STOPPED)
118 #define TASK_TRACED			__TASK_TRACED
119 
120 #define TASK_IDLE			(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
121 
122 /* Convenience macros for the sake of wake_up(): */
123 #define TASK_NORMAL			(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
124 
125 /* get_task_state(): */
126 #define TASK_REPORT			(TASK_RUNNING | TASK_INTERRUPTIBLE | \
127 					 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
128 					 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
129 					 TASK_PARKED)
130 
131 #define task_is_running(task)		(READ_ONCE((task)->__state) == TASK_RUNNING)
132 
133 #define task_is_traced(task)		((READ_ONCE(task->jobctl) & JOBCTL_TRACED) != 0)
134 #define task_is_stopped(task)		((READ_ONCE(task->jobctl) & JOBCTL_STOPPED) != 0)
135 #define task_is_stopped_or_traced(task)	((READ_ONCE(task->jobctl) & (JOBCTL_STOPPED | JOBCTL_TRACED)) != 0)
136 
137 /*
138  * Special states are those that do not use the normal wait-loop pattern. See
139  * the comment with set_special_state().
140  */
141 #define is_special_task_state(state)				\
142 	((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))
143 
144 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
145 # define debug_normal_state_change(state_value)				\
146 	do {								\
147 		WARN_ON_ONCE(is_special_task_state(state_value));	\
148 		current->task_state_change = _THIS_IP_;			\
149 	} while (0)
150 
151 # define debug_special_state_change(state_value)			\
152 	do {								\
153 		WARN_ON_ONCE(!is_special_task_state(state_value));	\
154 		current->task_state_change = _THIS_IP_;			\
155 	} while (0)
156 
157 # define debug_rtlock_wait_set_state()					\
158 	do {								 \
159 		current->saved_state_change = current->task_state_change;\
160 		current->task_state_change = _THIS_IP_;			 \
161 	} while (0)
162 
163 # define debug_rtlock_wait_restore_state()				\
164 	do {								 \
165 		current->task_state_change = current->saved_state_change;\
166 	} while (0)
167 
168 #else
169 # define debug_normal_state_change(cond)	do { } while (0)
170 # define debug_special_state_change(cond)	do { } while (0)
171 # define debug_rtlock_wait_set_state()		do { } while (0)
172 # define debug_rtlock_wait_restore_state()	do { } while (0)
173 #endif
174 
175 /*
176  * set_current_state() includes a barrier so that the write of current->__state
177  * is correctly serialised wrt the caller's subsequent test of whether to
178  * actually sleep:
179  *
180  *   for (;;) {
181  *	set_current_state(TASK_UNINTERRUPTIBLE);
182  *	if (CONDITION)
183  *	   break;
184  *
185  *	schedule();
186  *   }
187  *   __set_current_state(TASK_RUNNING);
188  *
189  * If the caller does not need such serialisation (because, for instance, the
190  * CONDITION test and condition change and wakeup are under the same lock) then
191  * use __set_current_state().
192  *
193  * The above is typically ordered against the wakeup, which does:
194  *
195  *   CONDITION = 1;
196  *   wake_up_state(p, TASK_UNINTERRUPTIBLE);
197  *
198  * where wake_up_state()/try_to_wake_up() executes a full memory barrier before
199  * accessing p->__state.
200  *
201  * Wakeup will do: if (@state & p->__state) p->__state = TASK_RUNNING, that is,
202  * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
203  * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
204  *
205  * However, with slightly different timing the wakeup TASK_RUNNING store can
206  * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not
207  * a problem either because that will result in one extra go around the loop
208  * and our @cond test will save the day.
209  *
210  * Also see the comments of try_to_wake_up().
211  */
212 #define __set_current_state(state_value)				\
213 	do {								\
214 		debug_normal_state_change((state_value));		\
215 		WRITE_ONCE(current->__state, (state_value));		\
216 	} while (0)
217 
218 #define set_current_state(state_value)					\
219 	do {								\
220 		debug_normal_state_change((state_value));		\
221 		smp_store_mb(current->__state, (state_value));		\
222 	} while (0)
223 
224 /*
225  * set_special_state() should be used for those states when the blocking task
226  * can not use the regular condition based wait-loop. In that case we must
227  * serialize against wakeups such that any possible in-flight TASK_RUNNING
228  * stores will not collide with our state change.
229  */
230 #define set_special_state(state_value)					\
231 	do {								\
232 		unsigned long flags; /* may shadow */			\
233 									\
234 		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
235 		debug_special_state_change((state_value));		\
236 		WRITE_ONCE(current->__state, (state_value));		\
237 		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
238 	} while (0)
239 
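/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * set_special_state() is meant for one-shot transitions such as TASK_DEAD,
 * TASK_STOPPED or TASK_PARKED that cannot use the condition-based wait loop
 * shown above. For instance, the exit path does roughly:
 *
 *	set_special_state(TASK_DEAD);
 *	...
 *	__schedule();	// never returns
 */
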
240 /*
241  * PREEMPT_RT specific variants for "sleeping" spin/rwlocks
242  *
243  * RT's spin/rwlock substitutions are state preserving. The state of the
244  * task when blocking on the lock is saved in task_struct::saved_state and
245  * restored after the lock has been acquired.  These operations are
246  * serialized by task_struct::pi_lock against try_to_wake_up(). Any non RT
247  * lock related wakeups while the task is blocked on the lock are
248  * redirected to operate on task_struct::saved_state to ensure that these
249  * are not dropped. On restore task_struct::saved_state is set to
250  * TASK_RUNNING so any wakeup attempt redirected to saved_state will fail.
251  *
252  * The lock operation looks like this:
253  *
254  *	current_save_and_set_rtlock_wait_state();
255  *	for (;;) {
256  *		if (try_lock())
257  *			break;
258  *		raw_spin_unlock_irq(&lock->wait_lock);
259  *		schedule_rtlock();
260  *		raw_spin_lock_irq(&lock->wait_lock);
261  *		set_current_state(TASK_RTLOCK_WAIT);
262  *	}
263  *	current_restore_rtlock_saved_state();
264  */
265 #define current_save_and_set_rtlock_wait_state()			\
266 	do {								\
267 		lockdep_assert_irqs_disabled();				\
268 		raw_spin_lock(&current->pi_lock);			\
269 		current->saved_state = current->__state;		\
270 		debug_rtlock_wait_set_state();				\
271 		WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT);		\
272 		raw_spin_unlock(&current->pi_lock);			\
273 	} while (0);
274 
275 #define current_restore_rtlock_saved_state()				\
276 	do {								\
277 		lockdep_assert_irqs_disabled();				\
278 		raw_spin_lock(&current->pi_lock);			\
279 		debug_rtlock_wait_restore_state();			\
280 		WRITE_ONCE(current->__state, current->saved_state);	\
281 		current->saved_state = TASK_RUNNING;			\
282 		raw_spin_unlock(&current->pi_lock);			\
283 	} while (0);
284 
285 #define get_current_state()	READ_ONCE(current->__state)
286 
287 /*
288  * Define the task command name length as enum, then it can be visible to
289  * BPF programs.
290  */
291 enum {
292 	TASK_COMM_LEN = 16,
293 };
294 
295 extern void scheduler_tick(void);
296 
297 #define	MAX_SCHEDULE_TIMEOUT		LONG_MAX
298 
299 extern long schedule_timeout(long timeout);
300 extern long schedule_timeout_interruptible(long timeout);
301 extern long schedule_timeout_killable(long timeout);
302 extern long schedule_timeout_uninterruptible(long timeout);
303 extern long schedule_timeout_idle(long timeout);
304 asmlinkage void schedule(void);
305 extern void schedule_preempt_disabled(void);
306 asmlinkage void preempt_schedule_irq(void);
307 #ifdef CONFIG_PREEMPT_RT
308  extern void schedule_rtlock(void);
309 #endif
310 
311 extern int __must_check io_schedule_prepare(void);
312 extern void io_schedule_finish(int token);
313 extern long io_schedule_timeout(long timeout);
314 extern void io_schedule(void);
315 
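/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * typical use of the schedule_timeout() family declared above.
 *
 *	// sleep ~100ms, waking early if a signal arrives:
 *	long left = schedule_timeout_interruptible(msecs_to_jiffies(100));
 *
 *	// open-coded equivalent:
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	left = schedule_timeout(msecs_to_jiffies(100));
 *
 * Both return the number of jiffies left (0 if the timeout fully expired);
 * passing MAX_SCHEDULE_TIMEOUT simply blocks until woken.
 */
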
316 /**
317  * struct prev_cputime - snapshot of system and user cputime
318  * @utime: time spent in user mode
319  * @stime: time spent in system mode
320  * @lock: protects the above two fields
321  *
322  * Stores previous user/system time values such that we can guarantee
323  * monotonicity.
324  */
325 struct prev_cputime {
326 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
327 	u64				utime;
328 	u64				stime;
329 	raw_spinlock_t			lock;
330 #endif
331 };
332 
333 enum vtime_state {
334 	/* Task is sleeping or running in a CPU with VTIME inactive: */
335 	VTIME_INACTIVE = 0,
336 	/* Task is idle */
337 	VTIME_IDLE,
338 	/* Task runs in kernelspace in a CPU with VTIME active: */
339 	VTIME_SYS,
340 	/* Task runs in userspace in a CPU with VTIME active: */
341 	VTIME_USER,
342 	/* Task runs as a guest in a CPU with VTIME active: */
343 	VTIME_GUEST,
344 };
345 
346 struct vtime {
347 	seqcount_t		seqcount;
348 	unsigned long long	starttime;
349 	enum vtime_state	state;
350 	unsigned int		cpu;
351 	u64			utime;
352 	u64			stime;
353 	u64			gtime;
354 };
355 
356 /*
357  * Utilization clamp constraints.
358  * @UCLAMP_MIN:	Minimum utilization
359  * @UCLAMP_MAX:	Maximum utilization
360  * @UCLAMP_CNT:	Utilization clamp constraints count
361  */
362 enum uclamp_id {
363 	UCLAMP_MIN = 0,
364 	UCLAMP_MAX,
365 	UCLAMP_CNT
366 };
367 
368 #ifdef CONFIG_SMP
369 extern struct root_domain def_root_domain;
370 extern struct mutex sched_domains_mutex;
371 #endif
372 
373 struct sched_info {
374 #ifdef CONFIG_SCHED_INFO
375 	/* Cumulative counters: */
376 
377 	/* # of times we have run on this CPU: */
378 	unsigned long			pcount;
379 
380 	/* Time spent waiting on a runqueue: */
381 	unsigned long long		run_delay;
382 
383 	/* Timestamps: */
384 
385 	/* When did we last run on a CPU? */
386 	unsigned long long		last_arrival;
387 
388 	/* When were we last queued to run? */
389 	unsigned long long		last_queued;
390 
391 #endif /* CONFIG_SCHED_INFO */
392 };
393 
394 /*
395  * Integer metrics need fixed point arithmetic, e.g., sched/fair
396  * has a few: load, load_avg, util_avg, freq, and capacity.
397  *
398  * We define a basic fixed point arithmetic range, and then formalize
399  * all these metrics based on that basic range.
400  */
401 # define SCHED_FIXEDPOINT_SHIFT		10
402 # define SCHED_FIXEDPOINT_SCALE		(1L << SCHED_FIXEDPOINT_SHIFT)
403 
404 /* Increase resolution of cpu_capacity calculations */
405 # define SCHED_CAPACITY_SHIFT		SCHED_FIXEDPOINT_SHIFT
406 # define SCHED_CAPACITY_SCALE		(1L << SCHED_CAPACITY_SHIFT)
407 
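/*
 * Editorial worked example (not part of the original header): with
 * SCHED_FIXEDPOINT_SHIFT == 10, "100%" of utilization or capacity is
 * represented as SCHED_CAPACITY_SCALE == 1024. A CPU that is busy half the
 * time therefore reports a utilization of roughly 512, and converting a
 * percentage into this fixed-point range is approximately:
 *
 *	util = pct * SCHED_CAPACITY_SCALE / 100;
 */
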
408 struct load_weight {
409 	unsigned long			weight;
410 	u32				inv_weight;
411 };
412 
413 /**
414  * struct util_est - Estimation utilization of FAIR tasks
415  * @enqueued: instantaneous estimated utilization of a task/cpu
416  * @ewma:     the Exponential Weighted Moving Average (EWMA)
417  *            utilization of a task
418  *
419  * Support data structure to track an Exponential Weighted Moving Average
420  * (EWMA) of a FAIR task's utilization. New samples are added to the moving
421  * average each time a task completes an activation. Sample's weight is chosen
422  * so that the EWMA will be relatively insensitive to transient changes to the
423  * task's workload.
424  *
425  * The enqueued attribute has a slightly different meaning for tasks and cpus:
426  * - task:   the task's util_avg at last task dequeue time
427  * - cfs_rq: the sum of util_est.enqueued for each RUNNABLE task on that CPU
428  * Thus, the util_est.enqueued of a task represents the contribution on the
429  * estimated utilization of the CPU where that task is currently enqueued.
430  *
431  * Only for tasks do we track a moving average of the past instantaneous
432  * estimated utilization. This allows sporadic drops in the utilization of
433  * an otherwise almost periodic task to be absorbed.
434  *
435  * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg
436  * updates. When a task is dequeued, its util_est should not be updated if its
437  * util_avg has not been updated in the meantime.
438  * This information is mapped into the MSB bit of util_est.enqueued at dequeue
439  * time. Since max value of util_est.enqueued for a task is 1024 (PELT util_avg
440  * for a task) it is safe to use MSB.
441  */
442 struct util_est {
443 	unsigned int			enqueued;
444 	unsigned int			ewma;
445 #define UTIL_EST_WEIGHT_SHIFT		2
446 #define UTIL_AVG_UNCHANGED		0x80000000
447 } __attribute__((__aligned__(sizeof(u64))));
448 
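/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * because a task's util_est never exceeds SCHED_CAPACITY_SCALE (1024), the
 * MSB is free to carry the UTIL_AVG_UNCHANGED flag described above. Readers
 * mask it out, roughly:
 *
 *	unsigned int est = READ_ONCE(p->se.avg.util_est.enqueued);
 *	est &= ~UTIL_AVG_UNCHANGED;	// plain estimated utilization
 */
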
449 /*
450  * The load/runnable/util_avg accumulates an infinite geometric series
451  * (see __update_load_avg_cfs_rq() in kernel/sched/pelt.c).
452  *
453  * [load_avg definition]
454  *
455  *   load_avg = runnable% * scale_load_down(load)
456  *
457  * [runnable_avg definition]
458  *
459  *   runnable_avg = runnable% * SCHED_CAPACITY_SCALE
460  *
461  * [util_avg definition]
462  *
463  *   util_avg = running% * SCHED_CAPACITY_SCALE
464  *
465  * where runnable% is the time ratio that a sched_entity is runnable and
466  * running% the time ratio that a sched_entity is running.
467  *
468  * For cfs_rq, they are the aggregated values of all runnable and blocked
469  * sched_entities.
470  *
471  * The load/runnable/util_avg doesn't directly factor frequency scaling and CPU
472  * capacity scaling. The scaling is done through the rq_clock_pelt that is used
473  * for computing those signals (see update_rq_clock_pelt())
474  *
475  * N.B., the above ratios (runnable% and running%) themselves are in the
476  * range of [0, 1]. To do fixed-point arithmetic, we therefore scale them
477  * to as large a range as necessary. This is for example reflected by
478  * util_avg's SCHED_CAPACITY_SCALE.
479  *
480  * [Overflow issue]
481  *
482  * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
483  * with the highest load (=88761), always runnable on a single cfs_rq,
484  * and should not overflow as the number already hits PID_MAX_LIMIT.
485  *
486  * For all other cases (including 32-bit kernels), struct load_weight's
487  * weight will overflow first before we do, because:
488  *
489  *    Max(load_avg) <= Max(load.weight)
490  *
491  * Then it is the load_weight's responsibility to consider overflow
492  * issues.
493  */
494 struct sched_avg {
495 	u64				last_update_time;
496 	u64				load_sum;
497 	u64				runnable_sum;
498 	u32				util_sum;
499 	u32				period_contrib;
500 	unsigned long			load_avg;
501 	unsigned long			runnable_avg;
502 	unsigned long			util_avg;
503 	struct util_est			util_est;
504 } ____cacheline_aligned;
505 
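/*
 * Editorial worked example (not part of the original header): following the
 * definitions above, an entity that is runnable 75% of the time has
 * runnable_avg of roughly 0.75 * SCHED_CAPACITY_SCALE = 768; if it actually
 * runs 50% of the time, its util_avg is roughly 512. load_avg additionally
 * scales by the entity's weight:
 *
 *	load_avg ~= runnable% * scale_load_down(load.weight)
 */
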
506 struct sched_statistics {
507 #ifdef CONFIG_SCHEDSTATS
508 	u64				wait_start;
509 	u64				wait_max;
510 	u64				wait_count;
511 	u64				wait_sum;
512 	u64				iowait_count;
513 	u64				iowait_sum;
514 
515 	u64				sleep_start;
516 	u64				sleep_max;
517 	s64				sum_sleep_runtime;
518 
519 	u64				block_start;
520 	u64				block_max;
521 	s64				sum_block_runtime;
522 
523 	u64				exec_max;
524 	u64				slice_max;
525 
526 	u64				nr_migrations_cold;
527 	u64				nr_failed_migrations_affine;
528 	u64				nr_failed_migrations_running;
529 	u64				nr_failed_migrations_hot;
530 	u64				nr_forced_migrations;
531 
532 	u64				nr_wakeups;
533 	u64				nr_wakeups_sync;
534 	u64				nr_wakeups_migrate;
535 	u64				nr_wakeups_local;
536 	u64				nr_wakeups_remote;
537 	u64				nr_wakeups_affine;
538 	u64				nr_wakeups_affine_attempts;
539 	u64				nr_wakeups_passive;
540 	u64				nr_wakeups_idle;
541 
542 #ifdef CONFIG_SCHED_CORE
543 	u64				core_forceidle_sum;
544 #endif
545 #endif /* CONFIG_SCHEDSTATS */
546 } ____cacheline_aligned;
547 
548 struct sched_entity {
549 	/* For load-balancing: */
550 	struct load_weight		load;
551 	struct rb_node			run_node;
552 	u64				deadline;
553 	u64				min_deadline;
554 
555 	struct list_head		group_node;
556 	unsigned int			on_rq;
557 
558 	u64				exec_start;
559 	u64				sum_exec_runtime;
560 	u64				prev_sum_exec_runtime;
561 	u64				vruntime;
562 	s64				vlag;
563 	u64				slice;
564 
565 	u64				nr_migrations;
566 
567 #ifdef CONFIG_FAIR_GROUP_SCHED
568 	int				depth;
569 	struct sched_entity		*parent;
570 	/* rq on which this entity is (to be) queued: */
571 	struct cfs_rq			*cfs_rq;
572 	/* rq "owned" by this entity/group: */
573 	struct cfs_rq			*my_q;
574 	/* cached value of my_q->h_nr_running */
575 	unsigned long			runnable_weight;
576 #endif
577 
578 #ifdef CONFIG_SCHED_LATENCY_NICE
579 	int				latency_weight;
580 #endif
581 
582 #ifdef CONFIG_SMP
583 	/*
584 	 * Per entity load average tracking.
585 	 *
586 	 * Put into separate cache line so it does not
587 	 * collide with read-mostly values above.
588 	 */
589 	struct sched_avg		avg;
590 #endif
591 };
592 
593 struct sched_rt_entity {
594 	struct list_head		run_list;
595 	unsigned long			timeout;
596 	unsigned long			watchdog_stamp;
597 	unsigned int			time_slice;
598 	unsigned short			on_rq;
599 	unsigned short			on_list;
600 
601 	struct sched_rt_entity		*back;
602 #ifdef CONFIG_RT_GROUP_SCHED
603 	struct sched_rt_entity		*parent;
604 	/* rq on which this entity is (to be) queued: */
605 	struct rt_rq			*rt_rq;
606 	/* rq "owned" by this entity/group: */
607 	struct rt_rq			*my_q;
608 #endif
609 } __randomize_layout;
610 
611 struct sched_dl_entity {
612 	struct rb_node			rb_node;
613 
614 	/*
615 	 * Original scheduling parameters. Copied here from sched_attr
616 	 * during sched_setattr(), they will remain the same until
617 	 * the next sched_setattr().
618 	 */
619 	u64				dl_runtime;	/* Maximum runtime for each instance	*/
620 	u64				dl_deadline;	/* Relative deadline of each instance	*/
621 	u64				dl_period;	/* Separation of two instances (period) */
622 	u64				dl_bw;		/* dl_runtime / dl_period		*/
623 	u64				dl_density;	/* dl_runtime / dl_deadline		*/
624 
625 	/*
626 	 * Actual scheduling parameters. Initialized with the values above,
627 	 * they are continuously updated during task execution. Note that
628 	 * the remaining runtime could be < 0 in case we are in overrun.
629 	 */
630 	s64				runtime;	/* Remaining runtime for this instance	*/
631 	u64				deadline;	/* Absolute deadline for this instance	*/
632 	unsigned int			flags;		/* Specifying the scheduler behaviour	*/
633 
634 	/*
635 	 * Some bool flags:
636 	 *
637 	 * @dl_throttled tells if we exhausted the runtime. If so, the
638 	 * task has to wait for a replenishment to be performed at the
639 	 * next firing of dl_timer.
640 	 *
641 	 * @dl_yielded tells if task gave up the CPU before consuming
642 	 * all its available runtime during the last job.
643 	 *
644 	 * @dl_non_contending tells if the task is inactive while still
645 	 * contributing to the active utilization. In other words, it
646 	 * indicates if the inactive timer has been armed and its handler
647 	 * has not been executed yet. This flag is useful to avoid race
648 	 * conditions between the inactive timer handler and the wakeup
649 	 * code.
650 	 *
651 	 * @dl_overrun tells if the task asked to be informed about runtime
652 	 * overruns.
653 	 */
654 	unsigned int			dl_throttled      : 1;
655 	unsigned int			dl_yielded        : 1;
656 	unsigned int			dl_non_contending : 1;
657 	unsigned int			dl_overrun	  : 1;
658 
659 	/*
660 	 * Bandwidth enforcement timer. Each -deadline task has its
661 	 * own bandwidth to be enforced, thus we need one timer per task.
662 	 */
663 	struct hrtimer			dl_timer;
664 
665 	/*
666 	 * Inactive timer, responsible for decreasing the active utilization
667 	 * at the "0-lag time". When a -deadline task blocks, it contributes
668 	 * to GRUB's active utilization until the "0-lag time", hence a
669 	 * timer is needed to decrease the active utilization at the correct
670 	 * time.
671 	 */
672 	struct hrtimer inactive_timer;
673 
674 #ifdef CONFIG_RT_MUTEXES
675 	/*
676 	 * Priority Inheritance. When a DEADLINE scheduling entity is boosted
677 	 * pi_se points to the donor, otherwise points to the dl_se it belongs
678 	 * to (the original one/itself).
679 	 */
680 	struct sched_dl_entity *pi_se;
681 #endif
682 };
683 
684 #ifdef CONFIG_UCLAMP_TASK
685 /* Number of utilization clamp buckets (shorter alias) */
686 #define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT
687 
688 /*
689  * Utilization clamp for a scheduling entity
690  * @value:		clamp value "assigned" to a se
691  * @bucket_id:		bucket index corresponding to the "assigned" value
692  * @active:		the se is currently refcounted in a rq's bucket
693  * @user_defined:	the requested clamp value comes from user-space
694  *
695  * The bucket_id is the index of the clamp bucket matching the clamp value
696  * which is pre-computed and stored to avoid expensive integer divisions from
697  * the fast path.
698  *
699  * The active bit is set whenever a task has got an "effective" value assigned,
700  * which can be different from the clamp value "requested" from user-space.
701  * This makes it possible to know that a task is refcounted in the rq's
702  * bucket corresponding to the "effective" bucket_id.
703  *
704  * The user_defined bit is set whenever a task has got a task-specific clamp
705  * value requested from userspace, i.e. the system defaults apply to this task
706  * just as a restriction. This allows default clamps to be relaxed when a
707  * less restrictive task-specific value has been requested, which implements
708  * a "nice"-like semantic. For example, a task running with a 20%
709  * default boost can still drop its own boosting to 0%.
710  */
711 struct uclamp_se {
712 	unsigned int value		: bits_per(SCHED_CAPACITY_SCALE);
713 	unsigned int bucket_id		: bits_per(UCLAMP_BUCKETS);
714 	unsigned int active		: 1;
715 	unsigned int user_defined	: 1;
716 };
717 #endif /* CONFIG_UCLAMP_TASK */
718 
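/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * bucket_id is precomputed from the clamp value so the fast path avoids an
 * integer division. Assuming the capacity range is split evenly across
 * UCLAMP_BUCKETS, the mapping is roughly:
 *
 *	delta     = SCHED_CAPACITY_SCALE / UCLAMP_BUCKETS;
 *	bucket_id = min(value / delta, UCLAMP_BUCKETS - 1);
 */
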
719 union rcu_special {
720 	struct {
721 		u8			blocked;
722 		u8			need_qs;
723 		u8			exp_hint; /* Hint for performance. */
724 		u8			need_mb; /* Readers need smp_mb(). */
725 	} b; /* Bits. */
726 	u32 s; /* Set of bits. */
727 };
728 
729 enum perf_event_task_context {
730 	perf_invalid_context = -1,
731 	perf_hw_context = 0,
732 	perf_sw_context,
733 	perf_nr_task_contexts,
734 };
735 
736 struct wake_q_node {
737 	struct wake_q_node *next;
738 };
739 
740 struct kmap_ctrl {
741 #ifdef CONFIG_KMAP_LOCAL
742 	int				idx;
743 	pte_t				pteval[KM_MAX_IDX];
744 #endif
745 };
746 
747 struct task_struct {
748 #ifdef CONFIG_THREAD_INFO_IN_TASK
749 	/*
750 	 * For reasons of header soup (see current_thread_info()), this
751 	 * must be the first element of task_struct.
752 	 */
753 	struct thread_info		thread_info;
754 #endif
755 	unsigned int			__state;
756 
757 #ifdef CONFIG_PREEMPT_RT
758 	/* saved state for "spinlock sleepers" */
759 	unsigned int			saved_state;
760 #endif
761 
762 	/*
763 	 * This begins the randomizable portion of task_struct. Only
764 	 * scheduling-critical items should be added above here.
765 	 */
766 	randomized_struct_fields_start
767 
768 	void				*stack;
769 	refcount_t			usage;
770 	/* Per task flags (PF_*), defined further below: */
771 	unsigned int			flags;
772 	unsigned int			ptrace;
773 
774 #ifdef CONFIG_SMP
775 	int				on_cpu;
776 	struct __call_single_node	wake_entry;
777 	unsigned int			wakee_flips;
778 	unsigned long			wakee_flip_decay_ts;
779 	struct task_struct		*last_wakee;
780 
781 	/*
782 	 * recent_used_cpu is initially set as the last CPU used by a task
783 	 * that wakes affine another task. Waker/wakee relationships can
784 	 * push tasks around a CPU where each wakeup moves to the next one.
785 	 * Tracking a recently used CPU allows a quick search for a recently
786 	 * used CPU that may be idle.
787 	 */
788 	int				recent_used_cpu;
789 	int				wake_cpu;
790 #endif
791 	int				on_rq;
792 
793 	int				prio;
794 	int				static_prio;
795 	int				normal_prio;
796 	unsigned int			rt_priority;
797 #ifdef CONFIG_SCHED_LATENCY_NICE
798 	int				latency_prio;
799 #endif
800 
801 	struct sched_entity		se;
802 	struct sched_rt_entity		rt;
803 	struct sched_dl_entity		dl;
804 	const struct sched_class	*sched_class;
805 
806 #ifdef CONFIG_SCHED_CORE
807 	struct rb_node			core_node;
808 	unsigned long			core_cookie;
809 	unsigned int			core_occupation;
810 #endif
811 
812 #ifdef CONFIG_CGROUP_SCHED
813 	struct task_group		*sched_task_group;
814 #endif
815 
816 #ifdef CONFIG_UCLAMP_TASK
817 	/*
818 	 * Clamp values requested for a scheduling entity.
819 	 * Must be updated with task_rq_lock() held.
820 	 */
821 	struct uclamp_se		uclamp_req[UCLAMP_CNT];
822 	/*
823 	 * Effective clamp values used for a scheduling entity.
824 	 * Must be updated with task_rq_lock() held.
825 	 */
826 	struct uclamp_se		uclamp[UCLAMP_CNT];
827 #endif
828 
829 	struct sched_statistics         stats;
830 
831 #ifdef CONFIG_PREEMPT_NOTIFIERS
832 	/* List of struct preempt_notifier: */
833 	struct hlist_head		preempt_notifiers;
834 #endif
835 
836 #ifdef CONFIG_BLK_DEV_IO_TRACE
837 	unsigned int			btrace_seq;
838 #endif
839 
840 	unsigned int			policy;
841 	int				nr_cpus_allowed;
842 	const cpumask_t			*cpus_ptr;
843 	cpumask_t			*user_cpus_ptr;
844 	cpumask_t			cpus_mask;
845 	void				*migration_pending;
846 #ifdef CONFIG_SMP
847 	unsigned short			migration_disabled;
848 #endif
849 	unsigned short			migration_flags;
850 
851 #ifdef CONFIG_PREEMPT_RCU
852 	int				rcu_read_lock_nesting;
853 	union rcu_special		rcu_read_unlock_special;
854 	struct list_head		rcu_node_entry;
855 	struct rcu_node			*rcu_blocked_node;
856 #endif /* #ifdef CONFIG_PREEMPT_RCU */
857 
858 #ifdef CONFIG_TASKS_RCU
859 	unsigned long			rcu_tasks_nvcsw;
860 	u8				rcu_tasks_holdout;
861 	u8				rcu_tasks_idx;
862 	int				rcu_tasks_idle_cpu;
863 	struct list_head		rcu_tasks_holdout_list;
864 #endif /* #ifdef CONFIG_TASKS_RCU */
865 
866 #ifdef CONFIG_TASKS_TRACE_RCU
867 	int				trc_reader_nesting;
868 	int				trc_ipi_to_cpu;
869 	union rcu_special		trc_reader_special;
870 	struct list_head		trc_holdout_list;
871 	struct list_head		trc_blkd_node;
872 	int				trc_blkd_cpu;
873 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
874 
875 	struct sched_info		sched_info;
876 
877 	struct list_head		tasks;
878 #ifdef CONFIG_SMP
879 	struct plist_node		pushable_tasks;
880 	struct rb_node			pushable_dl_tasks;
881 #endif
882 
883 	struct mm_struct		*mm;
884 	struct mm_struct		*active_mm;
885 
886 	int				exit_state;
887 	int				exit_code;
888 	int				exit_signal;
889 	/* The signal sent when the parent dies: */
890 	int				pdeath_signal;
891 	/* JOBCTL_*, siglock protected: */
892 	unsigned long			jobctl;
893 
894 	/* Used for emulating ABI behavior of previous Linux versions: */
895 	unsigned int			personality;
896 
897 	/* Scheduler bits, serialized by scheduler locks: */
898 	unsigned			sched_reset_on_fork:1;
899 	unsigned			sched_contributes_to_load:1;
900 	unsigned			sched_migrated:1;
901 
902 	/* Force alignment to the next boundary: */
903 	unsigned			:0;
904 
905 	/* Unserialized, strictly 'current' */
906 
907 	/*
908 	 * This field must not be in the scheduler word above due to wakelist
909 	 * queueing no longer being serialized by p->on_cpu. However:
910 	 *
911 	 * p->XXX = X;			ttwu()
912 	 * schedule()			  if (p->on_rq && ..) // false
913 	 *   smp_mb__after_spinlock();	  if (smp_load_acquire(&p->on_cpu) && //true
914 	 *   deactivate_task()		      ttwu_queue_wakelist())
915 	 *     p->on_rq = 0;			p->sched_remote_wakeup = Y;
916 	 *
917 	 * guarantees all stores of 'current' are visible before
918 	 * ->sched_remote_wakeup gets used, so it can be in this word.
919 	 */
920 	unsigned			sched_remote_wakeup:1;
921 
922 	/* Bit to tell LSMs we're in execve(): */
923 	unsigned			in_execve:1;
924 	unsigned			in_iowait:1;
925 #ifndef TIF_RESTORE_SIGMASK
926 	unsigned			restore_sigmask:1;
927 #endif
928 #ifdef CONFIG_MEMCG
929 	unsigned			in_user_fault:1;
930 #endif
931 #ifdef CONFIG_LRU_GEN
932 	/* whether the LRU algorithm may apply to this access */
933 	unsigned			in_lru_fault:1;
934 #endif
935 #ifdef CONFIG_COMPAT_BRK
936 	unsigned			brk_randomized:1;
937 #endif
938 #ifdef CONFIG_CGROUPS
939 	/* disallow userland-initiated cgroup migration */
940 	unsigned			no_cgroup_migration:1;
941 	/* task is frozen/stopped (used by the cgroup freezer) */
942 	unsigned			frozen:1;
943 #endif
944 #ifdef CONFIG_BLK_CGROUP
945 	unsigned			use_memdelay:1;
946 #endif
947 #ifdef CONFIG_PSI
948 	/* Stalled due to lack of memory */
949 	unsigned			in_memstall:1;
950 #endif
951 #ifdef CONFIG_PAGE_OWNER
952 	/* Used by page_owner=on to detect recursion in page tracking. */
953 	unsigned			in_page_owner:1;
954 #endif
955 #ifdef CONFIG_EVENTFD
956 	/* Recursion prevention for eventfd_signal() */
957 	unsigned			in_eventfd:1;
958 #endif
959 #ifdef CONFIG_IOMMU_SVA
960 	unsigned			pasid_activated:1;
961 #endif
962 #ifdef	CONFIG_CPU_SUP_INTEL
963 	unsigned			reported_split_lock:1;
964 #endif
965 #ifdef CONFIG_TASK_DELAY_ACCT
966 	/* delay due to memory thrashing */
967 	unsigned                        in_thrashing:1;
968 #endif
969 
970 	unsigned long			atomic_flags; /* Flags requiring atomic access. */
971 
972 	struct restart_block		restart_block;
973 
974 	pid_t				pid;
975 	pid_t				tgid;
976 
977 #ifdef CONFIG_STACKPROTECTOR
978 	/* Canary value for the -fstack-protector GCC feature: */
979 	unsigned long			stack_canary;
980 #endif
981 	/*
982 	 * Pointers to the (original) parent process, youngest child, younger sibling,
983 	 * older sibling, respectively.  (p->father can be replaced with
984 	 * p->real_parent->pid)
985 	 */
986 
987 	/* Real parent process: */
988 	struct task_struct __rcu	*real_parent;
989 
990 	/* Recipient of SIGCHLD, wait4() reports: */
991 	struct task_struct __rcu	*parent;
992 
993 	/*
994 	 * Children/sibling form the list of natural children:
995 	 */
996 	struct list_head		children;
997 	struct list_head		sibling;
998 	struct task_struct		*group_leader;
999 
1000 	/*
1001 	 * 'ptraced' is the list of tasks this task is using ptrace() on.
1002 	 *
1003 	 * This includes both natural children and PTRACE_ATTACH targets.
1004 	 * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
1005 	 */
1006 	struct list_head		ptraced;
1007 	struct list_head		ptrace_entry;
1008 
1009 	/* PID/PID hash table linkage. */
1010 	struct pid			*thread_pid;
1011 	struct hlist_node		pid_links[PIDTYPE_MAX];
1012 	struct list_head		thread_group;
1013 	struct list_head		thread_node;
1014 
1015 	struct completion		*vfork_done;
1016 
1017 	/* CLONE_CHILD_SETTID: */
1018 	int __user			*set_child_tid;
1019 
1020 	/* CLONE_CHILD_CLEARTID: */
1021 	int __user			*clear_child_tid;
1022 
1023 	/* PF_KTHREAD | PF_IO_WORKER */
1024 	void				*worker_private;
1025 
1026 	u64				utime;
1027 	u64				stime;
1028 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
1029 	u64				utimescaled;
1030 	u64				stimescaled;
1031 #endif
1032 	u64				gtime;
1033 	struct prev_cputime		prev_cputime;
1034 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1035 	struct vtime			vtime;
1036 #endif
1037 
1038 #ifdef CONFIG_NO_HZ_FULL
1039 	atomic_t			tick_dep_mask;
1040 #endif
1041 	/* Context switch counts: */
1042 	unsigned long			nvcsw;
1043 	unsigned long			nivcsw;
1044 
1045 	/* Monotonic time in nsecs: */
1046 	u64				start_time;
1047 
1048 	/* Boot based time in nsecs: */
1049 	u64				start_boottime;
1050 
1051 	/* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
1052 	unsigned long			min_flt;
1053 	unsigned long			maj_flt;
1054 
1055 	/* Empty if CONFIG_POSIX_CPUTIMERS=n */
1056 	struct posix_cputimers		posix_cputimers;
1057 
1058 #ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
1059 	struct posix_cputimers_work	posix_cputimers_work;
1060 #endif
1061 
1062 	/* Process credentials: */
1063 
1064 	/* Tracer's credentials at attach: */
1065 	const struct cred __rcu		*ptracer_cred;
1066 
1067 	/* Objective and real subjective task credentials (COW): */
1068 	const struct cred __rcu		*real_cred;
1069 
1070 	/* Effective (overridable) subjective task credentials (COW): */
1071 	const struct cred __rcu		*cred;
1072 
1073 #ifdef CONFIG_KEYS
1074 	/* Cached requested key. */
1075 	struct key			*cached_requested_key;
1076 #endif
1077 
1078 	/*
1079 	 * executable name, excluding path.
1080 	 *
1081 	 * - normally initialized by setup_new_exec()
1082 	 * - access it with [gs]et_task_comm()
1083 	 * - lock it with task_lock()
1084 	 */
1085 	char				comm[TASK_COMM_LEN];
1086 
1087 	struct nameidata		*nameidata;
1088 
1089 #ifdef CONFIG_SYSVIPC
1090 	struct sysv_sem			sysvsem;
1091 	struct sysv_shm			sysvshm;
1092 #endif
1093 #ifdef CONFIG_DETECT_HUNG_TASK
1094 	unsigned long			last_switch_count;
1095 	unsigned long			last_switch_time;
1096 #endif
1097 	/* Filesystem information: */
1098 	struct fs_struct		*fs;
1099 
1100 	/* Open file information: */
1101 	struct files_struct		*files;
1102 
1103 #ifdef CONFIG_IO_URING
1104 	struct io_uring_task		*io_uring;
1105 #endif
1106 
1107 	/* Namespaces: */
1108 	struct nsproxy			*nsproxy;
1109 
1110 	/* Signal handlers: */
1111 	struct signal_struct		*signal;
1112 	struct sighand_struct __rcu		*sighand;
1113 	sigset_t			blocked;
1114 	sigset_t			real_blocked;
1115 	/* Restored if set_restore_sigmask() was used: */
1116 	sigset_t			saved_sigmask;
1117 	struct sigpending		pending;
1118 	unsigned long			sas_ss_sp;
1119 	size_t				sas_ss_size;
1120 	unsigned int			sas_ss_flags;
1121 
1122 	struct callback_head		*task_works;
1123 
1124 #ifdef CONFIG_AUDIT
1125 #ifdef CONFIG_AUDITSYSCALL
1126 	struct audit_context		*audit_context;
1127 #endif
1128 	kuid_t				loginuid;
1129 	unsigned int			sessionid;
1130 #endif
1131 	struct seccomp			seccomp;
1132 	struct syscall_user_dispatch	syscall_dispatch;
1133 
1134 	/* Thread group tracking: */
1135 	u64				parent_exec_id;
1136 	u64				self_exec_id;
1137 
1138 	/* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
1139 	spinlock_t			alloc_lock;
1140 
1141 	/* Protection of the PI data structures: */
1142 	raw_spinlock_t			pi_lock;
1143 
1144 	struct wake_q_node		wake_q;
1145 
1146 #ifdef CONFIG_RT_MUTEXES
1147 	/* PI waiters blocked on a rt_mutex held by this task: */
1148 	struct rb_root_cached		pi_waiters;
1149 	/* Updated under owner's pi_lock and rq lock */
1150 	struct task_struct		*pi_top_task;
1151 	/* Deadlock detection and priority inheritance handling: */
1152 	struct rt_mutex_waiter		*pi_blocked_on;
1153 #endif
1154 
1155 #ifdef CONFIG_DEBUG_MUTEXES
1156 	/* Mutex deadlock detection: */
1157 	struct mutex_waiter		*blocked_on;
1158 #endif
1159 
1160 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1161 	int				non_block_count;
1162 #endif
1163 
1164 #ifdef CONFIG_TRACE_IRQFLAGS
1165 	struct irqtrace_events		irqtrace;
1166 	unsigned int			hardirq_threaded;
1167 	u64				hardirq_chain_key;
1168 	int				softirqs_enabled;
1169 	int				softirq_context;
1170 	int				irq_config;
1171 #endif
1172 #ifdef CONFIG_PREEMPT_RT
1173 	int				softirq_disable_cnt;
1174 #endif
1175 
1176 #ifdef CONFIG_LOCKDEP
1177 # define MAX_LOCK_DEPTH			48UL
1178 	u64				curr_chain_key;
1179 	int				lockdep_depth;
1180 	unsigned int			lockdep_recursion;
1181 	struct held_lock		held_locks[MAX_LOCK_DEPTH];
1182 #endif
1183 
1184 #if defined(CONFIG_UBSAN) && !defined(CONFIG_UBSAN_TRAP)
1185 	unsigned int			in_ubsan;
1186 #endif
1187 
1188 	/* Journalling filesystem info: */
1189 	void				*journal_info;
1190 
1191 	/* Stacked block device info: */
1192 	struct bio_list			*bio_list;
1193 
1194 	/* Stack plugging: */
1195 	struct blk_plug			*plug;
1196 
1197 	/* VM state: */
1198 	struct reclaim_state		*reclaim_state;
1199 
1200 	struct io_context		*io_context;
1201 
1202 #ifdef CONFIG_COMPACTION
1203 	struct capture_control		*capture_control;
1204 #endif
1205 	/* Ptrace state: */
1206 	unsigned long			ptrace_message;
1207 	kernel_siginfo_t		*last_siginfo;
1208 
1209 	struct task_io_accounting	ioac;
1210 #ifdef CONFIG_PSI
1211 	/* Pressure stall state */
1212 	unsigned int			psi_flags;
1213 #endif
1214 #ifdef CONFIG_TASK_XACCT
1215 	/* Accumulated RSS usage: */
1216 	u64				acct_rss_mem1;
1217 	/* Accumulated virtual memory usage: */
1218 	u64				acct_vm_mem1;
1219 	/* stime + utime since last update: */
1220 	u64				acct_timexpd;
1221 #endif
1222 #ifdef CONFIG_CPUSETS
1223 	/* Protected by ->alloc_lock: */
1224 	nodemask_t			mems_allowed;
1225 	/* Sequence number to catch updates: */
1226 	seqcount_spinlock_t		mems_allowed_seq;
1227 	int				cpuset_mem_spread_rotor;
1228 	int				cpuset_slab_spread_rotor;
1229 #endif
1230 #ifdef CONFIG_CGROUPS
1231 	/* Control Group info protected by css_set_lock: */
1232 	struct css_set __rcu		*cgroups;
1233 	/* cg_list protected by css_set_lock and tsk->alloc_lock: */
1234 	struct list_head		cg_list;
1235 #endif
1236 #ifdef CONFIG_X86_CPU_RESCTRL
1237 	u32				closid;
1238 	u32				rmid;
1239 #endif
1240 #ifdef CONFIG_FUTEX
1241 	struct robust_list_head __user	*robust_list;
1242 #ifdef CONFIG_COMPAT
1243 	struct compat_robust_list_head __user *compat_robust_list;
1244 #endif
1245 	struct list_head		pi_state_list;
1246 	struct futex_pi_state		*pi_state_cache;
1247 	struct mutex			futex_exit_mutex;
1248 	unsigned int			futex_state;
1249 #endif
1250 #ifdef CONFIG_PERF_EVENTS
1251 	struct perf_event_context	*perf_event_ctxp;
1252 	struct mutex			perf_event_mutex;
1253 	struct list_head		perf_event_list;
1254 #endif
1255 #ifdef CONFIG_DEBUG_PREEMPT
1256 	unsigned long			preempt_disable_ip;
1257 #endif
1258 #ifdef CONFIG_NUMA
1259 	/* Protected by alloc_lock: */
1260 	struct mempolicy		*mempolicy;
1261 	short				il_prev;
1262 	short				pref_node_fork;
1263 #endif
1264 #ifdef CONFIG_NUMA_BALANCING
1265 	int				numa_scan_seq;
1266 	unsigned int			numa_scan_period;
1267 	unsigned int			numa_scan_period_max;
1268 	int				numa_preferred_nid;
1269 	unsigned long			numa_migrate_retry;
1270 	/* Migration stamp: */
1271 	u64				node_stamp;
1272 	u64				last_task_numa_placement;
1273 	u64				last_sum_exec_runtime;
1274 	struct callback_head		numa_work;
1275 
1276 	/*
1277 	 * This pointer is only modified for current in syscall and
1278 	 * pagefault context (and for tasks being destroyed), so it can be read
1279 	 * from any of the following contexts:
1280 	 *  - RCU read-side critical section
1281 	 *  - current->numa_group from everywhere
1282 	 *  - task's runqueue locked, task not running
1283 	 */
1284 	struct numa_group __rcu		*numa_group;
1285 
1286 	/*
1287 	 * numa_faults is an array split into four regions:
1288 	 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
1289 	 * in this precise order.
1290 	 *
1291 	 * faults_memory: Exponential decaying average of faults on a per-node
1292 	 * basis. Scheduling placement decisions are made based on these
1293 	 * counts. The values remain static for the duration of a PTE scan.
1294 	 * faults_cpu: Track the nodes the process was running on when a NUMA
1295 	 * hinting fault was incurred.
1296 	 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
1297 	 * during the current scan window. When the scan completes, the counts
1298 	 * in faults_memory and faults_cpu decay and these values are copied.
1299 	 */
1300 	unsigned long			*numa_faults;
1301 	unsigned long			total_numa_faults;
1302 
1303 	/*
1304 	 * numa_faults_locality tracks if faults recorded during the last
1305 	 * scan window were remote/local or failed to migrate. The task scan
1306 	 * period is adapted based on the locality of the faults with different
1307 	 * weights depending on whether they were shared or private faults
1308 	 */
1309 	unsigned long			numa_faults_locality[3];
1310 
1311 	unsigned long			numa_pages_migrated;
1312 #endif /* CONFIG_NUMA_BALANCING */
1313 
1314 #ifdef CONFIG_RSEQ
1315 	struct rseq __user *rseq;
1316 	u32 rseq_len;
1317 	u32 rseq_sig;
1318 	/*
1319 	 * RmW on rseq_event_mask must be performed atomically
1320 	 * with respect to preemption.
1321 	 */
1322 	unsigned long rseq_event_mask;
1323 #endif
1324 
1325 #ifdef CONFIG_SCHED_MM_CID
1326 	int				mm_cid;		/* Current cid in mm */
1327 	int				last_mm_cid;	/* Most recent cid in mm */
1328 	int				migrate_from_cpu;
1329 	int				mm_cid_active;	/* Whether cid bitmap is active */
1330 	struct callback_head		cid_work;
1331 #endif
1332 
1333 	struct tlbflush_unmap_batch	tlb_ubc;
1334 
1335 	/* Cache last used pipe for splice(): */
1336 	struct pipe_inode_info		*splice_pipe;
1337 
1338 	struct page_frag		task_frag;
1339 
1340 #ifdef CONFIG_TASK_DELAY_ACCT
1341 	struct task_delay_info		*delays;
1342 #endif
1343 
1344 #ifdef CONFIG_FAULT_INJECTION
1345 	int				make_it_fail;
1346 	unsigned int			fail_nth;
1347 #endif
1348 	/*
1349 	 * When (nr_dirtied >= nr_dirtied_pause), it's time to call
1350 	 * balance_dirty_pages() for a dirty throttling pause:
1351 	 */
1352 	int				nr_dirtied;
1353 	int				nr_dirtied_pause;
1354 	/* Start of a write-and-pause period: */
1355 	unsigned long			dirty_paused_when;
1356 
1357 #ifdef CONFIG_LATENCYTOP
1358 	int				latency_record_count;
1359 	struct latency_record		latency_record[LT_SAVECOUNT];
1360 #endif
1361 	/*
1362 	 * Time slack values; these are used to round up poll() and
1363 	 * select() etc timeout values. These are in nanoseconds.
1364 	 */
1365 	u64				timer_slack_ns;
1366 	u64				default_timer_slack_ns;
1367 
1368 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
1369 	unsigned int			kasan_depth;
1370 #endif
1371 
1372 #ifdef CONFIG_KCSAN
1373 	struct kcsan_ctx		kcsan_ctx;
1374 #ifdef CONFIG_TRACE_IRQFLAGS
1375 	struct irqtrace_events		kcsan_save_irqtrace;
1376 #endif
1377 #ifdef CONFIG_KCSAN_WEAK_MEMORY
1378 	int				kcsan_stack_depth;
1379 #endif
1380 #endif
1381 
1382 #ifdef CONFIG_KMSAN
1383 	struct kmsan_ctx		kmsan_ctx;
1384 #endif
1385 
1386 #if IS_ENABLED(CONFIG_KUNIT)
1387 	struct kunit			*kunit_test;
1388 #endif
1389 
1390 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1391 	/* Index of current stored address in ret_stack: */
1392 	int				curr_ret_stack;
1393 	int				curr_ret_depth;
1394 
1395 	/* Stack of return addresses for return function tracing: */
1396 	struct ftrace_ret_stack		*ret_stack;
1397 
1398 	/* Timestamp for last schedule: */
1399 	unsigned long long		ftrace_timestamp;
1400 
1401 	/*
1402 	 * Number of functions that haven't been traced
1403 	 * because of depth overrun:
1404 	 */
1405 	atomic_t			trace_overrun;
1406 
1407 	/* Pause tracing: */
1408 	atomic_t			tracing_graph_pause;
1409 #endif
1410 
1411 #ifdef CONFIG_TRACING
1412 	/* Bitmask and counter of trace recursion: */
1413 	unsigned long			trace_recursion;
1414 #endif /* CONFIG_TRACING */
1415 
1416 #ifdef CONFIG_KCOV
1417 	/* See kernel/kcov.c for more details. */
1418 
1419 	/* Coverage collection mode enabled for this task (0 if disabled): */
1420 	unsigned int			kcov_mode;
1421 
1422 	/* Size of the kcov_area: */
1423 	unsigned int			kcov_size;
1424 
1425 	/* Buffer for coverage collection: */
1426 	void				*kcov_area;
1427 
1428 	/* KCOV descriptor wired with this task or NULL: */
1429 	struct kcov			*kcov;
1430 
1431 	/* KCOV common handle for remote coverage collection: */
1432 	u64				kcov_handle;
1433 
1434 	/* KCOV sequence number: */
1435 	int				kcov_sequence;
1436 
1437 	/* Collect coverage from softirq context: */
1438 	unsigned int			kcov_softirq;
1439 #endif
1440 
1441 #ifdef CONFIG_MEMCG
1442 	struct mem_cgroup		*memcg_in_oom;
1443 	gfp_t				memcg_oom_gfp_mask;
1444 	int				memcg_oom_order;
1445 
1446 	/* Number of pages to reclaim on returning to userland: */
1447 	unsigned int			memcg_nr_pages_over_high;
1448 
1449 	/* Used by memcontrol for targeted memcg charge: */
1450 	struct mem_cgroup		*active_memcg;
1451 #endif
1452 
1453 #ifdef CONFIG_BLK_CGROUP
1454 	struct gendisk			*throttle_disk;
1455 #endif
1456 
1457 #ifdef CONFIG_UPROBES
1458 	struct uprobe_task		*utask;
1459 #endif
1460 #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1461 	unsigned int			sequential_io;
1462 	unsigned int			sequential_io_avg;
1463 #endif
1464 	struct kmap_ctrl		kmap_ctrl;
1465 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1466 	unsigned long			task_state_change;
1467 # ifdef CONFIG_PREEMPT_RT
1468 	unsigned long			saved_state_change;
1469 # endif
1470 #endif
1471 	struct rcu_head			rcu;
1472 	refcount_t			rcu_users;
1473 	int				pagefault_disabled;
1474 #ifdef CONFIG_MMU
1475 	struct task_struct		*oom_reaper_list;
1476 	struct timer_list		oom_reaper_timer;
1477 #endif
1478 #ifdef CONFIG_VMAP_STACK
1479 	struct vm_struct		*stack_vm_area;
1480 #endif
1481 #ifdef CONFIG_THREAD_INFO_IN_TASK
1482 	/* A live task holds one reference: */
1483 	refcount_t			stack_refcount;
1484 #endif
1485 #ifdef CONFIG_LIVEPATCH
1486 	int patch_state;
1487 #endif
1488 #ifdef CONFIG_SECURITY
1489 	/* Used by LSM modules for access restriction: */
1490 	void				*security;
1491 #endif
1492 #ifdef CONFIG_BPF_SYSCALL
1493 	/* Used by BPF task local storage */
1494 	struct bpf_local_storage __rcu	*bpf_storage;
1495 	/* Used for BPF run context */
1496 	struct bpf_run_ctx		*bpf_ctx;
1497 #endif
1498 
1499 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
1500 	unsigned long			lowest_stack;
1501 	unsigned long			prev_lowest_stack;
1502 #endif
1503 
1504 #ifdef CONFIG_X86_MCE
1505 	void __user			*mce_vaddr;
1506 	__u64				mce_kflags;
1507 	u64				mce_addr;
1508 	__u64				mce_ripv : 1,
1509 					mce_whole_page : 1,
1510 					__mce_reserved : 62;
1511 	struct callback_head		mce_kill_me;
1512 	int				mce_count;
1513 #endif
1514 
1515 #ifdef CONFIG_KRETPROBES
1516 	struct llist_head               kretprobe_instances;
1517 #endif
1518 #ifdef CONFIG_RETHOOK
1519 	struct llist_head               rethooks;
1520 #endif
1521 
1522 #ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH
1523 	/*
1524 	 * If L1D flush is supported on mm context switch
1525 	 * then we use this callback head to queue kill work
1526 	 * to kill tasks that are not running on SMT disabled
1527 	 * cores
1528 	 */
1529 	struct callback_head		l1d_flush_kill;
1530 #endif
1531 
1532 #ifdef CONFIG_RV
1533 	/*
1534 	 * Per-task RV monitor. Nowadays fixed in RV_PER_TASK_MONITORS.
1535 	 * If we find justification for more monitors, we can think
1536 	 * about adding more or developing a dynamic method. So far,
1537 	 * none of these are justified.
1538 	 */
1539 	union rv_task_monitor		rv[RV_PER_TASK_MONITORS];
1540 #endif
1541 
1542 #ifdef CONFIG_USER_EVENTS
1543 	struct user_event_mm		*user_event_mm;
1544 #endif
1545 
1546 #ifdef CONFIG_ACCESS_TOKENID
1547 	u64				token;
1548 	u64				ftoken;
1549 #endif
1550 
1551 	/*
1552 	 * New fields for task_struct should be added above here, so that
1553 	 * they are included in the randomized portion of task_struct.
1554 	 */
1555 	randomized_struct_fields_end
1556 
1557 	/* CPU-specific state of this task: */
1558 	struct thread_struct		thread;
1559 
1560 	/*
1561 	 * WARNING: on x86, 'thread_struct' contains a variable-sized
1562 	 * structure.  It *MUST* be at the end of 'task_struct'.
1563 	 *
1564 	 * Do not put anything below here!
1565 	 */
1566 };
1567 
1568 static inline struct pid *task_pid(struct task_struct *task)
1569 {
1570 	return task->thread_pid;
1571 }
1572 
1573 /*
1574  * the helpers to get the task's different pids as they are seen
1575  * from various namespaces
1576  *
1577  * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
1578  * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
1579  *                     current.
1580  * task_xid_nr_ns()  : id seen from the ns specified;
1581  *
1582  * see also pid_nr() etc in include/linux/pid.h
1583  */
1584 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
1585 
1586 static inline pid_t task_pid_nr(struct task_struct *tsk)
1587 {
1588 	return tsk->pid;
1589 }
1590 
1591 static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1592 {
1593 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1594 }
1595 
1596 static inline pid_t task_pid_vnr(struct task_struct *tsk)
1597 {
1598 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1599 }
1600 
1601 
1602 static inline pid_t task_tgid_nr(struct task_struct *tsk)
1603 {
1604 	return tsk->tgid;
1605 }
1606 
1607 /**
1608  * pid_alive - check that a task structure is not stale
1609  * @p: Task structure to be checked.
1610  *
1611  * Test if a process is not yet dead (at most zombie state)
1612  * If pid_alive fails, then pointers within the task structure
1613  * can be stale and must not be dereferenced.
1614  *
1615  * Return: 1 if the process is alive. 0 otherwise.
1616  */
1617 static inline int pid_alive(const struct task_struct *p)
1618 {
1619 	return p->thread_pid != NULL;
1620 }
1621 
1622 static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1623 {
1624 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1625 }
1626 
1627 static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1628 {
1629 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1630 }
1631 
1632 
1633 static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1634 {
1635 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1636 }
1637 
1638 static inline pid_t task_session_vnr(struct task_struct *tsk)
1639 {
1640 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1641 }
1642 
1643 static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1644 {
1645 	return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
1646 }
1647 
1648 static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1649 {
1650 	return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
1651 }
1652 
1653 static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1654 {
1655 	pid_t pid = 0;
1656 
1657 	rcu_read_lock();
1658 	if (pid_alive(tsk))
1659 		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1660 	rcu_read_unlock();
1661 
1662 	return pid;
1663 }
1664 
1665 static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1666 {
1667 	return task_ppid_nr_ns(tsk, &init_pid_ns);
1668 }
1669 
1670 /* Obsolete, do not use: */
1671 static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1672 {
1673 	return task_pgrp_nr_ns(tsk, &init_pid_ns);
1674 }
1675 
1676 #define TASK_REPORT_IDLE	(TASK_REPORT + 1)
1677 #define TASK_REPORT_MAX		(TASK_REPORT_IDLE << 1)
1678 
1679 static inline unsigned int __task_state_index(unsigned int tsk_state,
1680 					      unsigned int tsk_exit_state)
1681 {
1682 	unsigned int state = (tsk_state | tsk_exit_state) & TASK_REPORT;
1683 
1684 	BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
1685 
1686 	if ((tsk_state & TASK_IDLE) == TASK_IDLE)
1687 		state = TASK_REPORT_IDLE;
1688 
1689 	/*
1690 	 * We're lying here, but rather than expose a completely new task state
1691 	 * to userspace, we can make this appear as if the task has gone through
1692 	 * a regular rt_mutex_lock() call.
1693 	 */
1694 	if (tsk_state & TASK_RTLOCK_WAIT)
1695 		state = TASK_UNINTERRUPTIBLE;
1696 
1697 	return fls(state);
1698 }
1699 
1700 static inline unsigned int task_state_index(struct task_struct *tsk)
1701 {
1702 	return __task_state_index(READ_ONCE(tsk->__state), tsk->exit_state);
1703 }
1704 
1705 static inline char task_index_to_char(unsigned int state)
1706 {
1707 	static const char state_char[] = "RSDTtXZPI";
1708 
1709 	BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);
1710 
1711 	return state_char[state];
1712 }
1713 
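/*
 * Editorial note (not part of the original header): the "RSDTtXZPI" table in
 * task_index_to_char() follows the TASK_REPORT bit order, i.e. the characters
 * reported via /proc/<pid>/stat and shown by ps(1):
 *
 *	R TASK_RUNNING        S TASK_INTERRUPTIBLE   D TASK_UNINTERRUPTIBLE
 *	T __TASK_STOPPED      t __TASK_TRACED        X EXIT_DEAD
 *	Z EXIT_ZOMBIE         P TASK_PARKED          I TASK_IDLE (reported as idle)
 */
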
1714 static inline char task_state_to_char(struct task_struct *tsk)
1715 {
1716 	return task_index_to_char(task_state_index(tsk));
1717 }
1718 
1719 /**
1720  * is_global_init - check if a task structure is init. Since init
1721  * is free to have sub-threads we need to check tgid.
1722  * @tsk: Task structure to be checked.
1723  *
1724  * Check if a task structure is the first user space task the kernel created.
1725  *
1726  * Return: 1 if the task structure is init. 0 otherwise.
1727  */
1728 static inline int is_global_init(struct task_struct *tsk)
1729 {
1730 	return task_tgid_nr(tsk) == 1;
1731 }
1732 
1733 extern struct pid *cad_pid;
1734 
1735 /*
1736  * Per process flags
1737  */
1738 #define PF_VCPU			0x00000001	/* I'm a virtual CPU */
1739 #define PF_IDLE			0x00000002	/* I am an IDLE thread */
1740 #define PF_EXITING		0x00000004	/* Getting shut down */
1741 #define PF_POSTCOREDUMP		0x00000008	/* Coredumps should ignore this task */
1742 #define PF_IO_WORKER		0x00000010	/* Task is an IO worker */
1743 #define PF_WQ_WORKER		0x00000020	/* I'm a workqueue worker */
1744 #define PF_FORKNOEXEC		0x00000040	/* Forked but didn't exec */
1745 #define PF_MCE_PROCESS		0x00000080      /* Process policy on mce errors */
1746 #define PF_SUPERPRIV		0x00000100	/* Used super-user privileges */
1747 #define PF_DUMPCORE		0x00000200	/* Dumped core */
1748 #define PF_SIGNALED		0x00000400	/* Killed by a signal */
1749 #define PF_MEMALLOC		0x00000800	/* Allocating memory */
1750 #define PF_NPROC_EXCEEDED	0x00001000	/* set_user() noticed that RLIMIT_NPROC was exceeded */
1751 #define PF_USED_MATH		0x00002000	/* If unset the fpu must be initialized before use */
1752 #define PF_USER_WORKER		0x00004000	/* Kernel thread cloned from userspace thread */
1753 #define PF_NOFREEZE		0x00008000	/* This thread should not be frozen */
1754 #define PF__HOLE__00010000	0x00010000
1755 #define PF_FROZEN		PF__HOLE__00010000	/* Frozen for system suspend */
1756 #define PF_KSWAPD		0x00020000	/* I am kswapd */
1757 #define PF_MEMALLOC_NOFS	0x00040000	/* All allocation requests will inherit GFP_NOFS */
1758 #define PF_MEMALLOC_NOIO	0x00080000	/* All allocation requests will inherit GFP_NOIO */
1759 #define PF_LOCAL_THROTTLE	0x00100000	/* Throttle writes only against the bdi I write to,
1760 						 * I am cleaning dirty pages from some other bdi. */
1761 #define PF_KTHREAD		0x00200000	/* I am a kernel thread */
1762 #define PF_RANDOMIZE		0x00400000	/* Randomize virtual address space */
1763 #define PF__HOLE__00800000	0x00800000
1764 #define PF__HOLE__01000000	0x01000000
1765 #define PF__HOLE__02000000	0x02000000
1766 #define PF_NO_SETAFFINITY	0x04000000	/* Userland is not allowed to meddle with cpus_mask */
1767 #define PF_MCE_EARLY		0x08000000      /* Early kill for mce process policy */
1768 #define PF_MEMALLOC_PIN		0x10000000	/* Allocation context constrained to zones which allow long term pinning. */
1769 #define PF__HOLE__20000000	0x20000000
1770 #define PF__HOLE__40000000	0x40000000
1771 #define PF_SUSPEND_TASK		0x80000000      /* This thread called freeze_processes() and should not be frozen */
1772 
1773 /*
1774  * Only the _current_ task can read/write to tsk->flags, but other
1775  * tasks can access tsk->flags in readonly mode for example
1776  * with tsk_used_math (like during threaded core dumping).
1777  * There is however an exception to this rule during ptrace
1778  * or during fork: the ptracer task is allowed to write to the
1779  * child->flags of its traced child (same goes for fork, the parent
1780  * can write to the child->flags), because we're guaranteed the
1781  * child is not running and in turn not changing child->flags
1782  * at the same time the parent does it.
1783  */
1784 #define clear_stopped_child_used_math(child)	do { (child)->flags &= ~PF_USED_MATH; } while (0)
1785 #define set_stopped_child_used_math(child)	do { (child)->flags |= PF_USED_MATH; } while (0)
1786 #define clear_used_math()			clear_stopped_child_used_math(current)
1787 #define set_used_math()				set_stopped_child_used_math(current)
1788 
1789 #define conditional_stopped_child_used_math(condition, child) \
1790 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1791 
1792 #define conditional_used_math(condition)	conditional_stopped_child_used_math(condition, current)
1793 
1794 #define copy_to_stopped_child_used_math(child) \
1795 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1796 
1797 /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
1798 #define tsk_used_math(p)			((p)->flags & PF_USED_MATH)
1799 #define used_math()				tsk_used_math(current)
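/*
 * Usage sketch (hypothetical caller, not part of this header): only
 * current writes its own PF_USED_MATH, e.g. set_used_math() after the
 * first FPU use, while a threaded core dump may read another thread's
 * flag:
 *
 *	if (tsk_used_math(p))
 *		collect_fpu_state(p);	(hypothetical helper)
 */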
1800 
1801 static __always_inline bool is_percpu_thread(void)
1802 {
1803 #ifdef CONFIG_SMP
1804 	return (current->flags & PF_NO_SETAFFINITY) &&
1805 		(current->nr_cpus_allowed  == 1);
1806 #else
1807 	return true;
1808 #endif
1809 }
1810 
1811 /* Per-process atomic flags. */
1812 #define PFA_NO_NEW_PRIVS		0	/* May not gain new privileges. */
1813 #define PFA_SPREAD_PAGE			1	/* Spread page cache over cpuset */
1814 #define PFA_SPREAD_SLAB			2	/* Spread some slab caches over cpuset */
1815 #define PFA_SPEC_SSB_DISABLE		3	/* Speculative Store Bypass disabled */
1816 #define PFA_SPEC_SSB_FORCE_DISABLE	4	/* Speculative Store Bypass force disabled*/
1817 #define PFA_SPEC_IB_DISABLE		5	/* Indirect branch speculation restricted */
1818 #define PFA_SPEC_IB_FORCE_DISABLE	6	/* Indirect branch speculation permanently restricted */
1819 #define PFA_SPEC_SSB_NOEXEC		7	/* Speculative Store Bypass clear on execve() */
1820 
1821 #define TASK_PFA_TEST(name, func)					\
1822 	static inline bool task_##func(struct task_struct *p)		\
1823 	{ return test_bit(PFA_##name, &p->atomic_flags); }
1824 
1825 #define TASK_PFA_SET(name, func)					\
1826 	static inline void task_set_##func(struct task_struct *p)	\
1827 	{ set_bit(PFA_##name, &p->atomic_flags); }
1828 
1829 #define TASK_PFA_CLEAR(name, func)					\
1830 	static inline void task_clear_##func(struct task_struct *p)	\
1831 	{ clear_bit(PFA_##name, &p->atomic_flags); }
1832 
1833 TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
1834 TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
1835 
1836 TASK_PFA_TEST(SPREAD_PAGE, spread_page)
1837 TASK_PFA_SET(SPREAD_PAGE, spread_page)
1838 TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
1839 
1840 TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
1841 TASK_PFA_SET(SPREAD_SLAB, spread_slab)
1842 TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
1843 
1844 TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
1845 TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
1846 TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
1847 
1848 TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1849 TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1850 TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1851 
1852 TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1853 TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1854 
1855 TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
1856 TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
1857 TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
1858 
1859 TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
1860 TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
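/*
 * Note: each TASK_PFA_TEST/SET/CLEAR() instantiation above expands to a
 * small accessor; for instance TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
 * yields:
 *
 *	static inline bool task_no_new_privs(struct task_struct *p)
 *	{ return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags); }
 *
 * so callers simply use task_no_new_privs(current) and friends.
 */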
1861 
1862 static inline void
1863 current_restore_flags(unsigned long orig_flags, unsigned long flags)
1864 {
1865 	current->flags &= ~flags;
1866 	current->flags |= orig_flags & flags;
1867 }
1868 
1869 extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
1870 extern int task_can_attach(struct task_struct *p);
1871 extern int dl_bw_alloc(int cpu, u64 dl_bw);
1872 extern void dl_bw_free(int cpu, u64 dl_bw);
1873 #ifdef CONFIG_SMP
1874 
1875 /* do_set_cpus_allowed() - consider using set_cpus_allowed_ptr() instead */
1876 extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
1877 
1878 /**
1879  * set_cpus_allowed_ptr - set CPU affinity mask of a task
1880  * @p: the task
1881  * @new_mask: CPU affinity mask
1882  *
1883  * Return: zero if successful, or a negative error code
1884  */
1885 extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
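/*
 * Usage sketch (illustrative, with a hypothetical worker_fn): pinning a
 * freshly created kthread to one CPU before waking it; kthread_bind()
 * is the usual shortcut for this particular case:
 *
 *	struct task_struct *p = kthread_create(worker_fn, NULL, "worker/%d", cpu);
 *
 *	if (!IS_ERR(p)) {
 *		set_cpus_allowed_ptr(p, cpumask_of(cpu));
 *		wake_up_process(p);
 *	}
 */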
1886 extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node);
1887 extern void release_user_cpus_ptr(struct task_struct *p);
1888 extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask);
1889 extern void force_compatible_cpus_allowed_ptr(struct task_struct *p);
1890 extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p);
1891 #else
1892 static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
1893 {
1894 }
1895 static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
1896 {
1897 	if (!cpumask_test_cpu(0, new_mask))
1898 		return -EINVAL;
1899 	return 0;
1900 }
1901 static inline int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node)
1902 {
1903 	if (src->user_cpus_ptr)
1904 		return -EINVAL;
1905 	return 0;
1906 }
1907 static inline void release_user_cpus_ptr(struct task_struct *p)
1908 {
1909 	WARN_ON(p->user_cpus_ptr);
1910 }
1911 
1912 static inline int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask)
1913 {
1914 	return 0;
1915 }
1916 #endif
1917 
1918 extern int yield_to(struct task_struct *p, bool preempt);
1919 extern void set_user_nice(struct task_struct *p, long nice);
1920 extern int task_prio(const struct task_struct *p);
1921 
1922 /**
1923  * task_nice - return the nice value of a given task.
1924  * @p: the task in question.
1925  *
1926  * Return: The nice value [ -20 ... 0 ... 19 ].
1927  */
1928 static inline int task_nice(const struct task_struct *p)
1929 {
1930 	return PRIO_TO_NICE((p)->static_prio);
1931 }
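/*
 * Worked example: PRIO_TO_NICE() subtracts DEFAULT_PRIO (120, see
 * <linux/sched/prio.h>), so static_prio 120 reports nice 0, 100 reports
 * nice -20 and 139 reports nice 19.
 */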
1932 
1933 extern int can_nice(const struct task_struct *p, const int nice);
1934 extern int task_curr(const struct task_struct *p);
1935 extern int idle_cpu(int cpu);
1936 extern int available_idle_cpu(int cpu);
1937 extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
1938 extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
1939 extern void sched_set_fifo(struct task_struct *p);
1940 extern void sched_set_fifo_low(struct task_struct *p);
1941 extern void sched_set_normal(struct task_struct *p, int nice);
1942 extern int sched_setattr(struct task_struct *, const struct sched_attr *);
1943 extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
1944 extern struct task_struct *idle_task(int cpu);
1945 
1946 /**
1947  * is_idle_task - is the specified task an idle task?
1948  * @p: the task in question.
1949  *
1950  * Return: 1 if @p is an idle task. 0 otherwise.
1951  */
1952 static __always_inline bool is_idle_task(const struct task_struct *p)
1953 {
1954 	return !!(p->flags & PF_IDLE);
1955 }
1956 
1957 extern struct task_struct *curr_task(int cpu);
1958 extern void ia64_set_curr_task(int cpu, struct task_struct *p);
1959 
1960 void yield(void);
1961 
1962 union thread_union {
1963 #ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK
1964 	struct task_struct task;
1965 #endif
1966 #ifndef CONFIG_THREAD_INFO_IN_TASK
1967 	struct thread_info thread_info;
1968 #endif
1969 	unsigned long stack[THREAD_SIZE/sizeof(long)];
1970 };
1971 
1972 #ifndef CONFIG_THREAD_INFO_IN_TASK
1973 extern struct thread_info init_thread_info;
1974 #endif
1975 
1976 extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];
1977 
1978 #ifdef CONFIG_THREAD_INFO_IN_TASK
1979 # define task_thread_info(task)	(&(task)->thread_info)
1980 #elif !defined(__HAVE_THREAD_FUNCTIONS)
1981 # define task_thread_info(task)	((struct thread_info *)(task)->stack)
1982 #endif
1983 
1984 /*
1985  * find a task by one of its numerical ids
1986  *
1987  * find_task_by_pid_ns():
1988  *      finds a task by its pid in the specified namespace
1989  * find_task_by_vpid():
1990  *      finds a task by its virtual pid
1991  *
1992  * see also find_vpid() etc in include/linux/pid.h
1993  */
1994 
1995 extern struct task_struct *find_task_by_vpid(pid_t nr);
1996 extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);
1997 
1998 /*
1999  * find a task by its virtual pid and get the task struct
2000  */
2001 extern struct task_struct *find_get_task_by_vpid(pid_t nr);
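/*
 * Usage sketch (assumed caller): find_task_by_vpid() returns an
 * unreferenced pointer and must be called under rcu_read_lock(); use
 * find_get_task_by_vpid() when a reference is needed afterwards:
 *
 *	rcu_read_lock();
 *	p = find_task_by_vpid(nr);
 *	if (p)
 *		... p is only valid inside this RCU section ...
 *	rcu_read_unlock();
 */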
2002 
2003 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2004 extern int wake_up_process(struct task_struct *tsk);
2005 extern void wake_up_new_task(struct task_struct *tsk);
2006 
2007 #ifdef CONFIG_SMP
2008 extern void kick_process(struct task_struct *tsk);
2009 #else
2010 static inline void kick_process(struct task_struct *tsk) { }
2011 #endif
2012 
2013 extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
2014 
2015 static inline void set_task_comm(struct task_struct *tsk, const char *from)
2016 {
2017 	__set_task_comm(tsk, from, false);
2018 }
2019 
2020 extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
2021 #define get_task_comm(buf, tsk) ({			\
2022 	BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN);	\
2023 	__get_task_comm(buf, sizeof(buf), tsk);		\
2024 })
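/*
 * Usage sketch: get_task_comm() requires a fixed-size array so the
 * BUILD_BUG_ON() can verify the length at compile time:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	get_task_comm(comm, tsk);
 *	pr_debug("comm: %s\n", comm);
 */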
2025 
2026 #ifdef CONFIG_SMP
2027 static __always_inline void scheduler_ipi(void)
2028 {
2029 	/*
2030 	 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
2031 	 * TIF_NEED_RESCHED remotely (for the first time) will also send
2032 	 * this IPI.
2033 	 */
2034 	preempt_fold_need_resched();
2035 }
2036 #else
2037 static inline void scheduler_ipi(void) { }
2038 #endif
2039 
2040 extern unsigned long wait_task_inactive(struct task_struct *, unsigned int match_state);
2041 
2042 /*
2043  * Set thread flags in other task's structures.
2044  * See asm/thread_info.h for TIF_xxxx flags available:
2045  */
2046 static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
2047 {
2048 	set_ti_thread_flag(task_thread_info(tsk), flag);
2049 }
2050 
2051 static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2052 {
2053 	clear_ti_thread_flag(task_thread_info(tsk), flag);
2054 }
2055 
2056 static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag,
2057 					  bool value)
2058 {
2059 	update_ti_thread_flag(task_thread_info(tsk), flag, value);
2060 }
2061 
2062 static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
2063 {
2064 	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
2065 }
2066 
2067 static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2068 {
2069 	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
2070 }
2071 
2072 static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
2073 {
2074 	return test_ti_thread_flag(task_thread_info(tsk), flag);
2075 }
2076 
2077 static inline void set_tsk_need_resched(struct task_struct *tsk)
2078 {
2079 	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2080 }
2081 
2082 static inline void clear_tsk_need_resched(struct task_struct *tsk)
2083 {
2084 	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2085 }
2086 
2087 static inline int test_tsk_need_resched(struct task_struct *tsk)
2088 {
2089 	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
2090 }
2091 
2092 /*
2093  * cond_resched() and cond_resched_lock(): latency reduction via
2094  * explicit rescheduling in places that are safe. The return
2095  * value indicates whether a reschedule was done in fact.
2096  * cond_resched_lock() will drop the spinlock before scheduling, and re-acquire it before returning.
2097  */
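/*
 * Usage sketch (hypothetical loop): cond_resched() is sprinkled into
 * long-running loops that could otherwise hog the CPU on
 * non-preemptible kernels:
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(&items[i]);	(hypothetical work)
 *		cond_resched();
 *	}
 */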
2098 #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
2099 extern int __cond_resched(void);
2100 
2101 #if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
2102 
2103 void sched_dynamic_klp_enable(void);
2104 void sched_dynamic_klp_disable(void);
2105 
2106 DECLARE_STATIC_CALL(cond_resched, __cond_resched);
2107 
2108 static __always_inline int _cond_resched(void)
2109 {
2110 	return static_call_mod(cond_resched)();
2111 }
2112 
2113 #elif defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
2114 
2115 extern int dynamic_cond_resched(void);
2116 
2117 static __always_inline int _cond_resched(void)
2118 {
2119 	return dynamic_cond_resched();
2120 }
2121 
2122 #else /* !CONFIG_PREEMPTION */
2123 
2124 static inline int _cond_resched(void)
2125 {
2126 	klp_sched_try_switch();
2127 	return __cond_resched();
2128 }
2129 
2130 #endif /* PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
2131 
2132 #else /* CONFIG_PREEMPTION && !CONFIG_PREEMPT_DYNAMIC */
2133 
2134 static inline int _cond_resched(void)
2135 {
2136 	klp_sched_try_switch();
2137 	return 0;
2138 }
2139 
2140 #endif /* !CONFIG_PREEMPTION || CONFIG_PREEMPT_DYNAMIC */
2141 
2142 #define cond_resched() ({			\
2143 	__might_resched(__FILE__, __LINE__, 0);	\
2144 	_cond_resched();			\
2145 })
2146 
2147 extern int __cond_resched_lock(spinlock_t *lock);
2148 extern int __cond_resched_rwlock_read(rwlock_t *lock);
2149 extern int __cond_resched_rwlock_write(rwlock_t *lock);
2150 
2151 #define MIGHT_RESCHED_RCU_SHIFT		8
2152 #define MIGHT_RESCHED_PREEMPT_MASK	((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)
2153 
2154 #ifndef CONFIG_PREEMPT_RT
2155 /*
2156  * Non-RT kernels have an elevated preempt count due to the held lock,
2157  * but are not allowed to be inside an RCU read side critical section
2158  */
2159 # define PREEMPT_LOCK_RESCHED_OFFSETS	PREEMPT_LOCK_OFFSET
2160 #else
2161 /*
2162  * spin/rw_lock() on RT implies rcu_read_lock(). The might_sleep() check in
2163  * cond_resched*lock() has to take that into account because it checks for
2164  * preempt_count() and rcu_preempt_depth().
2165  */
2166 # define PREEMPT_LOCK_RESCHED_OFFSETS	\
2167 	(PREEMPT_LOCK_OFFSET + (1U << MIGHT_RESCHED_RCU_SHIFT))
2168 #endif
2169 
2170 #define cond_resched_lock(lock) ({						\
2171 	__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS);	\
2172 	__cond_resched_lock(lock);						\
2173 })
2174 
2175 #define cond_resched_rwlock_read(lock) ({					\
2176 	__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS);	\
2177 	__cond_resched_rwlock_read(lock);					\
2178 })
2179 
2180 #define cond_resched_rwlock_write(lock) ({					\
2181 	__might_resched(__FILE__, __LINE__, PREEMPT_LOCK_RESCHED_OFFSETS);	\
2182 	__cond_resched_rwlock_write(lock);					\
2183 })
2184 
2185 static inline void cond_resched_rcu(void)
2186 {
2187 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
2188 	rcu_read_unlock();
2189 	cond_resched();
2190 	rcu_read_lock();
2191 #endif
2192 }
2193 
2194 #ifdef CONFIG_PREEMPT_DYNAMIC
2195 
2196 extern bool preempt_model_none(void);
2197 extern bool preempt_model_voluntary(void);
2198 extern bool preempt_model_full(void);
2199 
2200 #else
2201 
2202 static inline bool preempt_model_none(void)
2203 {
2204 	return IS_ENABLED(CONFIG_PREEMPT_NONE);
2205 }
2206 static inline bool preempt_model_voluntary(void)
2207 {
2208 	return IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY);
2209 }
2210 static inline bool preempt_model_full(void)
2211 {
2212 	return IS_ENABLED(CONFIG_PREEMPT);
2213 }
2214 
2215 #endif
2216 
2217 static inline bool preempt_model_rt(void)
2218 {
2219 	return IS_ENABLED(CONFIG_PREEMPT_RT);
2220 }
2221 
2222 /*
2223  * Does the preemption model allow non-cooperative preemption?
2224  *
2225  * For !CONFIG_PREEMPT_DYNAMIC kernels this is an exact match with
2226  * CONFIG_PREEMPTION; for CONFIG_PREEMPT_DYNAMIC this doesn't work as the
2227  * kernel is *built* with CONFIG_PREEMPTION=y but may run with e.g. the
2228  * PREEMPT_NONE model.
2229  */
2230 static inline bool preempt_model_preemptible(void)
2231 {
2232 	return preempt_model_full() || preempt_model_rt();
2233 }
2234 
2235 /*
2236  * Does a critical section need to be broken due to another
2237  * task waiting? (This technically does not depend on CONFIG_PREEMPTION,
2238  * but reflects a general need for low latency.)
2239  */
2240 static inline int spin_needbreak(spinlock_t *lock)
2241 {
2242 #ifdef CONFIG_PREEMPTION
2243 	return spin_is_contended(lock);
2244 #else
2245 	return 0;
2246 #endif
2247 }
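/*
 * Usage sketch (hypothetical queue 'q'): a lock-holding loop can combine
 * spin_needbreak() with cond_resched_lock() to hand over the lock and
 * the CPU when someone else is waiting:
 *
 *	spin_lock(&q->lock);
 *	while (!list_empty(&q->list)) {
 *		handle_one_entry(q);		(hypothetical helper)
 *		if (spin_needbreak(&q->lock))
 *			cond_resched_lock(&q->lock);
 *	}
 *	spin_unlock(&q->lock);
 */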
2248 
2249 /*
2250  * Check if a rwlock is contended.
2251  * Returns non-zero if there is another task waiting on the rwlock.
2252  * Returns zero if the lock is not contended or the system / underlying
2253  * rwlock implementation does not support contention detection.
2254  * This technically does not depend on CONFIG_PREEMPTION, but reflects a
2255  * general need for low latency.
2256  */
2257 static inline int rwlock_needbreak(rwlock_t *lock)
2258 {
2259 #ifdef CONFIG_PREEMPTION
2260 	return rwlock_is_contended(lock);
2261 #else
2262 	return 0;
2263 #endif
2264 }
2265 
2266 static __always_inline bool need_resched(void)
2267 {
2268 	return unlikely(tif_need_resched());
2269 }
2270 
2271 /*
2272  * Wrappers for p->thread_info->cpu access. No-op on UP.
2273  */
2274 #ifdef CONFIG_SMP
2275 
2276 static inline unsigned int task_cpu(const struct task_struct *p)
2277 {
2278 	return READ_ONCE(task_thread_info(p)->cpu);
2279 }
2280 
2281 extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
2282 
2283 #else
2284 
2285 static inline unsigned int task_cpu(const struct task_struct *p)
2286 {
2287 	return 0;
2288 }
2289 
2290 static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
2291 {
2292 }
2293 
2294 #endif /* CONFIG_SMP */
2295 
2296 extern bool sched_task_on_rq(struct task_struct *p);
2297 extern unsigned long get_wchan(struct task_struct *p);
2298 extern struct task_struct *cpu_curr_snapshot(int cpu);
2299 
2300 /*
2301  * In order to reduce various lock holder preemption latencies provide an
2302  * interface to see if a vCPU is currently running or not.
2303  *
2304  * This allows us to terminate optimistic spin loops and block, analogous to
2305  * the native optimistic spin heuristic of testing if the lock owner task is
2306  * running or not.
2307  */
2308 #ifndef vcpu_is_preempted
2309 static inline bool vcpu_is_preempted(int cpu)
2310 {
2311 	return false;
2312 }
2313 #endif
2314 
2315 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
2316 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
2317 
2318 #ifndef TASK_SIZE_OF
2319 #define TASK_SIZE_OF(tsk)	TASK_SIZE
2320 #endif
2321 
2322 #ifdef CONFIG_SMP
2323 static inline bool owner_on_cpu(struct task_struct *owner)
2324 {
2325 	/*
2326 	 * Due to lock holder preemption, we skip spinning if the task is
2327 	 * not on a CPU or its CPU is preempted.
2328 	 */
2329 	return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner));
2330 }
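/*
 * Usage sketch (hypothetical 'lock' with an ->owner field): owner_on_cpu()
 * is the usual termination condition for an optimistic spin: keep
 * spinning only while the owner is making progress:
 *
 *	rcu_read_lock();
 *	while (READ_ONCE(lock->owner) == owner && owner_on_cpu(owner))
 *		cpu_relax();
 *	rcu_read_unlock();
 */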
2331 
2332 /* Returns effective CPU energy utilization, as seen by the scheduler */
2333 unsigned long sched_cpu_util(int cpu);
2334 #endif /* CONFIG_SMP */
2335 
2336 #ifdef CONFIG_RSEQ
2337 
2338 /*
2339  * Map the event mask on the user-space ABI enum rseq_cs_flags
2340  * for direct mask checks.
2341  */
2342 enum rseq_event_mask_bits {
2343 	RSEQ_EVENT_PREEMPT_BIT	= RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
2344 	RSEQ_EVENT_SIGNAL_BIT	= RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
2345 	RSEQ_EVENT_MIGRATE_BIT	= RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
2346 };
2347 
2348 enum rseq_event_mask {
2349 	RSEQ_EVENT_PREEMPT	= (1U << RSEQ_EVENT_PREEMPT_BIT),
2350 	RSEQ_EVENT_SIGNAL	= (1U << RSEQ_EVENT_SIGNAL_BIT),
2351 	RSEQ_EVENT_MIGRATE	= (1U << RSEQ_EVENT_MIGRATE_BIT),
2352 };
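/*
 * Note: the event bits deliberately alias the RSEQ_CS_FLAG_NO_RESTART_ON_*
 * ABI bit positions, which is what makes the "direct mask checks" above
 * possible; the correspondence could be asserted as, e.g.:
 *
 *	BUILD_BUG_ON(RSEQ_EVENT_PREEMPT !=
 *		     (1U << RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT));
 */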
2353 
2354 static inline void rseq_set_notify_resume(struct task_struct *t)
2355 {
2356 	if (t->rseq)
2357 		set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
2358 }
2359 
2360 void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
2361 
2362 static inline void rseq_handle_notify_resume(struct ksignal *ksig,
2363 					     struct pt_regs *regs)
2364 {
2365 	if (current->rseq)
2366 		__rseq_handle_notify_resume(ksig, regs);
2367 }
2368 
2369 static inline void rseq_signal_deliver(struct ksignal *ksig,
2370 				       struct pt_regs *regs)
2371 {
2372 	preempt_disable();
2373 	__set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
2374 	preempt_enable();
2375 	rseq_handle_notify_resume(ksig, regs);
2376 }
2377 
2378 /* rseq_preempt() requires preemption to be disabled. */
2379 static inline void rseq_preempt(struct task_struct *t)
2380 {
2381 	__set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
2382 	rseq_set_notify_resume(t);
2383 }
2384 
2385 /* rseq_migrate() requires preemption to be disabled. */
2386 static inline void rseq_migrate(struct task_struct *t)
2387 {
2388 	__set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
2389 	rseq_set_notify_resume(t);
2390 }
2391 
2392 /*
2393  * If the parent process has a registered restartable sequences area, the
2394  * child inherits it. Unregister rseq for a clone with CLONE_VM set.
2395  */
2396 static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
2397 {
2398 	if (clone_flags & CLONE_VM) {
2399 		t->rseq = NULL;
2400 		t->rseq_len = 0;
2401 		t->rseq_sig = 0;
2402 		t->rseq_event_mask = 0;
2403 	} else {
2404 		t->rseq = current->rseq;
2405 		t->rseq_len = current->rseq_len;
2406 		t->rseq_sig = current->rseq_sig;
2407 		t->rseq_event_mask = current->rseq_event_mask;
2408 	}
2409 }
2410 
2411 static inline void rseq_execve(struct task_struct *t)
2412 {
2413 	t->rseq = NULL;
2414 	t->rseq_len = 0;
2415 	t->rseq_sig = 0;
2416 	t->rseq_event_mask = 0;
2417 }
2418 
2419 #else
2420 
2421 static inline void rseq_set_notify_resume(struct task_struct *t)
2422 {
2423 }
2424 static inline void rseq_handle_notify_resume(struct ksignal *ksig,
2425 					     struct pt_regs *regs)
2426 {
2427 }
2428 static inline void rseq_signal_deliver(struct ksignal *ksig,
2429 				       struct pt_regs *regs)
2430 {
2431 }
2432 static inline void rseq_preempt(struct task_struct *t)
2433 {
2434 }
2435 static inline void rseq_migrate(struct task_struct *t)
2436 {
2437 }
2438 static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
2439 {
2440 }
2441 static inline void rseq_execve(struct task_struct *t)
2442 {
2443 }
2444 
2445 #endif
2446 
2447 #ifdef CONFIG_DEBUG_RSEQ
2448 
2449 void rseq_syscall(struct pt_regs *regs);
2450 
2451 #else
2452 
2453 static inline void rseq_syscall(struct pt_regs *regs)
2454 {
2455 }
2456 
2457 #endif
2458 
2459 #ifdef CONFIG_SCHED_CORE
2460 extern void sched_core_free(struct task_struct *tsk);
2461 extern void sched_core_fork(struct task_struct *p);
2462 extern int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type,
2463 				unsigned long uaddr);
2464 extern int sched_core_idle_cpu(int cpu);
2465 #else
2466 static inline void sched_core_free(struct task_struct *tsk) { }
2467 static inline void sched_core_fork(struct task_struct *p) { }
2468 static inline int sched_core_idle_cpu(int cpu) { return idle_cpu(cpu); }
2469 #endif
2470 
2471 extern void sched_set_stop_task(int cpu, struct task_struct *stop);
2472 
2473 #endif
2474