1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_SCHED_H
3 #define _LINUX_SCHED_H
4 
5 /*
6  * Define 'struct task_struct' and provide the main scheduler
7  * APIs (schedule(), wakeup variants, etc.)
8  */
9 
10 #include <uapi/linux/sched.h>
11 
12 #include <asm/current.h>
13 
14 #include <linux/pid.h>
15 #include <linux/sem.h>
16 #include <linux/shm.h>
17 #include <linux/kcov.h>
18 #include <linux/mutex.h>
19 #include <linux/plist.h>
20 #include <linux/hrtimer.h>
21 #include <linux/irqflags.h>
22 #include <linux/seccomp.h>
23 #include <linux/nodemask.h>
24 #include <linux/rcupdate.h>
25 #include <linux/refcount.h>
26 #include <linux/resource.h>
27 #include <linux/latencytop.h>
28 #include <linux/sched/prio.h>
29 #include <linux/sched/types.h>
30 #include <linux/signal_types.h>
31 #include <linux/mm_types_task.h>
32 #include <linux/task_io_accounting.h>
33 #include <linux/posix-timers.h>
34 #include <linux/rseq.h>
35 #include <linux/seqlock.h>
36 #include <linux/kcsan.h>
37 #include <linux/sched/rtg.h>
38 
39 /* task_struct member predeclarations (sorted alphabetically): */
40 struct audit_context;
41 struct backing_dev_info;
42 struct bio_list;
43 struct blk_plug;
44 struct bpf_run_ctx;
45 struct capture_control;
46 struct cfs_rq;
47 struct fs_struct;
48 struct futex_pi_state;
49 struct io_context;
50 struct mempolicy;
51 struct nameidata;
52 struct nsproxy;
53 struct perf_event_context;
54 struct pid_namespace;
55 struct pipe_inode_info;
56 struct rcu_node;
57 #ifdef CONFIG_RECLAIM_ACCT
58 struct reclaim_acct;
59 #endif
60 struct reclaim_state;
61 struct robust_list_head;
62 struct root_domain;
63 struct rq;
64 struct sched_attr;
65 struct sched_param;
66 struct seq_file;
67 struct sighand_struct;
68 struct signal_struct;
69 struct task_delay_info;
70 struct task_group;
71 struct io_uring_task;
72 
73 /*
74  * Task state bitmask. NOTE! These bits are also
75  * encoded in fs/proc/array.c: get_task_state().
76  *
77  * We have two separate sets of flags: task->state
78  * is about runnability, while task->exit_state is
79  * about the task exiting. Confusing, but this way
80  * modifying one set can't modify the other one by
81  * mistake.
82  */
83 
84 /* Used in tsk->state: */
85 #define TASK_RUNNING			0x0000
86 #define TASK_INTERRUPTIBLE		0x0001
87 #define TASK_UNINTERRUPTIBLE		0x0002
88 #define __TASK_STOPPED			0x0004
89 #define __TASK_TRACED			0x0008
90 /* Used in tsk->exit_state: */
91 #define EXIT_DEAD			0x0010
92 #define EXIT_ZOMBIE			0x0020
93 #define EXIT_TRACE			(EXIT_ZOMBIE | EXIT_DEAD)
94 /* Used in tsk->state again: */
95 #define TASK_PARKED			0x0040
96 #define TASK_DEAD			0x0080
97 #define TASK_WAKEKILL			0x0100
98 #define TASK_WAKING			0x0200
99 #define TASK_NOLOAD			0x0400
100 #define TASK_NEW			0x0800
101 #define TASK_STATE_MAX			0x1000
102 
103 /* Convenience macros for the sake of set_current_state: */
104 #define TASK_KILLABLE			(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
105 #define TASK_STOPPED			(TASK_WAKEKILL | __TASK_STOPPED)
106 #define TASK_TRACED			(TASK_WAKEKILL | __TASK_TRACED)
107 
108 #define TASK_IDLE			(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
109 
110 /* Convenience macros for the sake of wake_up(): */
111 #define TASK_NORMAL			(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
112 
113 /* get_task_state(): */
114 #define TASK_REPORT			(TASK_RUNNING | TASK_INTERRUPTIBLE | \
115 					 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
116 					 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
117 					 TASK_PARKED)
118 
119 #define task_is_traced(task)		((task->state & __TASK_TRACED) != 0)
120 
121 #define task_is_stopped(task)		((task->state & __TASK_STOPPED) != 0)
122 
123 #define task_is_stopped_or_traced(task)	((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
124 
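/*
 * Illustrative sketch (not part of the original header) of how the helpers
 * above are typically used; 'child' is a hypothetical task_struct pointer on
 * which the caller holds a reference:
 *
 *	if (task_is_stopped_or_traced(child))
 *		;	// child is in __TASK_STOPPED and/or __TASK_TRACED
 *
 *	if (READ_ONCE(child->state) == TASK_RUNNING)
 *		;	// runnable or currently running
 */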
125 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
126 
127 /*
128  * Special states are those that do not use the normal wait-loop pattern. See
129  * the comment with set_special_state().
130  */
131 #define is_special_task_state(state)				\
132 	((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))
133 
134 #define __set_current_state(state_value)			\
135 	do {							\
136 		WARN_ON_ONCE(is_special_task_state(state_value));\
137 		current->task_state_change = _THIS_IP_;		\
138 		current->state = (state_value);			\
139 	} while (0)
140 
141 #define set_current_state(state_value)				\
142 	do {							\
143 		WARN_ON_ONCE(is_special_task_state(state_value));\
144 		current->task_state_change = _THIS_IP_;		\
145 		smp_store_mb(current->state, (state_value));	\
146 	} while (0)
147 
148 #define set_special_state(state_value)					\
149 	do {								\
150 		unsigned long flags; /* may shadow */			\
151 		WARN_ON_ONCE(!is_special_task_state(state_value));	\
152 		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
153 		current->task_state_change = _THIS_IP_;			\
154 		current->state = (state_value);				\
155 		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
156 	} while (0)
157 #else
158 /*
159  * set_current_state() includes a barrier so that the write of current->state
160  * is correctly serialised wrt the caller's subsequent test of whether to
161  * actually sleep:
162  *
163  *   for (;;) {
164  *	set_current_state(TASK_UNINTERRUPTIBLE);
165  *	if (CONDITION)
166  *	   break;
167  *
168  *	schedule();
169  *   }
170  *   __set_current_state(TASK_RUNNING);
171  *
172  * If the caller does not need such serialisation (because, for instance, the
173  * CONDITION test and condition change and wakeup are under the same lock) then
174  * use __set_current_state().
175  *
176  * The above is typically ordered against the wakeup, which does:
177  *
178  *   CONDITION = 1;
179  *   wake_up_state(p, TASK_UNINTERRUPTIBLE);
180  *
181  * where wake_up_state()/try_to_wake_up() executes a full memory barrier before
182  * accessing p->state.
183  *
184  * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
185  * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
186  * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
187  *
188  * However, with slightly different timing the wakeup TASK_RUNNING store can
189  * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not
190  * a problem either because that will result in one extra go around the loop
191  * and our @cond test will save the day.
192  *
193  * Also see the comments of try_to_wake_up().
194  */
195 #define __set_current_state(state_value)				\
196 	current->state = (state_value)
197 
198 #define set_current_state(state_value)					\
199 	smp_store_mb(current->state, (state_value))
200 
201 /*
202  * set_special_state() should be used for those states when the blocking task
203  * can not use the regular condition based wait-loop. In that case we must
204  * serialize against wakeups such that any possible in-flight TASK_RUNNING stores
205  * will not collide with our state change.
206  */
207 #define set_special_state(state_value)					\
208 	do {								\
209 		unsigned long flags; /* may shadow */			\
210 		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
211 		current->state = (state_value);				\
212 		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
213 	} while (0)
214 
215 #endif
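/*
 * For reference, a minimal waker counterpart to the wait-loop shown above.
 * 'CONDITION' and 'sleeper' are placeholder names, not identifiers from this
 * header:
 *
 *	CONDITION = 1;
 *	wake_up_process(sleeper);	// try_to_wake_up() provides the full
 *					// memory barrier described above
 */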
216 
217 /* Task command name length: */
218 #define TASK_COMM_LEN			16
219 
220 enum task_event {
221 	PUT_PREV_TASK   = 0,
222 	PICK_NEXT_TASK  = 1,
223 	TASK_WAKE       = 2,
224 	TASK_MIGRATE    = 3,
225 	TASK_UPDATE     = 4,
226 	IRQ_UPDATE      = 5,
227 };
228 
229 /* Note: this needs to be in sync with the migrate_type_names array */
230 enum migrate_types {
231 	GROUP_TO_RQ,
232 	RQ_TO_GROUP,
233 };
234 
235 #ifdef CONFIG_CPU_ISOLATION_OPT
236 extern int sched_isolate_count(const cpumask_t *mask, bool include_offline);
237 extern int sched_isolate_cpu(int cpu);
238 extern int sched_unisolate_cpu(int cpu);
239 extern int sched_unisolate_cpu_unlocked(int cpu);
240 #else
241 static inline int sched_isolate_count(const cpumask_t *mask,
242 				      bool include_offline)
243 {
244 	cpumask_t count_mask;
245 
246 	if (include_offline)
247 		cpumask_andnot(&count_mask, mask, cpu_online_mask);
248 	else
249 		return 0;
250 
251 	return cpumask_weight(&count_mask);
252 }
253 
254 static inline int sched_isolate_cpu(int cpu)
255 {
256 	return 0;
257 }
258 
259 static inline int sched_unisolate_cpu(int cpu)
260 {
261 	return 0;
262 }
263 
264 static inline int sched_unisolate_cpu_unlocked(int cpu)
265 {
266 	return 0;
267 }
268 #endif
269 
270 extern void scheduler_tick(void);
271 
272 #define	MAX_SCHEDULE_TIMEOUT		LONG_MAX
273 
274 extern long schedule_timeout(long timeout);
275 extern long schedule_timeout_interruptible(long timeout);
276 extern long schedule_timeout_killable(long timeout);
277 extern long schedule_timeout_uninterruptible(long timeout);
278 extern long schedule_timeout_idle(long timeout);
279 asmlinkage void schedule(void);
280 extern void schedule_preempt_disabled(void);
281 asmlinkage void preempt_schedule_irq(void);
282 
283 extern int __must_check io_schedule_prepare(void);
284 extern void io_schedule_finish(int token);
285 extern long io_schedule_timeout(long timeout);
286 extern void io_schedule(void);
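/*
 * Typical (illustrative) use of schedule_timeout(): the caller must set the
 * task state first, otherwise the task will not actually sleep. 'remaining'
 * is a hypothetical local variable:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(HZ);	// sleep for about one second
 *	// remaining == 0 on timeout, > 0 jiffies left if woken early
 *
 * schedule_timeout_interruptible()/_uninterruptible() bundle the state change
 * and the sleep into a single call.
 */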
287 
288 /**
289  * struct prev_cputime - snapshot of system and user cputime
290  * @utime: time spent in user mode
291  * @stime: time spent in system mode
292  * @lock: protects the above two fields
293  *
294  * Stores previous user/system time values such that we can guarantee
295  * monotonicity.
296  */
297 struct prev_cputime {
298 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
299 	u64				utime;
300 	u64				stime;
301 	raw_spinlock_t			lock;
302 #endif
303 };
304 
305 enum vtime_state {
306 	/* Task is sleeping or running in a CPU with VTIME inactive: */
307 	VTIME_INACTIVE = 0,
308 	/* Task is idle */
309 	VTIME_IDLE,
310 	/* Task runs in kernelspace in a CPU with VTIME active: */
311 	VTIME_SYS,
312 	/* Task runs in userspace in a CPU with VTIME active: */
313 	VTIME_USER,
314 	/* Task runs as guests in a CPU with VTIME active: */
315 	VTIME_GUEST,
316 };
317 
318 struct vtime {
319 	seqcount_t		seqcount;
320 	unsigned long long	starttime;
321 	enum vtime_state	state;
322 	unsigned int		cpu;
323 	u64			utime;
324 	u64			stime;
325 	u64			gtime;
326 };
327 
328 /*
329  * Utilization clamp constraints.
330  * @UCLAMP_MIN:	Minimum utilization
331  * @UCLAMP_MAX:	Maximum utilization
332  * @UCLAMP_CNT:	Utilization clamp constraints count
333  */
334 enum uclamp_id {
335 	UCLAMP_MIN = 0,
336 	UCLAMP_MAX,
337 	UCLAMP_CNT
338 };
339 
340 #ifdef CONFIG_SMP
341 extern struct root_domain def_root_domain;
342 extern struct mutex sched_domains_mutex;
343 #endif
344 
345 struct sched_info {
346 #ifdef CONFIG_SCHED_INFO
347 	/* Cumulative counters: */
348 
349 	/* # of times we have run on this CPU: */
350 	unsigned long			pcount;
351 
352 	/* Time spent waiting on a runqueue: */
353 	unsigned long long		run_delay;
354 
355 	/* Timestamps: */
356 
357 	/* When did we last run on a CPU? */
358 	unsigned long long		last_arrival;
359 
360 	/* When were we last queued to run? */
361 	unsigned long long		last_queued;
362 
363 #endif /* CONFIG_SCHED_INFO */
364 };
365 
366 /*
367  * Integer metrics need fixed point arithmetic, e.g., sched/fair
368  * has a few: load, load_avg, util_avg, freq, and capacity.
369  *
370  * We define a basic fixed point arithmetic range, and then formalize
371  * all these metrics based on that basic range.
372  */
373 # define SCHED_FIXEDPOINT_SHIFT		10
374 # define SCHED_FIXEDPOINT_SCALE		(1L << SCHED_FIXEDPOINT_SHIFT)
375 
376 /* Increase resolution of cpu_capacity calculations */
377 # define SCHED_CAPACITY_SHIFT		SCHED_FIXEDPOINT_SHIFT
378 # define SCHED_CAPACITY_SCALE		(1L << SCHED_CAPACITY_SHIFT)
379 
380 struct load_weight {
381 	unsigned long			weight;
382 	u32				inv_weight;
383 };
384 
385 /**
386  * struct util_est - Estimated utilization of FAIR tasks
387  * @enqueued: instantaneous estimated utilization of a task/cpu
388  * @ewma:     the Exponential Weighted Moving Average (EWMA)
389  *            utilization of a task
390  *
391  * Support data structure to track an Exponential Weighted Moving Average
392  * (EWMA) of a FAIR task's utilization. New samples are added to the moving
393  * average each time a task completes an activation. Sample's weight is chosen
394  * so that the EWMA will be relatively insensitive to transient changes to the
395  * task's workload.
396  *
397  * The enqueued attribute has a slightly different meaning for tasks and cpus:
398  * - task:   the task's util_avg at last task dequeue time
399  * - cfs_rq: the sum of util_est.enqueued for each RUNNABLE task on that CPU
400  * Thus, the util_est.enqueued of a task represents the contribution on the
401  * estimated utilization of the CPU where that task is currently enqueued.
402  *
403  * Only for tasks we track a moving average of the past instantaneous
404  * estimated utilization. This allows us to absorb sporadic drops in utilization
405  * of an otherwise almost periodic task.
406  *
407  * The UTIL_AVG_UNCHANGED flag is used to synchronize util_est with util_avg
408  * updates. When a task is dequeued, its util_est should not be updated if its
409  * util_avg has not been updated in the meantime.
410  * This information is mapped into the MSB bit of util_est.enqueued at dequeue
411  * time. Since max value of util_est.enqueued for a task is 1024 (PELT util_avg
412  * for a task) it is safe to use MSB.
413  */
414 struct util_est {
415 	unsigned int			enqueued;
416 	unsigned int			ewma;
417 #define UTIL_EST_WEIGHT_SHIFT		2
418 #define UTIL_AVG_UNCHANGED		0x80000000
419 } __attribute__((__aligned__(sizeof(u64))));
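/*
 * Rough numeric sketch of the EWMA update implied by UTIL_EST_WEIGHT_SHIFT
 * (the actual update lives in kernel/sched/fair.c; this only shows the shape):
 *
 *	ewma = ewma - (ewma >> UTIL_EST_WEIGHT_SHIFT)
 *	            + (enqueued >> UTIL_EST_WEIGHT_SHIFT);
 *
 * i.e. each new sample contributes 2^-UTIL_EST_WEIGHT_SHIFT (1/4) of its
 * value, so e.g. ewma = 400 and enqueued = 800 give a new ewma of ~500.
 */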
420 
421 /*
422  * The load/runnable/util_avg accumulates an infinite geometric series
423  * (see __update_load_avg_cfs_rq() in kernel/sched/pelt.c).
424  *
425  * [load_avg definition]
426  *
427  *   load_avg = runnable% * scale_load_down(load)
428  *
429  * [runnable_avg definition]
430  *
431  *   runnable_avg = runnable% * SCHED_CAPACITY_SCALE
432  *
433  * [util_avg definition]
434  *
435  *   util_avg = running% * SCHED_CAPACITY_SCALE
436  *
437  * where runnable% is the time ratio that a sched_entity is runnable and
438  * running% the time ratio that a sched_entity is running.
439  *
440  * For cfs_rq, they are the aggregated values of all runnable and blocked
441  * sched_entities.
442  *
443  * The load/runnable/util_avg doesn't directly factor frequency scaling and CPU
444  * capacity scaling. The scaling is done through the rq_clock_pelt that is used
445  * for computing those signals (see update_rq_clock_pelt())
446  *
447  * N.B., the above ratios (runnable% and running%) themselves are in the
448  * range of [0, 1]. To do fixed point arithmetic, we therefore scale them
449  * to as large a range as necessary. This is for example reflected by
450  * util_avg's SCHED_CAPACITY_SCALE.
451  *
452  * [Overflow issue]
453  *
454  * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
455  * with the highest load (=88761), always runnable on a single cfs_rq,
456  * and should not overflow as the number already hits PID_MAX_LIMIT.
457  *
458  * For all other cases (including 32-bit kernels), struct load_weight's
459  * weight will overflow first before we do, because:
460  *
461  *    Max(load_avg) <= Max(load.weight)
462  *
463  * Then it is the load_weight's responsibility to consider overflow
464  * issues.
465  */
466 struct sched_avg {
467 	u64				last_update_time;
468 	u64				load_sum;
469 	u64				runnable_sum;
470 	u32				util_sum;
471 	u32				period_contrib;
472 	unsigned long			load_avg;
473 	unsigned long			runnable_avg;
474 	unsigned long			util_avg;
475 	struct util_est			util_est;
476 } ____cacheline_aligned;
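/*
 * Worked example of the definitions above: a task that has recently been
 * running ~50% of the time contributes util_avg ~= 0.5 * SCHED_CAPACITY_SCALE
 * = 512, while a task runnable ~25% of the time with a scaled-down load of
 * 1024 (nice 0) contributes load_avg ~= 0.25 * 1024 = 256.
 */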
477 
478 struct sched_statistics {
479 #ifdef CONFIG_SCHEDSTATS
480 	u64				wait_start;
481 	u64				wait_max;
482 	u64				wait_count;
483 	u64				wait_sum;
484 	u64				iowait_count;
485 	u64				iowait_sum;
486 
487 	u64				sleep_start;
488 	u64				sleep_max;
489 	s64				sum_sleep_runtime;
490 
491 	u64				block_start;
492 	u64				block_max;
493 	u64				exec_max;
494 	u64				slice_max;
495 
496 	u64				nr_migrations_cold;
497 	u64				nr_failed_migrations_affine;
498 	u64				nr_failed_migrations_running;
499 	u64				nr_failed_migrations_hot;
500 	u64				nr_forced_migrations;
501 
502 	u64				nr_wakeups;
503 	u64				nr_wakeups_sync;
504 	u64				nr_wakeups_migrate;
505 	u64				nr_wakeups_local;
506 	u64				nr_wakeups_remote;
507 	u64				nr_wakeups_affine;
508 	u64				nr_wakeups_affine_attempts;
509 	u64				nr_wakeups_passive;
510 	u64				nr_wakeups_idle;
511 #endif
512 };
513 
514 struct sched_entity {
515 	/* For load-balancing: */
516 	struct load_weight		load;
517 	struct rb_node			run_node;
518 	struct list_head		group_node;
519 	unsigned int			on_rq;
520 
521 	u64				exec_start;
522 	u64				sum_exec_runtime;
523 	u64				vruntime;
524 	u64				prev_sum_exec_runtime;
525 
526 	u64				nr_migrations;
527 
528 	struct sched_statistics		statistics;
529 
530 #ifdef CONFIG_FAIR_GROUP_SCHED
531 	int				depth;
532 	struct sched_entity		*parent;
533 	/* rq on which this entity is (to be) queued: */
534 	struct cfs_rq			*cfs_rq;
535 	/* rq "owned" by this entity/group: */
536 	struct cfs_rq			*my_q;
537 	/* cached value of my_q->h_nr_running */
538 	unsigned long			runnable_weight;
539 #endif
540 
541 #ifdef CONFIG_SCHED_LATENCY_NICE
542 	int				latency_weight;
543 #endif
544 
545 #ifdef CONFIG_SMP
546 	/*
547 	 * Per entity load average tracking.
548 	 *
549 	 * Put into separate cache line so it does not
550 	 * collide with read-mostly values above.
551 	 */
552 	struct sched_avg		avg;
553 #endif
554 };
555 
556 #ifdef CONFIG_SCHED_WALT
557 extern void sched_exit(struct task_struct *p);
558 extern int sched_set_init_task_load(struct task_struct *p, int init_load_pct);
559 extern u32 sched_get_init_task_load(struct task_struct *p);
560 extern void free_task_load_ptrs(struct task_struct *p);
561 #define RAVG_HIST_SIZE_MAX  5
562 struct ravg {
563 	/*
564 	 * 'mark_start' marks the beginning of an event (task waking up, task
565 	 * starting to execute, task being preempted) within a window
566 	 *
567 	 * 'sum' represents how runnable a task has been within current
568 	 * window. It incorporates both running time and wait time and is
569 	 * frequency scaled.
570 	 *
571 	 * 'sum_history' keeps track of history of 'sum' seen over previous
572 	 * RAVG_HIST_SIZE windows. Windows where task was entirely sleeping are
573 	 * ignored.
574 	 *
575 	 * 'demand' represents maximum sum seen over previous
576 	 * sysctl_sched_ravg_hist_size windows. 'demand' could drive frequency
577 	 * demand for tasks.
578 	 *
579 	 * 'curr_window_cpu' represents task's contribution to cpu busy time on
580 	 * various CPUs in the current window
581 	 *
582 	 * 'prev_window_cpu' represents task's contribution to cpu busy time on
583 	 * various CPUs in the previous window
584 	 *
585 	 * 'curr_window' represents the sum of all entries in curr_window_cpu
586 	 *
587 	 * 'prev_window' represents the sum of all entries in prev_window_cpu
588 	 *
589 	 */
590 	u64 mark_start;
591 	u32 sum, demand;
592 	u32 sum_history[RAVG_HIST_SIZE_MAX];
593 	u32 *curr_window_cpu, *prev_window_cpu;
594 	u32 curr_window, prev_window;
595 	u16 active_windows;
596 	u16 demand_scaled;
597 };
598 #else
599 static inline void sched_exit(struct task_struct *p) { }
600 static inline void free_task_load_ptrs(struct task_struct *p) { }
601 #endif /* CONFIG_SCHED_WALT */
602 
603 struct sched_rt_entity {
604 	struct list_head		run_list;
605 	unsigned long			timeout;
606 	unsigned long			watchdog_stamp;
607 	unsigned int			time_slice;
608 	unsigned short			on_rq;
609 	unsigned short			on_list;
610 
611 	struct sched_rt_entity		*back;
612 #ifdef CONFIG_RT_GROUP_SCHED
613 	struct sched_rt_entity		*parent;
614 	/* rq on which this entity is (to be) queued: */
615 	struct rt_rq			*rt_rq;
616 	/* rq "owned" by this entity/group: */
617 	struct rt_rq			*my_q;
618 #endif
619 } __randomize_layout;
620 
621 struct sched_dl_entity {
622 	struct rb_node			rb_node;
623 
624 	/*
625 	 * Original scheduling parameters. Copied here from sched_attr
626 	 * during sched_setattr(), they will remain the same until
627 	 * the next sched_setattr().
628 	 */
629 	u64				dl_runtime;	/* Maximum runtime for each instance	*/
630 	u64				dl_deadline;	/* Relative deadline of each instance	*/
631 	u64				dl_period;	/* Separation of two instances (period) */
632 	u64				dl_bw;		/* dl_runtime / dl_period		*/
633 	u64				dl_density;	/* dl_runtime / dl_deadline		*/
634 
635 	/*
636 	 * Actual scheduling parameters. Initialized with the values above,
637 	 * they are continuously updated during task execution. Note that
638 	 * the remaining runtime could be < 0 in case we are in overrun.
639 	 */
640 	s64				runtime;	/* Remaining runtime for this instance	*/
641 	u64				deadline;	/* Absolute deadline for this instance	*/
642 	unsigned int			flags;		/* Specifying the scheduler behaviour	*/
643 
644 	/*
645 	 * Some bool flags:
646 	 *
647 	 * @dl_throttled tells if we exhausted the runtime. If so, the
648 	 * task has to wait for a replenishment to be performed at the
649 	 * next firing of dl_timer.
650 	 *
651 	 * @dl_yielded tells if task gave up the CPU before consuming
652 	 * all its available runtime during the last job.
653 	 *
654 	 * @dl_non_contending tells if the task is inactive while still
655 	 * contributing to the active utilization. In other words, it
656 	 * indicates if the inactive timer has been armed and its handler
657 	 * has not been executed yet. This flag is useful to avoid race
658 	 * conditions between the inactive timer handler and the wakeup
659 	 * code.
660 	 *
661 	 * @dl_overrun tells if the task asked to be informed about runtime
662 	 * overruns.
663 	 */
664 	unsigned int			dl_throttled      : 1;
665 	unsigned int			dl_yielded        : 1;
666 	unsigned int			dl_non_contending : 1;
667 	unsigned int			dl_overrun	  : 1;
668 
669 	/*
670 	 * Bandwidth enforcement timer. Each -deadline task has its
671 	 * own bandwidth to be enforced, thus we need one timer per task.
672 	 */
673 	struct hrtimer			dl_timer;
674 
675 	/*
676 	 * Inactive timer, responsible for decreasing the active utilization
677 	 * at the "0-lag time". When a -deadline task blocks, it contributes
678 	 * to GRUB's active utilization until the "0-lag time", hence a
679 	 * timer is needed to decrease the active utilization at the correct
680 	 * time.
681 	 */
682 	struct hrtimer inactive_timer;
683 
684 #ifdef CONFIG_RT_MUTEXES
685 	/*
686 	 * Priority Inheritance. When a DEADLINE scheduling entity is boosted
687 	 * pi_se points to the donor, otherwise points to the dl_se it belongs
688 	 * to (the original one/itself).
689 	 */
690 	struct sched_dl_entity *pi_se;
691 #endif
692 };
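/*
 * Worked example of the parameters above: a task admitted with
 * dl_runtime = 10 ms and dl_period = 100 ms reserves 10% of a CPU, so
 * dl_bw corresponds to 0.10 in fixed point (dl_runtime / dl_period); with
 * dl_deadline = 50 ms, dl_density corresponds to 0.20
 * (dl_runtime / dl_deadline).
 */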
693 
694 #ifdef CONFIG_UCLAMP_TASK
695 /* Number of utilization clamp buckets (shorter alias) */
696 #define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT
697 
698 /*
699  * Utilization clamp for a scheduling entity
700  * @value:		clamp value "assigned" to a se
701  * @bucket_id:		bucket index corresponding to the "assigned" value
702  * @active:		the se is currently refcounted in a rq's bucket
703  * @user_defined:	the requested clamp value comes from user-space
704  *
705  * The bucket_id is the index of the clamp bucket matching the clamp value
706  * which is pre-computed and stored to avoid expensive integer divisions from
707  * the fast path.
708  *
709  * The active bit is set whenever a task has got an "effective" value assigned,
710  * which can be different from the clamp value "requested" from user-space.
711  * This allows us to know that a task is refcounted in the rq's bucket corresponding
712  * to the "effective" bucket_id.
713  *
714  * The user_defined bit is set whenever a task has got a task-specific clamp
715  * value requested from userspace, i.e. the system defaults apply to this task
716  * just as a restriction. This allows relaxing default clamps when a less
717  * restrictive task-specific value has been requested, thus allowing to
718  * implement a "nice" semantic. For example, a task running with a 20%
719  * default boost can still drop its own boosting to 0%.
720  */
721 struct uclamp_se {
722 	unsigned int value		: bits_per(SCHED_CAPACITY_SCALE);
723 	unsigned int bucket_id		: bits_per(UCLAMP_BUCKETS);
724 	unsigned int active		: 1;
725 	unsigned int user_defined	: 1;
726 };
727 #endif /* CONFIG_UCLAMP_TASK */
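/*
 * Sketch of the value -> bucket_id mapping described above (the actual helper
 * lives in kernel/sched/core.c): clamp values in [0..SCHED_CAPACITY_SCALE]
 * are split into UCLAMP_BUCKETS roughly equal ranges,
 *
 *	bucket_id = value / (SCHED_CAPACITY_SCALE / UCLAMP_BUCKETS);
 *
 * so the division happens once, when the clamp value is set, and never on the
 * enqueue/dequeue fast path.
 */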
728 
729 union rcu_special {
730 	struct {
731 		u8			blocked;
732 		u8			need_qs;
733 		u8			exp_hint; /* Hint for performance. */
734 		u8			need_mb; /* Readers need smp_mb(). */
735 	} b; /* Bits. */
736 	u32 s; /* Set of bits. */
737 };
738 
739 enum perf_event_task_context {
740 	perf_invalid_context = -1,
741 	perf_hw_context = 0,
742 	perf_sw_context,
743 	perf_nr_task_contexts,
744 };
745 
746 struct wake_q_node {
747 	struct wake_q_node *next;
748 };
749 
750 #ifdef CONFIG_QOS_CTRL
751 struct qos_task_struct {
752 	/*
753 	 * 'in_qos' marks the qos level of the current task, greater value for
754 	 * greater qos, range from (NO_QOS, NR_QOS)
755 	 *
756 	 *
757 	 * 'qos_list' is used to track tasks with qos supply in auth_struct
758 	 */
759 	int                 in_qos;
760 	struct list_head    qos_list;
761 };
762 #endif
763 
764 struct task_struct {
765 #ifdef CONFIG_THREAD_INFO_IN_TASK
766 	/*
767 	 * For reasons of header soup (see current_thread_info()), this
768 	 * must be the first element of task_struct.
769 	 */
770 	struct thread_info		thread_info;
771 #endif
772 	/* -1 unrunnable, 0 runnable, >0 stopped: */
773 	volatile long			state;
774 
775 	/*
776 	 * This begins the randomizable portion of task_struct. Only
777 	 * scheduling-critical items should be added above here.
778 	 */
779 	randomized_struct_fields_start
780 
781 	void				*stack;
782 	refcount_t			usage;
783 	/* Per task flags (PF_*), defined further below: */
784 	unsigned int			flags;
785 	unsigned int			ptrace;
786 
787 #ifdef CONFIG_SMP
788 	int				on_cpu;
789 	struct __call_single_node	wake_entry;
790 #ifdef CONFIG_THREAD_INFO_IN_TASK
791 	/* Current CPU: */
792 	unsigned int			cpu;
793 #endif
794 	unsigned int			wakee_flips;
795 	unsigned long			wakee_flip_decay_ts;
796 	struct task_struct		*last_wakee;
797 
798 	/*
799 	 * recent_used_cpu is initially set as the last CPU used by a task
800 	 * that wakes affine another task. Waker/wakee relationships can
801 	 * push tasks around a CPU where each wakeup moves to the next one.
802 	 * Tracking a recently used CPU allows a quick search for a recently
803 	 * used CPU that may be idle.
804 	 */
805 	int				recent_used_cpu;
806 	int				wake_cpu;
807 #endif
808 	int				on_rq;
809 
810 	int				prio;
811 	int				static_prio;
812 	int				normal_prio;
813 	unsigned int			rt_priority;
814 #ifdef CONFIG_SCHED_LATENCY_NICE
815 	int				latency_prio;
816 #endif
817 
818 	const struct sched_class	*sched_class;
819 	struct sched_entity		se;
820 	struct sched_rt_entity		rt;
821 #ifdef CONFIG_SCHED_WALT
822 	struct ravg ravg;
823 	/*
824 	 * 'init_load_pct' represents the initial task load assigned to children
825 	 * of this task
826 	 */
827 	u32 init_load_pct;
828 	u64 last_sleep_ts;
829 #endif
830 
831 #ifdef CONFIG_SCHED_RTG
832 	int rtg_depth;
833 	struct related_thread_group	*grp;
834 	struct list_head		grp_list;
835 #endif
836 
837 #ifdef CONFIG_CGROUP_SCHED
838 	struct task_group		*sched_task_group;
839 #endif
840 	struct sched_dl_entity		dl;
841 
842 #ifdef CONFIG_UCLAMP_TASK
843 	/*
844 	 * Clamp values requested for a scheduling entity.
845 	 * Must be updated with task_rq_lock() held.
846 	 */
847 	struct uclamp_se		uclamp_req[UCLAMP_CNT];
848 	/*
849 	 * Effective clamp values used for a scheduling entity.
850 	 * Must be updated with task_rq_lock() held.
851 	 */
852 	struct uclamp_se		uclamp[UCLAMP_CNT];
853 #endif
854 
855 #ifdef CONFIG_PREEMPT_NOTIFIERS
856 	/* List of struct preempt_notifier: */
857 	struct hlist_head		preempt_notifiers;
858 #endif
859 
860 #ifdef CONFIG_BLK_DEV_IO_TRACE
861 	unsigned int			btrace_seq;
862 #endif
863 
864 	unsigned int			policy;
865 	int				nr_cpus_allowed;
866 	const cpumask_t			*cpus_ptr;
867 	cpumask_t			cpus_mask;
868 
869 #ifdef CONFIG_PREEMPT_RCU
870 	int				rcu_read_lock_nesting;
871 	union rcu_special		rcu_read_unlock_special;
872 	struct list_head		rcu_node_entry;
873 	struct rcu_node			*rcu_blocked_node;
874 #endif /* #ifdef CONFIG_PREEMPT_RCU */
875 
876 #ifdef CONFIG_TASKS_RCU
877 	unsigned long			rcu_tasks_nvcsw;
878 	u8				rcu_tasks_holdout;
879 	u8				rcu_tasks_idx;
880 	int				rcu_tasks_idle_cpu;
881 	struct list_head		rcu_tasks_holdout_list;
882 #endif /* #ifdef CONFIG_TASKS_RCU */
883 
884 #ifdef CONFIG_TASKS_TRACE_RCU
885 	int				trc_reader_nesting;
886 	int				trc_ipi_to_cpu;
887 	union rcu_special		trc_reader_special;
888 	bool				trc_reader_checked;
889 	struct list_head		trc_holdout_list;
890 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
891 
892 	struct sched_info		sched_info;
893 
894 	struct list_head		tasks;
895 #ifdef CONFIG_SMP
896 	struct plist_node		pushable_tasks;
897 	struct rb_node			pushable_dl_tasks;
898 #endif
899 
900 	struct mm_struct		*mm;
901 	struct mm_struct		*active_mm;
902 
903 	/* Per-thread vma caching: */
904 	struct vmacache			vmacache;
905 
906 #ifdef SPLIT_RSS_COUNTING
907 	struct task_rss_stat		rss_stat;
908 #endif
909 	int				exit_state;
910 	int				exit_code;
911 	int				exit_signal;
912 	/* The signal sent when the parent dies: */
913 	int				pdeath_signal;
914 	/* JOBCTL_*, siglock protected: */
915 	unsigned long			jobctl;
916 
917 	/* Used for emulating ABI behavior of previous Linux versions: */
918 	unsigned int			personality;
919 
920 	/* Scheduler bits, serialized by scheduler locks: */
921 	unsigned			sched_reset_on_fork:1;
922 	unsigned			sched_contributes_to_load:1;
923 	unsigned			sched_migrated:1;
924 #ifdef CONFIG_PSI
925 	unsigned			sched_psi_wake_requeue:1;
926 #endif
927 
928 	/* Force alignment to the next boundary: */
929 	unsigned			:0;
930 
931 	/* Unserialized, strictly 'current' */
932 
933 	/*
934 	 * This field must not be in the scheduler word above due to wakelist
935 	 * queueing no longer being serialized by p->on_cpu. However:
936 	 *
937 	 * p->XXX = X;			ttwu()
938 	 * schedule()			  if (p->on_rq && ..) // false
939 	 *   smp_mb__after_spinlock();	  if (smp_load_acquire(&p->on_cpu) && //true
940 	 *   deactivate_task()		      ttwu_queue_wakelist())
941 	 *     p->on_rq = 0;			p->sched_remote_wakeup = Y;
942 	 *
943 	 * guarantees all stores of 'current' are visible before
944 	 * ->sched_remote_wakeup gets used, so it can be in this word.
945 	 */
946 	unsigned			sched_remote_wakeup:1;
947 
948 	/* Bit to tell LSMs we're in execve(): */
949 	unsigned			in_execve:1;
950 	unsigned			in_iowait:1;
951 #ifndef TIF_RESTORE_SIGMASK
952 	unsigned			restore_sigmask:1;
953 #endif
954 #ifdef CONFIG_MEMCG
955 	unsigned			in_user_fault:1;
956 #endif
957 #ifdef CONFIG_COMPAT_BRK
958 	unsigned			brk_randomized:1;
959 #endif
960 #ifdef CONFIG_CGROUPS
961 	/* disallow userland-initiated cgroup migration */
962 	unsigned			no_cgroup_migration:1;
963 	/* task is frozen/stopped (used by the cgroup freezer) */
964 	unsigned			frozen:1;
965 #endif
966 #ifdef CONFIG_BLK_CGROUP
967 	unsigned			use_memdelay:1;
968 #endif
969 #ifdef CONFIG_PSI
970 	/* Stalled due to lack of memory */
971 	unsigned			in_memstall:1;
972 #endif
973 
974 	unsigned long			atomic_flags; /* Flags requiring atomic access. */
975 
976 	struct restart_block		restart_block;
977 
978 	pid_t				pid;
979 	pid_t				tgid;
980 
981 #ifdef CONFIG_STACKPROTECTOR
982 	/* Canary value for the -fstack-protector GCC feature: */
983 	unsigned long			stack_canary;
984 #endif
985 	/*
986 	 * Pointers to the (original) parent process, youngest child, younger sibling,
987 	 * older sibling, respectively.  (p->father can be replaced with
988 	 * p->real_parent->pid)
989 	 */
990 
991 	/* Real parent process: */
992 	struct task_struct __rcu	*real_parent;
993 
994 	/* Recipient of SIGCHLD, wait4() reports: */
995 	struct task_struct __rcu	*parent;
996 
997 	/*
998 	 * Children/sibling form the list of natural children:
999 	 */
1000 	struct list_head		children;
1001 	struct list_head		sibling;
1002 	struct task_struct		*group_leader;
1003 
1004 	/*
1005 	 * 'ptraced' is the list of tasks this task is using ptrace() on.
1006 	 *
1007 	 * This includes both natural children and PTRACE_ATTACH targets.
1008 	 * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
1009 	 */
1010 	struct list_head		ptraced;
1011 	struct list_head		ptrace_entry;
1012 
1013 	/* PID/PID hash table linkage. */
1014 	struct pid			*thread_pid;
1015 	struct hlist_node		pid_links[PIDTYPE_MAX];
1016 	struct list_head		thread_group;
1017 	struct list_head		thread_node;
1018 
1019 	struct completion		*vfork_done;
1020 
1021 	/* CLONE_CHILD_SETTID: */
1022 	int __user			*set_child_tid;
1023 
1024 	/* CLONE_CHILD_CLEARTID: */
1025 	int __user			*clear_child_tid;
1026 
1027 	/* PF_IO_WORKER */
1028 	void				*pf_io_worker;
1029 
1030 	u64				utime;
1031 	u64				stime;
1032 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
1033 	u64				utimescaled;
1034 	u64				stimescaled;
1035 #endif
1036 	u64				gtime;
1037 	struct prev_cputime		prev_cputime;
1038 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1039 	struct vtime			vtime;
1040 #endif
1041 
1042 #ifdef CONFIG_NO_HZ_FULL
1043 	atomic_t			tick_dep_mask;
1044 #endif
1045 	/* Context switch counts: */
1046 	unsigned long			nvcsw;
1047 	unsigned long			nivcsw;
1048 
1049 	/* Monotonic time in nsecs: */
1050 	u64				start_time;
1051 
1052 	/* Boot based time in nsecs: */
1053 	u64				start_boottime;
1054 
1055 	/* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
1056 	unsigned long			min_flt;
1057 	unsigned long			maj_flt;
1058 
1059 	/* Empty if CONFIG_POSIX_CPUTIMERS=n */
1060 	struct posix_cputimers		posix_cputimers;
1061 
1062 #ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
1063 	struct posix_cputimers_work	posix_cputimers_work;
1064 #endif
1065 
1066 	/* Process credentials: */
1067 
1068 	/* Tracer's credentials at attach: */
1069 	const struct cred __rcu		*ptracer_cred;
1070 
1071 	/* Objective and real subjective task credentials (COW): */
1072 	const struct cred __rcu		*real_cred;
1073 
1074 	/* Effective (overridable) subjective task credentials (COW): */
1075 	const struct cred __rcu		*cred;
1076 
1077 #ifdef CONFIG_KEYS
1078 	/* Cached requested key. */
1079 	struct key			*cached_requested_key;
1080 #endif
1081 
1082 	/*
1083 	 * executable name, excluding path.
1084 	 *
1085 	 * - normally initialized setup_new_exec()
1086 	 * - access it with [gs]et_task_comm()
1087 	 * - lock it with task_lock()
1088 	 */
1089 	char				comm[TASK_COMM_LEN];
1090 
1091 	struct nameidata		*nameidata;
1092 
1093 #ifdef CONFIG_SYSVIPC
1094 	struct sysv_sem			sysvsem;
1095 	struct sysv_shm			sysvshm;
1096 #endif
1097 #ifdef CONFIG_DETECT_HUNG_TASK
1098 	unsigned long			last_switch_count;
1099 	unsigned long			last_switch_time;
1100 #endif
1101 	/* Filesystem information: */
1102 	struct fs_struct		*fs;
1103 
1104 	/* Open file information: */
1105 	struct files_struct		*files;
1106 
1107 #ifdef CONFIG_IO_URING
1108 	struct io_uring_task		*io_uring;
1109 #endif
1110 
1111 	/* Namespaces: */
1112 	struct nsproxy			*nsproxy;
1113 
1114 	/* Signal handlers: */
1115 	struct signal_struct		*signal;
1116 	struct sighand_struct __rcu		*sighand;
1117 	sigset_t			blocked;
1118 	sigset_t			real_blocked;
1119 	/* Restored if set_restore_sigmask() was used: */
1120 	sigset_t			saved_sigmask;
1121 	struct sigpending		pending;
1122 	unsigned long			sas_ss_sp;
1123 	size_t				sas_ss_size;
1124 	unsigned int			sas_ss_flags;
1125 
1126 	struct callback_head		*task_works;
1127 
1128 #ifdef CONFIG_AUDIT
1129 #ifdef CONFIG_AUDITSYSCALL
1130 	struct audit_context		*audit_context;
1131 #endif
1132 	kuid_t				loginuid;
1133 	unsigned int			sessionid;
1134 #endif
1135 	struct seccomp			seccomp;
1136 
1137 	/* Thread group tracking: */
1138 	u64				parent_exec_id;
1139 	u64				self_exec_id;
1140 
1141 	/* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
1142 	spinlock_t			alloc_lock;
1143 
1144 	/* Protection of the PI data structures: */
1145 	raw_spinlock_t			pi_lock;
1146 
1147 	struct wake_q_node		wake_q;
1148 
1149 #ifdef CONFIG_RT_MUTEXES
1150 	/* PI waiters blocked on a rt_mutex held by this task: */
1151 	struct rb_root_cached		pi_waiters;
1152 	/* Updated under owner's pi_lock and rq lock */
1153 	struct task_struct		*pi_top_task;
1154 	/* Deadlock detection and priority inheritance handling: */
1155 	struct rt_mutex_waiter		*pi_blocked_on;
1156 #endif
1157 
1158 #ifdef CONFIG_DEBUG_MUTEXES
1159 	/* Mutex deadlock detection: */
1160 	struct mutex_waiter		*blocked_on;
1161 #endif
1162 
1163 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1164 	int				non_block_count;
1165 #endif
1166 
1167 #ifdef CONFIG_TRACE_IRQFLAGS
1168 	struct irqtrace_events		irqtrace;
1169 	unsigned int			hardirq_threaded;
1170 	u64				hardirq_chain_key;
1171 	int				softirqs_enabled;
1172 	int				softirq_context;
1173 	int				irq_config;
1174 #endif
1175 
1176 #ifdef CONFIG_LOCKDEP
1177 # define MAX_LOCK_DEPTH			48UL
1178 	u64				curr_chain_key;
1179 	int				lockdep_depth;
1180 	unsigned int			lockdep_recursion;
1181 	struct held_lock		held_locks[MAX_LOCK_DEPTH];
1182 #endif
1183 
1184 #if defined(CONFIG_UBSAN) && !defined(CONFIG_UBSAN_TRAP)
1185 	unsigned int			in_ubsan;
1186 #endif
1187 
1188 	/* Journalling filesystem info: */
1189 	void				*journal_info;
1190 
1191 	/* Stacked block device info: */
1192 	struct bio_list			*bio_list;
1193 
1194 #ifdef CONFIG_BLOCK
1195 	/* Stack plugging: */
1196 	struct blk_plug			*plug;
1197 #endif
1198 
1199 	/* VM state: */
1200 	struct reclaim_state		*reclaim_state;
1201 
1202 	struct backing_dev_info		*backing_dev_info;
1203 
1204 	struct io_context		*io_context;
1205 
1206 #ifdef CONFIG_COMPACTION
1207 	struct capture_control		*capture_control;
1208 #endif
1209 	/* Ptrace state: */
1210 	unsigned long			ptrace_message;
1211 	kernel_siginfo_t		*last_siginfo;
1212 
1213 	struct task_io_accounting	ioac;
1214 #ifdef CONFIG_PSI
1215 	/* Pressure stall state */
1216 	unsigned int			psi_flags;
1217 #endif
1218 #ifdef CONFIG_TASK_XACCT
1219 	/* Accumulated RSS usage: */
1220 	u64				acct_rss_mem1;
1221 	/* Accumulated virtual memory usage: */
1222 	u64				acct_vm_mem1;
1223 	/* stime + utime since last update: */
1224 	u64				acct_timexpd;
1225 #endif
1226 #ifdef CONFIG_CPUSETS
1227 	/* Protected by ->alloc_lock: */
1228 	nodemask_t			mems_allowed;
1229 	/* Sequence number to catch updates: */
1230 	seqcount_spinlock_t		mems_allowed_seq;
1231 	int				cpuset_mem_spread_rotor;
1232 	int				cpuset_slab_spread_rotor;
1233 #endif
1234 #ifdef CONFIG_CGROUPS
1235 	/* Control Group info protected by css_set_lock: */
1236 	struct css_set __rcu		*cgroups;
1237 	/* cg_list protected by css_set_lock and tsk->alloc_lock: */
1238 	struct list_head		cg_list;
1239 #endif
1240 #ifdef CONFIG_X86_CPU_RESCTRL
1241 	u32				closid;
1242 	u32				rmid;
1243 #endif
1244 #ifdef CONFIG_FUTEX
1245 	struct robust_list_head __user	*robust_list;
1246 #ifdef CONFIG_COMPAT
1247 	struct compat_robust_list_head __user *compat_robust_list;
1248 #endif
1249 	struct list_head		pi_state_list;
1250 	struct futex_pi_state		*pi_state_cache;
1251 	struct mutex			futex_exit_mutex;
1252 	unsigned int			futex_state;
1253 #endif
1254 #ifdef CONFIG_PERF_EVENTS
1255 	struct perf_event_context	*perf_event_ctxp[perf_nr_task_contexts];
1256 	struct mutex			perf_event_mutex;
1257 	struct list_head		perf_event_list;
1258 #endif
1259 #ifdef CONFIG_DEBUG_PREEMPT
1260 	unsigned long			preempt_disable_ip;
1261 #endif
1262 #ifdef CONFIG_NUMA
1263 	/* Protected by alloc_lock: */
1264 	struct mempolicy		*mempolicy;
1265 	short				il_prev;
1266 	short				pref_node_fork;
1267 #endif
1268 #ifdef CONFIG_NUMA_BALANCING
1269 	int				numa_scan_seq;
1270 	unsigned int			numa_scan_period;
1271 	unsigned int			numa_scan_period_max;
1272 	int				numa_preferred_nid;
1273 	unsigned long			numa_migrate_retry;
1274 	/* Migration stamp: */
1275 	u64				node_stamp;
1276 	u64				last_task_numa_placement;
1277 	u64				last_sum_exec_runtime;
1278 	struct callback_head		numa_work;
1279 
1280 	/*
1281 	 * This pointer is only modified for current in syscall and
1282 	 * pagefault context (and for tasks being destroyed), so it can be read
1283 	 * from any of the following contexts:
1284 	 *  - RCU read-side critical section
1285 	 *  - current->numa_group from everywhere
1286 	 *  - task's runqueue locked, task not running
1287 	 */
1288 	struct numa_group __rcu		*numa_group;
1289 
1290 	/*
1291 	 * numa_faults is an array split into four regions:
1292 	 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
1293 	 * in this precise order.
1294 	 *
1295 	 * faults_memory: Exponential decaying average of faults on a per-node
1296 	 * basis. Scheduling placement decisions are made based on these
1297 	 * counts. The values remain static for the duration of a PTE scan.
1298 	 * faults_cpu: Track the nodes the process was running on when a NUMA
1299 	 * hinting fault was incurred.
1300 	 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
1301 	 * during the current scan window. When the scan completes, the counts
1302 	 * in faults_memory and faults_cpu decay and these values are copied.
1303 	 */
1304 	unsigned long			*numa_faults;
1305 	unsigned long			total_numa_faults;
1306 
1307 	/*
1308 	 * numa_faults_locality tracks if faults recorded during the last
1309 	 * scan window were remote/local or failed to migrate. The task scan
1310 	 * period is adapted based on the locality of the faults with different
1311 	 * weights depending on whether they were shared or private faults
1312 	 */
1313 	unsigned long			numa_faults_locality[3];
1314 
1315 	unsigned long			numa_pages_migrated;
1316 #endif /* CONFIG_NUMA_BALANCING */
1317 
1318 #ifdef CONFIG_RSEQ
1319 	struct rseq __user *rseq;
1320 	u32 rseq_sig;
1321 	/*
1322 	 * RmW on rseq_event_mask must be performed atomically
1323 	 * with respect to preemption.
1324 	 */
1325 	unsigned long rseq_event_mask;
1326 #endif
1327 
1328 	struct tlbflush_unmap_batch	tlb_ubc;
1329 
1330 	union {
1331 		refcount_t		rcu_users;
1332 		struct rcu_head		rcu;
1333 	};
1334 
1335 	/* Cache last used pipe for splice(): */
1336 	struct pipe_inode_info		*splice_pipe;
1337 
1338 	struct page_frag		task_frag;
1339 
1340 #ifdef CONFIG_TASK_DELAY_ACCT
1341 	struct task_delay_info		*delays;
1342 #endif
1343 
1344 #ifdef CONFIG_RECLAIM_ACCT
1345 	struct reclaim_acct		*reclaim_acct;
1346 #endif
1347 
1348 #ifdef CONFIG_FAULT_INJECTION
1349 	int				make_it_fail;
1350 	unsigned int			fail_nth;
1351 #endif
1352 	/*
1353 	 * When (nr_dirtied >= nr_dirtied_pause), it's time to call
1354 	 * balance_dirty_pages() for a dirty throttling pause:
1355 	 */
1356 	int				nr_dirtied;
1357 	int				nr_dirtied_pause;
1358 	/* Start of a write-and-pause period: */
1359 	unsigned long			dirty_paused_when;
1360 
1361 #ifdef CONFIG_LATENCYTOP
1362 	int				latency_record_count;
1363 	struct latency_record		latency_record[LT_SAVECOUNT];
1364 #endif
1365 	/*
1366 	 * Time slack values; these are used to round up poll() and
1367 	 * select() etc timeout values. These are in nanoseconds.
1368 	 */
1369 	u64				timer_slack_ns;
1370 	u64				default_timer_slack_ns;
1371 
1372 #ifdef CONFIG_KASAN
1373 	unsigned int			kasan_depth;
1374 #endif
1375 
1376 #ifdef CONFIG_KCSAN
1377 	struct kcsan_ctx		kcsan_ctx;
1378 #ifdef CONFIG_TRACE_IRQFLAGS
1379 	struct irqtrace_events		kcsan_save_irqtrace;
1380 #endif
1381 #endif
1382 
1383 #if IS_ENABLED(CONFIG_KUNIT)
1384 	struct kunit			*kunit_test;
1385 #endif
1386 
1387 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1388 	/* Index of current stored address in ret_stack: */
1389 	int				curr_ret_stack;
1390 	int				curr_ret_depth;
1391 
1392 	/* Stack of return addresses for return function tracing: */
1393 	struct ftrace_ret_stack		*ret_stack;
1394 
1395 	/* Timestamp for last schedule: */
1396 	unsigned long long		ftrace_timestamp;
1397 
1398 	/*
1399 	 * Number of functions that haven't been traced
1400 	 * because of depth overrun:
1401 	 */
1402 	atomic_t			trace_overrun;
1403 
1404 	/* Pause tracing: */
1405 	atomic_t			tracing_graph_pause;
1406 #endif
1407 
1408 #ifdef CONFIG_TRACING
1409 	/* State flags for use by tracers: */
1410 	unsigned long			trace;
1411 
1412 	/* Bitmask and counter of trace recursion: */
1413 	unsigned long			trace_recursion;
1414 #endif /* CONFIG_TRACING */
1415 
1416 #ifdef CONFIG_KCOV
1417 	/* See kernel/kcov.c for more details. */
1418 
1419 	/* Coverage collection mode enabled for this task (0 if disabled): */
1420 	unsigned int			kcov_mode;
1421 
1422 	/* Size of the kcov_area: */
1423 	unsigned int			kcov_size;
1424 
1425 	/* Buffer for coverage collection: */
1426 	void				*kcov_area;
1427 
1428 	/* KCOV descriptor wired with this task or NULL: */
1429 	struct kcov			*kcov;
1430 
1431 	/* KCOV common handle for remote coverage collection: */
1432 	u64				kcov_handle;
1433 
1434 	/* KCOV sequence number: */
1435 	int				kcov_sequence;
1436 
1437 	/* Collect coverage from softirq context: */
1438 	unsigned int			kcov_softirq;
1439 #endif
1440 
1441 #ifdef CONFIG_MEMCG
1442 	struct mem_cgroup		*memcg_in_oom;
1443 	gfp_t				memcg_oom_gfp_mask;
1444 	int				memcg_oom_order;
1445 
1446 	/* Number of pages to reclaim on returning to userland: */
1447 	unsigned int			memcg_nr_pages_over_high;
1448 
1449 	/* Used by memcontrol for targeted memcg charge: */
1450 	struct mem_cgroup		*active_memcg;
1451 #endif
1452 
1453 #ifdef CONFIG_BLK_CGROUP
1454 	struct request_queue		*throttle_queue;
1455 #endif
1456 
1457 #ifdef CONFIG_UPROBES
1458 	struct uprobe_task		*utask;
1459 #endif
1460 #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1461 	unsigned int			sequential_io;
1462 	unsigned int			sequential_io_avg;
1463 #endif
1464 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1465 	unsigned long			task_state_change;
1466 #endif
1467 	int				pagefault_disabled;
1468 #ifdef CONFIG_MMU
1469 	struct task_struct		*oom_reaper_list;
1470 	struct timer_list		oom_reaper_timer;
1471 #endif
1472 #ifdef CONFIG_VMAP_STACK
1473 	struct vm_struct		*stack_vm_area;
1474 #endif
1475 #ifdef CONFIG_THREAD_INFO_IN_TASK
1476 	/* A live task holds one reference: */
1477 	refcount_t			stack_refcount;
1478 #endif
1479 #ifdef CONFIG_LIVEPATCH
1480 	int patch_state;
1481 #endif
1482 #ifdef CONFIG_SECURITY
1483 	/* Used by LSM modules for access restriction: */
1484 	void				*security;
1485 #endif
1486 #ifdef CONFIG_BPF_SYSCALL
1487 	/* Used for BPF run context */
1488 	struct bpf_run_ctx		*bpf_ctx;
1489 #endif
1490 
1491 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
1492 	unsigned long			lowest_stack;
1493 	unsigned long			prev_lowest_stack;
1494 #endif
1495 
1496 #ifdef CONFIG_X86_MCE
1497 	void __user			*mce_vaddr;
1498 	__u64				mce_kflags;
1499 	u64				mce_addr;
1500 	__u64				mce_ripv : 1,
1501 					mce_whole_page : 1,
1502 					__mce_reserved : 62;
1503 	struct callback_head		mce_kill_me;
1504 	int				mce_count;
1505 #endif
1506 
1507 #ifdef CONFIG_ACCESS_TOKENID
1508 	u64				token;
1509 	u64				ftoken;
1510 #endif
1511 #ifdef CONFIG_QOS_CTRL
1512 	struct qos_task_struct qts;
1513 #endif
1514 	/*
1515 	 * New fields for task_struct should be added above here, so that
1516 	 * they are included in the randomized portion of task_struct.
1517 	 */
1518 	randomized_struct_fields_end
1519 
1520 	/* CPU-specific state of this task: */
1521 	struct thread_struct		thread;
1522 
1523 	/*
1524 	 * WARNING: on x86, 'thread_struct' contains a variable-sized
1525 	 * structure.  It *MUST* be at the end of 'task_struct'.
1526 	 *
1527 	 * Do not put anything below here!
1528 	 */
1529 };
1530 
1531 static inline struct pid *task_pid(struct task_struct *task)
1532 {
1533 	return task->thread_pid;
1534 }
1535 
1536 /*
1537  * the helpers to get the task's different pids as they are seen
1538  * from various namespaces
1539  *
1540  * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
1541  * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
1542  *                     current.
1543  * task_xid_nr_ns()  : id seen from the ns specified;
1544  *
1545  * see also pid_nr() etc in include/linux/pid.h
1546  */
1547 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
1548 
1549 static inline pid_t task_pid_nr(struct task_struct *tsk)
1550 {
1551 	return tsk->pid;
1552 }
1553 
1554 static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1555 {
1556 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1557 }
1558 
1559 static inline pid_t task_pid_vnr(struct task_struct *tsk)
1560 {
1561 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1562 }
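/*
 * Illustrative use of the pid helpers above; 'p' and 'ns' are hypothetical
 * variables, not names from this header:
 *
 *	pid_t global = task_pid_nr(p);		// id seen from init_pid_ns
 *	pid_t local  = task_pid_vnr(p);		// id seen from current's ns
 *	pid_t in_ns  = task_pid_nr_ns(p, ns);	// id seen from a given ns
 */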
1563 
1564 
1565 static inline pid_t task_tgid_nr(struct task_struct *tsk)
1566 {
1567 	return tsk->tgid;
1568 }
1569 
1570 /**
1571  * pid_alive - check that a task structure is not stale
1572  * @p: Task structure to be checked.
1573  *
1574  * Test if a process is not yet dead (at most zombie state)
1575  * If pid_alive fails, then pointers within the task structure
1576  * can be stale and must not be dereferenced.
1577  *
1578  * Return: 1 if the process is alive. 0 otherwise.
1579  */
1580 static inline int pid_alive(const struct task_struct *p)
1581 {
1582 	return p->thread_pid != NULL;
1583 }
1584 
1585 static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1586 {
1587 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1588 }
1589 
1590 static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1591 {
1592 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1593 }
1594 
1595 
1596 static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1597 {
1598 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1599 }
1600 
1601 static inline pid_t task_session_vnr(struct task_struct *tsk)
1602 {
1603 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1604 }
1605 
1606 static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1607 {
1608 	return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
1609 }
1610 
1611 static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1612 {
1613 	return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
1614 }
1615 
1616 static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1617 {
1618 	pid_t pid = 0;
1619 
1620 	rcu_read_lock();
1621 	if (pid_alive(tsk))
1622 		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1623 	rcu_read_unlock();
1624 
1625 	return pid;
1626 }
1627 
1628 static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1629 {
1630 	return task_ppid_nr_ns(tsk, &init_pid_ns);
1631 }
1632 
1633 /* Obsolete, do not use: */
1634 static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1635 {
1636 	return task_pgrp_nr_ns(tsk, &init_pid_ns);
1637 }
1638 
1639 #define TASK_REPORT_IDLE	(TASK_REPORT + 1)
1640 #define TASK_REPORT_MAX		(TASK_REPORT_IDLE << 1)
1641 
1642 static inline unsigned int task_state_index(struct task_struct *tsk)
1643 {
1644 	unsigned int tsk_state = READ_ONCE(tsk->state);
1645 	unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;
1646 
1647 	BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
1648 
1649 	if (tsk_state == TASK_IDLE)
1650 		state = TASK_REPORT_IDLE;
1651 
1652 	return fls(state);
1653 }
1654 
1655 static inline char task_index_to_char(unsigned int state)
1656 {
1657 	static const char state_char[] = "RSDTtXZPI";
1658 
1659 	BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);
1660 
1661 	return state_char[state];
1662 }
1663 
1664 static inline char task_state_to_char(struct task_struct *tsk)
1665 {
1666 	return task_index_to_char(task_state_index(tsk));
1667 }
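/*
 * Example of the mapping implemented above: TASK_RUNNING reports as 'R',
 * TASK_INTERRUPTIBLE as 'S', TASK_UNINTERRUPTIBLE as 'D', __TASK_STOPPED as
 * 'T', __TASK_TRACED as 't', EXIT_DEAD as 'X', EXIT_ZOMBIE as 'Z',
 * TASK_PARKED as 'P' and TASK_IDLE as 'I' -- the characters shown in
 * /proc/<pid>/stat and by ps(1).
 */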
1668 
1669 /**
1670  * is_global_init - check if a task structure is init. Since init
1671  * is free to have sub-threads we need to check tgid.
1672  * @tsk: Task structure to be checked.
1673  *
1674  * Check if a task structure is the first user space task the kernel created.
1675  *
1676  * Return: 1 if the task structure is init. 0 otherwise.
1677  */
1678 static inline int is_global_init(struct task_struct *tsk)
1679 {
1680 	return task_tgid_nr(tsk) == 1;
1681 }
1682 
1683 extern struct pid *cad_pid;
1684 
1685 /*
1686  * Per process flags
1687  */
1688 #define PF_VCPU			0x00000001	/* I'm a virtual CPU */
1689 #define PF_IDLE			0x00000002	/* I am an IDLE thread */
1690 #define PF_EXITING		0x00000004	/* Getting shut down */
1691 #define PF_IO_WORKER		0x00000010	/* Task is an IO worker */
1692 #define PF_WQ_WORKER		0x00000020	/* I'm a workqueue worker */
1693 #define PF_FORKNOEXEC		0x00000040	/* Forked but didn't exec */
1694 #define PF_MCE_PROCESS		0x00000080      /* Process policy on mce errors */
1695 #define PF_SUPERPRIV		0x00000100	/* Used super-user privileges */
1696 #define PF_DUMPCORE		0x00000200	/* Dumped core */
1697 #define PF_SIGNALED		0x00000400	/* Killed by a signal */
1698 #define PF_MEMALLOC		0x00000800	/* Allocating memory */
1699 #define PF_NPROC_EXCEEDED	0x00001000	/* set_user() noticed that RLIMIT_NPROC was exceeded */
1700 #define PF_USED_MATH		0x00002000	/* If unset the fpu must be initialized before use */
1701 #define PF_NOFREEZE		0x00008000	/* This thread should not be frozen */
1702 #define PF_FROZEN		0x00010000	/* Frozen for system suspend */
1703 #define PF_KSWAPD		0x00020000	/* I am kswapd */
1704 #define PF_MEMALLOC_NOFS	0x00040000	/* All allocation requests will inherit GFP_NOFS */
1705 #define PF_MEMALLOC_NOIO	0x00080000	/* All allocation requests will inherit GFP_NOIO */
1706 #define PF_LOCAL_THROTTLE	0x00100000	/* Throttle writes only against the bdi I write to,
1707 						 * I am cleaning dirty pages from some other bdi. */
1708 #define PF_KTHREAD		0x00200000	/* I am a kernel thread */
1709 #define PF_RANDOMIZE		0x00400000	/* Randomize virtual address space */
1710 #define PF_SWAPWRITE		0x00800000	/* Allowed to write to swap */
1711 #define PF_NO_SETAFFINITY	0x04000000	/* Userland is not allowed to meddle with cpus_mask */
1712 #define PF_MCE_EARLY		0x08000000      /* Early kill for mce process policy */
1713 #define PF_MEMALLOC_NOCMA	0x10000000	/* All allocation request will have _GFP_MOVABLE cleared */
1714 #define PF_FREEZER_SKIP		0x40000000	/* Freezer should not count it as freezable */
1715 #define PF_SUSPEND_TASK		0x80000000      /* This thread called freeze_processes() and should not be frozen */
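/*
 * Illustrative checks against the flags above ('p' is a hypothetical task
 * pointer; only 'current' may modify its own flags, see the comment below):
 *
 *	if (current->flags & PF_KTHREAD)
 *		;	// running in a kernel thread
 *	if (p->flags & PF_EXITING)
 *		;	// task is exiting (flag set early in do_exit())
 */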
1716 
1717 /*
1718  * Only the _current_ task can read/write to tsk->flags, but other
1719  * tasks can access tsk->flags in readonly mode for example
1720  * with tsk_used_math (like during threaded core dumping).
1721  * There is however an exception to this rule during ptrace
1722  * or during fork: the ptracer task is allowed to write to the
1723  * child->flags of its traced child (same goes for fork, the parent
1724  * can write to the child->flags), because we're guaranteed the
1725  * child is not running and in turn not changing child->flags
1726  * at the same time the parent does it.
1727  */
1728 #define clear_stopped_child_used_math(child)	do { (child)->flags &= ~PF_USED_MATH; } while (0)
1729 #define set_stopped_child_used_math(child)	do { (child)->flags |= PF_USED_MATH; } while (0)
1730 #define clear_used_math()			clear_stopped_child_used_math(current)
1731 #define set_used_math()				set_stopped_child_used_math(current)
1732 
1733 #define conditional_stopped_child_used_math(condition, child) \
1734 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1735 
1736 #define conditional_used_math(condition)	conditional_stopped_child_used_math(condition, current)
1737 
1738 #define copy_to_stopped_child_used_math(child) \
1739 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1740 
1741 /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
1742 #define tsk_used_math(p)			((p)->flags & PF_USED_MATH)
1743 #define used_math()				tsk_used_math(current)
1744 
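/*
 * Illustrative sketch (editor's addition, not part of the upstream header):
 * the ownership rule above in practice.  Only 'current' writes its own
 * ->flags; other tasks read them, and a parent may write a forked/stopped
 * child's ->flags because the child is known not to be running.
 * exercise_fpu() is a hypothetical helper.
 */
#if 0	/* example only */
static void example_used_math(struct task_struct *stopped_child)
{
	set_used_math();			/* current updates its own flags */
	exercise_fpu();

	if (tsk_used_math(stopped_child))	/* other tasks may only read */
		pr_debug("child has used the FPU\n");

	copy_to_stopped_child_used_math(stopped_child);	/* fork/ptrace exception */
}
#endif
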
1745 static __always_inline bool is_percpu_thread(void)
1746 {
1747 #ifdef CONFIG_SMP
1748 	return (current->flags & PF_NO_SETAFFINITY) &&
1749 		(current->nr_cpus_allowed  == 1);
1750 #else
1751 	return true;
1752 #endif
1753 }
1754 
1755 /* Per-process atomic flags. */
1756 #define PFA_NO_NEW_PRIVS		0	/* May not gain new privileges. */
1757 #define PFA_SPREAD_PAGE			1	/* Spread page cache over cpuset */
1758 #define PFA_SPREAD_SLAB			2	/* Spread some slab caches over cpuset */
1759 #define PFA_SPEC_SSB_DISABLE		3	/* Speculative Store Bypass disabled */
1760 #define PFA_SPEC_SSB_FORCE_DISABLE	4	/* Speculative Store Bypass force disabled*/
1761 #define PFA_SPEC_IB_DISABLE		5	/* Indirect branch speculation restricted */
1762 #define PFA_SPEC_IB_FORCE_DISABLE	6	/* Indirect branch speculation permanently restricted */
1763 #define PFA_SPEC_SSB_NOEXEC		7	/* Speculative Store Bypass clear on execve() */
1764 
1765 #define TASK_PFA_TEST(name, func)					\
1766 	static inline bool task_##func(struct task_struct *p)		\
1767 	{ return test_bit(PFA_##name, &p->atomic_flags); }
1768 
1769 #define TASK_PFA_SET(name, func)					\
1770 	static inline void task_set_##func(struct task_struct *p)	\
1771 	{ set_bit(PFA_##name, &p->atomic_flags); }
1772 
1773 #define TASK_PFA_CLEAR(name, func)					\
1774 	static inline void task_clear_##func(struct task_struct *p)	\
1775 	{ clear_bit(PFA_##name, &p->atomic_flags); }
1776 
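/*
 * Editor's note: each TASK_PFA_TEST/SET/CLEAR line below generates one small
 * accessor.  For instance, TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs) expands
 * to roughly:
 *
 *	static inline bool task_no_new_privs(struct task_struct *p)
 *	{ return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags); }
 *
 * Flags that get no CLEAR accessor (e.g. NO_NEW_PRIVS and the
 * *_FORCE_DISABLE variants) are effectively one-way: once set, they stay set.
 */
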
1777 TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
1778 TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
1779 
1780 TASK_PFA_TEST(SPREAD_PAGE, spread_page)
1781 TASK_PFA_SET(SPREAD_PAGE, spread_page)
1782 TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
1783 
1784 TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
1785 TASK_PFA_SET(SPREAD_SLAB, spread_slab)
1786 TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
1787 
1788 TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
1789 TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
1790 TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
1791 
1792 TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1793 TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1794 TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1795 
1796 TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1797 TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1798 
1799 TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
1800 TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
1801 TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
1802 
1803 TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
1804 TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
1805 
1806 static inline void
1807 current_restore_flags(unsigned long orig_flags, unsigned long flags)
1808 {
1809 	current->flags &= ~flags;
1810 	current->flags |= orig_flags & flags;
1811 }
1812 
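/*
 * Illustrative sketch (editor's addition): the usual save/modify/restore
 * pattern for per-process flags, shown here with PF_MEMALLOC_NOIO.  This
 * mirrors how the memalloc_noio_save()/memalloc_noio_restore() helpers work;
 * example_noio_section() itself is hypothetical.
 */
#if 0	/* example only */
static void example_noio_section(void)
{
	unsigned int noio_flag = current->flags & PF_MEMALLOC_NOIO;

	current->flags |= PF_MEMALLOC_NOIO;
	/* ... allocations in this section implicitly behave as GFP_NOIO ... */
	current_restore_flags(noio_flag, PF_MEMALLOC_NOIO);
}
#endif
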
1813 extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
1814 extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_effective_cpus);
1815 #ifdef CONFIG_SMP
1816 extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
1817 extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
1818 #else
1819 static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
1820 {
1821 }
1822 static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
1823 {
1824 	if (!cpumask_test_cpu(0, new_mask))
1825 		return -EINVAL;
1826 	return 0;
1827 }
1828 #endif
1829 
1830 extern int yield_to(struct task_struct *p, bool preempt);
1831 extern void set_user_nice(struct task_struct *p, long nice);
1832 extern int task_prio(const struct task_struct *p);
1833 
1834 /**
1835  * task_nice - return the nice value of a given task.
1836  * @p: the task in question.
1837  *
1838  * Return: The nice value [ -20 ... 0 ... 19 ].
1839  */
1840 static inline int task_nice(const struct task_struct *p)
1841 {
1842 	return PRIO_TO_NICE((p)->static_prio);
1843 }
1844 
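/*
 * Editor's note: with the usual priority layout, PRIO_TO_NICE() maps a
 * static_prio in the range 100..139 onto the nice range -20..19, so the
 * default static_prio of 120 corresponds to nice 0.
 */
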
1845 extern int can_nice(const struct task_struct *p, const int nice);
1846 extern int task_curr(const struct task_struct *p);
1847 extern int idle_cpu(int cpu);
1848 extern int available_idle_cpu(int cpu);
1849 extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
1850 extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
1851 extern void sched_set_fifo(struct task_struct *p);
1852 extern void sched_set_fifo_low(struct task_struct *p);
1853 extern void sched_set_normal(struct task_struct *p, int nice);
1854 extern int sched_setattr(struct task_struct *, const struct sched_attr *);
1855 extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
1856 extern struct task_struct *idle_task(int cpu);
1857 
1858 /**
1859  * is_idle_task - is the specified task an idle task?
1860  * @p: the task in question.
1861  *
1862  * Return: 1 if @p is an idle task. 0 otherwise.
1863  */
1864 static __always_inline bool is_idle_task(const struct task_struct *p)
1865 {
1866 	return !!(p->flags & PF_IDLE);
1867 }
1868 
1869 extern struct task_struct *curr_task(int cpu);
1870 extern void ia64_set_curr_task(int cpu, struct task_struct *p);
1871 
1872 void yield(void);
1873 
1874 union thread_union {
1875 #ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK
1876 	struct task_struct task;
1877 #endif
1878 #ifndef CONFIG_THREAD_INFO_IN_TASK
1879 	struct thread_info thread_info;
1880 #endif
1881 	unsigned long stack[THREAD_SIZE/sizeof(long)];
1882 };
1883 
1884 #ifndef CONFIG_THREAD_INFO_IN_TASK
1885 extern struct thread_info init_thread_info;
1886 #endif
1887 
1888 extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];
1889 
1890 #ifdef CONFIG_THREAD_INFO_IN_TASK
1891 static inline struct thread_info *task_thread_info(struct task_struct *task)
1892 {
1893 	return &task->thread_info;
1894 }
1895 #elif !defined(__HAVE_THREAD_FUNCTIONS)
1896 # define task_thread_info(task)	((struct thread_info *)(task)->stack)
1897 #endif
1898 
1899 /*
1900  * find a task by one of its numerical ids
1901  *
1902  * find_task_by_pid_ns():
1903  *      finds a task by its pid in the specified namespace
1904  * find_task_by_vpid():
1905  *      finds a task by its virtual pid
1906  *
1907  * see also find_vpid() etc in include/linux/pid.h
1908  */
1909 
1910 extern struct task_struct *find_task_by_vpid(pid_t nr);
1911 extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);
1912 
1913 /*
1914  * find a task by its virtual pid and get the task struct
1915  */
1916 extern struct task_struct *find_get_task_by_vpid(pid_t nr);
1917 
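/*
 * Illustrative sketch (editor's addition): looking up a task by its virtual
 * pid.  find_task_by_vpid() is normally called under rcu_read_lock(); take a
 * reference before dropping the lock if the task is used afterwards.
 * find_get_task_by_vpid() combines both steps.
 */
#if 0	/* example only */
static struct task_struct *example_lookup(pid_t nr)
{
	struct task_struct *p;

	rcu_read_lock();
	p = find_task_by_vpid(nr);
	if (p)
		get_task_struct(p);	/* caller must put_task_struct() later */
	rcu_read_unlock();

	return p;	/* roughly equivalent to find_get_task_by_vpid(nr) */
}
#endif
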
1918 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
1919 extern int wake_up_process(struct task_struct *tsk);
1920 extern void wake_up_new_task(struct task_struct *tsk);
1921 
1922 #ifdef CONFIG_SMP
1923 extern void kick_process(struct task_struct *tsk);
1924 #else
1925 static inline void kick_process(struct task_struct *tsk) { }
1926 #endif
1927 
1928 extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
1929 
1930 static inline void set_task_comm(struct task_struct *tsk, const char *from)
1931 {
1932 	__set_task_comm(tsk, from, false);
1933 }
1934 
1935 extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
1936 #define get_task_comm(buf, tsk) ({			\
1937 	BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN);	\
1938 	__get_task_comm(buf, sizeof(buf), tsk);		\
1939 })
1940 
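/*
 * Illustrative sketch (editor's addition): get_task_comm() requires a real
 * TASK_COMM_LEN-sized array so the BUILD_BUG_ON() above can check
 * sizeof(buf); passing a plain pointer would fail to compile.
 */
#if 0	/* example only */
static void example_print_comm(struct task_struct *tsk)
{
	char comm[TASK_COMM_LEN];

	get_task_comm(comm, tsk);
	pr_info("task %d is '%s'\n", task_pid_nr(tsk), comm);
}
#endif
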
1941 #ifdef CONFIG_SMP
1942 static __always_inline void scheduler_ipi(void)
1943 {
1944 	/*
1945 	 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
1946 	 * TIF_NEED_RESCHED remotely (for the first time) will also send
1947 	 * this IPI.
1948 	 */
1949 	preempt_fold_need_resched();
1950 }
1951 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
1952 #else
1953 static inline void scheduler_ipi(void) { }
1954 static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1955 {
1956 	return 1;
1957 }
1958 #endif
1959 
1960 /*
1961  * Set thread flags in other task's structures.
1962  * See asm/thread_info.h for TIF_xxxx flags available:
1963  */
1964 static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
1965 {
1966 	set_ti_thread_flag(task_thread_info(tsk), flag);
1967 }
1968 
1969 static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
1970 {
1971 	clear_ti_thread_flag(task_thread_info(tsk), flag);
1972 }
1973 
1974 static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag,
1975 					  bool value)
1976 {
1977 	update_ti_thread_flag(task_thread_info(tsk), flag, value);
1978 }
1979 
1980 static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
1981 {
1982 	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
1983 }
1984 
1985 static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
1986 {
1987 	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
1988 }
1989 
1990 static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
1991 {
1992 	return test_ti_thread_flag(task_thread_info(tsk), flag);
1993 }
1994 
1995 static inline void set_tsk_need_resched(struct task_struct *tsk)
1996 {
1997 	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
1998 }
1999 
2000 static inline void clear_tsk_need_resched(struct task_struct *tsk)
2001 {
2002 	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2003 }
2004 
2005 static inline int test_tsk_need_resched(struct task_struct *tsk)
2006 {
2007 	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
2008 }
2009 
2010 /*
2011  * cond_resched() and cond_resched_lock(): latency reduction via
2012  * explicit rescheduling in places that are safe. The return
2013  * value indicates whether a reschedule was actually done.
2014  * cond_resched_lock() will drop the spinlock before scheduling and re-acquire it before returning.
2015  */
2016 #ifndef CONFIG_PREEMPTION
2017 extern int _cond_resched(void);
2018 #else
2019 static inline int _cond_resched(void) { return 0; }
2020 #endif
2021 
2022 #define cond_resched() ({			\
2023 	___might_sleep(__FILE__, __LINE__, 0);	\
2024 	_cond_resched();			\
2025 })
2026 
2027 extern int __cond_resched_lock(spinlock_t *lock);
2028 
2029 #define cond_resched_lock(lock) ({				\
2030 	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
2031 	__cond_resched_lock(lock);				\
2032 })
2033 
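/*
 * Illustrative sketch (editor's addition): explicit rescheduling points in
 * long-running code.  process_one(), more_locked_work() and do_locked_work()
 * are hypothetical helpers.
 */
#if 0	/* example only */
static void example_long_running(unsigned long count, spinlock_t *lock)
{
	unsigned long i;

	for (i = 0; i < count; i++) {
		process_one(i);
		cond_resched();			/* voluntary preemption point */
	}

	spin_lock(lock);
	while (more_locked_work()) {
		do_locked_work();
		cond_resched_lock(lock);	/* may drop and re-take 'lock' */
	}
	spin_unlock(lock);
}
#endif
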
2034 static inline void cond_resched_rcu(void)
2035 {
2036 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
2037 	rcu_read_unlock();
2038 	cond_resched();
2039 	rcu_read_lock();
2040 #endif
2041 }
2042 
2043 /*
2044  * Does a critical section need to be broken because another
2045  * task is waiting? (Technically this does not depend on CONFIG_PREEMPTION,
2046  * but it reflects a general need for low latency.)
2047  */
2048 static inline int spin_needbreak(spinlock_t *lock)
2049 {
2050 #ifdef CONFIG_PREEMPTION
2051 	return spin_is_contended(lock);
2052 #else
2053 	return 0;
2054 #endif
2055 }
2056 
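/*
 * Illustrative sketch (editor's addition): breaking up a long critical
 * section when someone else is waiting for the lock; this is essentially
 * what cond_resched_lock() does for you.  do_one_unit() is hypothetical.
 */
#if 0	/* example only */
static void example_breakable_section(spinlock_t *lock)
{
	spin_lock(lock);
	while (do_one_unit()) {
		if (spin_needbreak(lock) || need_resched()) {
			spin_unlock(lock);
			cond_resched();
			spin_lock(lock);
		}
	}
	spin_unlock(lock);
}
#endif
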
2057 static __always_inline bool need_resched(void)
2058 {
2059 	return unlikely(tif_need_resched());
2060 }
2061 
2062 /*
2063  * Wrappers for p->thread_info->cpu access. No-op on UP.
2064  */
2065 #ifdef CONFIG_SMP
2066 
2067 static inline unsigned int task_cpu(const struct task_struct *p)
2068 {
2069 #ifdef CONFIG_THREAD_INFO_IN_TASK
2070 	return READ_ONCE(p->cpu);
2071 #else
2072 	return READ_ONCE(task_thread_info(p)->cpu);
2073 #endif
2074 }
2075 
2076 extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
2077 
2078 #else
2079 
2080 static inline unsigned int task_cpu(const struct task_struct *p)
2081 {
2082 	return 0;
2083 }
2084 
2085 static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
2086 {
2087 }
2088 
2089 #endif /* CONFIG_SMP */
2090 
2091 /*
2092  * In order to reduce various lock holder preemption latencies, provide an
2093  * interface to see if a vCPU is currently running or not.
2094  *
2095  * This allows us to terminate optimistic spin loops and block, analogous to
2096  * the native optimistic spin heuristic of testing if the lock owner task is
2097  * running or not.
2098  */
2099 #ifndef vcpu_is_preempted
2100 static inline bool vcpu_is_preempted(int cpu)
2101 {
2102 	return false;
2103 }
2104 #endif
2105 
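/*
 * Illustrative sketch (editor's addition): an optimistic spin loop that gives
 * up once the lock owner's (v)CPU has been preempted, in the spirit of the
 * mutex/rwsem spinners.  'struct example_lock' is hypothetical.
 */
#if 0	/* example only */
static bool example_spin_on_owner(struct example_lock *lock, struct task_struct *owner)
{
	while (READ_ONCE(lock->owner) == owner) {
		if (need_resched() || vcpu_is_preempted(task_cpu(owner)))
			return false;	/* stop spinning; sleep instead */
		cpu_relax();
	}
	return true;			/* owner released the lock while we spun */
}
#endif
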
2106 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
2107 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
2108 
2109 #ifndef TASK_SIZE_OF
2110 #define TASK_SIZE_OF(tsk)	TASK_SIZE
2111 #endif
2112 
2113 #ifdef CONFIG_RSEQ
2114 
2115 /*
2116  * Map the event mask on the user-space ABI enum rseq_cs_flags
2117  * for direct mask checks.
2118  */
2119 enum rseq_event_mask_bits {
2120 	RSEQ_EVENT_PREEMPT_BIT	= RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
2121 	RSEQ_EVENT_SIGNAL_BIT	= RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
2122 	RSEQ_EVENT_MIGRATE_BIT	= RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
2123 };
2124 
2125 enum rseq_event_mask {
2126 	RSEQ_EVENT_PREEMPT	= (1U << RSEQ_EVENT_PREEMPT_BIT),
2127 	RSEQ_EVENT_SIGNAL	= (1U << RSEQ_EVENT_SIGNAL_BIT),
2128 	RSEQ_EVENT_MIGRATE	= (1U << RSEQ_EVENT_MIGRATE_BIT),
2129 };
2130 
2131 static inline void rseq_set_notify_resume(struct task_struct *t)
2132 {
2133 	if (t->rseq)
2134 		set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
2135 }
2136 
2137 void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
2138 
2139 static inline void rseq_handle_notify_resume(struct ksignal *ksig,
2140 					     struct pt_regs *regs)
2141 {
2142 	if (current->rseq)
2143 		__rseq_handle_notify_resume(ksig, regs);
2144 }
2145 
2146 static inline void rseq_signal_deliver(struct ksignal *ksig,
2147 				       struct pt_regs *regs)
2148 {
2149 	preempt_disable();
2150 	__set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
2151 	preempt_enable();
2152 	rseq_handle_notify_resume(ksig, regs);
2153 }
2154 
2155 /* rseq_preempt() requires preemption to be disabled. */
2156 static inline void rseq_preempt(struct task_struct *t)
2157 {
2158 	__set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
2159 	rseq_set_notify_resume(t);
2160 }
2161 
2162 /* rseq_migrate() requires preemption to be disabled. */
2163 static inline void rseq_migrate(struct task_struct *t)
2164 {
2165 	__set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
2166 	rseq_set_notify_resume(t);
2167 }
2168 
2169 /*
2170  * If parent process has a registered restartable sequences area, the
2171  * child inherits. Unregister rseq for a clone with CLONE_VM set.
2172  */
2173 static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
2174 {
2175 	if (clone_flags & CLONE_VM) {
2176 		t->rseq = NULL;
2177 		t->rseq_sig = 0;
2178 		t->rseq_event_mask = 0;
2179 	} else {
2180 		t->rseq = current->rseq;
2181 		t->rseq_sig = current->rseq_sig;
2182 		t->rseq_event_mask = current->rseq_event_mask;
2183 	}
2184 }
2185 
2186 static inline void rseq_execve(struct task_struct *t)
2187 {
2188 	t->rseq = NULL;
2189 	t->rseq_sig = 0;
2190 	t->rseq_event_mask = 0;
2191 }
2192 
2193 #else
2194 
2195 static inline void rseq_set_notify_resume(struct task_struct *t)
2196 {
2197 }
2198 static inline void rseq_handle_notify_resume(struct ksignal *ksig,
2199 					     struct pt_regs *regs)
2200 {
2201 }
2202 static inline void rseq_signal_deliver(struct ksignal *ksig,
2203 				       struct pt_regs *regs)
2204 {
2205 }
2206 static inline void rseq_preempt(struct task_struct *t)
2207 {
2208 }
2209 static inline void rseq_migrate(struct task_struct *t)
2210 {
2211 }
2212 static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
2213 {
2214 }
2215 static inline void rseq_execve(struct task_struct *t)
2216 {
2217 }
2218 
2219 #endif
2220 
2221 #ifdef CONFIG_DEBUG_RSEQ
2222 
2223 void rseq_syscall(struct pt_regs *regs);
2224 
2225 #else
2226 
2227 static inline void rseq_syscall(struct pt_regs *regs)
2228 {
2229 }
2230 
2231 #endif
2232 
2233 const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq);
2234 char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len);
2235 int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq);
2236 
2237 const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq);
2238 const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq);
2239 const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq);
2240 
2241 int sched_trace_rq_cpu(struct rq *rq);
2242 int sched_trace_rq_cpu_capacity(struct rq *rq);
2243 int sched_trace_rq_nr_running(struct rq *rq);
2244 
2245 const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
2246 
2247 #endif
2248