1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_SCHED_H
3 #define _LINUX_SCHED_H
4 
5 /*
6  * Define 'struct task_struct' and provide the main scheduler
7  * APIs (schedule(), wakeup variants, etc.)
8  */
9 
10 #include <uapi/linux/sched.h>
11 
12 #include <asm/current.h>
13 
14 #include <linux/pid.h>
15 #include <linux/sem.h>
16 #include <linux/shm.h>
17 #include <linux/kcov.h>
18 #include <linux/mutex.h>
19 #include <linux/plist.h>
20 #include <linux/hrtimer.h>
21 #include <linux/seccomp.h>
22 #include <linux/nodemask.h>
23 #include <linux/rcupdate.h>
24 #include <linux/refcount.h>
25 #include <linux/resource.h>
26 #include <linux/latencytop.h>
27 #include <linux/sched/prio.h>
28 #include <linux/sched/types.h>
29 #include <linux/signal_types.h>
30 #include <linux/mm_types_task.h>
31 #include <linux/task_io_accounting.h>
32 #include <linux/posix-timers.h>
33 #include <linux/rseq.h>
34 #include <linux/android_kabi.h>
35 #include <linux/android_vendor.h>
36 
37 /* task_struct member predeclarations (sorted alphabetically): */
38 struct audit_context;
39 struct backing_dev_info;
40 struct bio_list;
41 struct blk_plug;
42 struct capture_control;
43 struct cfs_rq;
44 struct fs_struct;
45 struct futex_pi_state;
46 struct io_context;
47 struct mempolicy;
48 struct nameidata;
49 struct nsproxy;
50 struct perf_event_context;
51 struct pid_namespace;
52 struct pipe_inode_info;
53 struct rcu_node;
54 struct reclaim_state;
55 struct robust_list_head;
56 struct root_domain;
57 struct rq;
58 struct sched_attr;
59 struct sched_param;
60 struct seq_file;
61 struct sighand_struct;
62 struct signal_struct;
63 struct task_delay_info;
64 struct task_group;
65 
66 /*
67  * Task state bitmask. NOTE! These bits are also
68  * encoded in fs/proc/array.c: get_task_state().
69  *
70  * We have two separate sets of flags: task->state
71  * is about runnability, while task->exit_state is
72  * about the task exiting. Confusing, but this way
73  * modifying one set can't modify the other one by
74  * mistake.
75  */
76 
77 /* Used in tsk->state: */
78 #define TASK_RUNNING			0x0000
79 #define TASK_INTERRUPTIBLE		0x0001
80 #define TASK_UNINTERRUPTIBLE		0x0002
81 #define __TASK_STOPPED			0x0004
82 #define __TASK_TRACED			0x0008
83 /* Used in tsk->exit_state: */
84 #define EXIT_DEAD			0x0010
85 #define EXIT_ZOMBIE			0x0020
86 #define EXIT_TRACE			(EXIT_ZOMBIE | EXIT_DEAD)
87 /* Used in tsk->state again: */
88 #define TASK_PARKED			0x0040
89 #define TASK_DEAD			0x0080
90 #define TASK_WAKEKILL			0x0100
91 #define TASK_WAKING			0x0200
92 #define TASK_NOLOAD			0x0400
93 #define TASK_NEW			0x0800
94 #define TASK_STATE_MAX			0x1000
95 
96 /* Convenience macros for the sake of set_current_state: */
97 #define TASK_KILLABLE			(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
98 #define TASK_STOPPED			(TASK_WAKEKILL | __TASK_STOPPED)
99 #define TASK_TRACED			(TASK_WAKEKILL | __TASK_TRACED)
100 
101 #define TASK_IDLE			(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
102 
103 /* Convenience macros for the sake of wake_up(): */
104 #define TASK_NORMAL			(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
105 
106 /* get_task_state(): */
107 #define TASK_REPORT			(TASK_RUNNING | TASK_INTERRUPTIBLE | \
108 					 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
109 					 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
110 					 TASK_PARKED)
111 
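/*
 * Illustrative note (not part of the original header): with the bit values
 * defined above, the composite state macros work out to, for example:
 *
 *   TASK_KILLABLE == 0x0100 | 0x0002 == 0x0102
 *   TASK_STOPPED  == 0x0100 | 0x0004 == 0x0104
 *   TASK_TRACED   == 0x0100 | 0x0008 == 0x0108
 *   TASK_IDLE     == 0x0002 | 0x0400 == 0x0402
 *   TASK_NORMAL   == 0x0001 | 0x0002 == 0x0003
 */
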
112 #define task_is_traced(task)		((task->state & __TASK_TRACED) != 0)
113 
114 #define task_is_stopped(task)		((task->state & __TASK_STOPPED) != 0)
115 
116 #define task_is_stopped_or_traced(task)	((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
117 
118 #define task_contributes_to_load(task)	((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
119 					 (task->flags & PF_FROZEN) == 0 && \
120 					 (task->state & TASK_NOLOAD) == 0)
121 
122 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
123 
124 /*
125  * Special states are those that do not use the normal wait-loop pattern. See
126  * the comment with set_special_state().
127  */
128 #define is_special_task_state(state)				\
129 	((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))
130 
131 #define __set_current_state(state_value)			\
132 	do {							\
133 		WARN_ON_ONCE(is_special_task_state(state_value));\
134 		current->task_state_change = _THIS_IP_;		\
135 		current->state = (state_value);			\
136 	} while (0)
137 
138 #define set_current_state(state_value)				\
139 	do {							\
140 		WARN_ON_ONCE(is_special_task_state(state_value));\
141 		current->task_state_change = _THIS_IP_;		\
142 		smp_store_mb(current->state, (state_value));	\
143 	} while (0)
144 
145 #define set_special_state(state_value)					\
146 	do {								\
147 		unsigned long flags; /* may shadow */			\
148 		WARN_ON_ONCE(!is_special_task_state(state_value));	\
149 		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
150 		current->task_state_change = _THIS_IP_;			\
151 		current->state = (state_value);				\
152 		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
153 	} while (0)
154 #else
155 /*
156  * set_current_state() includes a barrier so that the write of current->state
157  * is correctly serialised wrt the caller's subsequent test of whether to
158  * actually sleep:
159  *
160  *   for (;;) {
161  *	set_current_state(TASK_UNINTERRUPTIBLE);
162  *	if (!need_sleep)
163  *		break;
164  *
165  *	schedule();
166  *   }
167  *   __set_current_state(TASK_RUNNING);
168  *
169  * If the caller does not need such serialisation (because, for instance, the
170  * condition test and condition change and wakeup are under the same lock) then
171  * use __set_current_state().
172  *
173  * The above is typically ordered against the wakeup, which does:
174  *
175  *   need_sleep = false;
176  *   wake_up_state(p, TASK_UNINTERRUPTIBLE);
177  *
178  * where wake_up_state() executes a full memory barrier before accessing the
179  * task state.
180  *
181  * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
182  * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
183  * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
184  *
185  * However, with slightly different timing the wakeup TASK_RUNNING store can
186  * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not
187  * a problem either because that will result in one extra go around the loop
188  * and our @cond test will save the day.
189  *
190  * Also see the comments of try_to_wake_up().
191  */
192 #define __set_current_state(state_value)				\
193 	current->state = (state_value)
194 
195 #define set_current_state(state_value)					\
196 	smp_store_mb(current->state, (state_value))
197 
198 /*
199  * set_special_state() should be used for those states when the blocking task
200  * can not use the regular condition based wait-loop. In that case we must
201  * serialize against wakeups such that any possible in-flight TASK_RUNNING stores
202  * will not collide with our state change.
203  */
204 #define set_special_state(state_value)					\
205 	do {								\
206 		unsigned long flags; /* may shadow */			\
207 		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
208 		current->state = (state_value);				\
209 		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
210 	} while (0)
211 
212 #endif
213 
214 /* Task command name length: */
215 #define TASK_COMM_LEN			16
216 
217 extern void scheduler_tick(void);
218 
219 #define	MAX_SCHEDULE_TIMEOUT		LONG_MAX
220 
221 extern long schedule_timeout(long timeout);
222 extern long schedule_timeout_interruptible(long timeout);
223 extern long schedule_timeout_killable(long timeout);
224 extern long schedule_timeout_uninterruptible(long timeout);
225 extern long schedule_timeout_idle(long timeout);
226 asmlinkage void schedule(void);
227 extern void schedule_preempt_disabled(void);
228 asmlinkage void preempt_schedule_irq(void);
229 
230 extern int __must_check io_schedule_prepare(void);
231 extern void io_schedule_finish(int token);
232 extern long io_schedule_timeout(long timeout);
233 extern void io_schedule(void);
234 
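/*
 * Illustrative sketch (not part of the original header), assuming the usual
 * jiffies helpers from <linux/jiffies.h>: a caller that wants to sleep for
 * roughly 100ms while still being woken by signals can do
 *
 *   long remaining = schedule_timeout_interruptible(msecs_to_jiffies(100));
 *
 * which sets the task state and calls schedule_timeout() internally; the
 * return value is the number of jiffies left if the sleep ended early.
 */
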
235 /**
236  * struct prev_cputime - snapshot of system and user cputime
237  * @utime: time spent in user mode
238  * @stime: time spent in system mode
239  * @lock: protects the above two fields
240  *
241  * Stores previous user/system time values such that we can guarantee
242  * monotonicity.
243  */
244 struct prev_cputime {
245 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
246 	u64				utime;
247 	u64				stime;
248 	raw_spinlock_t			lock;
249 #endif
250 };
251 
252 enum vtime_state {
253 	/* Task is sleeping or running in a CPU with VTIME inactive: */
254 	VTIME_INACTIVE = 0,
255 	/* Task runs in userspace in a CPU with VTIME active: */
256 	VTIME_USER,
257 	/* Task runs in kernelspace in a CPU with VTIME active: */
258 	VTIME_SYS,
259 };
260 
261 struct vtime {
262 	seqcount_t		seqcount;
263 	unsigned long long	starttime;
264 	enum vtime_state	state;
265 	u64			utime;
266 	u64			stime;
267 	u64			gtime;
268 };
269 
270 /*
271  * Utilization clamp constraints.
272  * @UCLAMP_MIN:	Minimum utilization
273  * @UCLAMP_MAX:	Maximum utilization
274  * @UCLAMP_CNT:	Utilization clamp constraints count
275  */
276 enum uclamp_id {
277 	UCLAMP_MIN = 0,
278 	UCLAMP_MAX,
279 	UCLAMP_CNT
280 };
281 
282 #ifdef CONFIG_SMP
283 extern struct root_domain def_root_domain;
284 extern struct mutex sched_domains_mutex;
285 #endif
286 
287 struct sched_info {
288 #ifdef CONFIG_SCHED_INFO
289 	/* Cumulative counters: */
290 
291 	/* # of times we have run on this CPU: */
292 	unsigned long			pcount;
293 
294 	/* Time spent waiting on a runqueue: */
295 	unsigned long long		run_delay;
296 
297 	/* Timestamps: */
298 
299 	/* When did we last run on a CPU? */
300 	unsigned long long		last_arrival;
301 
302 	/* When were we last queued to run? */
303 	unsigned long long		last_queued;
304 
305 #endif /* CONFIG_SCHED_INFO */
306 };
307 
308 /*
309  * Integer metrics need fixed point arithmetic, e.g., sched/fair
310  * has a few: load, load_avg, util_avg, freq, and capacity.
311  *
312  * We define a basic fixed point arithmetic range, and then formalize
313  * all these metrics based on that basic range.
314  */
315 # define SCHED_FIXEDPOINT_SHIFT		10
316 # define SCHED_FIXEDPOINT_SCALE		(1L << SCHED_FIXEDPOINT_SHIFT)
317 
318 /* Increase resolution of cpu_capacity calculations */
319 # define SCHED_CAPACITY_SHIFT		SCHED_FIXEDPOINT_SHIFT
320 # define SCHED_CAPACITY_SCALE		(1L << SCHED_CAPACITY_SHIFT)
321 
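/*
 * Worked example (illustrative, not part of the original header): with
 * SCHED_FIXEDPOINT_SHIFT == 10, the value 1024 represents 1.0, so a
 * utilization of 512 out of SCHED_CAPACITY_SCALE is 50% of a CPU, and
 * converting a utilization to a percentage is just
 *
 *   pct = util * 100 / SCHED_CAPACITY_SCALE;
 */
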
322 struct load_weight {
323 	unsigned long			weight;
324 	u32				inv_weight;
325 };
326 
327 /**
328  * struct util_est - Estimated utilization of FAIR tasks
329  * @enqueued: instantaneous estimated utilization of a task/cpu
330  * @ewma:     the Exponential Weighted Moving Average (EWMA)
331  *            utilization of a task
332  *
333  * Support data structure to track an Exponential Weighted Moving Average
334  * (EWMA) of a FAIR task's utilization. New samples are added to the moving
335  * average each time a task completes an activation. The sample's weight is chosen
336  * so that the EWMA will be relatively insensitive to transient changes to the
337  * task's workload.
338  *
339  * The enqueued attribute has a slightly different meaning for tasks and cpus:
340  * - task:   the task's util_avg at last task dequeue time
341  * - cfs_rq: the sum of util_est.enqueued for each RUNNABLE task on that CPU
342  * Thus, the util_est.enqueued of a task represents the contribution to the
343  * estimated utilization of the CPU where that task is currently enqueued.
344  *
345  * Only for tasks do we track a moving average of the past instantaneous
346  * estimated utilization. This makes it possible to absorb sporadic drops in the
347  * utilization of an otherwise almost periodic task.
348  */
349 struct util_est {
350 	unsigned int			enqueued;
351 	unsigned int			ewma;
352 #define UTIL_EST_WEIGHT_SHIFT		2
353 } __attribute__((__aligned__(sizeof(u64))));
354 
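/*
 * Rough sketch of how UTIL_EST_WEIGHT_SHIFT is used (simplified here for
 * illustration; the actual update lives in kernel/sched/fair.c): each new
 * sample is blended into the moving average with weight
 * w = 1 / 2^UTIL_EST_WEIGHT_SHIFT = 1/4, i.e.
 *
 *   ewma(t) = w * task_util(p) + (1 - w) * ewma(t - 1)
 */
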
355 /*
356  * The load_avg/util_avg accumulates an infinite geometric series
357  * (see __update_load_avg() in kernel/sched/fair.c).
358  *
359  * [load_avg definition]
360  *
361  *   load_avg = runnable% * scale_load_down(load)
362  *
363  * where runnable% is the time ratio that a sched_entity is runnable.
364  * For cfs_rq, it is the aggregated load_avg of all runnable and
365  * blocked sched_entities.
366  *
367  * [util_avg definition]
368  *
369  *   util_avg = running% * SCHED_CAPACITY_SCALE
370  *
371  * where running% is the time ratio that a sched_entity is running on
372  * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
373  * and blocked sched_entities.
374  *
375  * load_avg and util_avg don't directly factor frequency scaling and CPU
376  * capacity scaling. The scaling is done through the rq_clock_pelt that
377  * is used for computing those signals (see update_rq_clock_pelt())
378  *
379  * N.B., the above ratios (runnable% and running%) themselves are in the
380  * range of [0, 1]. To do fixed point arithmetic, we therefore scale them
381  * to as large a range as necessary. This is for example reflected by
382  * util_avg's SCHED_CAPACITY_SCALE.
383  *
384  * [Overflow issue]
385  *
386  * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
387  * with the highest load (=88761), always runnable on a single cfs_rq,
388  * and should not overflow as the number already hits PID_MAX_LIMIT.
389  *
390  * For all other cases (including 32-bit kernels), struct load_weight's
391  * weight will overflow first before we do, because:
392  *
393  *    Max(load_avg) <= Max(load.weight)
394  *
395  * Then it is the load_weight's responsibility to consider overflow
396  * issues.
397  */
398 struct sched_avg {
399 	u64				last_update_time;
400 	u64				load_sum;
401 	u64				runnable_load_sum;
402 	u32				util_sum;
403 	u32				period_contrib;
404 	unsigned long			load_avg;
405 	unsigned long			runnable_load_avg;
406 	unsigned long			util_avg;
407 	struct util_est			util_est;
408 } ____cacheline_aligned;
409 
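/*
 * Worked example (illustrative, not part of the original header): per the
 * util_avg definition above, a task that has recently been running about
 * 25% of the time settles around
 *
 *   util_avg ~= 0.25 * SCHED_CAPACITY_SCALE = 256
 *
 * (ignoring the frequency/capacity scaling folded in via rq_clock_pelt).
 */
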
410 struct sched_statistics {
411 #ifdef CONFIG_SCHEDSTATS
412 	u64				wait_start;
413 	u64				wait_max;
414 	u64				wait_count;
415 	u64				wait_sum;
416 	u64				iowait_count;
417 	u64				iowait_sum;
418 
419 	u64				sleep_start;
420 	u64				sleep_max;
421 	s64				sum_sleep_runtime;
422 
423 	u64				block_start;
424 	u64				block_max;
425 	u64				exec_max;
426 	u64				slice_max;
427 
428 	u64				nr_migrations_cold;
429 	u64				nr_failed_migrations_affine;
430 	u64				nr_failed_migrations_running;
431 	u64				nr_failed_migrations_hot;
432 	u64				nr_forced_migrations;
433 
434 	u64				nr_wakeups;
435 	u64				nr_wakeups_sync;
436 	u64				nr_wakeups_migrate;
437 	u64				nr_wakeups_local;
438 	u64				nr_wakeups_remote;
439 	u64				nr_wakeups_affine;
440 	u64				nr_wakeups_affine_attempts;
441 	u64				nr_wakeups_passive;
442 	u64				nr_wakeups_idle;
443 #endif
444 };
445 
446 struct sched_entity {
447 	/* For load-balancing: */
448 	struct load_weight		load;
449 	unsigned long			runnable_weight;
450 	struct rb_node			run_node;
451 	struct list_head		group_node;
452 	unsigned int			on_rq;
453 
454 	u64				exec_start;
455 	u64				sum_exec_runtime;
456 	u64				vruntime;
457 	u64				prev_sum_exec_runtime;
458 
459 	u64				nr_migrations;
460 
461 	struct sched_statistics		statistics;
462 
463 #ifdef CONFIG_FAIR_GROUP_SCHED
464 	int				depth;
465 	struct sched_entity		*parent;
466 	/* rq on which this entity is (to be) queued: */
467 	struct cfs_rq			*cfs_rq;
468 	/* rq "owned" by this entity/group: */
469 	struct cfs_rq			*my_q;
470 #endif
471 
472 #ifdef CONFIG_SMP
473 	/*
474 	 * Per entity load average tracking.
475 	 *
476 	 * Put into separate cache line so it does not
477 	 * collide with read-mostly values above.
478 	 */
479 	struct sched_avg		avg;
480 #endif
481 
482 	ANDROID_KABI_RESERVE(1);
483 	ANDROID_KABI_RESERVE(2);
484 	ANDROID_KABI_RESERVE(3);
485 	ANDROID_KABI_RESERVE(4);
486 };
487 
488 struct sched_rt_entity {
489 	struct list_head		run_list;
490 	unsigned long			timeout;
491 	unsigned long			watchdog_stamp;
492 	unsigned int			time_slice;
493 	unsigned short			on_rq;
494 	unsigned short			on_list;
495 
496 	struct sched_rt_entity		*back;
497 #ifdef CONFIG_RT_GROUP_SCHED
498 	struct sched_rt_entity		*parent;
499 	/* rq on which this entity is (to be) queued: */
500 	struct rt_rq			*rt_rq;
501 	/* rq "owned" by this entity/group: */
502 	struct rt_rq			*my_q;
503 #endif
504 
505 	ANDROID_KABI_RESERVE(1);
506 	ANDROID_KABI_RESERVE(2);
507 	ANDROID_KABI_RESERVE(3);
508 	ANDROID_KABI_RESERVE(4);
509 } __randomize_layout;
510 
511 struct sched_dl_entity {
512 	struct rb_node			rb_node;
513 
514 	/*
515 	 * Original scheduling parameters. Copied here from sched_attr
516 	 * during sched_setattr(), they will remain the same until
517 	 * the next sched_setattr().
518 	 */
519 	u64				dl_runtime;	/* Maximum runtime for each instance	*/
520 	u64				dl_deadline;	/* Relative deadline of each instance	*/
521 	u64				dl_period;	/* Separation of two instances (period) */
522 	u64				dl_bw;		/* dl_runtime / dl_period		*/
523 	u64				dl_density;	/* dl_runtime / dl_deadline		*/
524 
525 	/*
526 	 * Actual scheduling parameters. Initialized with the values above,
527 	 * they are continuously updated during task execution. Note that
528 	 * the remaining runtime could be < 0 in case we are in overrun.
529 	 */
530 	s64				runtime;	/* Remaining runtime for this instance	*/
531 	u64				deadline;	/* Absolute deadline for this instance	*/
532 	unsigned int			flags;		/* Specifying the scheduler behaviour	*/
533 
534 	/*
535 	 * Some bool flags:
536 	 *
537 	 * @dl_throttled tells if we exhausted the runtime. If so, the
538 	 * task has to wait for a replenishment to be performed at the
539 	 * next firing of dl_timer.
540 	 *
541 	 * @dl_boosted tells if we are boosted due to DI. If so we are
542 	 * outside bandwidth enforcement mechanism (but only until we
543 	 * exit the critical section);
544 	 *
545 	 * @dl_yielded tells if task gave up the CPU before consuming
546 	 * all its available runtime during the last job.
547 	 *
548 	 * @dl_non_contending tells if the task is inactive while still
549 	 * contributing to the active utilization. In other words, it
550 	 * indicates if the inactive timer has been armed and its handler
551 	 * has not been executed yet. This flag is useful to avoid race
552 	 * conditions between the inactive timer handler and the wakeup
553 	 * code.
554 	 *
555 	 * @dl_overrun tells if the task asked to be informed about runtime
556 	 * overruns.
557 	 */
558 	unsigned int			dl_throttled      : 1;
559 	unsigned int			dl_boosted        : 1;
560 	unsigned int			dl_yielded        : 1;
561 	unsigned int			dl_non_contending : 1;
562 	unsigned int			dl_overrun	  : 1;
563 
564 	/*
565 	 * Bandwidth enforcement timer. Each -deadline task has its
566 	 * own bandwidth to be enforced, thus we need one timer per task.
567 	 */
568 	struct hrtimer			dl_timer;
569 
570 	/*
571 	 * Inactive timer, responsible for decreasing the active utilization
572 	 * at the "0-lag time". When a -deadline task blocks, it contributes
573 	 * to GRUB's active utilization until the "0-lag time", hence a
574 	 * timer is needed to decrease the active utilization at the correct
575 	 * time.
576 	 */
577 	struct hrtimer inactive_timer;
578 };
579 
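/*
 * Worked example (illustrative, not part of the original header): a task
 * admitted via sched_setattr() with dl_runtime = 10ms and dl_period = 100ms
 * reserves 10% of a CPU; conceptually dl_bw is that ratio,
 * dl_runtime / dl_period (kept in fixed point internally).
 */
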
580 #ifdef CONFIG_UCLAMP_TASK
581 /* Number of utilization clamp buckets (shorter alias) */
582 #define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT
583 
584 /*
585  * Utilization clamp for a scheduling entity
586  * @value:		clamp value "assigned" to a se
587  * @bucket_id:		bucket index corresponding to the "assigned" value
588  * @active:		the se is currently refcounted in a rq's bucket
589  * @user_defined:	the requested clamp value comes from user-space
590  *
591  * The bucket_id is the index of the clamp bucket matching the clamp value
592  * which is pre-computed and stored to avoid expensive integer divisions from
593  * the fast path.
594  *
595  * The active bit is set whenever a task has got an "effective" value assigned,
596  * which can be different from the clamp value "requested" from user-space.
597  * This makes it possible to know that a task is refcounted in the rq's bucket
598  * corresponding to the "effective" bucket_id.
599  *
600  * The user_defined bit is set whenever a task has got a task-specific clamp
601  * value requested from userspace, i.e. the system defaults apply to this task
602  * just as a restriction. This makes it possible to relax default clamps when a
603  * less restrictive task-specific value has been requested, thus allowing a
604  * "nice" semantic to be implemented. For example, a task running with a 20%
605  * default boost can still drop its own boosting to 0%.
606  */
607 struct uclamp_se {
608 	unsigned int value		: bits_per(SCHED_CAPACITY_SCALE);
609 	unsigned int bucket_id		: bits_per(UCLAMP_BUCKETS);
610 	unsigned int active		: 1;
611 	unsigned int user_defined	: 1;
612 };
613 #endif /* CONFIG_UCLAMP_TASK */
614 
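/*
 * Worked example (illustrative, not part of the original header): clamp
 * values live on the SCHED_CAPACITY_SCALE scale, so the "20% default boost"
 * mentioned above corresponds to a uclamp_se::value of roughly
 * 0.20 * 1024 ~= 205, with bucket_id pre-computed from that value.
 */
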
615 union rcu_special {
616 	struct {
617 		u8			blocked;
618 		u8			need_qs;
619 		u8			exp_hint; /* Hint for performance. */
620 		u8			deferred_qs;
621 	} b; /* Bits. */
622 	u32 s; /* Set of bits. */
623 };
624 
625 enum perf_event_task_context {
626 	perf_invalid_context = -1,
627 	perf_hw_context = 0,
628 	perf_sw_context,
629 	perf_nr_task_contexts,
630 };
631 
632 struct wake_q_node {
633 	struct wake_q_node *next;
634 };
635 
636 struct task_struct {
637 #ifdef CONFIG_THREAD_INFO_IN_TASK
638 	/*
639 	 * For reasons of header soup (see current_thread_info()), this
640 	 * must be the first element of task_struct.
641 	 */
642 	struct thread_info		thread_info;
643 #endif
644 	/* -1 unrunnable, 0 runnable, >0 stopped: */
645 	volatile long			state;
646 
647 	/*
648 	 * This begins the randomizable portion of task_struct. Only
649 	 * scheduling-critical items should be added above here.
650 	 */
651 	randomized_struct_fields_start
652 
653 	void				*stack;
654 	refcount_t			usage;
655 	/* Per task flags (PF_*), defined further below: */
656 	unsigned int			flags;
657 	unsigned int			ptrace;
658 
659 #ifdef CONFIG_SMP
660 	struct llist_node		wake_entry;
661 	int				on_cpu;
662 #ifdef CONFIG_THREAD_INFO_IN_TASK
663 	/* Current CPU: */
664 	unsigned int			cpu;
665 #endif
666 	unsigned int			wakee_flips;
667 	unsigned long			wakee_flip_decay_ts;
668 	struct task_struct		*last_wakee;
669 
670 	/*
671 	 * recent_used_cpu is initially set as the last CPU used by a task
672 	 * that wakes affine another task. Waker/wakee relationships can
673 	 * push tasks around a CPU where each wakeup moves to the next one.
674 	 * Tracking a recently used CPU allows a quick search for a recently
675 	 * used CPU that may be idle.
676 	 */
677 	int				recent_used_cpu;
678 	int				wake_cpu;
679 #endif
680 	int				on_rq;
681 
682 	int				prio;
683 	int				static_prio;
684 	int				normal_prio;
685 	unsigned int			rt_priority;
686 
687 	const struct sched_class	*sched_class;
688 	struct sched_entity		se;
689 	struct sched_rt_entity		rt;
690 #ifdef CONFIG_CGROUP_SCHED
691 	struct task_group		*sched_task_group;
692 #endif
693 	struct sched_dl_entity		dl;
694 
695 #ifdef CONFIG_UCLAMP_TASK
696 	/* Clamp values requested for a scheduling entity */
697 	struct uclamp_se		uclamp_req[UCLAMP_CNT];
698 	/* Effective clamp values used for a scheduling entity */
699 	struct uclamp_se		uclamp[UCLAMP_CNT];
700 #endif
701 
702 #ifdef CONFIG_PREEMPT_NOTIFIERS
703 	/* List of struct preempt_notifier: */
704 	struct hlist_head		preempt_notifiers;
705 #endif
706 
707 #ifdef CONFIG_BLK_DEV_IO_TRACE
708 	unsigned int			btrace_seq;
709 #endif
710 
711 	unsigned int			policy;
712 	int				nr_cpus_allowed;
713 	const cpumask_t			*cpus_ptr;
714 	cpumask_t			cpus_mask;
715 
716 #ifdef CONFIG_PREEMPT_RCU
717 	int				rcu_read_lock_nesting;
718 	union rcu_special		rcu_read_unlock_special;
719 	struct list_head		rcu_node_entry;
720 	struct rcu_node			*rcu_blocked_node;
721 #endif /* #ifdef CONFIG_PREEMPT_RCU */
722 
723 #ifdef CONFIG_TASKS_RCU
724 	unsigned long			rcu_tasks_nvcsw;
725 	u8				rcu_tasks_holdout;
726 	u8				rcu_tasks_idx;
727 	int				rcu_tasks_idle_cpu;
728 	struct list_head		rcu_tasks_holdout_list;
729 #endif /* #ifdef CONFIG_TASKS_RCU */
730 
731 	struct sched_info		sched_info;
732 
733 	struct list_head		tasks;
734 #ifdef CONFIG_SMP
735 	struct plist_node		pushable_tasks;
736 	struct rb_node			pushable_dl_tasks;
737 #endif
738 
739 	struct mm_struct		*mm;
740 	struct mm_struct		*active_mm;
741 
742 	/* Per-thread vma caching: */
743 	struct vmacache			vmacache;
744 
745 #ifdef SPLIT_RSS_COUNTING
746 	struct task_rss_stat		rss_stat;
747 #endif
748 	int				exit_state;
749 	int				exit_code;
750 	int				exit_signal;
751 	/* The signal sent when the parent dies: */
752 	int				pdeath_signal;
753 	/* JOBCTL_*, siglock protected: */
754 	unsigned long			jobctl;
755 
756 	/* Used for emulating ABI behavior of previous Linux versions: */
757 	unsigned int			personality;
758 
759 	/* Scheduler bits, serialized by scheduler locks: */
760 	unsigned			sched_reset_on_fork:1;
761 	unsigned			sched_contributes_to_load:1;
762 	unsigned			sched_migrated:1;
763 	unsigned			sched_remote_wakeup:1;
764 #ifdef CONFIG_PSI
765 	unsigned			sched_psi_wake_requeue:1;
766 #endif
767 
768 	/* Force alignment to the next boundary: */
769 	unsigned			:0;
770 
771 	/* Unserialized, strictly 'current' */
772 
773 	/* Bit to tell LSMs we're in execve(): */
774 	unsigned			in_execve:1;
775 	unsigned			in_iowait:1;
776 #ifndef TIF_RESTORE_SIGMASK
777 	unsigned			restore_sigmask:1;
778 #endif
779 #ifdef CONFIG_MEMCG
780 	unsigned			in_user_fault:1;
781 #endif
782 #ifdef CONFIG_COMPAT_BRK
783 	unsigned			brk_randomized:1;
784 #endif
785 #ifdef CONFIG_CGROUPS
786 	/* disallow userland-initiated cgroup migration */
787 	unsigned			no_cgroup_migration:1;
788 	/* task is frozen/stopped (used by the cgroup freezer) */
789 	unsigned			frozen:1;
790 #endif
791 #ifdef CONFIG_BLK_CGROUP
792 	/* to be used once the psi infrastructure lands upstream. */
793 	unsigned			use_memdelay:1;
794 #endif
795 
796 	unsigned long			atomic_flags; /* Flags requiring atomic access. */
797 
798 	struct restart_block		restart_block;
799 
800 	pid_t				pid;
801 	pid_t				tgid;
802 
803 #ifdef CONFIG_STACKPROTECTOR
804 	/* Canary value for the -fstack-protector GCC feature: */
805 	unsigned long			stack_canary;
806 #endif
807 	/*
808 	 * Pointers to the (original) parent process, youngest child, younger sibling,
809 	 * older sibling, respectively.  (p->father can be replaced with
810 	 * p->real_parent->pid)
811 	 */
812 
813 	/* Real parent process: */
814 	struct task_struct __rcu	*real_parent;
815 
816 	/* Recipient of SIGCHLD, wait4() reports: */
817 	struct task_struct __rcu	*parent;
818 
819 	/*
820 	 * Children/sibling form the list of natural children:
821 	 */
822 	struct list_head		children;
823 	struct list_head		sibling;
824 	struct task_struct		*group_leader;
825 
826 	/*
827 	 * 'ptraced' is the list of tasks this task is using ptrace() on.
828 	 *
829 	 * This includes both natural children and PTRACE_ATTACH targets.
830 	 * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
831 	 */
832 	struct list_head		ptraced;
833 	struct list_head		ptrace_entry;
834 
835 	/* PID/PID hash table linkage. */
836 	struct pid			*thread_pid;
837 	struct hlist_node		pid_links[PIDTYPE_MAX];
838 	struct list_head		thread_group;
839 	struct list_head		thread_node;
840 
841 	struct completion		*vfork_done;
842 
843 	/* CLONE_CHILD_SETTID: */
844 	int __user			*set_child_tid;
845 
846 	/* CLONE_CHILD_CLEARTID: */
847 	int __user			*clear_child_tid;
848 
849 	u64				utime;
850 	u64				stime;
851 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
852 	u64				utimescaled;
853 	u64				stimescaled;
854 #endif
855 	u64				gtime;
856 #ifdef CONFIG_CPU_FREQ_TIMES
857 	u64				*time_in_state;
858 	unsigned int			max_state;
859 #endif
860 	struct prev_cputime		prev_cputime;
861 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
862 	struct vtime			vtime;
863 #endif
864 
865 #ifdef CONFIG_NO_HZ_FULL
866 	atomic_t			tick_dep_mask;
867 #endif
868 	/* Context switch counts: */
869 	unsigned long			nvcsw;
870 	unsigned long			nivcsw;
871 
872 	/* Monotonic time in nsecs: */
873 	u64				start_time;
874 
875 	/* Boot based time in nsecs: */
876 	u64				real_start_time;
877 
878 	/* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
879 	unsigned long			min_flt;
880 	unsigned long			maj_flt;
881 
882 	/* Empty if CONFIG_POSIX_CPUTIMERS=n */
883 	struct posix_cputimers		posix_cputimers;
884 
885 	/* Process credentials: */
886 
887 	/* Tracer's credentials at attach: */
888 	const struct cred __rcu		*ptracer_cred;
889 
890 	/* Objective and real subjective task credentials (COW): */
891 	const struct cred __rcu		*real_cred;
892 
893 	/* Effective (overridable) subjective task credentials (COW): */
894 	const struct cred __rcu		*cred;
895 
896 #ifdef CONFIG_KEYS
897 	/* Cached requested key. */
898 	struct key			*cached_requested_key;
899 #endif
900 
901 	/*
902 	 * executable name, excluding path.
903 	 *
904 	 * - normally initialized by setup_new_exec()
905 	 * - access it with [gs]et_task_comm()
906 	 * - lock it with task_lock()
907 	 */
908 	char				comm[TASK_COMM_LEN];
909 
910 	struct nameidata		*nameidata;
911 
912 #ifdef CONFIG_SYSVIPC
913 	struct sysv_sem			sysvsem;
914 	struct sysv_shm			sysvshm;
915 #endif
916 #ifdef CONFIG_DETECT_HUNG_TASK
917 	unsigned long			last_switch_count;
918 	unsigned long			last_switch_time;
919 #endif
920 	/* Filesystem information: */
921 	struct fs_struct		*fs;
922 
923 	/* Open file information: */
924 	struct files_struct		*files;
925 
926 	/* Namespaces: */
927 	struct nsproxy			*nsproxy;
928 
929 	/* Signal handlers: */
930 	struct signal_struct		*signal;
931 	struct sighand_struct		*sighand;
932 	sigset_t			blocked;
933 	sigset_t			real_blocked;
934 	/* Restored if set_restore_sigmask() was used: */
935 	sigset_t			saved_sigmask;
936 	struct sigpending		pending;
937 	unsigned long			sas_ss_sp;
938 	size_t				sas_ss_size;
939 	unsigned int			sas_ss_flags;
940 
941 	struct callback_head		*task_works;
942 
943 #ifdef CONFIG_AUDIT
944 #ifdef CONFIG_AUDITSYSCALL
945 	struct audit_context		*audit_context;
946 #endif
947 	kuid_t				loginuid;
948 	unsigned int			sessionid;
949 #endif
950 	struct seccomp			seccomp;
951 
952 	/* Thread group tracking: */
953 	u64				parent_exec_id;
954 	u64				self_exec_id;
955 
956 	/* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
957 	spinlock_t			alloc_lock;
958 
959 	/* Protection of the PI data structures: */
960 	raw_spinlock_t			pi_lock;
961 
962 	struct wake_q_node		wake_q;
963 
964 #ifdef CONFIG_RT_MUTEXES
965 	/* PI waiters blocked on a rt_mutex held by this task: */
966 	struct rb_root_cached		pi_waiters;
967 	/* Updated under owner's pi_lock and rq lock */
968 	struct task_struct		*pi_top_task;
969 	/* Deadlock detection and priority inheritance handling: */
970 	struct rt_mutex_waiter		*pi_blocked_on;
971 #endif
972 
973 #ifdef CONFIG_DEBUG_MUTEXES
974 	/* Mutex deadlock detection: */
975 	struct mutex_waiter		*blocked_on;
976 #endif
977 
978 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
979 	int				non_block_count;
980 #endif
981 
982 #ifdef CONFIG_TRACE_IRQFLAGS
983 	unsigned int			irq_events;
984 	unsigned long			hardirq_enable_ip;
985 	unsigned long			hardirq_disable_ip;
986 	unsigned int			hardirq_enable_event;
987 	unsigned int			hardirq_disable_event;
988 	int				hardirqs_enabled;
989 	int				hardirq_context;
990 	unsigned long			softirq_disable_ip;
991 	unsigned long			softirq_enable_ip;
992 	unsigned int			softirq_disable_event;
993 	unsigned int			softirq_enable_event;
994 	int				softirqs_enabled;
995 	int				softirq_context;
996 #endif
997 
998 #ifdef CONFIG_LOCKDEP
999 # define MAX_LOCK_DEPTH			48UL
1000 	u64				curr_chain_key;
1001 	int				lockdep_depth;
1002 	unsigned int			lockdep_recursion;
1003 	struct held_lock		held_locks[MAX_LOCK_DEPTH];
1004 #endif
1005 
1006 #ifdef CONFIG_UBSAN
1007 	unsigned int			in_ubsan;
1008 #endif
1009 
1010 	/* Journalling filesystem info: */
1011 	void				*journal_info;
1012 
1013 	/* Stacked block device info: */
1014 	struct bio_list			*bio_list;
1015 
1016 #ifdef CONFIG_BLOCK
1017 	/* Stack plugging: */
1018 	struct blk_plug			*plug;
1019 #endif
1020 
1021 	/* VM state: */
1022 	struct reclaim_state		*reclaim_state;
1023 
1024 	struct backing_dev_info		*backing_dev_info;
1025 
1026 	struct io_context		*io_context;
1027 
1028 #ifdef CONFIG_COMPACTION
1029 	struct capture_control		*capture_control;
1030 #endif
1031 	/* Ptrace state: */
1032 	unsigned long			ptrace_message;
1033 	kernel_siginfo_t		*last_siginfo;
1034 
1035 	struct task_io_accounting	ioac;
1036 #ifdef CONFIG_PSI
1037 	/* Pressure stall state */
1038 	unsigned int			psi_flags;
1039 #endif
1040 #ifdef CONFIG_TASK_XACCT
1041 	/* Accumulated RSS usage: */
1042 	u64				acct_rss_mem1;
1043 	/* Accumulated virtual memory usage: */
1044 	u64				acct_vm_mem1;
1045 	/* stime + utime since last update: */
1046 	u64				acct_timexpd;
1047 #endif
1048 #ifdef CONFIG_CPUSETS
1049 	/* Protected by ->alloc_lock: */
1050 	nodemask_t			mems_allowed;
1051 	/* Sequence number to catch updates: */
1052 	seqcount_t			mems_allowed_seq;
1053 	int				cpuset_mem_spread_rotor;
1054 	int				cpuset_slab_spread_rotor;
1055 #endif
1056 #ifdef CONFIG_CGROUPS
1057 	/* Control Group info protected by css_set_lock: */
1058 	struct css_set __rcu		*cgroups;
1059 	/* cg_list protected by css_set_lock and tsk->alloc_lock: */
1060 	struct list_head		cg_list;
1061 #endif
1062 #ifdef CONFIG_X86_CPU_RESCTRL
1063 	u32				closid;
1064 	u32				rmid;
1065 #endif
1066 #ifdef CONFIG_FUTEX
1067 	struct robust_list_head __user	*robust_list;
1068 #ifdef CONFIG_COMPAT
1069 	struct compat_robust_list_head __user *compat_robust_list;
1070 #endif
1071 	struct list_head		pi_state_list;
1072 	struct futex_pi_state		*pi_state_cache;
1073 	struct mutex			futex_exit_mutex;
1074 	unsigned int			futex_state;
1075 #endif
1076 #ifdef CONFIG_PERF_EVENTS
1077 	struct perf_event_context	*perf_event_ctxp[perf_nr_task_contexts];
1078 	struct mutex			perf_event_mutex;
1079 	struct list_head		perf_event_list;
1080 #endif
1081 #ifdef CONFIG_DEBUG_PREEMPT
1082 	unsigned long			preempt_disable_ip;
1083 #endif
1084 #ifdef CONFIG_NUMA
1085 	/* Protected by alloc_lock: */
1086 	struct mempolicy		*mempolicy;
1087 	short				il_prev;
1088 	short				pref_node_fork;
1089 #endif
1090 #ifdef CONFIG_NUMA_BALANCING
1091 	int				numa_scan_seq;
1092 	unsigned int			numa_scan_period;
1093 	unsigned int			numa_scan_period_max;
1094 	int				numa_preferred_nid;
1095 	unsigned long			numa_migrate_retry;
1096 	/* Migration stamp: */
1097 	u64				node_stamp;
1098 	u64				last_task_numa_placement;
1099 	u64				last_sum_exec_runtime;
1100 	struct callback_head		numa_work;
1101 
1102 	/*
1103 	 * This pointer is only modified for current in syscall and
1104 	 * pagefault context (and for tasks being destroyed), so it can be read
1105 	 * from any of the following contexts:
1106 	 *  - RCU read-side critical section
1107 	 *  - current->numa_group from everywhere
1108 	 *  - task's runqueue locked, task not running
1109 	 */
1110 	struct numa_group __rcu		*numa_group;
1111 
1112 	/*
1113 	 * numa_faults is an array split into four regions:
1114 	 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
1115 	 * in this precise order.
1116 	 *
1117 	 * faults_memory: Exponential decaying average of faults on a per-node
1118 	 * basis. Scheduling placement decisions are made based on these
1119 	 * counts. The values remain static for the duration of a PTE scan.
1120 	 * faults_cpu: Track the nodes the process was running on when a NUMA
1121 	 * hinting fault was incurred.
1122 	 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
1123 	 * during the current scan window. When the scan completes, the counts
1124 	 * in faults_memory and faults_cpu decay and these values are copied.
1125 	 */
1126 	unsigned long			*numa_faults;
1127 	unsigned long			total_numa_faults;
1128 
1129 	/*
1130 	 * numa_faults_locality tracks if faults recorded during the last
1131 	 * scan window were remote/local or failed to migrate. The task scan
1132 	 * period is adapted based on the locality of the faults with different
1133 	 * weights depending on whether they were shared or private faults
1134 	 */
1135 	unsigned long			numa_faults_locality[3];
1136 
1137 	unsigned long			numa_pages_migrated;
1138 #endif /* CONFIG_NUMA_BALANCING */
1139 
1140 #ifdef CONFIG_RSEQ
1141 	struct rseq __user *rseq;
1142 	u32 rseq_sig;
1143 	/*
1144 	 * RmW on rseq_event_mask must be performed atomically
1145 	 * with respect to preemption.
1146 	 */
1147 	unsigned long rseq_event_mask;
1148 #endif
1149 
1150 	struct tlbflush_unmap_batch	tlb_ubc;
1151 
1152 	union {
1153 		refcount_t		rcu_users;
1154 		struct rcu_head		rcu;
1155 	};
1156 
1157 	/* Cache last used pipe for splice(): */
1158 	struct pipe_inode_info		*splice_pipe;
1159 
1160 	struct page_frag		task_frag;
1161 
1162 #ifdef CONFIG_TASK_DELAY_ACCT
1163 	struct task_delay_info		*delays;
1164 #endif
1165 
1166 #ifdef CONFIG_FAULT_INJECTION
1167 	int				make_it_fail;
1168 	unsigned int			fail_nth;
1169 #endif
1170 	/*
1171 	 * When (nr_dirtied >= nr_dirtied_pause), it's time to call
1172 	 * balance_dirty_pages() for a dirty throttling pause:
1173 	 */
1174 	int				nr_dirtied;
1175 	int				nr_dirtied_pause;
1176 	/* Start of a write-and-pause period: */
1177 	unsigned long			dirty_paused_when;
1178 
1179 #ifdef CONFIG_LATENCYTOP
1180 	int				latency_record_count;
1181 	struct latency_record		latency_record[LT_SAVECOUNT];
1182 #endif
1183 	/*
1184 	 * Time slack values; these are used to round up poll() and
1185 	 * select() etc timeout values. These are in nanoseconds.
1186 	 */
1187 	u64				timer_slack_ns;
1188 	u64				default_timer_slack_ns;
1189 
1190 #ifdef CONFIG_KASAN
1191 	unsigned int			kasan_depth;
1192 #endif
1193 
1194 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1195 	/* Index of current stored address in ret_stack: */
1196 	int				curr_ret_stack;
1197 	int				curr_ret_depth;
1198 
1199 	/* Stack of return addresses for return function tracing: */
1200 	struct ftrace_ret_stack		*ret_stack;
1201 
1202 	/* Timestamp for last schedule: */
1203 	unsigned long long		ftrace_timestamp;
1204 
1205 	/*
1206 	 * Number of functions that haven't been traced
1207 	 * because of depth overrun:
1208 	 */
1209 	atomic_t			trace_overrun;
1210 
1211 	/* Pause tracing: */
1212 	atomic_t			tracing_graph_pause;
1213 #endif
1214 
1215 #ifdef CONFIG_TRACING
1216 	/* State flags for use by tracers: */
1217 	unsigned long			trace;
1218 
1219 	/* Bitmask and counter of trace recursion: */
1220 	unsigned long			trace_recursion;
1221 #endif /* CONFIG_TRACING */
1222 
1223 #ifdef CONFIG_KCOV
1224 	/* See kernel/kcov.c for more details. */
1225 
1226 	/* Coverage collection mode enabled for this task (0 if disabled): */
1227 	unsigned int			kcov_mode;
1228 
1229 	/* Size of the kcov_area: */
1230 	unsigned int			kcov_size;
1231 
1232 	/* Buffer for coverage collection: */
1233 	void				*kcov_area;
1234 
1235 	/* KCOV descriptor wired with this task or NULL: */
1236 	struct kcov			*kcov;
1237 
1238 	/* KCOV common handle for remote coverage collection: */
1239 	u64				kcov_handle;
1240 
1241 	/* KCOV sequence number: */
1242 	int				kcov_sequence;
1243 #endif
1244 
1245 #ifdef CONFIG_MEMCG
1246 	struct mem_cgroup		*memcg_in_oom;
1247 	gfp_t				memcg_oom_gfp_mask;
1248 	int				memcg_oom_order;
1249 
1250 	/* Number of pages to reclaim on returning to userland: */
1251 	unsigned int			memcg_nr_pages_over_high;
1252 
1253 	/* Used by memcontrol for targeted memcg charge: */
1254 	struct mem_cgroup		*active_memcg;
1255 #endif
1256 
1257 #ifdef CONFIG_BLK_CGROUP
1258 	struct request_queue		*throttle_queue;
1259 #endif
1260 
1261 #ifdef CONFIG_UPROBES
1262 	struct uprobe_task		*utask;
1263 #endif
1264 #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1265 	unsigned int			sequential_io;
1266 	unsigned int			sequential_io_avg;
1267 #endif
1268 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1269 	unsigned long			task_state_change;
1270 #endif
1271 	int				pagefault_disabled;
1272 #ifdef CONFIG_MMU
1273 	struct task_struct		*oom_reaper_list;
1274 #endif
1275 #ifdef CONFIG_VMAP_STACK
1276 	struct vm_struct		*stack_vm_area;
1277 #endif
1278 #ifdef CONFIG_THREAD_INFO_IN_TASK
1279 	/* A live task holds one reference: */
1280 	refcount_t			stack_refcount;
1281 #endif
1282 #ifdef CONFIG_LIVEPATCH
1283 	int patch_state;
1284 #endif
1285 #ifdef CONFIG_SECURITY
1286 	/* Used by LSM modules for access restriction: */
1287 	void				*security;
1288 #endif
1289 
1290 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
1291 	unsigned long			lowest_stack;
1292 	unsigned long			prev_lowest_stack;
1293 #endif
1294 
1295 	ANDROID_VENDOR_DATA_ARRAY(1, 3);
1296 
1297 	ANDROID_KABI_RESERVE(1);
1298 	ANDROID_KABI_RESERVE(2);
1299 	ANDROID_KABI_RESERVE(3);
1300 	ANDROID_KABI_RESERVE(4);
1301 	ANDROID_KABI_RESERVE(5);
1302 	ANDROID_KABI_RESERVE(6);
1303 	ANDROID_KABI_RESERVE(7);
1304 	ANDROID_KABI_RESERVE(8);
1305 
1306 	/*
1307 	 * New fields for task_struct should be added above here, so that
1308 	 * they are included in the randomized portion of task_struct.
1309 	 */
1310 	randomized_struct_fields_end
1311 
1312 	/* CPU-specific state of this task: */
1313 	struct thread_struct		thread;
1314 
1315 	/*
1316 	 * WARNING: on x86, 'thread_struct' contains a variable-sized
1317 	 * structure.  It *MUST* be at the end of 'task_struct'.
1318 	 *
1319 	 * Do not put anything below here!
1320 	 */
1321 };
1322 
1323 static inline struct pid *task_pid(struct task_struct *task)
1324 {
1325 	return task->thread_pid;
1326 }
1327 
1328 /*
1329  * the helpers to get the task's different pids as they are seen
1330  * from various namespaces
1331  *
1332  * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
1333  * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
1334  *                     current.
1335  * task_xid_nr_ns()  : id seen from the ns specified;
1336  *
1337  * see also pid_nr() etc in include/linux/pid.h
1338  */
1339 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
1340 
1341 static inline pid_t task_pid_nr(struct task_struct *tsk)
1342 {
1343 	return tsk->pid;
1344 }
1345 
1346 static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1347 {
1348 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1349 }
1350 
1351 static inline pid_t task_pid_vnr(struct task_struct *tsk)
1352 {
1353 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1354 }
1355 
1356 
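/*
 * Illustrative sketch (not part of the original header): for a task running
 * inside a PID namespace the two views can differ, e.g.
 *
 *   pid_t global = task_pid_nr(current);   // id as seen from the init ns
 *   pid_t local  = task_pid_vnr(current);  // id as seen from current's ns
 *
 * following the task_xid_nr()/task_xid_vnr() convention described above.
 */
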
1357 static inline pid_t task_tgid_nr(struct task_struct *tsk)
1358 {
1359 	return tsk->tgid;
1360 }
1361 
1362 /**
1363  * pid_alive - check that a task structure is not stale
1364  * @p: Task structure to be checked.
1365  *
1366  * Test if a process is not yet dead (at most zombie state).
1367  * If pid_alive fails, then pointers within the task structure
1368  * can be stale and must not be dereferenced.
1369  *
1370  * Return: 1 if the process is alive. 0 otherwise.
1371  */
1372 static inline int pid_alive(const struct task_struct *p)
1373 {
1374 	return p->thread_pid != NULL;
1375 }
1376 
1377 static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1378 {
1379 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1380 }
1381 
1382 static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1383 {
1384 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1385 }
1386 
1387 
1388 static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1389 {
1390 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1391 }
1392 
1393 static inline pid_t task_session_vnr(struct task_struct *tsk)
1394 {
1395 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1396 }
1397 
1398 static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1399 {
1400 	return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
1401 }
1402 
1403 static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1404 {
1405 	return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
1406 }
1407 
1408 static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1409 {
1410 	pid_t pid = 0;
1411 
1412 	rcu_read_lock();
1413 	if (pid_alive(tsk))
1414 		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1415 	rcu_read_unlock();
1416 
1417 	return pid;
1418 }
1419 
1420 static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1421 {
1422 	return task_ppid_nr_ns(tsk, &init_pid_ns);
1423 }
1424 
1425 /* Obsolete, do not use: */
1426 static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1427 {
1428 	return task_pgrp_nr_ns(tsk, &init_pid_ns);
1429 }
1430 
1431 #define TASK_REPORT_IDLE	(TASK_REPORT + 1)
1432 #define TASK_REPORT_MAX		(TASK_REPORT_IDLE << 1)
1433 
1434 static inline unsigned int task_state_index(struct task_struct *tsk)
1435 {
1436 	unsigned int tsk_state = READ_ONCE(tsk->state);
1437 	unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;
1438 
1439 	BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
1440 
1441 	if (tsk_state == TASK_IDLE)
1442 		state = TASK_REPORT_IDLE;
1443 
1444 	return fls(state);
1445 }
1446 
1447 static inline char task_index_to_char(unsigned int state)
1448 {
1449 	static const char state_char[] = "RSDTtXZPI";
1450 
1451 	BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);
1452 
1453 	return state_char[state];
1454 }
1455 
1456 static inline char task_state_to_char(struct task_struct *tsk)
1457 {
1458 	return task_index_to_char(task_state_index(tsk));
1459 }
1460 
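/*
 * Illustrative mapping (not part of the original header): task_state_index()
 * returns fls() of the reported bit and task_index_to_char() turns that into
 * the /proc state letter:
 *
 *   TASK_RUNNING         -> 0 -> 'R'    __TASK_TRACED -> 4 -> 't'
 *   TASK_INTERRUPTIBLE   -> 1 -> 'S'    EXIT_DEAD     -> 5 -> 'X'
 *   TASK_UNINTERRUPTIBLE -> 2 -> 'D'    EXIT_ZOMBIE   -> 6 -> 'Z'
 *   __TASK_STOPPED       -> 3 -> 'T'    TASK_PARKED   -> 7 -> 'P'
 *                                       TASK_IDLE     -> 8 -> 'I'
 */
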
1461 /**
1462  * is_global_init - check if a task structure is init. Since init
1463  * is free to have sub-threads, we need to check tgid.
1464  * @tsk: Task structure to be checked.
1465  *
1466  * Check if a task structure is the first user space task the kernel created.
1467  *
1468  * Return: 1 if the task structure is init. 0 otherwise.
1469  */
1470 static inline int is_global_init(struct task_struct *tsk)
1471 {
1472 	return task_tgid_nr(tsk) == 1;
1473 }
1474 
1475 extern struct pid *cad_pid;
1476 
1477 /*
1478  * Per process flags
1479  */
1480 #define PF_IDLE			0x00000002	/* I am an IDLE thread */
1481 #define PF_EXITING		0x00000004	/* Getting shut down */
1482 #define PF_VCPU			0x00000010	/* I'm a virtual CPU */
1483 #define PF_WQ_WORKER		0x00000020	/* I'm a workqueue worker */
1484 #define PF_FORKNOEXEC		0x00000040	/* Forked but didn't exec */
1485 #define PF_MCE_PROCESS		0x00000080      /* Process policy on mce errors */
1486 #define PF_SUPERPRIV		0x00000100	/* Used super-user privileges */
1487 #define PF_DUMPCORE		0x00000200	/* Dumped core */
1488 #define PF_SIGNALED		0x00000400	/* Killed by a signal */
1489 #define PF_MEMALLOC		0x00000800	/* Allocating memory */
1490 #define PF_NPROC_EXCEEDED	0x00001000	/* set_user() noticed that RLIMIT_NPROC was exceeded */
1491 #define PF_USED_MATH		0x00002000	/* If unset the fpu must be initialized before use */
1492 #define PF_NOFREEZE		0x00008000	/* This thread should not be frozen */
1493 #define PF_FROZEN		0x00010000	/* Frozen for system suspend */
1494 #define PF_KSWAPD		0x00020000	/* I am kswapd */
1495 #define PF_MEMALLOC_NOFS	0x00040000	/* All allocation requests will inherit GFP_NOFS */
1496 #define PF_MEMALLOC_NOIO	0x00080000	/* All allocation requests will inherit GFP_NOIO */
1497 #define PF_LESS_THROTTLE	0x00100000	/* Throttle me less: I clean memory */
1498 #define PF_KTHREAD		0x00200000	/* I am a kernel thread */
1499 #define PF_RANDOMIZE		0x00400000	/* Randomize virtual address space */
1500 #define PF_SWAPWRITE		0x00800000	/* Allowed to write to swap */
1501 #define PF_MEMSTALL		0x01000000	/* Stalled due to lack of memory */
1502 #define PF_UMH			0x02000000	/* I'm a usermodehelper process */
1503 #define PF_NO_SETAFFINITY	0x04000000	/* Userland is not allowed to meddle with cpus_mask */
1504 #define PF_MCE_EARLY		0x08000000      /* Early kill for mce process policy */
1505 #define PF_MEMALLOC_NOCMA	0x10000000	/* All allocation requests will have __GFP_MOVABLE cleared */
1506 #define PF_FREEZER_SKIP		0x40000000	/* Freezer should not count it as freezable */
1507 #define PF_SUSPEND_TASK		0x80000000      /* This thread called freeze_processes() and should not be frozen */
1508 
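/*
 * Illustrative check (not part of the original header): per the rule in the
 * comment below, other tasks may only read these flags, e.g.
 *
 *   bool is_kthread = (p->flags & PF_KTHREAD) != 0;
 */
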
1509 /*
1510  * Only the _current_ task can read/write to tsk->flags, but other
1511  * tasks can access tsk->flags in readonly mode for example
1512  * with tsk_used_math (like during threaded core dumping).
1513  * There is however an exception to this rule during ptrace
1514  * or during fork: the ptracer task is allowed to write to the
1515  * child->flags of its traced child (same goes for fork, the parent
1516  * can write to the child->flags), because we're guaranteed the
1517  * child is not running and in turn not changing child->flags
1518  * at the same time the parent does it.
1519  */
1520 #define clear_stopped_child_used_math(child)	do { (child)->flags &= ~PF_USED_MATH; } while (0)
1521 #define set_stopped_child_used_math(child)	do { (child)->flags |= PF_USED_MATH; } while (0)
1522 #define clear_used_math()			clear_stopped_child_used_math(current)
1523 #define set_used_math()				set_stopped_child_used_math(current)
1524 
1525 #define conditional_stopped_child_used_math(condition, child) \
1526 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1527 
1528 #define conditional_used_math(condition)	conditional_stopped_child_used_math(condition, current)
1529 
1530 #define copy_to_stopped_child_used_math(child) \
1531 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1532 
1533 /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
1534 #define tsk_used_math(p)			((p)->flags & PF_USED_MATH)
1535 #define used_math()				tsk_used_math(current)
1536 
1537 static __always_inline bool is_percpu_thread(void)
1538 {
1539 #ifdef CONFIG_SMP
1540 	return (current->flags & PF_NO_SETAFFINITY) &&
1541 		(current->nr_cpus_allowed  == 1);
1542 #else
1543 	return true;
1544 #endif
1545 }
1546 
1547 /* Per-process atomic flags. */
1548 #define PFA_NO_NEW_PRIVS		0	/* May not gain new privileges. */
1549 #define PFA_SPREAD_PAGE			1	/* Spread page cache over cpuset */
1550 #define PFA_SPREAD_SLAB			2	/* Spread some slab caches over cpuset */
1551 #define PFA_SPEC_SSB_DISABLE		3	/* Speculative Store Bypass disabled */
1552 #define PFA_SPEC_SSB_FORCE_DISABLE	4	/* Speculative Store Bypass force disabled*/
1553 #define PFA_SPEC_IB_DISABLE		5	/* Indirect branch speculation restricted */
1554 #define PFA_SPEC_IB_FORCE_DISABLE	6	/* Indirect branch speculation permanently restricted */
1555 #define PFA_SPEC_SSB_NOEXEC		7	/* Speculative Store Bypass clear on execve() */
1556 
1557 #define TASK_PFA_TEST(name, func)					\
1558 	static inline bool task_##func(struct task_struct *p)		\
1559 	{ return test_bit(PFA_##name, &p->atomic_flags); }
1560 
1561 #define TASK_PFA_SET(name, func)					\
1562 	static inline void task_set_##func(struct task_struct *p)	\
1563 	{ set_bit(PFA_##name, &p->atomic_flags); }
1564 
1565 #define TASK_PFA_CLEAR(name, func)					\
1566 	static inline void task_clear_##func(struct task_struct *p)	\
1567 	{ clear_bit(PFA_##name, &p->atomic_flags); }
1568 
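/*
 * Illustrative expansion (not part of the original header): the
 * TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs) instance below generates
 *
 *   static inline bool task_no_new_privs(struct task_struct *p)
 *   { return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags); }
 *
 * and the _SET/_CLEAR variants generate the matching setters and clearers.
 */
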
1569 TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
1570 TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
1571 
1572 TASK_PFA_TEST(SPREAD_PAGE, spread_page)
1573 TASK_PFA_SET(SPREAD_PAGE, spread_page)
1574 TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
1575 
1576 TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
1577 TASK_PFA_SET(SPREAD_SLAB, spread_slab)
1578 TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
1579 
1580 TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
1581 TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
1582 TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
1583 
1584 TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1585 TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1586 TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1587 
1588 TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1589 TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1590 
1591 TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
1592 TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
1593 TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
1594 
1595 TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
1596 TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
1597 
1598 static inline void
1599 current_restore_flags(unsigned long orig_flags, unsigned long flags)
1600 {
1601 	current->flags &= ~flags;
1602 	current->flags |= orig_flags & flags;
1603 }
1604 
1605 extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
1606 extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
1607 #ifdef CONFIG_SMP
1608 extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
1609 extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
1610 #else
1611 static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
1612 {
1613 }
1614 static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
1615 {
1616 	if (!cpumask_test_cpu(0, new_mask))
1617 		return -EINVAL;
1618 	return 0;
1619 }
1620 #endif
1621 
1622 extern int yield_to(struct task_struct *p, bool preempt);
1623 extern void set_user_nice(struct task_struct *p, long nice);
1624 extern int task_prio(const struct task_struct *p);
1625 
1626 /**
1627  * task_nice - return the nice value of a given task.
1628  * @p: the task in question.
1629  *
1630  * Return: The nice value [ -20 ... 0 ... 19 ].
1631  */
1632 static inline int task_nice(const struct task_struct *p)
1633 {
1634 	return PRIO_TO_NICE((p)->static_prio);
1635 }
1636 
1637 extern int can_nice(const struct task_struct *p, const int nice);
1638 extern int task_curr(const struct task_struct *p);
1639 extern int idle_cpu(int cpu);
1640 extern int available_idle_cpu(int cpu);
1641 extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
1642 extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
1643 extern int sched_setattr(struct task_struct *, const struct sched_attr *);
1644 extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
1645 extern struct task_struct *idle_task(int cpu);
1646 
1647 /**
1648  * is_idle_task - is the specified task an idle task?
1649  * @p: the task in question.
1650  *
1651  * Return: 1 if @p is an idle task. 0 otherwise.
1652  */
1653 static inline bool is_idle_task(const struct task_struct *p)
1654 {
1655 	return !!(p->flags & PF_IDLE);
1656 }
1657 
1658 extern struct task_struct *curr_task(int cpu);
1659 extern void ia64_set_curr_task(int cpu, struct task_struct *p);
1660 
1661 void yield(void);
1662 
1663 union thread_union {
1664 #ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK
1665 	struct task_struct task;
1666 #endif
1667 #ifndef CONFIG_THREAD_INFO_IN_TASK
1668 	struct thread_info thread_info;
1669 #endif
1670 	unsigned long stack[THREAD_SIZE/sizeof(long)];
1671 };
1672 
1673 #ifndef CONFIG_THREAD_INFO_IN_TASK
1674 extern struct thread_info init_thread_info;
1675 #endif
1676 
1677 extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];
1678 
1679 #ifdef CONFIG_THREAD_INFO_IN_TASK
1680 static inline struct thread_info *task_thread_info(struct task_struct *task)
1681 {
1682 	return &task->thread_info;
1683 }
1684 #elif !defined(__HAVE_THREAD_FUNCTIONS)
1685 # define task_thread_info(task)	((struct thread_info *)(task)->stack)
1686 #endif
1687 
1688 /*
1689  * find a task by one of its numerical ids
1690  *
1691  * find_task_by_pid_ns():
1692  *      finds a task by its pid in the specified namespace
1693  * find_task_by_vpid():
1694  *      finds a task by its virtual pid
1695  *
1696  * see also find_vpid() etc. in include/linux/pid.h
1697  */
1698 
1699 extern struct task_struct *find_task_by_vpid(pid_t nr);
1700 extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);
1701 
1702 /*
1703  * find a task by its virtual pid and get the task struct
1704  */
1705 extern struct task_struct *find_get_task_by_vpid(pid_t nr);
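
/*
 * Lookup sketch (hypothetical caller): find_task_by_vpid() returns a pointer
 * that is only stable under rcu_read_lock(); take a reference before dropping
 * the lock, which is exactly what find_get_task_by_vpid() bundles up.
 *
 *	struct task_struct *p;
 *
 *	rcu_read_lock();
 *	p = find_task_by_vpid(nr);
 *	if (p)
 *		get_task_struct(p);
 *	rcu_read_unlock();
 *	...
 *	if (p)
 *		put_task_struct(p);
 */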
1706 
1707 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
1708 extern int wake_up_process(struct task_struct *tsk);
1709 extern void wake_up_new_task(struct task_struct *tsk);
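
/*
 * Wakeup sketch (hypothetical waiter/waker pair, 'condition' and
 * 'waiter_task' are placeholders): the waiter publishes its state before
 * checking the condition, and the waker calls wake_up_process() only after
 * making the condition true.
 *
 *	waiter:
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *
 *	waker:
 *	condition = true;
 *	wake_up_process(waiter_task);
 */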
1710 
1711 #ifdef CONFIG_SMP
1712 extern void kick_process(struct task_struct *tsk);
1713 #else
1714 static inline void kick_process(struct task_struct *tsk) { }
1715 #endif
1716 
1717 extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
1718 
1719 static inline void set_task_comm(struct task_struct *tsk, const char *from)
1720 {
1721 	__set_task_comm(tsk, from, false);
1722 }
1723 
1724 extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
1725 #define get_task_comm(buf, tsk) ({			\
1726 	BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN);	\
1727 	__get_task_comm(buf, sizeof(buf), tsk);		\
1728 })
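
/*
 * Usage sketch (hypothetical caller): the BUILD_BUG_ON() above only works for
 * a real array, so pass a TASK_COMM_LEN buffer rather than a pointer.
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	get_task_comm(comm, tsk);
 *	pr_info("running on behalf of %s\n", comm);
 */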
1729 
1730 #ifdef CONFIG_SMP
1731 void scheduler_ipi(void);
1732 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
1733 #else
1734 static inline void scheduler_ipi(void) { }
1735 static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1736 {
1737 	return 1;
1738 }
1739 #endif
1740 
1741 /*
1742  * Set thread flags in another task's structure.
1743  * See asm/thread_info.h for the available TIF_xxxx flags:
1744  */
1745 static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
1746 {
1747 	set_ti_thread_flag(task_thread_info(tsk), flag);
1748 }
1749 
1750 static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
1751 {
1752 	clear_ti_thread_flag(task_thread_info(tsk), flag);
1753 }
1754 
1755 static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag,
1756 					  bool value)
1757 {
1758 	update_ti_thread_flag(task_thread_info(tsk), flag, value);
1759 }
1760 
1761 static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
1762 {
1763 	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
1764 }
1765 
1766 static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
1767 {
1768 	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
1769 }
1770 
1771 static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
1772 {
1773 	return test_ti_thread_flag(task_thread_info(tsk), flag);
1774 }
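
/*
 * Usage sketch (hypothetical caller): the wrappers simply forward to the
 * thread_info helpers, so they work on any task, not just current.
 *
 *	set_tsk_thread_flag(tsk, TIF_SIGPENDING);
 *	if (test_tsk_thread_flag(tsk, TIF_SIGPENDING))
 *		kick_process(tsk);
 */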
1775 
1776 static inline void set_tsk_need_resched(struct task_struct *tsk)
1777 {
1778 	set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
1779 }
1780 
1781 static inline void clear_tsk_need_resched(struct task_struct *tsk)
1782 {
1783 	clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
1784 }
1785 
1786 static inline int test_tsk_need_resched(struct task_struct *tsk)
1787 {
1788 	return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
1789 }
1790 
1791 /*
1792  * cond_resched() and cond_resched_lock(): latency reduction via
1793  * explicit rescheduling in places that are safe. The return
1794  * value indicates whether a reschedule was done in fact.
1795  * cond_resched_lock() will drop the spinlock before scheduling and then reacquire it.
1796  */
1797 #ifndef CONFIG_PREEMPTION
1798 extern int _cond_resched(void);
1799 #else
1800 static inline int _cond_resched(void) { return 0; }
1801 #endif
1802 
1803 #define cond_resched() ({			\
1804 	___might_sleep(__FILE__, __LINE__, 0);	\
1805 	_cond_resched();			\
1806 })
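
/*
 * Usage sketch (hypothetical caller, process_item()/items are placeholders):
 * a long kernel-space loop that never sleeps otherwise can call
 * cond_resched() each iteration; it only reschedules when one is pending.
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(&items[i]);
 *		cond_resched();
 *	}
 */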
1807 
1808 extern int __cond_resched_lock(spinlock_t *lock);
1809 
1810 #define cond_resched_lock(lock) ({				\
1811 	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
1812 	__cond_resched_lock(lock);				\
1813 })
1814 
1815 static inline void cond_resched_rcu(void)
1816 {
1817 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
1818 	rcu_read_unlock();
1819 	cond_resched();
1820 	rcu_read_lock();
1821 #endif
1822 }
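
/*
 * Usage sketch (hypothetical caller, examine()/table are placeholders): when
 * scanning a long RCU-protected table, cond_resched_rcu() briefly exits the
 * read-side critical section; any pointers obtained before the call are
 * stale afterwards, so re-dereference on the next iteration.
 *
 *	rcu_read_lock();
 *	for (i = 0; i < table_size; i++) {
 *		examine(rcu_dereference(table[i]));
 *		cond_resched_rcu();
 *	}
 *	rcu_read_unlock();
 */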
1823 
1824 /*
1825  * Does a critical section need to be broken due to another
1826  * task waiting? (This technically does not depend on CONFIG_PREEMPTION,
1827  * but expresses a general need for low latency.)
1828  */
1829 static inline int spin_needbreak(spinlock_t *lock)
1830 {
1831 #ifdef CONFIG_PREEMPTION
1832 	return spin_is_contended(lock);
1833 #else
1834 	return 0;
1835 #endif
1836 }
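
/*
 * Lock-break sketch (hypothetical caller, more_work()/do_one_step() are
 * placeholders): a long scan under a spinlock can voluntarily release and
 * re-take the lock when a contender shows up or a reschedule is due.
 *
 *	spin_lock(&lock);
 *	while (more_work()) {
 *		do_one_step();
 *		if (spin_needbreak(&lock) || need_resched()) {
 *			spin_unlock(&lock);
 *			cond_resched();
 *			spin_lock(&lock);
 *		}
 *	}
 *	spin_unlock(&lock);
 */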
1837 
1838 static __always_inline bool need_resched(void)
1839 {
1840 	return unlikely(tif_need_resched());
1841 }
1842 
1843 /*
1844  * Wrappers for p->thread_info->cpu access. No-op on UP.
1845  */
1846 #ifdef CONFIG_SMP
1847 
1848 static inline unsigned int task_cpu(const struct task_struct *p)
1849 {
1850 #ifdef CONFIG_THREAD_INFO_IN_TASK
1851 	return READ_ONCE(p->cpu);
1852 #else
1853 	return READ_ONCE(task_thread_info(p)->cpu);
1854 #endif
1855 }
1856 
1857 extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
1858 
1859 #else
1860 
1861 static inline unsigned int task_cpu(const struct task_struct *p)
1862 {
1863 	return 0;
1864 }
1865 
1866 static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
1867 {
1868 }
1869 
1870 #endif /* CONFIG_SMP */
1871 
1872 /*
1873  * In order to reduce various lock holder preemption latencies provide an
1874  * interface to see if a vCPU is currently running or not.
1875  *
1876  * This allows us to terminate optimistic spin loops and block, analogous to
1877  * the native optimistic spin heuristic of testing if the lock owner task is
1878  * running or not.
1879  */
1880 #ifndef vcpu_is_preempted
1881 static inline bool vcpu_is_preempted(int cpu)
1882 {
1883 	return false;
1884 }
1885 #endif
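
/*
 * Spin-wait sketch (hypothetical caller, 'owner' is a placeholder task):
 * optimistically spinning on a lock owner is only worthwhile while the owner
 * is on a CPU and that (virtual) CPU has not been preempted by a hypervisor.
 *
 *	while (owner->on_cpu && !vcpu_is_preempted(task_cpu(owner))) {
 *		if (need_resched())
 *			break;
 *		cpu_relax();
 *	}
 */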
1886 
1887 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
1888 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
1889 
1890 #ifndef TASK_SIZE_OF
1891 #define TASK_SIZE_OF(tsk)	TASK_SIZE
1892 #endif
1893 
1894 #ifdef CONFIG_RSEQ
1895 
1896 /*
1897  * Map the event mask onto the user-space ABI enum rseq_cs_flags
1898  * so it can be checked directly against those flags.
1899  */
1900 enum rseq_event_mask_bits {
1901 	RSEQ_EVENT_PREEMPT_BIT	= RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
1902 	RSEQ_EVENT_SIGNAL_BIT	= RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
1903 	RSEQ_EVENT_MIGRATE_BIT	= RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
1904 };
1905 
1906 enum rseq_event_mask {
1907 	RSEQ_EVENT_PREEMPT	= (1U << RSEQ_EVENT_PREEMPT_BIT),
1908 	RSEQ_EVENT_SIGNAL	= (1U << RSEQ_EVENT_SIGNAL_BIT),
1909 	RSEQ_EVENT_MIGRATE	= (1U << RSEQ_EVENT_MIGRATE_BIT),
1910 };
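
/*
 * Checking sketch (assumption about intended use, 'cs_flags' is a
 * placeholder for the critical section's RSEQ_CS_FLAG_* value): because the
 * event bits line up with the RSEQ_CS_FLAG_NO_RESTART_ON_* bits, deciding
 * whether a recorded event still requires a restart is a single mask test.
 *
 *	if (t->rseq_event_mask & ~cs_flags)
 *		restart_needed = true;
 */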
1911 
1912 static inline void rseq_set_notify_resume(struct task_struct *t)
1913 {
1914 	if (t->rseq)
1915 		set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
1916 }
1917 
1918 void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
1919 
1920 static inline void rseq_handle_notify_resume(struct ksignal *ksig,
1921 					     struct pt_regs *regs)
1922 {
1923 	if (current->rseq)
1924 		__rseq_handle_notify_resume(ksig, regs);
1925 }
1926 
1927 static inline void rseq_signal_deliver(struct ksignal *ksig,
1928 				       struct pt_regs *regs)
1929 {
1930 	preempt_disable();
1931 	__set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
1932 	preempt_enable();
1933 	rseq_handle_notify_resume(ksig, regs);
1934 }
1935 
1936 /* rseq_preempt() requires preemption to be disabled. */
1937 static inline void rseq_preempt(struct task_struct *t)
1938 {
1939 	__set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
1940 	rseq_set_notify_resume(t);
1941 }
1942 
1943 /* rseq_migrate() requires preemption to be disabled. */
1944 static inline void rseq_migrate(struct task_struct *t)
1945 {
1946 	__set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
1947 	rseq_set_notify_resume(t);
1948 }
1949 
1950 /*
1951  * If the parent process has a registered restartable sequences area, the
1952  * child inherits it. Unregister rseq for a clone with CLONE_VM set.
1953  */
1954 static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
1955 {
1956 	if (clone_flags & CLONE_VM) {
1957 		t->rseq = NULL;
1958 		t->rseq_sig = 0;
1959 		t->rseq_event_mask = 0;
1960 	} else {
1961 		t->rseq = current->rseq;
1962 		t->rseq_sig = current->rseq_sig;
1963 		t->rseq_event_mask = current->rseq_event_mask;
1964 	}
1965 }
1966 
1967 static inline void rseq_execve(struct task_struct *t)
1968 {
1969 	t->rseq = NULL;
1970 	t->rseq_sig = 0;
1971 	t->rseq_event_mask = 0;
1972 }
1973 
1974 #else
1975 
1976 static inline void rseq_set_notify_resume(struct task_struct *t)
1977 {
1978 }
1979 static inline void rseq_handle_notify_resume(struct ksignal *ksig,
1980 					     struct pt_regs *regs)
1981 {
1982 }
1983 static inline void rseq_signal_deliver(struct ksignal *ksig,
1984 				       struct pt_regs *regs)
1985 {
1986 }
1987 static inline void rseq_preempt(struct task_struct *t)
1988 {
1989 }
1990 static inline void rseq_migrate(struct task_struct *t)
1991 {
1992 }
1993 static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
1994 {
1995 }
1996 static inline void rseq_execve(struct task_struct *t)
1997 {
1998 }
1999 
2000 #endif
2001 
2002 void __exit_umh(struct task_struct *tsk);
2003 
2004 static inline void exit_umh(struct task_struct *tsk)
2005 {
2006 	if (unlikely(tsk->flags & PF_UMH))
2007 		__exit_umh(tsk);
2008 }
2009 
2010 #ifdef CONFIG_DEBUG_RSEQ
2011 
2012 void rseq_syscall(struct pt_regs *regs);
2013 
2014 #else
2015 
2016 static inline void rseq_syscall(struct pt_regs *regs)
2017 {
2018 }
2019 
2020 #endif
2021 
2022 const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq);
2023 char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len);
2024 int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq);
2025 
2026 const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq);
2027 const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq);
2028 const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq);
2029 
2030 int sched_trace_rq_cpu(struct rq *rq);
2031 
2032 const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
2033 
2034 #endif
2035